ESPHome 2026.4.3
Loading...
Searching...
No Matches
scheduler.h
Go to the documentation of this file.
1#pragma once
2
4#include <cstring>
5#include <string>
6#include <vector>
7#ifdef ESPHOME_THREAD_MULTI_ATOMICS
8#include <atomic>
9#endif
10
12#include "esphome/core/hal.h"
15
16namespace esphome {
17
// Forward declarations — full definitions live in component.h / scheduler.cpp.
class Component;
struct RetryArgs;

// Forward declaration of retry_handler - needs to be non-static (namespace scope)
// so Scheduler can name it in a friend declaration below.
void retry_handler(const std::shared_ptr<RetryArgs> &args);
23
// Central timer/interval/defer scheduler for ESPHome components.
//
// Thread-safety model (selected at compile time):
//  - ESPHOME_THREAD_SINGLE:          no locking needed; plain fields.
//  - ESPHOME_THREAD_MULTI_ATOMICS:   lock-free fast paths via std::atomic counters/flags.
//  - ESPHOME_THREAD_MULTI_NO_ATOMICS: fast paths disabled; all access under lock_.
// Items are identified by (component, name-or-id, type); names may be static
// C strings, FNV hashes of runtime strings, or numeric IDs (see NameType).
class Scheduler {
  // Allow retry_handler to access protected members for internal retry mechanism
  friend void ::esphome::retry_handler(const std::shared_ptr<RetryArgs> &args);
  // Allow DelayAction to call set_timer_common_ with skip_cancel=true for parallel script delays.
  // This is needed to fix issue #10264 where parallel scripts with delays interfere with each other.
  // We use friend instead of a public API because skip_cancel is dangerous - it can cause delays
  // to accumulate and overload the scheduler if misused.
  template<typename... Ts> friend class DelayAction;

 public:
  // std::string overload - deprecated, use const char* or uint32_t instead
  // Remove before 2026.7.0
  ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
  void set_timeout(Component *component, const std::string &name, uint32_t timeout, std::function<void()> &&func);

  // Schedule a one-shot timeout keyed by a static string name (must outlive the item).
  void set_timeout(Component *component, const char *name, uint32_t timeout, std::function<void()> &&func);
  // Schedule a one-shot timeout keyed by a component-level numeric id.
  void set_timeout(Component *component, uint32_t id, uint32_t timeout, std::function<void()> &&func);
  // Schedule a one-shot timeout keyed by a core-internal id (separate id namespace from
  // the component-level uint32_t overload — see NameType::NUMERIC_ID_INTERNAL).
  void set_timeout(Component *component, InternalSchedulerID id, uint32_t timeout, std::function<void()> &&func) {
    this->set_timer_common_(component, SchedulerItem::TIMEOUT, NameType::NUMERIC_ID_INTERNAL, nullptr,
                            static_cast<uint32_t>(id), timeout, std::move(func));
  }

  ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
  bool cancel_timeout(Component *component, const std::string &name);
  // Cancel a pending timeout; returns true if a matching item was found and marked removed.
  bool cancel_timeout(Component *component, const char *name);
  bool cancel_timeout(Component *component, uint32_t id);
  bool cancel_timeout(Component *component, InternalSchedulerID id) {
    return this->cancel_item_(component, NameType::NUMERIC_ID_INTERNAL, nullptr, static_cast<uint32_t>(id),
                              SchedulerItem::TIMEOUT);
  }

  ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
  void set_interval(Component *component, const std::string &name, uint32_t interval, std::function<void()> &&func);

  // Schedule a repeating interval keyed by a static string name (must outlive the item).
  void set_interval(Component *component, const char *name, uint32_t interval, std::function<void()> &&func);
  // Schedule a repeating interval keyed by a component-level numeric id.
  void set_interval(Component *component, uint32_t id, uint32_t interval, std::function<void()> &&func);
  // Schedule a repeating interval keyed by a core-internal id (separate id namespace).
  void set_interval(Component *component, InternalSchedulerID id, uint32_t interval, std::function<void()> &&func) {
    this->set_timer_common_(component, SchedulerItem::INTERVAL, NameType::NUMERIC_ID_INTERNAL, nullptr,
                            static_cast<uint32_t>(id), interval, std::move(func));
  }

  ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
  bool cancel_interval(Component *component, const std::string &name);
  // Cancel a pending interval; returns true if a matching item was found and marked removed.
  bool cancel_interval(Component *component, const char *name);
  bool cancel_interval(Component *component, uint32_t id);
  bool cancel_interval(Component *component, InternalSchedulerID id) {
    return this->cancel_item_(component, NameType::NUMERIC_ID_INTERNAL, nullptr, static_cast<uint32_t>(id),
                              SchedulerItem::INTERVAL);
  }

  // Remove before 2026.8.0
  ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
                "2026.2.0")
  void set_retry(Component *component, const std::string &name, uint32_t initial_wait_time, uint8_t max_attempts,
                 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
  // Remove before 2026.8.0
  ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
                "2026.2.0")
  void set_retry(Component *component, const char *name, uint32_t initial_wait_time, uint8_t max_attempts,
                 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
  // Remove before 2026.8.0
  ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
                "2026.2.0")
  void set_retry(Component *component, uint32_t id, uint32_t initial_wait_time, uint8_t max_attempts,
                 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);

  // Remove before 2026.8.0
  ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
  bool cancel_retry(Component *component, const std::string &name);
  // Remove before 2026.8.0
  ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
  bool cancel_retry(Component *component, const char *name);
  // Remove before 2026.8.0
  ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
  bool cancel_retry(Component *component, uint32_t id);

  // 64-bit milliseconds clock; delegates to the free function esphome::millis_64().
  uint64_t millis_64() { return esphome::millis_64(); }

  // Calculate when the next scheduled item should run.
  // @param now On ESP32, unused for 64-bit extension (native); on other platforms, extended to 64-bit via rollover.
  // Returns the time in milliseconds until the next scheduled item, or nullopt if no items.
  // This method performs cleanup of removed items before checking the schedule.
  // IMPORTANT: This method should only be called from the main thread (loop task).
  optional<uint32_t> next_schedule_in(uint32_t now);

  // Execute all scheduled items that are ready
  // @param now Fresh timestamp from millis() - must not be stale/cached
  // @return Timestamp of the last item that ran, or `now` unchanged if none ran.
  uint32_t call(uint32_t now);

  // Move items from to_add_ into the main heap.
  // IMPORTANT: This method should only be called from the main thread (loop task).
  // Inlined: the fast path (nothing to add) is just an atomic load / empty check.
  // The lock-free fast path uses to_add_count_ (atomic) or to_add_.empty()
  // (single-threaded). This is safe because the main loop is the only thread
  // that reads to_add_ without holding lock_; other threads may read it only
  // while holding the mutex (e.g. cancel_item_locked_).
  inline void HOT process_to_add() {
    if (this->to_add_empty_())
      return;
    this->process_to_add_slow_path_();
  }

  // Name storage type discriminator for SchedulerItem
  // Used to distinguish between static strings, hashed strings, numeric IDs, and internal numeric IDs
  enum class NameType : uint8_t {
    STATIC_STRING = 0,      // const char* pointer to static/flash storage
    HASHED_STRING = 1,      // uint32_t FNV-1a hash of a runtime string
    NUMERIC_ID = 2,         // uint32_t numeric identifier (component-level)
    NUMERIC_ID_INTERNAL = 3 // uint32_t numeric identifier (core/internal, separate namespace)
  };

 protected:
  struct SchedulerItem {
    // Ordered by size to minimize padding
    Component *component;
    // Optimized name storage using tagged union - zero heap allocation.
    // The active member is selected by name_type_ (see NameType).
    union {
      const char *static_name;  // For STATIC_STRING (string literals, no allocation)
      uint32_t hash_or_id;      // For HASHED_STRING or NUMERIC_ID
    } name_;
    uint32_t interval;
    // Split time to handle millis() rollover. The scheduler combines the 32-bit millis()
    // with a 16-bit rollover counter to create a 48-bit time space (using 32+16 bits).
    // This is intentionally limited to 48 bits, not stored as a full 64-bit value.
    // With 49.7 days per 32-bit rollover, the 16-bit counter supports
    // 49.7 days × 65536 = ~8900 years. This ensures correct scheduling
    // even when devices run for months. Split into two fields for better memory
    // alignment on 32-bit systems.
    uint32_t next_execution_low_;  // Lower 32 bits of execution time (millis value)
    std::function<void()> callback;
    uint16_t next_execution_high_;  // Upper 16 bits (millis_major counter)

#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Multi-threaded with atomics: use atomic uint8_t for lock-free access.
    // std::atomic<bool> is not used because GCC on Xtensa generates an indirect
    // function call for std::atomic<bool>::load() instead of inlining it.
    // std::atomic<uint8_t> inlines correctly on all platforms.
    std::atomic<uint8_t> remove{0};

    // Bit-packed fields (4 bits used, 4 bits padding in 1 byte)
    enum Type : uint8_t { TIMEOUT, INTERVAL } type : 1;
    NameType name_type_ : 2;  // Discriminator for name_ union (0–3, see NameType enum)
    bool is_retry : 1;        // True if this is a retry timeout
    // 4 bits padding
#else
    // Single-threaded or multi-threaded without atomics: can pack all fields together
    // Bit-packed fields (5 bits used, 3 bits padding in 1 byte)
    enum Type : uint8_t { TIMEOUT, INTERVAL } type : 1;
    bool remove : 1;
    NameType name_type_ : 2;  // Discriminator for name_ union (0–3, see NameType enum)
    bool is_retry : 1;        // True if this is a retry timeout
    // 3 bits padding
#endif

    // Constructor
    SchedulerItem()
        : component(nullptr),
          interval(0),
          next_execution_low_(0),
          next_execution_high_(0),
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
          // remove is initialized in the member declaration
          type(TIMEOUT),
          name_type_(NameType::STATIC_STRING),
          is_retry(false) {
#else
          type(TIMEOUT),
          remove(false),
          name_type_(NameType::STATIC_STRING),
          is_retry(false) {
#endif
      name_.static_name = nullptr;
    }

    // Destructor - no dynamic memory to clean up (callback's std::function handles its own)
    ~SchedulerItem() = default;

    // Delete copy operations to prevent accidental copies
    SchedulerItem(const SchedulerItem &) = delete;
    SchedulerItem &operator=(const SchedulerItem &) = delete;

    // Delete move operations: SchedulerItem objects are managed via raw pointers, never moved directly
    SchedulerItem(SchedulerItem &&) = delete;
    SchedulerItem &operator=(SchedulerItem &&) = delete;

    // Helper to get the static name (only valid for STATIC_STRING type)
    const char *get_name() const { return (name_type_ == NameType::STATIC_STRING) ? name_.static_name : nullptr; }

    // Helper to get the hash or numeric ID (only valid for HASHED_STRING or NUMERIC_ID types)
    uint32_t get_name_hash_or_id() const { return (name_type_ != NameType::STATIC_STRING) ? name_.hash_or_id : 0; }

    // Helper to get the name type
    NameType get_name_type() const { return name_type_; }

    // Set name storage: for STATIC_STRING stores the pointer, for all other types stores hash_or_id.
    // Both union members occupy the same offset, so only one store is needed.
    void set_name(NameType type, const char *static_name, uint32_t hash_or_id) {
      if (type == NameType::STATIC_STRING) {
        name_.static_name = static_name;
      } else {
        name_.hash_or_id = hash_or_id;
      }
      name_type_ = type;
    }

    // Heap-ordering comparator for the items_ min-heap (defined in scheduler.cpp).
    static bool cmp(SchedulerItem *a, SchedulerItem *b);

    // Note: We use 48 bits total (32 + 16), stored in a 64-bit value for API compatibility.
    // The upper 16 bits of the 64-bit value are always zero, which is fine since
    // millis_major_ is also 16 bits and they must match.
    constexpr uint64_t get_next_execution() const {
      return (static_cast<uint64_t>(next_execution_high_) << 32) | next_execution_low_;
    }

    constexpr void set_next_execution(uint64_t value) {
      next_execution_low_ = static_cast<uint32_t>(value);
      // Cast to uint16_t intentionally truncates to lower 16 bits of the upper 32 bits.
      // This is correct because millis_major_ that creates these values is also 16 bits.
      next_execution_high_ = static_cast<uint16_t>(value >> 32);
    }
    constexpr const char *get_type_str() const { return (type == TIMEOUT) ? "timeout" : "interval"; }
    const LogString *get_source() const { return component ? component->get_component_log_str() : LOG_STR("unknown"); }
  };

  // Common implementation for both timeout and interval
  // name_type determines storage type: STATIC_STRING uses static_name, others use hash_or_id
  void set_timer_common_(Component *component, SchedulerItem::Type type, NameType name_type, const char *static_name,
                         uint32_t hash_or_id, uint32_t delay, std::function<void()> &&func, bool is_retry = false,
                         bool skip_cancel = false);

  // Common implementation for retry - Remove before 2026.8.0
  // name_type determines storage type: STATIC_STRING uses static_name, others use hash_or_id
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
  void set_retry_common_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
                         uint32_t initial_wait_time, uint8_t max_attempts, std::function<RetryResult(uint8_t)> func,
                         float backoff_increase_factor);
#pragma GCC diagnostic pop
  // Common implementation for cancel_retry
  bool cancel_retry_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id);

  // Extend a 32-bit millis() value to 64-bit. Use when the caller already has a fresh now.
  // On platforms with native 64-bit time, ignores now and uses millis_64() directly.
  // On other platforms, extends now to 64-bit using rollover tracking.
  uint64_t millis_64_from_(uint32_t now) {
#ifdef USE_NATIVE_64BIT_TIME
    (void) now;
    return millis_64();
#else
    return Millis64Impl::compute(now);
#endif
  }
  // Cleanup logically deleted items from the scheduler
  // Returns true if items remain after cleanup
  // IMPORTANT: This method should only be called from the main thread (loop task).
  // Inlined: the fast path (nothing to remove) is just an atomic load + empty check.
  // Reading items_.empty() without the lock is safe here because only the main
  // loop thread structurally modifies items_ (push/pop/erase). Other threads may
  // iterate items_ and mark items removed under lock_, but never change the
  // vector's size or data pointer.
  inline bool HOT cleanup_() {
    if (this->to_remove_empty_())
      return !this->items_.empty();
    return this->cleanup_slow_path_();
  }
  // Slow path for cleanup_() when there are items to remove - defined in scheduler.cpp
  bool cleanup_slow_path_();
  // Slow path for process_to_add() when there are items to merge - defined in scheduler.cpp
  void process_to_add_slow_path_();
  // Remove and return the front item from the heap as a raw pointer.
  // Caller takes ownership and must either recycle or delete the item.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  SchedulerItem *pop_raw_locked_();
  // Get or create a scheduler item from the pool
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  SchedulerItem *get_item_from_pool_locked_();

 private:
  // Helper to cancel matching items - must be called with lock held.
  // When find_first=true, stops after the first match (used by set_timer_common_ where
  // the cancel-before-add invariant guarantees at most one match).
  // When find_first=false (default), cancels ALL matches (needed for DelayAction parallel
  // mode where skip_cancel=true allows multiple items with the same key).
  // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
  bool cancel_item_locked_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
                           SchedulerItem::Type type, bool match_retry = false, bool find_first = false);

  // Common implementation for cancel operations - handles locking
  bool cancel_item_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
                    SchedulerItem::Type type, bool match_retry = false);

  // Helper to check if two static string names match
  inline bool HOT names_match_static_(const char *name1, const char *name2) const {
    // Check pointer equality first (common for static strings), then string contents
    // The core ESPHome codebase uses static strings (const char*) for component names,
    // making pointer comparison effective. The std::string overloads exist only for
    // compatibility with external components but are rarely used in practice.
    return (name1 != nullptr && name2 != nullptr) && ((name1 == name2) || (strcmp(name1, name2) == 0));
  }

  // Helper function to check if item matches criteria for cancellation
  // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
  // IMPORTANT: Must be called with scheduler lock held
  inline bool HOT matches_item_locked_(SchedulerItem *item, Component *component, NameType name_type,
                                       const char *static_name, uint32_t hash_or_id, SchedulerItem::Type type,
                                       bool match_retry, bool skip_removed = true) const {
    // THREAD SAFETY: Check for nullptr first to prevent LoadProhibited crashes. On multi-threaded
    // platforms, items can be nulled in defer_queue_ during processing.
    // Fixes: https://github.com/esphome/esphome/issues/11940
    if (item == nullptr)
      return false;
    if (item->component != component || item->type != type || (skip_removed && this->is_item_removed_locked_(item)) ||
        (match_retry && !item->is_retry)) {
      return false;
    }
    // Name type must match
    if (item->get_name_type() != name_type)
      return false;
    // For static strings, compare the string content; for hash/ID, compare the value
    if (name_type == NameType::STATIC_STRING) {
      return this->names_match_static_(item->get_name(), static_name);
    }
    return item->get_name_hash_or_id() == hash_or_id;
  }

  // Helper to execute a scheduler item; returns an updated `now` timestamp.
  uint32_t execute_item_(SchedulerItem *item, uint32_t now);

  // Helper to check if item should be skipped (already removed, or its component failed)
  bool should_skip_item_(SchedulerItem *item) const {
    return is_item_removed_(item) || (item->component != nullptr && item->component->is_failed());
  }

  // Helper to recycle a SchedulerItem back to the pool.
  // Takes a raw pointer — caller transfers ownership. The item is either added to the
  // pool or deleted if the pool is full.
  // IMPORTANT: Only call from main loop context! Recycling clears the callback,
  // so calling from another thread while the callback is executing causes use-after-free.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  void recycle_item_main_loop_(SchedulerItem *item);

  // Helper to perform full cleanup when too many items are cancelled
  void full_cleanup_removed_items_();

  // Helper to calculate random offset for interval timers - extracted to reduce code size of set_timer_common_
  // IMPORTANT: Must not be inlined - called only for intervals, keeping it out of the hot path saves flash.
  uint32_t __attribute__((noinline)) calculate_interval_offset_(uint32_t delay);

  // Helper to check if a retry was already cancelled - extracted to reduce code size of set_timer_common_
  // Remove before 2026.8.0 along with all retry code.
  // IMPORTANT: Must not be inlined - retry path is cold and deprecated.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  bool __attribute__((noinline))
  is_retry_cancelled_locked_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id);

#ifdef ESPHOME_DEBUG_SCHEDULER
  // Helper for debug logging in set_timer_common_ - extracted to reduce code size
  void debug_log_timer_(const SchedulerItem *item, NameType name_type, const char *static_name, uint32_t hash_or_id,
                        SchedulerItem::Type type, uint32_t delay, uint64_t now);
#endif /* ESPHOME_DEBUG_SCHEDULER */

#ifndef ESPHOME_THREAD_SINGLE
  // Process defer queue for FIFO execution of deferred items.
  // IMPORTANT: This method should only be called from the main thread (loop task).
  // Inlined: the fast path (nothing deferred) is just an atomic load check.
  inline void HOT process_defer_queue_(uint32_t &now) {
    // Fast path: nothing to process, avoid lock entirely.
    // Worst case is a one-loop-iteration delay before newly deferred items are processed.
    if (this->defer_empty_())
      return;
    this->process_defer_queue_slow_path_(now);
  }

  // Slow path for process_defer_queue_() - defined in scheduler.cpp
  void process_defer_queue_slow_path_(uint32_t &now);

  // Helper to cleanup defer_queue_ after processing.
  // Keeps the common clear() path inline, outlines the rare compaction to keep
  // cold code out of the hot instruction cache lines.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  inline void cleanup_defer_queue_locked_() {
    // Check if new items were added by producers during processing
    if (this->defer_queue_front_ >= this->defer_queue_.size()) {
      // Common case: no new items - clear everything
      this->defer_queue_.clear();
    } else {
      // Rare case: new items were added during processing - outlined to keep cold code
      // out of the hot instruction cache lines
      this->compact_defer_queue_locked_();
    }
    this->defer_queue_front_ = 0;
  }

  // Cold path for compacting defer_queue_ when new items were added during processing.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  // IMPORTANT: Must not be inlined - rare path, outlined to keep it out of the hot instruction cache lines.
  void __attribute__((noinline)) compact_defer_queue_locked_();
#endif /* not ESPHOME_THREAD_SINGLE */

  // Helper to check if item is marked for removal (platform-specific)
  // Returns true if item should be skipped, handles platform-specific synchronization
  // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
  // function.
  bool is_item_removed_(SchedulerItem *item) const {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Multi-threaded with atomics: use atomic load for lock-free access
    return item->remove.load(std::memory_order_acquire);
#else
    // Single-threaded (ESPHOME_THREAD_SINGLE) or
    // multi-threaded without atomics (ESPHOME_THREAD_MULTI_NO_ATOMICS): direct read
    // For ESPHOME_THREAD_MULTI_NO_ATOMICS, caller MUST hold lock!
    return item->remove;
#endif
  }

  // Helper to check if item is marked for removal when lock is already held.
  // Uses relaxed ordering since the mutex provides all necessary synchronization.
  // IMPORTANT: Caller must hold the scheduler lock before calling this function.
  bool is_item_removed_locked_(SchedulerItem *item) const {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Lock already held - relaxed is sufficient, mutex provides ordering
    return item->remove.load(std::memory_order_relaxed);
#else
    return item->remove;
#endif
  }

  // Helper to set item removal flag (platform-specific)
  // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
  // function. Uses memory_order_release when setting to true (for cancellation synchronization),
  // and memory_order_relaxed when setting to false (for initialization).
  void set_item_removed_(SchedulerItem *item, bool removed) {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    // Multi-threaded with atomics: use atomic store with appropriate ordering
    // Release ordering when setting to true ensures cancellation is visible to other threads
    // Relaxed ordering when setting to false is sufficient for initialization
    item->remove.store(removed ? 1 : 0, removed ? std::memory_order_release : std::memory_order_relaxed);
#else
    // Single-threaded (ESPHOME_THREAD_SINGLE) or
    // multi-threaded without atomics (ESPHOME_THREAD_MULTI_NO_ATOMICS): direct write
    // For ESPHOME_THREAD_MULTI_NO_ATOMICS, caller MUST hold lock!
    item->remove = removed;
#endif
  }

  // Helper to mark matching items in a container as removed.
  // When find_first=true, stops after the first match (used by set_timer_common_ where
  // the cancel-before-add invariant guarantees at most one match).
  // When find_first=false, marks ALL matches (needed for public cancel path where
  // DelayAction parallel mode with skip_cancel=true can create multiple items with the same key).
  // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
  // Returns the number of items marked for removal.
  // IMPORTANT: Must be called with scheduler lock held
  // Inlined: the fast path (empty container) avoids calling the out-of-line scan.
  inline size_t HOT mark_matching_items_removed_locked_(std::vector<SchedulerItem *> &container, Component *component,
                                                        NameType name_type, const char *static_name,
                                                        uint32_t hash_or_id, SchedulerItem::Type type, bool match_retry,
                                                        bool find_first = false) {
    if (container.empty())
      return 0;
    return this->mark_matching_items_removed_slow_locked_(container, component, name_type, static_name, hash_or_id,
                                                          type, match_retry, find_first);
  }

  // Out-of-line slow path for mark_matching_items_removed_locked_ when container is non-empty.
  // IMPORTANT: Must be called with scheduler lock held
  __attribute__((noinline)) size_t mark_matching_items_removed_slow_locked_(
      std::vector<SchedulerItem *> &container, Component *component, NameType name_type, const char *static_name,
      uint32_t hash_or_id, SchedulerItem::Type type, bool match_retry, bool find_first);

  Mutex lock_;
  std::vector<SchedulerItem *> items_;   // min-heap of active items (main loop owns structure)
  std::vector<SchedulerItem *> to_add_;  // staging area for newly scheduled items

#ifndef ESPHOME_THREAD_SINGLE
  // Fast-path counter for process_to_add() to skip taking the lock when there is
  // nothing to add. Uses std::atomic on platforms that support it, plain uint32_t
  // otherwise. On non-atomic platforms, callers must hold the scheduler lock when
  // mutating this counter. Not needed on single-threaded platforms where we can
  // check to_add_.empty() directly.
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
  std::atomic<uint32_t> to_add_count_{0};
#else
  uint32_t to_add_count_{0};
#endif
#endif /* ESPHOME_THREAD_SINGLE */

  // Fast-path helper for process_to_add() to decide if it can try the lock-free path.
  // - On ESPHOME_THREAD_SINGLE: direct container check is safe (no concurrent writers).
  // - On ESPHOME_THREAD_MULTI_ATOMICS: performs a lock-free check via to_add_count_.
  // - On ESPHOME_THREAD_MULTI_NO_ATOMICS: always returns false to force the caller
  //   down the locked path; this is NOT a lock-free emptiness check on that platform.
  bool to_add_empty_() const {
#ifdef ESPHOME_THREAD_SINGLE
    return this->to_add_.empty();
#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
    return this->to_add_count_.load(std::memory_order_relaxed) == 0;
#else
    return false;
#endif
  }

  // Increment to_add_count_ (no-op on single-threaded platforms)
  void to_add_count_increment_() {
#ifdef ESPHOME_THREAD_SINGLE
    // No counter needed — to_add_empty_() checks the vector directly
#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
    this->to_add_count_.fetch_add(1, std::memory_order_relaxed);
#else
    this->to_add_count_++;
#endif
  }

  // Reset to_add_count_ (no-op on single-threaded platforms)
  void to_add_count_clear_() {
#ifdef ESPHOME_THREAD_SINGLE
    // No counter needed — to_add_empty_() checks the vector directly
#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
    this->to_add_count_.store(0, std::memory_order_relaxed);
#else
    this->to_add_count_ = 0;
#endif
  }

#ifndef ESPHOME_THREAD_SINGLE
  // Single-core platforms don't need the defer queue and save ~32 bytes of RAM
  // Using std::vector instead of std::deque avoids 512-byte chunked allocations
  // Index tracking avoids O(n) erase() calls when draining the queue each loop
  std::vector<SchedulerItem *> defer_queue_;  // FIFO queue for defer() calls
  size_t defer_queue_front_{0};  // Index of first valid item in defer_queue_ (tracks consumed items)

  // Fast-path counter for process_defer_queue_() to skip lock when nothing to process.
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
  std::atomic<uint32_t> defer_count_{0};
#else
  uint32_t defer_count_{0};
#endif

  bool defer_empty_() const {
    // defer_queue_ only exists on multi-threaded platforms, so no ESPHOME_THREAD_SINGLE path
    // ESPHOME_THREAD_MULTI_NO_ATOMICS: always take the lock
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    return this->defer_count_.load(std::memory_order_relaxed) == 0;
#else
    return false;
#endif
  }

  void defer_count_increment_() {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    this->defer_count_.fetch_add(1, std::memory_order_relaxed);
#else
    this->defer_count_++;
#endif
  }

  void defer_count_clear_() {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    this->defer_count_.store(0, std::memory_order_relaxed);
#else
    this->defer_count_ = 0;
#endif
  }

#endif /* ESPHOME_THREAD_SINGLE */

  // Counter for items marked for removal. Incremented cross-thread in cancel_item_locked_().
  // On ESPHOME_THREAD_MULTI_ATOMICS this is read without a lock in the cleanup_() fast path;
  // on ESPHOME_THREAD_MULTI_NO_ATOMICS the fast path is disabled so cleanup_() always takes the lock.
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
  std::atomic<uint32_t> to_remove_{0};
#else
  uint32_t to_remove_{0};
#endif

  // Lock-free check if there are items to remove (for fast-path in cleanup_)
  bool to_remove_empty_() const {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    return this->to_remove_.load(std::memory_order_relaxed) == 0;
#elif defined(ESPHOME_THREAD_SINGLE)
    return this->to_remove_ == 0;
#else
    return false;  // Always take the lock path
#endif
  }

  void to_remove_add_(uint32_t count) {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    this->to_remove_.fetch_add(count, std::memory_order_relaxed);
#else
    this->to_remove_ += count;
#endif
  }

  void to_remove_decrement_() {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    this->to_remove_.fetch_sub(1, std::memory_order_relaxed);
#else
    this->to_remove_--;
#endif
  }

  void to_remove_clear_() {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    this->to_remove_.store(0, std::memory_order_relaxed);
#else
    this->to_remove_ = 0;
#endif
  }

  uint32_t to_remove_count_() const {
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
    return this->to_remove_.load(std::memory_order_relaxed);
#else
    return this->to_remove_;
#endif
  }

  // Memory pool for recycling SchedulerItem objects to reduce heap churn.
  // Design decisions:
  // - std::vector is used instead of a fixed array because many systems only need 1-2 scheduler items
  // - The vector grows dynamically up to MAX_POOL_SIZE (5) only when needed, saving memory on simple setups
  // - Pool size of 5 matches typical usage (2-4 timers) while keeping memory overhead low (~250 bytes on ESP32)
  // - The pool significantly reduces heap fragmentation which is critical because heap allocation/deallocation
  //   can stall the entire system, causing timing issues and dropped events for any components that need
  //   to synchronize between tasks (see https://github.com/esphome/backlog/issues/52)
  std::vector<SchedulerItem *> scheduler_item_pool_;

#ifdef ESPHOME_DEBUG_SCHEDULER
  // Leak detection: tracks total live SchedulerItem allocations.
  // Invariant: debug_live_items_ == items_.size() + to_add_.size() + defer_queue_.size() + scheduler_item_pool_.size()
  // Verified periodically in call() to catch leaks early.
  size_t debug_live_items_{0};

  // Verify the scheduler memory invariant: all allocated items are accounted for.
  // Returns true if no leak detected. Logs an error and asserts on failure.
  bool debug_verify_no_leak_() const;
#endif
};
685
686} // namespace esphome
struct @65::@66 __attribute__
const Component * component
Definition component.cpp:34
uint16_t type
ESPDEPRECATED("Use modbus::helpers::value_type_is_float() instead. Removed in 2026.10.0", "2026.4.0") inline bool value_type_is_float(SensorValueType v)
Providing packet encoding functions for exchanging data with a remote host.
Definition a01nyub.cpp:7
void retry_handler(const std::shared_ptr< RetryArgs > &args)
const char int const __FlashStringHelper va_list args
Definition log.h:74
uint64_t HOT millis_64()
Definition core.cpp:27
static void uint32_t