ESPHome 2026.2.3
Loading...
Searching...
No Matches
scheduler.h
Go to the documentation of this file.
1#pragma once
2
4#include <cstring>
5#include <memory>
6#include <string>
7#include <vector>
8#ifdef ESPHOME_THREAD_MULTI_ATOMICS
9#include <atomic>
10#endif
11
14
15namespace esphome {
16
17class Component;
18struct RetryArgs;
19
20// Forward declaration of retry_handler - needs to be non-static for friend declaration
21void retry_handler(const std::shared_ptr<RetryArgs> &args);
22
// Central timer scheduler for ESPHome: manages per-Component timeouts and
// intervals keyed by a static string, a hashed string, or a numeric ID
// (see NameType). Execution times are tracked in a 48-bit space (32-bit
// millis() low word + 16-bit rollover counter) so scheduling stays correct
// across millis() rollover. Thread-safety strategy is selected at compile
// time via the ESPHOME_THREAD_SINGLE / ESPHOME_THREAD_MULTI_ATOMICS /
// ESPHOME_THREAD_MULTI_NO_ATOMICS macros.
23class Scheduler {
24 // Allow retry_handler to access protected members for internal retry mechanism
25 friend void ::esphome::retry_handler(const std::shared_ptr<RetryArgs> &args);
26 // Allow DelayAction to call set_timer_common_ with skip_cancel=true for parallel script delays.
27 // This is needed to fix issue #10264 where parallel scripts with delays interfere with each other.
28 // We use friend instead of a public API because skip_cancel is dangerous - it can cause delays
29 // to accumulate and overload the scheduler if misused.
30 template<typename... Ts> friend class DelayAction;
31
32 public:
33 // std::string overload - deprecated, use const char* or uint32_t instead
34 // Remove before 2026.7.0
35 ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
36 void set_timeout(Component *component, const std::string &name, uint32_t timeout, std::function<void()> func);
37
46 void set_timeout(Component *component, const char *name, uint32_t timeout, std::function<void()> func);
48 void set_timeout(Component *component, uint32_t id, uint32_t timeout, std::function<void()> func);
50 void set_timeout(Component *component, InternalSchedulerID id, uint32_t timeout, std::function<void()> func) {
51 this->set_timer_common_(component, SchedulerItem::TIMEOUT, NameType::NUMERIC_ID_INTERNAL, nullptr,
52 static_cast<uint32_t>(id), timeout, std::move(func));
53 }
54
55 ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
56 bool cancel_timeout(Component *component, const std::string &name);
57 bool cancel_timeout(Component *component, const char *name);
58 bool cancel_timeout(Component *component, uint32_t id);
59 bool cancel_timeout(Component *component, InternalSchedulerID id) {
60 return this->cancel_item_(component, NameType::NUMERIC_ID_INTERNAL, nullptr, static_cast<uint32_t>(id),
61 SchedulerItem::TIMEOUT);
62 }
63
64 ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
65 void set_interval(Component *component, const std::string &name, uint32_t interval, std::function<void()> func);
66
75 void set_interval(Component *component, const char *name, uint32_t interval, std::function<void()> func);
77 void set_interval(Component *component, uint32_t id, uint32_t interval, std::function<void()> func);
79 void set_interval(Component *component, InternalSchedulerID id, uint32_t interval, std::function<void()> func) {
80 this->set_timer_common_(component, SchedulerItem::INTERVAL, NameType::NUMERIC_ID_INTERNAL, nullptr,
81 static_cast<uint32_t>(id), interval, std::move(func));
82 }
83
84 ESPDEPRECATED("Use const char* or uint32_t overload instead. Removed in 2026.7.0", "2026.1.0")
85 bool cancel_interval(Component *component, const std::string &name);
86 bool cancel_interval(Component *component, const char *name);
87 bool cancel_interval(Component *component, uint32_t id);
88 bool cancel_interval(Component *component, InternalSchedulerID id) {
89 return this->cancel_item_(component, NameType::NUMERIC_ID_INTERNAL, nullptr, static_cast<uint32_t>(id),
90 SchedulerItem::INTERVAL);
91 }
92
93 // Remove before 2026.8.0
94 ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
95 "2026.2.0")
96 void set_retry(Component *component, const std::string &name, uint32_t initial_wait_time, uint8_t max_attempts,
97 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
98 // Remove before 2026.8.0
99 ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
100 "2026.2.0")
101 void set_retry(Component *component, const char *name, uint32_t initial_wait_time, uint8_t max_attempts,
102 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
103 // Remove before 2026.8.0
104 ESPDEPRECATED("set_retry is deprecated and will be removed in 2026.8.0. Use set_timeout or set_interval instead.",
105 "2026.2.0")
106 void set_retry(Component *component, uint32_t id, uint32_t initial_wait_time, uint8_t max_attempts,
107 std::function<RetryResult(uint8_t)> func, float backoff_increase_factor = 1.0f);
108
109 // Remove before 2026.8.0
110 ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
111 bool cancel_retry(Component *component, const std::string &name);
112 // Remove before 2026.8.0
113 ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
114 bool cancel_retry(Component *component, const char *name);
115 // Remove before 2026.8.0
116 ESPDEPRECATED("cancel_retry is deprecated and will be removed in 2026.8.0.", "2026.2.0")
117 bool cancel_retry(Component *component, uint32_t id);
118
119 // Calculate when the next scheduled item should run
120 // @param now Fresh timestamp from millis() - must not be stale/cached
121 // Returns the time in milliseconds until the next scheduled item, or nullopt if no items
122 // This method performs cleanup of removed items before checking the schedule
123 // IMPORTANT: This method should only be called from the main thread (loop task).
124 optional<uint32_t> next_schedule_in(uint32_t now);
125
126 // Execute all scheduled items that are ready
127 // @param now Fresh timestamp from millis() - must not be stale/cached
128 void call(uint32_t now);
129
// Move newly scheduled items from the to_add_ staging vector into the active
// item set. NOTE(review): assumed to be invoked once per main-loop iteration;
// confirm the exact call site and heap re-ordering behavior in scheduler.cpp.
130 void process_to_add();
131
132 // Name storage type discriminator for SchedulerItem
133 // Used to distinguish between static strings, hashed strings, numeric IDs, and internal numeric IDs
134 enum class NameType : uint8_t {
135 STATIC_STRING = 0, // const char* pointer to static/flash storage
136 HASHED_STRING = 1, // uint32_t FNV-1a hash of a runtime string
137 NUMERIC_ID = 2, // uint32_t numeric identifier (component-level)
138 NUMERIC_ID_INTERNAL = 3 // uint32_t numeric identifier (core/internal, separate namespace)
139 };
140
141 protected:
142 struct SchedulerItem {
143 // Ordered by size to minimize padding
144 Component *component;
145 // Optimized name storage using tagged union - zero heap allocation
146 union {
147 const char *static_name; // For STATIC_STRING (string literals, no allocation)
148 uint32_t hash_or_id; // For HASHED_STRING or NUMERIC_ID
149 } name_;
150 uint32_t interval;
151 // Split time to handle millis() rollover. The scheduler combines the 32-bit millis()
152 // with a 16-bit rollover counter to create a 48-bit time space (using 32+16 bits).
153 // This is intentionally limited to 48 bits, not stored as a full 64-bit value.
154 // With 49.7 days per 32-bit rollover, the 16-bit counter supports
155 // 49.7 days × 65536 = ~8900 years. This ensures correct scheduling
156 // even when devices run for months. Split into two fields for better memory
157 // alignment on 32-bit systems.
158 uint32_t next_execution_low_; // Lower 32 bits of execution time (millis value)
159 std::function<void()> callback;
160 uint16_t next_execution_high_; // Upper 16 bits (millis_major counter)
161
162#ifdef ESPHOME_THREAD_MULTI_ATOMICS
163 // Multi-threaded with atomics: use atomic for lock-free access
164 // Place atomic<bool> separately since it can't be packed with bit fields
165 std::atomic<bool> remove{false};
166
167 // Bit-packed fields (4 bits used, 4 bits padding in 1 byte)
168 enum Type : uint8_t { TIMEOUT, INTERVAL } type : 1;
169 NameType name_type_ : 2; // Discriminator for name_ union (0–3, see NameType enum)
170 bool is_retry : 1; // True if this is a retry timeout
171 // 4 bits padding
172#else
173 // Single-threaded or multi-threaded without atomics: can pack all fields together
174 // Bit-packed fields (5 bits used, 3 bits padding in 1 byte)
175 enum Type : uint8_t { TIMEOUT, INTERVAL } type : 1;
176 bool remove : 1;
177 NameType name_type_ : 2; // Discriminator for name_ union (0–3, see NameType enum)
178 bool is_retry : 1; // True if this is a retry timeout
179 // 3 bits padding
180#endif
181
182 // Constructor
183 SchedulerItem()
184 : component(nullptr),
185 interval(0),
186 next_execution_low_(0),
187 next_execution_high_(0),
188#ifdef ESPHOME_THREAD_MULTI_ATOMICS
189 // remove is initialized in the member declaration as std::atomic<bool>{false}
190 type(TIMEOUT),
191 name_type_(NameType::STATIC_STRING),
192 is_retry(false) {
193#else
194 type(TIMEOUT),
195 remove(false),
196 name_type_(NameType::STATIC_STRING),
197 is_retry(false) {
198#endif
199 name_.static_name = nullptr;
200 }
201
202 // Destructor - no dynamic memory to clean up
203 ~SchedulerItem() = default;
204
205 // Delete copy operations to prevent accidental copies
206 SchedulerItem(const SchedulerItem &) = delete;
207 SchedulerItem &operator=(const SchedulerItem &) = delete;
208
209 // Delete move operations: SchedulerItem objects are only managed via unique_ptr, never moved directly
210 SchedulerItem(SchedulerItem &&) = delete;
211 SchedulerItem &operator=(SchedulerItem &&) = delete;
212
213 // Helper to get the static name (only valid for STATIC_STRING type)
214 const char *get_name() const { return (name_type_ == NameType::STATIC_STRING) ? name_.static_name : nullptr; }
215
216 // Helper to get the hash or numeric ID (only valid for HASHED_STRING or NUMERIC_ID types)
217 uint32_t get_name_hash_or_id() const { return (name_type_ != NameType::STATIC_STRING) ? name_.hash_or_id : 0; }
218
219 // Helper to get the name type
220 NameType get_name_type() const { return name_type_; }
221
222 // Set name storage: for STATIC_STRING stores the pointer, for all other types stores hash_or_id.
223 // Both union members occupy the same offset, so only one store is needed.
224 void set_name(NameType type, const char *static_name, uint32_t hash_or_id) {
225 if (type == NameType::STATIC_STRING) {
226 name_.static_name = static_name;
227 } else {
228 name_.hash_or_id = hash_or_id;
229 }
230 name_type_ = type;
231 }
232
// Heap ordering predicate for items_ (see pop_raw_locked_, which pops the
// heap front). Defined in scheduler.cpp.
233 static bool cmp(const std::unique_ptr<SchedulerItem> &a, const std::unique_ptr<SchedulerItem> &b);
234
235 // Note: We use 48 bits total (32 + 16), stored in a 64-bit value for API compatibility.
236 // The upper 16 bits of the 64-bit value are always zero, which is fine since
237 // millis_major_ is also 16 bits and they must match.
238 constexpr uint64_t get_next_execution() const {
239 return (static_cast<uint64_t>(next_execution_high_) << 32) | next_execution_low_;
240 }
241
242 constexpr void set_next_execution(uint64_t value) {
243 next_execution_low_ = static_cast<uint32_t>(value);
244 // Cast to uint16_t intentionally truncates to lower 16 bits of the upper 32 bits.
245 // This is correct because millis_major_ that creates these values is also 16 bits.
246 next_execution_high_ = static_cast<uint16_t>(value >> 32);
247 }
// Human-readable item type ("timeout"/"interval") for logging.
248 constexpr const char *get_type_str() const { return (type == TIMEOUT) ? "timeout" : "interval"; }
// Log source string of the owning component, or "unknown" when component is null.
249 const LogString *get_source() const { return component ? component->get_component_log_str() : LOG_STR("unknown"); }
250 };
251
252 // Common implementation for both timeout and interval
253 // name_type determines storage type: STATIC_STRING uses static_name, others use hash_or_id
254 void set_timer_common_(Component *component, SchedulerItem::Type type, NameType name_type, const char *static_name,
255 uint32_t hash_or_id, uint32_t delay, std::function<void()> func, bool is_retry = false,
256 bool skip_cancel = false);
257
258 // Common implementation for retry - Remove before 2026.8.0
259 // name_type determines storage type: STATIC_STRING uses static_name, others use hash_or_id
260#pragma GCC diagnostic push
261#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
262 void set_retry_common_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
263 uint32_t initial_wait_time, uint8_t max_attempts, std::function<RetryResult(uint8_t)> func,
264 float backoff_increase_factor);
265#pragma GCC diagnostic pop
266 // Common implementation for cancel_retry
267 bool cancel_retry_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id);
268
// Expand a fresh 32-bit millis() reading into the 48-bit scheduler time base
// (millis_major_ in the upper 16 bits). See the MEMORY-ORDERING NOTE on
// last_millis_/millis_major_ below; implementation lives in scheduler.cpp.
269 uint64_t millis_64_(uint32_t now);
270 // Cleanup logically deleted items from the scheduler
271 // Returns the number of items remaining after cleanup
272 // IMPORTANT: This method should only be called from the main thread (loop task).
273 size_t cleanup_();
274 // Remove and return the front item from the heap
275 // IMPORTANT: Caller must hold the scheduler lock before calling this function.
276 std::unique_ptr<SchedulerItem> pop_raw_locked_();
277 // Get or create a scheduler item from the pool
278 // IMPORTANT: Caller must hold the scheduler lock before calling this function.
279 std::unique_ptr<SchedulerItem> get_item_from_pool_locked_();
280
281 private:
282 // Helper to cancel items - must be called with lock held
283 // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
284 bool cancel_item_locked_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
285 SchedulerItem::Type type, bool match_retry = false);
286
287 // Common implementation for cancel operations - handles locking
288 bool cancel_item_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id,
289 SchedulerItem::Type type, bool match_retry = false);
290
291 // Helper to check if two static string names match
292 inline bool HOT names_match_static_(const char *name1, const char *name2) const {
293 // Check pointer equality first (common for static strings), then string contents
294 // The core ESPHome codebase uses static strings (const char*) for component names,
295 // making pointer comparison effective. The std::string overloads exist only for
296 // compatibility with external components but are rarely used in practice.
297 return (name1 != nullptr && name2 != nullptr) && ((name1 == name2) || (strcmp(name1, name2) == 0));
298 }
299
300 // Helper function to check if item matches criteria for cancellation
301 // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
302 // IMPORTANT: Must be called with scheduler lock held
303 inline bool HOT matches_item_locked_(const std::unique_ptr<SchedulerItem> &item, Component *component,
304 NameType name_type, const char *static_name, uint32_t hash_or_id,
305 SchedulerItem::Type type, bool match_retry, bool skip_removed = true) const {
306 // THREAD SAFETY: Check for nullptr first to prevent LoadProhibited crashes. On multi-threaded
307 // platforms, items can be moved out of defer_queue_ during processing, leaving nullptr entries.
308 // PR #11305 added nullptr checks in callers (mark_matching_items_removed_locked_() and
309 // has_cancelled_timeout_in_container_locked_()), but this check provides defense-in-depth: helper
310 // functions should be safe regardless of caller behavior.
311 // Fixes: https://github.com/esphome/esphome/issues/11940
312 if (!item)
313 return false;
314 if (item->component != component || item->type != type || (skip_removed && item->remove) ||
315 (match_retry && !item->is_retry)) {
316 return false;
317 }
318 // Name type must match
319 if (item->get_name_type() != name_type)
320 return false;
321 // For static strings, compare the string content; for hash/ID, compare the value
322 if (name_type == NameType::STATIC_STRING) {
323 return this->names_match_static_(item->get_name(), static_name);
324 }
325 return item->get_name_hash_or_id() == hash_or_id;
326 }
327
328 // Helper to execute a scheduler item
// Returns an updated 'now' timestamp that callers feed back into their loop
// (see process_defer_queue_()'s "now = this->execute_item_(...)" usage).
329 uint32_t execute_item_(SchedulerItem *item, uint32_t now);
330
331 // Helper to check if item should be skipped
// True when the item is marked removed or its owning component has failed.
332 bool should_skip_item_(SchedulerItem *item) const {
333 return is_item_removed_(item) || (item->component != nullptr && item->component->is_failed());
334 }
335
336 // Helper to recycle a SchedulerItem back to the pool.
337 // IMPORTANT: Only call from main loop context! Recycling clears the callback,
338 // so calling from another thread while the callback is executing causes use-after-free.
339 // IMPORTANT: Caller must hold the scheduler lock before calling this function.
340 void recycle_item_main_loop_(std::unique_ptr<SchedulerItem> item);
341
342 // Helper to perform full cleanup when too many items are cancelled
343 void full_cleanup_removed_items_();
344
345 // Helper to calculate random offset for interval timers - extracted to reduce code size of set_timer_common_
346 // IMPORTANT: Must not be inlined - called only for intervals, keeping it out of the hot path saves flash.
347 uint32_t __attribute__((noinline)) calculate_interval_offset_(uint32_t delay);
348
349 // Helper to check if a retry was already cancelled - extracted to reduce code size of set_timer_common_
350 // Remove before 2026.8.0 along with all retry code.
351 // IMPORTANT: Must not be inlined - retry path is cold and deprecated.
352 // IMPORTANT: Caller must hold the scheduler lock before calling this function.
353 bool __attribute__((noinline))
354 is_retry_cancelled_locked_(Component *component, NameType name_type, const char *static_name, uint32_t hash_or_id);
355
356#ifdef ESPHOME_DEBUG_SCHEDULER
357 // Helper for debug logging in set_timer_common_ - extracted to reduce code size
358 void debug_log_timer_(const SchedulerItem *item, NameType name_type, const char *static_name, uint32_t hash_or_id,
359 SchedulerItem::Type type, uint32_t delay, uint64_t now);
360#endif /* ESPHOME_DEBUG_SCHEDULER */
361
362#ifndef ESPHOME_THREAD_SINGLE
363 // Helper to process defer queue - inline for performance in hot path
364 inline void process_defer_queue_(uint32_t &now) {
365 // Process defer queue first to guarantee FIFO execution order for deferred items.
366 // Previously, defer() used the heap which gave undefined order for equal timestamps,
367 // causing race conditions on multi-core systems (ESP32, BK7200).
368 // With the defer queue:
369 // - Deferred items (delay=0) go directly to defer_queue_ in set_timer_common_
370 // - Items execute in exact order they were deferred (FIFO guarantee)
371 // - No deferred items exist in to_add_, so processing order doesn't affect correctness
372 // Single-core platforms don't use this queue and fall back to the heap-based approach.
373 //
374 // Note: Items cancelled via cancel_item_locked_() are marked with remove=true but still
375 // processed here. They are skipped during execution by should_skip_item_().
376 // This is intentional - no memory leak occurs.
377 //
378 // We use an index (defer_queue_front_) to track the read position instead of calling
379 // erase() on every pop, which would be O(n). The queue is processed once per loop -
380 // any items added during processing are left for the next loop iteration.
381
382 // Snapshot the queue end point - only process items that existed at loop start
383 // Items added during processing (by callbacks or other threads) run next loop
384 // No lock needed: single consumer (main loop), stale read just means we process less this iteration
385 size_t defer_queue_end = this->defer_queue_.size();
386
387 while (this->defer_queue_front_ < defer_queue_end) {
388 std::unique_ptr<SchedulerItem> item;
389 {
390 LockGuard lock(this->lock_);
391 // SAFETY: Moving out the unique_ptr leaves a nullptr in the vector at defer_queue_front_.
392 // This is intentional and safe because:
393 // 1. The vector is only cleaned up by cleanup_defer_queue_locked_() at the end of this function
394 // 2. Any code iterating defer_queue_ MUST check for nullptr items (see mark_matching_items_removed_locked_
395 // and has_cancelled_timeout_in_container_locked_ in scheduler.h)
396 // 3. The lock protects concurrent access, but the nullptr remains until cleanup
397 item = std::move(this->defer_queue_[this->defer_queue_front_]);
398 this->defer_queue_front_++;
399 }
400
401 // Execute callback without holding lock to prevent deadlocks
402 // if the callback tries to call defer() again
403 if (!this->should_skip_item_(item.get())) {
404 now = this->execute_item_(item.get(), now);
405 }
406 // Recycle the defer item after execution
407 {
408 LockGuard lock(this->lock_);
409 this->recycle_item_main_loop_(std::move(item));
410 }
411 }
412
413 // If we've consumed all items up to the snapshot point, clean up the dead space
414 // Single consumer (main loop), so no lock needed for this check
415 if (this->defer_queue_front_ >= defer_queue_end) {
416 LockGuard lock(this->lock_);
417 this->cleanup_defer_queue_locked_();
418 }
419 }
420
421 // Helper to cleanup defer_queue_ after processing
422 // IMPORTANT: Caller must hold the scheduler lock before calling this function.
423 inline void cleanup_defer_queue_locked_() {
424 // Check if new items were added by producers during processing
425 if (this->defer_queue_front_ >= this->defer_queue_.size()) {
426 // Common case: no new items - clear everything
427 this->defer_queue_.clear();
428 } else {
429 // Rare case: new items were added during processing - compact the vector
430 // This only happens when:
431 // 1. A deferred callback calls defer() again, or
432 // 2. Another thread calls defer() while we're processing
433 //
434 // Move unprocessed items (added during this loop) to the front for next iteration
435 //
436 // SAFETY: Compacted items may include cancelled items (marked for removal via
437 // cancel_item_locked_() during execution). This is safe because should_skip_item_()
438 // checks is_item_removed_() before executing, so cancelled items will be skipped
439 // and recycled on the next loop iteration.
440 size_t remaining = this->defer_queue_.size() - this->defer_queue_front_;
441 for (size_t i = 0; i < remaining; i++) {
442 this->defer_queue_[i] = std::move(this->defer_queue_[this->defer_queue_front_ + i]);
443 }
444 // Use erase() instead of resize() to avoid instantiating _M_default_append
445 // (saves ~156 bytes flash). Erasing from the end is O(1) - no shifting needed.
446 this->defer_queue_.erase(this->defer_queue_.begin() + remaining, this->defer_queue_.end());
447 }
448 this->defer_queue_front_ = 0;
449 }
450#endif /* not ESPHOME_THREAD_SINGLE */
451
452 // Helper to check if item is marked for removal (platform-specific)
453 // Returns true if item should be skipped, handles platform-specific synchronization
454 // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
455 // function.
456 bool is_item_removed_(SchedulerItem *item) const {
457#ifdef ESPHOME_THREAD_MULTI_ATOMICS
458 // Multi-threaded with atomics: use atomic load for lock-free access
459 return item->remove.load(std::memory_order_acquire);
460#else
461 // Single-threaded (ESPHOME_THREAD_SINGLE) or
462 // multi-threaded without atomics (ESPHOME_THREAD_MULTI_NO_ATOMICS): direct read
463 // For ESPHOME_THREAD_MULTI_NO_ATOMICS, caller MUST hold lock!
464 return item->remove;
465#endif
466 }
467
468 // Helper to set item removal flag (platform-specific)
469 // For ESPHOME_THREAD_MULTI_NO_ATOMICS platforms, the caller must hold the scheduler lock before calling this
470 // function. Uses memory_order_release when setting to true (for cancellation synchronization),
471 // and memory_order_relaxed when setting to false (for initialization).
472 void set_item_removed_(SchedulerItem *item, bool removed) {
473#ifdef ESPHOME_THREAD_MULTI_ATOMICS
474 // Multi-threaded with atomics: use atomic store with appropriate ordering
475 // Release ordering when setting to true ensures cancellation is visible to other threads
476 // Relaxed ordering when setting to false is sufficient for initialization
477 item->remove.store(removed, removed ? std::memory_order_release : std::memory_order_relaxed);
478#else
479 // Single-threaded (ESPHOME_THREAD_SINGLE) or
480 // multi-threaded without atomics (ESPHOME_THREAD_MULTI_NO_ATOMICS): direct write
481 // For ESPHOME_THREAD_MULTI_NO_ATOMICS, caller MUST hold lock!
482 item->remove = removed;
483#endif
484 }
485
486 // Helper to mark matching items in a container as removed
487 // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
488 // Returns the number of items marked for removal
489 // IMPORTANT: Must be called with scheduler lock held
490 template<typename Container>
491 size_t mark_matching_items_removed_locked_(Container &container, Component *component, NameType name_type,
492 const char *static_name, uint32_t hash_or_id, SchedulerItem::Type type,
493 bool match_retry) {
494 size_t count = 0;
495 for (auto &item : container) {
496 // Skip nullptr items (can happen in defer_queue_ when items are being processed)
497 // The defer_queue_ uses index-based processing: items are std::moved out but left in the
498 // vector as nullptr until cleanup. Even though this function is called with lock held,
499 // the vector can still contain nullptr items from the processing loop. This check prevents crashes.
500 if (!item)
501 continue;
502 if (this->matches_item_locked_(item, component, name_type, static_name, hash_or_id, type, match_retry)) {
503 this->set_item_removed_(item.get(), true);
504 count++;
505 }
506 }
507 return count;
508 }
509
510 // Template helper to check if any item in a container matches our criteria
511 // name_type determines matching: STATIC_STRING uses static_name, others use hash_or_id
512 // IMPORTANT: Must be called with scheduler lock held
513 template<typename Container>
514 bool has_cancelled_timeout_in_container_locked_(const Container &container, Component *component, NameType name_type,
515 const char *static_name, uint32_t hash_or_id,
516 bool match_retry) const {
517 for (const auto &item : container) {
518 // Skip nullptr items (can happen in defer_queue_ when items are being processed)
519 // The defer_queue_ uses index-based processing: items are std::moved out but left in the
520 // vector as nullptr until cleanup. If this function is called during defer queue processing,
521 // it will iterate over these nullptr items. This check prevents crashes.
522 if (!item)
523 continue;
524 if (is_item_removed_(item.get()) &&
525 this->matches_item_locked_(item, component, name_type, static_name, hash_or_id, SchedulerItem::TIMEOUT,
526 match_retry, /* skip_removed= */ false)) {
527 return true;
528 }
529 }
530 return false;
531 }
532
// The scheduler lock. All *_locked_ helpers above require it to be held by
// the caller; non-_locked_ entry points acquire it internally via LockGuard.
533 Mutex lock_;
// Active scheduler items, maintained as a heap ordered by SchedulerItem::cmp
// (pop_raw_locked_ removes the front item).
534 std::vector<std::unique_ptr<SchedulerItem>> items_;
// Newly scheduled items staged here; presumably drained into items_ by
// process_to_add() — confirm in scheduler.cpp.
535 std::vector<std::unique_ptr<SchedulerItem>> to_add_;
536#ifndef ESPHOME_THREAD_SINGLE
537 // Single-core platforms don't need the defer queue and save ~32 bytes of RAM
538 // Using std::vector instead of std::deque avoids 512-byte chunked allocations
539 // Index tracking avoids O(n) erase() calls when draining the queue each loop
540 std::vector<std::unique_ptr<SchedulerItem>> defer_queue_; // FIFO queue for defer() calls
541 size_t defer_queue_front_{0}; // Index of first valid item in defer_queue_ (tracks consumed items)
542#endif /* ESPHOME_THREAD_SINGLE */
// Counter of logically-removed items awaiting cleanup (see cleanup_() and
// full_cleanup_removed_items_()). NOTE(review): exact bookkeeping rules live
// in scheduler.cpp.
543 uint32_t to_remove_{0};
544
545 // Memory pool for recycling SchedulerItem objects to reduce heap churn.
546 // Design decisions:
547 // - std::vector is used instead of a fixed array because many systems only need 1-2 scheduler items
548 // - The vector grows dynamically up to MAX_POOL_SIZE (5) only when needed, saving memory on simple setups
549 // - Pool size of 5 matches typical usage (2-4 timers) while keeping memory overhead low (~250 bytes on ESP32)
550 // - The pool significantly reduces heap fragmentation which is critical because heap allocation/deallocation
551 // can stall the entire system, causing timing issues and dropped events for any components that need
552 // to synchronize between tasks (see https://github.com/esphome/backlog/issues/52)
553 std::vector<std::unique_ptr<SchedulerItem>> scheduler_item_pool_;
554
555#ifdef ESPHOME_THREAD_MULTI_ATOMICS
556 /*
557 * Multi-threaded platforms with atomic support: last_millis_ needs atomic for lock-free updates
558 *
559 * MEMORY-ORDERING NOTE
560 * --------------------
561 * `last_millis_` and `millis_major_` form a single 64-bit timestamp split in half.
562 * Writers publish `last_millis_` with memory_order_release and readers use
563 * memory_order_acquire. This ensures that once a reader sees the new low word,
564 * it also observes the corresponding increment of `millis_major_`.
565 */
566 std::atomic<uint32_t> last_millis_{0};
567#else /* not ESPHOME_THREAD_MULTI_ATOMICS */
568 // Platforms without atomic support or single-threaded platforms
569 uint32_t last_millis_{0};
570#endif /* else ESPHOME_THREAD_MULTI_ATOMICS */
571
572 /*
573 * Upper 16 bits of the 64-bit millis counter. Incremented only while holding
574 * `lock_`; read concurrently. Atomic (relaxed) avoids a formal data race.
575 * Ordering relative to `last_millis_` is provided by its release store and the
576 * corresponding acquire loads.
577 */
578#ifdef ESPHOME_THREAD_MULTI_ATOMICS
579 std::atomic<uint16_t> millis_major_{0};
580#else /* not ESPHOME_THREAD_MULTI_ATOMICS */
581 uint16_t millis_major_{0};
582#endif /* else ESPHOME_THREAD_MULTI_ATOMICS */
583};
584
585} // namespace esphome
struct @65::@66 __attribute__
const Component * component
Definition component.cpp:37
uint16_t type
Providing packet encoding functions for exchanging data with a remote host.
Definition a01nyub.cpp:7
void retry_handler(const std::shared_ptr< RetryArgs > &args)
struct ESPDEPRECATED("Use std::index_sequence instead. Removed in 2026.6.0", "2025.12.0") seq
Definition automation.h:25