ESPHome 2026.5.0b1
time_64.cpp

#ifndef USE_NATIVE_64BIT_TIME

#include "time_64.h"

#ifdef ESPHOME_DEBUG_SCHEDULER
#include "esphome/core/log.h"
#include <cinttypes>
#endif
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
#include <atomic>
#endif
#include <limits>

namespace esphome {

#ifdef ESPHOME_DEBUG_SCHEDULER
static const char *const TAG = "time_64";
#endif

#ifdef ESPHOME_THREAD_SINGLE
// Storage for Millis64Impl inline compute() — defined here so all TUs share one copy.
uint32_t Millis64Impl::last_millis{0};
uint16_t Millis64Impl::millis_major{0};
#else

uint64_t Millis64Impl::compute(uint32_t now) {
  // Half the 32-bit range - used to detect rollovers vs normal time progression
  static constexpr uint32_t HALF_MAX_UINT32 = std::numeric_limits<uint32_t>::max() / 2;
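  // Illustrative example (added for clarity, not part of the original source):
  // with now = 5 and last = 4294967290 (0xFFFFFFFA), last - now = 4294967285,
  // which exceeds HALF_MAX_UINT32 (2147483647), so the jump is classified as a
  // 32-bit rollover rather than a small backwards step between threads.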

  // State variables for rollover tracking - static to persist across calls
#ifdef ESPHOME_THREAD_MULTI_ATOMICS
  // Mutex for rollover serialization (taken only every ~49.7 days).
  // A spinlock would be smaller (~1 byte vs ~80-100 bytes) but is unsafe on
  // preemptive single-core RTOS platforms due to priority inversion: a high-priority
  // task spinning would prevent the lock holder from running to release it.
  static Mutex lock;
  /*
   * Multi-threaded platforms with atomic support: last_millis needs to be atomic for lock-free updates.
   * Writers publish last_millis with memory_order_release and readers use memory_order_acquire.
   * This ensures that once a reader sees the new low word, it also observes the corresponding
   * increment of millis_major.
   */
  static std::atomic<uint32_t> last_millis{0};
  /*
   * Upper 16 bits of the 64-bit millis counter. Incremented only while holding lock;
   * read concurrently. Atomic (relaxed) avoids a formal data race. Ordering relative
   * to last_millis is provided by its release store and the corresponding acquire loads.
   */
  static std::atomic<uint16_t> millis_major{0};
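  /*
   * Ordering sketch (added for clarity, not part of the original source):
   *   writer (rollover path): millis_major.fetch_add(1, relaxed); ... last_millis.store(now, release);
   *   reader:                 last = last_millis.load(acquire);   ... millis_major.load(...);
   * If the reader's acquire load observes the writer's release store, every write
   * sequenced before that store - including the millis_major increment - is
   * visible to the reader.
   */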
#else /* ESPHOME_THREAD_MULTI_NO_ATOMICS */
  static Mutex lock;
  static uint32_t last_millis{0};
  static uint16_t millis_major{0};
#endif

  // THREAD SAFETY NOTE:
  // This function has two out-of-line implementations, selected by preprocessor flags:
  // - ESPHOME_THREAD_MULTI_NO_ATOMICS - Runs on multi-threaded platforms without atomics (LibreTiny BK72xx)
  // - ESPHOME_THREAD_MULTI_ATOMICS - Runs on multi-threaded platforms with atomics (LibreTiny RTL87xx/LN882x, etc.)
  //
  // The ESPHOME_THREAD_SINGLE path is inlined in time_64.h.
  // If you edit this function, keep all three paths in sync.
  //
  // IMPORTANT: Always pass fresh millis() values to this function. The implementation
  // handles out-of-order timestamps between threads, but minimizing time differences
  // helps maintain accuracy.
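  //
  // Illustrative call pattern (added for clarity, not part of the original source;
  // assumes compute() is callable as shown):
  //   uint64_t t = Millis64Impl::compute(millis());  // fresh timestamp - good
  //   uint32_t cached = millis();
  //   /* ... long-running work ... */
  //   Millis64Impl::compute(cached);  // stale timestamp - widens apparent
  //                                   // cross-thread gaps; avoid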

#if defined(ESPHOME_THREAD_MULTI_NO_ATOMICS)
  // Without atomics, this implementation uses locks more aggressively:
  // 1. Always locks when near the rollover boundary (within 10 seconds)
  // 2. Always locks when detecting a large backwards jump
  // 3. Updates without lock in normal forward progression (accepting minor races)
  // This is less efficient but necessary without atomic operations.
  uint16_t major = __atomic_load_n(&millis_major, __ATOMIC_RELAXED);
  uint32_t last = __atomic_load_n(&last_millis, __ATOMIC_RELAXED);

  // Define a safe window around the rollover point (10 seconds)
  // This covers any reasonable scheduler delays or thread preemption
  static constexpr uint32_t ROLLOVER_WINDOW = 10000;  // 10 seconds in milliseconds

  // Check if we're near the rollover boundary (close to std::numeric_limits<uint32_t>::max() or just past 0)
  bool near_rollover = (last > (std::numeric_limits<uint32_t>::max() - ROLLOVER_WINDOW)) || (now < ROLLOVER_WINDOW);
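  // Example (added for clarity): with ROLLOVER_WINDOW = 10000, the window covers
  // last > 4294957295 (the final ~10 s before wraparound) or now < 10000 (the
  // first ~10 s after it), so all rollover handling funnels through the lock.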

  if (near_rollover || (now < last && (last - now) > HALF_MAX_UINT32)) {
    // Near rollover or detected a rollover - need lock for safety
    LockGuard guard{lock};
    // Re-read both values with lock held. last_millis can be updated
    // unlocked from the forward-progression branch below, so use an atomic
    // load. millis_major can only be updated under this lock, but another
    // thread may have completed a rollover between our unlocked loads above
    // and the lock acquisition — reload or we'd return a stale high word.
    last = __atomic_load_n(&last_millis, __ATOMIC_RELAXED);
    major = __atomic_load_n(&millis_major, __ATOMIC_RELAXED);

    if (now < last && (last - now) > HALF_MAX_UINT32) {
      // True rollover detected (happens every ~49.7 days).
      // Use the already-loaded `major` local; avoids a second read of the
      // global (equivalent under the held lock).
      major++;
      __atomic_store_n(&millis_major, major, __ATOMIC_RELAXED);
#ifdef ESPHOME_DEBUG_SCHEDULER
      ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
    }
    // Update last_millis while holding lock
    __atomic_store_n(&last_millis, now, __ATOMIC_RELAXED);
  } else if (now > last) {
    // Normal case: Not near rollover and time moved forward
    // Update without lock. While this may cause minor races (microseconds of
    // backwards time movement), they're acceptable because:
    // 1. The scheduler operates at millisecond resolution, not microsecond
    // 2. We've already prevented the critical rollover race condition
    // 3. Any backwards movement is orders of magnitude smaller than scheduler delays
    __atomic_store_n(&last_millis, now, __ATOMIC_RELAXED);
  }
  // If now <= last and we're not near rollover, don't update
  // This minimizes backwards time movement

  // Combine major (upper 16 bits, shifted into bits 32-47) and now (low 32 bits) into 64-bit time
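  // Example (added for clarity): after one rollover, major = 1 and now = 1000
  // yields (1ULL << 32) + 1000 = 4294968296 ms, so the 64-bit value keeps
  // increasing monotonically across the 32-bit wraparound.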
  return now + (static_cast<uint64_t>(major) << 32);

#elif defined(ESPHOME_THREAD_MULTI_ATOMICS)
  // Uses atomic operations with acquire/release semantics to ensure coherent
  // reads of millis_major and last_millis across cores. Features:
  // 1. Epoch-coherency retry loop to handle concurrent updates
  // 2. Lock only taken for actual rollover detection and update
  // 3. Lock-free CAS updates for normal forward time progression
  // 4. Memory ordering ensures cores see consistent time values
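  //
  // Retry scenario (added for clarity, not part of the original source): thread A
  // loads major = 0 and last, then thread B completes a rollover (millis_major
  // becomes 1 and last_millis wraps to a small value). A's final check sees
  // major_end = 1 != major and retries, re-reading a coherent (major, last)
  // pair instead of returning a value with a stale high word.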

  for (;;) {
    uint16_t major = millis_major.load(std::memory_order_acquire);

    /*
     * Acquire so that if we later decide **not** to take the lock we still
     * observe a millis_major value coherent with the loaded last_millis.
     * The acquire load ensures any later read of millis_major sees its
     * corresponding increment.
     */
    uint32_t last = last_millis.load(std::memory_order_acquire);

    // If we might be near a rollover (large backwards jump), take the lock
    // This ensures rollover detection and last_millis update are atomic together
    if (now < last && (last - now) > HALF_MAX_UINT32) {
      // Potential rollover - need lock for atomic rollover detection + update
      LockGuard guard{lock};
      // Re-read with lock held; mutex already provides ordering
      last = last_millis.load(std::memory_order_relaxed);

      if (now < last && (last - now) > HALF_MAX_UINT32) {
        // True rollover detected (happens every ~49.7 days)
        millis_major.fetch_add(1, std::memory_order_relaxed);
        major++;
#ifdef ESPHOME_DEBUG_SCHEDULER
        ESP_LOGD(TAG, "Detected true 32-bit rollover at %" PRIu32 "ms (was %" PRIu32 ")", now, last);
#endif /* ESPHOME_DEBUG_SCHEDULER */
      }
      /*
       * Update last_millis while holding the lock to prevent races.
       * Publish the new low word *after* bumping millis_major (done above)
       * so readers never see a mismatched pair.
       */
      last_millis.store(now, std::memory_order_release);
    } else {
      // Normal case: Try lock-free update, but only allow forward movement within same epoch
      // This prevents accidentally moving backwards across a rollover boundary
      while (now > last && (now - last) < HALF_MAX_UINT32) {
        if (last_millis.compare_exchange_weak(last, now,
                                              std::memory_order_release,    // success
                                              std::memory_order_relaxed)) {  // failure
          break;
        }
        // CAS failure means no data was published; relaxed is fine
        // last is automatically updated by compare_exchange_weak if it fails
      }
    }
    uint16_t major_end = millis_major.load(std::memory_order_relaxed);
    if (major_end == major)
      return now + (static_cast<uint64_t>(major) << 32);
  }
  // Unreachable - the loop always returns when major_end == major
  __builtin_unreachable();

#else
#error \
    "No platform threading model defined. One of ESPHOME_THREAD_SINGLE, ESPHOME_THREAD_MULTI_NO_ATOMICS, or ESPHOME_THREAD_MULTI_ATOMICS must be defined."
#endif
}

#endif  // !ESPHOME_THREAD_SINGLE

}  // namespace esphome

#endif  // !USE_NATIVE_64BIT_TIME