// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.


// Windows Timer Primer
//
// A good article: http://www.ddj.com/windows/184416651
// A good mozilla bug: http://bugzilla.mozilla.org/show_bug.cgi?id=363258
//
// The default windows timer, GetSystemTimeAsFileTime, is not very precise.
// It is only good to ~15.5ms.
//
// QueryPerformanceCounter is the logical choice for a high-precision timer.
// However, it is known to be buggy on some hardware.  Specifically, it can
// sometimes "jump".  On laptops, QPC can also be very expensive to call.
// It's 3-4x slower than timeGetTime() on desktops, but can be 10x slower
// on laptops.  A unittest exists which will show the relative cost of various
// timers on any system.
//
// The next logical choice is timeGetTime().  timeGetTime has a precision of
// 1ms, but only if you call APIs (timeBeginPeriod()) which affect all other
// applications on the system.  By default, precision is only 15.5ms.
// Unfortunately, we don't want to call timeBeginPeriod because we don't
// want to affect other applications.  Further, on mobile platforms, use of
// faster multimedia timers can hurt battery life.  See the Intel article
// about this here:
// http://softwarecommunity.intel.com/articles/eng/1086.htm
//
// To work around all this, we're going to generally use timeGetTime().  We
// will only increase the system-wide timer if we're not running on battery
// power.  Using timeBeginPeriod(1) is a requirement in order to make our
// message loop waits have the same resolution that our time measurements
// do.  Otherwise, WaitForSingleObject(..., 1) will wait no less than 15 ms
// when there is nothing else to waken the Wait.

#include "base/time.h"

#pragma comment(lib, "winmm.lib")
#include <windows.h>
#include <mmsystem.h>

#include "base/basictypes.h"
#include "base/lock.h"
#include "base/logging.h"
#include "base/cpu.h"
#include "base/singleton.h"
#include "base/system_monitor.h"
#include "mozilla/Casting.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;
using mozilla::BitwiseCast;

namespace {

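// Illustrative sketch, not part of the original logic: a hypothetical helper
// showing how the granularity described in the primer above can be observed.
// Spin until timeGetTime() advances once and report the size of the step; on
// a default system this is typically ~15-16 ms, and ~1 ms after a call to
// timeBeginPeriod(1).
DWORD MeasuredTimeGetTimeStepMs() {
  DWORD start = timeGetTime();
  DWORD next = start;
  while (next == start)  // Busy-wait for the low-resolution timer to tick.
    next = timeGetTime();
  return next - start;   // Approximate timer granularity, in milliseconds.
}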

// From MSDN, FILETIME "Contains a 64-bit value representing the number of
// 100-nanosecond intervals since January 1, 1601 (UTC)."
int64_t FileTimeToMicroseconds(const FILETIME& ft) {
  // Need to BitwiseCast to fix alignment, then divide by 10 to convert
  // 100-nanosecond intervals to microseconds.  This only works on
  // little-endian machines.
  return BitwiseCast<int64_t>(ft) / 10;
}

void MicrosecondsToFileTime(int64_t us, FILETIME* ft) {
  DCHECK(us >= 0) << "Time is less than 0, negative values are not "
      "representable in FILETIME";

  // Multiply by 10 to convert microseconds to 100-nanosecond intervals.
  // BitwiseCast will handle alignment problems.  This only works on
  // little-endian machines.
  *ft = BitwiseCast<FILETIME>(us * 10);
}

int64_t CurrentWallclockMicroseconds() {
  FILETIME ft;
  ::GetSystemTimeAsFileTime(&ft);
  return FileTimeToMicroseconds(ft);
}

// Time between resampling the un-granular clock for this API.  60 seconds.
const int kMaxMillisecondsToAvoidDrift = 60 * Time::kMillisecondsPerSecond;

int64_t initial_time = 0;
TimeTicks initial_ticks;

void InitializeClock() {
  initial_ticks = TimeTicks::Now();
  initial_time = CurrentWallclockMicroseconds();
}

}  // namespace

// Time -----------------------------------------------------------------------

// The internal representation of Time uses FILETIME, whose epoch is 1601-01-01
// 00:00:00 UTC.  ((1970-1601)*365+89)*24*60*60*1000*1000, where 89 is the
// number of leap year days between 1601 and 1970:  (1970-1601)/4 excluding
// 1700, 1800, and 1900.
// static
const int64_t Time::kTimeTToMicrosecondsOffset = GG_INT64_C(11644473600000000);
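
// A minimal compile-time check of the derivation above (illustrative; assumes
// the COMPILE_ASSERT macro from base/basictypes.h is available here): 369
// years of 365 days plus 89 leap days is 134,774 days = 11,644,473,600
// seconds = 11,644,473,600,000,000 microseconds.
COMPILE_ASSERT(((1970 - 1601) * 365 + 89) * GG_INT64_C(24) * 60 * 60 *
                   1000 * 1000 == GG_INT64_C(11644473600000000),
               epoch_offset_matches_derivation);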

// static
Time Time::Now() {
  if (initial_time == 0)
    InitializeClock();

  // We implement time using the high-resolution timers so that we can get
  // timeouts which are smaller than 10-15ms.  If we just used
  // CurrentWallclockMicroseconds(), we'd have the less-granular timer.
  //
  // To make this work, we initialize the clock (initial_time) and the
  // counter (initial_ticks).  To compute the current time, we can check
  // the number of ticks that have elapsed, and compute the delta.
  //
  // To avoid any drift, we periodically resync the counters to the system
  // clock.
  while (true) {
    TimeTicks ticks = TimeTicks::Now();

    // Calculate the time elapsed since we started our timer.
    TimeDelta elapsed = ticks - initial_ticks;

    // Check if enough time has elapsed that we need to resync the clock.
    if (elapsed.InMilliseconds() > kMaxMillisecondsToAvoidDrift) {
      InitializeClock();
      continue;
    }

    return Time(elapsed + Time(initial_time));
  }
}

// static
Time Time::NowFromSystemTime() {
  // Force resync.
  InitializeClock();
  return Time(initial_time);
}

// static
Time Time::FromFileTime(FILETIME ft) {
  return Time(FileTimeToMicroseconds(ft));
}

FILETIME Time::ToFileTime() const {
  FILETIME utc_ft;
  MicrosecondsToFileTime(us_, &utc_ft);
  return utc_ft;
}

// static
Time Time::FromExploded(bool is_local, const Exploded& exploded) {
  // Create the system struct representing our exploded time.  It will either
  // be in local time or UTC.
  SYSTEMTIME st;
  st.wYear = exploded.year;
  st.wMonth = exploded.month;
  st.wDayOfWeek = exploded.day_of_week;
  st.wDay = exploded.day_of_month;
  st.wHour = exploded.hour;
  st.wMinute = exploded.minute;
  st.wSecond = exploded.second;
  st.wMilliseconds = exploded.millisecond;

  // Convert to FILETIME.
  FILETIME ft;
  if (!SystemTimeToFileTime(&st, &ft)) {
    NOTREACHED() << "Unable to convert time";
    return Time(0);
  }

  // Ensure that it's in UTC.
  if (is_local) {
    FILETIME utc_ft;
    LocalFileTimeToFileTime(&ft, &utc_ft);
    return Time(FileTimeToMicroseconds(utc_ft));
  }
  return Time(FileTimeToMicroseconds(ft));
}

void Time::Explode(bool is_local, Exploded* exploded) const {
  // FILETIME in UTC.
  FILETIME utc_ft;
  MicrosecondsToFileTime(us_, &utc_ft);

  // FILETIME in local time if necessary.
  BOOL success = TRUE;
  FILETIME ft;
  if (is_local)
    success = FileTimeToLocalFileTime(&utc_ft, &ft);
  else
    ft = utc_ft;

  // FILETIME in SYSTEMTIME (exploded).
  SYSTEMTIME st;
  if (!success || !FileTimeToSystemTime(&ft, &st)) {
    NOTREACHED() << "Unable to convert time, don't know why";
    ZeroMemory(exploded, sizeof(*exploded));
    return;
  }

  exploded->year = st.wYear;
  exploded->month = st.wMonth;
  exploded->day_of_week = st.wDayOfWeek;
  exploded->day_of_month = st.wDay;
  exploded->hour = st.wHour;
  exploded->minute = st.wMinute;
  exploded->second = st.wSecond;
  exploded->millisecond = st.wMilliseconds;
}

// TimeTicks ------------------------------------------------------------------
namespace {

// We define a wrapper to adapt between the __stdcall and __cdecl call of the
// mock function, and to avoid a static constructor.  Assigning an import to a
// function pointer directly would require setup code to fetch from the IAT.
DWORD timeGetTimeWrapper() {
  return timeGetTime();
}

DWORD (*tick_function)(void) = &timeGetTimeWrapper;

// We use timeGetTime() to implement TimeTicks::Now().  This can be problematic
// because it returns the number of milliseconds since Windows has started,
// which will roll over the 32-bit value every ~49 days.  We try to track
// rollover ourselves, which works if TimeTicks::Now() is called at least every
// 49 days.
class NowSingleton {
 public:
  NowSingleton()
      : rollover_(TimeDelta::FromMilliseconds(0)),
        last_seen_(0) {
  }

  TimeDelta Now() {
    AutoLock locked(lock_);
    // We should hold the lock while calling tick_function so that
    // last_seen_ stays correctly in sync.
    DWORD now = tick_function();
    if (now < last_seen_)
      rollover_ += TimeDelta::FromMilliseconds(GG_LONGLONG(0x100000000));  // ~49.7 days.
    last_seen_ = now;
    return TimeDelta::FromMilliseconds(now) + rollover_;
  }

 private:
  Lock lock_;  // To protect last_seen_ and rollover_.
  TimeDelta rollover_;  // Accumulation of time lost due to rollover.
  DWORD last_seen_;  // The last timeGetTime value we saw, to detect rollover.

  DISALLOW_COPY_AND_ASSIGN(NowSingleton);
};
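
// Worked arithmetic for the "~49.7 days" figure above:  the raw counter wraps
// every 2^32 ms = 4,294,967,296 ms ~= 4,294,967 s ~= 1,193 hours ~= 49.7 days,
// which is why rollover_ grows by 0x100000000 milliseconds each time the raw
// value is seen to go backwards.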

// Overview of time counters:
// (1) CPU cycle counter. (Retrieved via RDTSC)
// The CPU counter provides the highest resolution time stamp and is the least
// expensive to retrieve.  However, the CPU counter is unreliable and should
// not be used in production.  Its biggest issue is that it is per processor
// and it is not synchronized between processors.  Also, on some computers,
// the counters will change frequency due to thermal and power changes, and
// they stop in some states.
//
// (2) QueryPerformanceCounter (QPC).  The QPC counter provides a high-
// resolution (100 nanoseconds) time stamp but is comparatively more expensive
// to retrieve.  What QueryPerformanceCounter actually does is up to the HAL
// (with some help from ACPI).  According to
// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
// in the worst case, it gets the counter from the rollover interrupt on the
// programmable interrupt timer.  In the best case, the HAL may conclude that
// the RDTSC counter runs at a constant frequency and then use that instead.
// On multiprocessor machines, it will try to verify that the values returned
// from RDTSC on each processor are consistent with each other, and it applies
// a handful of workarounds for known buggy hardware.  In other words, QPC is
// supposed to give consistent results on a multiprocessor computer, but in
// reality it is unreliable due to bugs in the BIOS or HAL on some machines,
// especially older ones.  With recent updates to the HAL and newer BIOSes,
// QPC is getting more reliable, but it should still be used with caution.
//
// (3) System time.  The system time provides a low-resolution (typically
// 10 ms to 55 ms) time stamp but is comparatively less expensive to retrieve
// and more reliable.
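
// Minimal sketch of the usual QPC-ticks-to-microseconds conversion referred to
// above (illustrative; this hypothetical helper is not used by
// HighResNowSingleton below, which caches a ticks_per_microsecond_ factor
// instead).
int64_t QPCTicksToMicrosecondsExample(const LARGE_INTEGER& ticks,
                                      const LARGE_INTEGER& ticks_per_second) {
  // Multiply before dividing to keep sub-tick precision.  Note that the
  // multiplication can overflow for very large tick counts, which is one
  // reason to cache a scale factor instead.
  return ticks.QuadPart * Time::kMicrosecondsPerSecond /
         ticks_per_second.QuadPart;
}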

class HighResNowSingleton {
 public:
  HighResNowSingleton()
      : ticks_per_microsecond_(0.0),
        skew_(0) {
    InitializeClock();

    // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is
    // unreliable.  Fall back to the low-res clock.
    base::CPU cpu;
    if (cpu.vendor_name() == "AuthenticAMD" && cpu.family() == 15)
      DisableHighResClock();
  }

  bool IsUsingHighResClock() {
    return ticks_per_microsecond_ != 0.0;
  }

  void DisableHighResClock() {
    ticks_per_microsecond_ = 0.0;
  }

  TimeDelta Now() {
    // Our maximum tolerance for QPC drifting.
    const int kMaxTimeDrift = 50 * Time::kMicrosecondsPerMillisecond;

    if (IsUsingHighResClock()) {
      int64_t now = UnreliableNow();

      // Verify that QPC does not seem to drift.
      DCHECK(now - ReliableNow() - skew_ < kMaxTimeDrift);

      return TimeDelta::FromMicroseconds(now);
    }

    // Just fall back to the slower clock.
    return Singleton<NowSingleton>::get()->Now();
  }

 private:
  // Synchronize the QPC clock with GetSystemTimeAsFileTime.
  void InitializeClock() {
    LARGE_INTEGER ticks_per_sec = {0};
    if (!QueryPerformanceFrequency(&ticks_per_sec))
      return;  // Broken, we don't guarantee this function works.
    ticks_per_microsecond_ = static_cast<float>(ticks_per_sec.QuadPart) /
        static_cast<float>(Time::kMicrosecondsPerSecond);

    skew_ = UnreliableNow() - ReliableNow();
  }

  // Get the number of microseconds since boot in an unreliable fashion (QPC).
  int64_t UnreliableNow() {
    LARGE_INTEGER now;
    QueryPerformanceCounter(&now);
    return static_cast<int64_t>(now.QuadPart / ticks_per_microsecond_);
  }

  // Get the number of microseconds since boot in a reliable fashion.
  int64_t ReliableNow() {
    return Singleton<NowSingleton>::get()->Now().InMicroseconds();
  }

  // Cached clock frequency -> microseconds.  This assumes that the clock
  // frequency is faster than one microsecond (which is 1 MHz, should be OK).
  float ticks_per_microsecond_;  // 0 indicates QPF failed and we're broken.
  int64_t skew_;  // Skew between lo-res and hi-res clocks (for debugging).

  DISALLOW_COPY_AND_ASSIGN(HighResNowSingleton);
};

}  // namespace

// static
TimeTicks::TickFunctionType TimeTicks::SetMockTickFunction(
    TickFunctionType ticker) {
  TickFunctionType old = tick_function;
  tick_function = ticker;
  return old;
}

// static
TimeTicks TimeTicks::Now() {
  return TimeTicks() + Singleton<NowSingleton>::get()->Now();
}

// static
TimeTicks TimeTicks::HighResNow() {
  return TimeTicks() + Singleton<HighResNowSingleton>::get()->Now();
}
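
// Illustrative sketch of how the mock hook defined above might be used by a
// test (hypothetical code, not part of this file; depending on how base/time.h
// declares them, TickFunctionType and SetMockTickFunction may only be
// reachable from a TimeTicks subclass, and real test code would add
// assertions):
//
//   DWORD FixedTicks() {
//     return 1000;  // Pretend exactly 1000 ms have elapsed since boot.
//   }
//   ...
//   TimeTicks::TickFunctionType old_fn =
//       TimeTicks::SetMockTickFunction(&FixedTicks);
//   TimeTicks mocked = TimeTicks::Now();     // Built from the mocked 1000 ms.
//   TimeTicks::SetMockTickFunction(old_fn);  // Restore the timeGetTime path.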