/*
 * Default clocksources are *special* and self-define their mult/shift.
 * But, you're not special, so you should specify a freq value.
 */
if (freq) {
    /*
     * Calc the maximum number of seconds which we can run before
     * wrapping around. For clocksources which have a mask > 32-bit
     * we need to limit the max sleep time to have a good
     * conversion precision. 10 minutes is still a reasonable
     * amount. That results in a shift value of 24 for a
     * clocksource with mask >= 40-bit and f >= 4GHz. That maps to
     * ~ 0.06ppm granularity for NTP.
     */
    /* (1.1.1) Calculate how many seconds the timer counter can count
       before it overflows = sec */
    sec = cs->mask;
    do_div(sec, freq);
    do_div(sec, scale);
    if (!sec)
        sec = 1;
    else if (sec > 600 && cs->mask > UINT_MAX)
        sec = 600;
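To see this arithmetic outside the kernel, here is a minimal userspace sketch (not kernel code; the 56-bit mask and 19.2 MHz frequency are invented for illustration). It computes the seconds-to-wrap cap derived above, plus a mult/shift pair such that ns = (cycles * mult) >> shift:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t mask = (1ULL << 56) - 1;   /* assume a 56-bit counter */
    uint64_t freq = 19200000;           /* assume a 19.2 MHz clocksource */

    /* Seconds until the counter wraps, capped like the code above */
    uint64_t sec = mask / freq;
    if (!sec)
        sec = 1;
    else if (sec > 600 && mask > UINT32_MAX)
        sec = 600;

    /* mult/shift so that ns = (cycles * mult) >> shift */
    uint32_t shift = 24;                /* example shift value */
    uint64_t mult = ((uint64_t)1000000000 << shift) / freq;

    uint64_t cycles = freq;             /* one second worth of cycles */
    printf("max sleep %llus, 1s of cycles -> %lluns\n",
           (unsigned long long)sec,
           (unsigned long long)((cycles * mult) >> shift));
    return 0;
}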
/*
 * Only warn for *special* clocksources that self-define
 * their mult/shift values and don't specify a freq.
 */
WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
    "timekeeping: Clocksource %s might overflow on 11%% adjustment\n",
    cs->name);
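The maxadj being checked here is, in kernels of this era, roughly 11% of mult — the worst-case frequency steering NTP may apply. As a sketch (illustrative, not the kernel function itself):

#include <stdint.h>

/* Roughly what clocksource_max_adjustment() computes: ~11% of mult.
 * The WARN_ONCE() above then checks that mult + maxadj cannot wrap. */
static uint32_t max_adjustment(uint32_t mult)
{
    return (uint64_t)mult * 11 / 100;
}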
/* Find the best suitable clocksource */
/* (1.3.1) Pick the best clocksource */
best = clocksource_find_best(oneshot, skipcur);
if (!best)
    return;
/* Check for the override clocksource. */
list_for_each_entry(cs, &clocksource_list, list) {
    if (skipcur && cs == curr_clocksource)
        continue;
    if (strcmp(cs->name, override_name) != 0)
        continue;
    /*
     * Check to make sure we don't switch to a non-highres
     * capable clocksource if the tick code is in oneshot
     * mode (highres or nohz)
     */
    if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
        /* Override clocksource cannot be used. */
        pr_warn("Override clocksource %s is not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
            cs->name);
        override_name[0] = 0;
    } else
        /* Override clocksource can be used. */
        best = cs;
    break;
}
    /* (1.3.2) Notify the timekeeper to update its clocksource;
       tick-sched updates as well */
    if (curr_clocksource != best && !timekeeping_notify(best)) {
        pr_info("Switched to clocksource %s\n", best->name);
        curr_clocksource = best;
    }
}

||||→

int timekeeping_notify(struct clocksource *clock)
{
    struct timekeeper *tk = &tk_core.timekeeper;
/**
 * struct timekeeper - Structure holding internal timekeeping values.
 * @tkr_mono:          The readout base structure for CLOCK_MONOTONIC
 * @tkr_raw:           The readout base structure for CLOCK_MONOTONIC_RAW
 * @xtime_sec:         Current CLOCK_REALTIME time in seconds
 * @ktime_sec:         Current CLOCK_MONOTONIC time in seconds
 * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
 * @offs_real:         Offset clock monotonic -> clock realtime
 * @offs_boot:         Offset clock monotonic -> clock boottime
 * @offs_tai:          Offset clock monotonic -> clock tai
 * @tai_offset:        The current UTC to TAI offset in seconds
 * @clock_was_set_seq: The sequence number of clock was set events
 * @next_leap_ktime:   CLOCK_MONOTONIC time value of a pending leap-second
 * @raw_time:          Monotonic raw base time in timespec64 format
 * @cycle_interval:    Number of clock cycles in one NTP interval
 * @xtime_interval:    Number of clock shifted nano seconds in one NTP
 *                     interval.
 * @xtime_remainder:   Shifted nano seconds left over when rounding
 *                     @cycle_interval
 * @raw_interval:      Raw nano seconds accumulated per NTP interval.
 * @ntp_error:         Difference between accumulated time and NTP time in ntp
 *                     shifted nano seconds.
 * @ntp_error_shift:   Shift conversion between clock shifted nano seconds and
 *                     ntp shifted nano seconds.
 * @last_warning:      Warning ratelimiter (DEBUG_TIMEKEEPING)
 * @underflow_seen:    Underflow warning flag (DEBUG_TIMEKEEPING)
 * @overflow_seen:     Overflow warning flag (DEBUG_TIMEKEEPING)
 *
 * Note: For timespec(64) based interfaces wall_to_monotonic is what
 * we need to add to xtime (or xtime corrected for sub jiffie times)
 * to get to monotonic time. Monotonic is pegged at zero at system
 * boot time, so wall_to_monotonic will be negative, however, we will
 * ALWAYS keep the tv_nsec part positive so we can use the usual
 * normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the
 * monotonic time not to jump. We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot based time offset.
 *
 * wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
 */
struct timekeeper {
    struct tk_read_base tkr_mono;   // tkr_mono.xtime_nsec: ns part of xtime/monotonic time
                                    // tkr_mono.base: base part of monotonic time
    struct tk_read_base tkr_raw;    // tkr_raw.base: base part of raw time
    u64 xtime_sec;                  // seconds part of xtime
    unsigned long ktime_sec;        // whole seconds of monotonic time
    struct timespec64 wall_to_monotonic;    // xtime + wall_to_monotonic = monotonic time
    ktime_t offs_real;              // monotonic time + offs_real = xtime;
                                    // the negation of wall_to_monotonic
    ktime_t offs_boot;              // monotonic time + offs_boot = boot time
    ktime_t offs_tai;
    s32 tai_offset;
    unsigned int clock_was_set_seq;
    ktime_t next_leap_ktime;
    struct timespec64 raw_time;     // raw time
    /* The following members are for timekeeping internal use */
    cycle_t cycle_interval;
    u64 xtime_interval;
    s64 xtime_remainder;
    u32 raw_interval;
    /* The ntp_tick_length() value currently being used.
     * This cached copy ensures we consistently apply the tick
     * length for an entire tick, as ntp_tick_length may change
     * mid-tick, and we don't want to apply that new value to
     * the tick in progress.
     */
    u64 ntp_tick;
    /* Difference between accumulated time and NTP time in ntp
     * shifted nano seconds. */
    s64 ntp_error;
    u32 ntp_error_shift;
    u32 ntp_err_mult;
#ifdef CONFIG_DEBUG_TIMEKEEPING
    long last_warning;
    /*
     * These simple flag variables are managed
     * without locks, which is racy, but they are
     * ok since we don't really care about being
     * super precise about how many events were
     * seen, just that a problem was observed.
     */
    int underflow_seen;
    int overflow_seen;
#endif
};
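The offset relationships documented in the comment block above can be checked with a few lines of userspace arithmetic; all values below are invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* monotonic = xtime + wall_to_monotonic
     * realtime  = monotonic + offs_real   (offs_real == -wall_to_monotonic)
     * boottime  = monotonic + offs_boot */
    int64_t xtime     = 1700000000;     /* CLOCK_REALTIME, seconds */
    int64_t wall2mono = -1699996400;    /* negative: monotonic starts at 0 at boot */

    int64_t mono      = xtime + wall2mono;  /* 3600s since boot */
    int64_t offs_real = -wall2mono;
    int64_t offs_boot = 5;                  /* 5s spent in suspend */

    printf("mono=%lld real=%lld boot=%lld\n",
           (long long)mono,
           (long long)(mono + offs_real),
           (long long)(mono + offs_boot));
    return 0;
}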
/* Check if there's really nothing to do */
if (offset < real_tk->cycle_interval)
    goto out;
/* Do some additional sanity checking */
timekeeping_check_update(real_tk, offset);
/*
 * With NO_HZ we may have to accumulate many cycle_intervals
 * (think "ticks") worth of time at once. To do this efficiently,
 * we calculate the largest doubling multiple of cycle_intervals
 * that is smaller than the offset. We then accumulate that
 * chunk in one go, and then try to consume the next smaller
 * doubled multiple.
 */
shift = ilog2(offset) - ilog2(tk->cycle_interval);
shift = max(0, shift);
/* Bound shift to one less than what overflows tick_length */
maxshift = (64 - (ilog2(ntp_tick_length()) + 1)) - 1;
shift = min(shift, maxshift);
/* (2) If offset spans multiple cycle_intervals, don't consume it in one
   update; consume it in power-of-two multiples of cycle_interval.
   tk->cycle_interval is set in tk_setup_internals() and defaults to
   one tick */
while (offset >= tk->cycle_interval) {
    /* (3) Fold offset into the timekeeper */
    offset = logarithmic_accumulation(tk, offset, shift, &clock_set);
    if (offset < tk->cycle_interval << shift)
        shift--;
}
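A standalone model of this loop (illustrative numbers, not kernel code; uses the GCC/Clang __builtin_clzll as a stand-in for ilog2) shows how a large offset is consumed in shrinking power-of-two chunks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t cycle_interval = 100;  /* one tick worth of cycles (example) */
    uint64_t offset = 100000;       /* cycles elapsed over a long NOHZ sleep */

    int shift = 0;
    if (offset >= cycle_interval)   /* ilog2(offset) - ilog2(cycle_interval) */
        shift = 63 - __builtin_clzll(offset / cycle_interval);

    while (offset >= cycle_interval) {
        uint64_t chunk = cycle_interval << shift;
        if (offset >= chunk) {
            offset -= chunk;        /* accumulate one big chunk */
            printf("accumulated %llu cycles (shift=%d)\n",
                   (unsigned long long)chunk, shift);
        }
        if (shift > 0 && offset < (cycle_interval << shift))
            shift--;
    }
    printf("leftover: %llu cycles\n", (unsigned long long)offset);
    return 0;
}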
/* correct the clock when NTP error is too big */
timekeeping_adjust(tk, offset);
/*
 * XXX This can be killed once everyone converts
 * to the new update_vsyscall.
 */
old_vsyscall_fixup(tk);
/*
 * Finally, make sure that after the rounding
 * xtime_nsec isn't larger than NSEC_PER_SEC
 */
clock_set |= accumulate_nsecs_to_secs(tk);
write_seqcount_begin(&tk_core.seq);
/*
 * Update the real timekeeper.
 *
 * We could avoid this memcpy by switching pointers, but that
 * requires changes to all other timekeeper usage sites as
 * well, i.e. move the timekeeper pointer getter into the
 * spinlocked/seqcount protected sections. And we trade this
 * memcpy under the tk_core.seq against one before we start
 * updating.
 */
/* (4) */
timekeeping_update(tk, clock_set);
memcpy(real_tk, tk, sizeof(*tk));
/* The memcpy must come last. Do not put anything here! */
write_seqcount_end(&tk_core.seq);
out:
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
if (clock_set)
    /* Have to call _delayed version, since in irq context */
    clock_was_set_delayed();
}

|→

static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
                                        u32 shift, unsigned int *clock_set)
{
    cycle_t interval = tk->cycle_interval << shift;
    u64 raw_nsecs;
    /* If the offset is smaller than a shifted interval, do nothing */
    if (offset < interval)
        return offset;
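Stepping back to the write_seqcount_begin()/write_seqcount_end() window in update_wall_time() above: the pattern is that of a sequence lock, where a single writer publishes inside an odd/even counter window and readers retry. A deliberately simplified C11 model (not the kernel's seqcount_t; the plain reads of the data formally race, which the kernel's barriers and annotations handle properly):

#include <stdatomic.h>
#include <stdint.h>

struct tk_state { int64_t base; uint64_t cycle_last; };

static _Atomic unsigned int seq;
static struct tk_state tk;          /* the "real" timekeeper */
static struct tk_state shadow_tk;   /* prepared outside the window */

/* Writer: do the slow work on the shadow copy, then publish it inside
 * a short odd/even window, like the memcpy under tk_core.seq above. */
static void publish(void)
{
    atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);   /* odd */
    tk = shadow_tk;                                             /* "memcpy" */
    atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);   /* even */
}

/* Reader: retry while a write is in flight or completed underneath us. */
static struct tk_state snapshot(void)
{
    struct tk_state s;
    unsigned int start;

    do {
        start = atomic_load_explicit(&seq, memory_order_acquire);
        s = tk;
    } while ((start & 1) ||
             start != atomic_load_explicit(&seq, memory_order_acquire));
    return s;
}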
    if (action & TK_CLOCK_WAS_SET)
        tk->clock_was_set_seq++;
    /*
     * The mirroring of the data to the shadow-timekeeper needs
     * to happen last here to ensure we don't over-write the
     * timekeeper structure on the next update with stale data
     */
    if (action & TK_MIRROR)
        memcpy(&shadow_timekeeper, &tk_core.timekeeper,
               sizeof(tk_core.timekeeper));
}

||→

static inline void tk_update_ktime_data(struct timekeeper *tk)
{
    u64 seconds;
    u32 nsec;
    /* Update the monotonic raw base */
    /* (4.1.2) Update tk->tkr_raw.base by converting tk->raw_time directly */
    tk->tkr_raw.base = timespec64_to_ktime(tk->raw_time);
    /*
     * The sum of the nanoseconds portions of xtime and
     * wall_to_monotonic can be greater/equal one second. Take
     * this into account before updating tk->ktime_sec.
     */
    /* (4.1.3) Update tk->ktime_sec */
    nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
    if (nsec >= NSEC_PER_SEC)
        seconds++;
    tk->ktime_sec = seconds;
}
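A worked example of that carry (numbers invented): if tk->xtime_sec = 1000 and wall_to_monotonic = { -400, 600000000 }, then seconds starts at 600; if xtime_nsec >> shift is 700000000, nsec becomes 0.6s + 0.7s = 1.3s. Since that is >= NSEC_PER_SEC, ktime_sec ends up 601, not 600.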
    /*
     * Do not bail out early, in case there were callers still using
     * the value, even in the face of the WARN_ON.
     */
    if (unlikely(timekeeping_suspended))
        return -EAGAIN;
    return 0;
}

|→

static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
{
    cycle_t delta;
    s64 nsec;
    /* If arch requires, add in get_arch_timeoffset() */
    return nsec + arch_gettimeoffset();
}

||→

static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
{
    struct timekeeper *tk = &tk_core.timekeeper;
    cycle_t now, last, mask, max, delta;
    unsigned int seq;
    /*
     * Since we're called holding a seqlock, the data may shift
     * under us while we're doing the calculation. This can cause
     * false positives, since we'd note a problem but throw the
     * results away. So nest another seqlock here to atomically
     * grab the points we are checking with.
     */
    do {
        seq = read_seqcount_begin(&tk_core.seq);
        /* (2.1.1) Read the current timer count via the read function */
        now = tkr->read(tkr->clock);
        last = tkr->cycle_last;
        mask = tkr->mask;
        max = tkr->clock->max_cycles;
    } while (read_seqcount_retry(&tk_core.seq, seq));
    /*
     * Try to catch underflows by checking if we are seeing small
     * mask-relative negative values.
     */
    if (unlikely((~delta & mask) < (mask >> 3))) {
        tk->underflow_seen = 1;
        delta = 0;
    }
    /* Cap delta value to the max_cycles values to avoid mult overflows */
    if (unlikely(delta > max)) {
        tk->overflow_seen = 1;
        delta = tkr->clock->max_cycles;
    }
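How mask-relative deltas behave, and why "(~delta & mask) is small" signals an underflow, can be seen in a few lines (userspace, invented values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t mask = (1ULL << 32) - 1;   /* 32-bit counter (example) */
    uint64_t last = 0xfffffff0;         /* just before the wrap */
    uint64_t now  = 0x00000010;         /* just after the wrap */

    /* (now - last) & mask handles the wrap for counters < 64 bits */
    uint64_t delta = (now - last) & mask;
    printf("delta = 0x%llx\n", (unsigned long long)delta);  /* 0x20 */

    /* A "small negative" delta (e.g. racing reads going backwards)
     * shows up as a value close to mask, so ~delta & mask is tiny */
    uint64_t bogus = (last - now) & mask;
    printf("~bogus & mask = 0x%llx (< mask >> 3 => underflow)\n",
           (unsigned long long)(~bogus & mask));            /* 0x1f */
    return 0;
}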
/*
 * After system resumes, we need to calculate the suspended time and
 * compensate it for the OS time. There are 3 sources that could be
 * used: Nonstop clocksource during suspend, persistent clock and rtc
 * device.
 *
 * One specific platform may have 1 or 2 or all of them, and the
 * preference will be:
 *	suspend-nonstop clocksource -> persistent clock -> rtc
 * The less preferred source will only be tried if there is no better
 * usable source. The rtc part is handled separately in rtc core code.
 */
cycle_now = tk->tkr_mono.read(clock);
if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
    cycle_now > tk->tkr_mono.cycle_last) {
    u64 num, max = ULLONG_MAX;
    u32 mult = clock->mult;
    u32 shift = clock->shift;
    s64 nsec = 0;
/* * "cycle_delta * mutl" may cause 64 bits overflow, if the * suspended time is too long. In that case we need do the * 64 bits math carefully */ do_div(max, mult); if (cycle_delta > max) { num = div64_u64(cycle_delta, max); nsec = (((u64) max * mult) >> shift) * num; cycle_delta -= num * max; } nsec += ((u64) cycle_delta * mult) >> shift;
/*
 * On some systems the persistent_clock can not be detected at
 * timekeeping_init by its return value, so if we see a valid
 * value returned, update the persistent_clock_exists flag.
 */
if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
    persistent_clock_exists = true;
if (persistent_clock_exists) {
    /*
     * To avoid drift caused by repeated suspend/resumes,
     * which each can add ~1 second drift error,
     * try to compensate so the difference in system time
     * and persistent_clock time stays close to constant.
     */
    delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
    delta_delta = timespec64_sub(delta, old_delta);
    if (abs(delta_delta.tv_sec) >= 2) {
        /*
         * if delta_delta is too large, assume time correction
         * has occurred and set old_delta to the current delta.
         */
        old_delta = delta;
    } else {
        /* Otherwise try to adjust old_system to compensate */
        timekeeping_suspend_time =
            timespec64_add(timekeeping_suspend_time, delta_delta);
    }
}
static int rtc_suspend(struct device *dev)
{
    struct rtc_device *rtc = to_rtc_device(dev);
    struct rtc_time tm;
    struct timespec64 delta, delta_delta;
    int err;
    if (timekeeping_rtc_skipsuspend())
        return 0;
    if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
        return 0;
    /* snapshot the current RTC and system time at suspend */
    /* (1.1) Read the RTC time at suspend */
    err = rtc_read_time(rtc, &tm);
    if (err < 0) {
        pr_debug("%s: fail to read rtc time\n", dev_name(&rtc->dev));
        return 0;
    }
    /*
     * To avoid drift caused by repeated suspend/resumes,
     * which each can add ~1 second drift error,
     * try to compensate so the difference in system time
     * and rtc time stays close to constant.
     */
    /* (1.3) If the RTC time and xtime have drifted apart, try to
       correct xtime */
    delta = timespec64_sub(old_system, old_rtc);
    delta_delta = timespec64_sub(delta, old_delta);
    if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
        /*
         * if delta_delta is too large, assume time correction
         * has occurred and set old_delta to the current delta.
         */
        old_delta = delta;
    } else {
        /* Otherwise try to adjust old_system to compensate */
        old_system = timespec64_sub(old_system, delta_delta);
    }
    return 0;
}
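A worked example of the compensation above (numbers invented): suppose at the previous suspend old_delta = old_system - old_rtc was 100.3s, and this time delta comes out as 100.8s. Then delta_delta = 0.5s, within the ±2s window, so it is treated as accumulated suspend/resume drift and folded back into old_system rather than being allowed to grow. Had delta_delta been, say, 30s (someone called settimeofday() in between), the code would instead assume a deliberate time correction and simply adopt the new delta as old_delta.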
static int rtc_resume(struct device *dev)
{
    struct rtc_device *rtc = to_rtc_device(dev);
    struct rtc_time tm;
    struct timespec64 new_system, new_rtc;
    struct timespec64 sleep_time;
    int err;
    if (timekeeping_rtc_skipresume())
        return 0;
    rtc_hctosys_ret = -ENODEV;
    if (strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE) != 0)
        return 0;
    /* snapshot the current rtc and system time at resume */
    /* (2.1) Read the RTC time and xtime after resume */
    getnstimeofday64(&new_system);
    err = rtc_read_time(rtc, &tm);
    if (err < 0) {
        pr_debug("%s: fail to read rtc time\n", dev_name(&rtc->dev));
        return 0;
    }
    if (new_rtc.tv_sec < old_rtc.tv_sec) {
        pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
        return 0;
    }
    /* calculate the RTC time delta (sleep time) */
    /* (2.2) Compute the RTC delta between suspend and resume */
    sleep_time = timespec64_sub(new_rtc, old_rtc);
    /*
     * Since these RTC suspend/resume handlers are not called
     * at the very end of suspend or the start of resume,
     * some run-time may pass on either sides of the sleep time
     * so subtract kernel run-time between rtc_suspend to rtc_resume
     * to keep things accurate.
     */
    /* (2.3) From the delta above, subtract the xtime delta between
       suspend and resume to get the actual sleep time */
    sleep_time = timespec64_sub(sleep_time,
            timespec64_sub(new_system, old_system));
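Worked example (numbers invented): the RTC read 1000s at suspend and 1100s at resume, so the raw RTC delta is 100s. But the kernel also ran for a while after rtc_suspend() and before rtc_resume(), during which xtime advanced by, say, 3s (new_system - old_system). The sleep time actually injected into the timekeeper is therefore 100 - 3 = 97s.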
/* (1.1) If the device only supports periodic mode and not oneshot,
   the period is fixed and there is no need to dynamically convert
   ns to cycles */
if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
    return;
/*
 * Calculate the maximum number of seconds we can sleep. Limit
 * to 10 minutes for hardware which can program more than
 * 32bit ticks so we still get reasonable conversion values.
 */
sec = dev->max_delta_ticks;
do_div(sec, freq);
if (!sec)
    sec = 1;
else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
    sec = 600;
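Note that clockevent mult/shift run in the opposite direction from a clocksource: they convert a nanosecond delta into hardware cycles, cycles = (ns * mult) >> shift, which is what the event programming path relies on. A userspace sketch with an assumed 13 MHz timer (values invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t freq = 13000000;                   /* 13 MHz (example) */
    uint32_t shift = 32;
    uint64_t mult = (freq << 32) / 1000000000;  /* ns -> cycles factor */

    uint64_t delta_ns = 1000000;                /* program an event 1ms out */
    uint64_t cycles = (delta_ns * mult) >> shift;
    printf("1ms -> %llu cycles (~13000 expected)\n",
           (unsigned long long)cycles);
    return 0;
}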
/*
 * Replace the eventually existing device by the new
 * device. If the current device is the broadcast device, do
 * not give it back to the clockevents layer !
 */
if (tick_is_broadcast_device(curdev)) {
    clockevents_shutdown(curdev);
    curdev = NULL;
}
/* (2.2.3) Shut down curdev and newdev */
clockevents_exchange_device(curdev, newdev);
/* (2.2.4) Continue the clock_event_device registration */
tick_setup_device(td, newdev, cpu, cpumask_of(cpu));
if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
    tick_oneshot_notify();
return;
out_bc:
    /*
     * Can the new device be used as a broadcast device ?
     */
    /* (2.2.5) If newdev is not suitable as this cpu's td->evtdev,
       try to register it as the broadcast clockevent */
    tick_install_broadcast_device(newdev);
}

|||→

static void tick_setup_device(struct tick_device *td,
                              struct clock_event_device *newdev, int cpu,
                              const struct cpumask *cpumask)
{
    ktime_t next_event;
    void (*handler)(struct clock_event_device *) = NULL;
    /*
     * First device setup ?
     */
    if (!td->evtdev) {
        /* (2.2.4.1) If tick_do_timer_cpu has not been claimed yet and
           tick_nohz_full_cpu is not enabled for this cpu, make this cpu
           the tick_do_timer_cpu. tick_do_timer_cpu is responsible for
           updating jiffies and calling update_wall_time() in the tick */
        /*
         * If no cpu took the do_timer update, assign it to
         * this cpu:
         */
        if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
            if (!tick_nohz_full_cpu(cpu))
                tick_do_timer_cpu = cpu;
            else
                tick_do_timer_cpu = TICK_DO_TIMER_NONE;
            tick_next_period = ktime_get();
            tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
        }
    /*
     * When the device is not per cpu, pin the interrupt to the
     * current cpu:
     */
    if (!cpumask_equal(newdev->cpumask, cpumask))
        irq_set_affinity(newdev->irq, cpumask);
    /*
     * When global broadcasting is active, check if the current
     * device is registered as a placeholder for broadcast mode.
     * This allows us to handle this x86 misfeature in a generic
     * way. This function also returns !=0 when we keep the
     * current active broadcast state for this CPU.
     */
    /* (2.2.4.5) If the global broadcast clockevent service is active,
       this cpu's clockevent registration must go through the broadcast
       service. This works around an x86 misfeature; other architectures
       may not need it */
    if (tick_device_uses_broadcast(newdev, cpu))
        return;
void tick_handle_periodic(struct clock_event_device *dev)
{
    int cpu = smp_processor_id();
    ktime_t next = dev->next_event;
    /* (1) Run the periodic tick work */
    tick_periodic(cpu);
#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
    /*
     * The cpu might have transitioned to HIGHRES or NOHZ mode via
     * update_process_times() -> run_local_timers() ->
     * hrtimer_run_queues().
     */
    if (dev->event_handler != tick_handle_periodic)
        return;
#endif
    if (!clockevent_state_oneshot(dev))
        return;
    /* (2) If the tick_device is in periodic mode but the clockevent is
       in oneshot mode, program the oneshot clockevent to fire at the
       next period:
       tick_device->mode = TICKDEV_MODE_PERIODIC
       clock_event_device->state_use_accessors = CLOCK_EVT_STATE_ONESHOT */
    for (;;) {
        /*
         * Setup the next period for devices, which do not have
         * periodic mode:
         */
        next = ktime_add(next, tick_period);
        if (!clockevents_program_event(dev, next, false))
            return;
        /*
         * Have to be careful here. If we're in oneshot mode,
         * before we call tick_periodic() in a loop, we need
         * to be sure we're using a real hardware clocksource.
         * Otherwise we could get trapped in an infinite
         * loop, as the tick_periodic() increments jiffies,
         * which then will increment time, possibly causing
         * the loop to trigger again and again.
         */
        if (timekeeping_valid_for_hres())
            tick_periodic(cpu);
    }
}

|→

static void tick_periodic(int cpu)
{
    /* (1.1) If this cpu is the tick_do_timer_cpu, run the global
       timestamp work, including updating jiffies and update_wall_time() */
    if (tick_do_timer_cpu == cpu) {
        write_seqlock(&jiffies_lock);

        /* Keep track of the next tick event */
        tick_next_period = ktime_add(tick_next_period, tick_period);

        /* (1.1.1) Update jiffies */
        do_timer(1);
        write_sequnlock(&jiffies_lock);
        /* (1.1.2) Read the clocksource to update the timekeeper */
        update_wall_time();
    }

    /* (1.2) Run software timers (run_local_timers()) and the scheduler
       tick (scheduler_tick()) */
    update_process_times(user_mode(get_irq_regs()));
    profile_tick(CPU_PROFILING);
}
    /* (3) If hrtimer has already switched to high-resolution mode,
       hrtimers are no longer run from the low-resolution timer path
       via run_local_timers() */
    if (__hrtimer_hres_active(cpu_base))
        return;
    /*
     * This _is_ ugly: We have to check periodically, whether we
     * can switch to highres and / or nohz mode. The clocksource
     * switch happens with xtime_lock held. Notification from
     * there only sets the check bit in the tick_oneshot code,
     * otherwise we might deadlock vs. xtime_lock.
     */
    /* (1) If hrtimer is disabled but noHZ is enabled, the call chain
       tick_check_oneshot_change() -> tick_nohz_switch_to_nohz()
       switches to NOHZ_MODE_LOWRES mode */
    if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
        /* (2) If both hrtimer and noHZ are enabled,
           hrtimer_switch_to_hres() switches to NOHZ_MODE_HIGHRES mode */
        hrtimer_switch_to_hres();
        return;
    }
    /* Read jiffies and the time when jiffies were updated last */
    do {
        seq = read_seqbegin(&jiffies_lock);
        basemono = last_jiffies_update.tv64;
        basejiff = jiffies;
    } while (read_seqretry(&jiffies_lock, seq));
    ts->last_jiffies = basejiff;
    if (rcu_needs_cpu(basemono, &next_rcu) ||
        arch_needs_cpu() || irq_work_needs_cpu()) {
        next_tick = basemono + TICK_NSEC;
    } else {
        /*
         * Get the next pending timer. If high resolution
         * timers are enabled this only takes the timer wheel
         * timers into account. If high resolution timers are
         * disabled this also looks at the next expiring
         * hrtimer.
         */
        /* (2.1) Get the expiry time of the next timer (both
           low-resolution and high-resolution timers) */
        next_tmr = get_next_timer_interrupt(basejiff, basemono);
        ts->next_timer = next_tmr;
        /* Take the next rcu event into account */
        next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
    }
    /*
     * If the tick is due in the next period, keep it ticking or
     * restart it proper.
     */
    /* (2.2) If the gap is less than one tick, don't enter noHZ mode */
    delta = next_tick - basemono;
    if (delta <= (u64)TICK_NSEC) {
        tick.tv64 = 0;
        if (!ts->tick_stopped)
            goto out;
        if (delta == 0) {
            /* Tick is stopped, but required now. Enforce it */
            tick_nohz_restart(ts, now);
            goto out;
        }
    }
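Worked example: with HZ=250, TICK_NSEC is 4ms. If the next timer is due in 2ms, delta <= TICK_NSEC and the periodic tick simply keeps running; if it is due in 100ms, the tick is stopped and the clockevent is programmed 100ms out, saving 24 needless wakeups.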
    /*
     * If this cpu is the one which updates jiffies, then give up
     * the assignment and let it be taken by the cpu which runs
     * the tick timer next, which might be this cpu as well. If we
     * don't drop this here the jiffies might be stale and
     * do_timer() never invoked. Keep track of the fact that it
     * was the one which had the do_timer() duty last. If this cpu
     * is the one which had the do_timer() duty last, we limit the
     * sleep time to the timekeeping max_deferment value.
     * Otherwise we can sleep as long as we want.
     */
    /* (2.3) The maximum idle time, derived from how long the timekeeper
       can go before its clocksource may overflow */
    delta = timekeeping_max_deferment();
    if (cpu == tick_do_timer_cpu) {
        tick_do_timer_cpu = TICK_DO_TIMER_NONE;
        ts->do_timer_last = 1;
    } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
        delta = KTIME_MAX;
        ts->do_timer_last = 0;
    } else if (!ts->do_timer_last) {
        delta = KTIME_MAX;
    }
#ifdef CONFIG_NO_HZ_FULL
    /* Limit the tick delta to the maximum scheduler deferment */
    if (!ts->inidle)
        delta = min(delta, scheduler_tick_max_deferment());
#endif
    /* Calculate the next expiry time */
    if (delta < (KTIME_MAX - basemono))
        expires = basemono + delta;
    else
        expires = KTIME_MAX;
    /* Skip reprogramming the event if it hasn't changed */
    if (ts->tick_stopped && (expires == dev->next_event.tv64))
        goto out;
    /*
     * nohz_stop_sched_tick can be called several times before
     * the nohz_restart_sched_tick is called. This happens when
     * interrupts arrive which do not cause a reschedule. In the
     * first call we save the current tick time, so we can restart
     * the scheduler tick in nohz_restart_sched_tick.
     */
    if (!ts->tick_stopped) {
        nohz_balance_enter_idle(cpu);
        calc_load_enter_idle();
    /*
     * If the expiration time == KTIME_MAX, then we simply stop
     * the tick timer.
     */
    if (unlikely(expires == KTIME_MAX)) {
        if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
            hrtimer_cancel(&ts->sched_timer);
        goto out;
    }