	if (kthread)
		__kthread_unpark(k, kthread);
}

| →

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	// (4) Clear the KTHREAD_SHOULD_PARK flag so the thread stops parking itself
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);

	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	// If the thread has already parked itself, wake it up
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		// If this is a per_cpu thread, rebind it to its cpu first
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}
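The unpark path only works because the thread's own main loop cooperates: a parkable kthread is expected to poll kthread_should_park() and call kthread_parkme() to put itself into TASK_PARKED. The following is a minimal, hypothetical sketch of such a thread function (my_percpu_thread_fn is made up for illustration, it is not from the kernel source):

/* Hypothetical sketch: how a per-cpu kthread cooperates with park/unpark */
static int my_percpu_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park()) {
			/* Sleeps in TASK_PARKED until kthread_unpark() wakes us */
			kthread_parkme();
			continue;
		}
		/* ... normal per-cpu work ... */
		schedule();
	}
	return 0;
}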
2.2 cpu hotplug support
As mentioned earlier, the main purpose of the park mechanism is to let per_cpu threads cope with cpu hotplug. So how exactly do these threads respond to hotplug events?
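Roughly speaking, the hotplug code keeps a list of registered per-cpu hotplug threads and parks or unparks each of them as a cpu goes down or comes back up. The sketch below is a simplified rendition of kernel/smpboot.c (details differ between kernel versions):

/* Simplified sketch of kernel/smpboot.c (version-dependent) */
int smpboot_park_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry_reverse(cur, &hotplug_threads, list)
		smpboot_park_thread(cur, cpu);		/* ends up in kthread_park() */
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

void smpboot_unpark_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list)
		smpboot_unpark_thread(cur, cpu);	/* ends up in kthread_unpark() */
	mutex_unlock(&smpboot_threads_lock);
}

In the 3.x-era kernels this analysis appears to be based on, the offline path calls smpboot_park_threads() before the cpu is actually taken down, and the online path calls smpboot_unpark_threads() once the cpu is back; newer kernels do the equivalent through cpuhp states.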
		/*
		 * There might have been an OOM kill while we were
		 * freezing tasks and the killed task might be still
		 * on the way out so we have to double check for race.
		 */
		if (oom_kills_count() != oom_kills_saved &&
		    !check_frozen_processes()) {
			__usermodehelper_set_disable_depth(UMH_ENABLED);
			printk("OOM in progress.");
			error = -EBUSY;
		} else {
			printk("done.");
		}
	}
	printk("\n");
	BUG_ON(in_atomic());
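This fragment is the tail of freeze_processes() (kernel/power/process.c in 3.x-era kernels): after user space has been frozen it double-checks that an OOM kill did not race with the freezer. The helper it relies on, check_frozen_processes(), is roughly the following (a simplified sketch, not guaranteed to match any particular kernel version line for line); it simply re-walks the task list and confirms everything is either frozen or allowed to skip freezing:

/* Simplified sketch of check_frozen_processes(), kernel/power/process.c */
static bool check_frozen_processes(void)
{
	struct task_struct *g, *p;
	bool ret = true;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		/* Anything not frozen (and not allowed to skip) means the freeze
		 * is incomplete, e.g. an OOM-killed task still on its way out. */
		if (p != current && !freezer_should_skip(p) && !frozen(p)) {
			ret = false;
			goto done;
		}
	}
done:
	read_unlock(&tasklist_lock);
	return ret;
}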
		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator. Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}
	/*
	 * This check can race with freezer_do_not_count, but worst case that
	 * will result in an extra wakeup being sent to the task. It does not
	 * race with freezer_count(), the barriers in freezer_count() and
	 * freezer_should_skip() ensure that either freezer_count() sees
	 * freezing == true in try_to_freeze() and freezes, or
	 * freezer_should_skip() sees !PF_FREEZE_SKIP and freezes the task
	 * normally.
	 */
	if (freezer_should_skip(p))
		return false;
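The freezer_do_not_count()/freezer_count() pair referenced by this comment lives in include/linux/freezer.h and looks roughly like the sketch below (simplified; the exact comments and barriers differ between versions). A task brackets a long, freezer-safe sleep with these calls so that try_to_freeze_tasks() does not have to wait for it:

/* Simplified sketch of include/linux/freezer.h helpers */
static inline void freezer_do_not_count(void)
{
	/* Tell the freezer it may skip this task while it sleeps */
	current->flags |= PF_FREEZER_SKIP;
}

static inline void freezer_count(void)
{
	current->flags &= ~PF_FREEZER_SKIP;
	/* Pairs with the barrier in freezer_should_skip(): either the freezer
	 * sees that we no longer skip, or we see freezing() and freeze here. */
	smp_mb();
	try_to_freeze();
}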
		if (!(current->flags & PF_FROZEN))
			break;
		was_frozen = true;
		schedule();
	}
pr_debug("%s left refrigerator\n", current->comm);
	/*
	 * Restore saved task state before returning. The mb'd version
	 * needs to be used; otherwise, it might silently break
	 * synchronization which depends on ordered task state change.
	 */
	set_current_state(save);
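A task normally reaches __refrigerator() through try_to_freeze(), which it is expected to call at safe points in its main loop. Roughly (a simplified sketch of include/linux/freezer.h; the real code adds lock-held debugging checks):

/* Simplified sketch: how a task enters the refrigerator */
static inline bool try_to_freeze(void)
{
	if (likely(!freezing(current)))
		return false;		/* nobody asked us to freeze */
	return __refrigerator(false);	/* the loop above: sleep until thawed */
}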
	for (;;) {
		/*
		 * Lets try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters. We only attempt the xchg if the count is
		 * non-negative in order to avoid unnecessary xchg operations:
		 */
		// (1) Exit the loop only once the mutex has actually been acquired
		if (atomic_read(&lock->count) >= 0 &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;
		// (2) Also bail out if a signal is pending (for interruptible waits)
		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}
		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}
		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
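The -EINTR path at (2) is only reachable through the interruptible variants of the lock. As a usage illustration (my_dev and my_ioctl_helper are hypothetical names, not from the kernel source), a driver that wants to sleep on a mutex while still letting signals through typically looks like this:

/* Hypothetical caller: the slow path above returns -EINTR on a signal */
static int my_ioctl_helper(struct my_dev *dev)
{
	if (mutex_lock_interruptible(&dev->lock))
		return -ERESTARTSYS;	/* interrupted by a signal while sleeping */

	/* ... critical section protected by dev->lock ... */

	mutex_unlock(&dev->lock);
	return 0;
}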