	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;
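This check is what forces every driver that shares an interrupt line to hand in a unique cookie. As a hedged illustration (the device structure, register offset and function names below are made up, not taken from any real driver), a shared-IRQ request typically looks like this:

#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical per-device structure, for illustration only. */
struct my_dev {
	void __iomem *regs;
	int irq;
};

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;	/* the same cookie passed to request_irq() */

	/* On a shared line, first check whether our device actually raised the irq. */
	if (!(readl(dev->regs + 0x10) & 0x1))	/* 0x10/0x1 are made-up status register/bit */
		return IRQ_NONE;

	/* ... acknowledge and handle the interrupt ... */
	return IRQ_HANDLED;
}

static int my_dev_setup_irq(struct my_dev *dev)
{
	/*
	 * dev must be non-NULL: IRQF_SHARED with a NULL dev_id is rejected
	 * with -EINVAL above, because free_irq(irq, dev_id) would have no
	 * way to pick this driver's action off the shared action list.
	 */
	return request_irq(dev->irq, my_irq_handler, IRQF_SHARED,
			   "my-dev", dev);
}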
#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;
	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;
	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);	// (4.1) check whether the interrupt is a nested-thread interrupt
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		// (4.2) Check whether the interrupt may be threaded.
		// If the descriptor does not carry the _IRQ_NOTHREAD flag and
		// forced threading is enabled (force_irqthreads == 1), the
		// interrupt is forcibly threaded:
		//   new->thread_fn = new->handler; new->handler = irq_default_primary_handler;
		if (irq_settings_can_thread(desc))
			irq_setup_forced_threading(new);
	}
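For reference, the forced-threading helper mentioned in comment (4.2) looks roughly like the sketch below in kernels of this vintage; this is a paraphrase from memory rather than an exact quote, so details may differ between versions:

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	/* Already threaded, per-cpu or explicitly non-threadable: leave it alone. */
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		/* Move the driver's handler into thread context. */
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}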
	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	// (4.3) For a threaded interrupt, create the kernel thread that will serve it.
	if (new->thread_fn && !nested) {
		struct task_struct *t;
		static const struct sched_param param = {
			.sched_priority = MAX_USER_RT_PRIO/2,
		};
		// Create the irq thread.
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto out_mput;
		}
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		// Store the thread in the ->thread member.
		new->thread = t;
		/*
		 * Tell the thread to set its affinity. This is
		 * important for shared interrupt handlers as we do
		 * not invoke setup_affinity() for the secondary
		 * handlers as everything is already set up. Even for
		 * interrupts marked with IRQF_NO_BALANCE this is
		 * correct as we want the thread to move to the cpu(s)
		 * on which the requesting code placed the interrupt.
		 */
		set_bit(IRQTF_AFFINITY, &new->thread_flags);
	}
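The kthread created here (named "irq/<nr>-<name>") is what ends up running the driver's thread_fn. A hedged driver-side sketch of the call that leads to this path follows; my_quick_check, my_slow_work and the device cookie are made-up names:

#include <linux/interrupt.h>

/* Hard irq context: keep it short, just quiesce the device and defer the rest. */
static irqreturn_t my_quick_check(int irq, void *dev_id)
{
	/* ... mask/ack the interrupt in the device ... */
	return IRQ_WAKE_THREAD;		/* wakes the "irq/<nr>-my-dev" kthread */
}

/* Runs in the kthread created above; it may sleep (mutexes, i2c, etc.). */
static irqreturn_t my_slow_work(int irq, void *dev_id)
{
	/* ... the actual, potentially slow handling ... */
	return IRQ_HANDLED;
}

static int my_dev_request(int irq, void *dev)
{
	return request_threaded_irq(irq, my_quick_check, my_slow_work,
				    0, "my-dev", dev);
}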
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}
	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;
	/*
	 * The following block of code has to be executed atomically
	 */
	// (4.4) Walk the action list to find the last action.
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;
		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;
		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		// More than one action now hangs off this line, so mark it as shared.
		shared = 1;
	}
	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->thread_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);
	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}
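In driver terms, the -EINVAL above is what you hit when you pass handler = NULL without IRQF_ONESHOT. The accepted pattern for a purely threaded handler is sketched below (the function and device names are illustrative only):

#include <linux/interrupt.h>

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	/* May sleep; the line stays masked until we return, thanks to IRQF_ONESHOT. */
	return IRQ_HANDLED;
}

static int my_dev_request_oneshot(int irq, void *dev)
{
	/*
	 * handler == NULL makes the core use irq_default_primary_handler,
	 * which only wakes the thread. IRQF_ONESHOT keeps the line masked
	 * until my_thread_fn() finishes, so a level-triggered device cannot
	 * retrigger the storm described in the comment above. Without
	 * IRQF_ONESHOT this request is rejected with -EINVAL.
	 */
	return request_threaded_irq(irq, NULL, my_thread_fn,
				    IRQF_ONESHOT, "my-dev", dev);
}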
	// (4.5) If this is the first action on the line, do the one-time setup.
	if (!shared) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_mask;
		}
		init_waitqueue_head(&desc->wait_for_threads);
		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);
	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	// (4.7) If the irq was disabled as spurious earlier, re-enable it here.
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq);
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	// (4.8) Wake up the kthread created for the threaded interrupt.
	if (new->thread)
		wake_up_process(new->thread);
	if (desc) {
		__synchronize_hardirq(desc);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		// For a threaded interrupt we also have to wait until the thread
		// handlers have finished (threads_active drops to zero).
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
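The wait on threads_active above is also why driver teardown must release the irq before freeing the data its handlers use. A minimal, hypothetical teardown sketch (structure and function names are made up):

#include <linux/interrupt.h>
#include <linux/slab.h>

/* Hypothetical per-device structure, for illustration only. */
struct my_dev {
	int irq;
};

static void my_dev_teardown(struct my_dev *dev)
{
	/*
	 * free_irq() waits, much like synchronize_irq() above, until no hard
	 * irq handler is running and desc->threads_active has dropped to
	 * zero, so freeing the handler's data is only safe after it returns.
	 */
	free_irq(dev->irq, dev);
	kfree(dev);
}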