/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include <linux/cpumask.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before returning from sigio_handler.  That will process a separate
 * list of irqs to free, with its own locking, coming back here to
 * remove list elements, taking the irq_lock to do so.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else
				break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}
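
/*
 * For reference, a minimal sketch of what os_waiting_for_events is
 * assumed to do on the host side (the real helper lives in
 * arch/um/os-Linux/irq.c; pollfds/pollfds_num below stand in for its
 * file-scope statics):
 */
#if 0
int os_waiting_for_events(struct irq_fd *active_fds)
{
	struct irq_fd *irq_fd;
	int i, n;

	/* Non-blocking poll of every registered descriptor. */
	n = poll(pollfds, pollfds_num, 0);
	if (n < 0)
		return -errno;	/* -EINTR makes the caller retry */
	if (n == 0)
		return 0;

	/*
	 * The pollfds array and the active_fds list are kept in the
	 * same order, so walk them in lockstep and record the ready
	 * events for sigio_handler to dispatch.
	 */
	irq_fd = active_fds;
	for (i = 0; i < pollfds_num; i++) {
		if (pollfds[i].revents != 0) {
			irq_fd->current_events = pollfds[i].revents;
			pollfds[i].fd = -1;	/* one-shot; re-armed by reactivate_fd */
		}
		irq_fd = irq_fd->next;
	}
	return n;
}
#endif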
static DEFINE_SPINLOCK(irq_lock);

static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int events, err, n;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else
		events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next		= NULL,
				     .id		= dev_id,
				     .fd		= fd,
				     .type		= type,
				     .irq		= irq,
				     .events		= events,
				     .current_events	= 0 });

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means the new pollfd did not fit into the current
		 * pollfds array, and tmp_pfd is either NULL or too small
		 * for the grown array; at least n bytes are needed.
		 *
		 * We have to drop the lock here in order to call kmalloc,
		 * which might sleep.  If someone else changed the pollfds
		 * array in the meantime, so that the new entry still
		 * doesn't fit, the freshly allocated buffer is freed and
		 * we try again.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}
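
/*
 * The retry loop above relies on the return convention of
 * os_create_pollfd.  A rough sketch of that helper (the real one is in
 * arch/um/os-Linux/irq.c; pollfds/pollfds_num/pollfds_size stand in
 * for its file-scope statics):
 */
#if 0
int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds)
{
	if (pollfds_num == pollfds_size) {
		if (size_tmpfds <= pollfds_size * sizeof(pollfds[0]))
			/* Tell the caller the minimum size to allocate. */
			return (pollfds_size + 1) * sizeof(pollfds[0]);

		/* The caller's buffer is big enough - switch over to it. */
		if (pollfds != NULL) {
			memcpy(tmp_pfd, pollfds,
			       sizeof(pollfds[0]) * pollfds_size);
			kfree(pollfds);
		}
		pollfds = tmp_pfd;
		pollfds_size++;
	} else
		kfree(tmp_pfd);	/* unused spare buffer */

	pollfds[pollfds_num++] = ((struct pollfd) { .fd		= fd,
						    .events	= events,
						    .revents	= 0 });
	return 0;
}
#endif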
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq	= irq,
							  .dev	= dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}
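
/*
 * os_free_irq_by_cb does the actual unlinking on the host side.  A
 * simplified sketch (modelled on the helper in arch/um/os-Linux/irq.c,
 * with pollfds/pollfds_num standing in for its statics): it walks the
 * list with a pointer-to-pointer so matched nodes can be unlinked in
 * place, keeping the pollfds array in step with the list.
 */
#if 0
void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg,
		       struct irq_fd *active_fds,
		       struct irq_fd ***last_irq_ptr2)
{
	struct irq_fd **prev = &active_fds;
	int i = 0;

	while (*prev != NULL) {
		if ((*test)(*prev, arg)) {
			struct irq_fd *old_fd = *prev;

			/* Close the gap in the mirrored pollfds array. */
			pollfds_num--;
			memmove(&pollfds[i], &pollfds[i + 1],
				(pollfds_num - i) * sizeof(pollfds[0]));

			/* Keep the tail pointer valid if it aimed here. */
			if (*last_irq_ptr2 == &old_fd->next)
				*last_irq_ptr2 = prev;

			*prev = (*prev)->next;
			kfree(old_fd);
			continue;	/* the list shrank at this position */
		}
		prev = &(*prev)->next;
		i++;
	}
}
#endif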
/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}

	if (irq == NULL) {
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
		goto out;
	}

	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}

	*index_out = i;
 out:
	return irq;
}
void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}
EXPORT_SYMBOL(deactivate_fd);
/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}
	/* If a SIGIO is already queued, ignore it once signals are unblocked */
	os_set_ioignore();

	return 0;
}
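
/*
 * os_set_ioignore is assumed to be a one-line host helper (the real
 * one lives in arch/um/os-Linux/irq.c); a sketch:
 */
#if 0
void os_set_ioignore(void)
{
	signal(SIGIO, SIG_IGN);
}
#endif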
/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}
void um_free_irq(unsigned int irq, void *dev)
{
	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
}
EXPORT_SYMBOL(um_free_irq);

int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
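
/*
 * Example of how a driver is expected to use this API (my_intr,
 * struct my_dev, and MY_IRQ are hypothetical names, not part of this
 * file): register a host fd as an IRQ source, then re-arm the
 * one-shot pollfd entry from the handler with reactivate_fd().
 */
#if 0
static irqreturn_t my_intr(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	/* ... read from dev->fd until it would block ... */

	reactivate_fd(dev->fd, MY_IRQ);
	return IRQ_HANDLED;
}

static int my_init(struct my_dev *dev)
{
	return um_request_irq(MY_IRQ, dev->fd, IRQ_READ, my_intr,
			      IRQF_SHARED, "my_dev", dev);
}
#endif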
/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

static struct irq_chip SIGVTALRM_irq_type = {
	.name = "SIGVTALRM",
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
	.irq_mask = dummy,
	.irq_unmask = dummy,
};

void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);

	for (i = 1; i < NR_IRQS; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
}
/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_NODEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask.  If the
 * value that comes back is zero, then there is no setup in progress,
 * and the interrupt can be handled.  If the value is non-zero, then
 * there is stack setup in progress.  In order to have the interrupt
 * handled, we leave our signal in the mask, and it will be handled by
 * the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */
static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}
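
/*
 * For reference, the expected call pattern from the outer signal
 * handler (a sketch modelled on the os-Linux side; handlers[] and the
 * surrounding signal plumbing are assumptions about that code, not
 * part of this file):
 */
#if 0
static void hard_handler(int sig, siginfo_t *si, void *p)
{
	unsigned long pending = 1UL << sig;

	do {
		int nested, bail;

		/*
		 * If bail is set, another handler is mid-setup and has
		 * taken responsibility for our bit in pending_mask.
		 */
		bail = to_irq_stack(&pending);
		if (bail)
			return;

		nested = pending & 1;
		pending &= ~1;

		while ((sig = ffs(pending)) != 0) {
			sig--;
			pending &= ~(1UL << sig);
			(*handlers[sig])(sig, si, p);
		}

		/*
		 * Outer handler only: copy thread_info back and pick up
		 * any signals that arrived while we were tearing down.
		 */
		if (!nested)
			pending = from_irq_stack(nested);
	} while (pending);
}
#endif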