/*
 * Precise Delay Loops for i386
 *
 * Copyright (C) 1993 Linus Torvalds
 * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 * Copyright (C) 2008 Jiri Hladky <hladky _dot_ jiri _at_ gmail _dot_ com>
 *
 * The __delay function must _NOT_ be inlined as its execution time
 * depends wildly on alignment on many x86 processors. The additional
 * jump magic is needed to get the timing stable on all the CPUs
 * we have to worry about.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>

#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/timer.h>

#ifdef CONFIG_SMP
# include <asm/smp.h>
#endif

/* simple loop based delay: */
static void delay_loop(unsigned long loops)
{
	asm volatile(
		"	test %0,%0	\n"
		"	jz 3f		\n"
		"	jmp 1f		\n"

		".align 16		\n"
		"1:	jmp 2f		\n"

		".align 16		\n"
		"2:	dec %0		\n"
		"	jnz 2b		\n"
		"3:	dec %0		\n"

		: /* we don't need output */
		:"a" (loops)
	);
}
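
/*
 * Rough C equivalent (added for illustration; not in the original
 * source). The unconditional jumps and ".align 16" directives pad the
 * code so that the hot dec/jnz pair starts on a fresh 16-byte
 * boundary, which is what keeps the per-iteration timing stable:
 *
 *	if (loops == 0)
 *		return;
 *	do {
 *		loops--;
 *	} while (loops != 0);
 */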

/* TSC based delay: */
static void delay_tsc(unsigned long __loops)
{
	u32 bclock, now, loops = __loops;
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	rdtsc_barrier();
	rdtscl(bclock);
	for (;;) {
		rdtsc_barrier();
		rdtscl(now);
		if ((now - bclock) >= loops)
			break;

		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();

		/*
		 * It is possible that we moved to another CPU, and
		 * since TSCs are per-CPU we need to account for
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer, but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);
			cpu = smp_processor_id();
			rdtsc_barrier();
			rdtscl(bclock);
		}
	}
	preempt_enable();
}
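
/*
 * Note (added for clarity; not in the original source): bclock, now
 * and loops are deliberately u32, so "now - bclock" is computed
 * modulo 2^32 and stays correct even when the low word of the TSC
 * wraps around, e.g. bclock = 0xffffff00 and now = 0x00000100 give a
 * delta of 0x200. This holds as long as a single delay is shorter
 * than 2^32 cycles.
 */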

/*
 * Since we calibrate only once at boot, this function pointer is set
 * once at boot and not changed afterwards.
 */
static void (*delay_fn)(unsigned long) = delay_loop;

void use_tsc_delay(void)
{
	delay_fn = delay_tsc;
}

int read_current_timer(unsigned long *timer_val)
{
	if (delay_fn == delay_tsc) {
		rdtscll(*timer_val);
		return 0;
	}
	return -1;
}
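
/*
 * Note (added for clarity): read_current_timer() is the hook
 * consulted by the generic calibrate_delay_direct() code in
 * init/calibrate.c; it can only report a timer once use_tsc_delay()
 * has switched delay_fn to the TSC implementation, and returns -1
 * otherwise.
 */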

void __delay(unsigned long loops)
{
	delay_fn(loops);
}
EXPORT_SYMBOL(__delay);
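
/*
 * Usage note (added for clarity): the loops argument is in calibrated
 * delay units, so e.g. __delay(loops_per_jiffy) busy-waits for
 * roughly one jiffy (1/HZ seconds). Most code should use the
 * udelay()/ndelay() wrappers rather than calling __delay() directly.
 */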

inline void __const_udelay(unsigned long xloops)
{
	int d0;

	xloops *= 4;
	asm("mull %%edx"
		:"=d" (xloops), "=&a" (d0)
		:"1" (xloops), "0"
		(this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));

	__delay(++xloops);
}
EXPORT_SYMBOL(__const_udelay);
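
/*
 * Worked example (added for clarity; not in the original source):
 * mull %%edx multiplies %eax by %edx and leaves the high 32 bits of
 * the 64-bit product in %edx, so the code above computes
 *
 *	loops = (xloops * 4 * loops_per_jiffy * (HZ/4)) >> 32
 *	      = (xloops * loops_per_jiffy * HZ) >> 32
 *
 * xloops arrives pre-scaled by 2^32 (see __udelay()/__ndelay()
 * below), so the 2^32 factors cancel and the result is in __delay()
 * loop units; the ++xloops rounds up so the delay never comes in
 * short. The *4 / (HZ/4) split presumably exists to keep the 32-bit
 * operand loops_per_jiffy * (HZ/4) from overflowing: with HZ = 250
 * and loops_per_jiffy = 16,000,000 (roughly a 4 GHz CPU),
 * lpj * HZ is already close to 2^32, while lpj * (HZ/4) leaves
 * 4x headroom.
 */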

void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);
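
/*
 * Arithmetic check (added for clarity): 2^32 / 10^6 = 4294.967296,
 * which rounds up to 4295 = 0x10c7. For example, __udelay(100) passes
 * xloops = 100 * 0x10c7 = 429,500, i.e. roughly 100 * 2^32 / 10^6;
 * the 2^32 then cancels inside __const_udelay() as shown above. The
 * multiply is 32-bit, so callers are expected to keep individual
 * udelay() arguments small.
 */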

void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
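
/*
 * Arithmetic check (added for clarity): 2^32 / 10^9 = 4.294967296,
 * which rounds up to 5, so __ndelay() can overshoot by up to ~16%.
 * That errs on the long side, which is the safe direction for a
 * "wait at least this long" primitive.
 */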