entry.S

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#include <linux/linkage.h>
#include <linux/unistd.h>
#include <asm/irqflags.h>
#include <asm/processor.h>
#include <arch/abi.h>
#include <arch/spr_def.h>
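
/* The tilegx ISA spells the "branch nonzero, predicted taken" op bnezt. */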
#ifdef __tilegx__
#define bnzt bnezt
#endif
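
/*
 * Return the caller's text address: lr holds the return address,
 * i.e. the instruction immediately after the call site.
 */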
STD_ENTRY(current_text_addr)
	{ move r0, lr; jrp lr }
	STD_ENDPROC(current_text_addr)
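
/*
 * Compute this function's own address as "pc" (lnk plus a constant
 * offset back to the entry point), then tail-call the C helper with
 * pc, lr, sp, and r52 in r1-r4; r0, the iterator, passes through.
 */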
STD_ENTRY(KBacktraceIterator_init_current)
	{ move r2, lr; lnk r1 }
	{ move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
	{ move r3, sp; j _KBacktraceIterator_init_current }
	jrp lr /* keep backtracer happy */
	STD_ENDPROC(KBacktraceIterator_init_current)

/* Loop forever on a nap during SMP boot. */
STD_ENTRY(smp_nap)
	nap
	nop /* avoid provoking the icache prefetch with a jump */
	j smp_nap /* we are not architecturally guaranteed not to exit nap */
	jrp lr /* clue in the backtracer */
	STD_ENDPROC(smp_nap)

/*
 * Enable interrupts racelessly and then nap until interrupted.
 * Architecturally, we are guaranteed that enabling interrupts via
 * mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
 * This function's _cpu_idle_nap address is special; see intvec.S.
 * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
 * as a result return to the function that called _cpu_idle().
 */
STD_ENTRY_SECTION(_cpu_idle, .cpuidle.text)
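	/*
	 * IRQ_ENABLE is split into its LOAD and APPLY halves (see
	 * <asm/irqflags.h>) so that the unmask itself happens with ICS
	 * set: any interrupt it would allow is held off until ICS is
	 * cleared, and then fires no earlier than the next PC, which
	 * is the nap below.
	 */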
	movei r1, 1
	IRQ_ENABLE_LOAD(r2, r3)
	mtspr INTERRUPT_CRITICAL_SECTION, r1
	IRQ_ENABLE_APPLY(r2, r3) /* unmask, but still with ICS set */
	mtspr INTERRUPT_CRITICAL_SECTION, zero
	.global _cpu_idle_nap
_cpu_idle_nap:
	nap
	nop /* avoid provoking the icache prefetch with a jump */
	jrp lr
	STD_ENDPROC(_cpu_idle)