suspend.c
#include <linux/init.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

extern int __cpu_suspend(unsigned long, int (*)(unsigned long), u32 cpuid);
extern void cpu_resume_mmu(void);
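/*
 * Commentary (not in the original file): __cpu_suspend is the low-level
 * assembly entry point (sleep.S in the mainline tree). The cpuid argument
 * carries this CPU's MPIDR, which selects the slot where the saved-context
 * pointer is stashed so cpu_resume can find it with the MMU off.
 */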
#ifdef CONFIG_MMU
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
        struct mm_struct *mm = current->active_mm;
        u32 __mpidr = cpu_logical_map(smp_processor_id());
        int ret;

        if (!idmap_pgd)
                return -EINVAL;

        /*
         * Provide a temporary page table with an identity mapping for
         * the MMU-enable code, required for resuming. On successful
         * resume (indicated by a zero return code), we need to switch
         * back to the correct page tables.
         */
        ret = __cpu_suspend(arg, fn, __mpidr);
        if (ret == 0) {
                cpu_switch_mm(mm->pgd, mm);
                /* Drop stale predictor/TLB entries left by the idmap tables */
                local_flush_bp_all();
                local_flush_tlb_all();
        }

        return ret;
}
#else
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
        u32 __mpidr = cpu_logical_map(smp_processor_id());
        return __cpu_suspend(arg, fn, __mpidr);
}
#define idmap_pgd NULL
#endif
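/*
 * Illustrative sketch, not part of the original file: how a platform
 * power-management driver typically drives cpu_suspend(). The finisher is
 * called after the CPU state has been saved; it must either enter the
 * low-power state (and not return through the normal path) or return
 * non-zero to abort the suspend. "example_enter_lowpower" is a
 * hypothetical platform hook, and the whole sketch is kept under #if 0
 * so this file still builds as-is.
 */
#if 0
static int example_finisher(unsigned long arg)
{
        example_enter_lowpower(arg);    /* hypothetical; should not return */
        return 1;                       /* power-down aborted, unwind */
}

static int example_enter_sleep(void)
{
        /* 0 means we suspended and came back through cpu_resume() */
        return cpu_suspend(0, example_finisher);
}
#endif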
/*
 * This is called by __cpu_suspend() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 */
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
        u32 *ctx = ptr;

        *save_ptr = virt_to_phys(ptr);

        /* This must correspond to the LDM in cpu_resume() assembly */
        *ptr++ = virt_to_phys(idmap_pgd);
        *ptr++ = sp;
        *ptr++ = virt_to_phys(cpu_do_resume);

        cpu_do_suspend(ptr);

        flush_cache_louis();

        /*
         * flush_cache_louis does not guarantee that
         * save_ptr and ptr are cleaned to main memory,
         * just up to the Level of Unification Inner Shareable.
         * Since the context pointer and context itself
         * are to be retrieved with the MMU off, that
         * data must be cleaned from all cache levels
         * to main memory using "area" cache primitives.
         */
        __cpuc_flush_dcache_area(ctx, ptrsz);
        __cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));

        outer_clean_range(*save_ptr, *save_ptr + ptrsz);
        outer_clean_range(virt_to_phys(save_ptr),
                          virt_to_phys(save_ptr) + sizeof(*save_ptr));
}
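/*
 * Sketch of the context block laid out above, derived from the stores in
 * __cpu_suspend_save():
 *
 *   ctx[0]   physical address of idmap_pgd (page tables for MMU enable)
 *   ctx[1]   stack pointer to resume on
 *   ctx[2]   physical address of cpu_do_resume
 *   ctx[3..] processor-specific state saved by cpu_do_suspend()
 *
 * cpu_resume() loads the first words with a single LDM, hence the
 * "must correspond" comment above.
 */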
extern struct sleep_save_sp sleep_save_sp;
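/*
 * Commentary (not in the original file): sleep_save_sp holds the stash of
 * per-CPU context pointers, one u32 physical address per MPIDR hash bucket,
 * filled in on suspend and consumed by the cpu_resume assembly with the MMU
 * and caches off. That is why both the array's physical address is recorded
 * and the structure is cleaned to main memory below.
 */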
static int cpu_suspend_alloc_sp(void)
{
        void *ctx_ptr;

        /* ctx_ptr is an array of physical addresses */
        ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL);
        if (WARN_ON(!ctx_ptr))
                return -ENOMEM;

        sleep_save_sp.save_ptr_stash = ctx_ptr;
        sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
        sync_cache_w(&sleep_save_sp);
        return 0;
}
early_initcall(cpu_suspend_alloc_sp);