/* arch/tile/mm/tlb.c */
  1. /*
  2. * Copyright 2010 Tilera Corporation. All Rights Reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation, version 2.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  11. * NON INFRINGEMENT. See the GNU General Public License for
  12. * more details.
  13. *
  14. */
  15. #include <linux/cpumask.h>
  16. #include <linux/module.h>
  17. #include <linux/hugetlb.h>
  18. #include <asm/tlbflush.h>
  19. #include <asm/homecache.h>
  20. #include <hv/hypervisor.h>
/* From tlbflush.h */
DEFINE_PER_CPU(int, current_asid);	/* ASID each cpu is currently using; read by flush_tlb_mm() below */
int min_asid, max_asid;			/* NOTE(review): bounds of the valid ASID range — presumably initialized elsewhere; confirm against tlbflush.h users */
  24. /*
  25. * Note that we flush the L1I (for VM_EXEC pages) as well as the TLB
  26. * so that when we are unmapping an executable page, we also flush it.
  27. * Combined with flushing the L1I at context switch time, this means
  28. * we don't have to do any other icache flushes.
  29. */
  30. void flush_tlb_mm(struct mm_struct *mm)
  31. {
  32. HV_Remote_ASID asids[NR_CPUS];
  33. int i = 0, cpu;
  34. for_each_cpu(cpu, mm_cpumask(mm)) {
  35. HV_Remote_ASID *asid = &asids[i++];
  36. asid->y = cpu / smp_topology.width;
  37. asid->x = cpu % smp_topology.width;
  38. asid->asid = per_cpu(current_asid, cpu);
  39. }
  40. flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(mm),
  41. 0, 0, 0, NULL, asids, i);
  42. }
  43. void flush_tlb_current_task(void)
  44. {
  45. flush_tlb_mm(current->mm);
  46. }
  47. void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
  48. unsigned long va)
  49. {
  50. unsigned long size = vma_kernel_pagesize(vma);
  51. int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
  52. flush_remote(0, cache, mm_cpumask(mm),
  53. va, size, size, mm_cpumask(mm), NULL, 0);
  54. }
  55. void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
  56. {
  57. flush_tlb_page_mm(vma, vma->vm_mm, va);
  58. }
  59. EXPORT_SYMBOL(flush_tlb_page);
  60. void flush_tlb_range(struct vm_area_struct *vma,
  61. unsigned long start, unsigned long end)
  62. {
  63. unsigned long size = vma_kernel_pagesize(vma);
  64. struct mm_struct *mm = vma->vm_mm;
  65. int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
  66. flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
  67. mm_cpumask(mm), NULL, 0);
  68. }
  69. void flush_tlb_all(void)
  70. {
  71. int i;
  72. for (i = 0; ; ++i) {
  73. HV_VirtAddrRange r = hv_inquire_virtual(i);
  74. if (r.size == 0)
  75. break;
  76. flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
  77. r.start, r.size, PAGE_SIZE, cpu_online_mask,
  78. NULL, 0);
  79. flush_remote(0, 0, NULL,
  80. r.start, r.size, HPAGE_SIZE, cpu_online_mask,
  81. NULL, 0);
  82. }
  83. }
  84. /*
  85. * Callers need to flush the L1I themselves if necessary, e.g. for
  86. * kernel module unload. Otherwise we assume callers are not using
  87. * executable pgprot_t's. Using EVICT_L1I means that dataplane cpus
  88. * will get an unnecessary interrupt otherwise.
  89. */
  90. void flush_tlb_kernel_range(unsigned long start, unsigned long end)
  91. {
  92. flush_remote(0, 0, NULL,
  93. start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0);
  94. }