/* Invalidate icache when dcache doesn't need invalidation as it's in
 * write-through mode
 *
 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include "cache-smp.h"
/**
 * flush_icache_page_range - Flush dcache and invalidate icache for part of a
 *                           single page
 * @start: The starting virtual address of the page part.
 * @end: The ending virtual address of the page part.
 *
 * Invalidate the icache for part of a single page, as determined by the
 * virtual addresses given.  The page must be in the paged area.  The dcache is
 * not flushed as the cache must be in write-through mode to get here.
 *
 * The caller guarantees [start, end) lies within one page; only the physical
 * bytes backing that span are invalidated locally, then the virtual range is
 * broadcast to the other CPUs.
 */
static void flush_icache_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	/* work out how much of the page to flush */
	off = start & ~PAGE_MASK;	/* byte offset within the page */
	size = end - start;

	/* get the physical address the page is mapped to from the page
	 * tables.  Any level that is absent means nothing is mapped there,
	 * so nothing can be in the cache for it and we can return quietly. */
	pgd = pgd_offset(current->mm, start);
	if (!pgd || !pgd_val(*pgd))
		return;

	pud = pud_offset(pgd, start);
	if (!pud || !pud_val(*pud))
		return;

	pmd = pmd_offset(pud, start);
	if (!pmd || !pmd_val(*pmd))
		return;

	/* snapshot the PTE while it is mapped, then drop the mapping before
	 * using the copy */
	ppte = pte_offset_map(pmd, start);
	if (!ppte)
		return;
	pte = *ppte;
	pte_unmap(ppte);

	if (pte_none(pte))
		return;

	page = pte_page(pte);
	if (!page)
		return;

	addr = page_to_phys(page);

	/* invalidate the icache coverage on that region: by physical address
	 * on this CPU, then by virtual range on the others via IPI */
	mn10300_local_icache_inv_range2(addr + off, size);
	smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
}
  64. /**
  65. * flush_icache_range - Globally flush dcache and invalidate icache for region
  66. * @start: The starting virtual address of the region.
  67. * @end: The ending virtual address of the region.
  68. *
  69. * This is used by the kernel to globally flush some code it has just written
  70. * from the dcache back to RAM and then to globally invalidate the icache over
  71. * that region so that that code can be run on all CPUs in the system.
  72. */
  73. void flush_icache_range(unsigned long start, unsigned long end)
  74. {
  75. unsigned long start_page, end_page;
  76. unsigned long flags;
  77. flags = smp_lock_cache();
  78. if (end > 0x80000000UL) {
  79. /* addresses above 0xa0000000 do not go through the cache */
  80. if (end > 0xa0000000UL) {
  81. end = 0xa0000000UL;
  82. if (start >= end)
  83. goto done;
  84. }
  85. /* kernel addresses between 0x80000000 and 0x9fffffff do not
  86. * require page tables, so we just map such addresses
  87. * directly */
  88. start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
  89. mn10300_icache_inv_range(start_page, end);
  90. smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
  91. if (start_page == start)
  92. goto done;
  93. end = start_page;
  94. }
  95. start_page = start & PAGE_MASK;
  96. end_page = (end - 1) & PAGE_MASK;
  97. if (start_page == end_page) {
  98. /* the first and last bytes are on the same page */
  99. flush_icache_page_range(start, end);
  100. } else if (start_page + 1 == end_page) {
  101. /* split over two virtually contiguous pages */
  102. flush_icache_page_range(start, end_page);
  103. flush_icache_page_range(end_page, end);
  104. } else {
  105. /* more than 2 pages; just flush the entire cache */
  106. mn10300_local_icache_inv();
  107. smp_cache_call(SMP_ICACHE_INV, 0, 0);
  108. }
  109. done:
  110. smp_unlock_cache(flags);
  111. }
  112. EXPORT_SYMBOL(flush_icache_range);