// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/kernel/sys_m68k.c
 *
 *  This file contains various random system calls that
 *  have a non-standard calling sequence on the Linux/m68k
 *  platform.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_MMU

#include <asm/tlb.h>

asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code);

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
			  unsigned long prot, unsigned long flags,
			  unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
	 * so we need to shift the argument down by 1; m68k mmap64(3)
	 * (in libc) expects the last argument of mmap2 in 4Kb units.
	 */
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
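/*
 * For example, with 4Kb units a byte offset of 0x6000 is passed by
 * userspace as pgoff == 6 (offset >> 12), regardless of the kernel's
 * PAGE_SHIFT.
 */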
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)						\
({									\
	unsigned long _mmusr, _paddr;					\
									\
	__asm__ __volatile__ (".chip 68040\n\t"				\
			      "ptestr (%1)\n\t"				\
			      "movec %%mmusr,%0\n\t"			\
			      ".chip 68k"				\
			      : "=r" (_mmusr)				\
			      : "a" (vaddr));				\
	_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;	\
	_paddr;								\
})
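/*
 * PTESTR makes the 68040 MMU search the translation tables for the
 * given user address and latch the result in MMUSR: if the resident
 * bit (MMU_R_040) is set, MMUSR & PAGE_MASK is the physical page
 * frame, otherwise the page is not mapped and 0 is returned.
 *
 * cache_flush_040() below needs this because the 68040 CPUSH line and
 * page operations ("cpushl"/"cpushp") take physical addresses, while
 * "cpusha" pushes and invalidates the selected cache(s) entirely.
 * Cache lines are 16 bytes, hence the ">> 4" arithmetic and the
 * 16-byte steps; unmapped pages are simply skipped.
 */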
static inline int
cache_flush_040(unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			/* This nop is needed for some broken versions of the 68040. */
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ ("nop\n\t"
					      ".chip 68040\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		if ((paddr = virt_to_phys_040(addr))) {
			paddr += addr & ~(PAGE_MASK | 15);
			len = (len + (addr & 15) + 15) >> 4;
		} else {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_040(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
			len = (len + 15) >> 4;
		}
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * No need to page align here since it is done by
				 * virt_to_phys_040().
				 */
				addr += PAGE_SIZE;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;) {
					if ((paddr = virt_to_phys_040(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_040(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ ("nop\n\t"
						      ".chip 68040\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
#define virt_to_phys_060(vaddr)					\
({								\
	unsigned long paddr;					\
								\
	__asm__ __volatile__ (".chip 68060\n\t"			\
			      "plpar (%0)\n\t"			\
			      ".chip 68k"			\
			      : "=a" (paddr)			\
			      : "0" (vaddr));			\
	(paddr); /* XXX */					\
})
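/*
 * PLPAR ("load physical address, read") has the 68060 MMU translate
 * the virtual address in place, replacing it with the physical
 * address.  As with the 68040 helper above, the callers below treat a
 * zero result as "page not mapped" and skip ahead to the next page.
 */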
static inline int
cache_flush_060(unsigned long addr, int scope, int cache, unsigned long len)
{
	unsigned long paddr, i;

	/*
	 * 68060 manual says:
	 *  cpush %dc : flush DC, remains valid (with our %cacr setup)
	 *  cpush %ic : invalidate IC
	 *  cpush %bc : flush DC + invalidate IC
	 */
	switch (scope) {
	case FLUSH_SCOPE_ALL:
		switch (cache) {
		case FLUSH_CACHE_DATA:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %dc\n\t"
					      ".chip 68k");
			break;
		case FLUSH_CACHE_INSN:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %ic\n\t"
					      ".chip 68k");
			break;
		default:
		case FLUSH_CACHE_BOTH:
			__asm__ __volatile__ (".chip 68060\n\t"
					      "cpusha %bc\n\t"
					      ".chip 68k");
			break;
		}
		break;

	case FLUSH_SCOPE_LINE:
		/* Find the physical address of the first mapped page in the
		   address range.  */
		len += addr & 15;
		addr &= -16;
		if (!(paddr = virt_to_phys_060(addr))) {
			unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

			if (len <= tmp)
				return 0;
			addr += tmp;
			len -= tmp;
			tmp = PAGE_SIZE;
			for (;;) {
				if ((paddr = virt_to_phys_060(addr)))
					break;
				if (len <= tmp)
					return 0;
				addr += tmp;
				len -= tmp;
			}
		}
		len = (len + 15) >> 4;
		i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
		while (len--) {
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushl %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
			if (!--i && len) {
				/*
				 * We just want to jump to the first cache line
				 * in the next page.
				 */
				addr += PAGE_SIZE;
				addr &= PAGE_MASK;
				i = PAGE_SIZE / 16;
				/* Recompute physical address when crossing a page
				   boundary.  */
				for (;;) {
					if ((paddr = virt_to_phys_060(addr)))
						break;
					if (len <= i)
						return 0;
					len -= i;
					addr += PAGE_SIZE;
				}
			} else
				paddr += 16;
		}
		break;

	default:
	case FLUSH_SCOPE_PAGE:
		len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
		/* Workaround for bug in some revisions of the 68060 */
		addr &= PAGE_MASK;
		for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE) {
			if (!(paddr = virt_to_phys_060(addr)))
				continue;
			switch (cache) {
			case FLUSH_CACHE_DATA:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%dc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			case FLUSH_CACHE_INSN:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%ic,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			default:
			case FLUSH_CACHE_BOTH:
				__asm__ __volatile__ (".chip 68060\n\t"
						      "cpushp %%bc,(%0)\n\t"
						      ".chip 68k"
						      : : "a" (paddr));
				break;
			}
		}
		break;
	}
	return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush(unsigned long addr, int scope, int cache, unsigned long len)
{
	int ret = -EINVAL;

	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		struct vm_area_struct *vma;

		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;

		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, addr);
		if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out_unlock;
	}
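	/*
	 * On the 68020/030 the caches are controlled purely through CACR:
	 * bit 2 (CEI) / bit 10 (CED) clear the single instruction/data
	 * cache entry addressed by CAAR, while bit 3 (CI) / bit 11 (CD)
	 * clear the whole instruction/data cache, hence the two code
	 * paths below.  (The data-cache bits only exist on the 68030.)
	 */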
	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;

			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;

			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out_unlock;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040(addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060(addr, scope, cache, len);
		}
	}
out_unlock:
	up_read(&current->mm->mmap_sem);
out:
	return ret;
}
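/*
 * Typical use from userspace (illustrative sketch only): after writing
 * freshly generated code into a buffer, a JIT would issue something like
 *
 *	syscall(__NR_cacheflush, (unsigned long)buf,
 *		FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, size);
 *
 * to push the new instructions out of the data cache and invalidate the
 * stale instruction-cache lines before jumping to buf.
 */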
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		/*
		 * No need to check for EFAULT; we know that the page is
		 * present and writable.
		 */
		__get_user(mem_value, mem);
		if (mem_value == oldval)
			__put_user(newval, mem);

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access; we can get here if
		   the memory we're trying to write to should be
		   copied-on-write.  Make the kernel do the necessary page
		   stuff, then re-iterate.  Simulate a write access fault
		   to do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If do_page_fault() failed, we don't have
				   anything meaningful to return.  There
				   should be a SIGSEGV pending for the
				   process.  */
				return 0xdeadbeef;
		}
	}
}
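/*
 * Userspace contract (sketch): the syscall returns the value that was
 * found in *mem.  A caller such as a locking primitive compares the
 * return value with the oldval it passed in; equality means the store
 * of newval took place, anything else means another thread got there
 * first and the operation should be retried.
 */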
#else

/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush(unsigned long addr, int scope, int cache, unsigned long len)
{
	flush_cache_all();
	return 0;
}

/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	struct mm_struct *mm = current->mm;
	unsigned long mem_value;

	down_read(&mm->mmap_sem);

	mem_value = *mem;
	if (mem_value == oldval)
		*mem = newval;

	up_read(&mm->mmap_sem);
	return mem_value;
}

#endif /* CONFIG_MMU */
asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}

asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}
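/*
 * tp_value is the per-thread TLS pointer: the C library typically sets
 * it once with set_thread_area() when a thread starts and reads it back
 * with get_thread_area() when __thread data must be located, since m68k
 * has no register reserved for a thread pointer.
 */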
asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}