vmx-helper.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2011
 *
 * Authors: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <asm/switch_to.h>
#include <asm/asm-prototypes.h>

int enter_vmx_usercopy(void)
{
	/* VMX state cannot be saved and restored safely from interrupt context */
	if (in_interrupt())
		return 0;

	preempt_disable();

	/*
	 * We need to disable page faults as they can call schedule and
	 * thus make us lose the VMX context. So on page faults, we just
	 * fail which will cause a fallback to the normal non-vmx copy.
	 */
	pagefault_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * This function must return 0 because we tail call optimise when calling
 * from __copy_tofrom_user_power7 which returns 0 on success.
 */
int exit_vmx_usercopy(void)
{
	disable_kernel_altivec();
	pagefault_enable();
	preempt_enable();
	return 0;
}
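
/*
 * Illustrative sketch, not part of this file: the real caller of the pair
 * above is the assembly routine __copy_tofrom_user_power7, but in C the
 * pattern looks roughly like this. scalar_copy() and vmx_copy() are
 * hypothetical stand-ins for the non-VMX fallback and the VMX copy body;
 * like __copy_tofrom_user(), each returns the number of bytes not copied.
 *
 *	unsigned long copy_sketch(void *to, const void *from, unsigned long n)
 *	{
 *		unsigned long left;
 *
 *		if (!enter_vmx_usercopy())
 *			return scalar_copy(to, from, n);  // no VMX in interrupts
 *
 *		left = vmx_copy(to, from, n);  // page faults fail fast here
 *		if (left) {
 *			exit_vmx_usercopy();
 *			return scalar_copy(to + n - left, from + n - left, left);
 *		}
 *
 *		return exit_vmx_usercopy();  // tail call; returns 0 == success
 *	}
 */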

int enter_vmx_copy(void)
{
	if (in_interrupt())
		return 0;

	preempt_disable();

	enable_kernel_altivec();

	return 1;
}

/*
 * All calls to this function will be optimised into tail calls. We are
 * passed a pointer to the destination which we return as required by a
 * memcpy implementation.
 */
void *exit_vmx_copy(void *dest)
{
	disable_kernel_altivec();
	preempt_enable();
	return dest;
}
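
/*
 * Illustrative sketch, not part of this file: the real caller of
 * enter_vmx_copy()/exit_vmx_copy() is the assembly routine memcpy_power7.
 * In C the shape is roughly the following, with scalar_memcpy() and
 * vmx_copy() as hypothetical stand-ins for the fallback and VMX paths.
 *
 *	void *memcpy_sketch(void *dest, const void *src, size_t n)
 *	{
 *		if (!enter_vmx_copy())
 *			return scalar_memcpy(dest, src, n);  // interrupt context
 *
 *		vmx_copy(dest, src, n);
 *
 *		// exit_vmx_copy() is tail called and hands back dest,
 *		// which is exactly what memcpy must return.
 *		return exit_vmx_copy(dest);
 *	}
 */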