cpcmd.c

/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2007
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Christian Borntraeger (cborntra@de.ibm.com),
 */

#define KMSG_COMPONENT "cpcmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <asm/io.h>

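/*
 * cpcmd_lock serializes users of the shared command buffer; cpcmd_buf
 * holds the EBCDIC-converted CP command (at most 240 characters, see
 * the BUG_ON() in __cpcmd()).
 */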
static DEFINE_SPINLOCK(cpcmd_lock);
static char cpcmd_buf[241];
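
/*
 * Issue the CP command in cpcmd_buf via DIAGNOSE 0x08 without a
 * response buffer.  The addressing mode is switched to 31 bit around
 * the diagnose; the value CP returns in the length register is passed
 * back as the response code.
 */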
static int diag8_noresponse(int cmdlen)
{
        register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
        register unsigned long reg3 asm ("3") = cmdlen;

        asm volatile(
                "       sam31\n"
                "       diag    %1,%0,0x8\n"
                "       sam64\n"
                : "+d" (reg3) : "d" (reg2) : "cc");
        return reg3;
}
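
/*
 * Issue the CP command in cpcmd_buf via DIAGNOSE 0x08 and have CP
 * store the response in the supplied buffer.  Bit 0x40000000 in the
 * length register indicates that a response buffer is present.  The
 * response code is returned and *rlen is updated with the length
 * reported by CP; if the condition code signals that the response did
 * not fit, the original buffer length is added to that value.
 */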
static int diag8_response(int cmdlen, char *response, int *rlen)
{
        register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
        register unsigned long reg3 asm ("3") = (addr_t) response;
        register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
        register unsigned long reg5 asm ("5") = *rlen;

        asm volatile(
                "       sam31\n"
                "       diag    %2,%0,0x8\n"
                "       sam64\n"
                "       brc     8,1f\n"
                "       agr     %1,%4\n"
                "1:\n"
                : "+d" (reg4), "+d" (reg5)
                : "d" (reg2), "d" (reg3), "d" (*rlen) : "cc");
        *rlen = reg5;
        return reg4;
}

/*
 * __cpcmd has some restrictions over cpcmd
 *  - the response buffer must reside below 2GB (if any)
 *  - __cpcmd is unlocked and therefore not SMP-safe
 */
int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
        int cmdlen;
        int rc;
        int response_len;

        cmdlen = strlen(cmd);
        BUG_ON(cmdlen > 240);
        memcpy(cpcmd_buf, cmd, cmdlen);
        ASCEBC(cpcmd_buf, cmdlen);

        if (response) {
                memset(response, 0, rlen);
                response_len = rlen;
                rc = diag8_response(cmdlen, response, &rlen);
                EBCASC(response, response_len);
        } else {
                rc = diag8_noresponse(cmdlen);
        }
        if (response_code)
                *response_code = rc;
        return rlen;
}
EXPORT_SYMBOL(__cpcmd);
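
/*
 * cpcmd is the locked, SMP-safe front end to __cpcmd.  If the caller's
 * response buffer is not identity mapped or extends beyond 2GB, the
 * response is bounced through a temporary buffer allocated from
 * GFP_DMA memory below 2GB.
 */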
int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
        char *lowbuf;
        int len;
        unsigned long flags;

        if ((virt_to_phys(response) != (unsigned long) response) ||
            (((unsigned long)response + rlen) >> 31)) {
                lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
                if (!lowbuf) {
                        pr_warning("The cpcmd kernel function failed to "
                                   "allocate a response buffer\n");
                        return -ENOMEM;
                }
                spin_lock_irqsave(&cpcmd_lock, flags);
                len = __cpcmd(cmd, lowbuf, rlen, response_code);
                spin_unlock_irqrestore(&cpcmd_lock, flags);
                memcpy(response, lowbuf, rlen);
                kfree(lowbuf);
        } else {
                spin_lock_irqsave(&cpcmd_lock, flags);
                len = __cpcmd(cmd, response, rlen, response_code);
                spin_unlock_irqrestore(&cpcmd_lock, flags);
        }
        return len;
}
EXPORT_SYMBOL(cpcmd);
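
/*
 * Illustrative sketch of a caller: the CP command, buffer size, and
 * variable names below are arbitrary examples, not taken from this
 * file.
 *
 *	char buf[128];
 *	int resp_code, len;
 *
 *	len = cpcmd("QUERY USERID", buf, sizeof(buf), &resp_code);
 *	pr_info("CP response code %d, length %d: %s\n", resp_code, len, buf);
 */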