shim.c

/* Raw shim communication appears here.
 * If/when the shim is replaced by something less hacky,
 * only this file will change.
 */

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <pandriver.h>

#ifdef __PANWRAP
int ioctl(int fd, int request, ...);
#else
#include <sys/ioctl.h>
#endif
#define __m_ioctl(fd, data, ioc) \
        data.header.id = ((_IOC_TYPE(ioc) & 0xF) << 8) | _IOC_NR(ioc); \
        if (ioctl(fd, ioc, &data)) { \
                printf("Bad ioctl %d (%s)\n", ioc, strerror(errno)); \
                exit(1); \
        }

/* Typecast m_ioctl args properly if we're dealing with crappy bionic */
#ifdef __BIONIC__
#define m_ioctl(fd, data, ioc) __m_ioctl(fd, data, ((int)ioc))
#else
#define m_ioctl(fd, data, ioc) __m_ioctl(fd, data, ioc)
#endif
int open_kernel_module()
{
        int fd = open("/dev/mali0", O_RDWR | O_CLOEXEC);
        uint8_t *mtp;

        struct mali_ioctl_get_version check = {
                .major = /* MALI_UK_VERSION_MAJOR */ 0x8,
                .minor = /* MALI_UK_VERSION_MINOR */ 0x4,
        };

        struct mali_ioctl_set_flags flags = {
                .create_flags = MALI_CONTEXT_CREATE_FLAG_NONE
        };

        if (fd == -1) {
                printf("Failed to open /dev/mali0\n");
                return -1;
        }

        /* Declare the ABI version (handshake 1/3) */
        m_ioctl(fd, check, MALI_IOCTL_GET_VERSION);

        /* Map the Memmap Tracking Handle (handshake 2/3) */
        mtp = mmap(NULL, PAGE_SIZE, PROT_NONE, MAP_SHARED, fd,
                   MALI_MEM_MAP_TRACKING_HANDLE);

        if (mtp == MAP_FAILED) {
                printf("MP map failed (%s)\n", strerror(errno));
                return -1;
        }

        /* Declare special flags (handshake 3/3) */
        m_ioctl(fd, flags, MALI_IOCTL_SET_FLAGS);

        return fd;
}
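
/*
 * Allocate `pages` of GPU memory with the caller-supplied allocation flags
 * and return the resulting GPU VA (for MALI_MEM_SAME_VA allocations, the CPU
 * mapping established below doubles as that address).
 */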
uint64_t alloc_gpu_pages(int fd, int pages, int e_flags)
{
        struct mali_ioctl_mem_alloc alloc = {
                .va_pages = pages,
                .commit_pages = pages,
                .extent = 0,
                .flags = e_flags
        };

        printf("Allocing %d pages flag %X to %d\n", pages, e_flags, fd);

        m_ioctl(fd, alloc, MALI_IOCTL_MEM_ALLOC);

        // return alloc.gpu_va;

        /* Only necessary when we report old versions */
        if (e_flags & MALI_MEM_SAME_VA) {
                return (uint32_t) mmap64(NULL, pages << PAGE_SHIFT,
                                         PROT_READ | PROT_WRITE,
                                         MAP_SHARED, fd, alloc.gpu_va);
        } else {
                return alloc.gpu_va;
        }
}
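
/*
 * Allocate a growable GPU heap: reserve the full VA range but commit only a
 * single page up front. The extent/flags magic numbers are presumably what
 * the blob passes; they are not named constants in the headers used here.
 */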
uint64_t alloc_gpu_heap(int fd, int pages)
{
        struct mali_ioctl_mem_alloc alloc = {
                .va_pages = pages,
                .commit_pages = 1,
                .extent = 0x80,
                .flags = 0x26F
        };

        m_ioctl(fd, alloc, MALI_IOCTL_MEM_ALLOC);

        return alloc.gpu_va;
}
void free_gpu(int fd, uint64_t addr)
{
        struct mali_ioctl_mem_free gfree = { .gpu_addr = addr };

        m_ioctl(fd, gfree, MALI_IOCTL_MEM_FREE);
}
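
/*
 * Push a CPU-side range to the GPU (MALI_SYNC_TO_DEVICE): the ioctl takes
 * the page-aligned handle and user address, with the size padded by the
 * offset into the page so the whole range is covered.
 */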
void sync_gpu(int fd, uint8_t *cpu, uint64_t gpu, size_t bytes)
{
        struct mali_ioctl_sync sync = {
                .handle = gpu & PAGE_MASK,
                .user_addr = cpu - (gpu & ~PAGE_MASK),
                .size = (gpu & ~PAGE_MASK) + bytes,
                .type = MALI_SYNC_TO_DEVICE
        };

        m_ioctl(fd, sync, MALI_IOCTL_SYNC);
}
static void submit_job_internal(int fd, struct mali_jd_atom_v2 *atoms, size_t count)
{
        struct mali_ioctl_job_submit submit = {
                .addr = atoms,
                .nr_atoms = count,
                .stride = sizeof(struct mali_jd_atom_v2)
        };

        m_ioctl(fd, submit, MALI_IOCTL_JOB_SUBMIT);
}
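
/*
 * Small batching layer on top of JOB_SUBMIT: atoms are queued locally and
 * pushed to the kernel in a single submit when the queue fills up or is
 * flushed explicitly.
 */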
#define ATOM_QUEUE_MAX 16

struct mali_jd_atom_v2 atom_queue[ATOM_QUEUE_MAX];
int atom_queue_size = 0;

void flush_job_queue(int fd)
{
        if (atom_queue_size) {
                submit_job_internal(fd, atom_queue, atom_queue_size);
                atom_queue_size = 0;
        } else {
                printf("Warning... flushing job queue with no atoms\n");
        }
}

void submit_job(int fd, struct mali_jd_atom_v2 atom)
{
        memcpy(&atom_queue[atom_queue_size++], &atom, sizeof(atom));

        if (atom_queue_size == ATOM_QUEUE_MAX)
                flush_job_queue(fd);
}
/* Not strictly an ioctl but still shim related */
uint8_t *mmap_gpu(int fd, uint64_t addr, int page_count)
{
        uint8_t *buffer = mmap64(NULL, page_count << PAGE_SHIFT,
                                 PROT_READ | PROT_WRITE, MAP_SHARED,
                                 fd, addr);

        if (buffer == MAP_FAILED) {
                printf("Buffer map failed (%s)\n", strerror(errno));
                exit(1);
        }

        return buffer;
}
/* Seems to fail but called anyway by the blob */
void stream_create(int fd, char *stream)
{
        struct mali_ioctl_stream_create s;

        strcpy(s.name, stream);
        m_ioctl(fd, s, MALI_IOCTL_STREAM_CREATE);
}

void query_gpu_props(int fd)
{
        struct mali_ioctl_gpu_props_reg_dump v;

        m_ioctl(fd, v, MALI_IOCTL_GPU_PROPS_REG_DUMP);
}
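
/*
 * Illustrative only: a minimal sketch (guarded out of the build) of how a
 * caller might drive the shim above: open the device, allocate and map a
 * buffer, push CPU writes to the GPU, then batch and flush a job. The
 * allocation flags and the atom contents are placeholders, not values taken
 * from a real trace.
 */
#if 0
static void shim_usage_sketch(void)
{
        int fd = open_kernel_module();
        if (fd < 0)
                return;

        /* Placeholder flags; real callers OR in the MALI_MEM_* bits they need */
        int flags = 0;

        uint64_t gpu_va = alloc_gpu_pages(fd, 1, flags);
        uint8_t *cpu = mmap_gpu(fd, gpu_va, 1);

        /* Fill the buffer on the CPU and make the writes visible to the GPU */
        memset(cpu, 0, PAGE_SIZE);
        sync_gpu(fd, cpu, gpu_va, PAGE_SIZE);

        /* Queue an empty placeholder atom and force submission */
        struct mali_jd_atom_v2 atom = { 0 };
        submit_job(fd, atom);
        flush_job_queue(fd);

        free_gpu(fd, gpu_va);
}
#endif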