plugin-nvptx.c

  1. /* Plugin for NVPTX execution.
  2. Copyright (C) 2013-2015 Free Software Foundation, Inc.
  3. Contributed by Mentor Embedded.
  4. This file is part of the GNU Offloading and Multi Processing Library
  5. (libgomp).
  6. Libgomp is free software; you can redistribute it and/or modify it
  7. under the terms of the GNU General Public License as published by
  8. the Free Software Foundation; either version 3, or (at your option)
  9. any later version.
  10. Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
  11. WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  12. FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. more details.
  14. Under Section 7 of GPL version 3, you are granted additional
  15. permissions described in the GCC Runtime Library Exception, version
  16. 3.1, as published by the Free Software Foundation.
  17. You should have received a copy of the GNU General Public License and
  18. a copy of the GCC Runtime Library Exception along with this program;
  19. see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
  20. <http://www.gnu.org/licenses/>. */
  21. /* Nvidia PTX-specific parts of OpenACC support. The CUDA driver
  22. library appears to hold some implicit state, but the documentation
  23. is not clear as to what that state might be, or how one might
  24. propagate it from one thread to another. */
  25. #include "openacc.h"
  26. #include "config.h"
  27. #include "libgomp-plugin.h"
  28. #include "oacc-ptx.h"
  29. #include "oacc-plugin.h"
  30. #include <pthread.h>
  31. #include <cuda.h>
  32. #include <stdbool.h>
  33. #include <stdint.h>
  34. #include <string.h>
  35. #include <stdio.h>
  36. #include <dlfcn.h>
  37. #include <unistd.h>
  38. #include <assert.h>
  39. #define ARRAYSIZE(X) (sizeof (X) / sizeof ((X)[0]))
  40. static struct
  41. {
  42. CUresult r;
  43. char *m;
  44. } cuda_errlist[] =
  45. {
  46. { CUDA_ERROR_INVALID_VALUE, "invalid value" },
  47. { CUDA_ERROR_OUT_OF_MEMORY, "out of memory" },
  48. { CUDA_ERROR_NOT_INITIALIZED, "not initialized" },
  49. { CUDA_ERROR_DEINITIALIZED, "deinitialized" },
  50. { CUDA_ERROR_PROFILER_DISABLED, "profiler disabled" },
  51. { CUDA_ERROR_PROFILER_NOT_INITIALIZED, "profiler not initialized" },
  52. { CUDA_ERROR_PROFILER_ALREADY_STARTED, "already started" },
  53. { CUDA_ERROR_PROFILER_ALREADY_STOPPED, "already stopped" },
  54. { CUDA_ERROR_NO_DEVICE, "no device" },
  55. { CUDA_ERROR_INVALID_DEVICE, "invalid device" },
  56. { CUDA_ERROR_INVALID_IMAGE, "invalid image" },
  57. { CUDA_ERROR_INVALID_CONTEXT, "invalid context" },
  58. { CUDA_ERROR_CONTEXT_ALREADY_CURRENT, "context already current" },
  59. { CUDA_ERROR_MAP_FAILED, "map error" },
  60. { CUDA_ERROR_UNMAP_FAILED, "unmap error" },
  61. { CUDA_ERROR_ARRAY_IS_MAPPED, "array is mapped" },
  62. { CUDA_ERROR_ALREADY_MAPPED, "already mapped" },
  63. { CUDA_ERROR_NO_BINARY_FOR_GPU, "no binary for gpu" },
  64. { CUDA_ERROR_ALREADY_ACQUIRED, "already acquired" },
  65. { CUDA_ERROR_NOT_MAPPED, "not mapped" },
  66. { CUDA_ERROR_NOT_MAPPED_AS_ARRAY, "not mapped as array" },
  67. { CUDA_ERROR_NOT_MAPPED_AS_POINTER, "not mapped as pointer" },
  68. { CUDA_ERROR_ECC_UNCORRECTABLE, "ecc uncorrectable" },
  69. { CUDA_ERROR_UNSUPPORTED_LIMIT, "unsupported limit" },
  70. { CUDA_ERROR_CONTEXT_ALREADY_IN_USE, "context already in use" },
  71. { CUDA_ERROR_PEER_ACCESS_UNSUPPORTED, "peer access unsupported" },
  72. { CUDA_ERROR_INVALID_SOURCE, "invalid source" },
  73. { CUDA_ERROR_FILE_NOT_FOUND, "file not found" },
  74. { CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND,
  75. "shared object symbol not found" },
  76. { CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, "shared object init error" },
  77. { CUDA_ERROR_OPERATING_SYSTEM, "operating system" },
  78. { CUDA_ERROR_INVALID_HANDLE, "invalid handle" },
  79. { CUDA_ERROR_NOT_FOUND, "not found" },
  80. { CUDA_ERROR_NOT_READY, "not ready" },
  81. { CUDA_ERROR_LAUNCH_FAILED, "launch error" },
  82. { CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, "launch out of resources" },
  83. { CUDA_ERROR_LAUNCH_TIMEOUT, "launch timeout" },
  84. { CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING,
  85. "launch incompatibe texturing" },
  86. { CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED, "peer access already enabled" },
  87. { CUDA_ERROR_PEER_ACCESS_NOT_ENABLED, "peer access not enabled" },
  88. { CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE, "primary context active" },
  89. { CUDA_ERROR_CONTEXT_IS_DESTROYED, "context is destroyed" },
  90. { CUDA_ERROR_ASSERT, "assert" },
  91. { CUDA_ERROR_TOO_MANY_PEERS, "too many peers" },
  92. { CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED,
  93. "host memory already registered" },
  94. { CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED, "host memory not registered" },
  95. { CUDA_ERROR_NOT_PERMITTED, "not permitted" },
  96. { CUDA_ERROR_NOT_SUPPORTED, "not supported" },
  97. { CUDA_ERROR_UNKNOWN, "unknown" }
  98. };
  99. static char errmsg[128];
  100. static char *
  101. cuda_error (CUresult r)
  102. {
  103. int i;
  104. for (i = 0; i < ARRAYSIZE (cuda_errlist); i++)
  105. {
  106. if (cuda_errlist[i].r == r)
  107. return &cuda_errlist[i].m[0];
  108. }
  109. sprintf (&errmsg[0], "unknown result code: %5d", r);
  110. return &errmsg[0];
  111. }
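/* Note that the fallback in cuda_error above formats into a single static
   buffer, so the string returned for an unrecognised result code is only
   valid until the next such failure is reported; the table entries
   themselves are string constants.  */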
  112. struct targ_fn_descriptor
  113. {
  114. CUfunction fn;
  115. const char *name;
  116. };
  117. static unsigned int instantiated_devices = 0;
  118. static pthread_mutex_t ptx_dev_lock = PTHREAD_MUTEX_INITIALIZER;
  119. struct ptx_stream
  120. {
  121. CUstream stream;
  122. pthread_t host_thread;
  123. bool multithreaded;
  124. CUdeviceptr d;
  125. void *h;
  126. void *h_begin;
  127. void *h_end;
  128. void *h_next;
  129. void *h_prev;
  130. void *h_tail;
  131. struct ptx_stream *next;
  132. };
  133. /* Thread-specific data for PTX. */
  134. struct nvptx_thread
  135. {
  136. struct ptx_stream *current_stream;
  137. struct ptx_device *ptx_dev;
  138. };
  139. struct map
  140. {
  141. int async;
  142. size_t size;
  143. char mappings[0];
  144. };
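/* Each ptx_stream carries one page of host-pinned memory (set up by
   map_init below) that is used as a circular buffer of these records:
   map_push appends a header plus SIZE bytes of payload at h_next, and
   map_pop retires the oldest record at h_tail once the work using it has
   completed.  mappings[0] is a zero-length array (a GNU C idiom), so the
   payload starts immediately after the header.  */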
  145. static void
  146. map_init (struct ptx_stream *s)
  147. {
  148. CUresult r;
  149. int size = getpagesize ();
  150. assert (s);
  151. assert (!s->d);
  152. assert (!s->h);
  153. r = cuMemAllocHost (&s->h, size);
  154. if (r != CUDA_SUCCESS)
  155. GOMP_PLUGIN_fatal ("cuMemAllocHost error: %s", cuda_error (r));
  156. r = cuMemHostGetDevicePointer (&s->d, s->h, 0);
  157. if (r != CUDA_SUCCESS)
  158. GOMP_PLUGIN_fatal ("cuMemHostGetDevicePointer error: %s", cuda_error (r));
  159. assert (s->h);
  160. s->h_begin = s->h;
  161. s->h_end = s->h_begin + size;
  162. s->h_next = s->h_prev = s->h_tail = s->h_begin;
  163. assert (s->h_next);
  164. assert (s->h_end);
  165. }
  166. static void
  167. map_fini (struct ptx_stream *s)
  168. {
  169. CUresult r;
  170. r = cuMemFreeHost (s->h);
  171. if (r != CUDA_SUCCESS)
  172. GOMP_PLUGIN_fatal ("cuMemFreeHost error: %s", cuda_error (r));
  173. }
  174. static void
  175. map_pop (struct ptx_stream *s)
  176. {
  177. struct map *m;
  178. assert (s != NULL);
  179. assert (s->h_next);
  180. assert (s->h_prev);
  181. assert (s->h_tail);
  182. m = s->h_tail;
  183. s->h_tail += m->size;
  184. if (s->h_tail >= s->h_end)
  185. s->h_tail = s->h_begin + (int) (s->h_tail - s->h_end);
  186. if (s->h_next == s->h_tail)
  187. s->h_prev = s->h_next;
  188. assert (s->h_next >= s->h_begin);
  189. assert (s->h_tail >= s->h_begin);
  190. assert (s->h_prev >= s->h_begin);
  191. assert (s->h_next <= s->h_end);
  192. assert (s->h_tail <= s->h_end);
  193. assert (s->h_prev <= s->h_end);
  194. }
  195. static void
  196. map_push (struct ptx_stream *s, int async, size_t size, void **h, void **d)
  197. {
  198. int left;
  199. int offset;
  200. struct map *m;
  201. assert (s != NULL);
  202. left = s->h_end - s->h_next;
  203. size += sizeof (struct map);
  204. assert (s->h_prev);
  205. assert (s->h_next);
  206. if (size >= left)
  207. {
  208. m = s->h_prev;
  209. m->size += left;
  210. s->h_next = s->h_begin;
  211. if (s->h_next + size > s->h_end)
  212. GOMP_PLUGIN_fatal ("unable to push map");
  213. }
  214. assert (s->h_next);
  215. m = s->h_next;
  216. m->async = async;
  217. m->size = size;
  218. offset = (void *)&m->mappings[0] - s->h;
  219. *d = (void *)(s->d + offset);
  220. *h = (void *)(s->h + offset);
  221. s->h_prev = s->h_next;
  222. s->h_next += size;
  223. assert (s->h_prev);
  224. assert (s->h_next);
  225. assert (s->h_next >= s->h_begin);
  226. assert (s->h_tail >= s->h_begin);
  227. assert (s->h_prev >= s->h_begin);
  228. assert (s->h_next <= s->h_end);
  229. assert (s->h_tail <= s->h_end);
  230. assert (s->h_prev <= s->h_end);
  231. return;
  232. }
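/* A typical caller (see nvptx_exec below) reserves space for a kernel's
   argument array like so:

     void *hp, *dp;
     map_push (dev_str, async, mapnum * sizeof (void *), &hp, &dp);

   HP and DP then address the same chunk through its host and device
   mappings respectively, so data written via HP can be copied to, or on a
   unified-memory system read directly through, DP.  */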
  233. struct ptx_device
  234. {
  235. CUcontext ctx;
  236. bool ctx_shared;
  237. CUdevice dev;
  238. struct ptx_stream *null_stream;
  239. /* All non-null streams associated with this device (actually context),
  240. either created implicitly or passed in from the user (via
  241. acc_set_cuda_stream). */
  242. struct ptx_stream *active_streams;
  243. struct {
  244. struct ptx_stream **arr;
  245. int size;
  246. } async_streams;
  247. /* A lock for use when manipulating the above stream list and array. */
  248. pthread_mutex_t stream_lock;
  249. int ord;
  250. bool overlap;
  251. bool map;
  252. bool concur;
  253. int mode;
  254. bool mkern;
  255. struct ptx_device *next;
  256. };
  257. enum ptx_event_type
  258. {
  259. PTX_EVT_MEM,
  260. PTX_EVT_KNL,
  261. PTX_EVT_SYNC,
  262. PTX_EVT_ASYNC_CLEANUP
  263. };
  264. struct ptx_event
  265. {
  266. CUevent *evt;
  267. int type;
  268. void *addr;
  269. int ord;
  270. struct ptx_event *next;
  271. };
  272. struct ptx_image_data
  273. {
  274. void *target_data;
  275. CUmodule module;
  276. struct ptx_image_data *next;
  277. };
  278. static pthread_mutex_t ptx_event_lock;
  279. static struct ptx_event *ptx_events;
  280. static struct ptx_device **ptx_devices;
  281. static struct ptx_image_data *ptx_images = NULL;
  282. static pthread_mutex_t ptx_image_lock = PTHREAD_MUTEX_INITIALIZER;
  283. #define _XSTR(s) _STR(s)
  284. #define _STR(s) #s
  285. static struct _synames
  286. {
  287. char *n;
  288. } cuda_symnames[] =
  289. {
  290. { _XSTR (cuCtxCreate) },
  291. { _XSTR (cuCtxDestroy) },
  292. { _XSTR (cuCtxGetCurrent) },
  293. { _XSTR (cuCtxPushCurrent) },
  294. { _XSTR (cuCtxSynchronize) },
  295. { _XSTR (cuDeviceGet) },
  296. { _XSTR (cuDeviceGetAttribute) },
  297. { _XSTR (cuDeviceGetCount) },
  298. { _XSTR (cuEventCreate) },
  299. { _XSTR (cuEventDestroy) },
  300. { _XSTR (cuEventQuery) },
  301. { _XSTR (cuEventRecord) },
  302. { _XSTR (cuInit) },
  303. { _XSTR (cuLaunchKernel) },
  304. { _XSTR (cuLinkAddData) },
  305. { _XSTR (cuLinkComplete) },
  306. { _XSTR (cuLinkCreate) },
  307. { _XSTR (cuMemAlloc) },
  308. { _XSTR (cuMemAllocHost) },
  309. { _XSTR (cuMemcpy) },
  310. { _XSTR (cuMemcpyDtoH) },
  311. { _XSTR (cuMemcpyDtoHAsync) },
  312. { _XSTR (cuMemcpyHtoD) },
  313. { _XSTR (cuMemcpyHtoDAsync) },
  314. { _XSTR (cuMemFree) },
  315. { _XSTR (cuMemFreeHost) },
  316. { _XSTR (cuMemGetAddressRange) },
  317. { _XSTR (cuMemHostGetDevicePointer) },
  318. { _XSTR (cuMemHostRegister) },
  319. { _XSTR (cuMemHostUnregister) },
  320. { _XSTR (cuModuleGetFunction) },
  321. { _XSTR (cuModuleLoadData) },
  322. { _XSTR (cuStreamDestroy) },
  323. { _XSTR (cuStreamQuery) },
  324. { _XSTR (cuStreamSynchronize) },
  325. { _XSTR (cuStreamWaitEvent) }
  326. };
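/* Check that the CUDA driver library is usable: verify_device_library
   returns 0 only if libcuda.so can be dlopen'd and every entry point named
   in cuda_symnames resolves, which lets nvptx_init fail cleanly on hosts
   without a usable CUDA driver.  */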
  327. static int
  328. verify_device_library (void)
  329. {
  330. int i;
  331. void *dh, *ds;
  332. dh = dlopen ("libcuda.so", RTLD_LAZY);
  333. if (!dh)
  334. return -1;
  335. for (i = 0; i < ARRAYSIZE (cuda_symnames); i++)
  336. {
  337. ds = dlsym (dh, cuda_symnames[i].n);
  338. if (!ds)
  339. return -1;
  340. }
  341. dlclose (dh);
  342. return 0;
  343. }
  344. static inline struct nvptx_thread *
  345. nvptx_thread (void)
  346. {
  347. return (struct nvptx_thread *) GOMP_PLUGIN_acc_thread ();
  348. }
  349. static void
  350. init_streams_for_device (struct ptx_device *ptx_dev, int concurrency)
  351. {
  352. int i;
  353. struct ptx_stream *null_stream
  354. = GOMP_PLUGIN_malloc (sizeof (struct ptx_stream));
  355. null_stream->stream = NULL;
  356. null_stream->host_thread = pthread_self ();
  357. null_stream->multithreaded = true;
  358. null_stream->d = (CUdeviceptr) NULL;
  359. null_stream->h = NULL;
  360. map_init (null_stream);
  361. ptx_dev->null_stream = null_stream;
  362. ptx_dev->active_streams = NULL;
  363. pthread_mutex_init (&ptx_dev->stream_lock, NULL);
  364. if (concurrency < 1)
  365. concurrency = 1;
  366. /* This is just a guess -- make space for as many async streams as the
  367. current device is capable of concurrently executing. This can grow
  368. later as necessary. No streams are created yet. */
  369. ptx_dev->async_streams.arr
  370. = GOMP_PLUGIN_malloc (concurrency * sizeof (struct ptx_stream *));
  371. ptx_dev->async_streams.size = concurrency;
  372. for (i = 0; i < concurrency; i++)
  373. ptx_dev->async_streams.arr[i] = NULL;
  374. }
  375. static void
  376. fini_streams_for_device (struct ptx_device *ptx_dev)
  377. {
  378. free (ptx_dev->async_streams.arr);
  379. while (ptx_dev->active_streams != NULL)
  380. {
  381. struct ptx_stream *s = ptx_dev->active_streams;
  382. ptx_dev->active_streams = ptx_dev->active_streams->next;
  383. map_fini (s);
  384. cuStreamDestroy (s->stream);
  385. free (s);
  386. }
  387. map_fini (ptx_dev->null_stream);
  388. free (ptx_dev->null_stream);
  389. }
  390. /* Select a stream for the (OpenACC-semantics) ASYNC argument for the
  391. current thread THREAD (and also the current device/context). If CREATE is
  392. true, create the stream if it does not exist (or use EXISTING if it is
  393. non-NULL), and associate the stream with that thread. Returns the stream
  394. to use. */
  395. static struct ptx_stream *
  396. select_stream_for_async (int async, pthread_t thread, bool create,
  397. CUstream existing)
  398. {
  399. struct nvptx_thread *nvthd = nvptx_thread ();
  400. /* Local copy of TLS variable. */
  401. struct ptx_device *ptx_dev = nvthd->ptx_dev;
  402. struct ptx_stream *stream = NULL;
  403. int orig_async = async;
  404. /* The special value acc_async_noval (-1) maps (for now) to an
  405. implicitly-created stream, which is then handled the same as any other
  406. numbered async stream. Other options are available, e.g. using the null
  407. stream for anonymous async operations, or choosing an idle stream from an
  408. active set. But, stick with this for now. */
  409. if (async > acc_async_sync)
  410. async++;
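/* With the usual openacc.h values (acc_async_sync == -2, acc_async_noval
   == -1), the increment above maps acc_async_noval onto stream slot 0 and
   a user-supplied async N >= 0 onto slot N + 1, so the implicit "noval"
   stream gets its own entry in async_streams.arr.  */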
  411. if (create)
  412. pthread_mutex_lock (&ptx_dev->stream_lock);
  413. /* NOTE: AFAICT there's no particular need for acc_async_sync to map to the
  414. null stream, and in fact better performance may be obtainable if it doesn't
  415. (because the null stream enforces overly-strict synchronisation with
  416. respect to other streams for legacy reasons, and that's probably not
  417. needed with OpenACC). Maybe investigate later. */
  418. if (async == acc_async_sync)
  419. stream = ptx_dev->null_stream;
  420. else if (async >= 0 && async < ptx_dev->async_streams.size
  421. && ptx_dev->async_streams.arr[async] && !(create && existing))
  422. stream = ptx_dev->async_streams.arr[async];
  423. else if (async >= 0 && create)
  424. {
  425. if (async >= ptx_dev->async_streams.size)
  426. {
  427. int i, newsize = ptx_dev->async_streams.size * 2;
  428. if (async >= newsize)
  429. newsize = async + 1;
  430. ptx_dev->async_streams.arr
  431. = GOMP_PLUGIN_realloc (ptx_dev->async_streams.arr,
  432. newsize * sizeof (struct ptx_stream *));
  433. for (i = ptx_dev->async_streams.size; i < newsize; i++)
  434. ptx_dev->async_streams.arr[i] = NULL;
  435. ptx_dev->async_streams.size = newsize;
  436. }
  437. /* Create a new stream on-demand if there isn't one already, or if we're
  438. setting a particular async value to an existing (externally-provided)
  439. stream. */
  440. if (!ptx_dev->async_streams.arr[async] || existing)
  441. {
  442. CUresult r;
  443. struct ptx_stream *s
  444. = GOMP_PLUGIN_malloc (sizeof (struct ptx_stream));
  445. if (existing)
  446. s->stream = existing;
  447. else
  448. {
  449. r = cuStreamCreate (&s->stream, CU_STREAM_DEFAULT);
  450. if (r != CUDA_SUCCESS)
  451. GOMP_PLUGIN_fatal ("cuStreamCreate error: %s", cuda_error (r));
  452. }
  453. /* If CREATE is true, we're going to be queueing some work on this
  454. stream. Associate it with the current host thread. */
  455. s->host_thread = thread;
  456. s->multithreaded = false;
  457. s->d = (CUdeviceptr) NULL;
  458. s->h = NULL;
  459. map_init (s);
  460. s->next = ptx_dev->active_streams;
  461. ptx_dev->active_streams = s;
  462. ptx_dev->async_streams.arr[async] = s;
  463. }
  464. stream = ptx_dev->async_streams.arr[async];
  465. }
  466. else if (async < 0)
  467. GOMP_PLUGIN_fatal ("bad async %d", async);
  468. if (create)
  469. {
  470. assert (stream != NULL);
  471. /* If we're trying to use the same stream from different threads
  472. simultaneously, set stream->multithreaded to true. This affects the
  473. behaviour of acc_async_test_all and acc_wait_all, which are supposed to
  474. only wait for asynchronous launches from the same host thread they are
  475. invoked on. If multiple threads use the same async value, we make note
  476. of that here and fall back to testing/waiting for all threads in those
  477. functions. */
  478. if (thread != stream->host_thread)
  479. stream->multithreaded = true;
  480. pthread_mutex_unlock (&ptx_dev->stream_lock);
  481. }
  482. else if (stream && !stream->multithreaded
  483. && !pthread_equal (stream->host_thread, thread))
  484. GOMP_PLUGIN_fatal ("async %d used on wrong thread", orig_async);
  485. return stream;
  486. }
  487. /* Initialize the device. Return TRUE on success, else FALSE. PTX_DEV_LOCK
  488. should be locked on entry and remains locked on exit. */
  489. static bool
  490. nvptx_init (void)
  491. {
  492. CUresult r;
  493. int rc;
  494. int ndevs;
  495. if (instantiated_devices != 0)
  496. return true;
  497. rc = verify_device_library ();
  498. if (rc < 0)
  499. return false;
  500. r = cuInit (0);
  501. if (r != CUDA_SUCCESS)
  502. GOMP_PLUGIN_fatal ("cuInit error: %s", cuda_error (r));
  503. ptx_events = NULL;
  504. pthread_mutex_init (&ptx_event_lock, NULL);
  505. r = cuDeviceGetCount (&ndevs);
  506. if (r != CUDA_SUCCESS)
  507. GOMP_PLUGIN_fatal ("cuDeviceGetCount error: %s", cuda_error (r));
  508. ptx_devices = GOMP_PLUGIN_malloc_cleared (sizeof (struct ptx_device *)
  509. * ndevs);
  510. return true;
  511. }
  512. /* Select the N'th PTX device for the current host thread. The device
  513. must already have been opened before calling this function. */
  514. static void
  515. nvptx_attach_host_thread_to_device (int n)
  516. {
  517. CUdevice dev;
  518. CUresult r;
  519. struct ptx_device *ptx_dev;
  520. CUcontext thd_ctx;
  521. r = cuCtxGetDevice (&dev);
  522. if (r != CUDA_SUCCESS && r != CUDA_ERROR_INVALID_CONTEXT)
  523. GOMP_PLUGIN_fatal ("cuCtxGetDevice error: %s", cuda_error (r));
  524. if (r != CUDA_ERROR_INVALID_CONTEXT && dev == n)
  525. return;
  526. else
  527. {
  528. CUcontext old_ctx;
  529. ptx_dev = ptx_devices[n];
  530. assert (ptx_dev);
  531. r = cuCtxGetCurrent (&thd_ctx);
  532. if (r != CUDA_SUCCESS)
  533. GOMP_PLUGIN_fatal ("cuCtxGetCurrent error: %s", cuda_error (r));
  534. /* We don't necessarily have a current context (e.g. if it has been
  535. destroyed). Pop it if we do, though. */
  536. if (thd_ctx != NULL)
  537. {
  538. r = cuCtxPopCurrent (&old_ctx);
  539. if (r != CUDA_SUCCESS)
  540. GOMP_PLUGIN_fatal ("cuCtxPopCurrent error: %s", cuda_error (r));
  541. }
  542. r = cuCtxPushCurrent (ptx_dev->ctx);
  543. if (r != CUDA_SUCCESS)
  544. GOMP_PLUGIN_fatal ("cuCtxPushCurrent error: %s", cuda_error (r));
  545. }
  546. }
  547. static struct ptx_device *
  548. nvptx_open_device (int n)
  549. {
  550. struct ptx_device *ptx_dev;
  551. CUdevice dev, ctx_dev;
  552. CUresult r;
  553. int async_engines, pi;
  554. r = cuDeviceGet (&dev, n);
  555. if (r != CUDA_SUCCESS)
  556. GOMP_PLUGIN_fatal ("cuDeviceGet error: %s", cuda_error (r));
  557. ptx_dev = GOMP_PLUGIN_malloc (sizeof (struct ptx_device));
  558. ptx_dev->ord = n;
  559. ptx_dev->dev = dev;
  560. ptx_dev->ctx_shared = false;
  561. r = cuCtxGetDevice (&ctx_dev);
  562. if (r != CUDA_SUCCESS && r != CUDA_ERROR_INVALID_CONTEXT)
  563. GOMP_PLUGIN_fatal ("cuCtxGetDevice error: %s", cuda_error (r));
  564. if (r != CUDA_ERROR_INVALID_CONTEXT && ctx_dev != dev)
  565. {
  566. /* The current host thread has an active context for a different device.
  567. Detach it. */
  568. CUcontext old_ctx;
  569. r = cuCtxPopCurrent (&old_ctx);
  570. if (r != CUDA_SUCCESS)
  571. GOMP_PLUGIN_fatal ("cuCtxPopCurrent error: %s", cuda_error (r));
  572. }
  573. r = cuCtxGetCurrent (&ptx_dev->ctx);
  574. if (r != CUDA_SUCCESS)
  575. GOMP_PLUGIN_fatal ("cuCtxGetCurrent error: %s", cuda_error (r));
  576. if (!ptx_dev->ctx)
  577. {
  578. r = cuCtxCreate (&ptx_dev->ctx, CU_CTX_SCHED_AUTO, dev);
  579. if (r != CUDA_SUCCESS)
  580. GOMP_PLUGIN_fatal ("cuCtxCreate error: %s", cuda_error (r));
  581. }
  582. else
  583. ptx_dev->ctx_shared = true;
  584. r = cuDeviceGetAttribute (&pi, CU_DEVICE_ATTRIBUTE_GPU_OVERLAP, dev);
  585. if (r != CUDA_SUCCESS)
  586. GOMP_PLUGIN_fatal ("cuDeviceGetAttribute error: %s", cuda_error (r));
  587. ptx_dev->overlap = pi;
  588. r = cuDeviceGetAttribute (&pi, CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY, dev);
  589. if (r != CUDA_SUCCESS)
  590. GOMP_PLUGIN_fatal ("cuDeviceGetAttribute error: %s", cuda_error (r));
  591. ptx_dev->map = pi;
  592. r = cuDeviceGetAttribute (&pi, CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS, dev);
  593. if (r != CUDA_SUCCESS)
  594. GOMP_PLUGIN_fatal ("cuDeviceGetAttribute error: %s", cuda_error (r));
  595. ptx_dev->concur = pi;
  596. r = cuDeviceGetAttribute (&pi, CU_DEVICE_ATTRIBUTE_COMPUTE_MODE, dev);
  597. if (r != CUDA_SUCCESS)
  598. GOMP_PLUGIN_fatal ("cuDeviceGetAttribute error: %s", cuda_error (r));
  599. ptx_dev->mode = pi;
  600. r = cuDeviceGetAttribute (&pi, CU_DEVICE_ATTRIBUTE_INTEGRATED, dev);
  601. if (r != CUDA_SUCCESS)
  602. GOMP_PLUGIN_fatal ("cuDeviceGetAttribute error: %s", cuda_error (r));
  603. ptx_dev->mkern = pi;
  604. r = cuDeviceGetAttribute (&async_engines,
  605. CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT, dev);
  606. if (r != CUDA_SUCCESS)
  607. async_engines = 1;
  608. init_streams_for_device (ptx_dev, async_engines);
  609. return ptx_dev;
  610. }
  611. static void
  612. nvptx_close_device (struct ptx_device *ptx_dev)
  613. {
  614. CUresult r;
  615. if (!ptx_dev)
  616. return;
  617. fini_streams_for_device (ptx_dev);
  618. if (!ptx_dev->ctx_shared)
  619. {
  620. r = cuCtxDestroy (ptx_dev->ctx);
  621. if (r != CUDA_SUCCESS)
  622. GOMP_PLUGIN_fatal ("cuCtxDestroy error: %s", cuda_error (r));
  623. }
  624. free (ptx_dev);
  625. }
  626. static int
  627. nvptx_get_num_devices (void)
  628. {
  629. int n;
  630. CUresult r;
  631. /* PR libgomp/65099: Currently, we only support offloading in 64-bit
  632. configurations. */
  633. if (sizeof (void *) != 8)
  634. return 0;
  635. /* This function will be called before the plugin has been initialized in
  636. order to enumerate available devices, but CUDA API routines can't be used
  637. until cuInit has been called. Just call it now (but don't yet do any
  638. further initialization). */
  639. if (instantiated_devices == 0)
  640. cuInit (0);
  641. r = cuDeviceGetCount (&n);
  642. if (r != CUDA_SUCCESS)
  643. GOMP_PLUGIN_fatal ("cuDeviceGetCount error: %s", cuda_error (r));
  644. return n;
  645. }
  646. static void
  647. link_ptx (CUmodule *module, char *ptx_code)
  648. {
  649. CUjit_option opts[7];
  650. void *optvals[7];
  651. float elapsed = 0.0;
  652. #define LOGSIZE 8192
  653. char elog[LOGSIZE];
  654. char ilog[LOGSIZE];
  655. unsigned long logsize = LOGSIZE;
  656. CUlinkState linkstate;
  657. CUresult r;
  658. void *linkout;
  659. size_t linkoutsize __attribute__ ((unused));
  660. GOMP_PLUGIN_debug (0, "attempting to load:\n---\n%s\n---\n", ptx_code);
  661. opts[0] = CU_JIT_WALL_TIME;
  662. optvals[0] = &elapsed;
  663. opts[1] = CU_JIT_INFO_LOG_BUFFER;
  664. optvals[1] = &ilog[0];
  665. opts[2] = CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES;
  666. optvals[2] = (void *) logsize;
  667. opts[3] = CU_JIT_ERROR_LOG_BUFFER;
  668. optvals[3] = &elog[0];
  669. opts[4] = CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES;
  670. optvals[4] = (void *) logsize;
  671. opts[5] = CU_JIT_LOG_VERBOSE;
  672. optvals[5] = (void *) 1;
  673. opts[6] = CU_JIT_TARGET;
  674. optvals[6] = (void *) CU_TARGET_COMPUTE_30;
  675. r = cuLinkCreate (7, opts, optvals, &linkstate);
  676. if (r != CUDA_SUCCESS)
  677. GOMP_PLUGIN_fatal ("cuLinkCreate error: %s", cuda_error (r));
  678. char *abort_ptx = ABORT_PTX;
  679. r = cuLinkAddData (linkstate, CU_JIT_INPUT_PTX, abort_ptx,
  680. strlen (abort_ptx) + 1, 0, 0, 0, 0);
  681. if (r != CUDA_SUCCESS)
  682. {
  683. GOMP_PLUGIN_error ("Link error log %s\n", &elog[0]);
  684. GOMP_PLUGIN_fatal ("cuLinkAddData (abort) error: %s", cuda_error (r));
  685. }
  686. char *acc_on_device_ptx = ACC_ON_DEVICE_PTX;
  687. r = cuLinkAddData (linkstate, CU_JIT_INPUT_PTX, acc_on_device_ptx,
  688. strlen (acc_on_device_ptx) + 1, 0, 0, 0, 0);
  689. if (r != CUDA_SUCCESS)
  690. {
  691. GOMP_PLUGIN_error ("Link error log %s\n", &elog[0]);
  692. GOMP_PLUGIN_fatal ("cuLinkAddData (acc_on_device) error: %s",
  693. cuda_error (r));
  694. }
  695. char *goacc_internal_ptx = GOACC_INTERNAL_PTX;
  696. r = cuLinkAddData (linkstate, CU_JIT_INPUT_PTX, goacc_internal_ptx,
  697. strlen (goacc_internal_ptx) + 1, 0, 0, 0, 0);
  698. if (r != CUDA_SUCCESS)
  699. {
  700. GOMP_PLUGIN_error ("Link error log %s\n", &elog[0]);
  701. GOMP_PLUGIN_fatal ("cuLinkAddData (goacc_internal_ptx) error: %s",
  702. cuda_error (r));
  703. }
  704. r = cuLinkAddData (linkstate, CU_JIT_INPUT_PTX, ptx_code,
  705. strlen (ptx_code) + 1, 0, 0, 0, 0);
  706. if (r != CUDA_SUCCESS)
  707. {
  708. GOMP_PLUGIN_error ("Link error log %s\n", &elog[0]);
  709. GOMP_PLUGIN_fatal ("cuLinkAddData (ptx_code) error: %s", cuda_error (r));
  710. }
  711. r = cuLinkComplete (linkstate, &linkout, &linkoutsize);
  712. if (r != CUDA_SUCCESS)
  713. GOMP_PLUGIN_fatal ("cuLinkComplete error: %s", cuda_error (r));
  714. GOMP_PLUGIN_debug (0, "Link complete: %fms\n", elapsed);
  715. GOMP_PLUGIN_debug (0, "Link log %s\n", &ilog[0]);
  716. r = cuModuleLoadData (module, linkout);
  717. if (r != CUDA_SUCCESS)
  718. GOMP_PLUGIN_fatal ("cuModuleLoadData error: %s", cuda_error (r));
  719. }
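/* Reap completed events.  For each event recorded on the current device
   whose work has finished (cuEventQuery reports CUDA_SUCCESS), release the
   associated resources: pop the argument mapping for PTX_EVT_KNL events,
   unmap asynchronously-copied variables for PTX_EVT_ASYNC_CLEANUP events,
   then destroy the event and unlink it from ptx_events.  MEMMAP_LOCKABLE
   is false when a caller already holds the memory-map lock; async-cleanup
   events are left for a later pass in that case.  */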
  720. static void
  721. event_gc (bool memmap_lockable)
  722. {
  723. struct ptx_event *ptx_event = ptx_events;
  724. struct nvptx_thread *nvthd = nvptx_thread ();
  725. pthread_mutex_lock (&ptx_event_lock);
  726. while (ptx_event != NULL)
  727. {
  728. CUresult r;
  729. struct ptx_event *e = ptx_event;
  730. ptx_event = ptx_event->next;
  731. if (e->ord != nvthd->ptx_dev->ord)
  732. continue;
  733. r = cuEventQuery (*e->evt);
  734. if (r == CUDA_SUCCESS)
  735. {
  736. CUevent *te;
  737. te = e->evt;
  738. switch (e->type)
  739. {
  740. case PTX_EVT_MEM:
  741. case PTX_EVT_SYNC:
  742. break;
  743. case PTX_EVT_KNL:
  744. map_pop (e->addr);
  745. break;
  746. case PTX_EVT_ASYNC_CLEANUP:
  747. {
  748. /* The function GOMP_PLUGIN_async_unmap_vars needs to claim the
  749. memory-map splay tree lock for the current device, so we
  750. can't call it when one of our callers has already claimed
  751. the lock. In that case, just delay the GC for this event
  752. until later. */
  753. if (!memmap_lockable)
  754. continue;
  755. GOMP_PLUGIN_async_unmap_vars (e->addr);
  756. }
  757. break;
  758. }
  759. cuEventDestroy (*te);
  760. free ((void *)te);
  761. if (ptx_events == e)
  762. ptx_events = ptx_events->next;
  763. else
  764. {
  765. struct ptx_event *e_ = ptx_events;
  766. while (e_->next != e)
  767. e_ = e_->next;
  768. e_->next = e_->next->next;
  769. }
  770. free (e);
  771. }
  772. }
  773. pthread_mutex_unlock (&ptx_event_lock);
  774. }
  775. static void
  776. event_add (enum ptx_event_type type, CUevent *e, void *h)
  777. {
  778. struct ptx_event *ptx_event;
  779. struct nvptx_thread *nvthd = nvptx_thread ();
  780. assert (type == PTX_EVT_MEM || type == PTX_EVT_KNL || type == PTX_EVT_SYNC
  781. || type == PTX_EVT_ASYNC_CLEANUP);
  782. ptx_event = GOMP_PLUGIN_malloc (sizeof (struct ptx_event));
  783. ptx_event->type = type;
  784. ptx_event->evt = e;
  785. ptx_event->addr = h;
  786. ptx_event->ord = nvthd->ptx_dev->ord;
  787. pthread_mutex_lock (&ptx_event_lock);
  788. ptx_event->next = ptx_events;
  789. ptx_events = ptx_event;
  790. pthread_mutex_unlock (&ptx_event_lock);
  791. }
  792. void
  793. nvptx_exec (void (*fn), size_t mapnum, void **hostaddrs, void **devaddrs,
  794. size_t *sizes, unsigned short *kinds, int num_gangs, int num_workers,
  795. int vector_length, int async, void *targ_mem_desc)
  796. {
  797. struct targ_fn_descriptor *targ_fn = (struct targ_fn_descriptor *) fn;
  798. CUfunction function;
  799. CUresult r;
  800. int i;
  801. struct ptx_stream *dev_str;
  802. void *kargs[1];
  803. void *hp, *dp;
  804. unsigned int nthreads_in_block;
  805. struct nvptx_thread *nvthd = nvptx_thread ();
  806. const char *maybe_abort_msg = "(perhaps abort was called)";
  807. function = targ_fn->fn;
  808. dev_str = select_stream_for_async (async, pthread_self (), false, NULL);
  809. assert (dev_str == nvthd->current_stream);
  810. /* This reserves a chunk of a pre-allocated page of memory mapped on both
  811. the host and the device. HP is a host pointer to the new chunk, and DP is
  812. the corresponding device pointer. */
  813. map_push (dev_str, async, mapnum * sizeof (void *), &hp, &dp);
  814. GOMP_PLUGIN_debug (0, " %s: prepare mappings\n", __FUNCTION__);
  815. /* Copy the array of arguments to the mapped page. */
  816. for (i = 0; i < mapnum; i++)
  817. ((void **) hp)[i] = devaddrs[i];
  818. /* Copy the (device) pointers to arguments to the device (dp and hp might in
  819. fact have the same value on a unified-memory system). */
  820. r = cuMemcpy ((CUdeviceptr)dp, (CUdeviceptr)hp, mapnum * sizeof (void *));
  821. if (r != CUDA_SUCCESS)
  822. GOMP_PLUGIN_fatal ("cuMemcpy failed: %s", cuda_error (r));
  823. GOMP_PLUGIN_debug (0, " %s: kernel %s: launch\n", __FUNCTION__, targ_fn->name);
  824. //   OpenACC        CUDA
  825. //
  826. //   num_gangs      blocks
  827. //   num_workers    warps (where a warp is equivalent to 32 threads)
  828. //   vector length  threads
  829. //
  830. /* The openacc vector_length clause 'determines the vector length to use for
  831. vector or SIMD operations'. The question is how to map this to CUDA.
  832. In CUDA, the warp size is the vector length of a CUDA device. However, the
  833. CUDA interface abstracts away from that, and only shows us warp size
  834. indirectly in maximum number of threads per block, which is a product of
  835. warp size and the number of hyperthreads of a multiprocessor.
  836. We choose to map openacc vector_length directly onto the number of threads
  837. in a block, in the x dimension. This is reflected in gcc code generation
  838. that uses threadIdx.x to access vector elements.
  839. Attempting to use an openacc vector_length of more than the maximum number
  840. of threads per block will result in a cuda error. */
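/* For example, num_gangs == 32 with vector_length == 128 launches a
   32x1x1 grid of 128x1x1 blocks below; num_workers is accepted by this
   entry point but does not affect the launch geometry here.  */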
  841. nthreads_in_block = vector_length;
  842. kargs[0] = &dp;
  843. r = cuLaunchKernel (function,
  844. num_gangs, 1, 1,
  845. nthreads_in_block, 1, 1,
  846. 0, dev_str->stream, kargs, 0);
  847. if (r != CUDA_SUCCESS)
  848. GOMP_PLUGIN_fatal ("cuLaunchKernel error: %s", cuda_error (r));
  849. #ifndef DISABLE_ASYNC
  850. if (async < acc_async_noval)
  851. {
  852. r = cuStreamSynchronize (dev_str->stream);
  853. if (r == CUDA_ERROR_LAUNCH_FAILED)
  854. GOMP_PLUGIN_fatal ("cuStreamSynchronize error: %s %s\n", cuda_error (r),
  855. maybe_abort_msg);
  856. else if (r != CUDA_SUCCESS)
  857. GOMP_PLUGIN_fatal ("cuStreamSynchronize error: %s", cuda_error (r));
  858. }
  859. else
  860. {
  861. CUevent *e;
  862. e = (CUevent *)GOMP_PLUGIN_malloc (sizeof (CUevent));
  863. r = cuEventCreate (e, CU_EVENT_DISABLE_TIMING);
  864. if (r == CUDA_ERROR_LAUNCH_FAILED)
  865. GOMP_PLUGIN_fatal ("cuEventCreate error: %s %s\n", cuda_error (r),
  866. maybe_abort_msg);
  867. else if (r != CUDA_SUCCESS)
  868. GOMP_PLUGIN_fatal ("cuEventCreate error: %s", cuda_error (r));
  869. event_gc (true);
  870. r = cuEventRecord (*e, dev_str->stream);
  871. if (r != CUDA_SUCCESS)
  872. GOMP_PLUGIN_fatal ("cuEventRecord error: %s", cuda_error (r));
  873. event_add (PTX_EVT_KNL, e, (void *)dev_str);
  874. }
  875. #else
  876. r = cuCtxSynchronize ();
  877. if (r == CUDA_ERROR_LAUNCH_FAILED)
  878. GOMP_PLUGIN_fatal ("cuCtxSynchronize error: %s %s\n", cuda_error (r),
  879. maybe_abort_msg);
  880. else if (r != CUDA_SUCCESS)
  881. GOMP_PLUGIN_fatal ("cuCtxSynchronize error: %s", cuda_error (r));
  882. #endif
  883. GOMP_PLUGIN_debug (0, " %s: kernel %s: finished\n", __FUNCTION__,
  884. targ_fn->name);
  885. #ifndef DISABLE_ASYNC
  886. if (async < acc_async_noval)
  887. #endif
  888. map_pop (dev_str);
  889. }
  890. void * openacc_get_current_cuda_context (void);
  891. static void *
  892. nvptx_alloc (size_t s)
  893. {
  894. CUdeviceptr d;
  895. CUresult r;
  896. r = cuMemAlloc (&d, s);
  897. if (r == CUDA_ERROR_OUT_OF_MEMORY)
  898. return 0;
  899. if (r != CUDA_SUCCESS)
  900. GOMP_PLUGIN_fatal ("cuMemAlloc error: %s", cuda_error (r));
  901. return (void *)d;
  902. }
  903. static void
  904. nvptx_free (void *p)
  905. {
  906. CUresult r;
  907. CUdeviceptr pb;
  908. size_t ps;
  909. r = cuMemGetAddressRange (&pb, &ps, (CUdeviceptr)p);
  910. if (r != CUDA_SUCCESS)
  911. GOMP_PLUGIN_fatal ("cuMemGetAddressRange error: %s", cuda_error (r));
  912. if ((CUdeviceptr)p != pb)
  913. GOMP_PLUGIN_fatal ("invalid device address");
  914. r = cuMemFree ((CUdeviceptr)p);
  915. if (r != CUDA_SUCCESS)
  916. GOMP_PLUGIN_fatal ("cuMemFree error: %s", cuda_error (r));
  917. }
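/* Copy S bytes from host address H to device address D.  On a non-null
   stream the copy is issued asynchronously with cuMemcpyHtoDAsync and an
   event is queued (via event_add) so the transfer can be reaped later; on
   the null stream it falls back to a synchronous cuMemcpyHtoD.
   nvptx_dev2host below mirrors this for device-to-host copies.  */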
  918. static void *
  919. nvptx_host2dev (void *d, const void *h, size_t s)
  920. {
  921. CUresult r;
  922. CUdeviceptr pb;
  923. size_t ps;
  924. struct nvptx_thread *nvthd = nvptx_thread ();
  925. if (!s)
  926. return 0;
  927. if (!d)
  928. GOMP_PLUGIN_fatal ("invalid device address");
  929. r = cuMemGetAddressRange (&pb, &ps, (CUdeviceptr)d);
  930. if (r != CUDA_SUCCESS)
  931. GOMP_PLUGIN_fatal ("cuMemGetAddressRange error: %s", cuda_error (r));
  932. if (!pb)
  933. GOMP_PLUGIN_fatal ("invalid device address");
  934. if (!h)
  935. GOMP_PLUGIN_fatal ("invalid host address");
  936. if (d == h)
  937. GOMP_PLUGIN_fatal ("invalid host or device address");
  938. if ((void *)(d + s) > (void *)(pb + ps))
  939. GOMP_PLUGIN_fatal ("invalid size");
  940. #ifndef DISABLE_ASYNC
  941. if (nvthd->current_stream != nvthd->ptx_dev->null_stream)
  942. {
  943. CUevent *e;
  944. e = (CUevent *)GOMP_PLUGIN_malloc (sizeof (CUevent));
  945. r = cuEventCreate (e, CU_EVENT_DISABLE_TIMING);
  946. if (r != CUDA_SUCCESS)
  947. GOMP_PLUGIN_fatal ("cuEventCreate error: %s", cuda_error (r));
  948. event_gc (false);
  949. r = cuMemcpyHtoDAsync ((CUdeviceptr)d, h, s,
  950. nvthd->current_stream->stream);
  951. if (r != CUDA_SUCCESS)
  952. GOMP_PLUGIN_fatal ("cuMemcpyHtoDAsync error: %s", cuda_error (r));
  953. r = cuEventRecord (*e, nvthd->current_stream->stream);
  954. if (r != CUDA_SUCCESS)
  955. GOMP_PLUGIN_fatal ("cuEventRecord error: %s", cuda_error (r));
  956. event_add (PTX_EVT_MEM, e, (void *)h);
  957. }
  958. else
  959. #endif
  960. {
  961. r = cuMemcpyHtoD ((CUdeviceptr)d, h, s);
  962. if (r != CUDA_SUCCESS)
  963. GOMP_PLUGIN_fatal ("cuMemcpyHtoD error: %s", cuda_error (r));
  964. }
  965. return 0;
  966. }
  967. static void *
  968. nvptx_dev2host (void *h, const void *d, size_t s)
  969. {
  970. CUresult r;
  971. CUdeviceptr pb;
  972. size_t ps;
  973. struct nvptx_thread *nvthd = nvptx_thread ();
  974. if (!s)
  975. return 0;
  976. if (!d)
  977. GOMP_PLUGIN_fatal ("invalid device address");
  978. r = cuMemGetAddressRange (&pb, &ps, (CUdeviceptr)d);
  979. if (r != CUDA_SUCCESS)
  980. GOMP_PLUGIN_fatal ("cuMemGetAddressRange error: %s", cuda_error (r));
  981. if (!pb)
  982. GOMP_PLUGIN_fatal ("invalid device address");
  983. if (!h)
  984. GOMP_PLUGIN_fatal ("invalid host address");
  985. if (d == h)
  986. GOMP_PLUGIN_fatal ("invalid host or device address");
  987. if ((void *)(d + s) > (void *)(pb + ps))
  988. GOMP_PLUGIN_fatal ("invalid size");
  989. #ifndef DISABLE_ASYNC
  990. if (nvthd->current_stream != nvthd->ptx_dev->null_stream)
  991. {
  992. CUevent *e;
  993. e = (CUevent *)GOMP_PLUGIN_malloc (sizeof (CUevent));
  994. r = cuEventCreate (e, CU_EVENT_DISABLE_TIMING);
  995. if (r != CUDA_SUCCESS)
  996. GOMP_PLUGIN_fatal ("cuEventCreate error: %s\n", cuda_error (r));
  997. event_gc (false);
  998. r = cuMemcpyDtoHAsync (h, (CUdeviceptr)d, s,
  999. nvthd->current_stream->stream);
  1000. if (r != CUDA_SUCCESS)
  1001. GOMP_PLUGIN_fatal ("cuMemcpyDtoHAsync error: %s", cuda_error (r));
  1002. r = cuEventRecord (*e, nvthd->current_stream->stream);
  1003. if (r != CUDA_SUCCESS)
  1004. GOMP_PLUGIN_fatal ("cuEventRecord error: %s", cuda_error (r));
  1005. event_add (PTX_EVT_MEM, e, (void *)h);
  1006. }
  1007. else
  1008. #endif
  1009. {
  1010. r = cuMemcpyDtoH (h, (CUdeviceptr)d, s);
  1011. if (r != CUDA_SUCCESS)
  1012. GOMP_PLUGIN_fatal ("cuMemcpyDtoH error: %s", cuda_error (r));
  1013. }
  1014. return 0;
  1015. }
  1016. static void
  1017. nvptx_set_async (int async)
  1018. {
  1019. struct nvptx_thread *nvthd = nvptx_thread ();
  1020. nvthd->current_stream
  1021. = select_stream_for_async (async, pthread_self (), true, NULL);
  1022. }
  1023. static int
  1024. nvptx_async_test (int async)
  1025. {
  1026. CUresult r;
  1027. struct ptx_stream *s;
  1028. s = select_stream_for_async (async, pthread_self (), false, NULL);
  1029. if (!s)
  1030. GOMP_PLUGIN_fatal ("unknown async %d", async);
  1031. r = cuStreamQuery (s->stream);
  1032. if (r == CUDA_SUCCESS)
  1033. {
  1034. /* The oacc-parallel.c:goacc_wait function calls this hook to determine
  1035. whether all work has completed on this stream, and if so omits the call
  1036. to the wait hook. If that happens, event_gc might not get called
  1037. (which prevents variables from getting unmapped and their associated
  1038. device storage freed), so call it here. */
  1039. event_gc (true);
  1040. return 1;
  1041. }
  1042. else if (r == CUDA_ERROR_NOT_READY)
  1043. return 0;
  1044. GOMP_PLUGIN_fatal ("cuStreamQuery error: %s", cuda_error (r));
  1045. return 0;
  1046. }
  1047. static int
  1048. nvptx_async_test_all (void)
  1049. {
  1050. struct ptx_stream *s;
  1051. pthread_t self = pthread_self ();
  1052. struct nvptx_thread *nvthd = nvptx_thread ();
  1053. pthread_mutex_lock (&nvthd->ptx_dev->stream_lock);
  1054. for (s = nvthd->ptx_dev->active_streams; s != NULL; s = s->next)
  1055. {
  1056. if ((s->multithreaded || pthread_equal (s->host_thread, self))
  1057. && cuStreamQuery (s->stream) == CUDA_ERROR_NOT_READY)
  1058. {
  1059. pthread_mutex_unlock (&nvthd->ptx_dev->stream_lock);
  1060. return 0;
  1061. }
  1062. }
  1063. pthread_mutex_unlock (&nvthd->ptx_dev->stream_lock);
  1064. event_gc (true);
  1065. return 1;
  1066. }
  1067. static void
  1068. nvptx_wait (int async)
  1069. {
  1070. CUresult r;
  1071. struct ptx_stream *s;
  1072. s = select_stream_for_async (async, pthread_self (), false, NULL);
  1073. if (!s)
  1074. GOMP_PLUGIN_fatal ("unknown async %d", async);
  1075. r = cuStreamSynchronize (s->stream);
  1076. if (r != CUDA_SUCCESS)
  1077. GOMP_PLUGIN_fatal ("cuStreamSynchronize error: %s", cuda_error (r));
  1078. event_gc (true);
  1079. }
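/* Make the stream for ASYNC2 wait for completion of the work already
   queued on the stream for ASYNC1.  The dependency is enqueued on the
   device with cuStreamWaitEvent, so the host thread does not block.  */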
  1080. static void
  1081. nvptx_wait_async (int async1, int async2)
  1082. {
  1083. CUresult r;
  1084. CUevent *e;
  1085. struct ptx_stream *s1, *s2;
  1086. pthread_t self = pthread_self ();
  1087. /* The stream that is waiting (rather than being waited for) doesn't
  1088. necessarily have to exist already. */
  1089. s2 = select_stream_for_async (async2, self, true, NULL);
  1090. s1 = select_stream_for_async (async1, self, false, NULL);
  1091. if (!s1)
  1092. GOMP_PLUGIN_fatal ("invalid async 1\n");
  1093. if (s1 == s2)
  1094. GOMP_PLUGIN_fatal ("identical parameters");
  1095. e = (CUevent *)GOMP_PLUGIN_malloc (sizeof (CUevent));
  1096. r = cuEventCreate (e, CU_EVENT_DISABLE_TIMING);
  1097. if (r != CUDA_SUCCESS)
  1098. GOMP_PLUGIN_fatal ("cuEventCreate error: %s", cuda_error (r));
  1099. event_gc (true);
  1100. r = cuEventRecord (*e, s1->stream);
  1101. if (r != CUDA_SUCCESS)
  1102. GOMP_PLUGIN_fatal ("cuEventRecord error: %s", cuda_error (r));
  1103. event_add (PTX_EVT_SYNC, e, NULL);
  1104. r = cuStreamWaitEvent (s2->stream, *e, 0);
  1105. if (r != CUDA_SUCCESS)
  1106. GOMP_PLUGIN_fatal ("cuStreamWaitEvent error: %s", cuda_error (r));
  1107. }
  1108. static void
  1109. nvptx_wait_all (void)
  1110. {
  1111. CUresult r;
  1112. struct ptx_stream *s;
  1113. pthread_t self = pthread_self ();
  1114. struct nvptx_thread *nvthd = nvptx_thread ();
  1115. pthread_mutex_lock (&nvthd->ptx_dev->stream_lock);
  1116. /* Wait for active streams initiated by this thread (or by multiple threads)
  1117. to complete. */
  1118. for (s = nvthd->ptx_dev->active_streams; s != NULL; s = s->next)
  1119. {
  1120. if (s->multithreaded || pthread_equal (s->host_thread, self))
  1121. {
  1122. r = cuStreamQuery (s->stream);
  1123. if (r == CUDA_SUCCESS)
  1124. continue;
  1125. else if (r != CUDA_ERROR_NOT_READY)
  1126. GOMP_PLUGIN_fatal ("cuStreamQuery error: %s", cuda_error (r));
  1127. r = cuStreamSynchronize (s->stream);
  1128. if (r != CUDA_SUCCESS)
  1129. GOMP_PLUGIN_fatal ("cuStreamSynchronize error: %s", cuda_error (r));
  1130. }
  1131. }
  1132. pthread_mutex_unlock (&nvthd->ptx_dev->stream_lock);
  1133. event_gc (true);
  1134. }
  1135. static void
  1136. nvptx_wait_all_async (int async)
  1137. {
  1138. CUresult r;
  1139. struct ptx_stream *waiting_stream, *other_stream;
  1140. CUevent *e;
  1141. struct nvptx_thread *nvthd = nvptx_thread ();
  1142. pthread_t self = pthread_self ();
  1143. /* The stream doing the waiting. This could be the first mention of the
  1144. stream, so create it if necessary. */
  1145. waiting_stream
  1146. = select_stream_for_async (async, pthread_self (), true, NULL);
  1147. /* Launches on the null stream already block on other streams in the
  1148. context. */
  1149. if (!waiting_stream || waiting_stream == nvthd->ptx_dev->null_stream)
  1150. return;
  1151. event_gc (true);
  1152. pthread_mutex_lock (&nvthd->ptx_dev->stream_lock);
  1153. for (other_stream = nvthd->ptx_dev->active_streams;
  1154. other_stream != NULL;
  1155. other_stream = other_stream->next)
  1156. {
  1157. if (!other_stream->multithreaded
  1158. && !pthread_equal (other_stream->host_thread, self))
  1159. continue;
  1160. e = (CUevent *) GOMP_PLUGIN_malloc (sizeof (CUevent));
  1161. r = cuEventCreate (e, CU_EVENT_DISABLE_TIMING);
  1162. if (r != CUDA_SUCCESS)
  1163. GOMP_PLUGIN_fatal ("cuEventCreate error: %s", cuda_error (r));
  1164. /* Record an event on the waited-for stream. */
  1165. r = cuEventRecord (*e, other_stream->stream);
  1166. if (r != CUDA_SUCCESS)
  1167. GOMP_PLUGIN_fatal ("cuEventRecord error: %s", cuda_error (r));
  1168. event_add (PTX_EVT_SYNC, e, NULL);
  1169. r = cuStreamWaitEvent (waiting_stream->stream, *e, 0);
  1170. if (r != CUDA_SUCCESS)
  1171. GOMP_PLUGIN_fatal ("cuStreamWaitEvent error: %s", cuda_error (r));
  1172. }
  1173. pthread_mutex_unlock (&nvthd->ptx_dev->stream_lock);
  1174. }
  1175. static void *
  1176. nvptx_get_current_cuda_device (void)
  1177. {
  1178. struct nvptx_thread *nvthd = nvptx_thread ();
  1179. if (!nvthd || !nvthd->ptx_dev)
  1180. return NULL;
  1181. return &nvthd->ptx_dev->dev;
  1182. }
  1183. static void *
  1184. nvptx_get_current_cuda_context (void)
  1185. {
  1186. struct nvptx_thread *nvthd = nvptx_thread ();
  1187. if (!nvthd || !nvthd->ptx_dev)
  1188. return NULL;
  1189. return nvthd->ptx_dev->ctx;
  1190. }
  1191. static void *
  1192. nvptx_get_cuda_stream (int async)
  1193. {
  1194. struct ptx_stream *s;
  1195. struct nvptx_thread *nvthd = nvptx_thread ();
  1196. if (!nvthd || !nvthd->ptx_dev)
  1197. return NULL;
  1198. s = select_stream_for_async (async, pthread_self (), false, NULL);
  1199. return s ? s->stream : NULL;
  1200. }
  1201. static int
  1202. nvptx_set_cuda_stream (int async, void *stream)
  1203. {
  1204. struct ptx_stream *oldstream;
  1205. pthread_t self = pthread_self ();
  1206. struct nvptx_thread *nvthd = nvptx_thread ();
  1207. pthread_mutex_lock (&nvthd->ptx_dev->stream_lock);
  1208. if (async < 0)
  1209. GOMP_PLUGIN_fatal ("bad async %d", async);
  1210. /* We have a list of active streams and an array mapping async values to
  1211. entries of that list. We need to take "ownership" of the passed-in stream,
  1212. and add it to our list, removing the previous entry also (if there was one)
  1213. in order to prevent resource leaks. Note the potential for surprise
  1214. here: maybe we should keep track of passed-in streams and leave it up to
  1215. the user to tidy those up, but that doesn't work for stream handles
  1216. returned from acc_get_cuda_stream above... */
  1217. oldstream = select_stream_for_async (async, self, false, NULL);
  1218. if (oldstream)
  1219. {
  1220. if (nvthd->ptx_dev->active_streams == oldstream)
  1221. nvthd->ptx_dev->active_streams = nvthd->ptx_dev->active_streams->next;
  1222. else
  1223. {
  1224. struct ptx_stream *s = nvthd->ptx_dev->active_streams;
  1225. while (s->next != oldstream)
  1226. s = s->next;
  1227. s->next = s->next->next;
  1228. }
  1229. cuStreamDestroy (oldstream->stream);
  1230. map_fini (oldstream);
  1231. free (oldstream);
  1232. }
  1233. pthread_mutex_unlock (&nvthd->ptx_dev->stream_lock);
  1234. (void) select_stream_for_async (async, self, true, (CUstream) stream);
  1235. return 1;
  1236. }
  1237. /* Plugin entry points. */
  1238. const char *
  1239. GOMP_OFFLOAD_get_name (void)
  1240. {
  1241. return "nvptx";
  1242. }
  1243. unsigned int
  1244. GOMP_OFFLOAD_get_caps (void)
  1245. {
  1246. return GOMP_OFFLOAD_CAP_OPENACC_200;
  1247. }
  1248. int
  1249. GOMP_OFFLOAD_get_type (void)
  1250. {
  1251. return OFFLOAD_TARGET_TYPE_NVIDIA_PTX;
  1252. }
  1253. int
  1254. GOMP_OFFLOAD_get_num_devices (void)
  1255. {
  1256. return nvptx_get_num_devices ();
  1257. }
  1258. void
  1259. GOMP_OFFLOAD_init_device (int n)
  1260. {
  1261. pthread_mutex_lock (&ptx_dev_lock);
  1262. if (!nvptx_init () || ptx_devices[n] != NULL)
  1263. {
  1264. pthread_mutex_unlock (&ptx_dev_lock);
  1265. return;
  1266. }
  1267. ptx_devices[n] = nvptx_open_device (n);
  1268. instantiated_devices++;
  1269. pthread_mutex_unlock (&ptx_dev_lock);
  1270. }
  1271. void
  1272. GOMP_OFFLOAD_fini_device (int n)
  1273. {
  1274. pthread_mutex_lock (&ptx_dev_lock);
  1275. if (ptx_devices[n] != NULL)
  1276. {
  1277. nvptx_attach_host_thread_to_device (n);
  1278. nvptx_close_device (ptx_devices[n]);
  1279. ptx_devices[n] = NULL;
  1280. instantiated_devices--;
  1281. }
  1282. pthread_mutex_unlock (&ptx_dev_lock);
  1283. }
  1284. int
  1285. GOMP_OFFLOAD_load_image (int ord, void *target_data,
  1286. struct addr_pair **target_table)
  1287. {
  1288. CUmodule module;
  1289. char **fn_names, **var_names;
  1290. unsigned int fn_entries, var_entries, i, j;
  1291. CUresult r;
  1292. struct targ_fn_descriptor *targ_fns;
  1293. void **img_header = (void **) target_data;
  1294. struct ptx_image_data *new_image;
  1295. GOMP_OFFLOAD_init_device (ord);
  1296. nvptx_attach_host_thread_to_device (ord);
  1297. link_ptx (&module, img_header[0]);
  1298. pthread_mutex_lock (&ptx_image_lock);
  1299. new_image = GOMP_PLUGIN_malloc (sizeof (struct ptx_image_data));
  1300. new_image->target_data = target_data;
  1301. new_image->module = module;
  1302. new_image->next = ptx_images;
  1303. ptx_images = new_image;
  1304. pthread_mutex_unlock (&ptx_image_lock);
  1305. /* The mkoffload utility emits a table of pointers/integers at the start of
  1306. each offload image:
  1307. img_header[0] -> ptx code
  1308. img_header[1] -> number of variables
  1309. img_header[2] -> array of variable names (pointers to strings)
  1310. img_header[3] -> number of kernels
  1311. img_header[4] -> array of kernel names (pointers to strings)
  1312. The array of kernel names and the function addresses form a
  1313. one-to-one correspondence. */
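/* For illustration only (the code below just indexes the raw pointer
   array), the header can be pictured as a struct along these lines:

     struct offload_image_header        <- hypothetical name
     {
       const char *ptx_code;            <- img_header[0]
       uintptr_t var_count;             <- img_header[1]
       char **var_names;                <- img_header[2]
       uintptr_t fn_count;              <- img_header[3]
       char **fn_names;                 <- img_header[4]
     };  */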
  1314. var_entries = (uintptr_t) img_header[1];
  1315. var_names = (char **) img_header[2];
  1316. fn_entries = (uintptr_t) img_header[3];
  1317. fn_names = (char **) img_header[4];
  1318. *target_table = GOMP_PLUGIN_malloc (sizeof (struct addr_pair)
  1319. * (fn_entries + var_entries));
  1320. targ_fns = GOMP_PLUGIN_malloc (sizeof (struct targ_fn_descriptor)
  1321. * fn_entries);
  1322. for (i = 0; i < fn_entries; i++)
  1323. {
  1324. CUfunction function;
  1325. r = cuModuleGetFunction (&function, module, fn_names[i]);
  1326. if (r != CUDA_SUCCESS)
  1327. GOMP_PLUGIN_fatal ("cuModuleGetFunction error: %s", cuda_error (r));
  1328. targ_fns[i].fn = function;
  1329. targ_fns[i].name = (const char *) fn_names[i];
  1330. (*target_table)[i].start = (uintptr_t) &targ_fns[i];
  1331. (*target_table)[i].end = (*target_table)[i].start + 1;
  1332. }
  1333. for (j = 0; j < var_entries; j++, i++)
  1334. {
  1335. CUdeviceptr var;
  1336. size_t bytes;
  1337. r = cuModuleGetGlobal (&var, &bytes, module, var_names[j]);
  1338. if (r != CUDA_SUCCESS)
  1339. GOMP_PLUGIN_fatal ("cuModuleGetGlobal error: %s", cuda_error (r));
  1340. (*target_table)[i].start = (uintptr_t) var;
  1341. (*target_table)[i].end = (*target_table)[i].start + bytes;
  1342. }
  1343. return i;
  1344. }
  1345. void
  1346. GOMP_OFFLOAD_unload_image (int tid __attribute__((unused)), void *target_data)
  1347. {
  1348. void **img_header = (void **) target_data;
  1349. struct targ_fn_descriptor *targ_fns
  1350. = (struct targ_fn_descriptor *) img_header[0];
  1351. struct ptx_image_data *image, *prev = NULL, *newhd = NULL;
  1352. free (targ_fns);
  1353. pthread_mutex_lock (&ptx_image_lock);
  1354. for (image = ptx_images; image != NULL;)
  1355. {
  1356. struct ptx_image_data *next = image->next;
  1357. if (image->target_data == target_data)
  1358. {
  1359. cuModuleUnload (image->module);
  1360. free (image);
  1361. if (prev)
  1362. prev->next = next;
  1363. }
  1364. else
  1365. {
  1366. prev = image;
  1367. if (!newhd)
  1368. newhd = image;
  1369. }
  1370. image = next;
  1371. }
  1372. ptx_images = newhd;
  1373. pthread_mutex_unlock (&ptx_image_lock);
  1374. }
  1375. void *
  1376. GOMP_OFFLOAD_alloc (int ord, size_t size)
  1377. {
  1378. nvptx_attach_host_thread_to_device (ord);
  1379. return nvptx_alloc (size);
  1380. }
  1381. void
  1382. GOMP_OFFLOAD_free (int ord, void *ptr)
  1383. {
  1384. nvptx_attach_host_thread_to_device (ord);
  1385. nvptx_free (ptr);
  1386. }
  1387. void *
  1388. GOMP_OFFLOAD_dev2host (int ord, void *dst, const void *src, size_t n)
  1389. {
  1390. nvptx_attach_host_thread_to_device (ord);
  1391. return nvptx_dev2host (dst, src, n);
  1392. }
  1393. void *
  1394. GOMP_OFFLOAD_host2dev (int ord, void *dst, const void *src, size_t n)
  1395. {
  1396. nvptx_attach_host_thread_to_device (ord);
  1397. return nvptx_host2dev (dst, src, n);
  1398. }
  1399. void (*device_run) (int n, void *fn_ptr, void *vars) = NULL;
  1400. void
  1401. GOMP_OFFLOAD_openacc_parallel (void (*fn) (void *), size_t mapnum,
  1402. void **hostaddrs, void **devaddrs, size_t *sizes,
  1403. unsigned short *kinds, int num_gangs,
  1404. int num_workers, int vector_length, int async,
  1405. void *targ_mem_desc)
  1406. {
  1407. nvptx_exec (fn, mapnum, hostaddrs, devaddrs, sizes, kinds, num_gangs,
  1408. num_workers, vector_length, async, targ_mem_desc);
  1409. }
  1410. void
  1411. GOMP_OFFLOAD_openacc_register_async_cleanup (void *targ_mem_desc)
  1412. {
  1413. CUevent *e;
  1414. CUresult r;
  1415. struct nvptx_thread *nvthd = nvptx_thread ();
  1416. e = (CUevent *) GOMP_PLUGIN_malloc (sizeof (CUevent));
  1417. r = cuEventCreate (e, CU_EVENT_DISABLE_TIMING);
  1418. if (r != CUDA_SUCCESS)
  1419. GOMP_PLUGIN_fatal ("cuEventCreate error: %s", cuda_error (r));
  1420. r = cuEventRecord (*e, nvthd->current_stream->stream);
  1421. if (r != CUDA_SUCCESS)
  1422. GOMP_PLUGIN_fatal ("cuEventRecord error: %s", cuda_error (r));
  1423. event_add (PTX_EVT_ASYNC_CLEANUP, e, targ_mem_desc);
  1424. }
  1425. int
  1426. GOMP_OFFLOAD_openacc_async_test (int async)
  1427. {
  1428. return nvptx_async_test (async);
  1429. }
  1430. int
  1431. GOMP_OFFLOAD_openacc_async_test_all (void)
  1432. {
  1433. return nvptx_async_test_all ();
  1434. }
  1435. void
  1436. GOMP_OFFLOAD_openacc_async_wait (int async)
  1437. {
  1438. nvptx_wait (async);
  1439. }
  1440. void
  1441. GOMP_OFFLOAD_openacc_async_wait_async (int async1, int async2)
  1442. {
  1443. nvptx_wait_async (async1, async2);
  1444. }
  1445. void
  1446. GOMP_OFFLOAD_openacc_async_wait_all (void)
  1447. {
  1448. nvptx_wait_all ();
  1449. }
  1450. void
  1451. GOMP_OFFLOAD_openacc_async_wait_all_async (int async)
  1452. {
  1453. nvptx_wait_all_async (async);
  1454. }
  1455. void
  1456. GOMP_OFFLOAD_openacc_async_set_async (int async)
  1457. {
  1458. nvptx_set_async (async);
  1459. }
  1460. void *
  1461. GOMP_OFFLOAD_openacc_create_thread_data (int ord)
  1462. {
  1463. struct ptx_device *ptx_dev;
  1464. struct nvptx_thread *nvthd
  1465. = GOMP_PLUGIN_malloc (sizeof (struct nvptx_thread));
  1466. CUresult r;
  1467. CUcontext thd_ctx;
  1468. ptx_dev = ptx_devices[ord];
  1469. assert (ptx_dev);
  1470. r = cuCtxGetCurrent (&thd_ctx);
  1471. if (r != CUDA_SUCCESS)
  1472. GOMP_PLUGIN_fatal ("cuCtxGetCurrent error: %s", cuda_error (r));
  1473. assert (ptx_dev->ctx);
  1474. if (!thd_ctx)
  1475. {
  1476. r = cuCtxPushCurrent (ptx_dev->ctx);
  1477. if (r != CUDA_SUCCESS)
  1478. GOMP_PLUGIN_fatal ("cuCtxPushCurrent error: %s", cuda_error (r));
  1479. }
  1480. nvthd->current_stream = ptx_dev->null_stream;
  1481. nvthd->ptx_dev = ptx_dev;
  1482. return (void *) nvthd;
  1483. }
  1484. void
  1485. GOMP_OFFLOAD_openacc_destroy_thread_data (void *data)
  1486. {
  1487. free (data);
  1488. }
  1489. void *
  1490. GOMP_OFFLOAD_openacc_get_current_cuda_device (void)
  1491. {
  1492. return nvptx_get_current_cuda_device ();
  1493. }
  1494. void *
  1495. GOMP_OFFLOAD_openacc_get_current_cuda_context (void)
  1496. {
  1497. return nvptx_get_current_cuda_context ();
  1498. }
  1499. /* NOTE: This returns a CUstream, not a ptx_stream pointer. */
  1500. void *
  1501. GOMP_OFFLOAD_openacc_get_cuda_stream (int async)
  1502. {
  1503. return nvptx_get_cuda_stream (async);
  1504. }
  1505. /* NOTE: This takes a CUstream, not a ptx_stream pointer. */
  1506. int
  1507. GOMP_OFFLOAD_openacc_set_cuda_stream (int async, void *stream)
  1508. {
  1509. return nvptx_set_cuda_stream (async, stream);
  1510. }