N_01-WIP-nouveau-add-locking.patch
  1. --- a/src/gallium/drivers/nouveau/nouveau_buffer.c
  2. +++ b/src/gallium/drivers/nouveau/nouveau_buffer.c
  3. @@ -380,6 +380,7 @@
  4. struct pipe_transfer **ptransfer)
  5. {
  6. struct nouveau_context *nv = nouveau_context(pipe);
  7. + struct nouveau_screen *screen = nv->screen;
  8. struct nv04_resource *buf = nv04_resource(resource);
  9. struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
  10. uint8_t *map;
  11. @@ -427,14 +428,19 @@
  12. buf->data = NULL;
  13. }
  14. nouveau_transfer_staging(nv, tx, false);
  15. + pipe_mutex_lock(screen->push_mutex);
  16. nouveau_transfer_read(nv, tx);
  17. + pipe_mutex_unlock(screen->push_mutex);
  18. } else {
  19. /* The buffer is currently idle. Create a staging area for writes,
  20. * and make sure that the cached data is up-to-date. */
  21. if (usage & PIPE_TRANSFER_WRITE)
  22. nouveau_transfer_staging(nv, tx, true);
  23. - if (!buf->data)
  24. + if (!buf->data) {
  25. + pipe_mutex_lock(screen->push_mutex);
  26. nouveau_buffer_cache(nv, buf);
  27. + pipe_mutex_unlock(screen->push_mutex);
  28. + }
  29. }
  30. }
  31. return buf->data ? (buf->data + box->x) : tx->map;
  32. @@ -479,7 +485,9 @@
  33. if (unlikely(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)) {
  34. /* Discarding was not possible, must sync because
  35. * subsequent transfers might use UNSYNCHRONIZED. */
  36. + pipe_mutex_lock(screen->push_mutex);
  37. nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE);
  38. + pipe_mutex_unlock(screen->push_mutex);
  39. } else
  40. if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
  41. /* The whole range is being discarded, so it doesn't matter what was
  42. @@ -488,10 +496,13 @@
  43. map = tx->map;
  44. } else
  45. if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
  46. - if (usage & PIPE_TRANSFER_DONTBLOCK)
  47. + if (usage & PIPE_TRANSFER_DONTBLOCK) {
  48. map = NULL;
  49. - else
  50. + } else {
  51. + pipe_mutex_lock(screen->push_mutex);
  52. nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE);
  53. + pipe_mutex_unlock(screen->push_mutex);
  54. + }
  55. } else {
  56. /* It is expected that the returned buffer be a representation of the
  57. * data in question, so we must copy it over from the buffer. */
  58. @@ -515,9 +526,13 @@
  59. {
  60. struct nouveau_transfer *tx = nouveau_transfer(transfer);
  61. struct nv04_resource *buf = nv04_resource(transfer->resource);
  62. -
  63. - if (tx->map)
  64. + struct nouveau_screen *screen = nouveau_context(pipe)->screen;
  65. +
  66. + if (tx->map) {
  67. + pipe_mutex_lock(screen->push_mutex);
  68. nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);
  69. + pipe_mutex_unlock(screen->push_mutex);
  70. + }
  71. util_range_add(&buf->valid_buffer_range,
  72. tx->base.box.x + box->x,
  73. @@ -537,11 +552,15 @@
  74. struct nouveau_context *nv = nouveau_context(pipe);
  75. struct nouveau_transfer *tx = nouveau_transfer(transfer);
  76. struct nv04_resource *buf = nv04_resource(transfer->resource);
  77. + struct nouveau_screen *screen = nouveau_context(pipe)->screen;
  78. if (tx->base.usage & PIPE_TRANSFER_WRITE) {
  79. if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
  80. - if (tx->map)
  81. + if (tx->map) {
  82. + pipe_mutex_lock(screen->push_mutex);
  83. nouveau_transfer_write(nv, tx, 0, tx->base.box.width);
  84. + pipe_mutex_unlock(screen->push_mutex);
  85. + }
  86. util_range_add(&buf->valid_buffer_range,
  87. tx->base.box.x, tx->base.box.x + tx->base.box.width);
  88. --- a/src/gallium/drivers/nouveau/nouveau_fence.c
  89. +++ b/src/gallium/drivers/nouveau/nouveau_fence.c
  90. @@ -71,12 +71,14 @@
  91. ++fence->ref;
  92. + pipe_mutex_lock(screen->fence.list_mutex);
  93. if (screen->fence.tail)
  94. screen->fence.tail->next = fence;
  95. else
  96. screen->fence.head = fence;
  97. screen->fence.tail = fence;
  98. + pipe_mutex_unlock(screen->fence.list_mutex);
  99. screen->fence.emit(&screen->base, &fence->sequence);
  100. @@ -90,6 +92,9 @@
  101. struct nouveau_fence *it;
  102. struct nouveau_screen *screen = fence->screen;
  103. + /* XXX This can race against fence_update. But fence_update can also call
  105. + * into this, so ... we have to be careful.
  105. + */
  106. if (fence->state == NOUVEAU_FENCE_STATE_EMITTED ||
  107. fence->state == NOUVEAU_FENCE_STATE_FLUSHED) {
  108. if (fence == screen->fence.head) {
  109. @@ -123,6 +128,7 @@
  110. return;
  111. screen->fence.sequence_ack = sequence;
  112. + pipe_mutex_lock(screen->fence.list_mutex);
  113. for (fence = screen->fence.head; fence; fence = next) {
  114. next = fence->next;
  115. sequence = fence->sequence;
  116. @@ -144,6 +150,7 @@
  117. if (fence->state == NOUVEAU_FENCE_STATE_EMITTED)
  118. fence->state = NOUVEAU_FENCE_STATE_FLUSHED;
  119. }
  120. + pipe_mutex_unlock(screen->fence.list_mutex);
  121. }
  122. #define NOUVEAU_FENCE_MAX_SPINS (1 << 31)
  123. @@ -198,11 +205,19 @@
  124. uint32_t spins = 0;
  125. int64_t start = 0;
  126. + /* Fast-path for the case where the fence is already signaled to avoid
  127. + * messing around with mutexes and timing.
  128. + */
  129. + if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED)
  130. + return true;
  131. +
  132. if (debug && debug->debug_message)
  133. start = os_time_get_nano();
  134. if (!nouveau_fence_kick(fence))
  135. return false;
  136. +
  137. + pipe_mutex_unlock(screen->push_mutex);
  138. do {
  139. if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) {
  140. @@ -210,6 +225,7 @@
  141. pipe_debug_message(debug, PERF_INFO,
  142. "stalled %.3f ms waiting for fence",
  143. (os_time_get_nano() - start) / 1000000.f);
  144. + pipe_mutex_lock(screen->push_mutex);
  145. return true;
  146. }
  147. if (!spins)
  148. @@ -226,6 +242,8 @@
  149. debug_printf("Wait on fence %u (ack = %u, next = %u) timed out !\n",
  150. fence->sequence,
  151. screen->fence.sequence_ack, screen->fence.sequence);
  152. +
  153. + pipe_mutex_lock(screen->push_mutex);
  154. return false;
  155. }
  156. --- a/src/gallium/drivers/nouveau/nouveau_fence.h
  157. +++ b/src/gallium/drivers/nouveau/nouveau_fence.h
  158. @@ -2,6 +2,7 @@
  159. #ifndef __NOUVEAU_FENCE_H__
  160. #define __NOUVEAU_FENCE_H__
  161. +#include "util/u_atomic.h"
  162. #include "util/u_inlines.h"
  163. #include "util/list.h"
  164. @@ -47,10 +48,10 @@
  165. nouveau_fence_ref(struct nouveau_fence *fence, struct nouveau_fence **ref)
  166. {
  167. if (fence)
  168. - ++fence->ref;
  169. + p_atomic_inc(&fence->ref);
  170. if (*ref) {
  171. - if (--(*ref)->ref == 0)
  172. + if (p_atomic_dec_zero(&(*ref)->ref))
  173. nouveau_fence_del(*ref);
  174. }
  178. --- a/src/gallium/drivers/nouveau/nouveau_screen.c
  179. +++ b/src/gallium/drivers/nouveau/nouveau_screen.c
  180. @@ -74,10 +74,14 @@
  181. struct pipe_fence_handle *pfence,
  182. uint64_t timeout)
  183. {
  184. + bool ret;
  185. if (!timeout)
  186. return nouveau_fence_signalled(nouveau_fence(pfence));
  187. - return nouveau_fence_wait(nouveau_fence(pfence), NULL);
  188. + pipe_mutex_lock(nouveau_screen(screen)->push_mutex);
  189. + ret = nouveau_fence_wait(nouveau_fence(pfence), NULL);
  190. + pipe_mutex_unlock(nouveau_screen(screen)->push_mutex);
  191. + return ret;
  192. }
  193. @@ -153,6 +157,9 @@
  194. char *nv_dbg = getenv("NOUVEAU_MESA_DEBUG");
  195. if (nv_dbg)
  196. nouveau_mesa_debug = atoi(nv_dbg);
  197. +
  198. + pipe_mutex_init(screen->push_mutex);
  199. + pipe_mutex_init(screen->fence.list_mutex);
  200. /* These must be set before any failure is possible, as the cleanup
  201. * paths assume they're responsible for deleting them.
  202. @@ -254,6 +261,9 @@
  203. nouveau_device_del(&screen->device);
  204. nouveau_drm_del(&screen->drm);
  205. close(fd);
  206. +
  207. + pipe_mutex_destroy(screen->push_mutex);
  208. + pipe_mutex_destroy(screen->fence.list_mutex);
  209. }
  210. static void
  211. --- a/src/gallium/drivers/nouveau/nouveau_screen.h
  212. +++ b/src/gallium/drivers/nouveau/nouveau_screen.h
  213. @@ -3,6 +3,7 @@
  214. #include "pipe/p_screen.h"
  215. #include "util/u_memory.h"
  216. +#include "os/os_thread.h"
  217. #ifdef DEBUG
  218. # define NOUVEAU_ENABLE_DRIVER_STATISTICS
  219. @@ -22,6 +23,7 @@
  220. struct nouveau_object *channel;
  221. struct nouveau_client *client;
  222. struct nouveau_pushbuf *pushbuf;
  223. + pipe_mutex push_mutex;
  224. int refcount;
  225. @@ -39,6 +41,7 @@
  226. struct nouveau_fence *head;
  227. struct nouveau_fence *tail;
  228. struct nouveau_fence *current;
  229. + pipe_mutex list_mutex;
  230. u32 sequence;
  231. u32 sequence_ack;
  232. void (*emit)(struct pipe_screen *, u32 *sequence);
  233. --- a/src/gallium/drivers/nouveau/nv30/nv30_clear.c
  234. +++ b/src/gallium/drivers/nouveau/nv30/nv30_clear.c
  235. @@ -58,8 +58,11 @@
  236. struct pipe_framebuffer_state *fb = &nv30->framebuffer;
  237. uint32_t colr = 0, zeta = 0, mode = 0;
  238. - if (!nv30_state_validate(nv30, NV30_NEW_FRAMEBUFFER | NV30_NEW_SCISSOR, true))
  239. + pipe_mutex_lock(nv30->screen->base.push_mutex);
  240. + if (!nv30_state_validate(nv30, NV30_NEW_FRAMEBUFFER | NV30_NEW_SCISSOR, true)) {
  241. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  242. return;
  243. + }
  244. if (buffers & PIPE_CLEAR_COLOR && fb->nr_cbufs) {
  245. colr = pack_rgba(fb->cbufs[0]->format, color->f);
  246. @@ -96,6 +99,7 @@
  247. PUSH_DATA (push, mode);
  248. nv30_state_release(nv30);
  249. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  250. }
  251. static void
  252. @@ -126,11 +130,15 @@
  253. rt_format |= NV30_3D_RT_FORMAT_TYPE_LINEAR;
  254. }
  255. + pipe_mutex_lock(nv30->screen->base.push_mutex);
  256. +
  257. refn.bo = mt->base.bo;
  258. refn.flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_WR;
  259. if (nouveau_pushbuf_space(push, 32, 1, 0) ||
  260. - nouveau_pushbuf_refn (push, &refn, 1))
  261. + nouveau_pushbuf_refn (push, &refn, 1)) {
  262. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  263. return;
  264. + }
  265. BEGIN_NV04(push, NV30_3D(RT_ENABLE), 1);
  266. PUSH_DATA (push, NV30_3D_RT_ENABLE_COLOR0);
  267. @@ -155,6 +163,8 @@
  268. NV30_3D_CLEAR_BUFFERS_COLOR_B |
  269. NV30_3D_CLEAR_BUFFERS_COLOR_A);
  270. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  271. +
  272. nv30->dirty |= NV30_NEW_FRAMEBUFFER | NV30_NEW_SCISSOR;
  273. }
  274. @@ -191,11 +201,15 @@
  275. if (buffers & PIPE_CLEAR_STENCIL)
  276. mode |= NV30_3D_CLEAR_BUFFERS_STENCIL;
  277. + pipe_mutex_lock(nv30->screen->base.push_mutex);
  278. +
  279. refn.bo = mt->base.bo;
  280. refn.flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_WR;
  281. if (nouveau_pushbuf_space(push, 32, 1, 0) ||
  282. - nouveau_pushbuf_refn (push, &refn, 1))
  283. + nouveau_pushbuf_refn (push, &refn, 1)) {
  284. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  285. return;
  286. + }
  287. BEGIN_NV04(push, NV30_3D(RT_ENABLE), 1);
  288. PUSH_DATA (push, 0);
  289. @@ -221,6 +235,8 @@
  290. BEGIN_NV04(push, NV30_3D(CLEAR_BUFFERS), 1);
  291. PUSH_DATA (push, mode);
  292. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  293. +
  294. nv30->dirty |= NV30_NEW_FRAMEBUFFER | NV30_NEW_SCISSOR;
  295. }
  296. --- a/src/gallium/drivers/nouveau/nv30/nv30_context.c
  297. +++ b/src/gallium/drivers/nouveau/nv30/nv30_context.c
  298. @@ -201,6 +201,8 @@
  299. if (!nv30)
  300. return NULL;
  301. + pipe_mutex_lock(screen->base.push_mutex);
  302. +
  303. nv30->screen = screen;
  304. nv30->base.screen = &screen->base;
  305. nv30->base.copy_data = nv30_transfer_copy_data;
  306. @@ -226,6 +228,7 @@
  307. ret = nouveau_bufctx_new(nv30->base.client, 64, &nv30->bufctx);
  308. if (ret) {
  309. nv30_context_destroy(pipe);
  310. + pipe_mutex_unlock(screen->base.push_mutex);
  311. return NULL;
  312. }
  313. @@ -259,10 +262,13 @@
  314. nv30->blitter = util_blitter_create(pipe);
  315. if (!nv30->blitter) {
  316. nv30_context_destroy(pipe);
  317. + pipe_mutex_unlock(screen->base.push_mutex);
  318. return NULL;
  319. }
  320. nouveau_context_init_vdec(&nv30->base);
  321. + pipe_mutex_unlock(screen->base.push_mutex);
  322. +
  323. return pipe;
  324. }
  325. --- a/src/gallium/drivers/nouveau/nv30/nv30_miptree.c
  326. +++ b/src/gallium/drivers/nouveau/nv30/nv30_miptree.c
  327. @@ -130,10 +130,12 @@
  328. struct nv30_context *nv30 = nv30_context(pipe);
  329. struct nv30_rect src, dst;
  330. + pipe_mutex_lock(nv30->screen->base.push_mutex);
  331. if (dstres->target == PIPE_BUFFER && srcres->target == PIPE_BUFFER) {
  332. nouveau_copy_buffer(&nv30->base,
  333. nv04_resource(dstres), dstx,
  334. nv04_resource(srcres), src_box->x, src_box->width);
  335. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  336. return;
  337. }
  338. @@ -143,6 +145,7 @@
  339. src_box->width, src_box->height, &dst);
  340. nv30_transfer_rect(nv30, NEAREST, &src, &dst);
  341. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  342. }
  343. static void
  344. @@ -163,6 +166,7 @@
  345. y1 = src.y1;
  346. /* On nv3x we must use sifm which is restricted to 1024x1024 tiles */
  347. + pipe_mutex_lock(nv30->screen->base.push_mutex);
  348. for (y = src.y0; y < y1; y += h) {
  349. h = y1 - y;
  350. if (h > 1024)
  351. @@ -193,6 +197,7 @@
  352. nv30_transfer_rect(nv30, BILINEAR, &src, &dst);
  353. }
  354. }
  355. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  356. }
  357. void
  358. @@ -308,8 +313,12 @@
  359. tx->tmp.y1 = tx->tmp.h;
  360. tx->tmp.z = 0;
  361. - if (usage & PIPE_TRANSFER_READ)
  362. + if (usage & PIPE_TRANSFER_READ) {
  363. + pipe_mutex_lock(nv30->screen->base.push_mutex);
  364. nv30_transfer_rect(nv30, NEAREST, &tx->img, &tx->tmp);
  365. + PUSH_KICK(nv30->base.pushbuf);
  366. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  367. + }
  368. if (tx->tmp.bo->map) {
  369. *ptransfer = &tx->base;
  370. @@ -340,11 +349,13 @@
  371. struct nv30_transfer *tx = nv30_transfer(ptx);
  372. if (ptx->usage & PIPE_TRANSFER_WRITE) {
  373. + pipe_mutex_lock(nv30->screen->base.push_mutex);
  374. nv30_transfer_rect(nv30, NEAREST, &tx->tmp, &tx->img);
  375. /* Allow the copies above to finish executing before freeing the source */
  376. nouveau_fence_work(nv30->screen->base.fence.current,
  377. nouveau_fence_unref_bo, tx->tmp.bo);
  378. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  379. } else {
  380. nouveau_bo_ref(NULL, &tx->tmp.bo);
  381. }
  382. --- a/src/gallium/drivers/nouveau/nv30/nv30_query.c
  383. +++ b/src/gallium/drivers/nouveau/nv30/nv30_query.c
  384. @@ -152,6 +152,7 @@
  385. struct nv30_query *q = nv30_query(pq);
  386. struct nouveau_pushbuf *push = nv30->base.pushbuf;
  387. + pipe_mutex_lock(nv30->screen->base.push_mutex);
  388. switch (q->type) {
  389. case PIPE_QUERY_TIME_ELAPSED:
  390. q->qo[0] = nv30_query_object_new(nv30->screen);
  391. @@ -161,7 +162,7 @@
  392. }
  393. break;
  394. case PIPE_QUERY_TIMESTAMP:
  395. - return true;
  396. + break;
  397. default:
  398. BEGIN_NV04(push, NV30_3D(QUERY_RESET), 1);
  399. PUSH_DATA (push, q->report);
  400. @@ -172,6 +173,7 @@
  401. BEGIN_NV04(push, SUBC_3D(q->enable), 1);
  402. PUSH_DATA (push, 1);
  403. }
  404. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  405. return true;
  406. }
  407. @@ -183,6 +185,7 @@
  408. struct nv30_query *q = nv30_query(pq);
  409. struct nouveau_pushbuf *push = nv30->base.pushbuf;
  410. + pipe_mutex_lock(nv30->screen->base.push_mutex);
  411. q->qo[1] = nv30_query_object_new(screen);
  412. if (q->qo[1]) {
  413. BEGIN_NV04(push, NV30_3D(QUERY_GET), 1);
  414. @@ -194,6 +197,7 @@
  415. PUSH_DATA (push, 0);
  416. }
  417. PUSH_KICK (push);
  418. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  419. return true;
  420. }
  421. @@ -248,9 +252,11 @@
  422. nv30->render_cond_mode = mode;
  423. nv30->render_cond_cond = condition;
  424. + pipe_mutex_lock(nv30->screen->base.push_mutex);
  425. if (!pq) {
  426. BEGIN_NV04(push, SUBC_3D(0x1e98), 1);
  427. PUSH_DATA (push, 0x01000000);
  428. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  429. return;
  430. }
  431. @@ -262,6 +268,7 @@
  432. BEGIN_NV04(push, SUBC_3D(0x1e98), 1);
  433. PUSH_DATA (push, 0x02000000 | q->qo[1]->hw->start);
  434. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  435. }
  436. static void
  437. --- a/src/gallium/drivers/nouveau/nv30/nv30_vbo.c
  438. +++ b/src/gallium/drivers/nouveau/nv30/nv30_vbo.c
  439. @@ -563,6 +563,8 @@
  440. if (nv30->vbo_push_hint != !!nv30->vbo_fifo)
  441. nv30->dirty |= NV30_NEW_ARRAYS;
  442. + pipe_mutex_lock(nv30->screen->base.push_mutex);
  443. +
  444. push->user_priv = &nv30->bufctx;
  445. if (nv30->vbo_user && !(nv30->dirty & (NV30_NEW_VERTEX | NV30_NEW_ARRAYS)))
  446. nv30_update_user_vbufs(nv30);
  447. @@ -570,10 +572,12 @@
  448. nv30_state_validate(nv30, ~0, true);
  449. if (nv30->draw_flags) {
  450. nv30_render_vbo(pipe, info);
  451. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  452. return;
  453. } else
  454. if (nv30->vbo_fifo) {
  455. nv30_push_vbo(nv30, info);
  456. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  457. return;
  458. }
  459. @@ -630,6 +634,7 @@
  460. nv30_state_release(nv30);
  461. nv30_release_user_vbufs(nv30);
  462. + pipe_mutex_unlock(nv30->screen->base.push_mutex);
  463. }
  464. void
  465. --- a/src/gallium/drivers/nouveau/nv50/nv50_compute.c
  466. +++ b/src/gallium/drivers/nouveau/nv50/nv50_compute.c
  467. @@ -249,9 +249,11 @@
  468. struct nv50_program *cp = nv50->compprog;
  469. bool ret;
  470. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  471. ret = !nv50_state_validate_cp(nv50, ~0);
  472. if (ret) {
  473. NOUVEAU_ERR("Failed to launch grid !\n");
  474. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  475. return;
  476. }
  477. @@ -284,6 +286,8 @@
  478. BEGIN_NV04(push, SUBC_CP(NV50_GRAPH_SERIALIZE), 1);
  479. PUSH_DATA (push, 0);
  480. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  481. +
  482. /* bind a compute shader clobbers fragment shader state */
  483. nv50->dirty_3d |= NV50_NEW_3D_FRAGPROG;
  484. }
  485. --- a/src/gallium/drivers/nouveau/nv50/nv50_context.c
  486. +++ b/src/gallium/drivers/nouveau/nv50/nv50_context.c
  487. @@ -37,7 +37,9 @@
  488. if (fence)
  489. nouveau_fence_ref(screen->fence.current, (struct nouveau_fence **)fence);
  490. + pipe_mutex_lock(screen->push_mutex);
  491. PUSH_KICK(screen->pushbuf);
  492. + pipe_mutex_unlock(screen->push_mutex);
  493. nouveau_context_update_frame_stats(nouveau_context(pipe));
  494. }
  495. @@ -47,10 +49,12 @@
  496. {
  497. struct nouveau_pushbuf *push = nv50_context(pipe)->base.pushbuf;
  498. + pipe_mutex_lock(nouveau_context(pipe)->screen->push_mutex);
  499. BEGIN_NV04(push, SUBC_3D(NV50_GRAPH_SERIALIZE), 1);
  500. PUSH_DATA (push, 0);
  501. BEGIN_NV04(push, NV50_3D(TEX_CACHE_CTL), 1);
  502. PUSH_DATA (push, 0x20);
  503. + pipe_mutex_unlock(nouveau_context(pipe)->screen->push_mutex);
  504. }
  505. static void
  506. @@ -107,6 +111,7 @@
  507. data_words = string_words;
  508. else
  509. data_words = string_words + !!(len & 3);
  510. + pipe_mutex_lock(nouveau_context(pipe)->screen->push_mutex);
  511. BEGIN_NI04(push, SUBC_3D(NV04_GRAPH_NOP), data_words);
  512. if (string_words)
  513. PUSH_DATAp(push, str, string_words);
  514. @@ -115,6 +120,7 @@
  515. memcpy(&data, &str[string_words * 4], len & 3);
  516. PUSH_DATA (push, data);
  517. }
  518. + pipe_mutex_unlock(nouveau_context(pipe)->screen->push_mutex);
  519. }
  520. void
  521. @@ -290,6 +296,8 @@
  522. if (!nv50)
  523. return NULL;
  524. pipe = &nv50->base.pipe;
  525. +
  526. + pipe_mutex_lock(screen->base.push_mutex);
  527. if (!nv50_blitctx_create(nv50))
  528. goto out_err;
  529. @@ -384,9 +392,12 @@
  530. util_dynarray_init(&nv50->global_residents);
  531. + pipe_mutex_unlock(screen->base.push_mutex);
  532. +
  533. return pipe;
  534. out_err:
  535. + pipe_mutex_unlock(screen->base.push_mutex);
  536. if (nv50->bufctx_3d)
  537. nouveau_bufctx_del(&nv50->bufctx_3d);
  538. if (nv50->bufctx_cp)
  539. --- a/src/gallium/drivers/nouveau/nv50/nv50_context.h
  540. +++ b/src/gallium/drivers/nouveau/nv50/nv50_context.h
  541. @@ -222,6 +222,11 @@
  542. /* nv50_draw.c */
  543. extern struct draw_stage *nv50_draw_render_stage(struct nv50_context *);
  544. +/* nv50_query.c */
  545. +void nv50_render_condition(struct pipe_context *pipe,
  546. + struct pipe_query *pq,
  547. + boolean condition, uint mode);
  548. +
  549. /* nv50_shader_state.c */
  550. void nv50_vertprog_validate(struct nv50_context *);
  551. void nv50_gmtyprog_validate(struct nv50_context *);
  552. --- a/src/gallium/drivers/nouveau/nv50/nv50_query.c
  553. +++ b/src/gallium/drivers/nouveau/nv50/nv50_query.c
  554. @@ -70,7 +70,7 @@
  555. return q->funcs->get_query_result(nv50_context(pipe), q, wait, result);
  556. }
  557. -static void
  558. +void
  559. nv50_render_condition(struct pipe_context *pipe,
  560. struct pipe_query *pq,
  561. boolean condition, uint mode)
  562. @@ -145,6 +145,16 @@
  563. }
  564. static void
  565. +nv50_render_condition_locked(struct pipe_context *pipe,
  566. + struct pipe_query *pq,
  567. + boolean condition, uint mode)
  568. +{
  569. + pipe_mutex_lock(nouveau_context(pipe)->screen->push_mutex);
  570. + nv50_render_condition(pipe, pq, condition, mode);
  571. + pipe_mutex_unlock(nouveau_context(pipe)->screen->push_mutex);
  572. +}
  573. +
  574. +static void
  575. nv50_set_active_query_state(struct pipe_context *pipe, boolean enable)
  576. {
  577. }
  578. @@ -160,7 +170,7 @@
  579. pipe->end_query = nv50_end_query;
  580. pipe->get_query_result = nv50_get_query_result;
  581. pipe->set_active_query_state = nv50_set_active_query_state;
  582. - pipe->render_condition = nv50_render_condition;
  583. + pipe->render_condition = nv50_render_condition_locked;
  584. nv50->cond_condmode = NV50_3D_COND_MODE_ALWAYS;
  585. }
  586. --- a/src/gallium/drivers/nouveau/nv50/nv50_query_hw.c
  587. +++ b/src/gallium/drivers/nouveau/nv50/nv50_query_hw.c
  588. @@ -129,6 +129,7 @@
  589. {
  590. struct nouveau_pushbuf *push = nv50->base.pushbuf;
  591. struct nv50_hw_query *hq = nv50_hw_query(q);
  592. + bool ret = true;
  593. if (hq->funcs && hq->funcs->begin_query)
  594. return hq->funcs->begin_query(nv50, hq);
  595. @@ -154,6 +155,7 @@
  596. if (!hq->is64bit)
  597. hq->data[0] = hq->sequence++; /* the previously used one */
  598. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  599. switch (q->type) {
  600. case PIPE_QUERY_OCCLUSION_COUNTER:
  601. case PIPE_QUERY_OCCLUSION_PREDICATE:
  602. @@ -193,10 +195,13 @@
  603. break;
  604. default:
  605. assert(0);
  606. - return false;
  607. - }
  608. - hq->state = NV50_HW_QUERY_STATE_ACTIVE;
  609. - return true;
  610. + ret = false;
  611. + break;
  612. + }
  613. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  614. + if (ret)
  615. + hq->state = NV50_HW_QUERY_STATE_ACTIVE;
  616. + return ret;
  617. }
  618. static void
  619. @@ -212,6 +217,7 @@
  620. hq->state = NV50_HW_QUERY_STATE_ENDED;
  621. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  622. switch (q->type) {
  623. case PIPE_QUERY_OCCLUSION_COUNTER:
  624. case PIPE_QUERY_OCCLUSION_PREDICATE:
  625. @@ -264,6 +270,7 @@
  626. assert(0);
  627. break;
  628. }
  629. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  630. if (hq->is64bit)
  631. nouveau_fence_ref(nv50->screen->base.fence.current, &hq->fence);
  632. }
  633. @@ -286,16 +293,21 @@
  634. nv50_hw_query_update(q);
  635. if (hq->state != NV50_HW_QUERY_STATE_READY) {
  636. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  637. if (!wait) {
  638. /* for broken apps that spin on GL_QUERY_RESULT_AVAILABLE */
  639. if (hq->state != NV50_HW_QUERY_STATE_FLUSHED) {
  640. hq->state = NV50_HW_QUERY_STATE_FLUSHED;
  641. PUSH_KICK(nv50->base.pushbuf);
  642. }
  643. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  644. return false;
  645. }
  646. - if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nv50->screen->base.client))
  647. + if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nv50->screen->base.client)) {
  648. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  649. return false;
  650. + }
  651. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  652. }
  653. hq->state = NV50_HW_QUERY_STATE_READY;
  654. --- a/src/gallium/drivers/nouveau/nv50/nv50_query_hw_sm.c
  655. +++ b/src/gallium/drivers/nouveau/nv50/nv50_query_hw_sm.c
  656. @@ -176,6 +176,7 @@
  657. return false;
  658. }
  659. + pipe_mutex_lock(screen->base.push_mutex);
  660. assert(cfg->num_counters <= 4);
  661. PUSH_SPACE(push, 4 * 4);
  662. @@ -208,6 +209,7 @@
  663. BEGIN_NV04(push, NV50_CP(MP_PM_SET(c)), 1);
  664. PUSH_DATA (push, 0);
  665. }
  666. + pipe_mutex_unlock(screen->base.push_mutex);
  667. return true;
  668. }
  669. @@ -237,6 +239,7 @@
  670. screen->pm.prog = prog;
  671. }
  672. + pipe_mutex_lock(screen->base.push_mutex);
  673. /* disable all counting */
  674. PUSH_SPACE(push, 8);
  675. for (c = 0; c < 4; c++) {
  676. @@ -260,6 +263,7 @@
  677. PUSH_SPACE(push, 2);
  678. BEGIN_NV04(push, SUBC_CP(NV50_GRAPH_SERIALIZE), 1);
  679. PUSH_DATA (push, 0);
  680. + pipe_mutex_unlock(screen->base.push_mutex);
  681. pipe->bind_compute_state(pipe, screen->pm.prog);
  682. input[0] = hq->bo->offset + hq->base_offset;
  683. @@ -276,6 +280,7 @@
  684. nouveau_bufctx_reset(nv50->bufctx_cp, NV50_BIND_CP_QUERY);
  685. + pipe_mutex_lock(screen->base.push_mutex);
  686. /* re-active other counters */
  687. PUSH_SPACE(push, 8);
  688. mask = 0;
  689. @@ -302,6 +307,7 @@
  690. | cfg->ctr[i].unit | cfg->ctr[i].mode);
  691. }
  692. }
  693. + pipe_mutex_unlock(screen->base.push_mutex);
  694. }
  695. static inline bool
  696. @@ -343,7 +349,9 @@
  697. cfg = nv50_hw_sm_query_get_cfg(nv50, hq);
  698. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  699. ret = nv50_hw_sm_query_read_data(count, nv50, wait, hq, cfg, mp_count);
  700. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  701. if (!ret)
  702. return false;
  703. --- a/src/gallium/drivers/nouveau/nv50/nv50_surface.c
  704. +++ b/src/gallium/drivers/nouveau/nv50/nv50_surface.c
  705. @@ -204,10 +204,13 @@
  706. bool m2mf;
  707. unsigned dst_layer = dstz, src_layer = src_box->z;
  708. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  709. +
  710. if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
  711. nouveau_copy_buffer(&nv50->base,
  712. nv04_resource(dst), dstx,
  713. nv04_resource(src), src_box->x, src_box->width);
  714. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  715. return;
  716. }
  717. @@ -247,6 +250,7 @@
  718. else
  719. srect.base += src_mt->layer_stride;
  720. }
  721. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  722. return;
  723. }
  724. @@ -270,6 +274,7 @@
  725. break;
  726. }
  727. nouveau_bufctx_reset(nv50->bufctx, NV50_BIND_2D);
  728. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  729. }
  730. static void
  731. @@ -289,14 +294,18 @@
  732. assert(dst->texture->target != PIPE_BUFFER);
  733. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  734. +
  735. BEGIN_NV04(push, NV50_3D(CLEAR_COLOR(0)), 4);
  736. PUSH_DATAf(push, color->f[0]);
  737. PUSH_DATAf(push, color->f[1]);
  738. PUSH_DATAf(push, color->f[2]);
  739. PUSH_DATAf(push, color->f[3]);
  740. - if (nouveau_pushbuf_space(push, 64 + sf->depth, 1, 0))
  741. + if (nouveau_pushbuf_space(push, 64 + sf->depth, 1, 0)) {
  742. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  743. return;
  744. + }
  745. PUSH_REFN(push, bo, mt->base.domain | NOUVEAU_BO_WR);
  746. @@ -358,6 +367,8 @@
  747. PUSH_DATA (push, nv50->cond_condmode);
  748. }
  749. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  750. +
  751. nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER | NV50_NEW_3D_SCISSOR;
  752. }
  753. @@ -382,6 +393,8 @@
  754. assert(dst->texture->target != PIPE_BUFFER);
  755. assert(nouveau_bo_memtype(bo)); /* ZETA cannot be linear */
  756. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  757. +
  758. if (clear_flags & PIPE_CLEAR_DEPTH) {
  759. BEGIN_NV04(push, NV50_3D(CLEAR_DEPTH), 1);
  760. PUSH_DATAf(push, depth);
  761. @@ -394,8 +407,10 @@
  762. mode |= NV50_3D_CLEAR_BUFFERS_S;
  763. }
  764. - if (nouveau_pushbuf_space(push, 64 + sf->depth, 1, 0))
  765. + if (nouveau_pushbuf_space(push, 64 + sf->depth, 1, 0)) {
  766. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  767. return;
  768. + }
  769. PUSH_REFN(push, bo, mt->base.domain | NOUVEAU_BO_WR);
  770. @@ -445,6 +460,8 @@
  771. BEGIN_NV04(push, NV50_3D(COND_MODE), 1);
  772. PUSH_DATA (push, nv50->cond_condmode);
  773. }
  774. +
  775. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  776. nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER | NV50_NEW_3D_SCISSOR;
  777. }
  778. @@ -534,9 +551,12 @@
  779. unsigned i, j, k;
  780. uint32_t mode = 0;
  781. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  782. /* don't need NEW_BLEND, COLOR_MASK doesn't affect CLEAR_BUFFERS */
  783. - if (!nv50_state_validate_3d(nv50, NV50_NEW_3D_FRAMEBUFFER))
  784. + if (!nv50_state_validate_3d(nv50, NV50_NEW_3D_FRAMEBUFFER)) {
  785. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  786. return;
  787. + }
  788. /* We have to clear ALL of the layers, not up to the min number of layers
  789. * of any attachment. */
  790. @@ -602,6 +622,7 @@
  791. /* restore the array mode */
  792. BEGIN_NV04(push, NV50_3D(RT_ARRAY_MODE), 1);
  793. PUSH_DATA (push, nv50->rt_array_mode);
  794. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  795. }
  796. static void
  797. @@ -729,14 +750,18 @@
  798. assert(size % data_size == 0);
  799. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  800. +
  801. if (offset & 0xff) {
  802. unsigned fixup_size = MIN2(size, align(offset, 0x100) - offset);
  803. assert(fixup_size % data_size == 0);
  804. nv50_clear_buffer_push(pipe, res, offset, fixup_size, data, data_size);
  805. offset += fixup_size;
  806. size -= fixup_size;
  807. - if (!size)
  808. + if (!size) {
  809. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  810. return;
  811. + }
  812. }
  813. elements = size / data_size;
  814. @@ -752,8 +777,10 @@
  815. PUSH_DATAf(push, color.f[2]);
  816. PUSH_DATAf(push, color.f[3]);
  817. - if (nouveau_pushbuf_space(push, 64, 1, 0))
  818. + if (nouveau_pushbuf_space(push, 64, 1, 0)) {
  819. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  820. return;
  821. + }
  822. PUSH_REFN(push, buf->bo, buf->domain | NOUVEAU_BO_WR);
  823. @@ -807,6 +834,8 @@
  824. nv50_clear_buffer_push(pipe, res, offset, width * data_size,
  825. data, data_size);
  826. }
  827. +
  828. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  829. nv50->dirty_3d |= NV50_NEW_3D_FRAMEBUFFER | NV50_NEW_3D_SCISSOR;
  830. }
  831. @@ -1724,6 +1753,8 @@
  832. info->src.box.height != -info->dst.box.height))
  833. eng3d = true;
  834. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  835. +
  836. if (nv50->screen->num_occlusion_queries_active) {
  837. BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
  838. PUSH_DATA (push, 0);
  839. @@ -1738,6 +1769,8 @@
  840. BEGIN_NV04(push, NV50_3D(SAMPLECNT_ENABLE), 1);
  841. PUSH_DATA (push, 1);
  842. }
  843. +
  844. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  845. }
  846. static void
  847. --- a/src/gallium/drivers/nouveau/nv50/nv50_transfer.c
  848. +++ b/src/gallium/drivers/nouveau/nv50/nv50_transfer.c
  849. @@ -304,6 +304,7 @@
  850. unsigned base = tx->rect[0].base;
  851. unsigned z = tx->rect[0].z;
  852. unsigned i;
  853. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  854. for (i = 0; i < box->depth; ++i) {
  855. nv50_m2mf_transfer_rect(nv50, &tx->rect[1], &tx->rect[0],
  856. tx->nblocksx, tx->nblocksy);
  857. @@ -313,6 +314,9 @@
  858. tx->rect[0].base += mt->layer_stride;
  859. tx->rect[1].base += size;
  860. }
  861. + /* Kick these reads out so we don't have to reacquire a lock below */
  862. + PUSH_KICK(nv50->base.pushbuf);
  863. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  864. tx->rect[0].z = z;
  865. tx->rect[0].base = base;
  866. tx->rect[1].base = 0;
  867. @@ -349,6 +353,7 @@
  868. unsigned i;
  869. if (tx->base.usage & PIPE_TRANSFER_WRITE) {
  870. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  871. for (i = 0; i < tx->base.box.depth; ++i) {
  872. nv50_m2mf_transfer_rect(nv50, &tx->rect[0], &tx->rect[1],
  873. tx->nblocksx, tx->nblocksy);
  874. @@ -362,6 +367,7 @@
  875. /* Allow the copies above to finish executing before freeing the source */
  876. nouveau_fence_work(nv50->screen->base.fence.current,
  877. nouveau_fence_unref_bo, tx->rect[1].bo);
  878. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  879. } else {
  880. nouveau_bo_ref(NULL, &tx->rect[1].bo);
  881. }
  882. --- a/src/gallium/drivers/nouveau/nv50/nv50_vbo.c
  883. +++ b/src/gallium/drivers/nouveau/nv50/nv50_vbo.c
  884. @@ -767,6 +767,8 @@
  885. bool tex_dirty = false;
  886. int s;
  887. + pipe_mutex_lock(nv50->screen->base.push_mutex);
  888. +
  889. /* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
  890. nv50->vb_elt_first = info->min_index + info->index_bias;
  891. nv50->vb_elt_limit = info->max_index - info->min_index;
  892. @@ -827,6 +829,7 @@
  893. nv50_push_vbo(nv50, info);
  894. push->kick_notify = nv50_default_kick_notify;
  895. nouveau_pushbuf_bufctx(push, NULL);
  896. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  897. return;
  898. }
  899. @@ -886,4 +889,6 @@
  900. nv50_release_user_vbufs(nv50);
  901. nouveau_pushbuf_bufctx(push, NULL);
  902. -}
  903. +
  904. + pipe_mutex_unlock(nv50->screen->base.push_mutex);
  905. +}
  906. --- a/src/gallium/drivers/nouveau/nvc0/nvc0_compute.c
  907. +++ b/src/gallium/drivers/nouveau/nvc0/nvc0_compute.c
  908. @@ -424,13 +424,17 @@
  909. nvc0_launch_grid(struct pipe_context *pipe, const struct pipe_grid_info *info)
  910. {
  911. struct nvc0_context *nvc0 = nvc0_context(pipe);
  912. + struct nvc0_screen *screen = nvc0->screen;
  913. struct nouveau_pushbuf *push = nvc0->base.pushbuf;
  914. struct nvc0_program *cp = nvc0->compprog;
  915. int ret;
  916. + pipe_mutex_lock(screen->base.push_mutex);
  917. +
  918. ret = !nvc0_state_validate_cp(nvc0, ~0);
  919. if (ret) {
  920. NOUVEAU_ERR("Failed to launch grid !\n");
  921. + pipe_mutex_unlock(screen->base.push_mutex);
  922. return;
  923. }
  924. @@ -498,4 +502,6 @@
  925. nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_SUF);
  926. nvc0->dirty_cp |= NVC0_NEW_CP_SURFACES;
  927. nvc0->images_dirty[5] |= nvc0->images_valid[5];
  928. -}
  929. +
  930. + pipe_mutex_unlock(screen->base.push_mutex);
  931. +}
  932. --- a/src/gallium/drivers/nouveau/nvc0/nvc0_context.c
  933. +++ b/src/gallium/drivers/nouveau/nvc0/nvc0_context.c
  934. @@ -38,7 +38,9 @@
  935. if (fence)
  936. nouveau_fence_ref(screen->fence.current, (struct nouveau_fence **)fence);
  937. + pipe_mutex_lock(screen->push_mutex);
  938. PUSH_KICK(nvc0->base.pushbuf); /* fencing handled in kick_notify */
  939. + pipe_mutex_unlock(screen->push_mutex);
  940. nouveau_context_update_frame_stats(&nvc0->base);
  941. }
  942. @@ -48,8 +50,10 @@
  943. {
  944. struct nouveau_pushbuf *push = nvc0_context(pipe)->base.pushbuf;
  945. + pipe_mutex_lock(nvc0_context(pipe)->screen->base.push_mutex);
  946. IMMED_NVC0(push, NVC0_3D(SERIALIZE), 0);
  947. IMMED_NVC0(push, NVC0_3D(TEX_CACHE_CTL), 0);
  948. + pipe_mutex_unlock(nvc0_context(pipe)->screen->base.push_mutex);
  949. }
  950. static void
  951. @@ -58,6 +62,8 @@
  952. struct nvc0_context *nvc0 = nvc0_context(pipe);
  953. struct nouveau_pushbuf *push = nvc0->base.pushbuf;
  954. int i, s;
  955. +
  956. + pipe_mutex_lock(nvc0_context(pipe)->screen->base.push_mutex);
  957. if (flags & PIPE_BARRIER_MAPPED_BUFFER) {
  958. for (i = 0; i < nvc0->num_vtxbufs; ++i) {
  959. @@ -108,6 +114,8 @@
  960. nvc0->cb_dirty = true;
  961. if (flags & (PIPE_BARRIER_VERTEX_BUFFER | PIPE_BARRIER_INDEX_BUFFER))
  962. nvc0->base.vbo_dirty = true;
  963. +
  964. + pipe_mutex_unlock(nvc0_context(pipe)->screen->base.push_mutex);
  965. }
  966. static void
  967. @@ -124,6 +132,7 @@
  968. data_words = string_words;
  969. else
  970. data_words = string_words + !!(len & 3);
  971. + pipe_mutex_lock(nvc0_context(pipe)->screen->base.push_mutex);
  972. BEGIN_NIC0(push, SUBC_3D(NV04_GRAPH_NOP), data_words);
  973. if (string_words)
  974. PUSH_DATAp(push, str, string_words);
  975. @@ -132,6 +141,7 @@
  976. memcpy(&data, &str[string_words * 4], len & 3);
  977. PUSH_DATA (push, data);
  978. }
  979. + pipe_mutex_unlock(nvc0_context(pipe)->screen->base.push_mutex);
  980. }
  981. static void
  982. @@ -365,6 +375,8 @@
  983. return NULL;
  984. pipe = &nvc0->base.pipe;
  985. + pipe_mutex_lock(screen->base.push_mutex);
  986. +
  987. if (!nvc0_blitctx_create(nvc0))
  988. goto out_err;
  989. @@ -468,9 +480,12 @@
  990. util_dynarray_init(&nvc0->global_residents);
  991. + pipe_mutex_unlock(screen->base.push_mutex);
  992. +
  993. return pipe;
  994. out_err:
  995. + pipe_mutex_unlock(screen->base.push_mutex);
  996. if (nvc0) {
  997. if (nvc0->bufctx_3d)
  998. nouveau_bufctx_del(&nvc0->bufctx_3d);
  999. --- a/src/gallium/drivers/nouveau/nvc0/nvc0_context.h
  1000. +++ b/src/gallium/drivers/nouveau/nvc0/nvc0_context.h
  1001. @@ -299,6 +299,11 @@
  1002. uint32_t label);
  1003. void nvc0_program_init_tcp_empty(struct nvc0_context *);
  1004. +/* nvc0_query.c */
  1005. +void nvc0_render_condition(struct pipe_context *pipe,
  1006. + struct pipe_query *pq,
  1007. + boolean condition, uint mode);
  1008. +
  1009. /* nvc0_shader_state.c */
  1010. void nvc0_vertprog_validate(struct nvc0_context *);
  1011. void nvc0_tctlprog_validate(struct nvc0_context *);
  1012. --- a/src/gallium/drivers/nouveau/nvc0/nvc0_query.c
  1013. +++ b/src/gallium/drivers/nouveau/nvc0/nvc0_query.c
  1014. @@ -92,7 +92,7 @@
  1015. index, resource, offset);
  1016. }
  1017. -static void
  1018. +void
  1019. nvc0_render_condition(struct pipe_context *pipe,
  1020. struct pipe_query *pq,
  1021. boolean condition, uint mode)
  1022. @@ -161,6 +161,16 @@
  1023. PUSH_DATA (push, hq->bo->offset + hq->offset);
  1024. }
  1025. +static void
  1026. +nvc0_render_condition_locked(struct pipe_context *pipe,
  1027. + struct pipe_query *pq,
  1028. + boolean condition, uint mode)
  1029. +{
  1030. + pipe_mutex_lock(nouveau_context(pipe)->screen->push_mutex);
  1031. + nvc0_render_condition(pipe, pq, condition, mode);
  1032. + pipe_mutex_unlock(nouveau_context(pipe)->screen->push_mutex);
  1033. +}
  1034. +
  1035. int
  1036. nvc0_screen_get_driver_query_info(struct pipe_screen *pscreen,
  1037. unsigned id,
  1038. @@ -272,6 +282,6 @@
  1039. pipe->get_query_result = nvc0_get_query_result;
  1040. pipe->get_query_result_resource = nvc0_get_query_result_resource;
  1041. pipe->set_active_query_state = nvc0_set_active_query_state;
  1042. - pipe->render_condition = nvc0_render_condition;
  1043. + pipe->render_condition = nvc0_render_condition_locked;
  1044. nvc0->cond_condmode = NVC0_3D_COND_MODE_ALWAYS;
  1045. }
  1046. --- a/src/gallium/drivers/nouveau/nvc0/nvc0_query_hw.c
  1047. +++ b/src/gallium/drivers/nouveau/nvc0/nvc0_query_hw.c
  1048. @@ -154,6 +154,7 @@
  1049. }
  1050. hq->sequence++;
  1051. + pipe_mutex_lock(nvc0->screen->base.push_mutex);
  1052. switch (q->type) {
  1053. case PIPE_QUERY_OCCLUSION_COUNTER:
  1054. case PIPE_QUERY_OCCLUSION_PREDICATE:
  1055. @@ -198,6 +199,7 @@
  1056. default:
  1057. break;
  1058. }
  1059. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1060. hq->state = NVC0_HW_QUERY_STATE_ACTIVE;
  1061. return ret;
  1062. }
  1063. @@ -221,6 +223,7 @@
  1064. }
  1065. hq->state = NVC0_HW_QUERY_STATE_ENDED;
  1066. + pipe_mutex_lock(nvc0->screen->base.push_mutex);
  1067. switch (q->type) {
  1068. case PIPE_QUERY_OCCLUSION_COUNTER:
  1069. case PIPE_QUERY_OCCLUSION_PREDICATE:
  1070. @@ -276,6 +279,7 @@
  1071. default:
  1072. break;
  1073. }
  1074. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1075. if (hq->is64bit)
  1076. nouveau_fence_ref(nvc0->screen->base.fence.current, &hq->fence);
  1077. }
  1078. @@ -298,16 +302,21 @@
  1079. nvc0_hw_query_update(nvc0->screen->base.client, q);
  1080. if (hq->state != NVC0_HW_QUERY_STATE_READY) {
  1081. + pipe_mutex_lock(nvc0->screen->base.push_mutex);
  1082. if (!wait) {
  1083. if (hq->state != NVC0_HW_QUERY_STATE_FLUSHED) {
  1084. hq->state = NVC0_HW_QUERY_STATE_FLUSHED;
  1085. /* flush for silly apps that spin on GL_QUERY_RESULT_AVAILABLE */
  1086. PUSH_KICK(nvc0->base.pushbuf);
  1087. }
  1088. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1089. return false;
  1090. }
  1091. - if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nvc0->screen->base.client))
  1092. + if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nvc0->screen->base.client)) {
  1093. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1094. return false;
  1095. + }
  1096. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1097. NOUVEAU_DRV_STAT(&nvc0->screen->base, query_sync_count, 1);
  1098. }
  1099. hq->state = NVC0_HW_QUERY_STATE_READY;
  1100. @@ -374,6 +383,8 @@
  1101. assert(!hq->funcs || !hq->funcs->get_query_result);
  1102. + pipe_mutex_lock(nvc0->screen->base.push_mutex);
  1103. +
  1104. if (index == -1) {
  1105. /* TODO: Use a macro to write the availability of the query */
  1106. if (hq->state != NVC0_HW_QUERY_STATE_READY)
  1107. @@ -382,6 +393,7 @@
  1108. nvc0->base.push_cb(&nvc0->base, buf, offset,
  1109. result_type >= PIPE_QUERY_TYPE_I64 ? 2 : 1,
  1110. ready);
  1111. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1112. return;
  1113. }
  1114. @@ -469,6 +481,8 @@
  1115. 4 | NVC0_IB_ENTRY_1_NO_PREFETCH);
  1116. }
  1117. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1118. +
  1119. if (buf->mm) {
  1120. nouveau_fence_ref(nvc0->screen->base.fence.current, &buf->fence);
  1121. nouveau_fence_ref(nvc0->screen->base.fence.current, &buf->fence_wr);
  1122. --- a/src/gallium/drivers/nouveau/nvc0/nvc0_query_hw_sm.c
  1123. +++ b/src/gallium/drivers/nouveau/nvc0/nvc0_query_hw_sm.c
  1124. @@ -1662,6 +1662,7 @@
  1125. return false;
  1126. }
  1127. + pipe_mutex_lock(screen->base.push_mutex);
  1128. assert(cfg->num_counters <= 4);
  1129. PUSH_SPACE(push, 4 * 8 * + 6);
  1130. @@ -1710,6 +1711,7 @@
  1131. BEGIN_NVC0(push, NVE4_CP(MP_PM_SET(c)), 1);
  1132. PUSH_DATA (push, 0);
  1133. }
  1134. + pipe_mutex_unlock(screen->base.push_mutex);
  1135. return true;
  1136. }
  1137. @@ -1733,6 +1735,7 @@
  1138. return false;
  1139. }
  1140. + pipe_mutex_lock(screen->base.push_mutex);
  1141. assert(cfg->num_counters <= 8);
  1142. PUSH_SPACE(push, 8 * 8 + 2);
  1143. @@ -1779,6 +1782,7 @@
  1144. BEGIN_NVC0(push, NVC0_CP(MP_PM_SET(c)), 1);
  1145. PUSH_DATA (push, 0);
  1146. }
  1147. + pipe_mutex_unlock(screen->base.push_mutex);
  1148. return true;
  1149. }
  1150. @@ -1866,6 +1870,7 @@
  1151. if (unlikely(!screen->pm.prog))
  1152. screen->pm.prog = nvc0_hw_sm_get_program(screen);
  1153. + pipe_mutex_lock(screen->base.push_mutex);
  1154. /* disable all counting */
  1155. PUSH_SPACE(push, 8);
  1156. for (c = 0; c < 8; ++c)
  1157. @@ -1893,6 +1898,7 @@
  1158. /* upload input data for the compute shader which reads MP counters */
  1159. nvc0_hw_sm_upload_input(nvc0, hq);
  1160. + pipe_mutex_unlock(screen->base.push_mutex);
  1161. pipe->bind_compute_state(pipe, screen->pm.prog);
  1162. for (i = 0; i < 3; i++) {
  1163. @@ -1906,6 +1912,7 @@
  1164. nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_QUERY);
  1165. + pipe_mutex_lock(screen->base.push_mutex);
  1166. /* re-activate other counters */
  1167. PUSH_SPACE(push, 16);
  1168. mask = 0;
  1169. @@ -1930,6 +1937,7 @@
  1170. PUSH_DATA (push, (cfg->ctr[i].func << 4) | cfg->ctr[i].mode);
  1171. }
  1172. }
  1173. + pipe_mutex_unlock(screen->base.push_mutex);
  1174. }
  1175. static inline bool
  1176. --- a/src/gallium/drivers/nouveau/nvc0/nvc0_screen.c
  1177. +++ b/src/gallium/drivers/nouveau/nvc0/nvc0_screen.c
  1178. @@ -507,7 +507,9 @@
  1179. * _current_ one, and remove both.
  1180. */
  1181. nouveau_fence_ref(screen->base.fence.current, &current);
  1182. + pipe_mutex_lock(screen->base.push_mutex);
  1183. nouveau_fence_wait(current, NULL);
  1184. + pipe_mutex_unlock(screen->base.push_mutex);
  1185. nouveau_fence_ref(NULL, &current);
  1186. nouveau_fence_ref(NULL, &screen->base.fence.current);
  1187. }
  1188. --- a/src/gallium/drivers/nouveau/nvc0/nvc0_surface.c
  1189. +++ b/src/gallium/drivers/nouveau/nvc0/nvc0_surface.c
  1190. @@ -206,11 +206,14 @@
  1191. bool m2mf;
  1192. unsigned dst_layer = dstz, src_layer = src_box->z;
  1193. + pipe_mutex_lock(nvc0->screen->base.push_mutex);
  1194. +
  1195. if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
  1196. nouveau_copy_buffer(&nvc0->base,
  1197. nv04_resource(dst), dstx,
  1198. nv04_resource(src), src_box->x, src_box->width);
  1199. NOUVEAU_DRV_STAT(&nvc0->screen->base, buf_copy_bytes, src_box->width);
  1200. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1201. return;
  1202. }
  1203. NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_copy_count, 1);
  1204. @@ -251,6 +254,7 @@
  1205. else
  1206. srect.base += src_mt->layer_stride;
  1207. }
  1208. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1209. return;
  1210. }
  1211. @@ -273,6 +277,7 @@
  1212. break;
  1213. }
  1214. nouveau_bufctx_reset(nvc0->bufctx, 0);
  1215. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1216. }
  1217. static void
  1218. @@ -291,8 +296,12 @@
  1219. assert(dst->texture->target != PIPE_BUFFER);
  1220. - if (!PUSH_SPACE(push, 32 + sf->depth))
  1221. + pipe_mutex_lock(nvc0->screen->base.push_mutex);
  1222. +
  1223. + if (!PUSH_SPACE(push, 32 + sf->depth)) {
  1224. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1225. return;
  1226. + }
  1227. PUSH_REFN (push, res->bo, res->domain | NOUVEAU_BO_WR);
  1228. @@ -357,6 +366,8 @@
  1229. IMMED_NVC0(push, NVC0_3D(COND_MODE), nvc0->cond_condmode);
  1230. nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
  1231. +
  1232. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1233. }
  1234. static void
  1235. @@ -542,8 +553,11 @@
  1236. assert(size % data_size == 0);
  1237. + pipe_mutex_lock(nvc0->screen->base.push_mutex);
  1238. +
  1239. if (data_size == 12) {
  1240. nvc0_clear_buffer_push(pipe, res, offset, size, data, data_size);
  1241. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1242. return;
  1243. }
  1244. @@ -553,8 +567,10 @@
  1245. nvc0_clear_buffer_push(pipe, res, offset, fixup_size, data, data_size);
  1246. offset += fixup_size;
  1247. size -= fixup_size;
  1248. - if (!size)
  1249. + if (!size) {
  1250. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1251. return;
  1252. + }
  1253. }
  1254. elements = size / data_size;
  1255. @@ -564,8 +580,10 @@
  1256. width &= ~0xff;
  1257. assert(width > 0);
  1258. - if (!PUSH_SPACE(push, 40))
  1259. + if (!PUSH_SPACE(push, 40)) {
  1260. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1261. return;
  1262. + }
  1263. PUSH_REFN (push, buf->bo, buf->domain | NOUVEAU_BO_WR);
  1264. @@ -613,6 +631,8 @@
  1265. }
  1266. nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
  1267. +
  1268. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1269. }
  1270. static void
  1271. @@ -635,8 +655,11 @@
  1272. assert(dst->texture->target != PIPE_BUFFER);
  1273. - if (!PUSH_SPACE(push, 32 + sf->depth))
  1274. + pipe_mutex_lock(nvc0->screen->base.push_mutex);
  1275. + if (!PUSH_SPACE(push, 32 + sf->depth)) {
  1276. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1277. return;
  1278. + }
  1279. PUSH_REFN (push, mt->base.bo, mt->base.domain | NOUVEAU_BO_WR);
  1280. @@ -685,6 +708,8 @@
  1281. IMMED_NVC0(push, NVC0_3D(COND_MODE), nvc0->cond_condmode);
  1282. nvc0->dirty_3d |= NVC0_NEW_3D_FRAMEBUFFER;
  1283. +
  1284. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1285. }
  1286. void
  1287. @@ -698,9 +723,13 @@
  1288. unsigned i, j, k;
  1289. uint32_t mode = 0;
  1290. + pipe_mutex_lock(nvc0->screen->base.push_mutex);
  1291. +
  1292. /* don't need NEW_BLEND, COLOR_MASK doesn't affect CLEAR_BUFFERS */
  1293. - if (!nvc0_state_validate_3d(nvc0, NVC0_NEW_3D_FRAMEBUFFER))
  1294. + if (!nvc0_state_validate_3d(nvc0, NVC0_NEW_3D_FRAMEBUFFER)) {
  1295. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1296. return;
  1297. + }
  1298. if (buffers & PIPE_CLEAR_COLOR && fb->nr_cbufs) {
  1299. BEGIN_NVC0(push, NVC0_3D(CLEAR_COLOR(0)), 4);
  1300. @@ -759,6 +788,8 @@
  1301. (j << NVC0_3D_CLEAR_BUFFERS_LAYER__SHIFT));
  1302. }
  1303. }
  1304. +
  1305. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1306. }
  1307. @@ -1163,8 +1194,8 @@
  1308. nvc0->samplers_dirty[4] |= 3;
  1309. if (nvc0->cond_query && !blit->render_condition_enable)
  1310. - nvc0->base.pipe.render_condition(&nvc0->base.pipe, nvc0->cond_query,
  1311. - nvc0->cond_cond, nvc0->cond_mode);
  1312. + nvc0_render_condition(&nvc0->base.pipe, nvc0->cond_query,
  1313. + nvc0->cond_cond, nvc0->cond_mode);
  1314. nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_VTX_TMP);
  1315. nouveau_bufctx_reset(nvc0->bufctx_3d, NVC0_BIND_3D_FB);
  1316. @@ -1626,6 +1657,8 @@
  1317. if (info->num_window_rectangles > 0 || info->window_rectangle_include)
  1318. eng3d = true;
  1319. + pipe_mutex_lock(nvc0->screen->base.push_mutex);
  1320. +
  1321. if (nvc0->screen->num_occlusion_queries_active)
  1322. IMMED_NVC0(push, NVC0_3D(SAMPLECNT_ENABLE), 0);
  1323. @@ -1636,6 +1669,8 @@
  1324. if (nvc0->screen->num_occlusion_queries_active)
  1325. IMMED_NVC0(push, NVC0_3D(SAMPLECNT_ENABLE), 1);
  1326. +
  1327. + pipe_mutex_unlock(nvc0->screen->base.push_mutex);
  1328. NOUVEAU_DRV_STAT(&nvc0->screen->base, tex_blit_count, 1);
  1329. }
  1330. --- a/src/gallium/drivers/nouveau/nvc0/nvc0_transfer.c
  1331. +++ b/src/gallium/drivers/nouveau/nvc0/nvc0_transfer.c
  1332. @@ -342,16 +342,18 @@
  1333. return !mt->base.fence_wr || nouveau_fence_wait(mt->base.fence_wr, &nvc0->base.debug);
  1334. }
  1335. -void *
  1336. -nvc0_miptree_transfer_map(struct pipe_context *pctx,
  1337. - struct pipe_resource *res,
  1338. - unsigned level,
  1339. - unsigned usage,
  1340. - const struct pipe_box *box,
  1341. - struct pipe_transfer **ptransfer)
  1342. +static void *
  1343. +nvc0_miptree_transfer_map_unlocked(
  1344. + struct pipe_context *pctx,
  1345. + struct pipe_resource *res,
  1346. + unsigned level,
  1347. + unsigned usage,
  1348. + const struct pipe_box *box,
  1349. + struct pipe_transfer **ptransfer)
  1350. {
  1351. struct nvc0_context *nvc0 = nvc0_context(pctx);
  1352. - struct nouveau_device *dev = nvc0->screen->base.device;
  1353. + struct nvc0_screen *screen = nvc0->screen;
  1354. + struct nouveau_device *dev = screen->base.device;
  1355. struct nv50_miptree *mt = nv50_miptree(res);
  1356. struct nvc0_transfer *tx;
  1357. uint32_t size;
  1358. @@ -465,9 +467,29 @@
  1359. return tx->rect[1].bo->map;
  1360. }
  1361. -void
  1362. -nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
  1363. - struct pipe_transfer *transfer)
  1364. +void *
  1365. +nvc0_miptree_transfer_map(
  1366. + struct pipe_context *pctx,
  1367. + struct pipe_resource *res,
  1368. + unsigned level,
  1369. + unsigned usage,
  1370. + const struct pipe_box *box,
  1371. + struct pipe_transfer **ptransfer)
  1372. +{
  1373. + struct nvc0_context *nvc0 = nvc0_context(pctx);
  1374. + struct nvc0_screen *screen = nvc0->screen;
  1375. +
  1376. + pipe_mutex_lock(screen->base.push_mutex);
  1377. + void *ret = nvc0_miptree_transfer_map_unlocked(
  1378. + pctx, res, level, usage, box, ptransfer);
  1379. + pipe_mutex_unlock(screen->base.push_mutex);
  1380. +
  1381. + return ret;
  1382. +}
  1383. +
  1384. +static void
  1385. +nvc0_miptree_transfer_unmap_unlocked(struct pipe_context *pctx,
  1386. + struct pipe_transfer *transfer)
  1387. {
  1388. struct nvc0_context *nvc0 = nvc0_context(pctx);
  1389. struct nvc0_transfer *tx = (struct nvc0_transfer *)transfer;
  1390. @@ -507,6 +529,18 @@
  1391. FREE(tx);
  1392. }
  1393. +void
  1394. +nvc0_miptree_transfer_unmap(struct pipe_context *pctx,
  1395. + struct pipe_transfer *transfer)
  1396. +{
  1397. + struct nvc0_context *nvc0 = nvc0_context(pctx);
  1398. + struct nvc0_screen *screen = nvc0->screen;
  1399. +
  1400. + pipe_mutex_lock(screen->base.push_mutex);
  1401. + nvc0_miptree_transfer_unmap_unlocked(pctx, transfer);
  1402. + pipe_mutex_unlock(screen->base.push_mutex);
  1403. +}
  1404. +
  1405. /* This happens rather often with DTD9/st. */
  1406. static void
  1407. nvc0_cb_push(struct nouveau_context *nv,
  1408. --- a/src/gallium/drivers/nouveau/nvc0/nvc0_vbo.c
  1409. +++ b/src/gallium/drivers/nouveau/nvc0/nvc0_vbo.c
  1410. @@ -940,6 +940,8 @@
  1411. struct nvc0_screen *screen = nvc0->screen;
  1412. int s;
  1413. + pipe_mutex_lock(screen->base.push_mutex);
  1414. +
  1415. /* NOTE: caller must ensure that (min_index + index_bias) is >= 0 */
  1416. nvc0->vb_elt_first = info->min_index + info->index_bias;
  1417. nvc0->vb_elt_limit = info->max_index - info->min_index;
  1418. @@ -1033,6 +1035,7 @@
  1419. nvc0_push_vbo(nvc0, info);
  1420. push->kick_notify = nvc0_default_kick_notify;
  1421. nouveau_pushbuf_bufctx(push, NULL);
  1422. + pipe_mutex_unlock(screen->base.push_mutex);
  1423. return;
  1424. }
  1425. @@ -1085,4 +1088,5 @@
  1426. nvc0_release_user_vbufs(nvc0);
  1427. nouveau_pushbuf_bufctx(push, NULL);
  1428. -}
  1429. + pipe_mutex_unlock(screen->base.push_mutex);
  1430. +}
  1431. --- a/src/gallium/drivers/nouveau/nvc0/nve4_compute.c
  1432. +++ b/src/gallium/drivers/nouveau/nvc0/nve4_compute.c
  1433. @@ -604,11 +604,14 @@
  1434. nve4_launch_grid(struct pipe_context *pipe, const struct pipe_grid_info *info)
  1435. {
  1436. struct nvc0_context *nvc0 = nvc0_context(pipe);
  1437. + struct nvc0_screen *screen = nvc0->screen;
  1438. struct nouveau_pushbuf *push = nvc0->base.pushbuf;
  1439. struct nve4_cp_launch_desc *desc;
  1440. uint64_t desc_gpuaddr;
  1441. struct nouveau_bo *desc_bo;
  1442. int ret;
  1443. +
  1444. + pipe_mutex_lock(screen->base.push_mutex);
  1445. desc = nve4_compute_alloc_launch_desc(&nvc0->base, &desc_bo, &desc_gpuaddr);
  1446. if (!desc) {
  1447. @@ -690,6 +693,7 @@
  1448. NOUVEAU_ERR("Failed to launch grid !\n");
  1449. nouveau_scratch_done(&nvc0->base);
  1450. nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_DESC);
  1451. + pipe_mutex_unlock(screen->base.push_mutex);
  1452. }