intel_lrc.c

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"
#include "igt_flush_test.h"

#include "mock_context.h"
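
/*
 * A "spinner" is a small batch buffer that loops on the GPU until it is
 * explicitly terminated from the CPU, plus a separate status page used as
 * scratch space so we can tell when the batch has actually started
 * executing. The live tests below use spinners to keep an engine busy
 * while exercising execlists preemption.
 */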
struct spinner {
        struct drm_i915_private *i915;
        struct drm_i915_gem_object *hws;
        struct drm_i915_gem_object *obj;
        u32 *batch;
        void *seqno;
};
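
/*
 * spinner_init - allocate and CPU-map the two backing objects of a spinner:
 * the status page (spin->hws) that the batch writes its seqno into, and the
 * page holding the batch itself (spin->obj). Both stay mapped so the batch
 * can be written now and terminated later directly from the CPU.
 */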
static int spinner_init(struct spinner *spin, struct drm_i915_private *i915)
{
        unsigned int mode;
        void *vaddr;
        int err;

        GEM_BUG_ON(INTEL_GEN(i915) < 8);

        memset(spin, 0, sizeof(*spin));
        spin->i915 = i915;

        spin->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(spin->hws)) {
                err = PTR_ERR(spin->hws);
                goto err;
        }

        spin->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
        if (IS_ERR(spin->obj)) {
                err = PTR_ERR(spin->obj);
                goto err_hws;
        }

        i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
        vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_obj;
        }
        spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);

        mode = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
        vaddr = i915_gem_object_pin_map(spin->obj, mode);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
                goto err_unpin_hws;
        }
        spin->batch = vaddr;

        return 0;

err_unpin_hws:
        i915_gem_object_unpin_map(spin->hws);
err_obj:
        i915_gem_object_put(spin->obj);
err_hws:
        i915_gem_object_put(spin->hws);
err:
        return err;
}
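
/*
 * Each fence context gets its own u32 slot in the status page:
 * seqno_offset() picks the slot and hws_address() turns it into the GPU
 * address used by the MI_STORE_DWORD_IMM in the spinner batch.
 */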
static unsigned int seqno_offset(u64 fence)
{
        return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
                       const struct i915_request *rq)
{
        return hws->node.start + seqno_offset(rq->fence.context);
}
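
/*
 * emit_recurse_batch - bind the spinner objects into the request's ppGTT
 * and write the batch: store the request's seqno into the status page,
 * emit the caller's arbitration command (MI_ARB_CHECK inserts an
 * arbitration point at which the spinner may be preempted; MI_NOOP does
 * not), then jump back to the start of the batch so it spins until
 * spinner_end() rewrites the first dword to MI_BATCH_BUFFER_END.
 */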
static int emit_recurse_batch(struct spinner *spin,
                              struct i915_request *rq,
                              u32 arbitration_command)
{
        struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
        struct i915_vma *hws, *vma;
        u32 *batch;
        int err;

        vma = i915_vma_instance(spin->obj, vm, NULL);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        hws = i915_vma_instance(spin->hws, vm, NULL);
        if (IS_ERR(hws))
                return PTR_ERR(hws);

        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
                return err;

        err = i915_vma_pin(hws, 0, 0, PIN_USER);
        if (err)
                goto unpin_vma;

        err = i915_vma_move_to_active(vma, rq, 0);
        if (err)
                goto unpin_hws;

        if (!i915_gem_object_has_active_reference(vma->obj)) {
                i915_gem_object_get(vma->obj);
                i915_gem_object_set_active_reference(vma->obj);
        }

        err = i915_vma_move_to_active(hws, rq, 0);
        if (err)
                goto unpin_hws;

        if (!i915_gem_object_has_active_reference(hws->obj)) {
                i915_gem_object_get(hws->obj);
                i915_gem_object_set_active_reference(hws->obj);
        }

        batch = spin->batch;

        *batch++ = MI_STORE_DWORD_IMM_GEN4;
        *batch++ = lower_32_bits(hws_address(hws, rq));
        *batch++ = upper_32_bits(hws_address(hws, rq));
        *batch++ = rq->fence.seqno;

        *batch++ = arbitration_command;

        *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
        *batch++ = lower_32_bits(vma->node.start);
        *batch++ = upper_32_bits(vma->node.start);

        *batch++ = MI_BATCH_BUFFER_END; /* not reached */

        i915_gem_chipset_flush(spin->i915);

        err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);

unpin_hws:
        i915_vma_unpin(hws);
unpin_vma:
        i915_vma_unpin(vma);
        return err;
}
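
/*
 * spinner_create_request - allocate a request on the given engine/context
 * and attach a spinning batch to it. On error the request is still added
 * (so it is not leaked) and an ERR_PTR is returned instead.
 */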
static struct i915_request *
spinner_create_request(struct spinner *spin,
                       struct i915_gem_context *ctx,
                       struct intel_engine_cs *engine,
                       u32 arbitration_command)
{
        struct i915_request *rq;
        int err;

        rq = i915_request_alloc(engine, ctx);
        if (IS_ERR(rq))
                return rq;

        err = emit_recurse_batch(spin, rq, arbitration_command);
        if (err) {
                i915_request_add(rq);
                return ERR_PTR(err);
        }

        return rq;
}
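
/* Read back the seqno the spinner wrote into its status-page slot. */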
static u32 hws_seqno(const struct spinner *spin, const struct i915_request *rq)
{
        u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

        return READ_ONCE(*seqno);
}
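
/*
 * spinner_end - stop a spinner by overwriting the first instruction of its
 * batch with MI_BATCH_BUFFER_END and flushing so the GPU picks up the
 * update the next time the batch loops back to its start.
 */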
static void spinner_end(struct spinner *spin)
{
        *spin->batch = MI_BATCH_BUFFER_END;
        i915_gem_chipset_flush(spin->i915);
}
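
/*
 * spinner_fini - terminate the batch (if still running) and release the
 * CPU mappings and object references taken in spinner_init().
 */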
static void spinner_fini(struct spinner *spin)
{
        spinner_end(spin);

        i915_gem_object_unpin_map(spin->obj);
        i915_gem_object_put(spin->obj);

        i915_gem_object_unpin_map(spin->hws);
        i915_gem_object_put(spin->hws);
}
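
/*
 * wait_for_spinner - wait for the request to be submitted to hardware and
 * then for its seqno to appear in the status page, i.e. evidence that the
 * spinning batch has actually started executing on the engine.
 */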
static bool wait_for_spinner(struct spinner *spin, struct i915_request *rq)
{
        if (!wait_event_timeout(rq->execute,
                                READ_ONCE(rq->global_seqno),
                                msecs_to_jiffies(10)))
                return false;

        return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
                                               rq->fence.seqno),
                             10) &&
                 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
                                            rq->fence.seqno),
                          1000));
}
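
/*
 * live_sanitycheck - on every engine, submit a single spinner, check that
 * it starts, then terminate it and flush. This verifies the spinner
 * infrastructure itself before the preemption tests rely on it.
 */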
static int live_sanitycheck(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct intel_engine_cs *engine;
        struct i915_gem_context *ctx;
        enum intel_engine_id id;
        struct spinner spin;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_CONTEXTS(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);

        if (spinner_init(&spin, i915))
                goto err_unlock;

        ctx = kernel_context(i915);
        if (!ctx)
                goto err_spin;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                rq = spinner_create_request(&spin, ctx, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin, rq)) {
                        GEM_TRACE("spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx;
                }

                spinner_end(&spin);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx;
                }
        }

        err = 0;
err_ctx:
        kernel_context_close(ctx);
err_spin:
        spinner_fini(&spin);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
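
/*
 * live_preempt - submit a spinner from a minimum-priority context, then a
 * second spinner from a maximum-priority context on the same engine. With
 * MI_ARB_CHECK in the low-priority batch, the scheduler should preempt it,
 * so we expect to see the high-priority spinner running while the first
 * one is still spinning.
 */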
static int live_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);

        if (spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;
        ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                rq = spinner_create_request(&spin_lo, ctx_lo, engine,
                                            MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin_lo, rq)) {
                        GEM_TRACE("lo spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                rq = spinner_create_request(&spin_hi, ctx_hi, engine,
                                            MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin_hi, rq)) {
                        GEM_TRACE("hi spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                spinner_end(&spin_hi);
                spinner_end(&spin_lo);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        spinner_fini(&spin_lo);
err_spin_hi:
        spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
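
/*
 * live_late_preempt - both contexts start at default priority, so the
 * second spinner must not overtake the first. Only after the second
 * request is bumped to I915_PRIORITY_MAX via engine->schedule() do we
 * expect it to preempt the low-priority spinner.
 */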
static int live_late_preempt(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        struct i915_sched_attr attr = {};
        enum intel_engine_id id;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);

        if (spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                rq = spinner_create_request(&spin_lo, ctx_lo, engine,
                                            MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin_lo, rq)) {
                        pr_err("First context failed to start\n");
                        goto err_wedged;
                }

                rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
                if (IS_ERR(rq)) {
                        spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (wait_for_spinner(&spin_hi, rq)) {
                        pr_err("Second context overtook first?\n");
                        goto err_wedged;
                }

                attr.priority = I915_PRIORITY_MAX;
                engine->schedule(rq, &attr);

                if (!wait_for_spinner(&spin_hi, rq)) {
                        pr_err("High priority context failed to preempt the low priority context\n");
                        GEM_TRACE_DUMP();
                        goto err_wedged;
                }

                spinner_end(&spin_hi);
                spinner_end(&spin_lo);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        spinner_fini(&spin_lo);
err_spin_hi:
        spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;

err_wedged:
        spinner_end(&spin_hi);
        spinner_end(&spin_lo);
        i915_gem_set_wedged(i915);
        err = -EIO;
        goto err_ctx_lo;
}
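
/*
 * live_preempt_hang - as live_preempt, but with
 * execlists.preempt_hang.inject_hang set so that the preemption attempt
 * deliberately stalls. The test then performs a per-engine reset and
 * checks that the high-priority spinner still runs afterwards, i.e. that
 * we recover cleanly from a stuck preemption.
 */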
static int live_preempt_hang(void *arg)
{
        struct drm_i915_private *i915 = arg;
        struct i915_gem_context *ctx_hi, *ctx_lo;
        struct spinner spin_hi, spin_lo;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = -ENOMEM;

        if (!HAS_LOGICAL_RING_PREEMPTION(i915))
                return 0;

        if (!intel_has_reset_engine(i915))
                return 0;

        mutex_lock(&i915->drm.struct_mutex);

        if (spinner_init(&spin_hi, i915))
                goto err_unlock;

        if (spinner_init(&spin_lo, i915))
                goto err_spin_hi;

        ctx_hi = kernel_context(i915);
        if (!ctx_hi)
                goto err_spin_lo;
        ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;

        ctx_lo = kernel_context(i915);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;

        for_each_engine(engine, i915, id) {
                struct i915_request *rq;

                if (!intel_engine_has_preemption(engine))
                        continue;

                rq = spinner_create_request(&spin_lo, ctx_lo, engine,
                                            MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                i915_request_add(rq);
                if (!wait_for_spinner(&spin_lo, rq)) {
                        GEM_TRACE("lo spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                rq = spinner_create_request(&spin_hi, ctx_hi, engine,
                                            MI_ARB_CHECK);
                if (IS_ERR(rq)) {
                        spinner_end(&spin_lo);
                        err = PTR_ERR(rq);
                        goto err_ctx_lo;
                }

                init_completion(&engine->execlists.preempt_hang.completion);
                engine->execlists.preempt_hang.inject_hang = true;

                i915_request_add(rq);

                if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
                                                 HZ / 10)) {
                        pr_err("Preemption did not occur within timeout!");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
                i915_reset_engine(engine, NULL);
                clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);

                engine->execlists.preempt_hang.inject_hang = false;

                if (!wait_for_spinner(&spin_hi, rq)) {
                        GEM_TRACE("hi spinner failed to start\n");
                        GEM_TRACE_DUMP();
                        i915_gem_set_wedged(i915);
                        err = -EIO;
                        goto err_ctx_lo;
                }

                spinner_end(&spin_hi);
                spinner_end(&spin_lo);
                if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
                        err = -EIO;
                        goto err_ctx_lo;
                }
        }

        err = 0;
err_ctx_lo:
        kernel_context_close(ctx_lo);
err_ctx_hi:
        kernel_context_close(ctx_hi);
err_spin_lo:
        spinner_fini(&spin_lo);
err_spin_hi:
        spinner_fini(&spin_hi);
err_unlock:
        igt_flush_test(i915, I915_WAIT_LOCKED);
        mutex_unlock(&i915->drm.struct_mutex);
        return err;
}
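
/*
 * Entry point for the execlists live selftests: skipped entirely when the
 * platform has no execlists support or the GPU is already terminally
 * wedged.
 */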
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
        static const struct i915_subtest tests[] = {
                SUBTEST(live_sanitycheck),
                SUBTEST(live_preempt),
                SUBTEST(live_late_preempt),
                SUBTEST(live_preempt_hang),
        };

        if (!HAS_EXECLISTS(i915))
                return 0;

        if (i915_terminally_wedged(&i915->gpu_error))
                return 0;

        return i915_subtests(tests, i915);
}