  1. /* SPDX-License-Identifier: MIT */
  2. /*
  3. * Copyright (C) 2017 Google, Inc.
  4. *
  5. * Authors:
  6. * Sean Paul <seanpaul@chromium.org>
  7. */
  8. #include <drm/drmP.h>
  9. #include <drm/drm_hdcp.h>
  10. #include <linux/i2c.h>
  11. #include <linux/random.h>
  12. #include "intel_drv.h"
  13. #include "i915_reg.h"
  14. #define KEY_LOAD_TRIES 5
/*
 * Poll until the repeater signals that its KSV list is ready.
 *
 * The HDCP 1.4 spec allows the repeater at most 5 seconds to assemble the
 * downstream KSV list, so the shim's read_ksv_ready() hook is polled with
 * that upper bound (slack interval 1ms, backing off to 100ms).
 *
 * Returns 0 when ready, the shim's read error if a read failed, or
 * -ETIMEDOUT if the ready bit never asserted within the budget.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/* Poll for ksv list ready (spec says max time allowed is 5s) */
	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
  33. static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
  34. {
  35. struct i915_power_domains *power_domains = &dev_priv->power_domains;
  36. struct i915_power_well *power_well;
  37. enum i915_power_well_id id;
  38. bool enabled = false;
  39. /*
  40. * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
  41. * On all BXT+, SW can load the keys only when the PW#1 is turned on.
  42. */
  43. if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
  44. id = HSW_DISP_PW_GLOBAL;
  45. else
  46. id = SKL_DISP_PW_1;
  47. mutex_lock(&power_domains->lock);
  48. /* PG1 (power well #1) needs to be enabled */
  49. for_each_power_well(dev_priv, power_well) {
  50. if (power_well->id == id) {
  51. enabled = power_well->ops->is_enabled(dev_priv,
  52. power_well);
  53. break;
  54. }
  55. }
  56. mutex_unlock(&power_domains->lock);
  57. /*
  58. * Another req for hdcp key loadability is enabled state of pll for
  59. * cdclk. Without active crtc we wont land here. So we are assuming that
  60. * cdclk is already on.
  61. */
  62. return enabled;
  63. }
/*
 * Wipe any loaded HDCP keys and reset the key status register so a fresh
 * load attempt can start from a clean state.
 */
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
	I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);

	/* Clear all stale key-load and fuse status indications */
	I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
		   HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}
/*
 * Load the HDCP 1.4 key into the display HW, if it isn't resident already.
 *
 * Returns 0 on success (key loaded and validated by HW), -ENXIO when the
 * key is absent or failed validation, or a pcode/register-wait error.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Nothing to do if the key is already loaded and validated */
	val = I915_READ(HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only SKL and KBL
	 * differ in the key load trigger process from other platforms.
	 */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		/* SKL/KBL route the load request through pcode (PCU fw) */
		mutex_lock(&dev_priv->pcu_lock);
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		mutex_unlock(&dev_priv->pcu_lock);
		if (ret) {
			DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
				  ret);
			return ret;
		}
	} else {
		I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
/*
 * Feed one 32-bit word of the SHA-1 input stream to the HW hash engine and
 * wait for it to become ready for the next word.
 *
 * Returns 0 on success or -ETIMEDOUT if the SHA1 ready bit never asserts.
 * NOTE: the SHA index is tracked by the caller; it is not returned here
 * (the old "Returns updated SHA-1 index" comment was stale).
 */
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
	I915_WRITE(HDCP_SHA_TEXT, sha_text);
	if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
				    HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
		DRM_ERROR("Timed out waiting for SHA1 ready\n");
		return -ETIMEDOUT;
	}
	return 0;
}
  126. static
  127. u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
  128. {
  129. enum port port = intel_dig_port->base.port;
  130. switch (port) {
  131. case PORT_A:
  132. return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
  133. case PORT_B:
  134. return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
  135. case PORT_C:
  136. return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
  137. case PORT_D:
  138. return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
  139. case PORT_E:
  140. return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
  141. default:
  142. break;
  143. }
  144. DRM_ERROR("Unknown port %d\n", port);
  145. return -EINVAL;
  146. }
  147. static
  148. bool intel_hdcp_is_ksv_valid(u8 *ksv)
  149. {
  150. int i, ones = 0;
  151. /* KSV has 20 1's and 20 0's */
  152. for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
  153. ones += hweight8(ksv[i]);
  154. if (ones != 20)
  155. return false;
  156. return true;
  157. }
/*
 * Verify the repeater's V' against the SHA-1 digest computed by our HW.
 *
 * The receiver's V' is loaded into HDCP_SHA_V_PRIME(0..4). Then the byte
 * stream  KSV list || BINFO (DP) / BSTATUS (HDMI) || M0  is fed 32 bits at
 * a time through HDCP_SHA_TEXT, with HDCP_REP_CTL selecting per write how
 * many of the 32 bits are text vs. M0 (M0 is inserted by the HW itself and
 * never passes through SW). Finally the HW compares its digest against V'.
 *
 * Returns 0 on match, -ETIMEDOUT on a HW timeout, -ENXIO on SHA-1
 * mismatch, or a negative shim/write error.
 */
static
int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct drm_i915_private *dev_priv;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	dev_priv = intel_dig_port->base.base.dev->dev_private;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
		if (ret)
			return ret;
		I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++)
			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text, 24 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
			      sha_leftovers);
		return -EINVAL;
	}

	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
				    HDCP_SHA1_COMPLETE,
				    HDCP_SHA1_COMPLETE, 1)) {
		DRM_DEBUG_KMS("Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
  348. /* Implements Part 2 of the HDCP authorization procedure */
  349. static
  350. int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
  351. const struct intel_hdcp_shim *shim)
  352. {
  353. u8 bstatus[2], num_downstream, *ksv_fifo;
  354. int ret, i, tries = 3;
  355. ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
  356. if (ret) {
  357. DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
  358. return ret;
  359. }
  360. ret = shim->read_bstatus(intel_dig_port, bstatus);
  361. if (ret)
  362. return ret;
  363. if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
  364. DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
  365. DRM_ERROR("Max Topology Limit Exceeded\n");
  366. return -EPERM;
  367. }
  368. /*
  369. * When repeater reports 0 device count, HDCP1.4 spec allows disabling
  370. * the HDCP encryption. That implies that repeater can't have its own
  371. * display. As there is no consumption of encrypted content in the
  372. * repeater with 0 downstream devices, we are failing the
  373. * authentication.
  374. */
  375. num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
  376. if (num_downstream == 0)
  377. return -EINVAL;
  378. ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
  379. if (!ksv_fifo)
  380. return -ENOMEM;
  381. ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
  382. if (ret)
  383. goto err;
  384. /*
  385. * When V prime mismatches, DP Spec mandates re-read of
  386. * V prime atleast twice.
  387. */
  388. for (i = 0; i < tries; i++) {
  389. ret = intel_hdcp_validate_v_prime(intel_dig_port, shim,
  390. ksv_fifo, num_downstream,
  391. bstatus);
  392. if (!ret)
  393. break;
  394. }
  395. if (i == tries) {
  396. DRM_ERROR("V Prime validation failed.(%d)\n", ret);
  397. goto err;
  398. }
  399. DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
  400. num_downstream);
  401. ret = 0;
  402. err:
  403. kfree(ksv_fifo);
  404. return ret;
  405. }
/*
 * Implements Part 1 of the HDCP authorization procedure:
 * An generation, Aksv/Bksv exchange, R0/R0' comparison and enabling of
 * link encryption. On success with a repeater attached, it chains into
 * Part 2 (downstream authentication).
 */
static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
			   const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv;
	enum port port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/*
	 * The unions below let the same bytes be viewed both as 32-bit
	 * register halves (for I915_READ/WRITE) and as the byte stream the
	 * shim sends over DDC/AUX.
	 */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	dev_priv = intel_dig_port->base.base.dev->dev_private;

	port = intel_dig_port->base.port;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(intel_dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			DRM_ERROR("Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32());
	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_AN_READY,
				    HDCP_STATUS_AN_READY, 1)) {
		DRM_ERROR("Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	an.reg[0] = I915_READ(PORT_HDCP_ANLO(port));
	an.reg[1] = I915_READ(PORT_HDCP_ANHI(port));
	ret = shim->write_an_aksv(intel_dig_port, an.shim);
	if (ret)
		return ret;

	/* R0' generation starts from the moment Aksv is written */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	/* HDCP spec states that we must retry the bksv if it is invalid */
	for (i = 0; i < tries; i++) {
		ret = shim->read_bksv(intel_dig_port, bksv.shim);
		if (ret)
			return ret;
		if (intel_hdcp_is_ksv_valid(bksv.shim))
			break;
	}
	if (i == tries) {
		DRM_ERROR("HDCP failed, Bksv is invalid\n");
		return -ENODEV;
	}

	I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]);
	I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]);

	ret = shim->repeater_present(intel_dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		I915_WRITE(HDCP_REP_CTL,
			   intel_hdcp_get_repeater_ctl(intel_dig_port));

	ret = shim->toggle_signalling(intel_dig_port, true);
	if (ret)
		return ret;

	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		DRM_ERROR("Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(intel_dig_port, ri.shim);
		if (ret)
			return ret;
		I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) &
		    (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	if (i == tries) {
		DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
			  I915_READ(PORT_HDCP_STATUS(port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_ENC, HDCP_STATUS_ENC, 20)) {
		DRM_ERROR("Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	if (repeater_present)
		return intel_hdcp_auth_downstream(intel_dig_port, shim);

	DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
	return 0;
}
  538. static
  539. struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
  540. {
  541. return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
  542. }
  543. static int _intel_hdcp_disable(struct intel_connector *connector)
  544. {
  545. struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
  546. struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
  547. enum port port = intel_dig_port->base.port;
  548. int ret;
  549. DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
  550. connector->base.name, connector->base.base.id);
  551. I915_WRITE(PORT_HDCP_CONF(port), 0);
  552. if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
  553. 20)) {
  554. DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
  555. return -ETIMEDOUT;
  556. }
  557. ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
  558. if (ret) {
  559. DRM_ERROR("Failed to disable HDCP signalling\n");
  560. return ret;
  561. }
  562. DRM_DEBUG_KMS("HDCP is disabled\n");
  563. return 0;
  564. }
/*
 * Bring up HDCP on the connector: load the key (with retries) and run the
 * authentication procedure (also with retries, per spec). Caller holds
 * connector->hdcp_mutex.
 *
 * Returns 0 on success, -ENXIO if the key cannot be loaded right now, or
 * the last key-load/authentication error.
 */
static int _intel_hdcp_enable(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	int i, ret, tries = 3;

	DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
		      connector->base.name, connector->base.base.id);

	if (!hdcp_key_loadable(dev_priv)) {
		DRM_ERROR("HDCP key Load is not possible\n");
		return -ENXIO;
	}

	/* Clear the keys between attempts so each load starts clean */
	for (i = 0; i < KEY_LOAD_TRIES; i++) {
		ret = intel_hdcp_load_keys(dev_priv);
		if (!ret)
			break;
		intel_hdcp_clear_keys(dev_priv);
	}
	if (ret) {
		DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
		return ret;
	}

	/* Incase of authentication failures, HDCP spec expects reauth. */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_auth(conn_to_dig_port(connector),
				      connector->hdcp_shim);
		if (!ret)
			return 0;

		DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);

		/* Ensuring HDCP encryption and signalling are stopped. */
		_intel_hdcp_disable(connector);
	}

	DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret);
	return ret;
}
  598. static void intel_hdcp_check_work(struct work_struct *work)
  599. {
  600. struct intel_connector *connector = container_of(to_delayed_work(work),
  601. struct intel_connector,
  602. hdcp_check_work);
  603. if (!intel_hdcp_check_link(connector))
  604. schedule_delayed_work(&connector->hdcp_check_work,
  605. DRM_HDCP_CHECK_PERIOD_MS);
  606. }
/*
 * Worker that mirrors the internal hdcp_value into the connector's
 * content_protection property. Takes connection_mutex before hdcp_mutex;
 * that ordering must match the rest of the driver to avoid deadlock.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_connector *connector = container_of(work,
							 struct intel_connector,
							 hdcp_prop_work);
	struct drm_device *dev = connector->base.dev;
	struct drm_connector_state *state;

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	mutex_lock(&connector->hdcp_mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If hdcp_value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		state = connector->base.state;
		state->content_protection = connector->hdcp_value;
	}

	mutex_unlock(&connector->hdcp_mutex);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
  628. bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
  629. {
  630. /* PORT E doesn't have HDCP, and PORT F is disabled */
  631. return ((INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
  632. !IS_CHERRYVIEW(dev_priv) && port < PORT_E);
  633. }
  634. int intel_hdcp_init(struct intel_connector *connector,
  635. const struct intel_hdcp_shim *hdcp_shim)
  636. {
  637. int ret;
  638. ret = drm_connector_attach_content_protection_property(
  639. &connector->base);
  640. if (ret)
  641. return ret;
  642. connector->hdcp_shim = hdcp_shim;
  643. mutex_init(&connector->hdcp_mutex);
  644. INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work);
  645. INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work);
  646. return 0;
  647. }
  648. int intel_hdcp_enable(struct intel_connector *connector)
  649. {
  650. int ret;
  651. if (!connector->hdcp_shim)
  652. return -ENOENT;
  653. mutex_lock(&connector->hdcp_mutex);
  654. ret = _intel_hdcp_enable(connector);
  655. if (ret)
  656. goto out;
  657. connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
  658. schedule_work(&connector->hdcp_prop_work);
  659. schedule_delayed_work(&connector->hdcp_check_work,
  660. DRM_HDCP_CHECK_PERIOD_MS);
  661. out:
  662. mutex_unlock(&connector->hdcp_mutex);
  663. return ret;
  664. }
  665. int intel_hdcp_disable(struct intel_connector *connector)
  666. {
  667. int ret = 0;
  668. if (!connector->hdcp_shim)
  669. return -ENOENT;
  670. mutex_lock(&connector->hdcp_mutex);
  671. if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
  672. connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
  673. ret = _intel_hdcp_disable(connector);
  674. }
  675. mutex_unlock(&connector->hdcp_mutex);
  676. cancel_delayed_work_sync(&connector->hdcp_check_work);
  677. return ret;
  678. }
  679. void intel_hdcp_atomic_check(struct drm_connector *connector,
  680. struct drm_connector_state *old_state,
  681. struct drm_connector_state *new_state)
  682. {
  683. uint64_t old_cp = old_state->content_protection;
  684. uint64_t new_cp = new_state->content_protection;
  685. struct drm_crtc_state *crtc_state;
  686. if (!new_state->crtc) {
  687. /*
  688. * If the connector is being disabled with CP enabled, mark it
  689. * desired so it's re-enabled when the connector is brought back
  690. */
  691. if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
  692. new_state->content_protection =
  693. DRM_MODE_CONTENT_PROTECTION_DESIRED;
  694. return;
  695. }
  696. /*
  697. * Nothing to do if the state didn't change, or HDCP was activated since
  698. * the last commit
  699. */
  700. if (old_cp == new_cp ||
  701. (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
  702. new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
  703. return;
  704. crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
  705. new_state->crtc);
  706. crtc_state->mode_changed = true;
  707. }
/*
 * Implements Part 3 of the HDCP authorization procedure: periodic link
 * integrity check. Verifies encryption is still live and the shim's link
 * check passes; on failure attempts a full disable + re-enable cycle,
 * updating hdcp_value / the property worker accordingly.
 *
 * Returns 0 when the link is healthy (or HDCP is undesired), -ENOENT when
 * the connector has no shim, or a negative error from the recovery path.
 */
int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
	enum port port = intel_dig_port->base.port;
	int ret = 0;

	if (!connector->hdcp_shim)
		return -ENOENT;

	mutex_lock(&connector->hdcp_mutex);

	if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	/* Encryption must still be active on the port */
	if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
		DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n",
			  connector->base.name, connector->base.base.id,
			  I915_READ(PORT_HDCP_STATUS(port)));
		ret = -ENXIO;
		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&connector->hdcp_prop_work);
		goto out;
	}

	/* Shim reports the link good: (re)assert ENABLED and we're done */
	if (connector->hdcp_shim->check_link(intel_dig_port)) {
		if (connector->hdcp_value !=
		    DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			connector->hdcp_value =
				DRM_MODE_CONTENT_PROTECTION_ENABLED;
			schedule_work(&connector->hdcp_prop_work);
		}
		goto out;
	}

	DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
		      connector->base.name, connector->base.base.id);

	/* Recovery: full teardown followed by a fresh authentication */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&connector->hdcp_prop_work);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&connector->hdcp_prop_work);
		goto out;
	}

out:
	mutex_unlock(&connector->hdcp_mutex);
	return ret;
}