xen-tpmfront.c

/*
 * Implementation of the Xen vTPM device frontend
 *
 * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2,
 * as published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/freezer.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/io/tpmif.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include "tpm.h"
#include <xen/platform_pci.h>

struct tpm_private {
	struct tpm_chip *chip;
	struct xenbus_device *dev;

	struct vtpm_shared_page *shr;

	unsigned int evtchn;
	int ring_ref;
	domid_t backend_id;
	int irq;
	wait_queue_head_t read_queue;
};

enum status_bits {
	VTPM_STATUS_RUNNING = 0x1,
	VTPM_STATUS_IDLE = 0x2,
	VTPM_STATUS_RESULT = 0x4,
	VTPM_STATUS_CANCELED = 0x8,
};
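
/*
 * Poll-once helper for wait_for_tpm_stat() below: returns true when all
 * bits in @mask are set in the chip status, or when a pending request has
 * been cancelled (reported through *canceled).
 */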
static bool wait_for_tpm_stat_cond(struct tpm_chip *chip, u8 mask,
				   bool check_cancel, bool *canceled)
{
	u8 status = chip->ops->status(chip);

	*canceled = false;
	if ((status & mask) == mask)
		return true;
	if (check_cancel && chip->ops->req_canceled(chip, status)) {
		*canceled = true;
		return true;
	}
	return false;
}

static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
			     unsigned long timeout, wait_queue_head_t *queue,
			     bool check_cancel)
{
	unsigned long stop;
	long rc;
	u8 status;
	bool canceled = false;

	/* check current status */
	status = chip->ops->status(chip);
	if ((status & mask) == mask)
		return 0;

	stop = jiffies + timeout;

	if (chip->flags & TPM_CHIP_FLAG_IRQ) {
again:
		timeout = stop - jiffies;
		if ((long)timeout <= 0)
			return -ETIME;
		rc = wait_event_interruptible_timeout(*queue,
			wait_for_tpm_stat_cond(chip, mask, check_cancel,
					       &canceled),
			timeout);
		if (rc > 0) {
			if (canceled)
				return -ECANCELED;
			return 0;
		}
		if (rc == -ERESTARTSYS && freezing(current)) {
			clear_thread_flag(TIF_SIGPENDING);
			goto again;
		}
	} else {
		do {
			tpm_msleep(TPM_TIMEOUT);
			status = chip->ops->status(chip);
			if ((status & mask) == mask)
				return 0;
		} while (time_before(jiffies, stop));
	}
	return -ETIME;
}
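
/*
 * Translate the shared-page state written by the backend into the
 * VTPM_STATUS_* bits expected by the TPM core.
 */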
static u8 vtpm_status(struct tpm_chip *chip)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);

	switch (priv->shr->state) {
	case VTPM_STATE_IDLE:
		return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
	case VTPM_STATE_FINISH:
		return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
	case VTPM_STATE_SUBMIT:
	case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
		return VTPM_STATUS_RUNNING;
	default:
		return 0;
	}
}

static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
{
	return status & VTPM_STATUS_CANCELED;
}
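
/*
 * Ask the backend to cancel the current command: flip the shared state to
 * VTPM_STATE_CANCEL and kick the event channel.
 */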
static void vtpm_cancel(struct tpm_chip *chip)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);

	priv->shr->state = VTPM_STATE_CANCEL;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);
}
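
/*
 * The command/response buffer starts in the shared page right after the
 * header and the array of extra-page references.
 */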
static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
{
	return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
}
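
/*
 * Copy the command into the shared page, mark it submitted, notify the
 * backend, and then wait (up to the ordinal's duration) for it to go idle.
 */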
static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);

	u32 ordinal;
	unsigned long duration;

	if (offset > PAGE_SIZE)
		return -EINVAL;

	if (offset + count > PAGE_SIZE)
		return -EINVAL;

	/* Wait for completion of any existing command or cancellation */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->timeout_c,
			      &priv->read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	memcpy(offset + (u8 *)shr, buf, count);
	shr->length = count;
	barrier();
	shr->state = VTPM_STATE_SUBMIT;
	wmb();
	notify_remote_via_evtchn(priv->evtchn);

	ordinal = be32_to_cpu(((struct tpm_input_header *)buf)->ordinal);
	duration = tpm_calc_ordinal_duration(chip, ordinal);

	if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
			      &priv->read_queue, true) < 0) {
		/* got a signal or timeout, try to cancel */
		vtpm_cancel(chip);
		return -ETIME;
	}

	return 0;
}
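
/*
 * Copy the response out of the shared page once the backend has posted a
 * result, clamping the length to the page and to the caller's buffer.
 */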
static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);
	struct vtpm_shared_page *shr = priv->shr;
	unsigned int offset = shr_data_offset(shr);
	size_t length = shr->length;

	if (shr->state == VTPM_STATE_IDLE)
		return -ECANCELED;

	/* In theory the wait at the end of _send makes this one unnecessary */
	if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->timeout_c,
			      &priv->read_queue, true) < 0) {
		vtpm_cancel(chip);
		return -ETIME;
	}

	if (offset > PAGE_SIZE)
		return -EIO;

	if (offset + length > PAGE_SIZE)
		length = PAGE_SIZE - offset;

	if (length > count)
		length = count;

	memcpy(buf, offset + (u8 *)shr, length);

	return length;
}

static const struct tpm_class_ops tpm_vtpm = {
	.status = vtpm_status,
	.recv = vtpm_recv,
	.send = vtpm_send,
	.cancel = vtpm_cancel,
	.req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_complete_val = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
	.req_canceled = vtpm_req_canceled,
};
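
/* Event-channel handler: wake up any waiter once the backend is done. */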
static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
{
	struct tpm_private *priv = dev_id;

	switch (priv->shr->state) {
	case VTPM_STATE_IDLE:
	case VTPM_STATE_FINISH:
		wake_up_interruptible(&priv->read_queue);
		break;
	case VTPM_STATE_SUBMIT:
	case VTPM_STATE_CANCEL:
	default:
		break;
	}
	return IRQ_HANDLED;
}
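
/* Allocate the TPM chip and tie it to this frontend's private data. */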
static int setup_chip(struct device *dev, struct tpm_private *priv)
{
	struct tpm_chip *chip;

	chip = tpmm_chip_alloc(dev, &tpm_vtpm);
	if (IS_ERR(chip))
		return PTR_ERR(chip);

	init_waitqueue_head(&priv->read_queue);

	priv->chip = chip;
	dev_set_drvdata(&chip->dev, priv);

	return 0;
}
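
/*
 * Allocate and grant the shared page, set up the event channel, and
 * advertise both to the backend via xenstore.
 */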
/* caller must clean up in case of errors */
static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
{
	struct xenbus_transaction xbt;
	const char *message = NULL;
	int rv;
	grant_ref_t gref;

	priv->shr = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!priv->shr) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}

	rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
	if (rv < 0)
		return rv;

	priv->ring_ref = gref;

	rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
	if (rv)
		return rv;

	rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
				       "tpmif", priv);
	if (rv <= 0) {
		xenbus_dev_fatal(dev, rv, "allocating TPM irq");
		return rv;
	}
	priv->irq = rv;

again:
	rv = xenbus_transaction_start(&xbt);
	if (rv) {
		xenbus_dev_fatal(dev, rv, "starting transaction");
		return rv;
	}

	rv = xenbus_printf(xbt, dev->nodename,
			   "ring-ref", "%u", priv->ring_ref);
	if (rv) {
		message = "writing ring-ref";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			   priv->evtchn);
	if (rv) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
	if (rv) {
		message = "writing feature-protocol-v2";
		goto abort_transaction;
	}

	rv = xenbus_transaction_end(xbt, 0);
	if (rv == -EAGAIN)
		goto again;
	if (rv) {
		xenbus_dev_fatal(dev, rv, "completing transaction");
		return rv;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_error(dev, rv, "%s", message);

	return rv;
}
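
/* Tear down everything set up by setup_ring() and free the private data. */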
static void ring_free(struct tpm_private *priv)
{
	if (!priv)
		return;

	if (priv->ring_ref)
		gnttab_end_foreign_access(priv->ring_ref, 0,
					  (unsigned long)priv->shr);
	else
		free_page((unsigned long)priv->shr);

	if (priv->irq)
		unbind_from_irqhandler(priv->irq, priv);

	kfree(priv);
}
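
/*
 * Frontend probe: allocate private data, create the chip, connect the
 * shared ring, and register with the TPM core.
 */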
static int tpmfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	struct tpm_private *priv;
	int rv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
		return -ENOMEM;
	}

	rv = setup_chip(&dev->dev, priv);
	if (rv) {
		kfree(priv);
		return rv;
	}

	rv = setup_ring(dev, priv);
	if (rv) {
		ring_free(priv);
		return rv;
	}

	tpm_get_timeouts(priv->chip);

	return tpm_chip_register(priv->chip);
}

static int tpmfront_remove(struct xenbus_device *dev)
{
	struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
	struct tpm_private *priv = dev_get_drvdata(&chip->dev);

	tpm_chip_unregister(chip);
	ring_free(priv);
	dev_set_drvdata(&chip->dev, NULL);
	return 0;
}

static int tpmfront_resume(struct xenbus_device *dev)
{
	/* A suspend/resume/migrate will interrupt a vTPM anyway */
	tpmfront_remove(dev);
	return tpmfront_probe(dev, NULL);
}
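
/*
 * React to backend state changes: require the v2 vTPM protocol before
 * moving to Connected, and tear down on Closing/Closed.
 */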
static void backend_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	switch (backend_state) {
	case XenbusStateInitialised:
	case XenbusStateConnected:
		if (dev->state == XenbusStateConnected)
			break;

		if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2",
					  0)) {
			xenbus_dev_fatal(dev, -EINVAL,
					 "vTPM protocol 2 required");
			return;
		}
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
	case XenbusStateClosed:
		device_unregister(&dev->dev);
		xenbus_frontend_closed(dev);
		break;
	default:
		break;
	}
}

static const struct xenbus_device_id tpmfront_ids[] = {
	{ "vtpm" },
	{ "" }
};
MODULE_ALIAS("xen:vtpm");

static struct xenbus_driver tpmfront_driver = {
	.ids = tpmfront_ids,
	.probe = tpmfront_probe,
	.remove = tpmfront_remove,
	.resume = tpmfront_resume,
	.otherend_changed = backend_changed,
};

static int __init xen_tpmfront_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_devices())
		return -ENODEV;

	return xenbus_register_frontend(&tpmfront_driver);
}
module_init(xen_tpmfront_init);

static void __exit xen_tpmfront_exit(void)
{
	xenbus_unregister_driver(&tpmfront_driver);
}
module_exit(xen_tpmfront_exit);

MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
MODULE_DESCRIPTION("Xen vTPM Driver");
MODULE_LICENSE("GPL");