virtio_crypto_core.c

// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/virtio_config.h>
#include <linux/cpu.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
        if (vc_req) {
                /* req_data may carry sensitive parameters, so zero it on free */
                kzfree(vc_req->req_data);
                kfree(vc_req->sgs);
        }
}
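
/*
 * Completion callback for a data virtqueue.  Used buffers are drained with
 * further callbacks disabled; the do/while loop re-runs whenever
 * virtqueue_enable_cb() reports that more buffers arrived in the window
 * before callbacks were re-enabled, so no completion is missed.  The
 * per-queue lock is dropped around alg_cb because the algorithm callback
 * may submit follow-up work.
 */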
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
        struct virtio_crypto *vcrypto = vq->vdev->priv;
        struct virtio_crypto_request *vc_req;
        unsigned long flags;
        unsigned int len;
        unsigned int qid = vq->index;

        spin_lock_irqsave(&vcrypto->data_vq[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
                        spin_unlock_irqrestore(
                                &vcrypto->data_vq[qid].lock, flags);
                        if (vc_req->alg_cb)
                                vc_req->alg_cb(vc_req, len);
                        spin_lock_irqsave(
                                &vcrypto->data_vq[qid].lock, flags);
                }
        } while (!virtqueue_enable_cb(vq));
        spin_unlock_irqrestore(&vcrypto->data_vq[qid].lock, flags);
}
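
/*
 * Negotiate the virtqueue layout with the device: the data queues come
 * first, with the control queue last, matching the virtio crypto spec.
 * Each data queue also gets a crypto engine (the second argument of
 * crypto_engine_alloc_init() requests a realtime worker thread) so that
 * requests can be fed to the virtqueue from process context.
 */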
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
{
        vq_callback_t **callbacks;
        struct virtqueue **vqs;
        int ret = -ENOMEM;
        int i, total_vqs;
        const char **names;
        struct device *dev = &vi->vdev->dev;

        /*
         * We expect 1 data virtqueue, followed by N-1 more data queues
         * when multiqueue mode is used, followed by the control vq.
         */
        total_vqs = vi->max_data_queues + 1;

        /* Allocate space for find_vqs parameters */
        vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!vqs)
                goto err_vq;
        callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
        if (!callbacks)
                goto err_callback;
        names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
        if (!names)
                goto err_names;

        /* Parameters for control virtqueue */
        callbacks[total_vqs - 1] = NULL;
        names[total_vqs - 1] = "controlq";

        /* Allocate/initialize parameters for data virtqueues */
        for (i = 0; i < vi->max_data_queues; i++) {
                callbacks[i] = virtcrypto_dataq_callback;
                snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
                         "dataq.%d", i);
                names[i] = vi->data_vq[i].name;
        }

        ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
        if (ret)
                goto err_find;

        vi->ctrl_vq = vqs[total_vqs - 1];

        for (i = 0; i < vi->max_data_queues; i++) {
                spin_lock_init(&vi->data_vq[i].lock);
                vi->data_vq[i].vq = vqs[i];
                /* Initialize crypto engine */
                vi->data_vq[i].engine = crypto_engine_alloc_init(dev, 1);
                if (!vi->data_vq[i].engine) {
                        ret = -ENOMEM;
                        goto err_engine;
                }
        }

        kfree(names);
        kfree(callbacks);
        kfree(vqs);

        return 0;

err_engine:
        /* Tear down the engines created before the one that failed. */
        while (--i >= 0)
                crypto_engine_exit(vi->data_vq[i].engine);
err_find:
        kfree(names);
err_names:
        kfree(callbacks);
err_callback:
        kfree(vqs);
err_vq:
        return ret;
}

static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
{
        vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
                              GFP_KERNEL);
        if (!vi->data_vq)
                return -ENOMEM;

        return 0;
}
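
/*
 * Drop any affinity hints previously set on the data queues.  The hcpu
 * argument is unused for now; callers pass -1 to mean "all CPUs".
 */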
static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
{
        int i;

        if (vi->affinity_hint_set) {
                for (i = 0; i < vi->max_data_queues; i++)
                        virtqueue_set_affinity(vi->data_vq[i].vq, NULL);

                vi->affinity_hint_set = false;
        }
}
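
/*
 * Spread the data queues across the online CPUs, one queue per CPU, so a
 * queue's interrupt stays local to the CPU that feeds it.  With a single
 * queue the hint is cleared instead and placement is left to the
 * scheduler.
 */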
static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
{
        int i = 0;
        int cpu;

        /*
         * In single queue mode, we don't set the cpu affinity.
         */
        if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
                virtcrypto_clean_affinity(vcrypto, -1);
                return;
        }

        /*
         * In multiqueue mode, we let each queue be private to one cpu
         * by setting the affinity hint, which eliminates contention.
         *
         * TODO: add cpu hotplug support by registering a cpu notifier.
         */
        for_each_online_cpu(cpu) {
                virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
                if (++i >= vcrypto->max_data_queues)
                        break;
        }

        vcrypto->affinity_hint_set = true;
}

static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
        kfree(vi->data_vq);
}
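
/*
 * Allocate the per-queue bookkeeping, obtain the virtqueues from the
 * device, and pin the data queues to CPUs.  CPU hotplug is held off
 * around the affinity pass so the walk over online CPUs stays stable.
 */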
static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
        int ret;

        /* Allocate send & receive queues */
        ret = virtcrypto_alloc_queues(vi);
        if (ret)
                goto err;

        ret = virtcrypto_find_vqs(vi);
        if (ret)
                goto err_free;

        get_online_cpus();
        virtcrypto_set_affinity(vi);
        put_online_cpus();

        return 0;

err_free:
        virtcrypto_free_queues(vi);
err:
        return ret;
}
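
/*
 * Re-read the status field from config space and bring the device up or
 * down to match it.  VIRTIO_CRYPTO_S_HW_READY is the only defined bit;
 * anything else indicates a misbehaving host, so the device is marked
 * broken.
 */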
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
        u32 status;
        int err;

        virtio_cread(vcrypto->vdev,
                     struct virtio_crypto_config, status, &status);

        /*
         * Unknown status bits would be a host error and the driver
         * should consider the device to be broken.
         */
        if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
                dev_warn(&vcrypto->vdev->dev,
                         "Unknown status bits: 0x%x\n", status);

                virtio_break_device(vcrypto->vdev);
                return -EPERM;
        }

        if (vcrypto->status == status)
                return 0;

        vcrypto->status = status;

        if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
                err = virtcrypto_dev_start(vcrypto);
                if (err) {
                        dev_err(&vcrypto->vdev->dev,
                                "Failed to start virtio crypto device.\n");

                        return -EPERM;
                }
                dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
        } else {
                virtcrypto_dev_stop(vcrypto);
                dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
        }

        return 0;
}
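
/*
 * Start the worker behind each data queue's crypto engine.  On failure,
 * engines already started are torn down again so the caller sees
 * all-or-nothing semantics.
 */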
static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
{
        int32_t i;
        int ret;

        for (i = 0; i < vcrypto->max_data_queues; i++) {
                if (vcrypto->data_vq[i].engine) {
                        ret = crypto_engine_start(vcrypto->data_vq[i].engine);
                        if (ret)
                                goto err;
                }
        }

        return 0;

err:
        while (--i >= 0)
                if (vcrypto->data_vq[i].engine)
                        crypto_engine_exit(vcrypto->data_vq[i].engine);

        return ret;
}

static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
{
        u32 i;

        for (i = 0; i < vcrypto->max_data_queues; i++)
                if (vcrypto->data_vq[i].engine)
                        crypto_engine_exit(vcrypto->data_vq[i].engine);
}

static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
        struct virtio_device *vdev = vcrypto->vdev;

        virtcrypto_clean_affinity(vcrypto, -1);

        vdev->config->del_vqs(vdev);

        virtcrypto_free_queues(vcrypto);
}
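
/*
 * Device probe: validate the device (VIRTIO_F_VERSION_1 is mandatory and
 * config space must be readable), read the capability fields, register
 * the device with the device manager, set up virtqueues and crypto
 * engines, and finally let the first status read bring the accelerator
 * online.
 */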
static int virtcrypto_probe(struct virtio_device *vdev)
{
        int err = -EFAULT;
        struct virtio_crypto *vcrypto;
        u32 max_data_queues = 0, max_cipher_key_len = 0;
        u32 max_auth_key_len = 0;
        u64 max_size = 0;
        u32 cipher_algo_l = 0;
        u32 cipher_algo_h = 0;
        u32 hash_algo = 0;
        u32 mac_algo_l = 0;
        u32 mac_algo_h = 0;
        u32 aead_algo = 0;
        u32 crypto_services = 0;

        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                return -ENODEV;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
                /*
                 * If the accelerator is connected to a node with no memory
                 * there is no point in using the accelerator since the remote
                 * memory transaction will be very slow.
                 */
                dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
                return -EINVAL;
        }

        vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
                               dev_to_node(&vdev->dev));
        if (!vcrypto)
                return -ENOMEM;

        virtio_cread(vdev, struct virtio_crypto_config,
                     max_dataqueues, &max_data_queues);
        if (max_data_queues < 1)
                max_data_queues = 1;

        virtio_cread(vdev, struct virtio_crypto_config,
                     max_cipher_key_len, &max_cipher_key_len);
        virtio_cread(vdev, struct virtio_crypto_config,
                     max_auth_key_len, &max_auth_key_len);
        virtio_cread(vdev, struct virtio_crypto_config,
                     max_size, &max_size);
        virtio_cread(vdev, struct virtio_crypto_config,
                     crypto_services, &crypto_services);
        virtio_cread(vdev, struct virtio_crypto_config,
                     cipher_algo_l, &cipher_algo_l);
        virtio_cread(vdev, struct virtio_crypto_config,
                     cipher_algo_h, &cipher_algo_h);
        virtio_cread(vdev, struct virtio_crypto_config,
                     hash_algo, &hash_algo);
        virtio_cread(vdev, struct virtio_crypto_config,
                     mac_algo_l, &mac_algo_l);
        virtio_cread(vdev, struct virtio_crypto_config,
                     mac_algo_h, &mac_algo_h);
        virtio_cread(vdev, struct virtio_crypto_config,
                     aead_algo, &aead_algo);

        /* Add virtio crypto device to global table */
        err = virtcrypto_devmgr_add_dev(vcrypto);
        if (err) {
                dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
                goto free;
        }
        vcrypto->owner = THIS_MODULE;
        vdev->priv = vcrypto;
        vcrypto->vdev = vdev;
        spin_lock_init(&vcrypto->ctrl_lock);

        /* Use single data queue as default */
        vcrypto->curr_queue = 1;
        vcrypto->max_data_queues = max_data_queues;
        vcrypto->max_cipher_key_len = max_cipher_key_len;
        vcrypto->max_auth_key_len = max_auth_key_len;
        vcrypto->max_size = max_size;
        vcrypto->crypto_services = crypto_services;
        vcrypto->cipher_algo_l = cipher_algo_l;
        vcrypto->cipher_algo_h = cipher_algo_h;
        vcrypto->mac_algo_l = mac_algo_l;
        vcrypto->mac_algo_h = mac_algo_h;
        vcrypto->hash_algo = hash_algo;
        vcrypto->aead_algo = aead_algo;

        dev_info(&vdev->dev,
                 "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
                 vcrypto->max_data_queues,
                 vcrypto->max_cipher_key_len,
                 vcrypto->max_auth_key_len,
                 vcrypto->max_size);

        err = virtcrypto_init_vqs(vcrypto);
        if (err) {
                dev_err(&vdev->dev, "Failed to initialize vqs.\n");
                goto free_dev;
        }

        err = virtcrypto_start_crypto_engines(vcrypto);
        if (err)
                goto free_vqs;

        virtio_device_ready(vdev);

        err = virtcrypto_update_status(vcrypto);
        if (err)
                goto free_engines;

        return 0;

free_engines:
        virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
        vcrypto->vdev->config->reset(vdev);
        virtcrypto_del_vqs(vcrypto);
free_dev:
        virtcrypto_devmgr_rm_dev(vcrypto);
free:
        kfree(vcrypto);
        return err;
}
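
/*
 * Reclaim requests that were queued but never completed.  Only safe to
 * call once the device has been reset, when no buffer can still be in
 * flight.
 */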
static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
{
        struct virtio_crypto_request *vc_req;
        int i;
        struct virtqueue *vq;

        for (i = 0; i < vcrypto->max_data_queues; i++) {
                vq = vcrypto->data_vq[i].vq;
                while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
                        kfree(vc_req->req_data);
                        kfree(vc_req->sgs);
                }
        }
}
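
/*
 * Device removal: stop the accelerator, reset the device so nothing is
 * in flight, then tear everything down in the reverse order of probe.
 */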
static void virtcrypto_remove(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;

        dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

        if (virtcrypto_dev_started(vcrypto))
                virtcrypto_dev_stop(vcrypto);
        vdev->config->reset(vdev);
        virtcrypto_free_unused_reqs(vcrypto);
        virtcrypto_clear_crypto_engines(vcrypto);
        virtcrypto_del_vqs(vcrypto);
        virtcrypto_devmgr_rm_dev(vcrypto);
        kfree(vcrypto);
}

static void virtcrypto_config_changed(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;

        virtcrypto_update_status(vcrypto);
}

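/*
 * Suspend/resume: freeze resets the device and releases the queues and
 * engines; restore rebuilds them and restarts the accelerator, mirroring
 * the tail end of probe.
 */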
#ifdef CONFIG_PM_SLEEP
static int virtcrypto_freeze(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;

        vdev->config->reset(vdev);
        virtcrypto_free_unused_reqs(vcrypto);
        if (virtcrypto_dev_started(vcrypto))
                virtcrypto_dev_stop(vcrypto);

        virtcrypto_clear_crypto_engines(vcrypto);
        virtcrypto_del_vqs(vcrypto);
        return 0;
}

static int virtcrypto_restore(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;
        int err;

        err = virtcrypto_init_vqs(vcrypto);
        if (err)
                return err;

        err = virtcrypto_start_crypto_engines(vcrypto);
        if (err)
                goto free_vqs;

        virtio_device_ready(vdev);

        err = virtcrypto_dev_start(vcrypto);
        if (err) {
                dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
                goto free_engines;
        }

        return 0;

free_engines:
        virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
        vcrypto->vdev->config->reset(vdev);
        virtcrypto_del_vqs(vcrypto);
        return err;
}
#endif
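
/*
 * No device-specific feature bits are negotiated yet, so the feature
 * table below is empty; VIRTIO_F_VERSION_1 is a transport-level
 * requirement and is checked directly in virtcrypto_probe().
 */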
static unsigned int features[] = {
        /* none */
};

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static struct virtio_driver virtio_crypto_driver = {
        .driver.name         = KBUILD_MODNAME,
        .driver.owner        = THIS_MODULE,
        .feature_table       = features,
        .feature_table_size  = ARRAY_SIZE(features),
        .id_table            = id_table,
        .probe               = virtcrypto_probe,
        .remove              = virtcrypto_remove,
        .config_changed      = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze              = virtcrypto_freeze,
        .restore             = virtcrypto_restore,
#endif
};

module_virtio_driver(virtio_crypto_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");