tee_core.c

/*
 * Copyright (c) 2015-2016, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/uaccess.h>
#include "tee_private.h"

#define TEE_NUM_DEVICES 32

#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))

/*
 * Unprivileged devices in the lower half range and privileged devices in
 * the upper half range.
 */
static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
static DEFINE_SPINLOCK(driver_lock);

static struct class *tee_class;
static dev_t tee_devt;
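
/*
 * Opening the character device takes a reference on the underlying
 * tee_device and allocates a fresh tee_context that is handed to the
 * backend driver via its open() op. Both are dropped again from
 * tee_release().
 */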
static int tee_open(struct inode *inode, struct file *filp)
{
	int rc;
	struct tee_device *teedev;
	struct tee_context *ctx;

	teedev = container_of(inode->i_cdev, struct tee_device, cdev);
	if (!tee_device_get(teedev))
		return -EINVAL;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto err;
	}

	kref_init(&ctx->refcount);
	ctx->teedev = teedev;
	INIT_LIST_HEAD(&ctx->list_shm);
	filp->private_data = ctx;
	rc = teedev->desc->ops->open(ctx);
	if (rc)
		goto err;

	return 0;
err:
	kfree(ctx);
	tee_device_put(teedev);
	return rc;
}
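
/*
 * A tee_context is reference counted. teedev_ctx_get() and
 * teedev_ctx_put() become no-ops once ctx->releasing is set, so the final
 * release performed by teedev_ctx_release() cannot be re-triggered.
 */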
void teedev_ctx_get(struct tee_context *ctx)
{
	if (ctx->releasing)
		return;

	kref_get(&ctx->refcount);
}

static void teedev_ctx_release(struct kref *ref)
{
	struct tee_context *ctx = container_of(ref, struct tee_context,
					       refcount);
	ctx->releasing = true;
	ctx->teedev->desc->ops->release(ctx);
	kfree(ctx);
}

void teedev_ctx_put(struct tee_context *ctx)
{
	if (ctx->releasing)
		return;

	kref_put(&ctx->refcount, teedev_ctx_release);
}

static void teedev_close_context(struct tee_context *ctx)
{
	tee_device_put(ctx->teedev);
	teedev_ctx_put(ctx);
}

static int tee_release(struct inode *inode, struct file *filp)
{
	teedev_close_context(filp->private_data);
	return 0;
}

static int tee_ioctl_version(struct tee_context *ctx,
			     struct tee_ioctl_version_data __user *uvers)
{
	struct tee_ioctl_version_data vers;

	ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);

	if (ctx->teedev->desc->flags & TEE_DESC_PRIVILEGED)
		vers.gen_caps |= TEE_GEN_CAP_PRIVILEGED;

	if (copy_to_user(uvers, &vers, sizeof(vers)))
		return -EFAULT;

	return 0;
}

static int tee_ioctl_shm_alloc(struct tee_context *ctx,
			       struct tee_ioctl_shm_alloc_data __user *udata)
{
	long ret;
	struct tee_ioctl_shm_alloc_data data;
	struct tee_shm *shm;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	data.id = shm->id;
	data.flags = shm->flags;
	data.size = shm->size;

	if (copy_to_user(udata, &data, sizeof(data)))
		ret = -EFAULT;
	else
		ret = tee_shm_get_fd(shm);

	/*
	 * When user space closes the file descriptor the shared memory
	 * should be freed or if tee_shm_get_fd() failed then it will
	 * be freed immediately.
	 */
	tee_shm_put(shm);
	return ret;
}

static int
tee_ioctl_shm_register(struct tee_context *ctx,
		       struct tee_ioctl_shm_register_data __user *udata)
{
	long ret;
	struct tee_ioctl_shm_register_data data;
	struct tee_shm *shm;

	if (copy_from_user(&data, udata, sizeof(data)))
		return -EFAULT;

	/* Currently no input flags are supported */
	if (data.flags)
		return -EINVAL;

	shm = tee_shm_register(ctx, data.addr, data.length,
			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	data.id = shm->id;
	data.flags = shm->flags;
	data.length = shm->size;

	if (copy_to_user(udata, &data, sizeof(data)))
		ret = -EFAULT;
	else
		ret = tee_shm_get_fd(shm);
	/*
	 * When user space closes the file descriptor the shared memory
	 * should be freed or if tee_shm_get_fd() failed then it will
	 * be freed immediately.
	 */
	tee_shm_put(shm);
	return ret;
}
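
/*
 * Copy parameters from the user supplied ioctl buffer. Memref parameters
 * are resolved into struct tee_shm pointers with an increased reference
 * count; the caller is responsible for calling tee_shm_put() on every
 * resolved shared memory object.
 */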
static int params_from_user(struct tee_context *ctx, struct tee_param *params,
			    size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_shm *shm;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits have to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		params[n].attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			params[n].u.value.a = ip.a;
			params[n].u.value.b = ip.b;
			params[n].u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * If we fail to get a pointer to a shared memory
			 * object (and increase the ref count) from an
			 * identifier we return an error. All pointers that
			 * have been added to params have an increased ref
			 * count. It's the caller's responsibility to do
			 * tee_shm_put() on all resolved pointers.
			 */
			shm = tee_shm_get_from_id(ctx, ip.c);
			if (IS_ERR(shm))
				return PTR_ERR(shm);

			/*
			 * Ensure offset + size does not overflow offset
			 * and does not overflow the size of the referred
			 * shared memory object.
			 */
			if ((ip.a + ip.b) < ip.a ||
			    (ip.a + ip.b) > shm->size) {
				tee_shm_put(shm);
				return -EINVAL;
			}

			params[n].u.memref.shm_offs = ip.a;
			params[n].u.memref.size = ip.b;
			params[n].u.memref.shm = shm;
			break;
		default:
			/* Unknown attribute */
			return -EINVAL;
		}
	}
	return 0;
}
static int params_to_user(struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param __user *up = uparams + n;
		struct tee_param *p = params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			if (put_user(p->u.value.a, &up->a) ||
			    put_user(p->u.value.b, &up->b) ||
			    put_user(p->u.value.c, &up->c))
				return -EFAULT;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			if (put_user((u64)p->u.memref.size, &up->b))
				return -EFAULT;
		default:
			break;
		}
	}
	return 0;
}
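
/*
 * TEE_IOC_OPEN_SESSION and TEE_IOC_INVOKE pass a variable sized buffer:
 * an argument struct immediately followed by arg.num_params parameter
 * entries. buf_len is required to match that layout exactly.
 */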
static int tee_ioctl_open_session(struct tee_context *ctx,
				  struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_open_session_arg __user *uarg;
	struct tee_ioctl_open_session_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;
	bool have_session = false;

	if (!ctx->teedev->desc->ops->open_session)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
	if (rc)
		goto out;
	have_session = true;

	if (put_user(arg.session, &uarg->session) ||
	    put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	/*
	 * If we've succeeded to open the session but failed to communicate
	 * it back to user space, close the session again to avoid leakage.
	 */
	if (rc && have_session && ctx->teedev->desc->ops->close_session)
		ctx->teedev->desc->ops->close_session(ctx, arg.session);

	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}

	return rc;
}
static int tee_ioctl_invoke(struct tee_context *ctx,
			    struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	size_t n;
	struct tee_ioctl_buf_data buf;
	struct tee_ioctl_invoke_arg __user *uarg;
	struct tee_ioctl_invoke_arg arg;
	struct tee_ioctl_param __user *uparams = NULL;
	struct tee_param *params = NULL;

	if (!ctx->teedev->desc->ops->invoke_func)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
		return -EINVAL;

	if (arg.num_params) {
		params = kcalloc(arg.num_params, sizeof(struct tee_param),
				 GFP_KERNEL);
		if (!params)
			return -ENOMEM;
		uparams = uarg->params;
		rc = params_from_user(ctx, params, arg.num_params, uparams);
		if (rc)
			goto out;
	}

	rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
	if (rc)
		goto out;

	if (put_user(arg.ret, &uarg->ret) ||
	    put_user(arg.ret_origin, &uarg->ret_origin)) {
		rc = -EFAULT;
		goto out;
	}
	rc = params_to_user(uparams, arg.num_params, params);
out:
	if (params) {
		/* Decrease ref count for all valid shared memory pointers */
		for (n = 0; n < arg.num_params; n++)
			if (tee_param_is_memref(params + n) &&
			    params[n].u.memref.shm)
				tee_shm_put(params[n].u.memref.shm);
		kfree(params);
	}
	return rc;
}
static int tee_ioctl_cancel(struct tee_context *ctx,
			    struct tee_ioctl_cancel_arg __user *uarg)
{
	struct tee_ioctl_cancel_arg arg;

	if (!ctx->teedev->desc->ops->cancel_req)
		return -EINVAL;

	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
						  arg.session);
}

static int
tee_ioctl_close_session(struct tee_context *ctx,
			struct tee_ioctl_close_session_arg __user *uarg)
{
	struct tee_ioctl_close_session_arg arg;

	if (!ctx->teedev->desc->ops->close_session)
		return -EINVAL;

	if (copy_from_user(&arg, uarg, sizeof(arg)))
		return -EFAULT;

	return ctx->teedev->desc->ops->close_session(ctx, arg.session);
}

static int params_to_supp(struct tee_context *ctx,
			  struct tee_ioctl_param __user *uparams,
			  size_t num_params, struct tee_param *params)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_ioctl_param ip;
		struct tee_param *p = params + n;

		ip.attr = p->attr;
		switch (p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			ip.a = p->u.value.a;
			ip.b = p->u.value.b;
			ip.c = p->u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			ip.b = p->u.memref.size;
			if (!p->u.memref.shm) {
				ip.a = 0;
				ip.c = (u64)-1; /* invalid shm id */
				break;
			}
			ip.a = p->u.memref.shm_offs;
			ip.c = p->u.memref.shm->id;
			break;
		default:
			ip.a = 0;
			ip.b = 0;
			ip.c = 0;
			break;
		}

		if (copy_to_user(uparams + n, &ip, sizeof(ip)))
			return -EFAULT;
	}

	return 0;
}
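
/*
 * TEE_IOC_SUPPL_RECV and TEE_IOC_SUPPL_SEND are used by the user space
 * supplicant: RECV picks up a request queued by the driver and SEND
 * delivers the result back, reusing the tee_ioctl_param layout of the
 * client ioctls.
 */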
static int tee_ioctl_supp_recv(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	int rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_recv_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 func;

	if (!ctx->teedev->desc->ops->supp_recv)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(num_params, &uarg->num_params))
		return -EFAULT;

	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_user(ctx, params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
	if (rc)
		goto out;

	if (put_user(func, &uarg->func) ||
	    put_user(num_params, &uarg->num_params)) {
		rc = -EFAULT;
		goto out;
	}

	rc = params_to_supp(ctx, uarg->params, num_params, params);
out:
	kfree(params);
	return rc;
}
static int params_from_supp(struct tee_param *params, size_t num_params,
			    struct tee_ioctl_param __user *uparams)
{
	size_t n;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		struct tee_ioctl_param ip;

		if (copy_from_user(&ip, uparams + n, sizeof(ip)))
			return -EFAULT;

		/* All unused attribute bits have to be zero */
		if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_MASK)
			return -EINVAL;

		p->attr = ip.attr;
		switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			/* Only out and in/out values can be updated */
			p->u.value.a = ip.a;
			p->u.value.b = ip.b;
			p->u.value.c = ip.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			/*
			 * Only the size of the memref can be updated.
			 * Since we don't have access to the original
			 * parameters here, only store the supplied size.
			 * The driver will copy the updated size into the
			 * original parameters.
			 */
			p->u.memref.shm = NULL;
			p->u.memref.shm_offs = 0;
			p->u.memref.size = ip.b;
			break;
		default:
			memset(&p->u, 0, sizeof(p->u));
			break;
		}
	}

	return 0;
}
static int tee_ioctl_supp_send(struct tee_context *ctx,
			       struct tee_ioctl_buf_data __user *ubuf)
{
	long rc;
	struct tee_ioctl_buf_data buf;
	struct tee_iocl_supp_send_arg __user *uarg;
	struct tee_param *params;
	u32 num_params;
	u32 ret;

	/* Not valid for this driver */
	if (!ctx->teedev->desc->ops->supp_send)
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, sizeof(buf)))
		return -EFAULT;

	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
	    buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
		return -EINVAL;

	uarg = u64_to_user_ptr(buf.buf_ptr);
	if (get_user(ret, &uarg->ret) ||
	    get_user(num_params, &uarg->num_params))
		return -EFAULT;

	if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
		return -EINVAL;

	params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	rc = params_from_supp(params, num_params, uarg->params);
	if (rc)
		goto out;

	rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
out:
	kfree(params);
	return rc;
}
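
/* Single ioctl entry point; dispatches on the ioctl command number. */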
static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct tee_context *ctx = filp->private_data;
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case TEE_IOC_VERSION:
		return tee_ioctl_version(ctx, uarg);
	case TEE_IOC_SHM_ALLOC:
		return tee_ioctl_shm_alloc(ctx, uarg);
	case TEE_IOC_SHM_REGISTER:
		return tee_ioctl_shm_register(ctx, uarg);
	case TEE_IOC_OPEN_SESSION:
		return tee_ioctl_open_session(ctx, uarg);
	case TEE_IOC_INVOKE:
		return tee_ioctl_invoke(ctx, uarg);
	case TEE_IOC_CANCEL:
		return tee_ioctl_cancel(ctx, uarg);
	case TEE_IOC_CLOSE_SESSION:
		return tee_ioctl_close_session(ctx, uarg);
	case TEE_IOC_SUPPL_RECV:
		return tee_ioctl_supp_recv(ctx, uarg);
	case TEE_IOC_SUPPL_SEND:
		return tee_ioctl_supp_send(ctx, uarg);
	default:
		return -EINVAL;
	}
}

static const struct file_operations tee_fops = {
	.owner = THIS_MODULE,
	.open = tee_open,
	.release = tee_release,
	.unlocked_ioctl = tee_ioctl,
	.compat_ioctl = tee_ioctl,
};
static void tee_release_device(struct device *dev)
{
	struct tee_device *teedev = container_of(dev, struct tee_device, dev);

	spin_lock(&driver_lock);
	clear_bit(teedev->id, dev_mask);
	spin_unlock(&driver_lock);
	mutex_destroy(&teedev->mutex);
	idr_destroy(&teedev->idr);
	kfree(teedev);
}

/**
 * tee_device_alloc() - Allocate a new struct tee_device instance
 * @teedesc:	Descriptor for this driver
 * @dev:	Parent device for this device
 * @pool:	Shared memory pool, NULL if not used
 * @driver_data: Private driver data for this device
 *
 * Allocates a new struct tee_device instance. The device is
 * removed by tee_device_unregister().
 *
 * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
 */
struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
				    struct device *dev,
				    struct tee_shm_pool *pool,
				    void *driver_data)
{
	struct tee_device *teedev;
	void *ret;
	int rc, max_id;
	int offs = 0;

	if (!teedesc || !teedesc->name || !teedesc->ops ||
	    !teedesc->ops->get_version || !teedesc->ops->open ||
	    !teedesc->ops->release || !pool)
		return ERR_PTR(-EINVAL);

	teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
	if (!teedev) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	max_id = TEE_NUM_DEVICES / 2;

	if (teedesc->flags & TEE_DESC_PRIVILEGED) {
		offs = TEE_NUM_DEVICES / 2;
		max_id = TEE_NUM_DEVICES;
	}

	spin_lock(&driver_lock);
	teedev->id = find_next_zero_bit(dev_mask, max_id, offs);
	if (teedev->id < max_id)
		set_bit(teedev->id, dev_mask);
	spin_unlock(&driver_lock);

	if (teedev->id >= max_id) {
		ret = ERR_PTR(-ENOMEM);
		goto err;
	}

	snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
		 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
		 teedev->id - offs);

	teedev->dev.class = tee_class;
	teedev->dev.release = tee_release_device;
	teedev->dev.parent = dev;

	teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);

	rc = dev_set_name(&teedev->dev, "%s", teedev->name);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_devt;
	}

	cdev_init(&teedev->cdev, &tee_fops);
	teedev->cdev.owner = teedesc->owner;
	teedev->cdev.kobj.parent = &teedev->dev.kobj;

	dev_set_drvdata(&teedev->dev, driver_data);
	device_initialize(&teedev->dev);

	/* 1 as tee_device_unregister() does one final tee_device_put() */
	teedev->num_users = 1;
	init_completion(&teedev->c_no_users);
	mutex_init(&teedev->mutex);
	idr_init(&teedev->idr);

	teedev->desc = teedesc;
	teedev->pool = pool;

	return teedev;
err_devt:
	unregister_chrdev_region(teedev->dev.devt, 1);
err:
	pr_err("could not register %s driver\n",
	       teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
	if (teedev && teedev->id < TEE_NUM_DEVICES) {
		spin_lock(&driver_lock);
		clear_bit(teedev->id, dev_mask);
		spin_unlock(&driver_lock);
	}
	kfree(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_device_alloc);
static ssize_t implementation_id_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct tee_device *teedev = container_of(dev, struct tee_device, dev);
	struct tee_ioctl_version_data vers;

	teedev->desc->ops->get_version(teedev, &vers);
	return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
}
static DEVICE_ATTR_RO(implementation_id);

static struct attribute *tee_dev_attrs[] = {
	&dev_attr_implementation_id.attr,
	NULL
};

static const struct attribute_group tee_dev_group = {
	.attrs = tee_dev_attrs,
};
/**
 * tee_device_register() - Registers a TEE device
 * @teedev:	Device to register
 *
 * tee_device_unregister() needs to be called to remove the @teedev if
 * this function fails.
 *
 * @returns < 0 on failure
 */
int tee_device_register(struct tee_device *teedev)
{
	int rc;

	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
		dev_err(&teedev->dev, "attempt to register twice\n");
		return -EINVAL;
	}

	rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1);
	if (rc) {
		dev_err(&teedev->dev,
			"unable to cdev_add() %s, major %d, minor %d, err=%d\n",
			teedev->name, MAJOR(teedev->dev.devt),
			MINOR(teedev->dev.devt), rc);
		return rc;
	}

	rc = device_add(&teedev->dev);
	if (rc) {
		dev_err(&teedev->dev,
			"unable to device_add() %s, major %d, minor %d, err=%d\n",
			teedev->name, MAJOR(teedev->dev.devt),
			MINOR(teedev->dev.devt), rc);
		goto err_device_add;
	}

	rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group);
	if (rc) {
		dev_err(&teedev->dev,
			"failed to create sysfs attributes, err=%d\n", rc);
		goto err_sysfs_create_group;
	}

	teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
	return 0;

err_sysfs_create_group:
	device_del(&teedev->dev);
err_device_add:
	cdev_del(&teedev->cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(tee_device_register);
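
/*
 * tee_device_get()/tee_device_put() track the number of users of a
 * tee_device. tee_device_get() fails once teedev->desc has been cleared,
 * and the last tee_device_put() completes c_no_users so that
 * tee_device_unregister() can finish.
 */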
void tee_device_put(struct tee_device *teedev)
{
	mutex_lock(&teedev->mutex);
	/* Shouldn't put in this state */
	if (!WARN_ON(!teedev->desc)) {
		teedev->num_users--;
		if (!teedev->num_users) {
			teedev->desc = NULL;
			complete(&teedev->c_no_users);
		}
	}
	mutex_unlock(&teedev->mutex);
}

bool tee_device_get(struct tee_device *teedev)
{
	mutex_lock(&teedev->mutex);
	if (!teedev->desc) {
		mutex_unlock(&teedev->mutex);
		return false;
	}
	teedev->num_users++;
	mutex_unlock(&teedev->mutex);
	return true;
}
/**
 * tee_device_unregister() - Removes a TEE device
 * @teedev:	Device to unregister
 *
 * This function should be called to remove the @teedev even if
 * tee_device_register() hasn't been called yet. Does nothing if
 * @teedev is NULL.
 */
void tee_device_unregister(struct tee_device *teedev)
{
	if (!teedev)
		return;

	if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
		sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group);
		cdev_del(&teedev->cdev);
		device_del(&teedev->dev);
	}

	tee_device_put(teedev);
	wait_for_completion(&teedev->c_no_users);

	/*
	 * No need to take a mutex any longer now since teedev->desc was
	 * set to NULL before teedev->c_no_users was completed.
	 */

	teedev->pool = NULL;

	put_device(&teedev->dev);
}
EXPORT_SYMBOL_GPL(tee_device_unregister);

/**
 * tee_get_drvdata() - Return driver_data pointer
 * @teedev:	Device containing the driver_data pointer
 * @returns the driver_data pointer supplied to tee_device_alloc().
 */
void *tee_get_drvdata(struct tee_device *teedev)
{
	return dev_get_drvdata(&teedev->dev);
}
EXPORT_SYMBOL_GPL(tee_get_drvdata);
static int __init tee_init(void)
{
	int rc;

	tee_class = class_create(THIS_MODULE, "tee");
	if (IS_ERR(tee_class)) {
		pr_err("couldn't create class\n");
		return PTR_ERR(tee_class);
	}

	rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
	if (rc) {
		pr_err("failed to allocate char dev region\n");
		class_destroy(tee_class);
		tee_class = NULL;
	}

	return rc;
}

static void __exit tee_exit(void)
{
	class_destroy(tee_class);
	tee_class = NULL;
	unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
}

subsys_initcall(tee_init);
module_exit(tee_exit);

MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("TEE Driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
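
For reference, a minimal user-space sketch of how the ioctl interface above is exercised. This is not part of tee_core.c; it assumes an unprivileged TEE device has been registered as /dev/tee0 and that the UAPI header <linux/tee.h> matching this kernel is installed.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tee.h>

int main(void)
{
	struct tee_ioctl_version_data vers;
	int fd;

	/* Device node created for an unprivileged device by tee_device_register() */
	fd = open("/dev/tee0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/tee0");
		return 1;
	}

	memset(&vers, 0, sizeof(vers));
	/* Handled by tee_ioctl_version() in tee_core.c */
	if (ioctl(fd, TEE_IOC_VERSION, &vers) < 0) {
		perror("TEE_IOC_VERSION");
		close(fd);
		return 1;
	}

	printf("impl_id=%u impl_caps=%u gen_caps=%u\n",
	       vers.impl_id, vers.impl_caps, vers.gen_caps);
	close(fd);
	return 0;
}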