bpf-lirc.c

// SPDX-License-Identifier: GPL-2.0
// bpf-lirc.c - handles bpf
//
// Copyright (C) 2018 Sean Young <sean@mess.org>

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/bpf_lirc.h>
#include "rc-core-priv.h"

/*
 * BPF interface for raw IR
 */
const struct bpf_prog_ops lirc_mode2_prog_ops = {
};

BPF_CALL_1(bpf_rc_repeat, u32*, sample)
{
	struct ir_raw_event_ctrl *ctrl;

	ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);

	rc_repeat(ctrl->dev);

	return 0;
}

static const struct bpf_func_proto rc_repeat_proto = {
	.func	   = bpf_rc_repeat,
	.gpl_only  = true, /* rc_repeat is EXPORT_SYMBOL_GPL */
	.ret_type  = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

/*
 * Currently rc-core does not support 64-bit scancodes, but there are many
 * known protocols with more than 32 bits. So, define the interface as u64
 * to be future-proof.
 */
BPF_CALL_4(bpf_rc_keydown, u32*, sample, u32, protocol, u64, scancode,
	   u32, toggle)
{
	struct ir_raw_event_ctrl *ctrl;

	ctrl = container_of(sample, struct ir_raw_event_ctrl, bpf_sample);

	rc_keydown(ctrl->dev, protocol, scancode, toggle != 0);

	return 0;
}

static const struct bpf_func_proto rc_keydown_proto = {
	.func	   = bpf_rc_keydown,
	.gpl_only  = true, /* rc_keydown is EXPORT_SYMBOL_GPL */
	.ret_type  = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_ANYTHING,
};

static const struct bpf_func_proto *
lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_rc_repeat:
		return &rc_repeat_proto;
	case BPF_FUNC_rc_keydown:
		return &rc_keydown_proto;
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through */
	default:
		return NULL;
	}
}

static bool lirc_mode2_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	/* We have one field of u32 */
	return type == BPF_READ && off == 0 && size == sizeof(u32);
}

const struct bpf_verifier_ops lirc_mode2_verifier_ops = {
	.get_func_proto  = lirc_mode2_func_proto,
	.is_valid_access = lirc_mode2_is_valid_access
};
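
/*
 * For illustration only (not part of this file): a minimal sketch of a
 * lirc_mode2 BPF program that reads the u32 lirc mode2 sample validated by
 * lirc_mode2_is_valid_access() above and reports a key via the
 * bpf_rc_keydown() helper. It would be built separately with
 * clang -target bpf, assuming libbpf's bpf/bpf_helpers.h (or an equivalent
 * declaration of bpf_rc_keydown()). The 20000us threshold and the
 * protocol/scancode values are made up for the sketch; a real decoder
 * would track state across pulses and spaces, e.g. in a map.
 */
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <linux/lirc.h>
#include <bpf/bpf_helpers.h>

SEC("lirc_mode2")
int decode_ir(unsigned int *sample)
{
	/* The context is a single u32 in lirc mode2 format. */
	if (LIRC_IS_PULSE(*sample)) {
		unsigned int duration = LIRC_VALUE(*sample);

		/* Hypothetical mapping: a long pulse reports scancode 0x42. */
		if (duration > 20000)
			bpf_rc_keydown(sample, RC_PROTO_UNKNOWN, 0x42, 0);
	}

	return 0;
}

/* rc_keydown/rc_repeat are gpl_only helpers, so a GPL license is required. */
char _license[] SEC("license") = "GPL";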

#define BPF_MAX_PROGS 64

static int lirc_bpf_attach(struct rc_dev *rcdev, struct bpf_prog *prog)
{
	struct bpf_prog_array __rcu *old_array;
	struct bpf_prog_array *new_array;
	struct ir_raw_event_ctrl *raw;
	int ret;

	if (rcdev->driver_type != RC_DRIVER_IR_RAW)
		return -EINVAL;

	ret = mutex_lock_interruptible(&ir_raw_handler_lock);
	if (ret)
		return ret;

	raw = rcdev->raw;
	if (!raw) {
		ret = -ENODEV;
		goto unlock;
	}

	if (raw->progs && bpf_prog_array_length(raw->progs) >= BPF_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	old_array = raw->progs;
	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	rcu_assign_pointer(raw->progs, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&ir_raw_handler_lock);
	return ret;
}
static int lirc_bpf_detach(struct rc_dev *rcdev, struct bpf_prog *prog)
{
	struct bpf_prog_array __rcu *old_array;
	struct bpf_prog_array *new_array;
	struct ir_raw_event_ctrl *raw;
	int ret;

	if (rcdev->driver_type != RC_DRIVER_IR_RAW)
		return -EINVAL;

	ret = mutex_lock_interruptible(&ir_raw_handler_lock);
	if (ret)
		return ret;

	raw = rcdev->raw;
	if (!raw) {
		ret = -ENODEV;
		goto unlock;
	}

	old_array = raw->progs;
	ret = bpf_prog_array_copy(old_array, prog, NULL, &new_array);
	/*
	 * Do not use bpf_prog_array_delete_safe() as we would end up
	 * with a dummy entry in the array, and then we would free the
	 * dummy in lirc_bpf_free()
	 */
	if (ret)
		goto unlock;

	rcu_assign_pointer(raw->progs, new_array);
	bpf_prog_array_free(old_array);
	bpf_prog_put(prog);

unlock:
	mutex_unlock(&ir_raw_handler_lock);
	return ret;
}
void lirc_bpf_run(struct rc_dev *rcdev, u32 sample)
{
	struct ir_raw_event_ctrl *raw = rcdev->raw;

	raw->bpf_sample = sample;

	if (raw->progs)
		BPF_PROG_RUN_ARRAY(raw->progs, &raw->bpf_sample, BPF_PROG_RUN);
}

/*
 * This should be called once the rc thread has been stopped, so there can be
 * no concurrent bpf execution.
 */
void lirc_bpf_free(struct rc_dev *rcdev)
{
	struct bpf_prog_array_item *item;

	if (!rcdev->raw->progs)
		return;

	item = rcu_dereference(rcdev->raw->progs)->items;
	while (item->prog) {
		bpf_prog_put(item->prog);
		item++;
	}

	bpf_prog_array_free(rcdev->raw->progs);
}

int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct rc_dev *rcdev;
	int ret;

	if (attr->attach_flags)
		return -EINVAL;

	rcdev = rc_dev_get_from_fd(attr->target_fd);
	if (IS_ERR(rcdev))
		return PTR_ERR(rcdev);

	ret = lirc_bpf_attach(rcdev, prog);

	put_device(&rcdev->dev);

	return ret;
}

int lirc_prog_detach(const union bpf_attr *attr)
{
	struct bpf_prog *prog;
	struct rc_dev *rcdev;
	int ret;

	if (attr->attach_flags)
		return -EINVAL;

	prog = bpf_prog_get_type(attr->attach_bpf_fd,
				 BPF_PROG_TYPE_LIRC_MODE2);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	rcdev = rc_dev_get_from_fd(attr->target_fd);
	if (IS_ERR(rcdev)) {
		bpf_prog_put(prog);
		return PTR_ERR(rcdev);
	}

	ret = lirc_bpf_detach(rcdev, prog);

	bpf_prog_put(prog);
	put_device(&rcdev->dev);

	return ret;
}

int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	struct bpf_prog_array __rcu *progs;
	struct rc_dev *rcdev;
	u32 cnt, flags = 0;
	int ret;

	if (attr->query.query_flags)
		return -EINVAL;

	rcdev = rc_dev_get_from_fd(attr->query.target_fd);
	if (IS_ERR(rcdev))
		return PTR_ERR(rcdev);

	if (rcdev->driver_type != RC_DRIVER_IR_RAW) {
		ret = -EINVAL;
		goto put;
	}

	ret = mutex_lock_interruptible(&ir_raw_handler_lock);
	if (ret)
		goto put;

	progs = rcdev->raw->progs;
	cnt = progs ? bpf_prog_array_length(progs) : 0;

	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt))) {
		ret = -EFAULT;
		goto unlock;
	}

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) {
		ret = -EFAULT;
		goto unlock;
	}

	if (attr->query.prog_cnt != 0 && prog_ids && cnt)
		ret = bpf_prog_array_copy_to_user(progs, prog_ids, cnt);

unlock:
	mutex_unlock(&ir_raw_handler_lock);
put:
	put_device(&rcdev->dev);
	return ret;
}
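
/*
 * For illustration only (not part of this file): a user-space sketch that
 * exercises lirc_prog_attach() and lirc_prog_query() above through libbpf.
 * It assumes prog_fd refers to an already-loaded BPF_PROG_TYPE_LIRC_MODE2
 * program and that /dev/lirc0 is backed by an RC_DRIVER_IR_RAW device;
 * error handling is abbreviated. Detaching would use
 * bpf_prog_detach2(prog_fd, lirc_fd, BPF_LIRC_MODE2), since the detach path
 * needs the program fd as well as the target fd.
 */
#include <fcntl.h>
#include <unistd.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

int attach_and_list(int prog_fd)
{
	__u32 prog_ids[64], prog_cnt = 64;	/* BPF_MAX_PROGS is 64 */
	int lirc_fd, err;

	lirc_fd = open("/dev/lirc0", O_RDWR);
	if (lirc_fd < 0)
		return -1;

	/* bpf(BPF_PROG_ATTACH, ...) with target_fd = lirc chardev fd */
	err = bpf_prog_attach(prog_fd, lirc_fd, BPF_LIRC_MODE2, 0);
	if (err)
		goto out;

	/* bpf(BPF_PROG_QUERY, ...) lists the attached program IDs */
	err = bpf_prog_query(lirc_fd, BPF_LIRC_MODE2, 0, NULL, prog_ids,
			     &prog_cnt);
out:
	close(lirc_fd);
	return err;
}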