yama_lsm.c

/*
 * Yama Linux Security Module
 *
 * Author: Kees Cook <keescook@chromium.org>
 *
 * Copyright (C) 2010 Canonical, Ltd.
 * Copyright (C) 2011 The Chromium OS Authors.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2, as
 * published by the Free Software Foundation.
 *
 */

#include <linux/lsm_hooks.h>
#include <linux/sysctl.h>
#include <linux/ptrace.h>
#include <linux/prctl.h>
#include <linux/ratelimit.h>
#include <linux/workqueue.h>

#define YAMA_SCOPE_DISABLED     0
#define YAMA_SCOPE_RELATIONAL   1
#define YAMA_SCOPE_CAPABILITY   2
#define YAMA_SCOPE_NO_ATTACH    3
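
/*
 * Quick reference (informational) for the scope levels enforced by the
 * hooks below:
 *   0 (DISABLED)   - classic ptrace permissions only
 *   1 (RELATIONAL) - attach only to descendants, to tasks that declared
 *                    this tracer via PR_SET_PTRACER, or with CAP_SYS_PTRACE
 *   2 (CAPABILITY) - attach requires CAP_SYS_PTRACE
 *   3 (NO_ATTACH)  - PTRACE_ATTACH and PTRACE_TRACEME are denied outright
 */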

static int ptrace_scope = YAMA_SCOPE_RELATIONAL;

/* describe a ptrace relationship for potential exception */
struct ptrace_relation {
        struct task_struct *tracer;
        struct task_struct *tracee;
        bool invalid;
        struct list_head node;
        struct rcu_head rcu;
};

static LIST_HEAD(ptracer_relations);
static DEFINE_SPINLOCK(ptracer_relations_lock);

static void yama_relation_cleanup(struct work_struct *work);
static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);

/**
 * yama_relation_cleanup - remove invalid entries from the relation list
 * @work: unused; required by the work_struct callback signature
 */
static void yama_relation_cleanup(struct work_struct *work)
{
        struct ptrace_relation *relation;

        spin_lock(&ptracer_relations_lock);
        rcu_read_lock();
        list_for_each_entry_rcu(relation, &ptracer_relations, node) {
                if (relation->invalid) {
                        list_del_rcu(&relation->node);
                        kfree_rcu(relation, rcu);
                }
        }
        rcu_read_unlock();
        spin_unlock(&ptracer_relations_lock);
}

/**
 * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
 * @tracer: the task_struct of the process doing the ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Each tracee can have, at most, one tracer registered. Each time this
 * is called, the prior registered tracer will be replaced for the tracee.
 *
 * Returns 0 if relationship was added, -ve on error.
 */
static int yama_ptracer_add(struct task_struct *tracer,
                            struct task_struct *tracee)
{
        struct ptrace_relation *relation, *added;

        added = kmalloc(sizeof(*added), GFP_KERNEL);
        if (!added)
                return -ENOMEM;

        added->tracee = tracee;
        added->tracer = tracer;
        added->invalid = false;

        spin_lock(&ptracer_relations_lock);
        rcu_read_lock();
        list_for_each_entry_rcu(relation, &ptracer_relations, node) {
                if (relation->invalid)
                        continue;
                if (relation->tracee == tracee) {
                        list_replace_rcu(&relation->node, &added->node);
                        kfree_rcu(relation, rcu);
                        goto out;
                }
        }

        list_add_rcu(&added->node, &ptracer_relations);

out:
        rcu_read_unlock();
        spin_unlock(&ptracer_relations_lock);
        return 0;
}

/**
 * yama_ptracer_del - remove exceptions related to the given tasks
 * @tracer: remove any relation where tracer task matches
 * @tracee: remove any relation where tracee task matches
 */
static void yama_ptracer_del(struct task_struct *tracer,
                             struct task_struct *tracee)
{
        struct ptrace_relation *relation;
        bool marked = false;

        rcu_read_lock();
        list_for_each_entry_rcu(relation, &ptracer_relations, node) {
                if (relation->invalid)
                        continue;
                if (relation->tracee == tracee ||
                    (tracer && relation->tracer == tracer)) {
                        relation->invalid = true;
                        marked = true;
                }
        }
        rcu_read_unlock();

        if (marked)
                schedule_work(&yama_relation_work);
}

/**
 * yama_task_free - remove any ptracer exceptions involving the freed task
 * @task: task being removed
 */
void yama_task_free(struct task_struct *task)
{
        yama_ptracer_del(task, task);
}

/**
 * yama_task_prctl - check for Yama-specific prctl operations
 * @option: operation
 * @arg2: argument
 * @arg3: argument
 * @arg4: argument
 * @arg5: argument
 *
 * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
 * does not handle the given option.
 */
int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
                    unsigned long arg4, unsigned long arg5)
{
        int rc = -ENOSYS;
        struct task_struct *myself = current;

        switch (option) {
        case PR_SET_PTRACER:
                /* Since a thread can call prctl(), find the group leader
                 * before calling _add() or _del() on it, since we want
                 * process-level granularity of control. The tracer group
                 * leader checking is handled later when walking the ancestry
                 * at the time of PTRACE_ATTACH check.
                 */
                rcu_read_lock();
                if (!thread_group_leader(myself))
                        myself = rcu_dereference(myself->group_leader);
                get_task_struct(myself);
                rcu_read_unlock();

                if (arg2 == 0) {
                        yama_ptracer_del(NULL, myself);
                        rc = 0;
                } else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
                        rc = yama_ptracer_add(NULL, myself);
                } else {
                        struct task_struct *tracer;

                        rcu_read_lock();
                        tracer = find_task_by_vpid(arg2);
                        if (tracer)
                                get_task_struct(tracer);
                        else
                                rc = -EINVAL;
                        rcu_read_unlock();

                        if (tracer) {
                                rc = yama_ptracer_add(tracer, myself);
                                put_task_struct(tracer);
                        }
                }

                put_task_struct(myself);
                break;
        }

        return rc;
}
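
/*
 * Userspace usage sketch (illustrative, not part of the module): under
 * YAMA_SCOPE_RELATIONAL a process can allow a specific, otherwise unrelated
 * helper to attach to it with
 *
 *      prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
 *
 * allow any process with prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0),
 * or clear its exception again with prctl(PR_SET_PTRACER, 0, 0, 0, 0).
 */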

/**
 * task_is_descendant - walk up a process family tree looking for a match
 * @parent: the process to compare against while walking up from child
 * @child: the process to start from while looking upwards for parent
 *
 * Returns 1 if child is a descendant of parent, 0 if not.
 */
static int task_is_descendant(struct task_struct *parent,
                              struct task_struct *child)
{
        int rc = 0;
        struct task_struct *walker = child;

        if (!parent || !child)
                return 0;

        rcu_read_lock();
        if (!thread_group_leader(parent))
                parent = rcu_dereference(parent->group_leader);
        while (walker->pid > 0) {
                if (!thread_group_leader(walker))
                        walker = rcu_dereference(walker->group_leader);
                if (walker == parent) {
                        rc = 1;
                        break;
                }
                walker = rcu_dereference(walker->real_parent);
        }
        rcu_read_unlock();

        return rc;
}

/**
 * ptracer_exception_found - tracer registered as exception for this tracee
 * @tracer: the task_struct of the process attempting ptrace
 * @tracee: the task_struct of the process to be ptraced
 *
 * Returns 1 if tracer, or one of tracer's ancestors, is registered as a
 * ptracer exception for tracee (a PR_SET_PTRACER_ANY exception matches any
 * tracer), 0 otherwise.
 */
static int ptracer_exception_found(struct task_struct *tracer,
                                   struct task_struct *tracee)
{
        int rc = 0;
        struct ptrace_relation *relation;
        struct task_struct *parent = NULL;
        bool found = false;

        rcu_read_lock();
        if (!thread_group_leader(tracee))
                tracee = rcu_dereference(tracee->group_leader);
        list_for_each_entry_rcu(relation, &ptracer_relations, node) {
                if (relation->invalid)
                        continue;
                if (relation->tracee == tracee) {
                        parent = relation->tracer;
                        found = true;
                        break;
                }
        }

        if (found && (parent == NULL || task_is_descendant(parent, tracer)))
                rc = 1;
        rcu_read_unlock();

        return rc;
}

/**
 * yama_ptrace_access_check - validate PTRACE_ATTACH calls
 * @child: task that current task is attempting to ptrace
 * @mode: ptrace attach mode
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
static int yama_ptrace_access_check(struct task_struct *child,
                                    unsigned int mode)
{
        int rc = 0;

        /* require ptrace target be a child of ptracer on attach */
        if (mode == PTRACE_MODE_ATTACH) {
                switch (ptrace_scope) {
                case YAMA_SCOPE_DISABLED:
                        /* No additional restrictions. */
                        break;
                case YAMA_SCOPE_RELATIONAL:
                        rcu_read_lock();
                        if (!task_is_descendant(current, child) &&
                            !ptracer_exception_found(current, child) &&
                            !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
                                rc = -EPERM;
                        rcu_read_unlock();
                        break;
                case YAMA_SCOPE_CAPABILITY:
                        rcu_read_lock();
                        if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
                                rc = -EPERM;
                        rcu_read_unlock();
                        break;
                case YAMA_SCOPE_NO_ATTACH:
                default:
                        rc = -EPERM;
                        break;
                }
        }

        if (rc) {
                printk_ratelimited(KERN_NOTICE
                        "ptrace of pid %d was attempted by: %s (pid %d)\n",
                        child->pid, current->comm, current->pid);
        }

        return rc;
}
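
/*
 * Example (illustrative): with ptrace_scope == YAMA_SCOPE_RELATIONAL,
 * "gdb -p <pid>" from an unrelated shell fails with EPERM unless the target
 * has declared the debugger via PR_SET_PTRACER or the caller holds
 * CAP_SYS_PTRACE in the target's user namespace, while "gdb ./prog" keeps
 * working because the inferior it spawns is a descendant of the debugger.
 */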

/**
 * yama_ptrace_traceme - validate PTRACE_TRACEME calls
 * @parent: task that will become the ptracer of the current task
 *
 * Returns 0 if following the ptrace is allowed, -ve on error.
 */
int yama_ptrace_traceme(struct task_struct *parent)
{
        int rc = 0;

        /* Only disallow PTRACE_TRACEME on more aggressive settings. */
        switch (ptrace_scope) {
        case YAMA_SCOPE_CAPABILITY:
                if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
                        rc = -EPERM;
                break;
        case YAMA_SCOPE_NO_ATTACH:
                rc = -EPERM;
                break;
        }

        if (rc) {
                printk_ratelimited(KERN_NOTICE
                        "ptraceme of pid %d was attempted by: %s (pid %d)\n",
                        current->pid, parent->comm, parent->pid);
        }

        return rc;
}

static struct security_hook_list yama_hooks[] = {
        LSM_HOOK_INIT(ptrace_access_check, yama_ptrace_access_check),
        LSM_HOOK_INIT(ptrace_traceme, yama_ptrace_traceme),
        LSM_HOOK_INIT(task_prctl, yama_task_prctl),
        LSM_HOOK_INIT(task_free, yama_task_free),
};

void __init yama_add_hooks(void)
{
        security_add_hooks(yama_hooks, ARRAY_SIZE(yama_hooks));
}

#ifdef CONFIG_SYSCTL
static int yama_dointvec_minmax(struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
{
        struct ctl_table table_copy;

        if (write && !capable(CAP_SYS_PTRACE))
                return -EPERM;

        /* Lock the max value if it ever gets set. */
        table_copy = *table;
        if (*(int *)table_copy.data == *(int *)table_copy.extra2)
                table_copy.extra1 = table_copy.extra2;

        return proc_dointvec_minmax(&table_copy, write, buffer, lenp, ppos);
}

static int zero;
static int max_scope = YAMA_SCOPE_NO_ATTACH;

struct ctl_path yama_sysctl_path[] = {
        { .procname = "kernel", },
        { .procname = "yama", },
        { }
};

static struct ctl_table yama_sysctl_table[] = {
        {
                .procname       = "ptrace_scope",
                .data           = &ptrace_scope,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = yama_dointvec_minmax,
                .extra1         = &zero,
                .extra2         = &max_scope,
        },
        { }
};
#endif /* CONFIG_SYSCTL */
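
/*
 * Runtime configuration sketch (illustrative): the scope is exposed as
 * /proc/sys/kernel/yama/ptrace_scope, e.g.
 *
 *      sysctl -w kernel.yama.ptrace_scope=2
 *
 * Writes require CAP_SYS_PTRACE, and once the value reaches
 * YAMA_SCOPE_NO_ATTACH, yama_dointvec_minmax() pins extra1 to extra2 so the
 * setting can no longer be lowered.
 */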

static __init int yama_init(void)
{
#ifndef CONFIG_SECURITY_YAMA_STACKED
        /*
         * If yama is being stacked this is already taken care of.
         */
        if (!security_module_enable("yama"))
                return 0;
#endif
        pr_info("Yama: becoming mindful.\n");

#ifdef CONFIG_SYSCTL
        if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
                panic("Yama: sysctl registration failed.\n");
#endif

        return 0;
}

security_initcall(yama_init);