core.c

/*
 * core.c - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ftrace.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/livepatch.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr. This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list. The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node: node for the global klp_ops list
 * @func_stack: list head for the stack of klp_func's (active func is on top)
 * @fops: registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};

/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them. References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);

static struct kobject *klp_root_kobj;
static struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}

/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not interfere with the work of klp_module_coming() and
	 * klp_module_going(). Note that the patch might still be needed
	 * before klp_module_going() is called. Module functions can be
	 * called even in the GOING state until mod->exit() finishes. This
	 * is especially important for patches that modify the semantics of
	 * the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}
/* klp_mutex must be held by caller */
static bool klp_is_patch_registered(struct klp_patch *patch)
{
	struct klp_patch *mypatch;

	list_for_each_entry(mypatch, &klp_patches, list)
		if (mypatch == patch)
			return true;

	return false;
}

static bool klp_initialized(void)
{
	return !!klp_root_kobj;
}

struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	unsigned long count;
	unsigned long pos;
};

static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}
static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	mutex_lock(&module_mutex);
	kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
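
/*
 * For illustration (not part of the original source): if a symbol "foo"
 * occurs three times in kallsyms for the given object, a lookup with
 * sympos == 0 fails with the ambiguity error above, while sympos == 2
 * resolves to the second occurrence. For a unique symbol, either
 * sympos == 0 or sympos == 1 succeeds.
 */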
static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}
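
/*
 * For illustration, a livepatch symbol name in the format parsed above might
 * look like ".klp.sym.vmlinux.printk,0": object "vmlinux", symbol "printk",
 * sympos 0 (i.e. the symbol is expected to be unique in that object).
 */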
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name",
			       secname);
			ret = -EINVAL;
			break;
		}

		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}
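
/*
 * For illustration, a relocation section targeting the ext4 module might be
 * named something like ".klp.rela.ext4.text.unlikely"; only sections whose
 * sec_objname matches the object being patched are applied above.
 */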
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
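
/*
 * For illustration: powerpc64 is one architecture that provides its own
 * klp_get_ftrace_location(), because the ftrace call site there is not at
 * the function's entry address.
 */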
static void klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(func->state != KLP_ENABLED))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;
}

static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
static void klp_disable_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->state == KLP_ENABLED)
			klp_disable_func(func);

	obj->state = KLP_DISABLED;
}

static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_enable_func(func);
		if (ret) {
			klp_disable_object(obj);
			return ret;
		}
	}
	obj->state = KLP_ENABLED;

	return 0;
}

static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (obj->state == KLP_ENABLED)
			klp_disable_object(obj);
	}

	patch->state = KLP_DISABLED;

	return 0;
}
/**
 * klp_disable_patch() - disables a registered patch
 * @patch: The registered, enabled patch to be disabled
 *
 * Unregisters the patched functions from ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_disable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	if (patch->state == KLP_DISABLED) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_disable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_for_each_object(patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}
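
/*
 * For illustration of the stacking rules enforced above: with patches P1, P2
 * and P3 registered in that order, P3 can only be enabled once P1 and P2 are
 * enabled, and P1 can only be disabled after P3 and P2 have been disabled.
 */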
/**
 * klp_enable_patch() - enables a registered patch
 * @patch: The registered, disabled patch to be enabled
 *
 * Performs the needed symbol lookups and code relocations,
 * then registers the patched functions with ftrace.
 *
 * Return: 0 on success, otherwise error
 */
int klp_enable_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	ret = __klp_enable_patch(patch);

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_enable_patch);
/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return -EINVAL;

	if (val != KLP_DISABLED && val != KLP_ENABLED)
		return -EINVAL;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (val == patch->state) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (val == KLP_ENABLED) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
}

static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here. See klp_register_patch() for more details.
	 */
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};
/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}

/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	klp_for_each_func(obj, func)
		func->old_addr = 0;
}

/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}

static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name || !func->new_func)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	/* The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}
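
/*
 * For illustration (hypothetical function name): patching a unique vmlinux
 * symbol "meminfo_proc_show" would create the sysfs directory
 * /sys/kernel/livepatch/<patch>/vmlinux/meminfo_proc_show,1.
 */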
/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
}

/* parts of the initialization that are done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;
	}

	return 0;
}

static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}
static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}

/**
 * klp_unregister_patch() - unregisters a patch
 * @patch: Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret = 0;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto out;
	}

	if (patch->state == KLP_ENABLED) {
		ret = -EBUSY;
		goto out;
	}

	klp_free_patch(patch);

out:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);

/**
 * klp_register_patch() - registers a patch
 * @patch: Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!patch || !patch->mod)
		return -EINVAL;

	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module",
		       patch->mod->name);
		return -EINVAL;
	}

	if (!klp_initialized())
		return -ENODEV;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded. Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			if (patch->state == KLP_DISABLED)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_enable_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	klp_free_object_loaded(obj);
	mutex_unlock(&klp_mutex);

	return ret;
}
void klp_module_going(struct module *mod)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (patch->state != KLP_DISABLED) {
				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_disable_object(obj);
			}

			klp_free_object_loaded(obj);
			break;
		}
	}

	mutex_unlock(&klp_mutex);
}

static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);