  1. /*
  2. * core.c - Kernel Live Patching Core
  3. *
  4. * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
  5. * Copyright (C) 2014 SUSE
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License
  9. * as published by the Free Software Foundation; either version 2
  10. * of the License, or (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  19. */
  20. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  21. #include <linux/module.h>
  22. #include <linux/kernel.h>
  23. #include <linux/mutex.h>
  24. #include <linux/slab.h>
  25. #include <linux/list.h>
  26. #include <linux/kallsyms.h>
  27. #include <linux/livepatch.h>
  28. #include <linux/elf.h>
  29. #include <linux/moduleloader.h>
  30. #include <linux/completion.h>
  31. #include <linux/memory.h>
  32. #include <asm/cacheflush.h>
  33. #include "core.h"
  34. #include "patch.h"
  35. #include "transition.h"
/*
 * klp_mutex is a coarse lock which serializes access to klp data. All
 * accesses to klp-related variables and structures must have mutex
 * protection, except within the following functions which carefully
 * avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

/* All registered patches, in stacking order (oldest first). */
static LIST_HEAD(klp_patches);

/* Root of the sysfs hierarchy: /sys/kernel/livepatch; set in klp_init(). */
static struct kobject *klp_root_kobj;
  47. static bool klp_is_module(struct klp_object *obj)
  48. {
  49. return obj->name;
  50. }
/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	/* vmlinux objects have no module to find. */
	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess work of klp_module_coming() and klp_module_going().
	 * Note that the patch might still be needed before klp_module_going()
	 * is called. Module functions can be called even in the GOING state
	 * until mod->exit() finishes. This is especially important for
	 * patches that modify semantic of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}
  75. static bool klp_is_patch_registered(struct klp_patch *patch)
  76. {
  77. struct klp_patch *mypatch;
  78. list_for_each_entry(mypatch, &klp_patches, list)
  79. if (mypatch == patch)
  80. return true;
  81. return false;
  82. }
  83. static bool klp_initialized(void)
  84. {
  85. return !!klp_root_kobj;
  86. }
/*
 * Search context shared between klp_find_object_symbol() and its
 * kallsyms-walk callback klp_find_callback().
 */
struct klp_find_arg {
	const char *objname;	/* module to search; NULL means vmlinux */
	const char *name;	/* symbol name to match */
	unsigned long addr;	/* address of the last match found */
	unsigned long count;	/* number of matches seen so far */
	unsigned long pos;	/* wanted occurrence; 0 demands uniqueness */
};
  94. static int klp_find_callback(void *data, const char *name,
  95. struct module *mod, unsigned long addr)
  96. {
  97. struct klp_find_arg *args = data;
  98. if ((mod && !args->objname) || (!mod && args->objname))
  99. return 0;
  100. if (strcmp(args->name, name))
  101. return 0;
  102. if (args->objname && strcmp(args->objname, mod->name))
  103. return 0;
  104. args->addr = addr;
  105. args->count++;
  106. /*
  107. * Finish the search when the symbol is found for the desired position
  108. * or the position is not defined for a non-unique symbol.
  109. */
  110. if ((args->pos && (args->count == args->pos)) ||
  111. (!args->pos && (args->count > 1)))
  112. return 1;
  113. return 0;
  114. }
  115. static int klp_find_object_symbol(const char *objname, const char *name,
  116. unsigned long sympos, unsigned long *addr)
  117. {
  118. struct klp_find_arg args = {
  119. .objname = objname,
  120. .name = name,
  121. .addr = 0,
  122. .count = 0,
  123. .pos = sympos,
  124. };
  125. mutex_lock(&module_mutex);
  126. if (objname)
  127. module_kallsyms_on_each_symbol(klp_find_callback, &args);
  128. else
  129. kallsyms_on_each_symbol(klp_find_callback, &args);
  130. mutex_unlock(&module_mutex);
  131. /*
  132. * Ensure an address was found. If sympos is 0, ensure symbol is unique;
  133. * otherwise ensure the symbol position count matches sympos.
  134. */
  135. if (args.addr == 0)
  136. pr_err("symbol '%s' not found in symbol table\n", name);
  137. else if (args.count > 1 && sympos == 0) {
  138. pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
  139. name, objname);
  140. } else if (sympos != args.count && sympos > 0) {
  141. pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
  142. sympos, name, objname ? objname : "vmlinux");
  143. } else {
  144. *addr = args.addr;
  145. return 0;
  146. }
  147. *addr = 0;
  148. return -EINVAL;
  149. }
/*
 * Resolve every symbol referenced by a klp relocation section: parse
 * the ".klp.sym.objname.symname,sympos" encoded names, look up the live
 * addresses via kallsyms, and write them into the symbol table so
 * apply_relocate_add() can use them.
 *
 * Returns 0 on success, -EINVAL on a malformed entry, or the error from
 * a failed symbol lookup.
 */
static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		/* Record the resolved address for apply_relocate_add(). */
		sym->st_value = addr;
	}

	return 0;
}
/*
 * Apply every ".klp.rela.<objname>." section of the patch module @pmod
 * that targets @obj, resolving livepatch symbols first. The target
 * object must already be loaded. Returns 0 on success, negative errno
 * on the first failure.
 */
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		/* Skip sections aimed at a different object. */
		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}
/*
 * Start reverting @patch by kicking off a transition to the
 * KLP_UNPATCHED state. The caller must hold klp_mutex.
 *
 * Returns -EINVAL if the patch is not enabled, -EBUSY if another
 * transition is in flight or a later stacked patch is still enabled.
 */
static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	/* Only one transition may be in progress at a time. */
	if (klp_transition_patch)
		return -EBUSY;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->enabled)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	/* Run pre-unpatch callbacks only for objects that were patched. */
	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = false;

	return 0;
}
  267. /**
  268. * klp_disable_patch() - disables a registered patch
  269. * @patch: The registered, enabled patch to be disabled
  270. *
  271. * Unregisters the patched functions from ftrace.
  272. *
  273. * Return: 0 on success, otherwise error
  274. */
  275. int klp_disable_patch(struct klp_patch *patch)
  276. {
  277. int ret;
  278. mutex_lock(&klp_mutex);
  279. if (!klp_is_patch_registered(patch)) {
  280. ret = -EINVAL;
  281. goto err;
  282. }
  283. if (!patch->enabled) {
  284. ret = -EINVAL;
  285. goto err;
  286. }
  287. ret = __klp_disable_patch(patch);
  288. err:
  289. mutex_unlock(&klp_mutex);
  290. return ret;
  291. }
  292. EXPORT_SYMBOL_GPL(klp_disable_patch);
/*
 * Start applying @patch: take a reference on the patch module, patch
 * every loaded object and kick off the transition to the KLP_PATCHED
 * state. The caller must hold klp_mutex.
 */
static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	/* Only one transition may be in progress at a time. */
	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    !list_prev_entry(patch, list)->enabled)
		return -EBUSY;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		/* Not-yet-loaded modules are handled by klp_module_coming(). */
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = true;

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	/* Undo klp_init_transition() and any partially patched objects. */
	klp_cancel_transition();
	return ret;
}
  346. /**
  347. * klp_enable_patch() - enables a registered patch
  348. * @patch: The registered, disabled patch to be enabled
  349. *
  350. * Performs the needed symbol lookups and code relocations,
  351. * then registers the patched functions with ftrace.
  352. *
  353. * Return: 0 on success, otherwise error
  354. */
  355. int klp_enable_patch(struct klp_patch *patch)
  356. {
  357. int ret;
  358. mutex_lock(&klp_mutex);
  359. if (!klp_is_patch_registered(patch)) {
  360. ret = -EINVAL;
  361. goto err;
  362. }
  363. ret = __klp_enable_patch(patch);
  364. err:
  365. mutex_unlock(&klp_mutex);
  366. return ret;
  367. }
  368. EXPORT_SYMBOL_GPL(klp_enable_patch);
  369. /*
  370. * Sysfs Interface
  371. *
  372. * /sys/kernel/livepatch
  373. * /sys/kernel/livepatch/<patch>
  374. * /sys/kernel/livepatch/<patch>/enabled
  375. * /sys/kernel/livepatch/<patch>/transition
  376. * /sys/kernel/livepatch/<patch>/signal
  377. * /sys/kernel/livepatch/<patch>/force
  378. * /sys/kernel/livepatch/<patch>/<object>
  379. * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
  380. */
/*
 * sysfs write handler for /sys/kernel/livepatch/<patch>/enabled.
 *
 * Writing a boolean toggles the patch: enables it, disables it, or --
 * when the patch is currently in transition -- reverses the in-flight
 * transition. Returns @count on success, negative errno otherwise.
 */
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		/*
		 * Module with the patch could either disappear meanwhile or is
		 * not properly initialized yet.
		 */
		ret = -EINVAL;
		goto err;
	}

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (patch == klp_transition_patch) {
		/* Mid-transition: flip the direction instead of restarting. */
		klp_reverse_transition();
	} else if (enabled) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
  422. static ssize_t enabled_show(struct kobject *kobj,
  423. struct kobj_attribute *attr, char *buf)
  424. {
  425. struct klp_patch *patch;
  426. patch = container_of(kobj, struct klp_patch, kobj);
  427. return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
  428. }
  429. static ssize_t transition_show(struct kobject *kobj,
  430. struct kobj_attribute *attr, char *buf)
  431. {
  432. struct klp_patch *patch;
  433. patch = container_of(kobj, struct klp_patch, kobj);
  434. return snprintf(buf, PAGE_SIZE-1, "%d\n",
  435. patch == klp_transition_patch);
  436. }
  437. static ssize_t signal_store(struct kobject *kobj, struct kobj_attribute *attr,
  438. const char *buf, size_t count)
  439. {
  440. struct klp_patch *patch;
  441. int ret;
  442. bool val;
  443. ret = kstrtobool(buf, &val);
  444. if (ret)
  445. return ret;
  446. if (!val)
  447. return count;
  448. mutex_lock(&klp_mutex);
  449. patch = container_of(kobj, struct klp_patch, kobj);
  450. if (patch != klp_transition_patch) {
  451. mutex_unlock(&klp_mutex);
  452. return -EINVAL;
  453. }
  454. klp_send_signals();
  455. mutex_unlock(&klp_mutex);
  456. return count;
  457. }
/*
 * sysfs write handler for /sys/kernel/livepatch/<patch>/force.
 *
 * Writing "1" while this patch is in transition calls
 * klp_force_transition(); presumably this forces the remaining tasks to
 * the target state as a last resort -- see transition.c to confirm.
 * Writing "0" is accepted but does nothing. Only valid for the patch
 * currently in transition.
 */
static ssize_t force_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool val;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	/* "0" parses fine but requests nothing. */
	if (!val)
		return count;

	mutex_lock(&klp_mutex);

	patch = container_of(kobj, struct klp_patch, kobj);
	if (patch != klp_transition_patch) {
		mutex_unlock(&klp_mutex);
		return -EINVAL;
	}

	klp_force_transition();

	mutex_unlock(&klp_mutex);

	return count;
}
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute signal_kobj_attr = __ATTR_WO(signal);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);

/* Default attributes of every /sys/kernel/livepatch/<patch>/ directory. */
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&signal_kobj_attr.attr,
	&force_kobj_attr.attr,
	NULL
};
static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	/* Wake whoever is blocked in wait_for_completion(&patch->finish). */
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

/*
 * Object and func kobjects are embedded in caller-owned structures, so
 * their release callbacks have nothing to free.
 */
static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};

static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};
  515. /*
  516. * Free all functions' kobjects in the array up to some limit. When limit is
  517. * NULL, all kobjects are freed.
  518. */
  519. static void klp_free_funcs_limited(struct klp_object *obj,
  520. struct klp_func *limit)
  521. {
  522. struct klp_func *func;
  523. for (func = obj->funcs; func->old_name && func != limit; func++)
  524. kobject_put(&func->kobj);
  525. }
  526. /* Clean up when a patched object is unloaded */
  527. static void klp_free_object_loaded(struct klp_object *obj)
  528. {
  529. struct klp_func *func;
  530. obj->mod = NULL;
  531. klp_for_each_func(obj, func)
  532. func->old_addr = 0;
  533. }
  534. /*
  535. * Free all objects' kobjects in the array up to some limit. When limit is
  536. * NULL, all kobjects are freed.
  537. */
  538. static void klp_free_objects_limited(struct klp_patch *patch,
  539. struct klp_object *limit)
  540. {
  541. struct klp_object *obj;
  542. for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
  543. klp_free_funcs_limited(obj, NULL);
  544. kobject_put(&obj->kobj);
  545. }
  546. }
/*
 * Release all object/func kobjects of @patch and unlink it from the
 * global patch list. The patch kobject itself is put by the caller.
 * Must be called with klp_mutex held.
 */
static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	/*
	 * NOTE(review): presumably guards a patch that never made it onto
	 * the list -- confirm against all callers.
	 */
	if (!list_empty(&patch->list))
		list_del(&patch->list);
}
/*
 * Validate one klp_func and create its sysfs entry under the object's
 * directory. func->old_addr is resolved later, when the target object
 * is (or becomes) loaded.
 */
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	/* Both the target symbol name and the replacement are mandatory. */
	if (!func->old_name || !func->new_func)
		return -EINVAL;

	if (strlen(func->old_name) >= KSYM_NAME_LEN)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/* The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}
/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
	/* Intentionally empty: weak default for arches with no extra work. */
}
/* parts of the initialization that is done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	/*
	 * The patch module's sections are normally read-only; temporarily
	 * make them writable (serialized by text_mutex) so the klp
	 * relocations can be applied.
	 */
	mutex_lock(&text_mutex);

	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		mutex_unlock(&text_mutex);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	mutex_unlock(&text_mutex);

	klp_for_each_func(obj, func) {
		/* Resolve the address of the function being replaced. */
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;

		/* Record the sizes of the old and new function bodies. */
		ret = kallsyms_lookup_size_offset(func->old_addr,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}
/*
 * Initialize one klp_object: create its sysfs directory, init all of
 * its funcs and, when the target object is already loaded, do the
 * load-time part as well. On failure, every kobject created so far for
 * this object is released.
 */
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	/* Sets obj->mod when the target module is currently live. */
	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	/* Release only the func kobjects created before the failure. */
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}
/*
 * Initialize @patch under klp_mutex: create the sysfs hierarchy and add
 * the patch to the global list. On failure everything created so far is
 * torn down and the kobject release callback is awaited.
 */
static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->enabled = false;
	init_completion(&patch->finish);

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret) {
		/*
		 * NOTE(review): kobject_init_and_add() failure normally
		 * requires kobject_put() -- confirm this early return does
		 * not leak the initialized kobject reference.
		 */
		mutex_unlock(&klp_mutex);
		return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	/* Release the objects initialized before the failing one. */
	klp_free_objects_limited(patch, obj);

	mutex_unlock(&klp_mutex);

	/* Drop the last reference and wait for the release callback. */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	return ret;
}
/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	/* An enabled patch cannot be torn down from under running tasks. */
	if (patch->enabled) {
		ret = -EBUSY;
		goto err;
	}

	klp_free_patch(patch);

	mutex_unlock(&klp_mutex);

	/* Drop the sysfs reference and wait for the release callback. */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	return 0;
err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);
/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * There is no need to take the reference on the patch module here. It is done
 * later when the patch is enabled.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	if (!patch || !patch->mod)
		return -EINVAL;

	/* The patch module must declare itself as a livepatch module. */
	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	/* klp_init() must have created the sysfs root first. */
	if (!klp_initialized())
		return -ENODEV;

	/* The consistency model needs reliable stack traces. */
	if (!klp_have_reliable_stack()) {
		pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
		return -ENOSYS;
	}

	return klp_init_patch(patch);
}
EXPORT_SYMBOL_GPL(klp_register_patch);
/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 *
 * Called with klp_mutex held (from klp_module_coming()/klp_module_going()).
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	list_for_each_entry(patch, &klp_patches, list) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			/*
			 * Only unpatch the module if the patch is enabled or
			 * is in transition.
			 */
			if (patch->enabled || patch == klp_transition_patch) {

				if (patch != klp_transition_patch)
					klp_pre_unpatch_callback(obj);

				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_unpatch_object(obj);

				klp_post_unpatch_callback(obj);
			}

			/* Forget the addresses that die with the module. */
			klp_free_object_loaded(obj);
			break;
		}
	}
}
/*
 * Hook called by the module loader while @mod is in the COMING state:
 * apply every registered patch that targets this module. A non-zero
 * return value makes the loader refuse the module.
 */
int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			/*
			 * Only patch the module if the patch is enabled or is
			 * in transition.
			 */
			if (!patch->enabled && patch != klp_transition_patch)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			/* At most one object per patch matches this module. */
			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	obj->mod = NULL;
	/* Revert patches applied before the failing one. */
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}
/*
 * Hook called by the module loader when @mod is unloading (or failed
 * while COMING): revert and detach any patches applied to it.
 */
void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}
  853. static int __init klp_init(void)
  854. {
  855. int ret;
  856. ret = klp_check_compiler_support();
  857. if (ret) {
  858. pr_info("Your compiler is too old; turning off.\n");
  859. return -EINVAL;
  860. }
  861. klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
  862. if (!klp_root_kobj)
  863. return -ENOMEM;
  864. return 0;
  865. }
  866. module_init(klp_init);