dm-uevent.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Device Mapper Uevent Support (dm-uevent)
 *
 * Copyright IBM Corporation, 2007
 *	Author: Mike Anderson <andmike@linux.vnet.ibm.com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/dm-ioctl.h>
#include <linux/export.h>

#include "dm.h"
#include "dm-uevent.h"

#define DM_MSG_PREFIX "uevent"

static const struct {
	enum dm_uevent_type type;
	enum kobject_action action;
	char *name;
} _dm_uevent_type_names[] = {
	{DM_UEVENT_PATH_FAILED, KOBJ_CHANGE, "PATH_FAILED"},
	{DM_UEVENT_PATH_REINSTATED, KOBJ_CHANGE, "PATH_REINSTATED"},
};
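
/*
 * Note: dm_path_uevent() below indexes this table directly by
 * enum dm_uevent_type, so the entries must stay in the same order as
 * the enum values declared in dm-uevent.h.
 */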

static struct kmem_cache *_dm_event_cache;

struct dm_uevent {
	struct mapped_device *md;
	enum kobject_action action;
	struct kobj_uevent_env ku_env;
	struct list_head elist;
	char name[DM_NAME_LEN];
	char uuid[DM_UUID_LEN];
};

static void dm_uevent_free(struct dm_uevent *event)
{
	kmem_cache_free(_dm_event_cache, event);
}

static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md)
{
	struct dm_uevent *event;

	event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC);
	if (!event)
		return NULL;

	INIT_LIST_HEAD(&event->elist);
	event->md = md;

	return event;
}
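
/*
 * Note: dm_uevent_alloc() uses GFP_ATOMIC, presumably because path
 * events may be raised from contexts that must not sleep (for example
 * during I/O completion handling in a path-group target).
 */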

static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
					      struct dm_target *ti,
					      enum kobject_action action,
					      const char *dm_action,
					      const char *path,
					      unsigned nr_valid_paths)
{
	struct dm_uevent *event;

	event = dm_uevent_alloc(md);
	if (!event) {
		DMERR("%s: dm_uevent_alloc() failed", __func__);
		goto err_nomem;
	}

	event->action = action;

	if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
		DMERR("%s: add_uevent_var() for DM_TARGET failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
		DMERR("%s: add_uevent_var() for DM_ACTION failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
			   dm_next_uevent_seq(md))) {
		DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
		DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
			   nr_valid_paths)) {
		DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
		      __func__);
		goto err_add;
	}

	return event;

err_add:
	dm_uevent_free(event);
err_nomem:
	return ERR_PTR(-ENOMEM);
}
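
/*
 * Taken together with dm_send_uevents() below, a successfully delivered
 * path event carries the following environment variables to userspace:
 * DM_TARGET, DM_ACTION, DM_SEQNUM, DM_PATH and DM_NR_VALID_PATHS (set
 * here), plus DM_NAME and DM_UUID (added at send time).
 */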

/**
 * dm_send_uevents - send uevents for given list
 *
 * @events:	list of events to send
 * @kobj:	kobject generating event
 *
 */
void dm_send_uevents(struct list_head *events, struct kobject *kobj)
{
	int r;
	struct dm_uevent *event, *next;

	list_for_each_entry_safe(event, next, events, elist) {
		list_del_init(&event->elist);

		/*
		 * When a device is being removed this copy fails and we
		 * discard these unsent events.
		 */
		if (dm_copy_name_and_uuid(event->md, event->name,
					  event->uuid)) {
			DMINFO("%s: skipping sending uevent for lost device",
			       __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
			DMERR("%s: add_uevent_var() for DM_NAME failed",
			      __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
			DMERR("%s: add_uevent_var() for DM_UUID failed",
			      __func__);
			goto uevent_free;
		}

		r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
		if (r)
			DMERR("%s: kobject_uevent_env failed", __func__);
uevent_free:
		dm_uevent_free(event);
	}
}
EXPORT_SYMBOL_GPL(dm_send_uevents);
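
/*
 * Usage sketch (illustrative, not part of this file): the dm core is
 * expected to splice the per-device list of queued events onto a local
 * list and hand it to dm_send_uevents() together with the gendisk's
 * kobject, roughly:
 *
 *	LIST_HEAD(uevents);
 *
 *	spin_lock_irqsave(&md->uevent_lock, flags);
 *	list_splice_init(&md->uevent_list, &uevents);
 *	spin_unlock_irqrestore(&md->uevent_lock, flags);
 *
 *	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 *
 * The md->uevent_list and md->uevent_lock names describe the caller in
 * dm.c and are not defined here.
 */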

/**
 * dm_path_uevent - called to create a new path event and queue it
 *
 * @event_type:		path event type enum
 * @ti:			pointer to a dm_target
 * @path:		string containing pathname
 * @nr_valid_paths:	number of valid paths remaining
 *
 */
void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
		    const char *path, unsigned nr_valid_paths)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	struct dm_uevent *event;

	if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
		DMERR("%s: Invalid event_type %d", __func__, event_type);
		return;
	}

	event = dm_build_path_uevent(md, ti,
				     _dm_uevent_type_names[event_type].action,
				     _dm_uevent_type_names[event_type].name,
				     path, nr_valid_paths);
	if (IS_ERR(event))
		return;

	dm_uevent_add(md, &event->elist);
}
EXPORT_SYMBOL_GPL(dm_path_uevent);
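
/*
 * Usage sketch (illustrative, not part of this file): a multipath-style
 * target reporting a failed path would call something like
 *
 *	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 *		       pgpath->path.dev->name, nr_valid_paths);
 *
 * where m, pgpath and nr_valid_paths are placeholders for the target's
 * own state; only the dm_path_uevent() signature comes from this file.
 */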

int dm_uevent_init(void)
{
	_dm_event_cache = KMEM_CACHE(dm_uevent, 0);
	if (!_dm_event_cache)
		return -ENOMEM;

	DMINFO("version 1.0.3");

	return 0;
}

void dm_uevent_exit(void)
{
	kmem_cache_destroy(_dm_event_cache);
}