dm-core.h

/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structure that is used directly by dm.c and dm-rq.c
 * DM targets must _not_ dereference a mapped_device to directly access its members!
 */
struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	wait_queue_head_t wait;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct block_device *bdev;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	struct dm_stats stats;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
};
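
/*
 * Illustrative sketch, not part of the upstream header: per the comment on the
 * "map" member above, callers never dereference it directly; they take a
 * reference through dm_get_live_table()/dm_put_live_table() (declared in
 * <linux/device-mapper.h> and backed by the io_barrier SRCU domain). The
 * helper below is a hypothetical example of that pattern.
 */
#if 0
static unsigned example_count_live_targets(struct mapped_device *md)
{
	int srcu_idx;
	unsigned num_targets = 0;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);

	/* A NULL table is possible, e.g. before the first table load. */
	if (map)
		num_targets = dm_table_get_num_targets(map);

	dm_put_live_table(md, srcu_idx);
	return num_targets;
}
#endif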

void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
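
/*
 * Illustrative sketch, not part of the upstream header: the helper above lets
 * a kobject release callback signal whichever task is waiting for the embedded
 * kobject to disappear during device teardown (dm-sysfs.c uses this pattern).
 * The callback name below is hypothetical.
 */
#if 0
static void example_kobj_release(struct kobject *kobj)
{
	/* Wake the waiter blocked in wait_for_completion() on kobj_holder. */
	complete(dm_get_completion_from_kobject(kobj));
}
#endif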

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
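
/*
 * Illustrative sketch, not part of the upstream header: __dm_get_module_param()
 * reads a writable module parameter, substitutes a default when it is unset and
 * clamps it to a maximum such as DM_RESERVED_MAX_IOS. The getter below follows
 * that pattern with hypothetical names.
 */
#if 0
static unsigned example_reserved_ios = 16;	/* module_param() backed value */

unsigned example_get_reserved_ios(void)
{
	/* Use 16 when the parameter is 0/unset, never exceed the cap. */
	return __dm_get_module_param(&example_reserved_ios, 16, DM_RESERVED_MAX_IOS);
}
#endif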

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
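
/*
 * Illustrative sketch, not part of the upstream header: a target filling the
 * "result" buffer of a status/message handler can use the check above to
 * detect that the reply (plus its terminating NUL) no longer fits. Function
 * and message text below are hypothetical.
 */
#if 0
static void example_emit_status(char *result, unsigned maxlen)
{
	unsigned sz = 0;

	sz += scnprintf(result + sz, maxlen - sz, "errors=%u dirty=%u ", 0, 0);

	/* True once the string has filled the caller-supplied buffer. */
	if (dm_message_test_buffer_overflow(result, maxlen))
		pr_warn("example: status output truncated\n");
}
#endif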

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);
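
/*
 * Illustrative sketch, not part of the upstream header: dm_issue_global_event()
 * (defined in dm.c) pairs the two globals above, bumping the global event
 * counter and waking any waiters, along these lines:
 */
#if 0
void dm_issue_global_event(void)
{
	atomic_inc(&dm_global_event_nr);	/* make the new event visible */
	wake_up(&dm_global_eventq);		/* wake tasks sleeping on the queue */
}
#endif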

#endif /* DM_CORE_INTERNAL_H */