dm-core.h

/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structure that is used directly by dm.c and dm-rq.c.
 * DM targets must _not_ dereference a mapped_device to directly access its members!
 */
struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;
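
	/*
	 * Usage sketch (illustrative, not part of this header): callers pair
	 * dm_get_live_table() with dm_put_live_table(), which take and drop
	 * the io_barrier SRCU read lock declared at the end of this struct:
	 *
	 *	int srcu_idx;
	 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	 *	if (map)
	 *		... look up the target for the bio in map ...
	 *	dm_put_live_table(md, srcu_idx);
	 */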
	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	wait_queue_head_t wait;
	atomic_t pending[2];
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;
	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct block_device *bdev;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool use_blk_mq:1;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
};
int md_in_flight(struct mapped_device *md);
void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
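
/*
 * The disable_* helpers are defined in dm.c; a minimal sketch of the
 * pattern they follow (illustrative, queue-limit field name assumed from
 * the block layer of this kernel era, not the verbatim implementation):
 *
 *	void disable_write_same(struct mapped_device *md)
 *	{
 *		// Device doesn't really support WRITE SAME: zero the limit
 *		// so the block layer stops issuing the op to this device.
 *		md->queue->limits.max_write_same_sectors = 0;
 *	}
 */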
static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
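
/*
 * Usage sketch (illustrative): because struct dm_kobject_holder embeds
 * both the kobject and a completion, a kobject release callback can
 * signal whoever is waiting for the sysfs entry to go away, e.g.:
 *
 *	static void dm_kobject_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 */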
unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
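
/*
 * __dm_get_module_param() is defined in dm.c; a minimal sketch of its
 * clamping behavior (illustrative, not the verbatim implementation):
 *
 *	unsigned param = READ_ONCE(*module_param);
 *
 *	if (!param)
 *		param = def;	// 0 means "use the built-in default"
 *	else if (param > max)
 *		param = max;	// cap out-of-range module parameter values
 *
 *	return param;
 */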
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
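
/*
 * Usage sketch (illustrative; nr_dirty is a hypothetical value): a
 * target's message handler can build its reply in result and use this
 * check to detect that the reply did not fit in the caller's buffer:
 *
 *	scnprintf(result, maxlen, "dirty %lu", nr_dirty);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		return 1;	// reply truncated; buffer too small
 */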
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);
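
/*
 * dm_issue_global_event() is implemented in dm.c; conceptually it bumps
 * the global event counter and wakes anyone polling for DM events
 * (sketch, not necessarily verbatim):
 *
 *	atomic_inc(&dm_global_event_nr);
 *	wake_up(&dm_global_eventq);
 */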
#endif