dm-core.h

/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024
#define DM_WAIT_DEV_MAX_TIME		5000

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structure that is used directly by dm.c and dm-rq.c.
 * DM targets must _not_ dereference a mapped_device to directly access its
 * members!
 */
struct mapped_device {
	struct mutex suspend_lock;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;
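	/*
	 * Illustrative sketch, not part of the original header: core code
	 * such as dm.c typically pairs dm_get_live_table() with
	 * dm_put_live_table() when it needs to dereference 'map', so the
	 * live table cannot be swapped or freed underneath it:
	 *
	 *	int srcu_idx;
	 *	struct dm_table *t = dm_get_live_table(md, &srcu_idx);
	 *
	 *	if (t)
	 *		... iterate the table's targets ...
	 *	dm_put_live_table(md, srcu_idx);
	 */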

	struct list_head table_devices;
	struct mutex table_devices_lock;

	unsigned long flags;

	struct request_queue *queue;
	int numa_node_id;

	enum dm_queue_mode type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	struct gendisk *disk;
	struct dax_device *dax_dev;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;
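	/*
	 * Illustrative sketch, not part of the original header: while the
	 * device is suspended, dm.c queues incoming bios roughly like
	 *
	 *	spin_lock_irqsave(&md->deferred_lock, flags);
	 *	bio_list_add(&md->deferred, bio);
	 *	spin_unlock_irqrestore(&md->deferred_lock, flags);
	 *	queue_work(md->wq, &md->work);
	 *
	 * and the work item replays the deferred bios once the device is
	 * resumed.
	 */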

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;

	struct bio_set *bs;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

	struct block_device *bdev;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool use_blk_mq:1;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
};

void dm_init_md_queue(struct mapped_device *md);
void dm_init_normal_md_queue(struct mapped_device *md);
int md_in_flight(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
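
/*
 * Illustrative sketch, not part of the original header: a sysfs release
 * callback can use the helper above to signal the teardown path that the
 * md kobject is gone, roughly:
 *
 *	static void dm_kobject_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 */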

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
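
/*
 * Illustrative sketch, not part of the original header: callers use the
 * helper above to clamp a writable module parameter to a sane range, along
 * the lines of
 *
 *	unsigned ios = __dm_get_module_param(&reserved_bio_based_ios,
 *					     RESERVED_BIO_BASED_IOS,
 *					     DM_RESERVED_MAX_IOS);
 *
 * (reserved_bio_based_ios and RESERVED_BIO_BASED_IOS are shown here only to
 * illustrate the calling pattern.)
 */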

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
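
/*
 * Illustrative sketch, not part of the original header: a message/status
 * handler that fills 'result' (e.g. via DMEMIT()) can use the helper above
 * to detect that its output was truncated, roughly:
 *
 *	DMEMIT("%llu", value);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		... stop emitting, the buffer is full ...
 */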

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;

void dm_issue_global_event(void);

#endif