  1. /*
  2. * Common Block IO controller cgroup interface
  3. *
  4. * Based on ideas and code from CFQ, CFS and BFQ:
  5. * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  6. *
  7. * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
  8. * Paolo Valente <paolo.valente@unimore.it>
  9. *
  10. * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
  11. * Nauman Rafique <nauman@google.com>
  12. *
  13. * For policy-specific per-blkcg data:
  14. * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
  15. * Arianna Avanzini <avanzini.arianna@gmail.com>
  16. */
  17. #include <linux/ioprio.h>
  18. #include <linux/kdev_t.h>
  19. #include <linux/module.h>
  20. #include <linux/sched/signal.h>
  21. #include <linux/err.h>
  22. #include <linux/blkdev.h>
  23. #include <linux/backing-dev.h>
  24. #include <linux/slab.h>
  25. #include <linux/genhd.h>
  26. #include <linux/delay.h>
  27. #include <linux/atomic.h>
  28. #include <linux/ctype.h>
  29. #include <linux/blk-cgroup.h>
  30. #include <linux/tracehook.h>
  31. #include "blk.h"
/* maximum length of a per-blkg configuration key (MAJ:MIN prefix) */
#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

/* the root blkcg; blkg_free() and blkg_alloc() special-case it for rl */
struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

/* registered policies, indexed by plid; protected by blkcg_pol_mutex */
static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

static bool blkcg_debug_stats = false;
  48. static bool blkcg_policy_enabled(struct request_queue *q,
  49. const struct blkcg_policy *pol)
  50. {
  51. return pol && test_bit(pol->plid, q->blkcg_pols);
  52. }
/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	/* release per-policy data for every slot that got allocated */
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	/* the root blkg uses @q->root_rl and never initialized its own rl */
	if (blkg->blkcg != &blkcg_root)
		blk_exit_rl(blkg->q, &blkg->rl);

	blkg_rwstat_exit(&blkg->stat_ios);
	blkg_rwstat_exit(&blkg->stat_bytes);
	kfree(blkg);
}
/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	/* initial reference; dropped by blkg_destroy() */
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	/* blkg_free() copes with a partially constructed blkg */
	blkg_free(blkg);
	return NULL;
}
/**
 * blkg_lookup_slowpath - radix-tree lookup of the blkg for a blkcg/queue pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update the lookup hint (requires @q->queue_lock)
 *
 * Returns the matching blkg or %NULL if none exists for @q.
 */
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
/*
 * Allocate (if necessary), initialize and insert a blkg for @blkcg / @q.
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 * Called with the RCU read lock and @q->queue_lock held.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(q->backing_dev_info,
					       blkcg->css.id,
					       GFP_NOWAIT | __GFP_NOWARN);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent - callers create blkgs top-down from blkcg_root */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}
/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		/* find the highest ancestor which doesn't have a blkg yet */
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		/* done once @blkcg itself has a blkg or creation failed */
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}
/*
 * Unlink @blkg from its blkcg and request_queue and drop the creation
 * reference.  Caller must hold both @blkg->q->queue_lock and the blkcg lock.
 */
static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	struct blkcg_gq *parent = blkg->parent;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	/* fold this blkg's stats into the parent before it goes away */
	if (parent) {
		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}
  300. /**
  301. * blkg_destroy_all - destroy all blkgs associated with a request_queue
  302. * @q: request_queue of interest
  303. *
  304. * Destroy all blkgs associated with @q.
  305. */
  306. static void blkg_destroy_all(struct request_queue *q)
  307. {
  308. struct blkcg_gq *blkg, *n;
  309. lockdep_assert_held(q->queue_lock);
  310. list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
  311. struct blkcg *blkcg = blkg->blkcg;
  312. spin_lock(&blkcg->lock);
  313. blkg_destroy(blkg);
  314. spin_unlock(&blkcg->lock);
  315. }
  316. q->root_blkg = NULL;
  317. q->root_rl.blkg = NULL;
  318. }
/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);
/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl: the list
 * head @q->blkg_list stands in for root_rl and the root blkg's own node
 * is skipped while walking.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}
/*
 * cftype write callback: reset the byte/io stats of every blkg of @css's
 * blkcg, and let each enabled policy reset its own per-blkg stats.
 */
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	/* blkcg_pol_mutex stabilizes blkcg_policy[] across the walk */
	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_rwstat_reset(&blkg->stat_bytes);
		blkg_rwstat_reset(&blkg->stat_ios);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}
  395. const char *blkg_dev_name(struct blkcg_gq *blkg)
  396. {
  397. /* some drivers (floppy) instantiate a queue w/o disk registered */
  398. if (blkg->q->backing_dev_info->dev)
  399. return dev_name(blkg->q->backing_dev_info->dev);
  400. return NULL;
  401. }
  402. EXPORT_SYMBOL_GPL(blkg_dev_name);
/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		/* each blkg may live on a different queue; lock per blkg */
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);
  441. /**
  442. * __blkg_prfill_u64 - prfill helper for a single u64 value
  443. * @sf: seq_file to print to
  444. * @pd: policy private data of interest
  445. * @v: value to print
  446. *
  447. * Print @v to @sf for the device assocaited with @pd.
  448. */
  449. u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
  450. {
  451. const char *dname = blkg_dev_name(pd->blkg);
  452. if (!dname)
  453. return 0;
  454. seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
  455. return v;
  456. }
  457. EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.  Returns the
 * total (read + write + discard), which blkcg_print_blkgs() may sum up.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
		[BLKG_RWSTAT_DISCARD]	= "Discard",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

	/* "Total" counts actual I/O: sync/async are an alternate breakdown */
	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_DISCARD]);
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);
  491. /**
  492. * blkg_prfill_stat - prfill callback for blkg_stat
  493. * @sf: seq_file to print to
  494. * @pd: policy private data of interest
  495. * @off: offset to the blkg_stat in @pd
  496. *
  497. * prfill callback for printing a blkg_stat.
  498. */
  499. u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
  500. {
  501. return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
  502. }
  503. EXPORT_SYMBOL_GPL(blkg_prfill_stat);
  504. /**
  505. * blkg_prfill_rwstat - prfill callback for blkg_rwstat
  506. * @sf: seq_file to print to
  507. * @pd: policy private data of interest
  508. * @off: offset to the blkg_rwstat in @pd
  509. *
  510. * prfill callback for printing a blkg_rwstat.
  511. */
  512. u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
  513. int off)
  514. {
  515. struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);
  516. return __blkg_prfill_rwstat(sf, pd, &rwstat);
  517. }
  518. EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
/*
 * prfill callback for a blkg_rwstat embedded in the blkg itself rather
 * than in the policy data - @off is relative to struct blkcg_gq.
 */
static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
	/* the offset is into the blkg, hence the _field prfill variant */
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);
/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
	/* the offset is into the blkg, hence the _field prfill variant */
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
/*
 * Like blkg_prfill_rwstat_field() but prints the hierarchical sum
 * collected by blkg_rwstat_recursive_sum() (@off relative to the blkg).
 */
static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd,
					      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
							      NULL, off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);
/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);
/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_stat *stat;

		/* offline blkgs already folded their stats into the parent */
		if (!pos_blkg->online)
			continue;

		if (pol)
			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			stat = (void *)blkg + off;

		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);
/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_rwstat *rwstat;

		/* offline blkgs already folded their stats into the parent */
		if (!pos_blkg->online)
			continue;

		if (pol)
			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			rwstat = (void *)pos_blkg + off;

		/* aux count plus the live per-cpu counters of each entry */
		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
				&sum.aux_cnt[i]);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
/*
 * Performs queue bypass and policy enabled checks then looks up blkg.
 * Returns the blkg, %NULL if none exists yet, or an ERR_PTR() when the
 * policy is disabled on @q or @q is bypassing/dying.  Called with the RCU
 * read lock and @q->queue_lock held.
 */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
					  const struct blkcg_policy *pol,
					  struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	if (!blkcg_policy_enabled(q, pol))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	return __blkg_lookup(blkcg, q, true /* update_hint */);
}
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 *
 * Returns 0 on success, -errno on failure.  On -EBUSY (bypassing queue)
 * the syscall is restarted after a short sleep.
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct request_queue *q;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	int key_len, part, ret;
	char *body;

	/* @input starts with "MAJ:MIN"; %n records where the body begins */
	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	body = input + key_len;
	if (!isspace(*body))
		return -EINVAL;
	body = skip_spaces(body);

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return -ENODEV;
	/* only whole devices, not partitions, carry blkgs */
	if (part) {
		ret = -ENODEV;
		goto fail;
	}

	q = disk->queue;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_check(blkcg, pol, q);
	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto fail_unlock;
	}

	if (blkg)
		goto success;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent;
		struct blkcg_gq *new_blkg;

		/* find the highest ancestor that doesn't have a blkg yet */
		parent = blkcg_parent(blkcg);
		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		/* Drop locks to do new blkg allocation with GFP_KERNEL. */
		spin_unlock_irq(q->queue_lock);
		rcu_read_unlock();

		new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto fail;
		}

		rcu_read_lock();
		spin_lock_irq(q->queue_lock);

		/*
		 * Re-check after reacquiring the locks: someone else may
		 * have created the blkg, or the queue/policy state may
		 * have changed while we slept in the allocator.
		 */
		blkg = blkg_lookup_check(pos, pol, q);
		if (IS_ERR(blkg)) {
			ret = PTR_ERR(blkg);
			goto fail_unlock;
		}

		if (blkg) {
			/* lost the race; discard our preallocated blkg */
			blkg_free(new_blkg);
		} else {
			blkg = blkg_create(pos, q, new_blkg);
			if (unlikely(IS_ERR(blkg))) {
				ret = PTR_ERR(blkg);
				goto fail_unlock;
			}
		}

		if (pos == blkcg)
			goto success;
	}
success:
	/* return with rcu read lock and @q->queue_lock still held */
	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = body;
	return 0;

fail_unlock:
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
fail:
	put_disk_and_module(disk);
	/*
	 * If queue was bypassing, we should retry.  Do so after a
	 * short msleep().  It isn't strictly necessary but queue
	 * can be bypassing for some time and it's always nice to
	 * avoid busy looping.
	 */
	if (ret == -EBUSY) {
		msleep(10);
		ret = restart_syscall();
	}
	return ret;
}
/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	/* drop the queue lock and RCU read lock taken by blkg_conf_prep() */
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	/* release the gendisk reference acquired via get_gendisk() */
	put_disk_and_module(ctx->disk);
}
  807. static int blkcg_print_stat(struct seq_file *sf, void *v)
  808. {
  809. struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
  810. struct blkcg_gq *blkg;
  811. rcu_read_lock();
  812. hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
  813. const char *dname;
  814. char *buf;
  815. struct blkg_rwstat rwstat;
  816. u64 rbytes, wbytes, rios, wios, dbytes, dios;
  817. size_t size = seq_get_buf(sf, &buf), off = 0;
  818. int i;
  819. bool has_stats = false;
  820. spin_lock_irq(blkg->q->queue_lock);
  821. if (!blkg->online)
  822. goto skip;
  823. dname = blkg_dev_name(blkg);
  824. if (!dname)
  825. goto skip;
  826. /*
  827. * Hooray string manipulation, count is the size written NOT
  828. * INCLUDING THE \0, so size is now count+1 less than what we
  829. * had before, but we want to start writing the next bit from
  830. * the \0 so we only add count to buf.
  831. */
  832. off += scnprintf(buf+off, size-off, "%s ", dname);
  833. rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
  834. offsetof(struct blkcg_gq, stat_bytes));
  835. rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
  836. wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
  837. dbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
  838. rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
  839. offsetof(struct blkcg_gq, stat_ios));
  840. rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
  841. wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
  842. dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
  843. if (rbytes || wbytes || rios || wios) {
  844. has_stats = true;
  845. off += scnprintf(buf+off, size-off,
  846. "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
  847. rbytes, wbytes, rios, wios,
  848. dbytes, dios);
  849. }
  850. if (!blkcg_debug_stats)
  851. goto next;
  852. if (atomic_read(&blkg->use_delay)) {
  853. has_stats = true;
  854. off += scnprintf(buf+off, size-off,
  855. " use_delay=%d delay_nsec=%llu",
  856. atomic_read(&blkg->use_delay),
  857. (unsigned long long)atomic64_read(&blkg->delay_nsec));
  858. }
  859. for (i = 0; i < BLKCG_MAX_POLS; i++) {
  860. struct blkcg_policy *pol = blkcg_policy[i];
  861. size_t written;
  862. if (!blkg->pd[i] || !pol->pd_stat_fn)
  863. continue;
  864. written = pol->pd_stat_fn(blkg->pd[i], buf+off, size-off);
  865. if (written)
  866. has_stats = true;
  867. off += written;
  868. }
  869. next:
  870. if (has_stats) {
  871. if (off < size - 1) {
  872. off += scnprintf(buf+off, size-off, "\n");
  873. seq_commit(sf, off);
  874. } else {
  875. seq_commit(sf, -1);
  876. }
  877. }
  878. skip:
  879. spin_unlock_irq(blkg->q->queue_lock);
  880. }
  881. rcu_read_unlock();
  882. return 0;
  883. }
/* interface files for the default (cgroup2) hierarchy */
static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,	/* not created in the root cgroup */
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};
/* interface files for the legacy (cgroup1 "blkio") hierarchy */
static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};
  899. /*
  900. * blkcg destruction is a three-stage process.
  901. *
  902. * 1. Destruction starts. The blkcg_css_offline() callback is invoked
  903. * which offlines writeback. Here we tie the next stage of blkg destruction
  904. * to the completion of writeback associated with the blkcg. This lets us
  905. * avoid punting potentially large amounts of outstanding writeback to root
  906. * while maintaining any ongoing policies. The next stage is triggered when
 * the cgwb reference count (cgwb_refcnt) goes to zero.
 *
 * 2. When the cgwb refcount goes to zero, blkcg_destroy_blkgs() is called
  910. * and handles the destruction of blkgs. Here the css reference held by
  911. * the blkg is put back eventually allowing blkcg_css_free() to be called.
  912. * This work may occur in cgwb_release_workfn() on the cgwb_release
  913. * workqueue. Any submitted ios that fail to get the blkg ref will be
  914. * punted to the root_blkg.
  915. *
  916. * 3. Once the blkcg ref count goes to zero, blkcg_css_free() is called.
  917. * This finally frees the blkcg.
  918. */
/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away.  Here the cgwbs are
 * offlined first and only once writeback associated with the blkcg has
 * finished do we start step 2 (see above).
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	/* this prevents anyone from attaching or migrating to this blkcg */
	wb_blkcg_offline(blkcg);

	/*
	 * Put the base cgwb reference allowing step 2 to be triggered.
	 * blkcg_destroy_blkgs() runs once the last cgwb ref is gone.
	 */
	blkcg_cgwb_put(blkcg);
}
/**
 * blkcg_destroy_blkgs - responsible for shooting down blkgs
 * @blkcg: blkcg of interest
 *
 * blkgs should be removed while holding both q and blkcg locks.  As blkcg lock
 * is nested inside q lock, this function performs reverse double lock dancing.
 * Destroying the blkgs releases the reference held on the blkcg's css allowing
 * blkcg_css_free to eventually be called.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		/*
		 * The queue lock nests OUTSIDE the blkcg lock, so we may
		 * only trylock it here.  On failure, back off completely
		 * and retry to avoid a lock-order inversion deadlock.
		 */
		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
}
  964. static void blkcg_css_free(struct cgroup_subsys_state *css)
  965. {
  966. struct blkcg *blkcg = css_to_blkcg(css);
  967. int i;
  968. mutex_lock(&blkcg_pol_mutex);
  969. list_del(&blkcg->all_blkcgs_node);
  970. for (i = 0; i < BLKCG_MAX_POLS; i++)
  971. if (blkcg->cpd[i])
  972. blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
  973. mutex_unlock(&blkcg_pol_mutex);
  974. kfree(blkcg);
  975. }
/*
 * css_alloc callback: allocate and set up a new blkcg (or reuse the
 * static blkcg_root when there is no parent).  Allocates per-cgroup
 * policy data for every registered policy that wants it.  Returns the
 * embedded css on success or an ERR_PTR() on allocation failure.
 */
static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		/* root cgroup: use the statically allocated blkcg_root */
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto unlock;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS ; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
	/* base ref; dropped in blkcg_css_offline() to trigger blkg teardown */
	refcount_set(&blkcg->cgwb_refcnt, 1);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	/* unwind only the cpds allocated so far (indices < i) */
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	/* blkcg_root is static and must never be freed */
	if (blkcg != &blkcg_root)
		kfree(blkcg);
unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	/* preload so blkg_create()'s radix insertion can't fail on -ENOMEM */
	preloaded = !radix_tree_preload(GFP_KERNEL);

	/* Make sure the root blkg exists. */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	if (IS_ERR(blkg))
		goto err_unlock;
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	ret = blk_iolatency_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
		return ret;
	}

	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
		/*
		 * NOTE(review): blk_iolatency state set up above is not
		 * explicitly torn down on this path — confirm whether its
		 * cleanup is handled elsewhere (e.g. queue release).
		 */
	}
	return ret;

err_unlock:
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
	if (preloaded)
		radix_tree_preload_end();
	return PTR_ERR(blkg);
}
/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 * Caller must hold @q->queue_lock.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg. If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}
/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part:
 * destroys all blkgs attached to @q and tears down throttling state.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}
  1116. /*
  1117. * We cannot support shared io contexts, as we have no mean to support
  1118. * two tasks with the same ioc in two different groups without major rework
  1119. * of the main cic data structures. For now we allow a task to change
  1120. * its cgroup only if it's the only owner of its ioc.
  1121. */
  1122. static int blkcg_can_attach(struct cgroup_taskset *tset)
  1123. {
  1124. struct task_struct *task;
  1125. struct cgroup_subsys_state *dst_css;
  1126. struct io_context *ioc;
  1127. int ret = 0;
  1128. /* task_lock() is needed to avoid races with exit_io_context() */
  1129. cgroup_taskset_for_each(task, dst_css, tset) {
  1130. task_lock(task);
  1131. ioc = task->io_context;
  1132. if (ioc && atomic_read(&ioc->nr_tasks) > 1)
  1133. ret = -EINVAL;
  1134. task_unlock(task);
  1135. if (ret)
  1136. break;
  1137. }
  1138. return ret;
  1139. }
  1140. static void blkcg_bind(struct cgroup_subsys_state *root_css)
  1141. {
  1142. int i;
  1143. mutex_lock(&blkcg_pol_mutex);
  1144. for (i = 0; i < BLKCG_MAX_POLS; i++) {
  1145. struct blkcg_policy *pol = blkcg_policy[i];
  1146. struct blkcg *blkcg;
  1147. if (!pol || !pol->cpd_bind_fn)
  1148. continue;
  1149. list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
  1150. if (blkcg->cpd[pol->plid])
  1151. pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
  1152. }
  1153. mutex_unlock(&blkcg_pol_mutex);
  1154. }
  1155. static void blkcg_exit(struct task_struct *tsk)
  1156. {
  1157. if (tsk->throttle_queue)
  1158. blk_put_queue(tsk->throttle_queue);
  1159. tsk->throttle_queue = NULL;
  1160. }
/*
 * The IO controller's cgroup subsystem.  Shows up as "io" on the default
 * hierarchy and as "blkio" (legacy_name) on cgroup v1.
 */
struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
	.exit = blkcg_exit,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);
/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registerations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* quiesce the queue: freeze for blk-mq, bypass for legacy */
	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);
pd_prealloc:
	/*
	 * Keep one pd preallocated with GFP_KERNEL (locks dropped) so the
	 * GFP_NOWAIT allocation under the queue lock always has a fallback.
	 */
	if (!pd_prealloc) {
		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
		if (!pd_prealloc) {
			ret = -ENOMEM;
			goto out_bypass_end;
		}
	}

	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
		if (!pd)
			swap(pd, pd_prealloc);	/* consume the prealloc'd pd */
		if (!pd) {
			/* prealloc exhausted too; drop lock and refill */
			spin_unlock_irq(q->queue_lock);
			goto pd_prealloc;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(q->queue_lock);
out_bypass_end:
	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().  Offlines and frees @pol's policy data on
 * every blkg attached to @q.
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	/* quiesce the queue: freeze for blk-mq, bypass for legacy */
	if (q->mq_ops)
		blk_mq_freeze_queue(q);
	else
		blk_queue_bypass_start(q);

	spin_lock_irq(q->queue_lock);

	/* clear the enabled bit first so lookups stop seeing the pds */
	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (blkg->pd[pol->plid]) {
			if (pol->pd_offline_fn)
				pol->pd_offline_fn(blkg->pd[pol->plid]);
			pol->pd_free_fn(blkg->pd[pol->plid]);
			blkg->pd[pol->plid] = NULL;
		}
	}

	spin_unlock_irq(q->queue_lock);

	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
  1283. /**
  1284. * blkcg_policy_register - register a blkcg policy
  1285. * @pol: blkcg policy to register
  1286. *
  1287. * Register @pol with blkcg core. Might sleep and @pol may be modified on
  1288. * successful registration. Returns 0 on success and -errno on failure.
  1289. */
  1290. int blkcg_policy_register(struct blkcg_policy *pol)
  1291. {
  1292. struct blkcg *blkcg;
  1293. int i, ret;
  1294. mutex_lock(&blkcg_pol_register_mutex);
  1295. mutex_lock(&blkcg_pol_mutex);
  1296. /* find an empty slot */
  1297. ret = -ENOSPC;
  1298. for (i = 0; i < BLKCG_MAX_POLS; i++)
  1299. if (!blkcg_policy[i])
  1300. break;
  1301. if (i >= BLKCG_MAX_POLS) {
  1302. pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
  1303. goto err_unlock;
  1304. }
  1305. /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
  1306. if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
  1307. (!pol->pd_alloc_fn ^ !pol->pd_free_fn))
  1308. goto err_unlock;
  1309. /* register @pol */
  1310. pol->plid = i;
  1311. blkcg_policy[pol->plid] = pol;
  1312. /* allocate and install cpd's */
  1313. if (pol->cpd_alloc_fn) {
  1314. list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
  1315. struct blkcg_policy_data *cpd;
  1316. cpd = pol->cpd_alloc_fn(GFP_KERNEL);
  1317. if (!cpd)
  1318. goto err_free_cpds;
  1319. blkcg->cpd[pol->plid] = cpd;
  1320. cpd->blkcg = blkcg;
  1321. cpd->plid = pol->plid;
  1322. pol->cpd_init_fn(cpd);
  1323. }
  1324. }
  1325. mutex_unlock(&blkcg_pol_mutex);
  1326. /* everything is in place, add intf files for the new policy */
  1327. if (pol->dfl_cftypes)
  1328. WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
  1329. pol->dfl_cftypes));
  1330. if (pol->legacy_cftypes)
  1331. WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
  1332. pol->legacy_cftypes));
  1333. mutex_unlock(&blkcg_pol_register_mutex);
  1334. return 0;
  1335. err_free_cpds:
  1336. if (pol->cpd_free_fn) {
  1337. list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
  1338. if (blkcg->cpd[pol->plid]) {
  1339. pol->cpd_free_fn(blkcg->cpd[pol->plid]);
  1340. blkcg->cpd[pol->plid] = NULL;
  1341. }
  1342. }
  1343. }
  1344. blkcg_policy[pol->plid] = NULL;
  1345. err_unlock:
  1346. mutex_unlock(&blkcg_pol_mutex);
  1347. mutex_unlock(&blkcg_pol_register_mutex);
  1348. return ret;
  1349. }
  1350. EXPORT_SYMBOL_GPL(blkcg_policy_register);
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_free_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
/*
 * Scale the accumulated delay based on how long it has been since we updated
 * the delay.  We only call this when we are adding delay, in case it's been a
 * while since we added delay, and when we are checking to see if we need to
 * delay a task, to account for any delays that may have occurred.
 */
static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
{
	u64 old = atomic64_read(&blkg->delay_start);

	/*
	 * We only want to scale down every second.  The idea here is that we
	 * want to delay people for min(delay_nsec, NSEC_PER_SEC) in a certain
	 * time window.  We only want to throttle tasks for recent delay that
	 * has occurred, in 1 second time windows since that's the maximum
	 * things can be throttled.  We save the current delay window in
	 * blkg->last_delay so we know what amount is still left to be charged
	 * to the blkg from this point onward.  blkg->last_use keeps track of
	 * the use_delay counter.  The idea is if we're unthrottling the blkg we
	 * are ok with whatever is happening now, and we can take away more of
	 * the accumulated delay as we've already throttled enough that
	 * everybody is happy with their IO latencies.
	 */
	if (time_before64(old + NSEC_PER_SEC, now) &&
	    atomic64_cmpxchg(&blkg->delay_start, old, now) == old) {
		/* the cmpxchg ensures only one caller scales this window */
		u64 cur = atomic64_read(&blkg->delay_nsec);
		u64 sub = min_t(u64, blkg->last_delay, now - old);
		int cur_use = atomic_read(&blkg->use_delay);

		/*
		 * We've been unthrottled, subtract a larger chunk of our
		 * accumulated delay.
		 */
		if (cur_use < blkg->last_use)
			sub = max_t(u64, sub, blkg->last_delay >> 1);

		/*
		 * This shouldn't happen, but handle it anyway.  Our delay_nsec
		 * should only ever be growing except here where we subtract out
		 * min(last_delay, 1 second), but lord knows bugs happen and I'd
		 * rather not end up with negative numbers.
		 */
		if (unlikely(cur < sub)) {
			atomic64_set(&blkg->delay_nsec, 0);
			blkg->last_delay = 0;
		} else {
			atomic64_sub(sub, &blkg->delay_nsec);
			blkg->last_delay = cur - sub;
		}
		blkg->last_use = cur_use;
	}
}
/*
 * This is called when we want to actually walk up the hierarchy and check to
 * see if we need to throttle, and then actually throttle if there is some
 * accumulated delay.  This should only be called upon return to user space so
 * we're not holding some lock that would induce a priority inversion.
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
	u64 now = ktime_to_ns(ktime_get());
	u64 exp;
	u64 delay_nsec = 0;
	int tok;

	/* take the largest pending delay found anywhere up the hierarchy */
	while (blkg->parent) {
		if (atomic_read(&blkg->use_delay)) {
			blkcg_scale_delay(blkg, now);
			delay_nsec = max_t(u64, delay_nsec,
					   atomic64_read(&blkg->delay_nsec));
		}
		blkg = blkg->parent;
	}

	if (!delay_nsec)
		return;

	/*
	 * Let's not sleep for all eternity if we've amassed a huge delay.
	 * Swapping or metadata IO can accumulate 10's of seconds worth of
	 * delay, and we want userspace to be able to do _something_ so cap
	 * the delays at 0.25s (250 * NSEC_PER_MSEC).  If there's 10's of
	 * seconds worth of delay then the tasks will be delayed for 0.25
	 * second for every syscall.
	 */
	delay_nsec = min_t(u64, delay_nsec, 250 * NSEC_PER_MSEC);

	/*
	 * TODO: the use_memdelay flag is going to be for the upcoming psi stuff
	 * that hasn't landed upstream yet.  Once that stuff is in place we need
	 * to do a psi_memstall_enter/leave if memdelay is set.
	 */

	exp = ktime_add_ns(now, delay_nsec);
	tok = io_schedule_prepare();
	do {
		/* TASK_KILLABLE so a fatal signal can cut the sleep short */
		__set_current_state(TASK_KILLABLE);
		if (!schedule_hrtimeout(&exp, HRTIMER_MODE_ABS))
			break;
	} while (!fatal_signal_pending(current));
	io_schedule_finish(tok);
}
/**
 * blkcg_maybe_throttle_current - throttle the current task if it has been marked
 *
 * This is only called if we've been marked with set_notify_resume().  Obviously
 * we can be set_notify_resume() for reasons other than blkcg throttling, so we
 * check to see if current->throttle_queue is set and if not this doesn't do
 * anything.  This should only ever be called by the resume code, it's not meant
 * to be called by people willy-nilly as it will actually do the work to
 * throttle the task if it is setup for throttling.
 */
void blkcg_maybe_throttle_current(void)
{
	struct request_queue *q = current->throttle_queue;
	struct cgroup_subsys_state *css;
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool use_memdelay = current->use_memdelay;

	if (!q)
		return;

	/* consume the pending throttle state; it is one-shot per syscall */
	current->throttle_queue = NULL;
	current->use_memdelay = false;

	rcu_read_lock();
	/* kthreads may run on behalf of a blkcg set via kthread_blkcg() */
	css = kthread_blkcg();
	if (css)
		blkcg = css_to_blkcg(css);
	else
		blkcg = css_to_blkcg(task_css(current, io_cgrp_id));

	if (!blkcg)
		goto out;
	blkg = blkg_lookup(blkcg, q);
	if (!blkg)
		goto out;
	/* pin the blkg so we can drop the RCU lock before sleeping */
	blkg = blkg_try_get(blkg);
	if (!blkg)
		goto out;
	rcu_read_unlock();

	blkcg_maybe_throttle_blkg(blkg, use_memdelay);
	blkg_put(blkg);
	blk_put_queue(q);
	return;
out:
	rcu_read_unlock();
	blk_put_queue(q);
}
EXPORT_SYMBOL_GPL(blkcg_maybe_throttle_current);
/**
 * blkcg_schedule_throttle - this task needs to check for throttling
 * @q: the request queue IO was submitted on
 * @use_memdelay: do we charge this to memory delay for PSI
 *
 * This is called by the IO controller when we know there's delay accumulated
 * for the blkg for this task.  We do not pass the blkg because there are places
 * we call this that may not have that information, the swapping code for
 * instance will only have a request_queue at that point.  This sets the
 * notify_resume for the task to check and see if it requires throttling before
 * returning to user space.
 *
 * We will only schedule once per syscall.  You can call this over and over
 * again and it will only do the check once upon return to user space, and only
 * throttle once.  If the task needs to be throttled again it'll need to be
 * re-set at the next time we see the task.
 */
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
{
	/* kthreads never return to user space, so resume-notify can't fire */
	if (unlikely(current->flags & PF_KTHREAD))
		return;

	if (!blk_get_queue(q))
		return;

	/* replace any previously stashed queue, dropping its reference */
	if (current->throttle_queue)
		blk_put_queue(current->throttle_queue);
	current->throttle_queue = q;
	if (use_memdelay)
		current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}
EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);
/**
 * blkcg_add_delay - add delay to this blkg
 * @blkg: blkg of interest
 * @now: the current time in nanoseconds
 * @delta: how many nanoseconds of delay to add
 *
 * Charge @delta to the blkg's current delay accumulation.  This is used to
 * throttle tasks if an IO controller thinks we need more throttling.
 */
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
{
	/* scale down any stale accumulated delay before charging more */
	blkcg_scale_delay(blkg, now);
	atomic64_add(delta, &blkg->delay_nsec);
}
EXPORT_SYMBOL_GPL(blkcg_add_delay);
/* expose blkcg_debug_stats (extra io.stat output, see blkcg_print_stat()) */
module_param(blkcg_debug_stats, bool, 0644);
MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");