bfq-cgroup.c

/*
 * cgroups support for the BFQ I/O scheduler.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/elevator.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "bfq-iosched.h"

#if defined(CONFIG_BFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* bfqg stats flags */
enum bfqg_stats_flags {
        BFQG_stats_waiting = 0,
        BFQG_stats_idling,
        BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)                                             \
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)            \
{                                                                       \
        stats->flags |= (1 << BFQG_stats_##name);                       \
}                                                                       \
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)           \
{                                                                       \
        stats->flags &= ~(1 << BFQG_stats_##name);                      \
}                                                                       \
static int bfqg_stats_##name(struct bfqg_stats *stats)                  \
{                                                                       \
        return (stats->flags & (1 << BFQG_stats_##name)) != 0;          \
}                                                                       \

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
        u64 now;

        if (!bfqg_stats_waiting(stats))
                return;

        now = ktime_get_ns();
        if (now > stats->start_group_wait_time)
                blkg_stat_add(&stats->group_wait_time,
                              now - stats->start_group_wait_time);
        bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
                                                 struct bfq_group *curr_bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_waiting(stats))
                return;
        if (bfqg == curr_bfqg)
                return;
        stats->start_group_wait_time = ktime_get_ns();
        bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
        u64 now;

        if (!bfqg_stats_empty(stats))
                return;

        now = ktime_get_ns();
        if (now > stats->start_empty_time)
                blkg_stat_add(&stats->empty_time,
                              now - stats->start_empty_time);
        bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
        blkg_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (blkg_rwstat_total(&stats->queued))
                return;

        /*
         * The group is already marked empty. This can happen if bfqq got a
         * new request in its parent group and moved to this group while
         * being added to the service tree. Just ignore the event and move on.
         */
        if (bfqg_stats_empty(stats))
                return;

        stats->start_empty_time = ktime_get_ns();
        bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        if (bfqg_stats_idling(stats)) {
                u64 now = ktime_get_ns();

                if (now > stats->start_idle_time)
                        blkg_stat_add(&stats->idle_time,
                                      now - stats->start_idle_time);
                bfqg_stats_clear_idling(stats);
        }
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        stats->start_idle_time = ktime_get_ns();
        bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
        struct bfqg_stats *stats = &bfqg->stats;

        blkg_stat_add(&stats->avg_queue_size_sum,
                      blkg_rwstat_total(&stats->queued));
        blkg_stat_add(&stats->avg_queue_size_samples, 1);
        bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.queued, op, 1);
        bfqg_stats_end_empty_time(&bfqg->stats);
        if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue))
                bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.queued, op, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op)
{
        blkg_rwstat_add(&bfqg->stats.merged, op, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
                                  u64 io_start_time_ns, unsigned int op)
{
        struct bfqg_stats *stats = &bfqg->stats;
        u64 now = ktime_get_ns();

        if (now > io_start_time_ns)
                blkg_rwstat_add(&stats->service_time, op,
                                now - io_start_time_ns);
        if (io_start_time_ns > start_time_ns)
                blkg_rwstat_add(&stats->wait_time, op,
                                io_start_time_ns - start_time_ns);
}

#else /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
                              unsigned int op) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
                                  u64 io_start_time_ns, unsigned int op) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */
static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
        return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
        return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
        return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy,
 * making it possible to find the parent of a bfq_group or the bfq_group
 * associated with a bfq_queue.
 */
static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
        struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

        return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
        struct bfq_entity *group_entity = bfqq->entity.parent;

        return group_entity ? container_of(group_entity, struct bfq_group,
                                           entity) :
                              bfqq->bfqd->root_group;
}

/*
 * The following functions handle get and put of a bfq_group: bfqg_get() and
 * bfqg_put() manipulate the group's private reference counter, while
 * bfqg_and_blkg_get() and bfqg_and_blkg_put() also wrap the related
 * blk-cgroup hooks, so that the associated blkg is pinned as well.
 */
static void bfqg_get(struct bfq_group *bfqg)
{
        bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
        bfqg->ref--;

        if (bfqg->ref == 0)
                kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
        /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
        bfqg_get(bfqg);

        blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
        blkg_put(bfqg_to_blkg(bfqg));

        bfqg_put(bfqg);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* queued stats shouldn't be cleared */
        blkg_rwstat_reset(&stats->merged);
        blkg_rwstat_reset(&stats->service_time);
        blkg_rwstat_reset(&stats->wait_time);
        blkg_stat_reset(&stats->time);
        blkg_stat_reset(&stats->avg_queue_size_sum);
        blkg_stat_reset(&stats->avg_queue_size_samples);
        blkg_stat_reset(&stats->dequeue);
        blkg_stat_reset(&stats->group_wait_time);
        blkg_stat_reset(&stats->idle_time);
        blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
        if (!to || !from)
                return;

#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* queued stats shouldn't be cleared */
        blkg_rwstat_add_aux(&to->merged, &from->merged);
        blkg_rwstat_add_aux(&to->service_time, &from->service_time);
        blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
        blkg_stat_add_aux(&to->time, &from->time);
        blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
        blkg_stat_add_aux(&to->avg_queue_size_samples,
                          &from->avg_queue_size_samples);
        blkg_stat_add_aux(&to->dequeue, &from->dequeue);
        blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
        blkg_stat_add_aux(&to->idle_time, &from->idle_time);
        blkg_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
        struct bfq_group *parent;

        if (!bfqg) /* root_group */
                return;

        parent = bfqg_parent(bfqg);

        lockdep_assert_held(bfqg_to_blkg(bfqg)->q->queue_lock);

        if (unlikely(!parent))
                return;

        bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
        bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
                /*
                 * Make sure that bfqg and its associated blkg do not
                 * disappear before entity.
                 */
                bfqg_and_blkg_get(bfqg);
        }
        entity->parent = bfqg->my_entity; /* NULL for root group */
        entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg_rwstat_exit(&stats->merged);
        blkg_rwstat_exit(&stats->service_time);
        blkg_rwstat_exit(&stats->wait_time);
        blkg_rwstat_exit(&stats->queued);
        blkg_stat_exit(&stats->time);
        blkg_stat_exit(&stats->avg_queue_size_sum);
        blkg_stat_exit(&stats->avg_queue_size_samples);
        blkg_stat_exit(&stats->dequeue);
        blkg_stat_exit(&stats->group_wait_time);
        blkg_stat_exit(&stats->idle_time);
        blkg_stat_exit(&stats->empty_time);
#endif
}
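
/*
 * Allocate the per-group debug counters torn down above; if any allocation
 * fails, free whatever was already set up and report -ENOMEM.
 */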
static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
#ifdef CONFIG_DEBUG_BLK_CGROUP
        if (blkg_rwstat_init(&stats->merged, gfp) ||
            blkg_rwstat_init(&stats->service_time, gfp) ||
            blkg_rwstat_init(&stats->wait_time, gfp) ||
            blkg_rwstat_init(&stats->queued, gfp) ||
            blkg_stat_init(&stats->time, gfp) ||
            blkg_stat_init(&stats->avg_queue_size_sum, gfp) ||
            blkg_stat_init(&stats->avg_queue_size_samples, gfp) ||
            blkg_stat_init(&stats->dequeue, gfp) ||
            blkg_stat_init(&stats->group_wait_time, gfp) ||
            blkg_stat_init(&stats->idle_time, gfp) ||
            blkg_stat_init(&stats->empty_time, gfp)) {
                bfqg_stats_exit(stats);
                return -ENOMEM;
        }
#endif

        return 0;
}
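
/*
 * Per-blkcg (blkcg_policy_data) handling: conversion helpers between the
 * generic cpd and BFQ's bfq_group_data, plus allocation, default-weight
 * initialization and freeing of that per-blkcg data.
 */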
static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
        return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
        return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
        struct bfq_group_data *bgd;

        bgd = kzalloc(sizeof(*bgd), gfp);
        if (!bgd)
                return NULL;
        return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
        struct bfq_group_data *d = cpd_to_bfqgd(cpd);

        d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
                CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
        kfree(cpd_to_bfqgd(cpd));
}
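
/*
 * Per-blkg (blkg_policy_data) lifecycle: allocation and initialization of a
 * bfq_group, plus the hooks that free it and reset its statistics.
 */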
static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
{
        struct bfq_group *bfqg;

        bfqg = kzalloc_node(sizeof(*bfqg), gfp, node);
        if (!bfqg)
                return NULL;

        if (bfqg_stats_init(&bfqg->stats, gfp)) {
                kfree(bfqg);
                return NULL;
        }

        /* see comments in bfq_bic_update_cgroup for why refcounting */
        bfqg_get(bfqg);
        return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
        struct blkcg_gq *blkg = pd_to_blkg(pd);
        struct bfq_group *bfqg = blkg_to_bfqg(blkg);
        struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
        struct bfq_entity *entity = &bfqg->entity;
        struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

        entity->orig_weight = entity->weight = entity->new_weight = d->weight;
        entity->my_sched_data = &bfqg->sched_data;
        bfqg->my_entity = entity; /*
                                   * the root_group's will be set to NULL
                                   * in bfq_init_queue()
                                   */
        bfqg->bfqd = bfqd;
        bfqg->active_entities = 0;
        bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_exit(&bfqg->stats);
        bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);

        bfqg_stats_reset(&bfqg->stats);
}
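
/* Hook @bfqg into BFQ's scheduling hierarchy as a child of @parent. */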
static void bfq_group_set_parent(struct bfq_group *bfqg,
                                 struct bfq_group *parent)
{
        struct bfq_entity *entity;

        entity = &bfqg->entity;
        entity->parent = parent->my_entity;
        entity->sched_data = &parent->sched_data;
}
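
/* Return the bfq_group of @blkcg on @bfqd's request queue, if it exists. */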
static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
                                         struct blkcg *blkcg)
{
        struct blkcg_gq *blkg;

        blkg = blkg_lookup(blkcg, bfqd->queue);
        if (likely(blkg))
                return blkg_to_bfqg(blkg);
        return NULL;
}
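
/*
 * Look up the bfq_group for @blkcg and make sure that it, and every ancestor
 * not yet connected, is hooked into BFQ's private hierarchy.
 */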
struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
                                     struct blkcg *blkcg)
{
        struct bfq_group *bfqg, *parent;
        struct bfq_entity *entity;

        bfqg = bfq_lookup_bfqg(bfqd, blkcg);
        if (unlikely(!bfqg))
                return NULL;

        /*
         * Update chain of bfq_groups as we might be handling a leaf group
         * which, along with some of its relatives, has not been hooked yet
         * to the private hierarchy of BFQ.
         */
        entity = &bfqg->entity;
        for_each_entity(entity) {
                struct bfq_group *curr_bfqg = container_of(entity,
                                                struct bfq_group, entity);
                if (curr_bfqg != bfqd->root_group) {
                        parent = bfqg_parent(curr_bfqg);
                        if (!parent)
                                parent = bfqd->root_group;
                        bfq_group_set_parent(curr_bfqg, parent);
                }
        }

        return bfqg;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg)
{
        struct bfq_entity *entity = &bfqq->entity;

        /* If bfqq is empty, then bfq_bfqq_expire also invokes
         * bfq_del_bfqq_busy, thereby removing bfqq and its entity
         * from data structures related to current group. Otherwise we
         * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
         * we do below.
         */
        if (bfqq == bfqd->in_service_queue)
                bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
                                false, BFQQE_PREEMPTED);

        if (bfq_bfqq_busy(bfqq))
                bfq_deactivate_bfqq(bfqd, bfqq, false, false);
        else if (entity->on_st)
                bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
        bfqg_and_blkg_put(bfqq_group(bfqq));

        entity->parent = bfqg->my_entity;
        entity->sched_data = &bfqg->sched_data;
        /* pin down bfqg and its associated blkg */
        bfqg_and_blkg_get(bfqg);

        if (bfq_bfqq_busy(bfqq)) {
                bfq_pos_tree_add_move(bfqd, bfqq);
                bfq_activate_bfqq(bfqd, bfqq);
        }

        if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
                bfq_schedule_dispatch(bfqd);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @cgroup.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @blkcg: the blk-cgroup to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held; which makes
 * sure that the reference to cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue)
 *
 * NOTE: an alternative approach might have been to store the current
 * cgroup in bfqq and getting a reference to it, reducing the lookup
 * time here, at the price of slightly more complex code.
 */
static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
                                                 struct bfq_io_cq *bic,
                                                 struct blkcg *blkcg)
{
        struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
        struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
        struct bfq_group *bfqg;
        struct bfq_entity *entity;

        bfqg = bfq_find_set_group(bfqd, blkcg);

        if (unlikely(!bfqg))
                bfqg = bfqd->root_group;

        if (async_bfqq) {
                entity = &async_bfqq->entity;

                if (entity->sched_data != &bfqg->sched_data) {
                        bic_set_bfqq(bic, NULL, 0);
                        bfq_log_bfqq(bfqd, async_bfqq,
                                     "bic_change_group: %p %d",
                                     async_bfqq, async_bfqq->ref);
                        bfq_put_queue(async_bfqq);
                }
        }

        if (sync_bfqq) {
                entity = &sync_bfqq->entity;
                if (entity->sched_data != &bfqg->sched_data)
                        bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
        }

        return bfqg;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
        struct bfq_data *bfqd = bic_to_bfqd(bic);
        struct bfq_group *bfqg = NULL;
        uint64_t serial_nr;

        rcu_read_lock();
        serial_nr = bio_blkcg(bio)->css.serial_nr;

        /*
         * Check whether blkcg has changed. The condition may trigger
         * spuriously on a newly created cic but there's no harm.
         */
        if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
                goto out;

        bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
        /*
         * Update blkg_path for bfq_log_* functions. We cache this
         * path, and update it here, for the following
         * reasons. Operations on blkg objects in blk-cgroup are
         * protected with the request_queue lock, and not with the
         * lock that protects the instances of this scheduler
         * (bfqd->lock). This exposes BFQ to the following sort of
         * race.
         *
         * The blkg_lookup performed in bfq_get_queue, protected
         * through rcu, may happen to return the address of a copy of
         * the original blkg. If this is the case, then the
         * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
         * the blkg, is useless: it does not prevent blk-cgroup code
         * from destroying both the original blkg and all objects
         * directly or indirectly referred by the copy of the
         * blkg.
         *
         * On the bright side, destroy operations on a blkg invoke, as
         * a first step, hooks of the scheduler associated with the
         * blkg. And these hooks are executed with bfqd->lock held for
         * BFQ. As a consequence, for any blkg associated with the
         * request queue this instance of the scheduler is attached
         * to, we are guaranteed that such a blkg is not destroyed, and
         * that all the pointers it contains are consistent, while we
         * are holding bfqd->lock. A blkg_lookup performed with
         * bfqd->lock held then returns a fully consistent blkg, which
         * remains consistent until this lock is held.
         *
         * Thanks to the last fact, and to the fact that: (1) bfqg has
         * been obtained through a blkg_lookup in the above
         * assignment, and (2) bfqd->lock is being held, here we can
         * safely use the policy data for the involved blkg (i.e., the
         * field bfqg->pd) to get to the blkg associated with bfqg,
         * and then we can safely use any field of blkg. After we
         * release bfqd->lock, even just getting blkg through this
         * bfqg may cause dangling references to be traversed, as
         * bfqg->pd may not exist any more.
         *
         * In view of the above facts, here we cache, in the bfqg, any
         * blkg data we may need for this bic, and for its associated
         * bfq_queue. As of now, we need to cache only the path of the
         * blkg, which is used in the bfq_log_* functions.
         *
         * Finally, note that bfqg itself needs to be protected from
         * destruction on the blkg_free of the original blkg (which
         * invokes bfq_pd_free). We use an additional private
         * refcounter for bfqg, to let it disappear only after no
         * bfq_queue refers to it any longer.
         */
        blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
        bic->blkcg_serial_nr = serial_nr;
out:
        rcu_read_unlock();
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
        struct bfq_entity *entity = st->first_idle;

        for (; entity ; entity = st->first_idle)
                __bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
                                     struct bfq_entity *entity)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_entities - move to the root group all active
 *                                entities.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree with the entities.
 */
static void bfq_reparent_active_entities(struct bfq_data *bfqd,
                                         struct bfq_group *bfqg,
                                         struct bfq_service_tree *st)
{
        struct rb_root *active = &st->active;
        struct bfq_entity *entity = NULL;

        if (!RB_EMPTY_ROOT(&st->active))
                entity = bfq_entity_of(rb_first(active));

        for (; entity ; entity = bfq_entity_of(rb_first(active)))
                bfq_reparent_leaf_entity(bfqd, entity);

        if (bfqg->sched_data.in_service_entity)
                bfq_reparent_leaf_entity(bfqd,
                                         bfqg->sched_data.in_service_entity);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *                  and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
        struct bfq_service_tree *st;
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        struct bfq_data *bfqd = bfqg->bfqd;
        struct bfq_entity *entity = bfqg->my_entity;
        unsigned long flags;
        int i;

        spin_lock_irqsave(&bfqd->lock, flags);

        if (!entity) /* root group */
                goto put_async_queues;

        /*
         * Empty all service_trees belonging to this group before
         * deactivating the group itself.
         */
        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
                st = bfqg->sched_data.service_tree + i;
                /*
                 * The idle tree may still contain bfq_queues belonging
                 * to exited tasks because they never migrated to a
                 * different cgroup from the one being destroyed now.
                 */
                bfq_flush_idle_tree(st);

                /*
                 * It may happen that some queues are still active
                 * (busy) upon group destruction (if the corresponding
                 * processes have been forced to terminate). We move
                 * all the leaf entities corresponding to these queues
                 * to the root_group.
                 * Also, it may happen that the group has an entity
                 * in service, which is disconnected from the active
                 * tree: it must be moved, too.
                 * There is no need to put the sync queues, as the
                 * scheduler has taken no reference.
                 */
                bfq_reparent_active_entities(bfqd, bfqg, st);
        }

        __bfq_deactivate_entity(entity, false);

put_async_queues:
        bfq_put_async_queues(bfqd, bfqg);

        spin_unlock_irqrestore(&bfqd->lock, flags);
        /*
         * @blkg is going offline and will be ignored by
         * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
         * that they don't get lost. If IOs complete after this point, the
         * stats for them will be lost. Oh well...
         */
        bfqg_stats_xfer_dead(bfqg);
}
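
/* End weight raising for the async queues of every group on @bfqd's device. */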
void bfq_end_wr_async(struct bfq_data *bfqd)
{
        struct blkcg_gq *blkg;

        list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                bfq_end_wr_async_queues(bfqd, bfqg);
        }
        bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}
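
/* cgroup file handler: show the BFQ weight configured for @blkcg. */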
static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
        unsigned int val = 0;

        if (bfqgd)
                val = bfqgd->weight;

        seq_printf(sf, "%u\n", val);

        return 0;
}
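
/*
 * cgroup file handler (legacy hierarchy): set the BFQ weight of @blkcg and
 * propagate the new value to every group belonging to it.
 */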
static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
                                    struct cftype *cftype,
                                    u64 val)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
        struct blkcg_gq *blkg;
        int ret = -ERANGE;

        if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
                return ret;

        ret = 0;
        spin_lock_irq(&blkcg->lock);
        bfqgd->weight = (unsigned short)val;
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                struct bfq_group *bfqg = blkg_to_bfqg(blkg);

                if (!bfqg)
                        continue;
                /*
                 * Setting the prio_changed flag of the entity
                 * to 1 with new_weight == weight would re-set
                 * the value of the weight to its ioprio mapping.
                 * Set the flag only if necessary.
                 */
                if ((unsigned short)val != bfqg->entity.new_weight) {
                        bfqg->entity.new_weight = (unsigned short)val;
                        /*
                         * Make sure that the above new value has been
                         * stored in bfqg->entity.new_weight before
                         * setting the prio_changed flag. In fact,
                         * this flag may be read asynchronously (in
                         * critical sections protected by a different
                         * lock than that held here), and finding this
                         * flag set may cause the execution of the code
                         * for updating parameters whose value may
                         * depend also on bfqg->entity.new_weight (in
                         * __bfq_entity_update_weight_prio).
                         * This barrier makes sure that the new value
                         * of bfqg->entity.new_weight is correctly
                         * seen in that code.
                         */
                        smp_wmb();
                        bfqg->entity.prio_changed = 1;
                }
        }
        spin_unlock_irq(&blkcg->lock);

        return ret;
}
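
/* cgroup-v2 write handler: parse the weight and reuse the legacy setter. */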
static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
                                 char *buf, size_t nbytes,
                                 loff_t off)
{
        u64 weight;
        /* First unsigned long found in the file is used */
        int ret = kstrtoull(strim(buf), 0, &weight);

        if (ret)
                return ret;

        ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
        return ret ?: nbytes;
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
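/*
 * seq_file and prfill helpers that print the debug statistics defined above,
 * both for a single group and recursively over its descendants.
 */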
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
                          &blkcg_policy_bfq, seq_cft(sf)->private, false);
        return 0;
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
                          &blkcg_policy_bfq, seq_cft(sf)->private, true);
        return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        u64 sum = blkg_stat_recursive_sum(pd_to_blkg(pd),
                                          &blkcg_policy_bfq, off);
        return __blkg_prfill_u64(sf, pd, sum);
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
                                        struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd_to_blkg(pd),
                                                           &blkcg_policy_bfq,
                                                           off);
        return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
                          seq_cft(sf)->private, false);
        return 0;
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
                          seq_cft(sf)->private, true);
        return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
                               int off)
{
        u64 sum = blkg_rwstat_total(&pd->blkg->stat_bytes);

        return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
        return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
                                         struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat tmp = blkg_rwstat_recursive_sum(pd->blkg, NULL,
                                        offsetof(struct blkcg_gq, stat_bytes));
        u64 sum = atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
                  atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);

        return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
                          false);
        return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
                                      struct blkg_policy_data *pd, int off)
{
        struct bfq_group *bfqg = pd_to_bfqg(pd);
        u64 samples = blkg_stat_read(&bfqg->stats.avg_queue_size_samples);
        u64 v = 0;

        if (samples) {
                v = blkg_stat_read(&bfqg->stats.avg_queue_size_sum);
                v = div64_u64(v, samples);
        }
        __blkg_prfill_u64(sf, pd, v);
        return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
                          0, false);
        return 0;
}
#endif /* CONFIG_DEBUG_BLK_CGROUP */
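
/*
 * Activate the BFQ blk-cgroup policy on @bfqd's request queue and return the
 * root group of the resulting hierarchy.
 */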
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
        int ret;

        ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
        if (ret)
                return NULL;

        return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
        .dfl_cftypes            = bfq_blkg_files,
        .legacy_cftypes         = bfq_blkcg_legacy_files,

        .cpd_alloc_fn           = bfq_cpd_alloc,
        .cpd_init_fn            = bfq_cpd_init,
        .cpd_bind_fn            = bfq_cpd_init,
        .cpd_free_fn            = bfq_cpd_free,

        .pd_alloc_fn            = bfq_pd_alloc,
        .pd_init_fn             = bfq_pd_init,
        .pd_offline_fn          = bfq_pd_offline,
        .pd_free_fn             = bfq_pd_free,
        .pd_reset_stats_fn      = bfq_pd_reset_stats,
};

struct cftype bfq_blkcg_legacy_files[] = {
        {
                .name = "bfq.weight",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight,
                .write_u64 = bfq_io_set_weight_legacy,
        },

        /* statistics, covers only the tasks in the bfqg */
        {
                .name = "bfq.io_service_bytes",
                .private = (unsigned long)&blkcg_policy_bfq,
                .seq_show = blkg_print_stat_bytes,
        },
        {
                .name = "bfq.io_serviced",
                .private = (unsigned long)&blkcg_policy_bfq,
                .seq_show = blkg_print_stat_ios,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "bfq.time",
                .private = offsetof(struct bfq_group, stats.time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.sectors",
                .seq_show = bfqg_print_stat_sectors,
        },
        {
                .name = "bfq.io_service_time",
                .private = offsetof(struct bfq_group, stats.service_time),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_wait_time",
                .private = offsetof(struct bfq_group, stats.wait_time),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_merged",
                .private = offsetof(struct bfq_group, stats.merged),
                .seq_show = bfqg_print_rwstat,
        },
        {
                .name = "bfq.io_queued",
                .private = offsetof(struct bfq_group, stats.queued),
                .seq_show = bfqg_print_rwstat,
        },
#endif /* CONFIG_DEBUG_BLK_CGROUP */

        /* the same statistics which cover the bfqg and its descendants */
        {
                .name = "bfq.io_service_bytes_recursive",
                .private = (unsigned long)&blkcg_policy_bfq,
                .seq_show = blkg_print_stat_bytes_recursive,
        },
        {
                .name = "bfq.io_serviced_recursive",
                .private = (unsigned long)&blkcg_policy_bfq,
                .seq_show = blkg_print_stat_ios_recursive,
        },
#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "bfq.time_recursive",
                .private = offsetof(struct bfq_group, stats.time),
                .seq_show = bfqg_print_stat_recursive,
        },
        {
                .name = "bfq.sectors_recursive",
                .seq_show = bfqg_print_stat_sectors_recursive,
        },
        {
                .name = "bfq.io_service_time_recursive",
                .private = offsetof(struct bfq_group, stats.service_time),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_wait_time_recursive",
                .private = offsetof(struct bfq_group, stats.wait_time),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_merged_recursive",
                .private = offsetof(struct bfq_group, stats.merged),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.io_queued_recursive",
                .private = offsetof(struct bfq_group, stats.queued),
                .seq_show = bfqg_print_rwstat_recursive,
        },
        {
                .name = "bfq.avg_queue_size",
                .seq_show = bfqg_print_avg_queue_size,
        },
        {
                .name = "bfq.group_wait_time",
                .private = offsetof(struct bfq_group, stats.group_wait_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.idle_time",
                .private = offsetof(struct bfq_group, stats.idle_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.empty_time",
                .private = offsetof(struct bfq_group, stats.empty_time),
                .seq_show = bfqg_print_stat,
        },
        {
                .name = "bfq.dequeue",
                .private = offsetof(struct bfq_group, stats.dequeue),
                .seq_show = bfqg_print_stat,
        },
#endif /* CONFIG_DEBUG_BLK_CGROUP */
        { } /* terminate */
};

struct cftype bfq_blkg_files[] = {
        {
                .name = "bfq.weight",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = bfq_io_show_weight,
                .write = bfq_io_set_weight,
        },
        {} /* terminate */
};

#else /* CONFIG_BFQ_GROUP_IOSCHED */
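
/*
 * With group scheduling disabled, every queue belongs to the single root
 * group, so the hooks below reduce to trivial, single-group implementations.
 */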
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        entity->weight = entity->new_weight;
        entity->orig_weight = entity->new_weight;
        if (bfqq) {
                bfqq->ioprio = bfqq->new_ioprio;
                bfqq->ioprio_class = bfqq->new_ioprio_class;
        }
        entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
        bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
{
        return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
        return bfqq->bfqd->root_group;
}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
        struct bfq_group *bfqg;
        int i;

        bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
        if (!bfqg)
                return NULL;

        for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
                bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

        return bfqg;
}

#endif /* CONFIG_BFQ_GROUP_IOSCHED */