sched.c

/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/netdevice.h>

#include "cxgb4.h"
#include "sched.h"

/* Spinlock must be held by caller */
static int t4_sched_class_fw_cmd(struct port_info *pi,
				 struct ch_sched_params *p,
				 enum sched_fw_ops op)
{
	struct adapter *adap = pi->adapter;
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;
	int err = 0;

	e = &s->tab[p->u.params.class];
	switch (op) {
	case SCHED_FW_OP_ADD:
		err = t4_sched_params(adap, p->type,
				      p->u.params.level, p->u.params.mode,
				      p->u.params.rateunit,
				      p->u.params.ratemode,
				      p->u.params.channel, e->idx,
				      p->u.params.minrate, p->u.params.maxrate,
				      p->u.params.weight, p->u.params.pktsize);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

/* Spinlock must be held by caller */
static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
				   enum sched_bind_type type, bool bind)
{
	struct adapter *adap = pi->adapter;
	u32 fw_mnem, fw_class, fw_param;
	unsigned int pf = adap->pf;
	unsigned int vf = 0;
	int err = 0;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		qe = (struct sched_queue_entry *)arg;

		/* Create a template for the FW_PARAMS_CMD mnemonic and
		 * value (TX Scheduling Class in this case).
		 */
		fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			   FW_PARAMS_PARAM_X_V(
				   FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
		fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
		fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));

		pf = adap->pf;
		vf = 0;
		break;
	}
	default:
		err = -ENOTSUPP;
		goto out;
	}

	err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);

out:
	return err;
}

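/* Find the scheduling class, if any, that the queue with HW context id
 * @qid is currently bound to.  If @index is non-NULL, also return the
 * queue's position within that class's queue list.
 */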
static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
						 const unsigned int qid,
						 int *index)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e, *end;
	struct sched_class *found = NULL;
	int i;

	/* Look for a class with matching bound queue parameters */
	end = &s->tab[s->sched_size];
	for (e = &s->tab[0]; e != end; ++e) {
		struct sched_queue_entry *qe;

		i = 0;
		if (e->state == SCHED_STATE_UNUSED)
			continue;

		list_for_each_entry(qe, &e->queue_list, list) {
			if (qe->cntxt_id == qid) {
				found = e;
				if (index)
					*index = i;
				break;
			}
			i++;
		}

		if (found)
			break;
	}

	return found;
}

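/* Unbind a queue from the scheduling class it is currently bound to, if
 * any.  The class is marked unused again once its last queue is removed.
 */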
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	struct sched_queue_entry *qe = NULL;
	struct sge_eth_txq *txq;
	unsigned int qid;
	int index = -1;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qid = txq->q.cntxt_id;

	/* Find the existing class that the queue is bound to */
	e = t4_sched_queue_lookup(pi, qid, &index);
	if (e && index >= 0) {
		int i = 0;

		spin_lock(&e->lock);
		list_for_each_entry(qe, &e->queue_list, list) {
			if (i == index)
				break;
			i++;
		}
		err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
					      false);
		if (err) {
			spin_unlock(&e->lock);
			goto out;
		}

		list_del(&qe->list);
		kvfree(qe);
		if (atomic_dec_and_test(&e->refcnt)) {
			e->state = SCHED_STATE_UNUSED;
			memset(&e->info, 0, sizeof(e->info));
		}
		spin_unlock(&e->lock);
	}

out:
	return err;
}

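/* Bind a queue to the scheduling class given in @p->class, first
 * unbinding it from any class it is currently on.
 */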
static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct adapter *adap = pi->adapter;
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;
	struct sched_queue_entry *qe = NULL;
	struct sge_eth_txq *txq;
	unsigned int qid;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	qe = kvzalloc(sizeof(struct sched_queue_entry), GFP_KERNEL);
	if (!qe)
		return -ENOMEM;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qid = txq->q.cntxt_id;

	/* Unbind queue from any existing class */
	err = t4_sched_queue_unbind(pi, p);
	if (err) {
		kvfree(qe);
		goto out;
	}

	/* Bind queue to specified class */
	memset(qe, 0, sizeof(*qe));
	qe->cntxt_id = qid;
	memcpy(&qe->param, p, sizeof(qe->param));

	e = &s->tab[qe->param.class];
	spin_lock(&e->lock);
	err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
	if (err) {
		kvfree(qe);
		spin_unlock(&e->lock);
		goto out;
	}

	list_add_tail(&qe->list, &e->queue_list);
	atomic_inc(&e->refcnt);
	spin_unlock(&e->lock);

out:
	return err;
}

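/* Unbind all entities of the given type from scheduling class @e */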
static void t4_sched_class_unbind_all(struct port_info *pi,
				      struct sched_class *e,
				      enum sched_bind_type type)
{
	if (!e)
		return;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe, *next;

		/* Use the _safe iterator: t4_sched_queue_unbind() removes
		 * and frees the current entry.
		 */
		list_for_each_entry_safe(qe, next, &e->queue_list, list)
			t4_sched_queue_unbind(pi, &qe->param);
		break;
	}
	default:
		break;
	}
}

static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
					 enum sched_bind_type type, bool bind)
{
	int err = 0;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		if (bind)
			err = t4_sched_queue_bind(pi, qe);
		else
			err = t4_sched_queue_unbind(pi, qe);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

/**
 * cxgb4_sched_class_bind - Bind an entity to a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (Queue)
 *
 * Binds an entity (queue) to a scheduling class.  If the entity
 * is bound to another class, it will be unbound from the other class
 * and bound to the class specified in @arg.
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
			   enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_table *s;
	int err = 0;
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	if (class_id == SCHED_CLS_NONE)
		return -ENOTSUPP;

	s = pi->sched_tbl;
	write_lock(&s->rw_lock);
	err = t4_sched_class_bind_unbind_op(pi, arg, type, true);
	write_unlock(&s->rw_lock);

	return err;
}

/**
 * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (Queue)
 *
 * Unbinds an entity (queue) from a scheduling class.
 */
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
			     enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_table *s;
	int err = 0;
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	s = pi->sched_tbl;
	write_lock(&s->rw_lock);
	err = t4_sched_class_bind_unbind_op(pi, arg, type, false);
	write_unlock(&s->rw_lock);

	return err;
}

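/* Illustrative usage (a sketch, not code from this file; it mirrors how a
 * caller such as the driver's ndo_set_tx_maxrate handler would drive the
 * bind/unbind API).  The queue index and class below are hypothetical:
 *
 *	struct ch_sched_queue qe;
 *
 *	memset(&qe, 0, sizeof(qe));
 *	qe.queue = 0;		queue set index on this port
 *	qe.class = e->idx;	class from cxgb4_sched_class_alloc()
 *	err = cxgb4_sched_class_bind(dev, (void *)&qe, SCHED_QUEUE);
 *
 * and later, to unbind the same queue:
 *
 *	memset(&qe, 0, sizeof(qe));
 *	qe.queue = 0;
 *	qe.class = SCHED_CLS_NONE;
 *	err = cxgb4_sched_class_unbind(dev, (void *)&qe, SCHED_QUEUE);
 */
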
/* If @p is NULL, fetch any available unused class */
static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
						 const struct ch_sched_params *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e, *end;
	struct sched_class *found = NULL;

	if (!p) {
		/* Get any available unused class */
		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED) {
				found = e;
				break;
			}
		}
	} else {
		/* Look for a class with matching scheduling parameters */
		struct ch_sched_params info;
		struct ch_sched_params tp;

		memcpy(&tp, p, sizeof(tp));
		/* Don't try to match class parameter */
		tp.u.params.class = SCHED_CLS_NONE;

		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED)
				continue;

			memcpy(&info, &e->info, sizeof(info));
			/* Don't try to match class parameter */
			info.u.params.class = SCHED_CLS_NONE;

			if ((info.type == tp.type) &&
			    (!memcmp(&info.u.params, &tp.u.params,
				     sizeof(info.u.params)))) {
				found = e;
				break;
			}
		}
	}

	return found;
}

static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
						struct ch_sched_params *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;
	u8 class_id;
	int err;

	if (!p)
		return NULL;

	class_id = p->u.params.class;

	/* Only accept search for existing class with matching params
	 * or allocation of new class with specified params
	 */
	if (class_id != SCHED_CLS_NONE)
		return NULL;

	write_lock(&s->rw_lock);
	/* See if there's an existing class with same
	 * requested sched params
	 */
	e = t4_sched_class_lookup(pi, p);
	if (!e) {
		struct ch_sched_params np;

		/* Fetch any available unused class */
		e = t4_sched_class_lookup(pi, NULL);
		if (!e)
			goto out;

		memcpy(&np, p, sizeof(np));
		np.u.params.class = e->idx;

		spin_lock(&e->lock);
		/* New class */
		err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
		if (err) {
			spin_unlock(&e->lock);
			e = NULL;
			goto out;
		}
		memcpy(&e->info, &np, sizeof(e->info));
		atomic_set(&e->refcnt, 0);
		e->state = SCHED_STATE_ACTIVE;
		spin_unlock(&e->lock);
	}

out:
	write_unlock(&s->rw_lock);
	return e;
}

/**
 * cxgb4_sched_class_alloc - allocate a scheduling class
 * @dev: net_device pointer
 * @p: new scheduling class to create.
 *
 * Returns pointer to the scheduling class created.  If a scheduling class
 * with matching @p already exists, the existing class is returned.  @p
 * must not be NULL and @p->u.params.class must be SCHED_CLS_NONE;
 * otherwise NULL is returned.
 */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
					    struct ch_sched_params *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	if (!p || !can_sched(dev))
		return NULL;

	class_id = p->u.params.class;
	if (!valid_class_id(dev, class_id))
		return NULL;

	return t4_sched_class_alloc(pi, p);
}

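/* Illustrative usage (a sketch, not code from this file): allocating a
 * rate-limiting class the way the driver's TX max-rate path would.  All
 * parameter values are hypothetical; the requested class must be
 * SCHED_CLS_NONE so that an unused or matching class is picked:
 *
 *	struct ch_sched_params p;
 *	struct sched_class *e;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.type = SCHED_CLASS_TYPE_PACKET;
 *	p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
 *	p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
 *	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
 *	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
 *	p.u.params.channel  = pi->tx_chan;
 *	p.u.params.class    = SCHED_CLS_NONE;
 *	p.u.params.maxrate  = 100000;	rate in Kbps
 *	p.u.params.pktsize  = dev->mtu;
 *	e = cxgb4_sched_class_alloc(dev, &p);
 */
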
static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
{
	t4_sched_class_unbind_all(pi, e, SCHED_QUEUE);
}

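/* Allocate and initialize a scheduling table with @sched_size classes.
 * Returns NULL on allocation failure.
 */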
struct sched_table *t4_init_sched(unsigned int sched_size)
{
	struct sched_table *s;
	unsigned int i;

	s = kvzalloc(sizeof(*s) + sched_size * sizeof(struct sched_class),
		     GFP_KERNEL);
	if (!s)
		return NULL;

	s->sched_size = sched_size;
	rwlock_init(&s->rw_lock);

	for (i = 0; i < s->sched_size; i++) {
		memset(&s->tab[i], 0, sizeof(struct sched_class));
		s->tab[i].idx = i;
		s->tab[i].state = SCHED_STATE_UNUSED;
		INIT_LIST_HEAD(&s->tab[i].queue_list);
		spin_lock_init(&s->tab[i].lock);
		atomic_set(&s->tab[i].refcnt, 0);
	}

	return s;
}

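/* Free all active scheduling classes and the per-port scheduling tables */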
void t4_cleanup_sched(struct adapter *adap)
{
	struct sched_table *s;
	unsigned int j, i;

	for_each_port(adap, j) {
		struct port_info *pi = netdev2pinfo(adap->port[j]);

		s = pi->sched_tbl;
		if (!s)
			continue;

		for (i = 0; i < s->sched_size; i++) {
			struct sched_class *e;

			write_lock(&s->rw_lock);
			e = &s->tab[i];
			if (e->state == SCHED_STATE_ACTIVE)
				t4_sched_class_free(pi, e);
			write_unlock(&s->rw_lock);
		}

		kvfree(s);
	}
}