mrp.c

/*
 *	IEEE 802.1Q Multiple Registration Protocol (MRP)
 *
 *	Copyright (c) 2012 Massachusetts Institute of Technology
 *
 *	Adapted from code in net/802/garp.c
 *	Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	version 2 as published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/mrp.h>
#include <asm/unaligned.h>

static unsigned int mrp_join_time __read_mostly = 200;
module_param(mrp_join_time, uint, 0644);
MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");

static unsigned int mrp_periodic_time __read_mostly = 1000;
module_param(mrp_periodic_time, uint, 0644);
MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");

MODULE_LICENSE("GPL");
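
/* Applicant state machine from IEEE 802.1Q: the table below is indexed by
 * [current applicant state][event] and yields the next state. The two-letter
 * state names follow the standard's shorthand (first letter: V = Very
 * anxious, A = Anxious, Q = Quiet, L = Leaving; second letter: O = Observer,
 * P = Passive member, A = Active member, N = New).
 */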
static const u8
mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
	[MRP_APPLICANT_VO] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
		[MRP_EVENT_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_TX] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VO,
	},
	[MRP_APPLICANT_VP] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_VP,
		[MRP_EVENT_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_TX] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VP,
	},
	[MRP_APPLICANT_VN] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_VN,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_VN,
	},
	[MRP_APPLICANT_AN] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_AN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AN,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AN,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AN,
	},
	[MRP_APPLICANT_AA] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_QA] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_QA,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_LA] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AA,
		[MRP_EVENT_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_TX] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_LA,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_LA,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_LA,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_LA,
	},
	[MRP_APPLICANT_AO] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
		[MRP_EVENT_LV] = MRP_APPLICANT_AO,
		[MRP_EVENT_TX] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AO,
	},
	[MRP_APPLICANT_QO] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
		[MRP_EVENT_LV] = MRP_APPLICANT_QO,
		[MRP_EVENT_TX] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_QO,
	},
	[MRP_APPLICANT_AP] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_AP,
		[MRP_EVENT_LV] = MRP_APPLICANT_AO,
		[MRP_EVENT_TX] = MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
	},
	[MRP_APPLICANT_QP] = {
		[MRP_EVENT_NEW] = MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN] = MRP_APPLICANT_QP,
		[MRP_EVENT_LV] = MRP_APPLICANT_QO,
		[MRP_EVENT_TX] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_NEW] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_IN] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN] = MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT] = MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV] = MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA] = MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE] = MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC] = MRP_APPLICANT_AP,
	},
};
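
/* What to transmit for an attribute when a TX opportunity occurs, indexed by
 * the current applicant state. The "optional" actions are never sent by this
 * pure-applicant implementation (see mrp_attr_event()).
 */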
static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
	[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
	[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
	[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};
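
/* Attribute values are treated as big-endian multi-byte integers so that
 * consecutive values can share a single VectorAttribute. For example, with a
 * two-byte value, incrementing {0x00, 0xff} yields {0x01, 0x00}.
 */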
static void mrp_attrvalue_inc(void *value, u8 len)
{
	u8 *v = (u8 *)value;

	/* Add 1 to the last byte. If it becomes zero,
	 * go to the previous byte and repeat.
	 */
	while (len > 0 && !++v[--len])
		;
}

static int mrp_attr_cmp(const struct mrp_attr *attr,
			const void *value, u8 len, u8 type)
{
	if (attr->type != type)
		return attr->type - type;
	if (attr->len != len)
		return attr->len - len;
	return memcmp(attr->value, value, len);
}
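
/* Declared attributes live in the applicant's rb-tree (app->mad), ordered by
 * the comparison above: first by type, then by length, then by value.
 */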
static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (parent) {
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			parent = parent->rb_left;
		else if (d < 0)
			parent = parent->rb_right;
		else
			return attr;
	}
	return NULL;
}

static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = NULL, **p = &app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (*p) {
		parent = *p;
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			p = &parent->rb_left;
		else if (d < 0)
			p = &parent->rb_right;
		else {
			/* The attribute already exists; re-use it. */
			return attr;
		}
	}
	attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
	if (!attr)
		return attr;
	attr->state = MRP_APPLICANT_VO;
	attr->type = type;
	attr->len = len;
	memcpy(attr->value, value, len);

	rb_link_node(&attr->node, parent, p);
	rb_insert_color(&attr->node, &app->mad);
	return attr;
}

static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
	rb_erase(&attr->node, &app->mad);
	kfree(attr);
}

static int mrp_pdu_init(struct mrp_applicant *app)
{
	struct sk_buff *skb;
	struct mrp_pdu_hdr *ph;

	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
			GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->dev = app->dev;
	skb->protocol = app->app->pkttype.type;
	skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	ph = __skb_put(skb, sizeof(*ph));
	ph->version = app->app->version;

	app->pdu = skb;
	return 0;
}

static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
{
	__be16 *endmark;

	if (skb_tailroom(app->pdu) < sizeof(*endmark))
		return -1;

	endmark = __skb_put(app->pdu, sizeof(*endmark));
	put_unaligned(MRP_END_MARK, endmark);
	return 0;
}
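
/* Finish the PDU under construction and queue it for transmission: one end
 * mark closes the Message that is still open (if any), a second end mark
 * terminates the PDU, and the Ethernet header is prepended with the
 * application's group MAC address as destination.
 */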
static void mrp_pdu_queue(struct mrp_applicant *app)
{
	if (!app->pdu)
		return;

	if (mrp_cb(app->pdu)->mh)
		mrp_pdu_append_end_mark(app);
	mrp_pdu_append_end_mark(app);

	dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
			app->app->group_address, app->dev->dev_addr,
			app->pdu->len);

	skb_queue_tail(&app->queue, app->pdu);
	app->pdu = NULL;
}

static void mrp_queue_xmit(struct mrp_applicant *app)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&app->queue)))
		dev_queue_xmit(skb);
}

static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
				  u8 attrtype, u8 attrlen)
{
	struct mrp_msg_hdr *mh;

	if (mrp_cb(app->pdu)->mh) {
		if (mrp_pdu_append_end_mark(app) < 0)
			return -1;
		mrp_cb(app->pdu)->mh = NULL;
		mrp_cb(app->pdu)->vah = NULL;
	}

	if (skb_tailroom(app->pdu) < sizeof(*mh))
		return -1;

	mh = __skb_put(app->pdu, sizeof(*mh));
	mh->attrtype = attrtype;
	mh->attrlen = attrlen;
	mrp_cb(app->pdu)->mh = mh;
	return 0;
}

static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
				      const void *firstattrvalue, u8 attrlen)
{
	struct mrp_vecattr_hdr *vah;

	if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
		return -1;

	vah = __skb_put(app->pdu, sizeof(*vah) + attrlen);
	put_unaligned(0, &vah->lenflags);
	memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
	mrp_cb(app->pdu)->vah = vah;
	memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
	return 0;
}
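
/* On the wire, an MRP PDU consists of a PDU header followed by one or more
 * Messages (attribute type and length), each holding one or more
 * VectorAttributes (a length/flags field, the first attribute value, and a
 * Vector of packed events for that value and the consecutive values after
 * it), with end marks closing the Messages and the PDU.
 * mrp_pdu_append_vecattr_event() below builds this up one event at a time,
 * starting new headers or a new PDU as needed.
 */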
static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
					const struct mrp_attr *attr,
					enum mrp_vecattr_event vaevent)
{
	u16 len, pos;
	u8 *vaevents;
	int err;
again:
	if (!app->pdu) {
		err = mrp_pdu_init(app);
		if (err < 0)
			return err;
	}

	/* If there is no Message header in the PDU, or the Message header is
	 * for a different attribute type, add an EndMark (if necessary) and a
	 * new Message header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->mh ||
	    mrp_cb(app->pdu)->mh->attrtype != attr->type ||
	    mrp_cb(app->pdu)->mh->attrlen != attr->len) {
		if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
			goto queue;
	}

	/* If there is no VectorAttribute header for this Message in the PDU,
	 * or this attribute's value does not sequentially follow the previous
	 * attribute's value, add a new VectorAttribute header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->vah ||
	    memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
		if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
			goto queue;
	}

	len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
	pos = len % 3;

	/* Events are packed into Vectors in the PDU, three to a byte. Add a
	 * byte to the end of the Vector if necessary.
	 */
	if (!pos) {
		if (skb_tailroom(app->pdu) < sizeof(u8))
			goto queue;
		vaevents = __skb_put(app->pdu, sizeof(u8));
	} else {
		vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
	}

	switch (pos) {
	case 0:
		*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
				       __MRP_VECATTR_EVENT_MAX);
		break;
	case 1:
		*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
		break;
	case 2:
		*vaevents += vaevent;
		break;
	default:
		WARN_ON(1);
	}

	/* Increment the length of the VectorAttribute in the PDU, as well as
	 * the value of the next attribute that would continue its Vector.
	 */
	put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
	mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);

	return 0;

queue:
	mrp_pdu_queue(app);
	goto again;
}
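
/* Vector encoding used above and decoded again in mrp_pdu_parse_vecattr():
 * three events are packed into one byte in base __MRP_VECATTR_EVENT_MAX (6),
 * i.e. byte = first * 36 + second * 6 + third. For example, JoinIn (1),
 * In (2) and Mt (4) pack to 1 * 36 + 2 * 6 + 4 = 52.
 */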
static void mrp_attr_event(struct mrp_applicant *app,
			   struct mrp_attr *attr, enum mrp_event event)
{
	enum mrp_applicant_state state;

	state = mrp_applicant_state_table[attr->state][event];
	if (state == MRP_APPLICANT_INVALID) {
		WARN_ON(1);
		return;
	}

	if (event == MRP_EVENT_TX) {
		/* When appending the attribute fails, don't update its state
		 * in order to retry at the next TX event.
		 */
		switch (mrp_tx_action_table[attr->state]) {
		case MRP_TX_ACTION_NONE:
		case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
		case MRP_TX_ACTION_S_IN_OPTIONAL:
			break;
		case MRP_TX_ACTION_S_NEW:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_NEW) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_JOIN_IN:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_LV:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_LV) < 0)
				return;
			/* As a pure applicant, sending a leave message
			 * implies that the attribute was unregistered and
			 * can be destroyed.
			 */
			mrp_attr_destroy(app, attr);
			return;
		default:
			WARN_ON(1);
		}
	}

	attr->state = state;
}
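
/* mrp_request_join() and mrp_request_leave() are the entry points used by MRP
 * applications (MVRP, for instance) to declare and withdraw attributes on a
 * port. A caller would use them roughly like this (names and values are
 * illustrative, not part of this file):
 *
 *	__be16 vid = htons(42);
 *
 *	err = mrp_request_join(dev, &my_mrp_app, &vid, sizeof(vid),
 *			       MY_ATTR_VID);
 *	...
 *	mrp_request_leave(dev, &my_mrp_app, &vid, sizeof(vid), MY_ATTR_VID);
 */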
int mrp_request_join(const struct net_device *dev,
		     const struct mrp_application *appl,
		     const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    FIELD_SIZEOF(struct sk_buff, cb))
		return -ENOMEM;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_create(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return -ENOMEM;
	}
	mrp_attr_event(app, attr, MRP_EVENT_JOIN);
	spin_unlock_bh(&app->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_request_join);

void mrp_request_leave(const struct net_device *dev,
		       const struct mrp_application *appl,
		       const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    FIELD_SIZEOF(struct sk_buff, cb))
		return;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_lookup(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return;
	}
	mrp_attr_event(app, attr, MRP_EVENT_LV);
	spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(mrp_request_leave);
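
/* Apply one event to every attribute in the applicant's database (the MAD,
 * MRP Attribute Declaration component, in 802.1Q terms). The loop fetches the
 * next node before delivering the event because handling a TX event for an
 * attribute in the LA state destroys that attribute.
 */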
static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
{
	struct rb_node *node, *next;
	struct mrp_attr *attr;

	for (node = rb_first(&app->mad);
	     next = node ? rb_next(node) : NULL, node != NULL;
	     node = next) {
		attr = rb_entry(node, struct mrp_attr, node);
		mrp_attr_event(app, attr, event);
	}
}
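
/* Rearm the join timer with a random delay uniformly distributed over
 * [0, mrp_join_time) ms: multiplying the jiffies equivalent by a 32-bit
 * random value and shifting right by 32 scales the random value into that
 * range.
 */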
static void mrp_join_timer_arm(struct mrp_applicant *app)
{
	unsigned long delay;

	delay = (u64)msecs_to_jiffies(mrp_join_time) * prandom_u32() >> 32;
	mod_timer(&app->join_timer, jiffies + delay);
}

static void mrp_join_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, join_timer);

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_queue_xmit(app);
	mrp_join_timer_arm(app);
}

static void mrp_periodic_timer_arm(struct mrp_applicant *app)
{
	mod_timer(&app->periodic_timer,
		  jiffies + msecs_to_jiffies(mrp_periodic_time));
}

static void mrp_periodic_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, periodic_timer);

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_PERIODIC);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_periodic_timer_arm(app);
}
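
/* Receive path: mrp_rcv() checks the PDU header, then walks the Messages and
 * VectorAttributes with the helpers below, feeding each decoded event into
 * the applicant state machine via mrp_attr_event(). An end mark closes the
 * current Message or, if doubled, the whole PDU; parsing also stops on any
 * malformed field.
 */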
static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
	__be16 endmark;

	if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
		return -1;
	if (endmark == MRP_END_MARK) {
		*offset += sizeof(endmark);
		return -1;
	}
	return 0;
}

static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
					struct sk_buff *skb,
					enum mrp_vecattr_event vaevent)
{
	struct mrp_attr *attr;
	enum mrp_event event;

	attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
			       mrp_cb(skb)->mh->attrlen,
			       mrp_cb(skb)->mh->attrtype);
	if (attr == NULL)
		return;

	switch (vaevent) {
	case MRP_VECATTR_EVENT_NEW:
		event = MRP_EVENT_R_NEW;
		break;
	case MRP_VECATTR_EVENT_JOIN_IN:
		event = MRP_EVENT_R_JOIN_IN;
		break;
	case MRP_VECATTR_EVENT_IN:
		event = MRP_EVENT_R_IN;
		break;
	case MRP_VECATTR_EVENT_JOIN_MT:
		event = MRP_EVENT_R_JOIN_MT;
		break;
	case MRP_VECATTR_EVENT_MT:
		event = MRP_EVENT_R_MT;
		break;
	case MRP_VECATTR_EVENT_LV:
		event = MRP_EVENT_R_LV;
		break;
	default:
		return;
	}

	mrp_attr_event(app, attr, event);
}
static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
				 struct sk_buff *skb, int *offset)
{
	struct mrp_vecattr_hdr _vah;
	u16 valen;
	u8 vaevents, vaevent;

	mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
					      &_vah);
	if (!mrp_cb(skb)->vah)
		return -1;
	*offset += sizeof(_vah);

	if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
	    MRP_VECATTR_HDR_FLAG_LA)
		mrp_mad_event(app, MRP_EVENT_R_LA);
	valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
			    MRP_VECATTR_HDR_LEN_MASK);

	/* The VectorAttribute structure in a PDU carries event information
	 * about one or more attributes having consecutive values. Only the
	 * value for the first attribute is contained in the structure. So
	 * we make a copy of that value, and then increment it each time we
	 * advance to the next event in its Vector.
	 */
	if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
	    FIELD_SIZEOF(struct sk_buff, cb))
		return -1;
	if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
			  mrp_cb(skb)->mh->attrlen) < 0)
		return -1;
	*offset += mrp_cb(skb)->mh->attrlen;

	/* In a VectorAttribute, the Vector contains events which are packed
	 * three to a byte. We process one byte of the Vector at a time.
	 */
	while (valen > 0) {
		if (skb_copy_bits(skb, *offset, &vaevents,
				  sizeof(vaevents)) < 0)
			return -1;
		*offset += sizeof(vaevents);

		/* Extract and process the first event. */
		vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
				      __MRP_VECATTR_EVENT_MAX);
		if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
			/* The byte is malformed; stop processing. */
			return -1;
		}
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the second event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= (__MRP_VECATTR_EVENT_MAX *
			     __MRP_VECATTR_EVENT_MAX);
		vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the third event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= __MRP_VECATTR_EVENT_MAX;
		vaevent = vaevents;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
	}
	return 0;
}
static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
			     int *offset)
{
	struct mrp_msg_hdr _mh;

	mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
	if (!mrp_cb(skb)->mh)
		return -1;
	*offset += sizeof(_mh);

	if (mrp_cb(skb)->mh->attrtype == 0 ||
	    mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
	    mrp_cb(skb)->mh->attrlen == 0)
		return -1;

	while (skb->len > *offset) {
		if (mrp_pdu_parse_end_mark(skb, offset) < 0)
			break;
		if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
			return -1;
	}
	return 0;
}

static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct mrp_application *appl = container_of(pt, struct mrp_application,
						    pkttype);
	struct mrp_port *port;
	struct mrp_applicant *app;
	struct mrp_pdu_hdr _ph;
	const struct mrp_pdu_hdr *ph;
	int offset = skb_network_offset(skb);

	/* If the interface is in promiscuous mode, drop the packet if
	 * it was unicast to another host.
	 */
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
		goto out;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto out;
	port = rcu_dereference(dev->mrp_port);
	if (unlikely(!port))
		goto out;
	app = rcu_dereference(port->applicants[appl->type]);
	if (unlikely(!app))
		goto out;

	ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
	if (!ph)
		goto out;
	offset += sizeof(_ph);

	if (ph->version != app->app->version)
		goto out;

	spin_lock(&app->lock);
	while (skb->len > offset) {
		if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
			break;
		if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
			break;
	}
	spin_unlock(&app->lock);
out:
	kfree_skb(skb);
	return 0;
}
static int mrp_init_port(struct net_device *dev)
{
	struct mrp_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;
	rcu_assign_pointer(dev->mrp_port, port);
	return 0;
}

static void mrp_release_port(struct net_device *dev)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	unsigned int i;

	for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
		if (rtnl_dereference(port->applicants[i]))
			return;
	}
	RCU_INIT_POINTER(dev->mrp_port, NULL);
	kfree_rcu(port, rcu);
}

int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_applicant *app;
	int err;

	ASSERT_RTNL();

	if (!rtnl_dereference(dev->mrp_port)) {
		err = mrp_init_port(dev);
		if (err < 0)
			goto err1;
	}

	err = -ENOMEM;
	app = kzalloc(sizeof(*app), GFP_KERNEL);
	if (!app)
		goto err2;

	err = dev_mc_add(dev, appl->group_address);
	if (err < 0)
		goto err3;

	app->dev = dev;
	app->app = appl;
	app->mad = RB_ROOT;
	spin_lock_init(&app->lock);
	skb_queue_head_init(&app->queue);
	rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
	timer_setup(&app->join_timer, mrp_join_timer, 0);
	mrp_join_timer_arm(app);
	timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
	mrp_periodic_timer_arm(app);
	return 0;

err3:
	kfree(app);
err2:
	mrp_release_port(dev);
err1:
	return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);

void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);

	ASSERT_RTNL();

	RCU_INIT_POINTER(port->applicants[appl->type], NULL);

	/* Delete timer and generate a final TX event to flush out
	 * all pending messages before the applicant is gone.
	 */
	del_timer_sync(&app->join_timer);
	del_timer_sync(&app->periodic_timer);

	spin_lock_bh(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock_bh(&app->lock);

	mrp_queue_xmit(app);

	dev_mc_del(dev, appl->group_address);
	kfree_rcu(app, rcu);
	mrp_release_port(dev);
}
EXPORT_SYMBOL_GPL(mrp_uninit_applicant);
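
/* An MRP application registers its packet type once (typically from module
 * init) and then attaches an applicant to each port it runs on; the per-port
 * calls must hold the RTNL lock (see ASSERT_RTNL() above). A rough sketch,
 * with illustrative names and values only:
 *
 *	static struct mrp_application my_mrp_app __read_mostly = {
 *		.type		= MRP_APPLICATION_MVRP,
 *		.maxattr	= MY_ATTR_MAX,
 *		.pkttype.type	= htons(MY_ETHERTYPE),
 *		.group_address	= MY_GROUP_ADDRESS,
 *		.version	= 0,
 *	};
 *
 *	mrp_register_application(&my_mrp_app);		(module init)
 *	mrp_init_applicant(dev, &my_mrp_app);		(per device, under RTNL)
 *	...
 *	mrp_uninit_applicant(dev, &my_mrp_app);
 *	mrp_unregister_application(&my_mrp_app);	(module exit)
 */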
int mrp_register_application(struct mrp_application *appl)
{
	appl->pkttype.func = mrp_rcv;
	dev_add_pack(&appl->pkttype);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);

void mrp_unregister_application(struct mrp_application *appl)
{
	dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);