cfrfml.c

/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>

#define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)

#define RFM_SEGMENTATION_BIT 0x01
#define RFM_HEAD_SIZE 7
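
/*
 * Each RFM frame starts with a one-byte segmentation control field
 * (RFM_SEGMENTATION_BIT is set on every fragment except the last) followed
 * by a six-byte segment head; judging from cfrfml_receive() below, bytes
 * 4-5 of that head carry the total PDU size as a little-endian 16-bit
 * value. RFM_HEAD_SIZE covers both parts: 1 + 6 = 7 bytes.
 */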

static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
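
/*
 * Per-channel state. incomplete_frm holds the partially reassembled PDU,
 * seghead is a copy of the segment head taken from the initial fragment and
 * used to validate later fragments, pdu_size is the total PDU length read
 * from that head, and fragment_size is the per-frame budget derived from
 * the device MTU in cfrfml_create().
 */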
struct cfrfml {
        struct cfsrvl serv;
        struct cfpkt *incomplete_frm;
        int fragment_size;
        u8 seghead[6];
        u16 pdu_size;
        /* Protects serialized processing of packets */
        spinlock_t sync;
};

static void cfrfml_release(struct cflayer *layer)
{
        struct cfsrvl *srvl = container_of(layer, struct cfsrvl, layer);
        struct cfrfml *rfml = container_obj(&srvl->layer);

        if (rfml->incomplete_frm)
                cfpkt_destroy(rfml->incomplete_frm);

        kfree(srvl);
}
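
/*
 * Allocate and initialise an RFM service layer for the given channel.
 * fragment_size is derived from the link-layer MTU: RFM_HEAD_SIZE plus six
 * further bytes of head room are reserved, and the remainder is rounded
 * down to a multiple of 16. For example, an MTU of 1500 gives
 * (1500 - 7 - 6) / 16 * 16 = 1472.
 */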
struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
                              int mtu_size)
{
        int tmp;
        struct cfrfml *this = kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);

        if (!this) {
                pr_warn("Out of memory\n");
                return NULL;
        }

        cfsrvl_init(&this->serv, channel_id, dev_info, false);
        this->serv.release = cfrfml_release;
        this->serv.layer.receive = cfrfml_receive;
        this->serv.layer.transmit = cfrfml_transmit;

        /* Round down to closest multiple of 16 */
        tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
        tmp *= 16;

        this->fragment_size = tmp;
        spin_lock_init(&this->sync);
        snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ,
                 "rfm%d", channel_id);

        return &this->serv.layer;
}
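
/*
 * Append a non-initial fragment to the frame under reassembly. The six-byte
 * segment head is read (and removed) from the front of the fragment and
 * must match the head saved from the initial fragment; the remaining
 * payload is then appended to rfml->incomplete_frm. Returns the combined
 * packet, or NULL with *err set to -EPROTO (missing or mismatching head)
 * or -ENOMEM (append failure).
 */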
static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
                                struct cfpkt *pkt, int *err)
{
        struct cfpkt *tmppkt;

        *err = -EPROTO;
        /* n-th but not last segment */
        if (cfpkt_extr_head(pkt, seghead, 6) < 0)
                return NULL;

        /* Verify correct header */
        if (memcmp(seghead, rfml->seghead, 6) != 0)
                return NULL;

        tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
                              rfml->pdu_size + RFM_HEAD_SIZE);

        /* If cfpkt_append fails, the input packets are not freed */
        *err = -ENOMEM;
        if (tmppkt == NULL)
                return NULL;

        *err = 0;
        return tmppkt;
}
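
/*
 * Receive path. The one-byte segmentation control field is stripped first.
 * If RFM_SEGMENTATION_BIT is set, the packet is either the initial fragment
 * (kept as incomplete_frm after the segment head and the expected PDU size
 * have been peeked) or a continuation appended via rfm_append(). A packet
 * with the bit cleared either completes a pending reassembly (appended and
 * length-checked against pdu_size) or, if nothing is pending, is delivered
 * as-is; in both cases it is handed to the layer above. Any error drops all
 * buffered data and signals CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND upwards.
 */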
static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
{
        u8 tmp;
        bool segmented;
        int err;
        u8 seghead[6];
        struct cfrfml *rfml;
        struct cfpkt *tmppkt = NULL;

        caif_assert(layr->up != NULL);
        caif_assert(layr->receive != NULL);
        rfml = container_obj(layr);
        spin_lock(&rfml->sync);

        err = -EPROTO;
        if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
                goto out;
        segmented = tmp & RFM_SEGMENTATION_BIT;

        if (segmented) {
                if (rfml->incomplete_frm == NULL) {
                        /* Initial Segment */
                        if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
                                goto out;

                        rfml->pdu_size = get_unaligned_le16(rfml->seghead + 4);

                        if (cfpkt_erroneous(pkt))
                                goto out;
                        rfml->incomplete_frm = pkt;
                        pkt = NULL;
                } else {
                        tmppkt = rfm_append(rfml, seghead, pkt, &err);
                        if (tmppkt == NULL)
                                goto out;

                        if (cfpkt_erroneous(tmppkt))
                                goto out;

                        rfml->incomplete_frm = tmppkt;

                        if (cfpkt_erroneous(tmppkt))
                                goto out;
                }
                err = 0;
                goto out;
        }

        if (rfml->incomplete_frm) {
                /* Last Segment */
                tmppkt = rfm_append(rfml, seghead, pkt, &err);
                if (tmppkt == NULL)
                        goto out;

                if (cfpkt_erroneous(tmppkt))
                        goto out;

                rfml->incomplete_frm = NULL;
                pkt = tmppkt;
                tmppkt = NULL;

                /* Verify that length is correct */
                err = -EPROTO;
                if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
                        goto out;
        }

        err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);

out:
        if (err != 0) {
                if (tmppkt)
                        cfpkt_destroy(tmppkt);
                if (pkt)
                        cfpkt_destroy(pkt);
                if (rfml->incomplete_frm)
                        cfpkt_destroy(rfml->incomplete_frm);
                rfml->incomplete_frm = NULL;

                pr_info("Connection error %d triggered on RFM link\n", err);

                /* Trigger connection error upon failure. */
                layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
                                  rfml->serv.dev_info.id);
        }
        spin_unlock(&rfml->sync);
        return err;
}
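
/*
 * Hand one outgoing frame to the MUX layer below, stamping it with the
 * channel id, RFM header length and device info needed for routing.
 */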
static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
{
        caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);

        /* Add info for MUX-layer to route the packet out. */
        cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;

        /*
         * To optimize alignment, we add up the size of CAIF header before
         * payload.
         */
        cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
        cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;

        return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
}
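
/*
 * Transmit path. Packets that fit within fragment_size plus the RFM header
 * are sent in one piece with the segmentation bit cleared. Larger packets
 * are segmented: the first six bytes are peeked as the segment head, then
 * each fragment gets a control byte with RFM_SEGMENTATION_BIT set, is cut
 * to fragment_size with cfpkt_split() and transmitted, and the head is
 * re-added in front of the remainder. The final piece is sent with the bit
 * cleared. On error the remote-shutdown indication is raised; if the
 * original packet has already been consumed, 0 is returned so that the
 * socket layer does not free it a second time.
 */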
static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
        int err;
        u8 seg;
        u8 head[6];
        struct cfpkt *rearpkt = NULL;
        struct cfpkt *frontpkt = pkt;
        struct cfrfml *rfml = container_obj(layr);

        caif_assert(layr->dn != NULL);
        caif_assert(layr->dn->transmit != NULL);

        if (!cfsrvl_ready(&rfml->serv, &err))
                return err;

        err = -EPROTO;
        if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE - 1)
                goto out;

        err = 0;
        if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
                err = cfpkt_peek_head(pkt, head, 6);

        if (err < 0)
                goto out;

        while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
                seg = 1;
                err = -EPROTO;
                if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
                        goto out;
                /*
                 * On OOM error cfpkt_split returns NULL.
                 *
                 * NOTE: Segmented pdu is not correctly aligned.
                 * This has negative performance impact.
                 */
                rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
                if (rearpkt == NULL)
                        goto out;

                err = cfrfml_transmit_segment(rfml, frontpkt);
                if (err != 0)
                        goto out;

                frontpkt = rearpkt;
                rearpkt = NULL;

                err = -ENOMEM;
                if (frontpkt == NULL)
                        goto out;

                err = -EPROTO;
                if (cfpkt_add_head(frontpkt, head, 6) < 0)
                        goto out;
        }

        seg = 0;
        err = -EPROTO;
        if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
                goto out;

        err = cfrfml_transmit_segment(rfml, frontpkt);
        frontpkt = NULL;

out:
        if (err != 0) {
                pr_info("Connection error %d triggered on RFM link\n", err);

                /* Trigger connection error upon failure. */
                layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
                                  rfml->serv.dev_info.id);

                if (rearpkt)
                        cfpkt_destroy(rearpkt);

                if (frontpkt && frontpkt != pkt) {
                        cfpkt_destroy(frontpkt);
                        /*
                         * The socket layer will free the original packet,
                         * but that packet may already have been sent and
                         * freed. We therefore return 0 here to prevent the
                         * socket layer from freeing it again. The shutdown
                         * indication raised above will invalidate the
                         * connection anyway.
                         */
                        err = 0;
                }
        }
        return err;
}