/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/errno.h>
#include <linux/uaccess.h>
#include <net/psnap.h>
#include <asm/unaligned.h>
#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozcdev.h"
#include "ozusbsvc.h"

static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);

/*
 * Counts the isoc frames submitted to the network card that have not yet
 * completed (decremented by oz_isoc_destructor() when the skb is freed).
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);

/* Application handler functions.
 */
static const struct oz_app_if g_app_if[OZ_NB_APPS] = {
	[OZ_APPID_USB] = {
		.init = oz_usb_init,
		.term = oz_usb_term,
		.start = oz_usb_start,
		.stop = oz_usb_stop,
		.rx = oz_usb_rx,
		.heartbeat = oz_usb_heartbeat,
		.farewell = oz_usb_farewell,
	},
	[OZ_APPID_SERIAL] = {
		.init = oz_cdev_init,
		.term = oz_cdev_term,
		.start = oz_cdev_start,
		.stop = oz_cdev_stop,
		.rx = oz_cdev_rx,
	},
};

/*
 * Context: softirq or process
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
	pd->state = state;
	switch (state) {
	case OZ_PD_S_IDLE:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
		break;
	case OZ_PD_S_CONNECTED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
		break;
	case OZ_PD_S_STOPPED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
		break;
	case OZ_PD_S_SLEEP:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
		break;
	}
}

/*
 * Context: softirq or process
 */
void oz_pd_get(struct oz_pd *pd)
{
	atomic_inc(&pd->ref_count);
}

/*
 * Context: softirq or process
 */
void oz_pd_put(struct oz_pd *pd)
{
	if (atomic_dec_and_test(&pd->ref_count))
		oz_pd_destroy(pd);
}
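
/*
 * Allocate and initialise a new PD (peripheral device). The reference
 * count starts at 2, presumably one reference for the protocol layer's
 * PD list and one for the caller; both are released via oz_pd_put().
 */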

/*
 * Context: softirq-serialized
 */
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
	struct oz_pd *pd;
	int i;

	pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
	if (!pd)
		return NULL;

	atomic_set(&pd->ref_count, 2);
	for (i = 0; i < OZ_NB_APPS; i++)
		spin_lock_init(&pd->app_lock[i]);
	pd->last_rx_pkt_num = 0xffffffff;
	oz_pd_set_state(pd, OZ_PD_S_IDLE);
	pd->max_tx_size = OZ_MAX_TX_SIZE;
	ether_addr_copy(pd->mac_addr, mac_addr);
	oz_elt_buf_init(&pd->elt_buff);
	spin_lock_init(&pd->tx_frame_lock);
	INIT_LIST_HEAD(&pd->tx_queue);
	INIT_LIST_HEAD(&pd->farewell_list);
	pd->last_sent_frame = &pd->tx_queue;
	spin_lock_init(&pd->stream_lock);
	INIT_LIST_HEAD(&pd->stream_list);
	tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
		     (unsigned long)pd);
	tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
		     (unsigned long)pd);
	hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	pd->heartbeat.function = oz_pd_heartbeat_event;
	pd->timeout.function = oz_pd_timeout_event;
	return pd;
}

/*
 * Context: softirq or process
 */
static void oz_pd_free(struct work_struct *work)
{
	struct list_head *e, *n;
	struct oz_pd *pd = container_of(work, struct oz_pd, workitem);

	oz_pd_dbg(pd, ON, "Destroying PD\n");
	/* Disable timer tasklets. */
	tasklet_kill(&pd->heartbeat_tasklet);
	tasklet_kill(&pd->timeout_tasklet);

	/* Free streams, queued tx frames and farewells. */
	list_for_each_safe(e, n, &pd->stream_list)
		oz_isoc_stream_free(list_entry(e, struct oz_isoc_stream, link));

	list_for_each_safe(e, n, &pd->tx_queue) {
		struct oz_tx_frame *f = list_entry(e, struct oz_tx_frame, link);

		if (f->skb != NULL)
			kfree_skb(f->skb);
		oz_retire_frame(pd, f);
	}
	oz_elt_buf_term(&pd->elt_buff);

	list_for_each_safe(e, n, &pd->farewell_list)
		kfree(list_entry(e, struct oz_farewell, link));

	if (pd->net_dev)
		dev_put(pd->net_dev);
	kfree(pd);
}

/*
 * Context: softirq or process
 */
void oz_pd_destroy(struct oz_pd *pd)
{
	if (hrtimer_active(&pd->timeout))
		hrtimer_cancel(&pd->timeout);
	if (hrtimer_active(&pd->heartbeat))
		hrtimer_cancel(&pd->heartbeat);

	INIT_WORK(&pd->workitem, oz_pd_free);
	if (!schedule_work(&pd->workitem))
		oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
}

/*
 * Context: softirq-serialized
 */
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
	int i, rc = 0;

	oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].start && (apps & (1 << i))) {
			if (g_app_if[i].start(pd, resume)) {
				rc = -1;
				oz_pd_dbg(pd, ON,
					  "Unable to start service %d\n", i);
				break;
			}
			spin_lock_bh(&g_polling_lock);
			pd->total_apps |= (1 << i);
			if (resume)
				pd->paused_apps &= ~(1 << i);
			spin_unlock_bh(&g_polling_lock);
		}
	}
	return rc;
}

/*
 * Context: softirq or process
 */
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
	int i;

	oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].stop && (apps & (1 << i))) {
			spin_lock_bh(&g_polling_lock);
			if (pause) {
				pd->paused_apps |= (1 << i);
			} else {
				pd->total_apps &= ~(1 << i);
				pd->paused_apps &= ~(1 << i);
			}
			spin_unlock_bh(&g_polling_lock);
			g_app_if[i].stop(pd, pause);
		}
	}
}
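
/*
 * Fan the heartbeat out to every application whose bit is set in 'apps'.
 * If no handler reports more work pending, the heartbeat hrtimer is
 * cancelled. In ISOC "anytime" mode the heartbeat is also used as an
 * opportunity to push a few queued isochronous frames (at most eight
 * per call) out to the network device.
 */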

/*
 * Context: softirq
 */
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
	int i, more = 0;

	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].heartbeat && (apps & (1 << i))) {
			if (g_app_if[i].heartbeat(pd))
				more = 1;
		}
	}
	if (!more && hrtimer_active(&pd->heartbeat))
		hrtimer_cancel(&pd->heartbeat);
	if (pd->mode & OZ_F_ISOC_ANYTIME) {
		int count = 8;

		while (count-- && (oz_send_isoc_frame(pd) >= 0))
			;
	}
}

/*
 * Context: softirq or process
 */
void oz_pd_stop(struct oz_pd *pd)
{
	u16 stop_apps;

	oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
	oz_pd_indicate_farewells(pd);

	spin_lock_bh(&g_polling_lock);
	stop_apps = pd->total_apps;
	pd->total_apps = 0;
	pd->paused_apps = 0;
	spin_unlock_bh(&g_polling_lock);

	oz_services_stop(pd, stop_apps, 0);

	spin_lock_bh(&g_polling_lock);
	oz_pd_set_state(pd, OZ_PD_S_STOPPED);
	/* Remove from PD list. */
	list_del(&pd->link);
	spin_unlock_bh(&g_polling_lock);

	oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
	oz_pd_put(pd);
}
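
/*
 * Put the PD to sleep or stop it outright. The PD may only sleep if the
 * device negotiated a keep-alive period and a session is established;
 * otherwise it is stopped immediately. When sleeping, the running
 * services are paused and a stop timer is armed for the keep-alive
 * interval. Returns non-zero if the PD was stopped rather than slept.
 */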

/*
 * Context: softirq
 */
int oz_pd_sleep(struct oz_pd *pd)
{
	int do_stop = 0;
	u16 stop_apps;

	spin_lock_bh(&g_polling_lock);
	if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
		spin_unlock_bh(&g_polling_lock);
		return 0;
	}
	if (pd->keep_alive && pd->session_id)
		oz_pd_set_state(pd, OZ_PD_S_SLEEP);
	else
		do_stop = 1;
	stop_apps = pd->total_apps;
	spin_unlock_bh(&g_polling_lock);

	if (do_stop) {
		oz_pd_stop(pd);
	} else {
		oz_services_stop(pd, stop_apps, 1);
		oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
	}
	return do_stop;
}

/*
 * Context: softirq
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
	struct oz_tx_frame *f;

	f = kmem_cache_alloc(oz_tx_frame_cache, GFP_ATOMIC);
	if (f) {
		f->total_size = sizeof(struct oz_hdr);
		INIT_LIST_HEAD(&f->link);
		INIT_LIST_HEAD(&f->elt_list);
	}
	return f;
}

/*
 * Context: softirq or process
 */
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	pd->nb_queued_isoc_frames--;
	list_del_init(&f->link);
	kmem_cache_free(oz_tx_frame_cache, f);
	oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
	       pd->nb_queued_isoc_frames);
}

/*
 * Context: softirq or process
 */
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	kmem_cache_free(oz_tx_frame_cache, f);
}

/*
 * Context: softirq-serialized
 */
static void oz_set_more_bit(struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->control |= OZ_F_MORE_DATA;
}

/*
 * Context: softirq-serialized
 */
static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}
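
/*
 * Prepare the next tx frame for a PD in triggered mode: allocate a
 * frame, stamp it with the next packet number and, unless 'empty' is
 * set, pull buffered elements (up to max_tx_size) onto its element
 * list before adding it to the tx queue. Returns 0 on success, -1 if
 * no frame can or need be prepared.
 */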

/*
 * Context: softirq
 */
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
	struct oz_tx_frame *f;

	if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
		return -1;
	if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
		return -1;
	if (!empty && !oz_are_elts_available(&pd->elt_buff))
		return -1;

	f = oz_tx_frame_alloc(pd);
	if (f == NULL)
		return -1;

	f->skb = NULL;
	f->hdr.control =
		(OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
	++pd->last_tx_pkt_num;
	put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
	if (empty == 0) {
		oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
				      pd->max_tx_size, &f->elt_list);
	}
	spin_lock(&pd->tx_frame_lock);
	list_add_tail(&f->link, &pd->tx_queue);
	pd->nb_queued_frames++;
	spin_unlock(&pd->tx_frame_lock);
	return 0;
}

/*
 * Context: softirq-serialized
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct oz_elt_info *ei;

	/* Allocate skb with enough space for the lower layers as well
	 * as the space we need.
	 */
	skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve the head room for lower layers. */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
			    dev->dev_addr, skb->len) < 0)
		goto fail;

	/* Push the tail to the end of the area we are going to copy to. */
	oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
	f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));

	/* Copy the elements into the frame body. */
	elt = (struct oz_elt *)(oz_hdr + 1);
	list_for_each_entry(ei, &f->elt_list, link) {
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}

/*
 * Context: softirq or process
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct oz_elt_info *ei, *n;

	list_for_each_entry_safe(ei, n, &f->elt_list, link) {
		list_del_init(&ei->link);
		if (ei->callback)
			ei->callback(pd, ei->context);
		spin_lock_bh(&pd->elt_buff.lock);
		oz_elt_info_free(&pd->elt_buff, ei);
		spin_unlock_bh(&pd->elt_buff.lock);
	}
	oz_tx_frame_free(pd, f);
}
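
/*
 * Transmit the first frame on the tx queue that has not been sent yet.
 * pd->last_sent_frame is a cursor into the tx queue: non-ISOC frames
 * stay queued (for possible retransmission) until the device
 * acknowledges them in oz_retire_tx_frames(), so the cursor rather
 * than the list head marks how far transmission has progressed. An
 * ISOC frame found at the cursor carries a prebuilt skb and is
 * released as soon as it is handed to the network device.
 */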

/*
 * Context: softirq-serialized
 */
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
{
	struct sk_buff *skb;
	struct oz_tx_frame *f;
	struct list_head *e;

	spin_lock(&pd->tx_frame_lock);
	e = pd->last_sent_frame->next;
	if (e == &pd->tx_queue) {
		spin_unlock(&pd->tx_frame_lock);
		return -1;
	}
	f = list_entry(e, struct oz_tx_frame, link);

	if (f->skb != NULL) {
		skb = f->skb;
		oz_tx_isoc_free(pd, f);
		spin_unlock(&pd->tx_frame_lock);
		if (more_data)
			oz_set_more_bit(skb);
		oz_set_last_pkt_nb(pd, skb);
		if ((int)atomic_read(&g_submitted_isoc) <
						OZ_MAX_SUBMITTED_ISOC) {
			if (dev_queue_xmit(skb) < 0) {
				oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
				return -1;
			}
			atomic_inc(&g_submitted_isoc);
			oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
			       pd->nb_queued_isoc_frames);
			return 0;
		}
		kfree_skb(skb);
		oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
		return -1;
	}

	pd->last_sent_frame = e;
	skb = oz_build_frame(pd, f);
	spin_unlock(&pd->tx_frame_lock);
	if (!skb)
		return -1;
	if (more_data)
		oz_set_more_bit(skb);
	oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
	if (dev_queue_xmit(skb) < 0)
		return -1;
	return 0;
}
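
/*
 * Prepare as many frames as the element buffer allows and then send up
 * to 'backlog' of them. The ISOC mode bits decide how the backlog is
 * computed: with buffered ISOC (OZ_F_ISOC_NO_ELTS) the queued ISOC
 * frames are added in, capped at OZ_MAX_SUBMITTED_ISOC. When nothing
 * needs sending, an empty frame is prepared and sent anyway so the
 * device still receives a frame carrying the current last packet
 * number.
 */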

/*
 * Context: softirq-serialized
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
	while (oz_prepare_frame(pd, 0) >= 0)
		backlog++;

	switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {
	case OZ_F_ISOC_NO_ELTS: {
		backlog += pd->nb_queued_isoc_frames;
		if (backlog <= 0)
			goto out;
		if (backlog > OZ_MAX_SUBMITTED_ISOC)
			backlog = OZ_MAX_SUBMITTED_ISOC;
		break;
	}
	case OZ_NO_ELTS_ANYTIME: {
		if ((backlog <= 0) && (pd->isoc_sent == 0))
			goto out;
		break;
	}
	default: {
		if (backlog <= 0)
			goto out;
		break;
	}
	}
	while (backlog--) {
		if (oz_send_next_queued_frame(pd, backlog) < 0)
			break;
	}
	return;

out:
	oz_prepare_frame(pd, 1);
	oz_send_next_queued_frame(pd, 0);
}

/*
 * Context: softirq
 */
static int oz_send_isoc_frame(struct oz_pd *pd)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct oz_elt_info *ei;
	LIST_HEAD(list);
	int total_size = sizeof(struct oz_hdr);

	oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
			      pd->max_tx_size, &list);
	if (list_empty(&list))
		return 0;

	skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL) {
		oz_dbg(ON, "Cannot alloc skb\n");
		oz_elt_info_free_chain(&pd->elt_buff, &list);
		return -1;
	}
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
			    dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -1;
	}
	oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
	oz_hdr->control = (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT) | OZ_F_ISOC;
	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	elt = (struct oz_elt *)(oz_hdr + 1);
	list_for_each_entry(ei, &list, link) {
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	dev_queue_xmit(skb);
	oz_elt_info_free_chain(&pd->elt_buff, &list);
	return 0;
}
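
/*
 * Retire every queued frame up to and including the one whose packet
 * number matches 'lpn', the last packet number acknowledged by the
 * device. Only the low bits of the 32-bit packet number travel in the
 * ack, so the comparison below works modulo the packet-number window.
 * As a worked example, assuming a 5-bit window (OZ_LAST_PN_MASK ==
 * 0x1f, OZ_LAST_PN_HALF_CYCLE == 16): for lpn == 3 and a queued frame
 * with pkt_num == 30, diff = (3 - 30) & 0x1f = 5, which is within the
 * half cycle, so the ack has wrapped past the frame and it can be
 * retired. A diff greater than the half cycle means the frame is
 * still ahead of the acknowledged number and must be kept for
 * retransmission.
 */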

/*
 * Context: softirq-serialized
 */
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
	struct oz_tx_frame *f, *tmp = NULL;
	u8 diff;
	u32 pkt_num;
	LIST_HEAD(list);

	spin_lock(&pd->tx_frame_lock);
	list_for_each_entry(f, &pd->tx_queue, link) {
		pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
		diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
		if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
			break;
		oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
		       pkt_num, pd->nb_queued_frames);
		tmp = f;
		pd->nb_queued_frames--;
	}
	if (tmp)
		list_cut_position(&list, &pd->tx_queue, &tmp->link);
	pd->last_sent_frame = &pd->tx_queue;
	spin_unlock(&pd->tx_frame_lock);

	list_for_each_entry_safe(f, tmp, &list, link)
		oz_retire_frame(pd, f);
}

/*
 * Precondition: stream_lock must be held.
 * Context: softirq
 */
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;

	list_for_each_entry(st, &pd->stream_list, link) {
		if (st->ep_num == ep_num)
			return st;
	}
	return NULL;
}
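
/*
 * Create an isochronous stream for an endpoint. The stream structure
 * is allocated before the lock is taken; if a stream for the same
 * endpoint already exists, the new allocation is simply freed again.
 * This keeps the allocation out of the locked region.
 */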

/*
 * Context: softirq
 */
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;

	st = kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
	if (!st)
		return -ENOMEM;
	st->ep_num = ep_num;

	spin_lock_bh(&pd->stream_lock);
	if (!pd_stream_find(pd, ep_num)) {
		list_add(&st->link, &pd->stream_list);
		st = NULL;
	}
	spin_unlock_bh(&pd->stream_lock);

	kfree(st);
	return 0;
}

/*
 * Context: softirq or process
 */
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
	kfree_skb(st->skb);
	kfree(st);
}

/*
 * Context: softirq
 */
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st)
		list_del(&st->link);
	spin_unlock_bh(&pd->stream_lock);

	if (st)
		oz_isoc_stream_free(st);
	return 0;
}

/*
 * Context: any
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
	atomic_dec(&g_submitted_isoc);
}
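
/*
 * Append one millisecond's worth of isochronous data to the stream for
 * the given endpoint. Units are accumulated in a single skb parked on
 * the stream between calls; once pd->ms_per_isoc units have been
 * collected, the OZ and OZ_DATA_F_ISOC_LARGE headers are filled in and
 * the frame is either queued on the tx queue (when the PD is not in
 * ANYTIME mode) or transmitted immediately. If the tx queue already
 * holds isoc_latency frames, the oldest queued ISOC frame is dropped
 * first to bound the audio latency.
 */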

/*
 * Context: softirq
 */
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
{
	struct net_device *dev = pd->net_dev;
	struct oz_isoc_stream *st;
	u8 nb_units = 0;
	struct sk_buff *skb = NULL;
	struct oz_hdr *oz_hdr = NULL;
	int size = 0;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st) {
		skb = st->skb;
		st->skb = NULL;
		nb_units = st->nb_units;
		st->nb_units = 0;
		oz_hdr = st->oz_hdr;
		size = st->size;
	}
	spin_unlock_bh(&pd->stream_lock);
	if (!st)
		return 0;

	if (!skb) {
		/* Allocate enough space for a maximum sized frame. */
		skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
				GFP_ATOMIC);
		if (skb == NULL)
			return 0;
		/* Reserve the head room for lower layers. */
		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);
		skb->dev = dev;
		skb->protocol = htons(OZ_ETHERTYPE);
		/* For audio packets set the priority to AC_VO. */
		skb->priority = 0x7;
		size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
		oz_hdr = (struct oz_hdr *)skb_put(skb, size);
	}
	memcpy(skb_put(skb, len), data, len);
	size += len;

	if (++nb_units < pd->ms_per_isoc) {
		spin_lock_bh(&pd->stream_lock);
		st->skb = skb;
		st->nb_units = nb_units;
		st->oz_hdr = oz_hdr;
		st->size = size;
		spin_unlock_bh(&pd->stream_lock);
	} else {
		struct oz_hdr oz;
		struct oz_isoc_large iso;

		spin_lock_bh(&pd->stream_lock);
		iso.frame_number = st->frame_num;
		st->frame_num += nb_units;
		spin_unlock_bh(&pd->stream_lock);

		oz.control =
			(OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT) | OZ_F_ISOC;
		oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
		oz.pkt_num = 0;
		iso.endpoint = ep_num;
		iso.format = OZ_DATA_F_ISOC_LARGE;
		iso.ms_data = nb_units;
		memcpy(oz_hdr, &oz, sizeof(oz));
		memcpy(oz_hdr + 1, &iso, sizeof(iso));
		if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
				    dev->dev_addr, skb->len) < 0)
			goto out;
		skb->destructor = oz_isoc_destructor;

		/* Queue for transmit if mode is not ANYTIME. */
		if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
			struct oz_tx_frame *isoc_unit = NULL;
			int nb = pd->nb_queued_isoc_frames;

			if (nb >= pd->isoc_latency) {
				struct oz_tx_frame *f;

				oz_dbg(TX_FRAMES, "Dropping ISOC Unit nb= %d\n",
				       nb);
				spin_lock(&pd->tx_frame_lock);
				list_for_each_entry(f, &pd->tx_queue, link) {
					if (f->skb != NULL) {
						oz_tx_isoc_free(pd, f);
						break;
					}
				}
				spin_unlock(&pd->tx_frame_lock);
			}
			isoc_unit = oz_tx_frame_alloc(pd);
			if (isoc_unit == NULL)
				goto out;
			isoc_unit->hdr = oz;
			isoc_unit->skb = skb;
			spin_lock_bh(&pd->tx_frame_lock);
			list_add_tail(&isoc_unit->link, &pd->tx_queue);
			pd->nb_queued_isoc_frames++;
			spin_unlock_bh(&pd->tx_frame_lock);
			oz_dbg(TX_FRAMES,
			       "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
			       pd->nb_queued_isoc_frames, pd->nb_queued_frames);
			return 0;
		}

		/* In ANYTIME mode transmit the unit immediately. */
		if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
			atomic_inc(&g_submitted_isoc);
			if (dev_queue_xmit(skb) < 0)
				return -1;
			return 0;
		}

out:
		kfree_skb(skb);
		return -1;
	}
	return 0;
}

/*
 * Context: process
 */
void oz_apps_init(void)
{
	int i;

	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].init)
			g_app_if[i].init();
	}
}

/*
 * Context: process
 */
void oz_apps_term(void)
{
	int i;

	/* Terminate all the apps. */
	for (i = 0; i < OZ_NB_APPS; i++) {
		if (g_app_if[i].term)
			g_app_if[i].term();
	}
}

/*
 * Context: softirq-serialized
 */
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
	if (app_id < OZ_NB_APPS && g_app_if[app_id].rx)
		g_app_if[app_id].rx(pd, elt);
}
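
/*
 * Drain the PD's farewell list and hand each report to the USB
 * service's farewell handler. Entries are removed one at a time,
 * dropping the polling lock around each callback so the handler runs
 * without the lock held.
 */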

/*
 * Context: softirq or process
 */
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
	struct oz_farewell *f;
	const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB];

	while (1) {
		spin_lock_bh(&g_polling_lock);
		if (list_empty(&pd->farewell_list)) {
			spin_unlock_bh(&g_polling_lock);
			break;
		}
		f = list_first_entry(&pd->farewell_list,
				     struct oz_farewell, link);
		list_del(&f->link);
		spin_unlock_bh(&g_polling_lock);
		if (ai->farewell)
			ai->farewell(pd, f->ep_num, f->report, f->len);
		kfree(f);
	}
}