hci_h5.c

/*
 *
 * Bluetooth HCI Three-wire UART driver
 *
 * Copyright (C) 2012 Intel Corporation
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/serdev.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "btrtl.h"
#include "hci_uart.h"

#define HCI_3WIRE_ACK_PKT	0
#define HCI_3WIRE_LINK_PKT	15

/* Sliding window size */
#define H5_TX_WIN_MAX		4

#define H5_ACK_TIMEOUT	msecs_to_jiffies(250)
#define H5_SYNC_TIMEOUT	msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 *     4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))
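
/*
 * Header layout implied by the macros above (4 bytes):
 *
 *   byte 0: bits 0-2 sequence number, bits 3-5 acknowledgement number,
 *           bit 6 data-integrity (CRC present) flag, bit 7 reliable flag
 *   byte 1: bits 0-3 packet type, bits 4-7 low nibble of the payload length
 *   byte 2: upper 8 bits of the 12-bit payload length
 *   byte 3: header checksum (see h5_rx_3wire_hdr() and h5_prepare_pkt())
 */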

#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd
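
/*
 * Frames are SLIP-encoded on the wire: each packet starts and ends with
 * SLIP_DELIMITER (0xc0), and occurrences of 0xc0 and 0xdb inside a frame
 * are escaped as 0xdb 0xdc and 0xdb 0xdd respectively (see
 * h5_slip_one_byte() and h5_unslip_one_byte() below).
 */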

/* H5 state flags */
enum {
	H5_RX_ESC,	/* SLIP escape mode */
	H5_TX_ACK_REQ,	/* Pending ack to send */
};

struct h5 {
	/* Must be the first member, hci_serdev.c expects this. */
	struct hci_uart serdev_hu;

	struct sk_buff_head	unack;		/* Unack'ed packets queue */
	struct sk_buff_head	rel;		/* Reliable packets queue */
	struct sk_buff_head	unrel;		/* Unreliable packets queue */

	unsigned long		flags;

	struct sk_buff		*rx_skb;	/* Receive buffer */
	size_t			rx_pending;	/* Expecting more bytes */
	u8			rx_ack;		/* Last ack number received */

	int			(*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list	timer;		/* Retransmission timer */
	struct hci_uart		*hu;		/* Parent HCI UART */

	u8			tx_seq;		/* Next seq number to send */
	u8			tx_ack;		/* Next ack number to send */
	u8			tx_win;		/* Sliding window size */

	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;

	const struct h5_vnd *vnd;
	const char *id;

	struct gpio_desc *enable_gpio;
	struct gpio_desc *device_wake_gpio;
};

struct h5_vnd {
	int (*setup)(struct h5 *h5);
	void (*open)(struct h5 *h5);
	void (*close)(struct h5 *h5);
	const struct acpi_gpio_mapping *acpi_gpio_map;
};

static void h5_reset_rx(struct h5 *h5);

static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;

	nskb = alloc_skb(3, GFP_ATOMIC);
	if (!nskb)
		return;

	hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;

	skb_put_data(nskb, data, len);

	skb_queue_tail(&h5->unrel, nskb);
}

static u8 h5_cfg_field(struct h5 *h5)
{
	/* Sliding window size (first 3 bits) */
	return h5->tx_win & 0x07;
}
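
/*
 * Retransmission/sync timer. Until the link is active it periodically
 * re-sends the SYNC or CONFIG request; once active, an expiry means an
 * acknowledgement was not received in time, so every packet still sitting
 * in the unack queue is pushed back to the head of the reliable queue and
 * tx_seq is rewound accordingly.
 */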
static void h5_timed_event(struct timer_list *t)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	struct h5 *h5 = from_timer(h5, t, timer);
	struct hci_uart *hu = h5->hu;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	if (h5->sleep != H5_AWAKE) {
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}

static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	BT_ERR("Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	del_timer(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}

static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;
	const unsigned char sync[] = { 0x01, 0x7e };

	BT_DBG("hu %p", hu);

	if (hu->serdev) {
		h5 = serdev_device_get_drvdata(hu->serdev);
	} else {
		h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
		if (!h5)
			return -ENOMEM;
	}

	hu->priv = h5;
	h5->hu = hu;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	h5_reset_rx(h5);

	timer_setup(&h5->timer, h5_timed_event, 0);

	h5->tx_win = H5_TX_WIN_MAX;

	if (h5->vnd && h5->vnd->open)
		h5->vnd->open(h5);

	set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

	/* Send initial sync request */
	h5_link_control(hu, sync, sizeof(sync));
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

	return 0;
}

static int h5_close(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	del_timer_sync(&h5->timer);

	skb_queue_purge(&h5->unack);
	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);

	if (h5->vnd && h5->vnd->close)
		h5->vnd->close(h5);

	if (!hu->serdev)
		kfree(h5);

	return 0;
}

static int h5_setup(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	if (h5->vnd && h5->vnd->setup)
		return h5->vnd->setup(h5);

	return 0;
}
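
/*
 * Drop acknowledged packets from the unack queue: walk back from tx_seq
 * until rx_ack is reached to work out how many of the oldest unacked
 * packets the received ack number covers, then unlink and free that many
 * from the front of the queue. When the queue empties, the retransmission
 * timer is stopped.
 */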
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	seq = h5->tx_seq;

	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		kfree_skb(skb);
	}

	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}
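
/*
 * Link-establishment state machine: the peers exchange SYNC/SYNC_RSP
 * messages while H5_UNINITIALIZED, then CONFIG/CONFIG_RSP while
 * H5_INITIALIZED (the config field carries the sliding-window size),
 * after which the link becomes H5_ACTIVE. Sleep-related messages
 * (SLEEP/WAKEUP/WOKEN) are handled here as well. A SYNC or SYNC_RSP
 * received while active means the peer has reset, so the link is torn
 * down and renegotiated.
 */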
static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 0x07);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	hci_uart_tx_wakeup(hu);
}

static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}

static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);

	return 0;
}

static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_CRC(hdr)) {
		h5->rx_func = h5_rx_crc;
		h5->rx_pending = 2;
	} else {
		h5_complete_rx_pkt(hu);
	}

	return 0;
}
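
/*
 * The fourth header byte is chosen so that all four header bytes sum to
 * 0xff modulo 256 (see h5_prepare_pkt()), which is what the checksum test
 * below verifies before the payload is accepted.
 */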
static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		BT_ERR("Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		BT_ERR("Out-of-order packet arrived (%u != %u)",
		       H5_HDR_SEQ(hdr), h5->tx_ack);
		h5_reset_rx(h5);
		return 0;
	}

	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		BT_ERR("Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}

static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		return 1;

	h5->rx_func = h5_rx_3wire_hdr;
	h5->rx_pending = 4;

	h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
	if (!h5->rx_skb) {
		BT_ERR("Can't allocate mem for new packet");
		h5_reset_rx(h5);
		return -ENOMEM;
	}

	h5->rx_skb->dev = (void *)hu->hdev;

	return 0;
}

static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		h5->rx_func = h5_rx_pkt_start;

	return 1;
}

static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
	const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
	const u8 *byte = &c;

	if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
		set_bit(H5_RX_ESC, &h5->flags);
		return;
	}

	if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
		switch (c) {
		case SLIP_ESC_DELIM:
			byte = &delim;
			break;
		case SLIP_ESC_ESC:
			byte = &esc;
			break;
		default:
			BT_ERR("Invalid esc byte 0x%02hhx", c);
			h5_reset_rx(h5);
			return;
		}
	}

	skb_put_data(h5->rx_skb, byte, 1);
	h5->rx_pending--;

	BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}

static void h5_reset_rx(struct h5 *h5)
{
	if (h5->rx_skb) {
		kfree_skb(h5->rx_skb);
		h5->rx_skb = NULL;
	}

	h5->rx_func = h5_rx_delimiter;
	h5->rx_pending = 0;
	clear_bit(H5_RX_ESC, &h5->flags);
}
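
/*
 * Receive path: rx_func points at the current parser state
 * (h5_rx_delimiter -> h5_rx_pkt_start -> h5_rx_3wire_hdr -> h5_rx_payload
 * -> optionally h5_rx_crc). While rx_pending is non-zero, incoming bytes
 * are SLIP-decoded straight into rx_skb; otherwise the current state
 * function decides what comes next.
 */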
static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			if (*ptr == SLIP_DELIMITER) {
				BT_ERR("Too short H5 packet");
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	return 0;
}

static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct h5 *h5 = hu->priv;

	if (skb->len > 0xfff) {
		BT_ERR("Packet too long (%u bytes)", skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (h5->state != H5_ACTIVE) {
		BT_ERR("Ignoring HCI data in non-active state");
		kfree_skb(skb);
		return 0;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		skb_queue_tail(&h5->rel, skb);
		break;

	case HCI_SCODATA_PKT:
		skb_queue_tail(&h5->unrel, skb);
		break;

	default:
		BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
		kfree_skb(skb);
		break;
	}

	return 0;
}

static void h5_slip_delim(struct sk_buff *skb)
{
	const char delim = SLIP_DELIMITER;

	skb_put_data(skb, &delim, 1);
}

static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
{
	const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
	const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };

	switch (c) {
	case SLIP_DELIMITER:
		skb_put_data(skb, &esc_delim, 2);
		break;
	case SLIP_ESC:
		skb_put_data(skb, &esc_esc, 2);
		break;
	default:
		skb_put_data(skb, &c, 1);
	}
}

static bool valid_packet_type(u8 type)
{
	switch (type) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
	case HCI_SCODATA_PKT:
	case HCI_3WIRE_LINK_PKT:
	case HCI_3WIRE_ACK_PKT:
		return true;
	default:
		return false;
	}
}
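
/*
 * Build one SLIP frame: an opening 0xc0 delimiter, the SLIP-escaped 4-byte
 * header, the SLIP-escaped payload and a closing delimiter. Reliable
 * packets (ACL data and commands) consume a sequence number; every packet
 * carries the current ack number, so sending any frame also acknowledges
 * received reliable traffic and clears H5_TX_ACK_REQ.
 */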
static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		BT_ERR("Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_skb_pkt_type(nskb) = pkt_type;

	h5_slip_delim(nskb);

	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}
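
/*
 * TX scheduling: if the link is asleep, a WAKEUP request is sent first.
 * Otherwise unreliable packets go out immediately, reliable packets only
 * while fewer than tx_win packets are awaiting acknowledgement (they are
 * moved to the unack queue and the ack timer is armed), and finally a pure
 * ack frame is emitted if H5_TX_ACK_REQ is pending.
 */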
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;

		BT_DBG("Sending wakeup request");

		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		skb_queue_head(&h5->unrel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}

static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);

	return 0;
}

static const struct hci_uart_proto h5p = {
	.id		= HCI_UART_3WIRE,
	.name		= "Three-wire (H5)",
	.open		= h5_open,
	.close		= h5_close,
	.setup		= h5_setup,
	.recv		= h5_recv,
	.enqueue	= h5_enqueue,
	.dequeue	= h5_dequeue,
	.flush		= h5_flush,
};

static int h5_serdev_probe(struct serdev_device *serdev)
{
	const struct acpi_device_id *match;
	struct device *dev = &serdev->dev;
	struct h5 *h5;

	h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.flags);

	h5->hu = &h5->serdev_hu;
	h5->serdev_hu.serdev = serdev;
	serdev_device_set_drvdata(serdev, h5);

	if (has_acpi_companion(dev)) {
		match = acpi_match_device(dev->driver->acpi_match_table, dev);
		if (!match)
			return -ENODEV;

		h5->vnd = (const struct h5_vnd *)match->driver_data;
		h5->id = (char *)match->id;

		if (h5->vnd->acpi_gpio_map)
			devm_acpi_dev_add_driver_gpios(dev,
						       h5->vnd->acpi_gpio_map);
	}

	h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
	if (IS_ERR(h5->enable_gpio))
		return PTR_ERR(h5->enable_gpio);

	h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake",
						       GPIOD_OUT_LOW);
	if (IS_ERR(h5->device_wake_gpio))
		return PTR_ERR(h5->device_wake_gpio);

	return hci_uart_register_device(&h5->serdev_hu, &h5p);
}

static void h5_serdev_remove(struct serdev_device *serdev)
{
	struct h5 *h5 = serdev_device_get_drvdata(serdev);

	hci_uart_unregister_device(&h5->serdev_hu);
}
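
/*
 * Realtek-specific vendor hooks (built when CONFIG_BT_HCIUART_RTL is set):
 * h5_btrtl_setup() queries the btrtl helper for the required UART settings,
 * switches the controller baud rate with the vendor HCI command 0xfc17 and
 * downloads the firmware, while the open/close hooks drive the enable and
 * device-wake GPIOs around the fixed 115200 baud, even parity, no flow
 * control defaults.
 */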
#ifdef CONFIG_BT_HCIUART_RTL
static int h5_btrtl_setup(struct h5 *h5)
{
	struct btrtl_device_info *btrtl_dev;
	struct sk_buff *skb;
	__le32 baudrate_data;
	u32 device_baudrate;
	unsigned int controller_baudrate;
	bool flow_control;
	int err;

	btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id);
	if (IS_ERR(btrtl_dev))
		return PTR_ERR(btrtl_dev);

	err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev,
				      &controller_baudrate, &device_baudrate,
				      &flow_control);
	if (err)
		goto out_free;

	baudrate_data = cpu_to_le32(device_baudrate);
	skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data),
			     &baudrate_data, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n");
		err = PTR_ERR(skb);
		goto out_free;
	} else {
		kfree_skb(skb);
	}

	/* Give the device some time to set up the new baudrate. */
	usleep_range(10000, 20000);
	serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate);
	serdev_device_set_flow_control(h5->hu->serdev, flow_control);

	err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev);

	/* Give the device some time before the hci-core sends it a reset */
	usleep_range(10000, 20000);

out_free:
	btrtl_free(btrtl_dev);

	return err;
}

static void h5_btrtl_open(struct h5 *h5)
{
	/* Devices always start with these fixed parameters */
	serdev_device_set_flow_control(h5->hu->serdev, false);
	serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
	serdev_device_set_baudrate(h5->hu->serdev, 115200);

	/* The controller needs up to 500ms to wakeup */
	gpiod_set_value_cansleep(h5->enable_gpio, 1);
	gpiod_set_value_cansleep(h5->device_wake_gpio, 1);
	msleep(500);
}

static void h5_btrtl_close(struct h5 *h5)
{
	gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
	gpiod_set_value_cansleep(h5->enable_gpio, 0);
}

static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false };
static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false };
static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false };
static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = {
	{ "device-wake-gpios", &btrtl_device_wake_gpios, 1 },
	{ "enable-gpios", &btrtl_enable_gpios, 1 },
	{ "host-wake-gpios", &btrtl_host_wake_gpios, 1 },
	{},
};

static struct h5_vnd rtl_vnd = {
	.setup		= h5_btrtl_setup,
	.open		= h5_btrtl_open,
	.close		= h5_btrtl_close,
	.acpi_gpio_map	= acpi_btrtl_gpios,
};
#endif

#ifdef CONFIG_ACPI
static const struct acpi_device_id h5_acpi_match[] = {
#ifdef CONFIG_BT_HCIUART_RTL
	{ "OBDA8723", (kernel_ulong_t)&rtl_vnd },
#endif
	{ },
};
MODULE_DEVICE_TABLE(acpi, h5_acpi_match);
#endif

static struct serdev_device_driver h5_serdev_driver = {
	.probe = h5_serdev_probe,
	.remove = h5_serdev_remove,
	.driver = {
		.name = "hci_uart_h5",
		.acpi_match_table = ACPI_PTR(h5_acpi_match),
	},
};

int __init h5_init(void)
{
	serdev_device_driver_register(&h5_serdev_driver);
	return hci_uart_register_proto(&h5p);
}

int __exit h5_deinit(void)
{
	serdev_device_driver_unregister(&h5_serdev_driver);
	return hci_uart_unregister_proto(&h5p);
}