/*
 * Greybus CPort control protocol.
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

#include "greybus.h"

/* Highest control-protocol version supported */
#define GB_CONTROL_VERSION_MAJOR	0
#define GB_CONTROL_VERSION_MINOR	1
/*
 * Negotiate the control-protocol version with the interface.
 *
 * Sends the highest version we implement and stores the version the
 * interface replies with in @control.  A reply with a higher major
 * version than requested is rejected as incompatible.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int gb_control_get_version(struct gb_control *control)
{
	struct gb_interface *intf = control->connection->intf;
	struct gb_control_version_request request;
	struct gb_control_version_response response;
	int ret;

	request.major = GB_CONTROL_VERSION_MAJOR;
	request.minor = GB_CONTROL_VERSION_MINOR;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_VERSION,
				&request, sizeof(request), &response,
				sizeof(response));
	if (ret) {
		dev_err(&intf->dev,
			"failed to get control-protocol version: %d\n",
			ret);
		return ret;
	}

	/* A greater minor version is assumed backwards compatible. */
	if (response.major > request.major) {
		dev_err(&intf->dev,
			"unsupported major control-protocol version (%u > %u)\n",
			response.major, request.major);
		/* NOTE(review): checkpatch prefers EOPNOTSUPP over ENOTSUPP */
		return -ENOTSUPP;
	}

	control->protocol_major = response.major;
	control->protocol_minor = response.minor;

	dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
		response.minor);

	return 0;
}
  46. static int gb_control_get_bundle_version(struct gb_control *control,
  47. struct gb_bundle *bundle)
  48. {
  49. struct gb_interface *intf = control->connection->intf;
  50. struct gb_control_bundle_version_request request;
  51. struct gb_control_bundle_version_response response;
  52. int ret;
  53. request.bundle_id = bundle->id;
  54. ret = gb_operation_sync(control->connection,
  55. GB_CONTROL_TYPE_BUNDLE_VERSION,
  56. &request, sizeof(request),
  57. &response, sizeof(response));
  58. if (ret) {
  59. dev_err(&intf->dev,
  60. "failed to get bundle %u class version: %d\n",
  61. bundle->id, ret);
  62. return ret;
  63. }
  64. bundle->class_major = response.major;
  65. bundle->class_minor = response.minor;
  66. dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
  67. response.major, response.minor);
  68. return 0;
  69. }
  70. int gb_control_get_bundle_versions(struct gb_control *control)
  71. {
  72. struct gb_interface *intf = control->connection->intf;
  73. struct gb_bundle *bundle;
  74. int ret;
  75. if (!control->has_bundle_version)
  76. return 0;
  77. list_for_each_entry(bundle, &intf->bundles, links) {
  78. ret = gb_control_get_bundle_version(control, bundle);
  79. if (ret)
  80. return ret;
  81. }
  82. return 0;
  83. }
  84. /* Get Manifest's size from the interface */
  85. int gb_control_get_manifest_size_operation(struct gb_interface *intf)
  86. {
  87. struct gb_control_get_manifest_size_response response;
  88. struct gb_connection *connection = intf->control->connection;
  89. int ret;
  90. ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
  91. NULL, 0, &response, sizeof(response));
  92. if (ret) {
  93. dev_err(&connection->intf->dev,
  94. "failed to get manifest size: %d\n", ret);
  95. return ret;
  96. }
  97. return le16_to_cpu(response.size);
  98. }
  99. /* Reads Manifest from the interface */
  100. int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
  101. size_t size)
  102. {
  103. struct gb_connection *connection = intf->control->connection;
  104. return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
  105. NULL, 0, manifest, size);
  106. }
  107. int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
  108. {
  109. struct gb_control_connected_request request;
  110. request.cport_id = cpu_to_le16(cport_id);
  111. return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
  112. &request, sizeof(request), NULL, 0);
  113. }
  114. int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
  115. {
  116. struct gb_control_disconnected_request request;
  117. request.cport_id = cpu_to_le16(cport_id);
  118. return gb_operation_sync(control->connection,
  119. GB_CONTROL_TYPE_DISCONNECTED, &request,
  120. sizeof(request), NULL, 0);
  121. }
/*
 * Tell the interface that the connection to @cport_id is about to be
 * torn down.
 *
 * The operation is built through gb_operation_create_core() rather than
 * the usual gb_operation_sync() helper.  NOTE(review): presumably so the
 * request can still be sent while the connection is being shut down --
 * confirm against the operation-core implementation.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gb_control_disconnecting_operation(struct gb_control *control,
				       u16 cport_id)
{
	struct gb_control_disconnecting_request *request;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(control->connection,
					     GB_CONTROL_TYPE_DISCONNECTING,
					     sizeof(*request), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->cport_id = cpu_to_le16(cport_id);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&control->dev, "failed to send disconnecting: %d\n",
			ret);
	}

	/* Drop the reference taken at creation. */
	gb_operation_put(operation);

	return ret;
}
/*
 * Send the mode-switch request to the interface.
 *
 * The operation is unidirectional (GB_OPERATION_FLAG_UNIDIRECTIONAL):
 * no response message is expected, so success only means the request
 * was sent.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gb_control_mode_switch_operation(struct gb_control *control)
{
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(control->connection,
					     GB_CONTROL_TYPE_MODE_SWITCH,
					     0, 0, GB_OPERATION_FLAG_UNIDIRECTIONAL,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	ret = gb_operation_request_send_sync(operation);
	if (ret)
		dev_err(&control->dev, "failed to send mode switch: %d\n", ret);

	/* Drop the reference taken at creation. */
	gb_operation_put(operation);

	return ret;
}
  160. int gb_control_timesync_enable(struct gb_control *control, u8 count,
  161. u64 frame_time, u32 strobe_delay, u32 refclk)
  162. {
  163. struct gb_control_timesync_enable_request request;
  164. request.count = count;
  165. request.frame_time = cpu_to_le64(frame_time);
  166. request.strobe_delay = cpu_to_le32(strobe_delay);
  167. request.refclk = cpu_to_le32(refclk);
  168. return gb_operation_sync(control->connection,
  169. GB_CONTROL_TYPE_TIMESYNC_ENABLE, &request,
  170. sizeof(request), NULL, 0);
  171. }
/* Tear down timesync on the interface. */
int gb_control_timesync_disable(struct gb_control *control)
{
	return gb_operation_sync(control->connection,
				 GB_CONTROL_TYPE_TIMESYNC_DISABLE, NULL, 0,
				 NULL, 0);
}
  178. int gb_control_timesync_get_last_event(struct gb_control *control,
  179. u64 *frame_time)
  180. {
  181. struct gb_control_timesync_get_last_event_response response;
  182. int ret;
  183. ret = gb_operation_sync(control->connection,
  184. GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT,
  185. NULL, 0, &response, sizeof(response));
  186. if (!ret)
  187. *frame_time = le64_to_cpu(response.frame_time);
  188. return ret;
  189. }
  190. int gb_control_timesync_authoritative(struct gb_control *control,
  191. u64 *frame_time)
  192. {
  193. struct gb_control_timesync_authoritative_request request;
  194. int i;
  195. for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
  196. request.frame_time[i] = cpu_to_le64(frame_time[i]);
  197. return gb_operation_sync(control->connection,
  198. GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE,
  199. &request, sizeof(request),
  200. NULL, 0);
  201. }
  202. static int gb_control_bundle_pm_status_map(u8 status)
  203. {
  204. switch (status) {
  205. case GB_CONTROL_BUNDLE_PM_INVAL:
  206. return -EINVAL;
  207. case GB_CONTROL_BUNDLE_PM_BUSY:
  208. return -EBUSY;
  209. case GB_CONTROL_BUNDLE_PM_NA:
  210. return -ENOMSG;
  211. case GB_CONTROL_BUNDLE_PM_FAIL:
  212. default:
  213. return -EREMOTEIO;
  214. }
  215. }
  216. int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
  217. {
  218. struct gb_control_bundle_pm_request request;
  219. struct gb_control_bundle_pm_response response;
  220. int ret;
  221. request.bundle_id = bundle_id;
  222. ret = gb_operation_sync(control->connection,
  223. GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
  224. sizeof(request), &response, sizeof(response));
  225. if (ret) {
  226. dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
  227. bundle_id, ret);
  228. return ret;
  229. }
  230. if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
  231. dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
  232. bundle_id, response.status);
  233. return gb_control_bundle_pm_status_map(response.status);
  234. }
  235. return 0;
  236. }
  237. int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
  238. {
  239. struct gb_control_bundle_pm_request request;
  240. struct gb_control_bundle_pm_response response;
  241. int ret;
  242. request.bundle_id = bundle_id;
  243. ret = gb_operation_sync(control->connection,
  244. GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
  245. sizeof(request), &response, sizeof(response));
  246. if (ret) {
  247. dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
  248. bundle_id, ret);
  249. return ret;
  250. }
  251. if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
  252. dev_err(&control->dev, "failed to resume bundle %u: %d\n",
  253. bundle_id, response.status);
  254. return gb_control_bundle_pm_status_map(response.status);
  255. }
  256. return 0;
  257. }
  258. int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
  259. {
  260. struct gb_control_bundle_pm_request request;
  261. struct gb_control_bundle_pm_response response;
  262. int ret;
  263. request.bundle_id = bundle_id;
  264. ret = gb_operation_sync(control->connection,
  265. GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
  266. sizeof(request), &response, sizeof(response));
  267. if (ret) {
  268. dev_err(&control->dev,
  269. "failed to send bundle %u deactivate: %d\n", bundle_id,
  270. ret);
  271. return ret;
  272. }
  273. if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
  274. dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
  275. bundle_id, response.status);
  276. return gb_control_bundle_pm_status_map(response.status);
  277. }
  278. return 0;
  279. }
  280. int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
  281. {
  282. struct gb_control_bundle_pm_request request;
  283. struct gb_control_bundle_pm_response response;
  284. int ret;
  285. if (!control->has_bundle_activate)
  286. return 0;
  287. request.bundle_id = bundle_id;
  288. ret = gb_operation_sync(control->connection,
  289. GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
  290. sizeof(request), &response, sizeof(response));
  291. if (ret) {
  292. dev_err(&control->dev,
  293. "failed to send bundle %u activate: %d\n", bundle_id,
  294. ret);
  295. return ret;
  296. }
  297. if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
  298. dev_err(&control->dev, "failed to activate bundle %u: %d\n",
  299. bundle_id, response.status);
  300. return gb_control_bundle_pm_status_map(response.status);
  301. }
  302. return 0;
  303. }
  304. static int gb_control_interface_pm_status_map(u8 status)
  305. {
  306. switch (status) {
  307. case GB_CONTROL_INTF_PM_BUSY:
  308. return -EBUSY;
  309. case GB_CONTROL_INTF_PM_NA:
  310. return -ENOMSG;
  311. default:
  312. return -EREMOTEIO;
  313. }
  314. }
  315. int gb_control_interface_suspend_prepare(struct gb_control *control)
  316. {
  317. struct gb_control_intf_pm_response response;
  318. int ret;
  319. ret = gb_operation_sync(control->connection,
  320. GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
  321. &response, sizeof(response));
  322. if (ret) {
  323. dev_err(&control->dev,
  324. "failed to send interface suspend prepare: %d\n", ret);
  325. return ret;
  326. }
  327. if (response.status != GB_CONTROL_INTF_PM_OK) {
  328. dev_err(&control->dev, "interface error while preparing suspend: %d\n",
  329. response.status);
  330. return gb_control_interface_pm_status_map(response.status);
  331. }
  332. return 0;
  333. }
  334. int gb_control_interface_deactivate_prepare(struct gb_control *control)
  335. {
  336. struct gb_control_intf_pm_response response;
  337. int ret;
  338. ret = gb_operation_sync(control->connection,
  339. GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
  340. 0, &response, sizeof(response));
  341. if (ret) {
  342. dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
  343. ret);
  344. return ret;
  345. }
  346. if (response.status != GB_CONTROL_INTF_PM_OK) {
  347. dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
  348. response.status);
  349. return gb_control_interface_pm_status_map(response.status);
  350. }
  351. return 0;
  352. }
  353. int gb_control_interface_hibernate_abort(struct gb_control *control)
  354. {
  355. struct gb_control_intf_pm_response response;
  356. int ret;
  357. ret = gb_operation_sync(control->connection,
  358. GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
  359. &response, sizeof(response));
  360. if (ret) {
  361. dev_err(&control->dev,
  362. "failed to send interface aborting hibernate: %d\n",
  363. ret);
  364. return ret;
  365. }
  366. if (response.status != GB_CONTROL_INTF_PM_OK) {
  367. dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
  368. response.status);
  369. return gb_control_interface_pm_status_map(response.status);
  370. }
  371. return 0;
  372. }
/* sysfs: expose the interface's vendor string (read-only). */
static ssize_t vendor_string_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct gb_control *control = to_gb_control(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
}
static DEVICE_ATTR_RO(vendor_string);
/* sysfs: expose the interface's product string (read-only). */
static ssize_t product_string_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gb_control *control = to_gb_control(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
}
static DEVICE_ATTR_RO(product_string);
/* sysfs attributes published for every control device. */
static struct attribute *control_attrs[] = {
	&dev_attr_vendor_string.attr,
	&dev_attr_product_string.attr,
	NULL,
};
ATTRIBUTE_GROUPS(control);
/*
 * Device-model release callback: runs when the last reference to the
 * control device is dropped.  Frees everything allocated on behalf of
 * the control structure, including its connection.
 */
static void gb_control_release(struct device *dev)
{
	struct gb_control *control = to_gb_control(dev);

	gb_connection_destroy(control->connection);

	kfree(control->vendor_string);
	kfree(control->product_string);

	kfree(control);
}
/* Device type shared by all Greybus control devices. */
struct device_type greybus_control_type = {
	.name =		"greybus_control",
	.release =	gb_control_release,
};
/*
 * Allocate and initialize -- but do not register -- the control device
 * for @intf, including its control connection.
 *
 * Returns the new control structure or an ERR_PTR on failure.  The
 * caller holds the initial reference; releasing the last reference
 * triggers gb_control_release().
 */
struct gb_control *gb_control_create(struct gb_interface *intf)
{
	struct gb_connection *connection;
	struct gb_control *control;

	control = kzalloc(sizeof(*control), GFP_KERNEL);
	if (!control)
		return ERR_PTR(-ENOMEM);

	control->intf = intf;

	connection = gb_connection_create_control(intf);
	if (IS_ERR(connection)) {
		dev_err(&intf->dev,
			"failed to create control connection: %ld\n",
			PTR_ERR(connection));
		kfree(control);
		return ERR_CAST(connection);
	}

	control->connection = connection;

	/* Set up the device before device_initialize() takes effect. */
	control->dev.parent = &intf->dev;
	control->dev.bus = &greybus_bus_type;
	control->dev.type = &greybus_control_type;
	control->dev.groups = control_groups;
	control->dev.dma_mask = intf->dev.dma_mask;
	device_initialize(&control->dev);
	dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));

	gb_connection_set_data(control->connection, control);

	return control;
}
/*
 * Bring up the control connection (tx-only) and negotiate the protocol
 * version, deriving the optional-feature flags from the result.
 *
 * Returns 0 on success or a negative errno; on failure the connection
 * is left disabled.
 */
int gb_control_enable(struct gb_control *control)
{
	int ret;

	dev_dbg(&control->connection->intf->dev, "%s\n", __func__);

	ret = gb_connection_enable_tx(control->connection);
	if (ret) {
		dev_err(&control->connection->intf->dev,
			"failed to enable control connection: %d\n",
			ret);
		return ret;
	}

	ret = gb_control_get_version(control);
	if (ret)
		goto err_disable_connection;

	/* Versions later than 0.1 implement the bundle-version operation. */
	if (control->protocol_major > 0 || control->protocol_minor > 1)
		control->has_bundle_version = true;

	/* FIXME: use protocol version instead */
	if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
		control->has_bundle_activate = true;

	return 0;

err_disable_connection:
	gb_connection_disable(control->connection);

	return ret;
}
  456. void gb_control_disable(struct gb_control *control)
  457. {
  458. dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
  459. if (control->intf->disconnected)
  460. gb_connection_disable_forced(control->connection);
  461. else
  462. gb_connection_disable(control->connection);
  463. }
/* Quiesce the control connection for interface suspend.  Always succeeds. */
int gb_control_suspend(struct gb_control *control)
{
	gb_connection_disable(control->connection);

	return 0;
}
  469. int gb_control_resume(struct gb_control *control)
  470. {
  471. int ret;
  472. ret = gb_connection_enable_tx(control->connection);
  473. if (ret) {
  474. dev_err(&control->connection->intf->dev,
  475. "failed to enable control connection: %d\n", ret);
  476. return ret;
  477. }
  478. return 0;
  479. }
  480. int gb_control_add(struct gb_control *control)
  481. {
  482. int ret;
  483. ret = device_add(&control->dev);
  484. if (ret) {
  485. dev_err(&control->dev,
  486. "failed to register control device: %d\n",
  487. ret);
  488. return ret;
  489. }
  490. return 0;
  491. }
/* Unregister the control device, if it was ever registered. */
void gb_control_del(struct gb_control *control)
{
	if (device_is_registered(&control->dev))
		device_del(&control->dev);
}
/* Take an additional reference on the control device; returns @control. */
struct gb_control *gb_control_get(struct gb_control *control)
{
	get_device(&control->dev);

	return control;
}
/* Drop a reference on the control device. */
void gb_control_put(struct gb_control *control)
{
	put_device(&control->dev);
}
/* Prepare the control connection for an impending mode switch. */
void gb_control_mode_switch_prepare(struct gb_control *control)
{
	gb_connection_mode_switch_prepare(control->connection);
}
/* Complete a mode switch on the control connection. */
void gb_control_mode_switch_complete(struct gb_control *control)
{
	gb_connection_mode_switch_complete(control->connection);
}