svc.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487
  1. /*
  2. * SVC Greybus driver.
  3. *
  4. * Copyright 2015 Google Inc.
  5. * Copyright 2015 Linaro Ltd.
  6. *
  7. * Released under the GPLv2 only.
  8. */
  9. #include <linux/debugfs.h>
  10. #include <linux/workqueue.h>
  11. #include "greybus.h"
  12. #define SVC_INTF_EJECT_TIMEOUT 9000
  13. #define SVC_INTF_ACTIVATE_TIMEOUT 6000
  14. #define SVC_INTF_RESUME_TIMEOUT 3000
/*
 * Wrapper used to defer handling of an incoming SVC request to process
 * context via a workqueue (see gb_svc_queue_deferred_request()).
 */
struct gb_svc_deferred_request {
	struct work_struct work;
	struct gb_operation *operation;	/* the SVC request being deferred */
};

static int gb_svc_queue_deferred_request(struct gb_operation *operation);
  20. static ssize_t endo_id_show(struct device *dev,
  21. struct device_attribute *attr, char *buf)
  22. {
  23. struct gb_svc *svc = to_gb_svc(dev);
  24. return sprintf(buf, "0x%04x\n", svc->endo_id);
  25. }
  26. static DEVICE_ATTR_RO(endo_id);
  27. static ssize_t ap_intf_id_show(struct device *dev,
  28. struct device_attribute *attr, char *buf)
  29. {
  30. struct gb_svc *svc = to_gb_svc(dev);
  31. return sprintf(buf, "%u\n", svc->ap_intf_id);
  32. }
  33. static DEVICE_ATTR_RO(ap_intf_id);
  34. // FIXME
  35. // This is a hack, we need to do this "right" and clean the interface up
  36. // properly, not just forcibly yank the thing out of the system and hope for the
  37. // best. But for now, people want their modules to come out without having to
  38. // throw the thing to the ground or get out a screwdriver.
  39. static ssize_t intf_eject_store(struct device *dev,
  40. struct device_attribute *attr, const char *buf,
  41. size_t len)
  42. {
  43. struct gb_svc *svc = to_gb_svc(dev);
  44. unsigned short intf_id;
  45. int ret;
  46. ret = kstrtou16(buf, 10, &intf_id);
  47. if (ret < 0)
  48. return ret;
  49. dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
  50. ret = gb_svc_intf_eject(svc, intf_id);
  51. if (ret < 0)
  52. return ret;
  53. return len;
  54. }
  55. static DEVICE_ATTR_WO(intf_eject);
  56. static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
  57. char *buf)
  58. {
  59. struct gb_svc *svc = to_gb_svc(dev);
  60. return sprintf(buf, "%s\n",
  61. gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
  62. }
  63. static ssize_t watchdog_store(struct device *dev,
  64. struct device_attribute *attr, const char *buf,
  65. size_t len)
  66. {
  67. struct gb_svc *svc = to_gb_svc(dev);
  68. int retval;
  69. bool user_request;
  70. retval = strtobool(buf, &user_request);
  71. if (retval)
  72. return retval;
  73. if (user_request)
  74. retval = gb_svc_watchdog_enable(svc);
  75. else
  76. retval = gb_svc_watchdog_disable(svc);
  77. if (retval)
  78. return retval;
  79. return len;
  80. }
  81. static DEVICE_ATTR_RW(watchdog);
  82. static ssize_t watchdog_action_show(struct device *dev,
  83. struct device_attribute *attr, char *buf)
  84. {
  85. struct gb_svc *svc = to_gb_svc(dev);
  86. if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
  87. return sprintf(buf, "panic\n");
  88. else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
  89. return sprintf(buf, "reset\n");
  90. return -EINVAL;
  91. }
  92. static ssize_t watchdog_action_store(struct device *dev,
  93. struct device_attribute *attr,
  94. const char *buf, size_t len)
  95. {
  96. struct gb_svc *svc = to_gb_svc(dev);
  97. if (sysfs_streq(buf, "panic"))
  98. svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
  99. else if (sysfs_streq(buf, "reset"))
  100. svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
  101. else
  102. return -EINVAL;
  103. return len;
  104. }
  105. static DEVICE_ATTR_RW(watchdog_action);
/*
 * Query the SVC for the number of power-monitor rails it exposes.
 * On success the count is stored in @value; on failure a negative
 * errno is returned and @value is left untouched.
 */
static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
{
	struct gb_svc_pwrmon_rail_count_get_response response;
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
		return ret;
	}

	*value = response.rail_count;

	return 0;
}
/*
 * Fetch the power-monitor rail names into the caller-allocated @response
 * buffer of @bufsize bytes.  Returns 0 on success, a negative errno on
 * transport failure, or -EREMOTEIO if the SVC itself reports an error
 * status in the reply.
 */
static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
		struct gb_svc_pwrmon_rail_names_get_response *response,
		size_t bufsize)
{
	int ret;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
				response, bufsize);
	if (ret) {
		dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
		return ret;
	}

	if (response->status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev,
			"SVC error while getting rail names: %u\n",
			response->status);
		return -EREMOTEIO;
	}

	return 0;
}
/*
 * Read one power-monitor sample of @measurement_type (voltage, current or
 * power) for rail @rail_id.  The little-endian measurement from the wire
 * is converted to CPU byte order and stored in @value.
 */
static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
				    u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_sample_get_request request;
	struct gb_svc_pwrmon_sample_get_response response;
	int ret;

	request.rail_id = rail_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting rail power sample (%d %d): %d\n",
			rail_id, measurement_type, response.result);
		/* Map the SVC result code to a matching errno. */
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}
/*
 * Read one power-monitor sample of @measurement_type for interface
 * @intf_id (per-interface variant of gb_svc_pwrmon_sample_get()).  The
 * little-endian measurement is converted to CPU byte order and stored in
 * @value.
 */
int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
				  u8 measurement_type, u32 *value)
{
	struct gb_svc_pwrmon_intf_sample_get_request request;
	struct gb_svc_pwrmon_intf_sample_get_response response;
	int ret;

	request.intf_id = intf_id;
	request.measurement_type = measurement_type;

	ret = gb_operation_sync(svc->connection,
				GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
		return ret;
	}

	if (response.result) {
		dev_err(&svc->dev,
			"UniPro error while getting intf power sample (%d %d): %d\n",
			intf_id, measurement_type, response.result);
		/* Map the SVC result code to a matching errno. */
		switch (response.result) {
		case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
			return -EINVAL;
		case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
			return -ENOMSG;
		default:
			return -EREMOTEIO;
		}
	}

	*value = le32_to_cpu(response.measurement);

	return 0;
}
/* sysfs attributes exposed by the SVC device. */
static struct attribute *svc_attrs[] = {
	&dev_attr_endo_id.attr,
	&dev_attr_ap_intf_id.attr,
	&dev_attr_intf_eject.attr,
	&dev_attr_watchdog.attr,
	&dev_attr_watchdog_action.attr,
	NULL,
};
ATTRIBUTE_GROUPS(svc);
  212. int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
  213. {
  214. struct gb_svc_intf_device_id_request request;
  215. request.intf_id = intf_id;
  216. request.device_id = device_id;
  217. return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
  218. &request, sizeof(request), NULL, 0);
  219. }
/* Ask the SVC to physically eject the module holding interface @intf_id. */
int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_eject_request request;
	int ret;

	request.intf_id = intf_id;

	/*
	 * The pulse width for module release in svc is long so we need to
	 * increase the timeout so the operation will not return too soon.
	 */
	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_EJECT, &request,
					sizeof(request), NULL, 0,
					SVC_INTF_EJECT_TIMEOUT);
	if (ret) {
		dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
		return ret;
	}

	return 0;
}
  239. int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
  240. {
  241. struct gb_svc_intf_vsys_request request;
  242. struct gb_svc_intf_vsys_response response;
  243. int type, ret;
  244. request.intf_id = intf_id;
  245. if (enable)
  246. type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
  247. else
  248. type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
  249. ret = gb_operation_sync(svc->connection, type,
  250. &request, sizeof(request),
  251. &response, sizeof(response));
  252. if (ret < 0)
  253. return ret;
  254. if (response.result_code != GB_SVC_INTF_VSYS_OK)
  255. return -EREMOTEIO;
  256. return 0;
  257. }
  258. int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
  259. {
  260. struct gb_svc_intf_refclk_request request;
  261. struct gb_svc_intf_refclk_response response;
  262. int type, ret;
  263. request.intf_id = intf_id;
  264. if (enable)
  265. type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
  266. else
  267. type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
  268. ret = gb_operation_sync(svc->connection, type,
  269. &request, sizeof(request),
  270. &response, sizeof(response));
  271. if (ret < 0)
  272. return ret;
  273. if (response.result_code != GB_SVC_INTF_REFCLK_OK)
  274. return -EREMOTEIO;
  275. return 0;
  276. }
  277. int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
  278. {
  279. struct gb_svc_intf_unipro_request request;
  280. struct gb_svc_intf_unipro_response response;
  281. int type, ret;
  282. request.intf_id = intf_id;
  283. if (enable)
  284. type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
  285. else
  286. type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
  287. ret = gb_operation_sync(svc->connection, type,
  288. &request, sizeof(request),
  289. &response, sizeof(response));
  290. if (ret < 0)
  291. return ret;
  292. if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
  293. return -EREMOTEIO;
  294. return 0;
  295. }
/*
 * Activate interface @intf_id.  On success the interface type reported
 * by the SVC is stored in @intf_type.
 */
int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
{
	struct gb_svc_intf_activate_request request;
	struct gb_svc_intf_activate_response response;
	int ret;

	request.intf_id = intf_id;

	/* Activation can be slow, so use a longer-than-default timeout. */
	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_ACTIVATE,
					&request, sizeof(request),
					&response, sizeof(response),
					SVC_INTF_ACTIVATE_TIMEOUT);
	if (ret < 0)
		return ret;

	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to activate interface %u: %u\n",
			intf_id, response.status);
		return -EREMOTEIO;
	}

	*intf_type = response.intf_type;

	return 0;
}
/*
 * Resume the previously suspended interface @intf_id.  Returns 0 on
 * success, a negative errno on transport failure, or -EREMOTEIO if the
 * SVC reports a failure status.
 */
int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_resume_request request;
	struct gb_svc_intf_resume_response response;
	int ret;

	request.intf_id = intf_id;

	ret = gb_operation_sync_timeout(svc->connection,
					GB_SVC_TYPE_INTF_RESUME,
					&request, sizeof(request),
					&response, sizeof(response),
					SVC_INTF_RESUME_TIMEOUT);
	if (ret < 0) {
		dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
			intf_id, ret);
		return ret;
	}

	if (response.status != GB_SVC_OP_SUCCESS) {
		dev_err(&svc->dev, "failed to resume interface %u: %u\n",
			intf_id, response.status);
		return -EREMOTEIO;
	}

	return 0;
}
/*
 * Read DME attribute @attr (with @selector) from the peer at interface
 * @intf_id.  When @value is non-NULL the attribute value is stored there
 * on success.
 */
int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 *value)
{
	struct gb_svc_dme_peer_get_request request;
	struct gb_svc_dme_peer_get_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
			intf_id, attr, selector, ret);
		return ret;
	}

	/* A non-zero UniPro config result code means the peer rejected it. */
	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
			intf_id, attr, selector, result);
		return -EREMOTEIO;
	}

	if (value)
		*value = le32_to_cpu(response.attr_value);

	return 0;
}
/*
 * Write @value to DME attribute @attr (with @selector) of the peer at
 * interface @intf_id.
 */
int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
			u32 value)
{
	struct gb_svc_dme_peer_set_request request;
	struct gb_svc_dme_peer_set_response response;
	u16 result;
	int ret;

	request.intf_id = intf_id;
	request.attr = cpu_to_le16(attr);
	request.selector = cpu_to_le16(selector);
	request.value = cpu_to_le32(value);

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
			intf_id, attr, selector, value, ret);
		return ret;
	}

	/* A non-zero UniPro config result code means the peer rejected it. */
	result = le16_to_cpu(response.result_code);
	if (result) {
		dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
			intf_id, attr, selector, value, result);
		return -EREMOTEIO;
	}

	return 0;
}
  395. int gb_svc_connection_create(struct gb_svc *svc,
  396. u8 intf1_id, u16 cport1_id,
  397. u8 intf2_id, u16 cport2_id,
  398. u8 cport_flags)
  399. {
  400. struct gb_svc_conn_create_request request;
  401. request.intf1_id = intf1_id;
  402. request.cport1_id = cpu_to_le16(cport1_id);
  403. request.intf2_id = intf2_id;
  404. request.cport2_id = cpu_to_le16(cport2_id);
  405. request.tc = 0; /* TC0 */
  406. request.flags = cport_flags;
  407. return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
  408. &request, sizeof(request), NULL, 0);
  409. }
/*
 * Tear down the connection between the two given cports.  Failures are
 * only logged since callers have no way to recover (the function returns
 * void).
 */
void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
			       u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_destroy_request request;
	struct gb_connection *connection = svc->connection;
	int ret;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);

	ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
			intf1_id, cport1_id, intf2_id, cport2_id, ret);
	}
}
/*
 * Ask the SVC to start a timesync strobe sequence: @count strobes,
 * starting at @frame_time, separated by @strobe_delay, against the
 * given @refclk rate.
 */
int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
			   u32 strobe_delay, u32 refclk)
{
	struct gb_connection *connection = svc->connection;
	struct gb_svc_timesync_enable_request request;

	request.count = count;
	request.frame_time = cpu_to_le64(frame_time);
	request.strobe_delay = cpu_to_le32(strobe_delay);
	request.refclk = cpu_to_le32(refclk);

	return gb_operation_sync(connection,
				 GB_SVC_TYPE_TIMESYNC_ENABLE,
				 &request, sizeof(request), NULL, 0);
}
  440. int gb_svc_timesync_disable(struct gb_svc *svc)
  441. {
  442. struct gb_connection *connection = svc->connection;
  443. return gb_operation_sync(connection,
  444. GB_SVC_TYPE_TIMESYNC_DISABLE,
  445. NULL, 0, NULL, 0);
  446. }
/*
 * Fetch the SVC's authoritative frame-times for each strobe.  @frame_time
 * must point to an array of at least GB_TIMESYNC_MAX_STROBES entries.
 */
int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time)
{
	struct gb_connection *connection = svc->connection;
	struct gb_svc_timesync_authoritative_response response;
	int ret, i;

	ret = gb_operation_sync(connection,
				GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE, NULL, 0,
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	/* Convert each little-endian frame-time to CPU byte order. */
	for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
		frame_time[i] = le64_to_cpu(response.frame_time[i]);

	return 0;
}
/*
 * Issue a timesync ping and return the SVC's frame-time at the moment of
 * the ping in @frame_time.
 */
int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time)
{
	struct gb_connection *connection = svc->connection;
	struct gb_svc_timesync_ping_response response;
	int ret;

	ret = gb_operation_sync(connection,
				GB_SVC_TYPE_TIMESYNC_PING,
				NULL, 0,
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	*frame_time = le64_to_cpu(response.frame_time);

	return 0;
}
  475. int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask)
  476. {
  477. struct gb_connection *connection = svc->connection;
  478. struct gb_svc_timesync_wake_pins_acquire_request request;
  479. request.strobe_mask = cpu_to_le32(strobe_mask);
  480. return gb_operation_sync(connection,
  481. GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE,
  482. &request, sizeof(request),
  483. NULL, 0);
  484. }
  485. int gb_svc_timesync_wake_pins_release(struct gb_svc *svc)
  486. {
  487. struct gb_connection *connection = svc->connection;
  488. return gb_operation_sync(connection,
  489. GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE,
  490. NULL, 0, NULL, 0);
  491. }
  492. /* Creates bi-directional routes between the devices */
  493. int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
  494. u8 intf2_id, u8 dev2_id)
  495. {
  496. struct gb_svc_route_create_request request;
  497. request.intf1_id = intf1_id;
  498. request.dev1_id = dev1_id;
  499. request.intf2_id = intf2_id;
  500. request.dev2_id = dev2_id;
  501. return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
  502. &request, sizeof(request), NULL, 0);
  503. }
/*
 * Destroys bi-directional routes between the devices.  Failures are only
 * logged: this is called on teardown paths where callers cannot recover.
 */
void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
	struct gb_svc_route_destroy_request request;
	int ret;

	request.intf1_id = intf1_id;
	request.intf2_id = intf2_id;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
			intf1_id, intf2_id, ret);
	}
}
/*
 * Configure the UniPro link power mode of interface @intf_id: high-speed
 * series, TX/RX mode/gear/lane counts, TX amplitude and HS equalizer,
 * plus optional local/remote L2 timer configurations (@local/@remote may
 * be NULL to keep the zeroed defaults).  Returns -EIO unless the SVC
 * reports GB_SVC_SETPWRM_PWR_LOCAL.
 */
int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
			       u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
			       u8 tx_amplitude, u8 tx_hs_equalizer,
			       u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
			       u8 flags, u32 quirks,
			       struct gb_svc_l2_timer_cfg *local,
			       struct gb_svc_l2_timer_cfg *remote)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	/* Zero the whole request so optional fields go out as zeroes. */
	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = hs_series;
	request.tx_mode = tx_mode;
	request.tx_gear = tx_gear;
	request.tx_nlanes = tx_nlanes;
	request.tx_amplitude = tx_amplitude;
	request.tx_hs_equalizer = tx_hs_equalizer;
	request.rx_mode = rx_mode;
	request.rx_gear = rx_gear;
	request.rx_nlanes = rx_nlanes;
	request.flags = flags;
	request.quirks = cpu_to_le32(quirks);
	if (local)
		request.local_l2timerdata = *local;
	if (remote)
		request.remote_l2timerdata = *remote;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
		dev_err(&svc->dev, "set power mode = %d\n", result_code);
		return -EIO;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
/*
 * Put the UniPro link of interface @intf_id into hibernate mode (both TX
 * and RX).  Returns -EIO unless the SVC reports GB_SVC_SETPWRM_PWR_OK.
 */
int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
{
	struct gb_svc_intf_set_pwrm_request request;
	struct gb_svc_intf_set_pwrm_response response;
	int ret;
	u16 result_code;

	/* All other power-mode parameters remain zero. */
	memset(&request, 0, sizeof(request));

	request.intf_id = intf_id;
	request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
	request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
	request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret < 0) {
		dev_err(&svc->dev,
			"failed to send set power mode operation to interface %u: %d\n",
			intf_id, ret);
		return ret;
	}

	result_code = response.result_code;
	if (result_code != GB_SVC_SETPWRM_PWR_OK) {
		dev_err(&svc->dev,
			"failed to hibernate the link for interface %u: %u\n",
			intf_id, result_code);
		return -EIO;
	}

	return 0;
}
/*
 * Ping the SVC to verify it is still responsive.  Uses twice the default
 * operation timeout to avoid spurious failures.
 */
int gb_svc_ping(struct gb_svc *svc)
{
	return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
					 NULL, 0, NULL, 0,
					 GB_OPERATION_TIMEOUT_DEFAULT * 2);
}
/*
 * Handle an incoming SVC version request: validate the request size,
 * reject unsupported major versions, record the negotiated protocol
 * version and reply with it.
 */
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_version_request *request;
	struct gb_svc_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
			op->request->payload_size,
			sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
			 request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	/* Echo back the version we agreed to speak. */
	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}
  622. static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
  623. size_t len, loff_t *offset)
  624. {
  625. struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
  626. struct gb_svc *svc = pwrmon_rails->svc;
  627. int ret, desc;
  628. u32 value;
  629. char buff[16];
  630. ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
  631. GB_SVC_PWRMON_TYPE_VOL, &value);
  632. if (ret) {
  633. dev_err(&svc->dev,
  634. "failed to get voltage sample %u: %d\n",
  635. pwrmon_rails->id, ret);
  636. return ret;
  637. }
  638. desc = scnprintf(buff, sizeof(buff), "%u\n", value);
  639. return simple_read_from_buffer(buf, len, offset, buff, desc);
  640. }
  641. static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
  642. size_t len, loff_t *offset)
  643. {
  644. struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
  645. struct gb_svc *svc = pwrmon_rails->svc;
  646. int ret, desc;
  647. u32 value;
  648. char buff[16];
  649. ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
  650. GB_SVC_PWRMON_TYPE_CURR, &value);
  651. if (ret) {
  652. dev_err(&svc->dev,
  653. "failed to get current sample %u: %d\n",
  654. pwrmon_rails->id, ret);
  655. return ret;
  656. }
  657. desc = scnprintf(buff, sizeof(buff), "%u\n", value);
  658. return simple_read_from_buffer(buf, len, offset, buff, desc);
  659. }
  660. static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
  661. size_t len, loff_t *offset)
  662. {
  663. struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
  664. struct gb_svc *svc = pwrmon_rails->svc;
  665. int ret, desc;
  666. u32 value;
  667. char buff[16];
  668. ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
  669. GB_SVC_PWRMON_TYPE_PWR, &value);
  670. if (ret) {
  671. dev_err(&svc->dev, "failed to get power sample %u: %d\n",
  672. pwrmon_rails->id, ret);
  673. return ret;
  674. }
  675. desc = scnprintf(buff, sizeof(buff), "%u\n", value);
  676. return simple_read_from_buffer(buf, len, offset, buff, desc);
  677. }
/* Read-only file operations for the per-rail debugfs sample files. */
static const struct file_operations pwrmon_debugfs_voltage_fops = {
	.read		= pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
	.read		= pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
	.read		= pwr_debugfs_power_read,
};
/*
 * Populate the "pwrmon" debugfs directory: query the rail count and
 * names from the SVC and create voltage/current/power files for each
 * rail.  Failures are silent (debugfs is best-effort); on error the
 * partially created directory is removed again.
 */
static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;
	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
	u8 rail_count;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
		goto err_pwrmon_debugfs;

	/* Sanity-check the device-reported count before allocating. */
	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	bufsize = sizeof(*rail_names) +
		GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

	rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!rail_names)
		goto err_pwrmon_debugfs;

	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		snprintf(fname, sizeof(fname), "%s",
			 (char *)&rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	/* The name buffer is only needed during creation. */
	kfree(rail_names);
	return;

err_pwrmon_debugfs_free:
	kfree(rail_names);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}
/* Create the SVC's debugfs directory and populate the pwrmon entries. */
static void gb_svc_debugfs_init(struct gb_svc *svc)
{
	svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
						 gb_debugfs_get());
	gb_svc_pwrmon_debugfs_init(svc);
}
/* Remove the SVC's debugfs tree and free the per-rail bookkeeping. */
static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;	/* guard against double free on teardown */
}
/*
 * Handle the SVC_HELLO request.
 *
 * Records the endo id and AP interface id from the request, registers the
 * SVC device with the driver core, and sets up watchdog, debugfs and
 * timesync support.  On success the remaining hello processing is queued
 * as a deferred request.  Any failure unwinds the registrations made here
 * and returns a negative errno.
 */
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	/* The SVC device becomes visible to the rest of the system here. */
	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		goto err_unregister_device;
	}

	gb_svc_debugfs_init(svc);

	ret = gb_timesync_svc_add(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to add SVC to timesync: %d\n", ret);
		/* Undo debugfs here; the error label only unwinds the rest. */
		gb_svc_debugfs_exit(svc);
		goto err_unregister_device;
	}

	/* Remaining hello work runs later from the SVC workqueue. */
	return gb_svc_queue_deferred_request(op);

err_unregister_device:
	gb_svc_watchdog_destroy(svc);
	device_del(&svc->dev);
	return ret;
}
  787. static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
  788. u8 intf_id)
  789. {
  790. struct gb_host_device *hd = svc->hd;
  791. struct gb_module *module;
  792. size_t num_interfaces;
  793. u8 module_id;
  794. list_for_each_entry(module, &hd->modules, hd_node) {
  795. module_id = module->module_id;
  796. num_interfaces = module->num_interfaces;
  797. if (intf_id >= module_id &&
  798. intf_id < module_id + num_interfaces) {
  799. return module->interfaces[intf_id - module_id];
  800. }
  801. }
  802. return NULL;
  803. }
  804. static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
  805. {
  806. struct gb_host_device *hd = svc->hd;
  807. struct gb_module *module;
  808. list_for_each_entry(module, &hd->modules, hd_node) {
  809. if (module->module_id == module_id)
  810. return module;
  811. }
  812. return NULL;
  813. }
/*
 * Deferred part of SVC hello processing, run from the SVC workqueue after
 * the hello request itself has been answered.
 */
static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	int ret;

	/*
	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
	 * module.
	 *
	 * The code should be removed once SW-2217, Heuristic for UniPro
	 * Power Mode Changes is resolved.
	 */
	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
					 GB_SVC_UNIPRO_HS_SERIES_A,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 GB_SVC_SMALL_AMPLITUDE, GB_SVC_NO_DE_EMPHASIS,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 0, 0,
					 NULL, NULL);
	/* Failure is non-fatal; the link just keeps its previous mode. */
	if (ret)
		dev_warn(&svc->dev,
			 "power mode change failed on AP to switch link: %d\n",
			 ret);
}
/*
 * Deferred handler for a module-inserted event: create and register a
 * gb_module for the newly reported module and link it into the host
 * device's module list.
 */
static void gb_svc_process_module_inserted(struct gb_operation *operation)
{
	struct gb_svc_module_inserted_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;
	u16 flags;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;

	module_id = request->primary_intf_id;
	num_interfaces = request->intf_count;
	flags = le16_to_cpu(request->flags);

	dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
		__func__, module_id, num_interfaces, flags);

	if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
		dev_warn(&svc->dev, "no primary interface detected on module %u\n",
			 module_id);
	}

	/* An inserted event for an already-known module is unexpected. */
	module = gb_svc_module_lookup(svc, module_id);
	if (module) {
		dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
			 module_id);
		return;
	}

	module = gb_module_create(hd, module_id, num_interfaces);
	if (!module) {
		dev_err(&svc->dev, "failed to create module\n");
		return;
	}

	ret = gb_module_add(module);
	if (ret) {
		/* Drop the reference taken by gb_module_create(). */
		gb_module_put(module);
		return;
	}

	/* Only fully registered modules go on the list. */
	list_add(&module->hd_node, &hd->modules);
}
/*
 * Deferred handler for a module-removed event: mark the module as
 * disconnected, unregister it, and drop it from the module list.
 */
static void gb_svc_process_module_removed(struct gb_operation *operation)
{
	struct gb_svc_module_removed_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_module *module;
	u8 module_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;

	module_id = request->primary_intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);

	module = gb_svc_module_lookup(svc, module_id);
	if (!module) {
		dev_warn(&svc->dev, "unexpected module-removed event %u\n",
			 module_id);
		return;
	}

	/* Flag the module as gone before tearing it down. */
	module->disconnected = true;

	gb_module_del(module);
	list_del(&module->hd_node);
	/* Drop the list's reference; the module is freed on the last put. */
	gb_module_put(module);
}
/*
 * Deferred handler for an interface-oops event: the interface reported a
 * fatal error, so disable and deactivate it.
 */
static void gb_svc_process_intf_oops(struct gb_operation *operation)
{
	struct gb_svc_intf_oops_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u8 reason;

	/* The request message size has already been verified. */
	request = operation->request->payload;

	intf_id = request->intf_id;
	reason = request->reason;

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
			 intf_id);
		return;
	}

	dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
		 intf_id, reason);

	/* Serialize against other interface state transitions. */
	mutex_lock(&intf->mutex);
	intf->disconnected = true;
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}
  930. static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
  931. {
  932. struct gb_svc_intf_mailbox_event_request *request;
  933. struct gb_connection *connection = operation->connection;
  934. struct gb_svc *svc = gb_connection_get_data(connection);
  935. struct gb_interface *intf;
  936. u8 intf_id;
  937. u16 result_code;
  938. u32 mailbox;
  939. /* The request message size has already been verified. */
  940. request = operation->request->payload;
  941. intf_id = request->intf_id;
  942. result_code = le16_to_cpu(request->result_code);
  943. mailbox = le32_to_cpu(request->mailbox);
  944. dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
  945. __func__, intf_id, result_code, mailbox);
  946. intf = gb_svc_interface_lookup(svc, intf_id);
  947. if (!intf) {
  948. dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
  949. return;
  950. }
  951. gb_interface_mailbox_event(intf, result_code, mailbox);
  952. }
  953. static void gb_svc_process_deferred_request(struct work_struct *work)
  954. {
  955. struct gb_svc_deferred_request *dr;
  956. struct gb_operation *operation;
  957. struct gb_svc *svc;
  958. u8 type;
  959. dr = container_of(work, struct gb_svc_deferred_request, work);
  960. operation = dr->operation;
  961. svc = gb_connection_get_data(operation->connection);
  962. type = operation->request->header->type;
  963. switch (type) {
  964. case GB_SVC_TYPE_SVC_HELLO:
  965. gb_svc_process_hello_deferred(operation);
  966. break;
  967. case GB_SVC_TYPE_MODULE_INSERTED:
  968. gb_svc_process_module_inserted(operation);
  969. break;
  970. case GB_SVC_TYPE_MODULE_REMOVED:
  971. gb_svc_process_module_removed(operation);
  972. break;
  973. case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
  974. gb_svc_process_intf_mailbox_event(operation);
  975. break;
  976. case GB_SVC_TYPE_INTF_OOPS:
  977. gb_svc_process_intf_oops(operation);
  978. break;
  979. default:
  980. dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
  981. }
  982. gb_operation_put(operation);
  983. kfree(dr);
  984. }
  985. static int gb_svc_queue_deferred_request(struct gb_operation *operation)
  986. {
  987. struct gb_svc *svc = gb_connection_get_data(operation->connection);
  988. struct gb_svc_deferred_request *dr;
  989. dr = kmalloc(sizeof(*dr), GFP_KERNEL);
  990. if (!dr)
  991. return -ENOMEM;
  992. gb_operation_get(operation);
  993. dr->operation = operation;
  994. INIT_WORK(&dr->work, gb_svc_process_deferred_request);
  995. queue_work(svc->wq, &dr->work);
  996. return 0;
  997. }
  998. static int gb_svc_intf_reset_recv(struct gb_operation *op)
  999. {
  1000. struct gb_svc *svc = gb_connection_get_data(op->connection);
  1001. struct gb_message *request = op->request;
  1002. struct gb_svc_intf_reset_request *reset;
  1003. u8 intf_id;
  1004. if (request->payload_size < sizeof(*reset)) {
  1005. dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
  1006. request->payload_size, sizeof(*reset));
  1007. return -EINVAL;
  1008. }
  1009. reset = request->payload;
  1010. intf_id = reset->intf_id;
  1011. /* FIXME Reset the interface here */
  1012. return 0;
  1013. }
  1014. static int gb_svc_module_inserted_recv(struct gb_operation *op)
  1015. {
  1016. struct gb_svc *svc = gb_connection_get_data(op->connection);
  1017. struct gb_svc_module_inserted_request *request;
  1018. if (op->request->payload_size < sizeof(*request)) {
  1019. dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
  1020. op->request->payload_size, sizeof(*request));
  1021. return -EINVAL;
  1022. }
  1023. request = op->request->payload;
  1024. dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
  1025. request->primary_intf_id);
  1026. return gb_svc_queue_deferred_request(op);
  1027. }
  1028. static int gb_svc_module_removed_recv(struct gb_operation *op)
  1029. {
  1030. struct gb_svc *svc = gb_connection_get_data(op->connection);
  1031. struct gb_svc_module_removed_request *request;
  1032. if (op->request->payload_size < sizeof(*request)) {
  1033. dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
  1034. op->request->payload_size, sizeof(*request));
  1035. return -EINVAL;
  1036. }
  1037. request = op->request->payload;
  1038. dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
  1039. request->primary_intf_id);
  1040. return gb_svc_queue_deferred_request(op);
  1041. }
  1042. static int gb_svc_intf_oops_recv(struct gb_operation *op)
  1043. {
  1044. struct gb_svc *svc = gb_connection_get_data(op->connection);
  1045. struct gb_svc_intf_oops_request *request;
  1046. if (op->request->payload_size < sizeof(*request)) {
  1047. dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
  1048. op->request->payload_size, sizeof(*request));
  1049. return -EINVAL;
  1050. }
  1051. return gb_svc_queue_deferred_request(op);
  1052. }
  1053. static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
  1054. {
  1055. struct gb_svc *svc = gb_connection_get_data(op->connection);
  1056. struct gb_svc_intf_mailbox_event_request *request;
  1057. if (op->request->payload_size < sizeof(*request)) {
  1058. dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
  1059. op->request->payload_size, sizeof(*request));
  1060. return -EINVAL;
  1061. }
  1062. request = op->request->payload;
  1063. dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);
  1064. return gb_svc_queue_deferred_request(op);
  1065. }
/* Central dispatch for all incoming SVC requests. */
static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially) and
	 * below code takes care of enforcing that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but the earlier two.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	/* Dispatch; version and hello also advance the state machine. */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	case GB_SVC_TYPE_INTF_OOPS:
		return gb_svc_intf_oops_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
  1127. static void gb_svc_release(struct device *dev)
  1128. {
  1129. struct gb_svc *svc = to_gb_svc(dev);
  1130. if (svc->connection)
  1131. gb_connection_destroy(svc->connection);
  1132. ida_destroy(&svc->device_id_map);
  1133. destroy_workqueue(svc->wq);
  1134. kfree(svc);
  1135. }
/* Device type for SVC devices; release frees all SVC resources. */
struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
  1140. struct gb_svc *gb_svc_create(struct gb_host_device *hd)
  1141. {
  1142. struct gb_svc *svc;
  1143. svc = kzalloc(sizeof(*svc), GFP_KERNEL);
  1144. if (!svc)
  1145. return NULL;
  1146. svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
  1147. if (!svc->wq) {
  1148. kfree(svc);
  1149. return NULL;
  1150. }
  1151. svc->dev.parent = &hd->dev;
  1152. svc->dev.bus = &greybus_bus_type;
  1153. svc->dev.type = &greybus_svc_type;
  1154. svc->dev.groups = svc_groups;
  1155. svc->dev.dma_mask = svc->dev.parent->dma_mask;
  1156. device_initialize(&svc->dev);
  1157. dev_set_name(&svc->dev, "%d-svc", hd->bus_id);
  1158. ida_init(&svc->device_id_map);
  1159. svc->state = GB_SVC_STATE_RESET;
  1160. svc->hd = hd;
  1161. svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
  1162. gb_svc_request_handler);
  1163. if (IS_ERR(svc->connection)) {
  1164. dev_err(&svc->dev, "failed to create connection: %ld\n",
  1165. PTR_ERR(svc->connection));
  1166. goto err_put_device;
  1167. }
  1168. gb_connection_set_data(svc->connection, svc);
  1169. return svc;
  1170. err_put_device:
  1171. put_device(&svc->dev);
  1172. return NULL;
  1173. }
  1174. int gb_svc_add(struct gb_svc *svc)
  1175. {
  1176. int ret;
  1177. /*
  1178. * The SVC protocol is currently driven by the SVC, so the SVC device
  1179. * is added from the connection request handler when enough
  1180. * information has been received.
  1181. */
  1182. ret = gb_connection_enable(svc->connection);
  1183. if (ret)
  1184. return ret;
  1185. return 0;
  1186. }
/*
 * Unregister and release every module still on the host device's module
 * list.  Each module is deleted before being unlinked and its reference
 * dropped, mirroring the module-removed event processing.
 */
static void gb_svc_remove_modules(struct gb_svc *svc)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module, *tmp;

	/* _safe variant: entries are unlinked while iterating. */
	list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
		gb_module_del(module);
		list_del(&module->hd_node);
		gb_module_put(module);
	}
}
/*
 * Tear down the SVC: stop incoming requests, unregister the device (if
 * the hello handler registered it), flush deferred work, remove all
 * modules, and finally disable the connection completely.
 */
void gb_svc_del(struct gb_svc *svc)
{
	/* Stop accepting new incoming requests first. */
	gb_connection_disable_rx(svc->connection);

	/*
	 * The SVC device may have been registered from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_timesync_svc_remove(svc);
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		device_del(&svc->dev);
	}

	/* Let queued deferred requests finish before removing modules. */
	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);

	gb_connection_disable(svc->connection);
}
/* Drop a reference on the SVC device; the last put frees the SVC. */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}