/*
 * acpi_ipmi.c - ACPI IPMI opregion
 *
 * Copyright (C) 2010, 2013 Intel Corporation
 *   Author: Zhao Yakui <yakui.zhao@intel.com>
 *           Lv Zheng <lv.zheng@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/acpi.h>
#include <linux/ipmi.h>
#include <linux/module.h>
#include <linux/spinlock.h>
  30. MODULE_AUTHOR("Zhao Yakui");
  31. MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
  32. MODULE_LICENSE("GPL");
  33. #define ACPI_IPMI_OK 0
  34. #define ACPI_IPMI_TIMEOUT 0x10
  35. #define ACPI_IPMI_UNKNOWN 0x07
  36. /* the IPMI timeout is 5s */
  37. #define IPMI_TIMEOUT (5000)
  38. #define ACPI_IPMI_MAX_MSG_LENGTH 64
  39. struct acpi_ipmi_device {
  40. /* the device list attached to driver_data.ipmi_devices */
  41. struct list_head head;
  42. /* the IPMI request message list */
  43. struct list_head tx_msg_list;
  44. spinlock_t tx_msg_lock;
  45. acpi_handle handle;
  46. struct device *dev;
  47. ipmi_user_t user_interface;
  48. int ipmi_ifnum; /* IPMI interface number */
  49. long curr_msgid;
  50. bool dead;
  51. struct kref kref;
  52. };
  53. struct ipmi_driver_data {
  54. struct list_head ipmi_devices;
  55. struct ipmi_smi_watcher bmc_events;
  56. struct ipmi_user_hndl ipmi_hndlrs;
  57. struct mutex ipmi_lock;
  58. /*
  59. * NOTE: IPMI System Interface Selection
  60. * There is no system interface specified by the IPMI operation
  61. * region access. We try to select one system interface with ACPI
  62. * handle set. IPMI messages passed from the ACPI codes are sent
  63. * to this selected global IPMI system interface.
  64. */
  65. struct acpi_ipmi_device *selected_smi;
  66. };
  67. struct acpi_ipmi_msg {
  68. struct list_head head;
  69. /*
  70. * General speaking the addr type should be SI_ADDR_TYPE. And
  71. * the addr channel should be BMC.
  72. * In fact it can also be IPMB type. But we will have to
  73. * parse it from the Netfn command buffer. It is so complex
  74. * that it is skipped.
  75. */
  76. struct ipmi_addr addr;
  77. long tx_msgid;
  78. /* it is used to track whether the IPMI message is finished */
  79. struct completion tx_complete;
  80. struct kernel_ipmi_msg tx_message;
  81. int msg_done;
  82. /* tx/rx data . And copy it from/to ACPI object buffer */
  83. u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
  84. u8 rx_len;
  85. struct acpi_ipmi_device *device;
  86. struct kref kref;
  87. };
  88. /* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
  89. struct acpi_ipmi_buffer {
  90. u8 status;
  91. u8 length;
  92. u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
  93. };
  94. static void ipmi_register_bmc(int iface, struct device *dev);
  95. static void ipmi_bmc_gone(int iface);
  96. static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);
  97. static struct ipmi_driver_data driver_data = {
  98. .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
  99. .bmc_events = {
  100. .owner = THIS_MODULE,
  101. .new_smi = ipmi_register_bmc,
  102. .smi_gone = ipmi_bmc_gone,
  103. },
  104. .ipmi_hndlrs = {
  105. .ipmi_recv_hndl = ipmi_msg_handler,
  106. },
  107. .ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock)
  108. };
  109. static struct acpi_ipmi_device *
  110. ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
  111. {
  112. struct acpi_ipmi_device *ipmi_device;
  113. int err;
  114. ipmi_user_t user;
  115. ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
  116. if (!ipmi_device)
  117. return NULL;
  118. kref_init(&ipmi_device->kref);
  119. INIT_LIST_HEAD(&ipmi_device->head);
  120. INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
  121. spin_lock_init(&ipmi_device->tx_msg_lock);
  122. ipmi_device->handle = handle;
  123. ipmi_device->dev = get_device(dev);
  124. ipmi_device->ipmi_ifnum = iface;
  125. err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
  126. ipmi_device, &user);
  127. if (err) {
  128. put_device(dev);
  129. kfree(ipmi_device);
  130. return NULL;
  131. }
  132. ipmi_device->user_interface = user;
  133. return ipmi_device;
  134. }
  135. static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
  136. {
  137. ipmi_destroy_user(ipmi_device->user_interface);
  138. put_device(ipmi_device->dev);
  139. kfree(ipmi_device);
  140. }
  141. static void ipmi_dev_release_kref(struct kref *kref)
  142. {
  143. struct acpi_ipmi_device *ipmi =
  144. container_of(kref, struct acpi_ipmi_device, kref);
  145. ipmi_dev_release(ipmi);
  146. }
  147. static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
  148. {
  149. list_del(&ipmi_device->head);
  150. if (driver_data.selected_smi == ipmi_device)
  151. driver_data.selected_smi = NULL;
  152. /*
  153. * Always setting dead flag after deleting from the list or
  154. * list_for_each_entry() codes must get changed.
  155. */
  156. ipmi_device->dead = true;
  157. }
  158. static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
  159. {
  160. struct acpi_ipmi_device *ipmi_device = NULL;
  161. mutex_lock(&driver_data.ipmi_lock);
  162. if (driver_data.selected_smi) {
  163. ipmi_device = driver_data.selected_smi;
  164. kref_get(&ipmi_device->kref);
  165. }
  166. mutex_unlock(&driver_data.ipmi_lock);
  167. return ipmi_device;
  168. }
  169. static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
  170. {
  171. kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
  172. }
  173. static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
  174. {
  175. struct acpi_ipmi_device *ipmi;
  176. struct acpi_ipmi_msg *ipmi_msg;
  177. ipmi = acpi_ipmi_dev_get();
  178. if (!ipmi)
  179. return NULL;
  180. ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
  181. if (!ipmi_msg) {
  182. acpi_ipmi_dev_put(ipmi);
  183. return NULL;
  184. }
  185. kref_init(&ipmi_msg->kref);
  186. init_completion(&ipmi_msg->tx_complete);
  187. INIT_LIST_HEAD(&ipmi_msg->head);
  188. ipmi_msg->device = ipmi;
  189. ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;
  190. return ipmi_msg;
  191. }
  192. static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
  193. {
  194. acpi_ipmi_dev_put(tx_msg->device);
  195. kfree(tx_msg);
  196. }
  197. static void ipmi_msg_release_kref(struct kref *kref)
  198. {
  199. struct acpi_ipmi_msg *tx_msg =
  200. container_of(kref, struct acpi_ipmi_msg, kref);
  201. ipmi_msg_release(tx_msg);
  202. }
  203. static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
  204. {
  205. kref_get(&tx_msg->kref);
  206. return tx_msg;
  207. }
  208. static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
  209. {
  210. kref_put(&tx_msg->kref, ipmi_msg_release_kref);
  211. }
  212. #define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff)
  213. #define IPMI_OP_RGN_CMD(offset) (offset & 0xff)
  214. static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
  215. acpi_physical_address address,
  216. acpi_integer *value)
  217. {
  218. struct kernel_ipmi_msg *msg;
  219. struct acpi_ipmi_buffer *buffer;
  220. struct acpi_ipmi_device *device;
  221. unsigned long flags;
  222. msg = &tx_msg->tx_message;
  223. /*
  224. * IPMI network function and command are encoded in the address
  225. * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
  226. */
  227. msg->netfn = IPMI_OP_RGN_NETFN(address);
  228. msg->cmd = IPMI_OP_RGN_CMD(address);
  229. msg->data = tx_msg->data;
  230. /*
  231. * value is the parameter passed by the IPMI opregion space handler.
  232. * It points to the IPMI request message buffer
  233. */
  234. buffer = (struct acpi_ipmi_buffer *)value;
  235. /* copy the tx message data */
  236. if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
  237. dev_WARN_ONCE(tx_msg->device->dev, true,
  238. "Unexpected request (msg len %d).\n",
  239. buffer->length);
  240. return -EINVAL;
  241. }
  242. msg->data_len = buffer->length;
  243. memcpy(tx_msg->data, buffer->data, msg->data_len);
  244. /*
  245. * now the default type is SYSTEM_INTERFACE and channel type is BMC.
  246. * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
  247. * the addr type should be changed to IPMB. Then we will have to parse
  248. * the IPMI request message buffer to get the IPMB address.
  249. * If so, please fix me.
  250. */
  251. tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
  252. tx_msg->addr.channel = IPMI_BMC_CHANNEL;
  253. tx_msg->addr.data[0] = 0;
  254. /* Get the msgid */
  255. device = tx_msg->device;
  256. spin_lock_irqsave(&device->tx_msg_lock, flags);
  257. device->curr_msgid++;
  258. tx_msg->tx_msgid = device->curr_msgid;
  259. spin_unlock_irqrestore(&device->tx_msg_lock, flags);
  260. return 0;
  261. }
  262. static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
  263. acpi_integer *value)
  264. {
  265. struct acpi_ipmi_buffer *buffer;
  266. /*
  267. * value is also used as output parameter. It represents the response
  268. * IPMI message returned by IPMI command.
  269. */
  270. buffer = (struct acpi_ipmi_buffer *)value;
  271. /*
  272. * If the flag of msg_done is not set, it means that the IPMI command is
  273. * not executed correctly.
  274. */
  275. buffer->status = msg->msg_done;
  276. if (msg->msg_done != ACPI_IPMI_OK)
  277. return;
  278. /*
  279. * If the IPMI response message is obtained correctly, the status code
  280. * will be ACPI_IPMI_OK
  281. */
  282. buffer->length = msg->rx_len;
  283. memcpy(buffer->data, msg->data, msg->rx_len);
  284. }
  285. static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
  286. {
  287. struct acpi_ipmi_msg *tx_msg;
  288. unsigned long flags;
  289. /*
  290. * NOTE: On-going ipmi_recv_msg
  291. * ipmi_msg_handler() may still be invoked by ipmi_si after
  292. * flushing. But it is safe to do a fast flushing on module_exit()
  293. * without waiting for all ipmi_recv_msg(s) to complete from
  294. * ipmi_msg_handler() as it is ensured by ipmi_si that all
  295. * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
  296. */
  297. spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
  298. while (!list_empty(&ipmi->tx_msg_list)) {
  299. tx_msg = list_first_entry(&ipmi->tx_msg_list,
  300. struct acpi_ipmi_msg,
  301. head);
  302. list_del(&tx_msg->head);
  303. spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
  304. /* wake up the sleep thread on the Tx msg */
  305. complete(&tx_msg->tx_complete);
  306. acpi_ipmi_msg_put(tx_msg);
  307. spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
  308. }
  309. spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
  310. }
  311. static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
  312. struct acpi_ipmi_msg *msg)
  313. {
  314. struct acpi_ipmi_msg *tx_msg, *temp;
  315. bool msg_found = false;
  316. unsigned long flags;
  317. spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
  318. list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
  319. if (msg == tx_msg) {
  320. msg_found = true;
  321. list_del(&tx_msg->head);
  322. break;
  323. }
  324. }
  325. spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
  326. if (msg_found)
  327. acpi_ipmi_msg_put(tx_msg);
  328. }
  329. static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
  330. {
  331. struct acpi_ipmi_device *ipmi_device = user_msg_data;
  332. bool msg_found = false;
  333. struct acpi_ipmi_msg *tx_msg, *temp;
  334. struct device *dev = ipmi_device->dev;
  335. unsigned long flags;
  336. if (msg->user != ipmi_device->user_interface) {
  337. dev_warn(dev,
  338. "Unexpected response is returned. returned user %p, expected user %p\n",
  339. msg->user, ipmi_device->user_interface);
  340. goto out_msg;
  341. }
  342. spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
  343. list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
  344. if (msg->msgid == tx_msg->tx_msgid) {
  345. msg_found = true;
  346. list_del(&tx_msg->head);
  347. break;
  348. }
  349. }
  350. spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
  351. if (!msg_found) {
  352. dev_warn(dev,
  353. "Unexpected response (msg id %ld) is returned.\n",
  354. msg->msgid);
  355. goto out_msg;
  356. }
  357. /* copy the response data to Rx_data buffer */
  358. if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
  359. dev_WARN_ONCE(dev, true,
  360. "Unexpected response (msg len %d).\n",
  361. msg->msg.data_len);
  362. goto out_comp;
  363. }
  364. /* response msg is an error msg */
  365. msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
  366. if (msg->recv_type == IPMI_RESPONSE_RECV_TYPE &&
  367. msg->msg.data_len == 1) {
  368. if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
  369. dev_WARN_ONCE(dev, true,
  370. "Unexpected response (timeout).\n");
  371. tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
  372. }
  373. goto out_comp;
  374. }
  375. tx_msg->rx_len = msg->msg.data_len;
  376. memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
  377. tx_msg->msg_done = ACPI_IPMI_OK;
  378. out_comp:
  379. complete(&tx_msg->tx_complete);
  380. acpi_ipmi_msg_put(tx_msg);
  381. out_msg:
  382. ipmi_free_recv_msg(msg);
  383. }
  384. static void ipmi_register_bmc(int iface, struct device *dev)
  385. {
  386. struct acpi_ipmi_device *ipmi_device, *temp;
  387. int err;
  388. struct ipmi_smi_info smi_data;
  389. acpi_handle handle;
  390. err = ipmi_get_smi_info(iface, &smi_data);
  391. if (err)
  392. return;
  393. if (smi_data.addr_src != SI_ACPI)
  394. goto err_ref;
  395. handle = smi_data.addr_info.acpi_info.acpi_handle;
  396. if (!handle)
  397. goto err_ref;
  398. ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle);
  399. if (!ipmi_device) {
  400. dev_warn(smi_data.dev, "Can't create IPMI user interface\n");
  401. goto err_ref;
  402. }
  403. mutex_lock(&driver_data.ipmi_lock);
  404. list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
  405. /*
  406. * if the corresponding ACPI handle is already added
  407. * to the device list, don't add it again.
  408. */
  409. if (temp->handle == handle)
  410. goto err_lock;
  411. }
  412. if (!driver_data.selected_smi)
  413. driver_data.selected_smi = ipmi_device;
  414. list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
  415. mutex_unlock(&driver_data.ipmi_lock);
  416. put_device(smi_data.dev);
  417. return;
  418. err_lock:
  419. mutex_unlock(&driver_data.ipmi_lock);
  420. ipmi_dev_release(ipmi_device);
  421. err_ref:
  422. put_device(smi_data.dev);
  423. return;
  424. }
  425. static void ipmi_bmc_gone(int iface)
  426. {
  427. struct acpi_ipmi_device *ipmi_device, *temp;
  428. bool dev_found = false;
  429. mutex_lock(&driver_data.ipmi_lock);
  430. list_for_each_entry_safe(ipmi_device, temp,
  431. &driver_data.ipmi_devices, head) {
  432. if (ipmi_device->ipmi_ifnum != iface) {
  433. dev_found = true;
  434. __ipmi_dev_kill(ipmi_device);
  435. break;
  436. }
  437. }
  438. if (!driver_data.selected_smi)
  439. driver_data.selected_smi = list_first_entry_or_null(
  440. &driver_data.ipmi_devices,
  441. struct acpi_ipmi_device, head);
  442. mutex_unlock(&driver_data.ipmi_lock);
  443. if (dev_found) {
  444. ipmi_flush_tx_msg(ipmi_device);
  445. acpi_ipmi_dev_put(ipmi_device);
  446. }
  447. }
  448. /*
  449. * This is the IPMI opregion space handler.
  450. * @function: indicates the read/write. In fact as the IPMI message is driven
  451. * by command, only write is meaningful.
  452. * @address: This contains the netfn/command of IPMI request message.
  453. * @bits : not used.
  454. * @value : it is an in/out parameter. It points to the IPMI message buffer.
  455. * Before the IPMI message is sent, it represents the actual request
  456. * IPMI message. After the IPMI message is finished, it represents
  457. * the response IPMI message returned by IPMI command.
  458. * @handler_context: IPMI device context.
  459. */
  460. static acpi_status
  461. acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
  462. u32 bits, acpi_integer *value,
  463. void *handler_context, void *region_context)
  464. {
  465. struct acpi_ipmi_msg *tx_msg;
  466. struct acpi_ipmi_device *ipmi_device;
  467. int err;
  468. acpi_status status;
  469. unsigned long flags;
  470. /*
  471. * IPMI opregion message.
  472. * IPMI message is firstly written to the BMC and system software
  473. * can get the respsonse. So it is unmeaningful for the read access
  474. * of IPMI opregion.
  475. */
  476. if ((function & ACPI_IO_MASK) == ACPI_READ)
  477. return AE_TYPE;
  478. tx_msg = ipmi_msg_alloc();
  479. if (!tx_msg)
  480. return AE_NOT_EXIST;
  481. ipmi_device = tx_msg->device;
  482. if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
  483. ipmi_msg_release(tx_msg);
  484. return AE_TYPE;
  485. }
  486. acpi_ipmi_msg_get(tx_msg);
  487. mutex_lock(&driver_data.ipmi_lock);
  488. /* Do not add a tx_msg that can not be flushed. */
  489. if (ipmi_device->dead) {
  490. mutex_unlock(&driver_data.ipmi_lock);
  491. ipmi_msg_release(tx_msg);
  492. return AE_NOT_EXIST;
  493. }
  494. spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
  495. list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
  496. spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
  497. mutex_unlock(&driver_data.ipmi_lock);
  498. err = ipmi_request_settime(ipmi_device->user_interface,
  499. &tx_msg->addr,
  500. tx_msg->tx_msgid,
  501. &tx_msg->tx_message,
  502. NULL, 0, 0, IPMI_TIMEOUT);
  503. if (err) {
  504. status = AE_ERROR;
  505. goto out_msg;
  506. }
  507. wait_for_completion(&tx_msg->tx_complete);
  508. acpi_format_ipmi_response(tx_msg, value);
  509. status = AE_OK;
  510. out_msg:
  511. ipmi_cancel_tx_msg(ipmi_device, tx_msg);
  512. acpi_ipmi_msg_put(tx_msg);
  513. return status;
  514. }
  515. static int __init acpi_ipmi_init(void)
  516. {
  517. int result;
  518. acpi_status status;
  519. if (acpi_disabled)
  520. return 0;
  521. status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
  522. ACPI_ADR_SPACE_IPMI,
  523. &acpi_ipmi_space_handler,
  524. NULL, NULL);
  525. if (ACPI_FAILURE(status)) {
  526. pr_warn("Can't register IPMI opregion space handle\n");
  527. return -EINVAL;
  528. }
  529. result = ipmi_smi_watcher_register(&driver_data.bmc_events);
  530. if (result)
  531. pr_err("Can't register IPMI system interface watcher\n");
  532. return result;
  533. }
  534. static void __exit acpi_ipmi_exit(void)
  535. {
  536. struct acpi_ipmi_device *ipmi_device;
  537. if (acpi_disabled)
  538. return;
  539. ipmi_smi_watcher_unregister(&driver_data.bmc_events);
  540. /*
  541. * When one smi_watcher is unregistered, it is only deleted
  542. * from the smi_watcher list. But the smi_gone callback function
  543. * is not called. So explicitly uninstall the ACPI IPMI oregion
  544. * handler and free it.
  545. */
  546. mutex_lock(&driver_data.ipmi_lock);
  547. while (!list_empty(&driver_data.ipmi_devices)) {
  548. ipmi_device = list_first_entry(&driver_data.ipmi_devices,
  549. struct acpi_ipmi_device,
  550. head);
  551. __ipmi_dev_kill(ipmi_device);
  552. mutex_unlock(&driver_data.ipmi_lock);
  553. ipmi_flush_tx_msg(ipmi_device);
  554. acpi_ipmi_dev_put(ipmi_device);
  555. mutex_lock(&driver_data.ipmi_lock);
  556. }
  557. mutex_unlock(&driver_data.ipmi_lock);
  558. acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
  559. ACPI_ADR_SPACE_IPMI,
  560. &acpi_ipmi_space_handler);
  561. }
  562. module_init(acpi_ipmi_init);
  563. module_exit(acpi_ipmi_exit);