ehca_main.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124
  1. /*
  2. * IBM eServer eHCA Infiniband device driver for Linux on POWER
  3. *
  4. * module start stop, hca detection
  5. *
  6. * Authors: Heiko J Schick <schickhj@de.ibm.com>
  7. * Hoang-Nam Nguyen <hnguyen@de.ibm.com>
  8. * Joachim Fenkes <fenkes@de.ibm.com>
  9. *
  10. * Copyright (c) 2005 IBM Corporation
  11. *
  12. * All rights reserved.
  13. *
  14. * This source code is distributed under a dual license of GPL v2.0 and OpenIB
  15. * BSD.
  16. *
  17. * OpenIB BSD License
  18. *
  19. * Redistribution and use in source and binary forms, with or without
  20. * modification, are permitted provided that the following conditions are met:
  21. *
  22. * Redistributions of source code must retain the above copyright notice, this
  23. * list of conditions and the following disclaimer.
  24. *
  25. * Redistributions in binary form must reproduce the above copyright notice,
  26. * this list of conditions and the following disclaimer in the documentation
  27. * and/or other materials
  28. * provided with the distribution.
  29. *
  30. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  31. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  32. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  33. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  34. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  35. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  36. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  37. * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
  38. * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  39. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  40. * POSSIBILITY OF SUCH DAMAGE.
  41. */
  42. #ifdef CONFIG_PPC_64K_PAGES
  43. #include <linux/slab.h>
  44. #endif
  45. #include <linux/notifier.h>
  46. #include <linux/memory.h>
  47. #include <rdma/ib_mad.h>
  48. #include "ehca_classes.h"
  49. #include "ehca_iverbs.h"
  50. #include "ehca_mrmw.h"
  51. #include "ehca_tools.h"
  52. #include "hcp_if.h"
/* Driver version string, reported via MODULE_VERSION below. */
#define HCAD_VERSION "0029"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
MODULE_VERSION(HCAD_VERSION);

/*
 * Module parameters.  A default of -1 means "autodetect from firmware";
 * the MODULE_PARM_DESC() strings below give the meaning and range of
 * each parameter.  The non-static ones are shared with other files of
 * this driver.
 */
static bool ehca_open_aqp1 = 0;
static int ehca_hw_level = 0;
static bool ehca_poll_all_eqs = 1;

int ehca_debug_level = 0;
int ehca_nr_ports = -1;
bool ehca_use_hp_mr = 0;
int ehca_port_act_time = 30;
int ehca_static_rate = -1;
bool ehca_scaling_code = 0;
int ehca_lock_hcalls = -1;
int ehca_max_cq = -1;
int ehca_max_qp = -1;

module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO);
module_param_named(debug_level, ehca_debug_level, int, S_IRUGO);
module_param_named(hw_level, ehca_hw_level, int, S_IRUGO);
module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO);
module_param_named(use_hp_mr, ehca_use_hp_mr, bool, S_IRUGO);
module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO);
module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO);
/* "bint" accepts both boolean and integer input for lock_hcalls */
module_param_named(lock_hcalls, ehca_lock_hcalls, bint, S_IRUGO);
module_param_named(number_of_cqs, ehca_max_cq, int, S_IRUGO);
module_param_named(number_of_qps, ehca_max_qp, int, S_IRUGO);

MODULE_PARM_DESC(open_aqp1,
		 "Open AQP1 on startup (default: no)");
MODULE_PARM_DESC(debug_level,
		 "Amount of debug output (0: none (default), 1: traces, "
		 "2: some dumps, 3: lots)");
MODULE_PARM_DESC(hw_level,
		 "Hardware level (0: autosensing (default), "
		 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
MODULE_PARM_DESC(nr_ports,
		 "number of connected ports (-1: autodetect (default), "
		 "1: port one only, 2: two ports)");
MODULE_PARM_DESC(use_hp_mr,
		 "Use high performance MRs (default: no)");
MODULE_PARM_DESC(port_act_time,
		 "Time to wait for port activation (default: 30 sec)");
MODULE_PARM_DESC(poll_all_eqs,
		 "Poll all event queues periodically (default: yes)");
MODULE_PARM_DESC(static_rate,
		 "Set permanent static rate (default: no static rate)");
MODULE_PARM_DESC(scaling_code,
		 "Enable scaling code (default: no)");
MODULE_PARM_DESC(lock_hcalls,
		 "Serialize all hCalls made by the driver "
		 "(default: autodetect)");
MODULE_PARM_DESC(number_of_cqs,
		 "Max number of CQs which can be allocated "
		 "(default: autodetect)");
MODULE_PARM_DESC(number_of_qps,
		 "Max number of QPs which can be allocated "
		 "(default: autodetect)");
/* Locks and ID allocators used to look up QPs and CQs by token. */
DEFINE_RWLOCK(ehca_qp_idr_lock);
DEFINE_RWLOCK(ehca_cq_idr_lock);
DEFINE_IDR(ehca_qp_idr);
DEFINE_IDR(ehca_cq_idr);

static LIST_HEAD(shca_list); /* list of all registered ehcas */
DEFINE_SPINLOCK(shca_list_lock); /* protects shca_list (see ehca_probe()) */

/* NOTE(review): presumably drives periodic EQ polling when the
 * poll_all_eqs parameter is set; the timer setup code is outside this
 * chunk -- confirm against the rest of the file. */
static struct timer_list poll_eqs_timer;
#ifdef CONFIG_PPC_64K_PAGES
/* Dedicated slab cache for firmware control blocks; created in
 * ehca_create_slab_caches() with EHCA_PAGESIZE objects aligned to
 * H_CB_ALIGNMENT. */
static struct kmem_cache *ctblk_cache;

/*
 * Allocate a zeroed firmware control block from ctblk_cache.
 * Returns NULL (after logging) on allocation failure.
 */
void *ehca_alloc_fw_ctrlblock(gfp_t flags)
{
	void *ret = kmem_cache_zalloc(ctblk_cache, flags);
	if (!ret)
		ehca_gen_err("Out of memory for ctblk");
	return ret;
}

/* Free a control block obtained from ehca_alloc_fw_ctrlblock();
 * NULL is ignored. */
void ehca_free_fw_ctrlblock(void *ptr)
{
	if (ptr)
		kmem_cache_free(ctblk_cache, ptr);
}
#endif
  134. int ehca2ib_return_code(u64 ehca_rc)
  135. {
  136. switch (ehca_rc) {
  137. case H_SUCCESS:
  138. return 0;
  139. case H_RESOURCE: /* Resource in use */
  140. case H_BUSY:
  141. return -EBUSY;
  142. case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
  143. case H_CONSTRAINED: /* resource constraint */
  144. case H_NO_MEM:
  145. return -ENOMEM;
  146. default:
  147. return -EINVAL;
  148. }
  149. }
/*
 * Create every slab cache the driver uses: PD, CQ, QP, AV, MR/MW,
 * small-queue and -- on 64K-page kernels -- the firmware control block
 * cache.  On failure all caches created so far are destroyed again via
 * the goto chain (labels run in reverse creation order) and the error
 * code of the failing step is returned; 0 on success.
 */
static int ehca_create_slab_caches(void)
{
	int ret;

	ret = ehca_init_pd_cache();
	if (ret) {
		ehca_gen_err("Cannot create PD SLAB cache.");
		return ret;
	}

	ret = ehca_init_cq_cache();
	if (ret) {
		ehca_gen_err("Cannot create CQ SLAB cache.");
		goto create_slab_caches2;
	}

	ret = ehca_init_qp_cache();
	if (ret) {
		ehca_gen_err("Cannot create QP SLAB cache.");
		goto create_slab_caches3;
	}

	ret = ehca_init_av_cache();
	if (ret) {
		ehca_gen_err("Cannot create AV SLAB cache.");
		goto create_slab_caches4;
	}

	ret = ehca_init_mrmw_cache();
	if (ret) {
		ehca_gen_err("Cannot create MR&MW SLAB cache.");
		goto create_slab_caches5;
	}

	ret = ehca_init_small_qp_cache();
	if (ret) {
		ehca_gen_err("Cannot create small queue SLAB cache.");
		goto create_slab_caches6;
	}

#ifdef CONFIG_PPC_64K_PAGES
	/* firmware control blocks need EHCA_PAGESIZE objects aligned
	 * to H_CB_ALIGNMENT */
	ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
					EHCA_PAGESIZE, H_CB_ALIGNMENT,
					SLAB_HWCACHE_ALIGN,
					NULL);
	if (!ctblk_cache) {
		ehca_gen_err("Cannot create ctblk SLAB cache.");
		/* undo small-QP cache here; the label below handles the rest */
		ehca_cleanup_small_qp_cache();
		ret = -ENOMEM;
		goto create_slab_caches6;
	}
#endif
	return 0;

create_slab_caches6:
	ehca_cleanup_mrmw_cache();

create_slab_caches5:
	ehca_cleanup_av_cache();

create_slab_caches4:
	ehca_cleanup_qp_cache();

create_slab_caches3:
	ehca_cleanup_cq_cache();

create_slab_caches2:
	ehca_cleanup_pd_cache();

	return ret;
}
/*
 * Destroy all slab caches created by ehca_create_slab_caches(), in
 * reverse creation order.  The ctblk cache pointer is checked because
 * it only exists on CONFIG_PPC_64K_PAGES kernels and may be NULL if
 * creation failed.
 */
static void ehca_destroy_slab_caches(void)
{
	ehca_cleanup_small_qp_cache();
	ehca_cleanup_mrmw_cache();
	ehca_cleanup_av_cache();
	ehca_cleanup_qp_cache();
	ehca_cleanup_cq_cache();
	ehca_cleanup_pd_cache();
#ifdef CONFIG_PPC_64K_PAGES
	if (ctblk_cache)
		kmem_cache_destroy(ctblk_cache);
#endif
}
/* Sub-fields of the hw_ver word returned by hipz_h_query_hca():
 * adapter version (bits 32-39) and revision ID (bits 40-63). */
#define EHCA_HCAAVER EHCA_BMASK_IBM(32, 39)
#define EHCA_REVID EHCA_BMASK_IBM(40, 63)

/* Capability bit -> name table, used only for the debug dump in
 * ehca_sense_attributes(). */
static struct cap_descr {
	u64 mask;
	char *descr;
} hca_cap_descr[] = {
	{ HCA_CAP_AH_PORT_NR_CHECK, "HCA_CAP_AH_PORT_NR_CHECK" },
	{ HCA_CAP_ATOMIC, "HCA_CAP_ATOMIC" },
	{ HCA_CAP_AUTO_PATH_MIG, "HCA_CAP_AUTO_PATH_MIG" },
	{ HCA_CAP_BAD_P_KEY_CTR, "HCA_CAP_BAD_P_KEY_CTR" },
	{ HCA_CAP_SQD_RTS_PORT_CHANGE, "HCA_CAP_SQD_RTS_PORT_CHANGE" },
	{ HCA_CAP_CUR_QP_STATE_MOD, "HCA_CAP_CUR_QP_STATE_MOD" },
	{ HCA_CAP_INIT_TYPE, "HCA_CAP_INIT_TYPE" },
	{ HCA_CAP_PORT_ACTIVE_EVENT, "HCA_CAP_PORT_ACTIVE_EVENT" },
	{ HCA_CAP_Q_KEY_VIOL_CTR, "HCA_CAP_Q_KEY_VIOL_CTR" },
	{ HCA_CAP_WQE_RESIZE, "HCA_CAP_WQE_RESIZE" },
	{ HCA_CAP_RAW_PACKET_MCAST, "HCA_CAP_RAW_PACKET_MCAST" },
	{ HCA_CAP_SHUTDOWN_PORT, "HCA_CAP_SHUTDOWN_PORT" },
	{ HCA_CAP_RC_LL_QP, "HCA_CAP_RC_LL_QP" },
	{ HCA_CAP_SRQ, "HCA_CAP_SRQ" },
	{ HCA_CAP_UD_LL_QP, "HCA_CAP_UD_LL_QP" },
	{ HCA_CAP_RESIZE_MR, "HCA_CAP_RESIZE_MR" },
	{ HCA_CAP_MINI_QP, "HCA_CAP_MINI_QP" },
	{ HCA_CAP_H_ALLOC_RES_SYNC, "HCA_CAP_H_ALLOC_RES_SYNC" },
};
  246. static int ehca_sense_attributes(struct ehca_shca *shca)
  247. {
  248. int i, ret = 0;
  249. u64 h_ret;
  250. struct hipz_query_hca *rblock;
  251. struct hipz_query_port *port;
  252. const char *loc_code;
  253. static const u32 pgsize_map[] = {
  254. HCA_CAP_MR_PGSIZE_4K, 0x1000,
  255. HCA_CAP_MR_PGSIZE_64K, 0x10000,
  256. HCA_CAP_MR_PGSIZE_1M, 0x100000,
  257. HCA_CAP_MR_PGSIZE_16M, 0x1000000,
  258. };
  259. ehca_gen_dbg("Probing adapter %s...",
  260. shca->ofdev->dev.of_node->full_name);
  261. loc_code = of_get_property(shca->ofdev->dev.of_node, "ibm,loc-code",
  262. NULL);
  263. if (loc_code)
  264. ehca_gen_dbg(" ... location lode=%s", loc_code);
  265. rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
  266. if (!rblock) {
  267. ehca_gen_err("Cannot allocate rblock memory.");
  268. return -ENOMEM;
  269. }
  270. h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
  271. if (h_ret != H_SUCCESS) {
  272. ehca_gen_err("Cannot query device properties. h_ret=%lli",
  273. h_ret);
  274. ret = -EPERM;
  275. goto sense_attributes1;
  276. }
  277. if (ehca_nr_ports == 1)
  278. shca->num_ports = 1;
  279. else
  280. shca->num_ports = (u8)rblock->num_ports;
  281. ehca_gen_dbg(" ... found %x ports", rblock->num_ports);
  282. if (ehca_hw_level == 0) {
  283. u32 hcaaver;
  284. u32 revid;
  285. hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
  286. revid = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);
  287. ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
  288. if (hcaaver == 1) {
  289. if (revid <= 3)
  290. shca->hw_level = 0x10 | (revid + 1);
  291. else
  292. shca->hw_level = 0x14;
  293. } else if (hcaaver == 2) {
  294. if (revid == 0)
  295. shca->hw_level = 0x21;
  296. else if (revid == 0x10)
  297. shca->hw_level = 0x22;
  298. else if (revid == 0x20 || revid == 0x21)
  299. shca->hw_level = 0x23;
  300. }
  301. if (!shca->hw_level) {
  302. ehca_gen_warn("unknown hardware version"
  303. " - assuming default level");
  304. shca->hw_level = 0x22;
  305. }
  306. } else
  307. shca->hw_level = ehca_hw_level;
  308. ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
  309. shca->hca_cap = rblock->hca_cap_indicators;
  310. ehca_gen_dbg(" ... HCA capabilities:");
  311. for (i = 0; i < ARRAY_SIZE(hca_cap_descr); i++)
  312. if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
  313. ehca_gen_dbg(" %s", hca_cap_descr[i].descr);
  314. /* Autodetect hCall locking -- the "H_ALLOC_RESOURCE synced" flag is
  315. * a firmware property, so it's valid across all adapters
  316. */
  317. if (ehca_lock_hcalls == -1)
  318. ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC,
  319. shca->hca_cap);
  320. /* translate supported MR page sizes; always support 4K */
  321. shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
  322. for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
  323. if (rblock->memory_page_size_supported & pgsize_map[i])
  324. shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
  325. /* Set maximum number of CQs and QPs to calculate EQ size */
  326. if (shca->max_num_qps == -1)
  327. shca->max_num_qps = min_t(int, rblock->max_qp,
  328. EHCA_MAX_NUM_QUEUES);
  329. else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
  330. ehca_gen_warn("The requested number of QPs is out of range "
  331. "(1 - %i) specified by HW. Value is set to %i",
  332. rblock->max_qp, rblock->max_qp);
  333. shca->max_num_qps = rblock->max_qp;
  334. }
  335. if (shca->max_num_cqs == -1)
  336. shca->max_num_cqs = min_t(int, rblock->max_cq,
  337. EHCA_MAX_NUM_QUEUES);
  338. else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
  339. ehca_gen_warn("The requested number of CQs is out of range "
  340. "(1 - %i) specified by HW. Value is set to %i",
  341. rblock->max_cq, rblock->max_cq);
  342. }
  343. /* query max MTU from first port -- it's the same for all ports */
  344. port = (struct hipz_query_port *)rblock;
  345. h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
  346. if (h_ret != H_SUCCESS) {
  347. ehca_gen_err("Cannot query port properties. h_ret=%lli",
  348. h_ret);
  349. ret = -EPERM;
  350. goto sense_attributes1;
  351. }
  352. shca->max_mtu = port->max_mtu;
  353. sense_attributes1:
  354. ehca_free_fw_ctrlblock(rblock);
  355. return ret;
  356. }
  357. static int init_node_guid(struct ehca_shca *shca)
  358. {
  359. int ret = 0;
  360. struct hipz_query_hca *rblock;
  361. rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
  362. if (!rblock) {
  363. ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
  364. return -ENOMEM;
  365. }
  366. if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
  367. ehca_err(&shca->ib_device, "Can't query device properties");
  368. ret = -EINVAL;
  369. goto init_node_guid1;
  370. }
  371. memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
  372. init_node_guid1:
  373. ehca_free_fw_ctrlblock(rblock);
  374. return ret;
  375. }
  376. static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num,
  377. struct ib_port_immutable *immutable)
  378. {
  379. struct ib_port_attr attr;
  380. int err;
  381. err = ehca_query_port(ibdev, port_num, &attr);
  382. if (err)
  383. return err;
  384. immutable->pkey_tbl_len = attr.pkey_tbl_len;
  385. immutable->gid_tbl_len = attr.gid_tbl_len;
  386. immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
  387. immutable->max_mad_size = IB_MGMT_MAD_SIZE;
  388. return 0;
  389. }
/*
 * Initialise the embedded ib_device: node GUID, name template, uverbs
 * ABI/command mask, device attributes and all verb entry points.  SRQ
 * verbs and commands are only wired up if the hardware advertises
 * HCA_CAP_SRQ.  Returns 0 on success or the error from
 * init_node_guid().
 */
static int ehca_init_device(struct ehca_shca *shca)
{
	int ret;

	ret = init_node_guid(shca);
	if (ret)
		return ret;

	strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
	shca->ib_device.owner = THIS_MODULE;

	shca->ib_device.uverbs_abi_ver = 8;
	shca->ib_device.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST);

	shca->ib_device.node_type = RDMA_NODE_IB_CA;
	shca->ib_device.phys_port_cnt = shca->num_ports;
	shca->ib_device.num_comp_vectors = 1;
	shca->ib_device.dma_device = &shca->ofdev->dev;

	/* verb entry points; the commented-out hooks are unimplemented */
	shca->ib_device.query_device = ehca_query_device;
	shca->ib_device.query_port = ehca_query_port;
	shca->ib_device.query_gid = ehca_query_gid;
	shca->ib_device.query_pkey = ehca_query_pkey;
	/* shca->in_device.modify_device = ehca_modify_device */
	shca->ib_device.modify_port = ehca_modify_port;
	shca->ib_device.alloc_ucontext = ehca_alloc_ucontext;
	shca->ib_device.dealloc_ucontext = ehca_dealloc_ucontext;
	shca->ib_device.alloc_pd = ehca_alloc_pd;
	shca->ib_device.dealloc_pd = ehca_dealloc_pd;
	shca->ib_device.create_ah = ehca_create_ah;
	/* shca->ib_device.modify_ah = ehca_modify_ah; */
	shca->ib_device.query_ah = ehca_query_ah;
	shca->ib_device.destroy_ah = ehca_destroy_ah;
	shca->ib_device.create_qp = ehca_create_qp;
	shca->ib_device.modify_qp = ehca_modify_qp;
	shca->ib_device.query_qp = ehca_query_qp;
	shca->ib_device.destroy_qp = ehca_destroy_qp;
	shca->ib_device.post_send = ehca_post_send;
	shca->ib_device.post_recv = ehca_post_recv;
	shca->ib_device.create_cq = ehca_create_cq;
	shca->ib_device.destroy_cq = ehca_destroy_cq;
	shca->ib_device.resize_cq = ehca_resize_cq;
	shca->ib_device.poll_cq = ehca_poll_cq;
	/* shca->ib_device.peek_cq = ehca_peek_cq; */
	shca->ib_device.req_notify_cq = ehca_req_notify_cq;
	/* shca->ib_device.req_ncomp_notif = ehca_req_ncomp_notif; */
	shca->ib_device.get_dma_mr = ehca_get_dma_mr;
	shca->ib_device.reg_phys_mr = ehca_reg_phys_mr;
	shca->ib_device.reg_user_mr = ehca_reg_user_mr;
	shca->ib_device.query_mr = ehca_query_mr;
	shca->ib_device.dereg_mr = ehca_dereg_mr;
	shca->ib_device.rereg_phys_mr = ehca_rereg_phys_mr;
	shca->ib_device.alloc_mw = ehca_alloc_mw;
	shca->ib_device.bind_mw = ehca_bind_mw;
	shca->ib_device.dealloc_mw = ehca_dealloc_mw;
	shca->ib_device.alloc_fmr = ehca_alloc_fmr;
	shca->ib_device.map_phys_fmr = ehca_map_phys_fmr;
	shca->ib_device.unmap_fmr = ehca_unmap_fmr;
	shca->ib_device.dealloc_fmr = ehca_dealloc_fmr;
	shca->ib_device.attach_mcast = ehca_attach_mcast;
	shca->ib_device.detach_mcast = ehca_detach_mcast;
	shca->ib_device.process_mad = ehca_process_mad;
	shca->ib_device.mmap = ehca_mmap;
	shca->ib_device.dma_ops = &ehca_dma_mapping_ops;
	shca->ib_device.get_port_immutable = ehca_port_immutable;

	/* SRQ support is conditional on the hardware capability bit */
	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
		shca->ib_device.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
		shca->ib_device.create_srq = ehca_create_srq;
		shca->ib_device.modify_srq = ehca_modify_srq;
		shca->ib_device.query_srq = ehca_query_srq;
		shca->ib_device.destroy_srq = ehca_destroy_srq;
		shca->ib_device.post_srq_recv = ehca_post_srq_recv;
	}

	return ret;
}
/*
 * Create the AQP1 (GSI) special QP and a small CQ for @port (1-based).
 * Fails with -EPERM if the CQ or QP for this port already exists.  On
 * QP creation failure the freshly created CQ is destroyed again.
 * Returns 0 on success or a negative errno.
 */
static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
{
	struct ehca_sport *sport = &shca->sport[port - 1];
	struct ib_cq *ibcq;
	struct ib_qp *ibqp;
	struct ib_qp_init_attr qp_init_attr;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	if (sport->ibcq_aqp1) {
		ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
		return -EPERM;
	}

	cq_attr.cqe = 10;
	/* (void *)(-1) marks this CQ as driver-internal */
	ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1),
			    &cq_attr);
	if (IS_ERR(ibcq)) {
		ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
		return PTR_ERR(ibcq);
	}
	sport->ibcq_aqp1 = ibcq;

	if (sport->ibqp_sqp[IB_QPT_GSI]) {
		ehca_err(&shca->ib_device, "AQP1 QP is already created.");
		ret = -EPERM;
		goto create_aqp1;
	}

	memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr));
	qp_init_attr.send_cq = ibcq;
	qp_init_attr.recv_cq = ibcq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = 100;
	qp_init_attr.cap.max_recv_wr = 100;
	qp_init_attr.cap.max_send_sge = 2;
	qp_init_attr.cap.max_recv_sge = 1;
	qp_init_attr.qp_type = IB_QPT_GSI;
	qp_init_attr.port_num = port;
	qp_init_attr.qp_context = NULL;
	qp_init_attr.event_handler = NULL;
	qp_init_attr.srq = NULL;

	ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
	if (IS_ERR(ibqp)) {
		ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
		ret = PTR_ERR(ibqp);
		goto create_aqp1;
	}
	sport->ibqp_sqp[IB_QPT_GSI] = ibqp;

	return 0;

create_aqp1:
	/* NOTE(review): sport->ibcq_aqp1 is left pointing at the
	 * destroyed CQ here, so a retry would hit the "already created"
	 * check above -- confirm whether callers ever retry. */
	ib_destroy_cq(sport->ibcq_aqp1);
	return ret;
}
  530. static int ehca_destroy_aqp1(struct ehca_sport *sport)
  531. {
  532. int ret;
  533. ret = ib_destroy_qp(sport->ibqp_sqp[IB_QPT_GSI]);
  534. if (ret) {
  535. ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret);
  536. return ret;
  537. }
  538. ret = ib_destroy_cq(sport->ibcq_aqp1);
  539. if (ret)
  540. ehca_gen_err("Cannot destroy AQP1 CQ. ret=%i", ret);
  541. return ret;
  542. }
  543. static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
  544. {
  545. return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
  546. }
  547. static ssize_t ehca_store_debug_level(struct device_driver *ddp,
  548. const char *buf, size_t count)
  549. {
  550. int value = (*buf) - '0';
  551. if (value >= 0 && value <= 9)
  552. ehca_debug_level = value;
  553. return 1;
  554. }
/* Driver-level sysfs attribute "debug_level" (root read/write). */
static DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
		   ehca_show_debug_level, ehca_store_debug_level);

static struct attribute *ehca_drv_attrs[] = {
	&driver_attr_debug_level.attr,
	NULL
};

static struct attribute_group ehca_drv_attr_grp = {
	.attrs = ehca_drv_attrs
};

/* NULL-terminated list of driver attribute groups */
static const struct attribute_group *ehca_drv_attr_groups[] = {
	&ehca_drv_attr_grp,
	NULL,
};
/*
 * Generate a sysfs show routine plus a read-only DEVICE_ATTR for one
 * field of struct hipz_query_hca.  Each read re-queries the firmware
 * and prints the named rblock field in decimal; "num_ports" is
 * special-cased to honour the nr_ports=1 module parameter.  On any
 * failure the attribute reads back empty (the routine returns 0).
 */
#define EHCA_RESOURCE_ATTR(name) \
static ssize_t ehca_show_##name(struct device *dev, \
				struct device_attribute *attr, \
				char *buf) \
{ \
	struct ehca_shca *shca; \
	struct hipz_query_hca *rblock; \
	int data; \
 \
	shca = dev_get_drvdata(dev); \
 \
	rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); \
	if (!rblock) { \
		dev_err(dev, "Can't allocate rblock memory.\n"); \
		return 0; \
	} \
 \
	if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
		dev_err(dev, "Can't query device properties\n"); \
		ehca_free_fw_ctrlblock(rblock); \
		return 0; \
	} \
 \
	data = rblock->name; \
	ehca_free_fw_ctrlblock(rblock); \
 \
	if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1)) \
		return snprintf(buf, 256, "1\n"); \
	else \
		return snprintf(buf, 256, "%d\n", data); \
 \
} \
static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);

/* one read-only sysfs attribute per firmware resource limit/counter */
EHCA_RESOURCE_ATTR(num_ports);
EHCA_RESOURCE_ATTR(hw_ver);
EHCA_RESOURCE_ATTR(max_eq);
EHCA_RESOURCE_ATTR(cur_eq);
EHCA_RESOURCE_ATTR(max_cq);
EHCA_RESOURCE_ATTR(cur_cq);
EHCA_RESOURCE_ATTR(max_qp);
EHCA_RESOURCE_ATTR(cur_qp);
EHCA_RESOURCE_ATTR(max_mr);
EHCA_RESOURCE_ATTR(cur_mr);
EHCA_RESOURCE_ATTR(max_mw);
EHCA_RESOURCE_ATTR(cur_mw);
EHCA_RESOURCE_ATTR(max_pd);
EHCA_RESOURCE_ATTR(max_ah);
/* sysfs "adapter_handle": the firmware handle of this HCA, in hex. */
static ssize_t ehca_show_adapter_handle(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ehca_shca *shca = dev_get_drvdata(dev);

	return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
}
static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
/* Per-device sysfs attributes; the group is registered in ehca_probe()
 * and removed in ehca_remove(). */
static struct attribute *ehca_dev_attrs[] = {
	&dev_attr_adapter_handle.attr,
	&dev_attr_num_ports.attr,
	&dev_attr_hw_ver.attr,
	&dev_attr_max_eq.attr,
	&dev_attr_cur_eq.attr,
	&dev_attr_max_cq.attr,
	&dev_attr_cur_cq.attr,
	&dev_attr_max_qp.attr,
	&dev_attr_cur_qp.attr,
	&dev_attr_max_mr.attr,
	&dev_attr_cur_mr.attr,
	&dev_attr_max_mw.attr,
	&dev_attr_cur_mw.attr,
	&dev_attr_max_pd.attr,
	&dev_attr_max_ah.attr,
	NULL
};

static struct attribute_group ehca_dev_attr_grp = {
	.attrs = ehca_dev_attrs
};
/*
 * Probe one eHCA adapter: read its firmware handle from the device
 * tree, allocate and initialise the ehca_shca structure, sense
 * attributes, create the event queues, the internal PD and max-MR,
 * register with the IB core, optionally open AQP1 on each port, create
 * sysfs attributes and add the adapter to the global shca_list.
 *
 * Errors after device allocation unwind in reverse order through the
 * probe* labels; note that "ret" is reused for the cleanup calls, so
 * every such failure path returns -EINVAL rather than the original
 * error (the two early -ENODEV/-ENOMEM returns are the exception).
 */
static int ehca_probe(struct platform_device *dev)
{
	struct ehca_shca *shca;
	const u64 *handle;
	struct ib_pd *ibpd;
	int ret, i, eq_size;
	unsigned long flags;

	handle = of_get_property(dev->dev.of_node, "ibm,hca-handle", NULL);
	if (!handle) {
		ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
			     dev->dev.of_node->full_name);
		return -ENODEV;
	}
	if (!(*handle)) {
		ehca_gen_err("Wrong eHCA handle for adapter: %s.",
			     dev->dev.of_node->full_name);
		return -ENODEV;
	}

	shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
	if (!shca) {
		ehca_gen_err("Cannot allocate shca memory.");
		return -ENOMEM;
	}

	mutex_init(&shca->modify_mutex);
	atomic_set(&shca->num_cqs, 0);
	atomic_set(&shca->num_qps, 0);
	shca->max_num_qps = ehca_max_qp; /* -1: autodetect in sense_attributes */
	shca->max_num_cqs = ehca_max_cq; /* -1: autodetect in sense_attributes */

	for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
		spin_lock_init(&shca->sport[i].mod_sqp_lock);

	shca->ofdev = dev;
	shca->ipz_hca_handle.handle = *handle;
	dev_set_drvdata(&dev->dev, shca);

	ret = ehca_sense_attributes(shca);
	if (ret < 0) {
		ehca_gen_err("Cannot sense eHCA attributes.");
		goto probe1;
	}

	ret = ehca_init_device(shca);
	if (ret) {
		ehca_gen_err("Cannot init ehca device struct");
		goto probe1;
	}

	/* EQ size derived from the CQ/QP limits sensed above */
	eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
	/* create event queues */
	ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
	if (ret) {
		ehca_err(&shca->ib_device, "Cannot create EQ.");
		goto probe1;
	}

	ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
	if (ret) {
		ehca_err(&shca->ib_device, "Cannot create NEQ.");
		goto probe3;
	}

	/* create internal protection domain */
	ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
	if (IS_ERR(ibpd)) {
		ehca_err(&shca->ib_device, "Cannot create internal PD.");
		ret = PTR_ERR(ibpd);
		goto probe4;
	}

	shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
	shca->pd->ib_pd.device = &shca->ib_device;

	/* create internal max MR */
	ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
	if (ret) {
		ehca_err(&shca->ib_device, "Cannot create internal MR ret=%i",
			 ret);
		goto probe5;
	}

	ret = ib_register_device(&shca->ib_device, NULL);
	if (ret) {
		ehca_err(&shca->ib_device,
			 "ib_register_device() failed ret=%i", ret);
		goto probe6;
	}

	/* create AQP1 for port 1 */
	if (ehca_open_aqp1 == 1) {
		shca->sport[0].port_state = IB_PORT_DOWN;
		ret = ehca_create_aqp1(shca, 1);
		if (ret) {
			ehca_err(&shca->ib_device,
				 "Cannot create AQP1 for port 1.");
			goto probe7;
		}
	}

	/* create AQP1 for port 2 */
	if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) {
		shca->sport[1].port_state = IB_PORT_DOWN;
		ret = ehca_create_aqp1(shca, 2);
		if (ret) {
			ehca_err(&shca->ib_device,
				 "Cannot create AQP1 for port 2.");
			goto probe8;
		}
	}

	ret = sysfs_create_group(&dev->dev.kobj, &ehca_dev_attr_grp);
	if (ret) /* only complain; we can live without attributes */
		ehca_err(&shca->ib_device,
			 "Cannot create device attributes ret=%d", ret);

	spin_lock_irqsave(&shca_list_lock, flags);
	list_add(&shca->shca_list, &shca_list);
	spin_unlock_irqrestore(&shca_list_lock, flags);

	return 0;

probe8:
	ret = ehca_destroy_aqp1(&shca->sport[0]);
	if (ret)
		ehca_err(&shca->ib_device,
			 "Cannot destroy AQP1 for port 1. ret=%i", ret);

probe7:
	ib_unregister_device(&shca->ib_device);

probe6:
	ret = ehca_dereg_internal_maxmr(shca);
	if (ret)
		ehca_err(&shca->ib_device,
			 "Cannot destroy internal MR. ret=%x", ret);

probe5:
	ret = ehca_dealloc_pd(&shca->pd->ib_pd);
	if (ret)
		ehca_err(&shca->ib_device,
			 "Cannot destroy internal PD. ret=%x", ret);

probe4:
	ret = ehca_destroy_eq(shca, &shca->neq);
	if (ret)
		ehca_err(&shca->ib_device,
			 "Cannot destroy NEQ. ret=%x", ret);

probe3:
	ret = ehca_destroy_eq(shca, &shca->eq);
	if (ret)
		ehca_err(&shca->ib_device,
			 "Cannot destroy EQ. ret=%x", ret);

probe1:
	ib_dealloc_device(&shca->ib_device);

	return -EINVAL;
}
  780. static int ehca_remove(struct platform_device *dev)
  781. {
  782. struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
  783. unsigned long flags;
  784. int ret;
  785. sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
  786. if (ehca_open_aqp1 == 1) {
  787. int i;
  788. for (i = 0; i < shca->num_ports; i++) {
  789. ret = ehca_destroy_aqp1(&shca->sport[i]);
  790. if (ret)
  791. ehca_err(&shca->ib_device,
  792. "Cannot destroy AQP1 for port %x "
  793. "ret=%i", ret, i);
  794. }
  795. }
  796. ib_unregister_device(&shca->ib_device);
  797. ret = ehca_dereg_internal_maxmr(shca);
  798. if (ret)
  799. ehca_err(&shca->ib_device,
  800. "Cannot destroy internal MR. ret=%i", ret);
  801. ret = ehca_dealloc_pd(&shca->pd->ib_pd);
  802. if (ret)
  803. ehca_err(&shca->ib_device,
  804. "Cannot destroy internal PD. ret=%i", ret);
  805. ret = ehca_destroy_eq(shca, &shca->eq);
  806. if (ret)
  807. ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%i", ret);
  808. ret = ehca_destroy_eq(shca, &shca->neq);
  809. if (ret)
  810. ehca_err(&shca->ib_device, "Canot destroy NEQ. ret=%i", ret);
  811. ib_dealloc_device(&shca->ib_device);
  812. spin_lock_irqsave(&shca_list_lock, flags);
  813. list_del(&shca->shca_list);
  814. spin_unlock_irqrestore(&shca_list_lock, flags);
  815. return ret;
  816. }
/*
 * Open Firmware match table: bind this driver to device-tree nodes
 * named "lhca" with compatible string "IBM,lhca". The empty sentinel
 * entry terminates the table.
 */
static struct of_device_id ehca_device_table[] =
{
	{
		.name       = "lhca",
		.compatible = "IBM,lhca",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehca_device_table);
/*
 * Platform driver glue: wires the probe/remove callbacks, the driver
 * sysfs attribute groups and the OF match table into the driver core.
 */
static struct platform_driver ehca_driver = {
	.probe       = ehca_probe,
	.remove      = ehca_remove,
	.driver = {
		.name = "ehca",
		.owner = THIS_MODULE,
		.groups = ehca_drv_attr_groups,
		.of_match_table = ehca_device_table,
	},
};
  836. void ehca_poll_eqs(unsigned long data)
  837. {
  838. struct ehca_shca *shca;
  839. spin_lock(&shca_list_lock);
  840. list_for_each_entry(shca, &shca_list, shca_list) {
  841. if (shca->eq.is_initialized) {
  842. /* call deadman proc only if eq ptr does not change */
  843. struct ehca_eq *eq = &shca->eq;
  844. int max = 3;
  845. volatile u64 q_ofs, q_ofs2;
  846. unsigned long flags;
  847. spin_lock_irqsave(&eq->spinlock, flags);
  848. q_ofs = eq->ipz_queue.current_q_offset;
  849. spin_unlock_irqrestore(&eq->spinlock, flags);
  850. do {
  851. spin_lock_irqsave(&eq->spinlock, flags);
  852. q_ofs2 = eq->ipz_queue.current_q_offset;
  853. spin_unlock_irqrestore(&eq->spinlock, flags);
  854. max--;
  855. } while (q_ofs == q_ofs2 && max > 0);
  856. if (q_ofs == q_ofs2)
  857. ehca_process_eq(shca, 0);
  858. }
  859. }
  860. mod_timer(&poll_eqs_timer, round_jiffies(jiffies + HZ));
  861. spin_unlock(&shca_list_lock);
  862. }
  863. static int ehca_mem_notifier(struct notifier_block *nb,
  864. unsigned long action, void *data)
  865. {
  866. static unsigned long ehca_dmem_warn_time;
  867. unsigned long flags;
  868. switch (action) {
  869. case MEM_CANCEL_OFFLINE:
  870. case MEM_CANCEL_ONLINE:
  871. case MEM_ONLINE:
  872. case MEM_OFFLINE:
  873. return NOTIFY_OK;
  874. case MEM_GOING_ONLINE:
  875. case MEM_GOING_OFFLINE:
  876. /* only ok if no hca is attached to the lpar */
  877. spin_lock_irqsave(&shca_list_lock, flags);
  878. if (list_empty(&shca_list)) {
  879. spin_unlock_irqrestore(&shca_list_lock, flags);
  880. return NOTIFY_OK;
  881. } else {
  882. spin_unlock_irqrestore(&shca_list_lock, flags);
  883. if (printk_timed_ratelimit(&ehca_dmem_warn_time,
  884. 30 * 1000))
  885. ehca_gen_err("DMEM operations are not allowed"
  886. "in conjunction with eHCA");
  887. return NOTIFY_BAD;
  888. }
  889. }
  890. return NOTIFY_OK;
  891. }
/* Notifier block registered for memory hotplug events in module init. */
static struct notifier_block ehca_mem_nb = {
	.notifier_call = ehca_mem_notifier,
};
  895. static int __init ehca_module_init(void)
  896. {
  897. int ret;
  898. printk(KERN_INFO "eHCA Infiniband Device Driver "
  899. "(Version " HCAD_VERSION ")\n");
  900. ret = ehca_create_comp_pool();
  901. if (ret) {
  902. ehca_gen_err("Cannot create comp pool.");
  903. return ret;
  904. }
  905. ret = ehca_create_slab_caches();
  906. if (ret) {
  907. ehca_gen_err("Cannot create SLAB caches");
  908. ret = -ENOMEM;
  909. goto module_init1;
  910. }
  911. ret = ehca_create_busmap();
  912. if (ret) {
  913. ehca_gen_err("Cannot create busmap.");
  914. goto module_init2;
  915. }
  916. ret = ibmebus_register_driver(&ehca_driver);
  917. if (ret) {
  918. ehca_gen_err("Cannot register eHCA device driver");
  919. ret = -EINVAL;
  920. goto module_init3;
  921. }
  922. ret = register_memory_notifier(&ehca_mem_nb);
  923. if (ret) {
  924. ehca_gen_err("Failed registering memory add/remove notifier");
  925. goto module_init4;
  926. }
  927. if (ehca_poll_all_eqs != 1) {
  928. ehca_gen_err("WARNING!!!");
  929. ehca_gen_err("It is possible to lose interrupts.");
  930. } else {
  931. init_timer(&poll_eqs_timer);
  932. poll_eqs_timer.function = ehca_poll_eqs;
  933. poll_eqs_timer.expires = jiffies + HZ;
  934. add_timer(&poll_eqs_timer);
  935. }
  936. return 0;
  937. module_init4:
  938. ibmebus_unregister_driver(&ehca_driver);
  939. module_init3:
  940. ehca_destroy_busmap();
  941. module_init2:
  942. ehca_destroy_slab_caches();
  943. module_init1:
  944. ehca_destroy_comp_pool();
  945. return ret;
  946. };
  947. static void __exit ehca_module_exit(void)
  948. {
  949. if (ehca_poll_all_eqs == 1)
  950. del_timer_sync(&poll_eqs_timer);
  951. ibmebus_unregister_driver(&ehca_driver);
  952. unregister_memory_notifier(&ehca_mem_nb);
  953. ehca_destroy_busmap();
  954. ehca_destroy_slab_caches();
  955. ehca_destroy_comp_pool();
  956. idr_destroy(&ehca_cq_idr);
  957. idr_destroy(&ehca_qp_idr);
  958. };
/* Register the module entry and exit points with the kernel. */
module_init(ehca_module_init);
module_exit(ehca_module_exit);