// SPDX-License-Identifier: GPL-2.0+
/*
 * IBM Power Systems Virtual Management Channel Support.
 *
 * Copyright (c) 2004, 2018 IBM Corp.
 *   Dave Engebretsen engebret@us.ibm.com
 *   Steven Royer seroyer@linux.vnet.ibm.com
 *   Adam Reznechek adreznec@linux.vnet.ibm.com
 *   Bryant G. Ly <bryantly@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/miscdevice.h>
#include <linux/sched/signal.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/vio.h>

#include "ibmvmc.h"

#define IBMVMC_DRIVER_VERSION "1.0"

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);

static const char ibmvmc_driver_name[] = "ibmvmc";

static struct ibmvmc_struct ibmvmc;
static struct ibmvmc_hmc hmcs[MAX_HMCS];
static struct crq_server_adapter ibmvmc_adapter;

static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
static int ibmvmc_max_hmcs = DEFAULT_HMCS;
static int ibmvmc_max_mtu = DEFAULT_MTU;

static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
			       u64 dliobn, u64 dlioba)
{
	long rc = 0;

	/* Ensure all writes to source memory are visible before hcall */
	dma_wmb();
	pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx)\n",
		 length, sliobn, slioba, dliobn, dlioba);
	rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
				dliobn, dlioba);
	pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);

	return rc;
}

static inline void h_free_crq(uint32_t unit_address)
{
	long rc = 0;

	do {
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));

		rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
}

/**
 * h_request_vmc: - request a hypervisor virtual management channel device
 * @vmc_index: drc index of the vmc device created
 *
 * Requests the hypervisor create a new virtual management channel device,
 * allowing this partition to send hypervisor virtualization control
 * commands.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static inline long h_request_vmc(u32 *vmc_index)
{
	long rc = 0;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	do {
		if (H_IS_LONG_BUSY(rc))
			msleep(get_longbusy_msecs(rc));

		/* Call to request the VMC device from phyp */
		rc = plpar_hcall(H_REQUEST_VMC, retbuf);
		pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
		*vmc_index = retbuf[0];
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	return rc;
}

/* routines for managing a command/response queue */
/**
 * ibmvmc_handle_event: - Interrupt handler for crq events
 * @irq: number of irq to handle, not used
 * @dev_instance: crq_server_adapter that received interrupt
 *
 * Disables interrupts and schedules ibmvmc_task
 *
 * Always returns IRQ_HANDLED
 */
static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
{
	struct crq_server_adapter *adapter =
		(struct crq_server_adapter *)dev_instance;

	vio_disable_interrupts(to_vio_dev(adapter->dev));
	tasklet_schedule(&adapter->work_task);

	return IRQ_HANDLED;
}

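/*
 * For context (a sketch; the CRQ setup code falls outside this excerpt):
 * the handler above would be wired up during adapter initialization with
 * a request_irq() call along these lines, so that dev_instance carries
 * the adapter back into the handler:
 *
 *	rc = request_irq(vdev->irq, ibmvmc_handle_event, 0,
 *			 "ibmvmc", (void *)adapter);
 */
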
/**
 * ibmvmc_release_crq_queue - Release CRQ Queue
 *
 * @adapter: crq_server_adapter struct
 *
 * Frees the IRQ, stops the reset thread, closes the CRQ with the
 * hypervisor, and releases the queue memory.
 */
static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;

	free_irq(vdev->irq, (void *)adapter);
	tasklet_kill(&adapter->work_task);

	if (adapter->reset_task)
		kthread_stop(adapter->reset_task);

	h_free_crq(vdev->unit_address);
	dma_unmap_single(adapter->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs);
}

/**
 * ibmvmc_reset_crq_queue - Reset CRQ Queue
 *
 * @adapter: crq_server_adapter struct
 *
 * This function calls h_free_crq and then calls H_REG_CRQ and does all the
 * bookkeeping to get us back to where we can communicate.
 *
 * Return:
 *	0 - Success
 *	Non-Zero - Failure
 */
static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;
	int rc = 0;

	/* Close the CRQ */
	h_free_crq(vdev->unit_address);

	/* Clean out the queue */
	memset(queue->msgs, 0x00, PAGE_SIZE);
	queue->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == 2)
		/* Adapter is good, but other end is not ready */
		dev_warn(adapter->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);

	return rc;
}

/**
 * crq_queue_next_crq: - Returns the next entry in message queue
 * @queue: crq_queue to use
 *
 * Returns pointer to next entry in queue, or NULL if there are no new
 * entries in the CRQ.
 */
static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
{
	struct ibmvmc_crq_msg *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	crq = &queue->msgs[queue->cur];
	if (crq->valid & 0x80) {
		if (++queue->cur == queue->size)
			queue->cur = 0;

		/* Ensure the read of the valid bit occurs before reading any
		 * other bits of the CRQ entry
		 */
		dma_rmb();
	} else {
		crq = NULL;
	}

	spin_unlock_irqrestore(&queue->lock, flags);

	return crq;
}

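/*
 * Illustrative consumer loop (a sketch; the actual dispatch runs in the
 * adapter tasklet, outside this excerpt): callers drain the ring until
 * NULL, then clear each entry's valid byte to hand the slot back to the
 * hypervisor:
 *
 *	struct ibmvmc_crq_msg *crq;
 *
 *	while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
 *		ibmvmc_handle_crq(crq, adapter);	// dispatch helper
 *		crq->valid = 0;
 *	}
 */
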
/**
 * ibmvmc_send_crq - Send CRQ
 *
 * @adapter: crq_server_adapter struct
 * @word1: Word1 Data field
 * @word2: Word2 Data field
 *
 * Return:
 *	0 - Success
 *	Non-Zero - Failure
 */
static long ibmvmc_send_crq(struct crq_server_adapter *adapter,
			    u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	long rc = 0;

	dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n",
		vdev->unit_address, word1, word2);

	/*
	 * Ensure the command buffer is flushed to memory before handing it
	 * over to the other side to prevent it from fetching any stale data.
	 */
	dma_wmb();
	rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
	dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);

	return rc;
}

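/*
 * Senders throughout this file follow one convention: build a 16-byte
 * struct ibmvmc_crq_msg on the stack, overlay it with two big-endian
 * 64-bit words, and pass those words to ibmvmc_send_crq(). A minimal
 * sketch of the pattern:
 *
 *	struct ibmvmc_crq_msg crq_msg;
 *	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
 *
 *	crq_msg.valid = 0x80;
 *	// ... fill in the remaining message fields ...
 *	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
 *			be64_to_cpu(crq_as_u64[1]));
 */
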
/**
 * alloc_dma_buffer - Create DMA Buffer
 *
 * @vdev: vio_dev struct
 * @size: Size field
 * @dma_handle: DMA address field
 *
 * Allocates memory for the command queue and maps remote memory into an
 * ioba.
 *
 * Returns a pointer to the buffer
 */
static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
			      dma_addr_t *dma_handle)
{
	/* allocate memory */
	void *buffer = kzalloc(size, GFP_ATOMIC);

	if (!buffer) {
		*dma_handle = 0;
		return NULL;
	}

	/* DMA map */
	*dma_handle = dma_map_single(&vdev->dev, buffer, size,
				     DMA_BIDIRECTIONAL);

	if (dma_mapping_error(&vdev->dev, *dma_handle)) {
		*dma_handle = 0;
		kzfree(buffer);
		return NULL;
	}

	return buffer;
}

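/*
 * Note: GFP_ATOMIC is used above because this allocator runs with an HMC
 * spinlock held (see ibmvmc_add_buffer() below). Each successful call is
 * paired with a free_dma_buffer() of the same size, e.g. (sketch):
 *
 *	buf = alloc_dma_buffer(vdev, ibmvmc.max_mtu, &dma_addr);
 *	if (buf) {
 *		// ... use the buffer ...
 *		free_dma_buffer(vdev, ibmvmc.max_mtu, buf, dma_addr);
 *	}
 */
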
/**
 * free_dma_buffer - Free DMA Buffer
 *
 * @vdev: vio_dev struct
 * @size: Size field
 * @vaddr: Address field
 * @dma_handle: DMA address field
 *
 * Releases memory for a command queue and unmaps mapped remote memory.
 */
static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
			    dma_addr_t dma_handle)
{
	/* DMA unmap */
	dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL);

	/* deallocate memory */
	kzfree(vaddr);
}

/**
 * ibmvmc_get_valid_hmc_buffer - Retrieve Valid HMC Buffer
 *
 * @hmc_index: HMC Index Field
 *
 * Return:
 *	Pointer to ibmvmc_buffer
 */
static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_buffer *ret_buf = NULL;
	unsigned long i;

	if (hmc_index > ibmvmc.max_hmc_index)
		return NULL;

	buffer = hmcs[hmc_index].buffer;

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid && buffer[i].free &&
		    buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
			buffer[i].free = 0;
			ret_buf = &buffer[i];
			break;
		}
	}

	return ret_buf;
}

/**
 * ibmvmc_get_free_hmc_buffer - Get Free HMC Buffer
 *
 * @adapter: crq_server_adapter struct
 * @hmc_index: Hmc Index field
 *
 * Return:
 *	Pointer to ibmvmc_buffer
 */
static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter,
							u8 hmc_index)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_buffer *ret_buf = NULL;
	unsigned long i;

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n",
			 hmc_index);
		return NULL;
	}

	buffer = hmcs[hmc_index].buffer;

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].free &&
		    buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
			buffer[i].free = 0;
			ret_buf = &buffer[i];
			break;
		}
	}

	return ret_buf;
}

/**
 * ibmvmc_free_hmc_buffer - Free an HMC Buffer
 *
 * @hmc: ibmvmc_hmc struct
 * @buffer: ibmvmc_buffer struct
 */
static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc,
				   struct ibmvmc_buffer *buffer)
{
	unsigned long flags;

	spin_lock_irqsave(&hmc->lock, flags);
	buffer->free = 1;
	spin_unlock_irqrestore(&hmc->lock, flags);
}

/**
 * ibmvmc_count_hmc_buffers - Count HMC Buffers
 *
 * @hmc_index: HMC Index field
 * @valid: Valid number of buffers field
 * @free: Free number of buffers field
 */
static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid,
				     unsigned int *free)
{
	struct ibmvmc_buffer *buffer;
	unsigned long i;
	unsigned long flags;

	if (hmc_index > ibmvmc.max_hmc_index)
		return;

	if (!valid || !free)
		return;

	*valid = 0;
	*free = 0;

	buffer = hmcs[hmc_index].buffer;
	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid) {
			*valid = *valid + 1;
			if (buffer[i].free)
				*free = *free + 1;
		}
	}

	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
}

/**
 * ibmvmc_get_free_hmc - Get Free HMC
 *
 * Return:
 *	Pointer to an available HMC Connection
 *	Null otherwise
 */
static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void)
{
	unsigned long i;
	unsigned long flags;

	/*
	 * Find an available HMC connection.
	 */
	for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
		spin_lock_irqsave(&hmcs[i].lock, flags);
		if (hmcs[i].state == ibmhmc_state_free) {
			hmcs[i].index = i;
			hmcs[i].state = ibmhmc_state_initial;
			spin_unlock_irqrestore(&hmcs[i].lock, flags);
			return &hmcs[i];
		}
		spin_unlock_irqrestore(&hmcs[i].lock, flags);
	}

	return NULL;
}

/**
 * ibmvmc_return_hmc - Return an HMC Connection
 *
 * @hmc: ibmvmc_hmc struct
 * @release_readers: if true, wake any readers blocked on the session
 *
 * This function releases the HMC connections back into the pool.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
{
	struct ibmvmc_buffer *buffer;
	struct crq_server_adapter *adapter;
	struct vio_dev *vdev;
	unsigned long i;
	unsigned long flags;

	if (!hmc || !hmc->adapter)
		return -EIO;

	if (release_readers) {
		if (hmc->file_session) {
			struct ibmvmc_file_session *session = hmc->file_session;

			session->valid = 0;
			wake_up_interruptible(&ibmvmc_read_wait);
		}
	}

	adapter = hmc->adapter;
	vdev = to_vio_dev(adapter->dev);

	spin_lock_irqsave(&hmc->lock, flags);
	hmc->index = 0;
	hmc->state = ibmhmc_state_free;
	hmc->queue_head = 0;
	hmc->queue_tail = 0;
	buffer = hmc->buffer;

	for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
		if (buffer[i].valid) {
			free_dma_buffer(vdev,
					ibmvmc.max_mtu,
					buffer[i].real_addr_local,
					buffer[i].dma_addr_local);
			dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i);
		}
		memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer));

		hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID;
	}

	spin_unlock_irqrestore(&hmc->lock, flags);

	return 0;
}

/**
 * ibmvmc_send_open - Interface Open
 * @buffer: Pointer to ibmvmc_buffer struct
 * @hmc: Pointer to ibmvmc_hmc struct
 *
 * This command is sent by the management partition as the result of a
 * management partition device request. It causes the hypervisor to
 * prepare a set of data buffers for the management application connection
 * indicated by the HMC index. A unique HMC index would be used if multiple
 * management applications running concurrently were desired. Before
 * responding to this command, the hypervisor must provide the management
 * partition with at least one of these new buffers via the Add Buffer
 * message. This indicates whether the messages are inbound or outbound
 * from the hypervisor.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_send_open(struct ibmvmc_buffer *buffer,
			    struct ibmvmc_hmc *hmc)
{
	struct ibmvmc_crq_msg crq_msg;
	struct crq_server_adapter *adapter;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
	int rc = 0;

	if (!hmc || !hmc->adapter)
		return -EIO;

	adapter = hmc->adapter;

	dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
		(unsigned long)buffer->size, (unsigned long)adapter->liobn,
		(unsigned long)buffer->dma_addr_local,
		(unsigned long)adapter->riobn,
		(unsigned long)buffer->dma_addr_remote);

	rc = h_copy_rdma(buffer->size,
			 adapter->liobn,
			 buffer->dma_addr_local,
			 adapter->riobn,
			 buffer->dma_addr_remote);
	if (rc) {
		dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n",
			rc);
		return -EIO;
	}

	hmc->state = ibmhmc_state_opening;

	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_OPEN;
	crq_msg.status = 0;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc->session;
	crq_msg.hmc_index = hmc->index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return rc;
}

/**
 * ibmvmc_send_close - Interface Close
 * @hmc: Pointer to ibmvmc_hmc struct
 *
 * This command is sent by the management partition to terminate a
 * management application to hypervisor connection. When this command is
 * sent, the management partition has quiesced all I/O operations to all
 * buffers associated with this management application connection, and
 * has freed any storage for these buffers.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_send_close(struct ibmvmc_hmc *hmc)
{
	struct ibmvmc_crq_msg crq_msg;
	struct crq_server_adapter *adapter;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
	int rc = 0;

	if (!hmc || !hmc->adapter)
		return -EIO;

	adapter = hmc->adapter;

	dev_info(adapter->dev, "CRQ send: close\n");

	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_CLOSE;
	crq_msg.status = 0;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc->session;
	crq_msg.hmc_index = hmc->index;
	crq_msg.var2.rsvd = 0;
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return rc;
}

/**
 * ibmvmc_send_capabilities - Send VMC Capabilities
 *
 * @adapter: crq_server_adapter struct
 *
 * The capabilities message is an administrative message sent after the CRQ
 * initialization sequence of messages and is used to exchange VMC
 * capabilities between the management partition and the hypervisor. The
 * management partition must send this message and the hypervisor must
 * respond with the VMC Capabilities Response message before HMC interface
 * messages can begin. Any HMC interface messages received before the
 * exchange of capabilities has completed are dropped.
 *
 * Return:
 *	0 - Success
 */
static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter)
{
	struct ibmvmc_admin_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;

	dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n");
	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_CAP;
	crq_msg.status = 0;
	crq_msg.rsvd[0] = 0;
	crq_msg.rsvd[1] = 0;
	crq_msg.max_hmc = ibmvmc_max_hmcs;
	crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu);
	crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size);
	crq_msg.crq_size = cpu_to_be16(adapter->queue.size);
	crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION);

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	ibmvmc.state = ibmvmc_state_capabilities;

	return 0;
}

/**
 * ibmvmc_send_add_buffer_resp - Add Buffer Response
 *
 * @adapter: crq_server_adapter struct
 * @status: Status field
 * @hmc_session: HMC Session field
 * @hmc_index: HMC Index field
 * @buffer_id: Buffer Id field
 *
 * This command is sent by the management partition to the hypervisor in
 * response to the Add Buffer message. The Status field indicates the result
 * of the command.
 *
 * Return:
 *	0 - Success
 */
static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter,
				       u8 status, u8 hmc_session,
				       u8 hmc_index, u16 buffer_id)
{
	struct ibmvmc_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;

	dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n");
	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_ADD_BUF_RESP;
	crq_msg.status = status;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc_session;
	crq_msg.hmc_index = hmc_index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return 0;
}

/**
 * ibmvmc_send_rem_buffer_resp - Remove Buffer Response
 *
 * @adapter: crq_server_adapter struct
 * @status: Status field
 * @hmc_session: HMC Session field
 * @hmc_index: HMC Index field
 * @buffer_id: Buffer Id field
 *
 * This command is sent by the management partition to the hypervisor in
 * response to the Remove Buffer message. The Buffer ID field indicates
 * which buffer the management partition selected to remove. The Status
 * field indicates the result of the command.
 *
 * Return:
 *	0 - Success
 */
static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
				       u8 status, u8 hmc_session,
				       u8 hmc_index, u16 buffer_id)
{
	struct ibmvmc_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;

	dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n");
	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_REM_BUF_RESP;
	crq_msg.status = status;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc_session;
	crq_msg.hmc_index = hmc_index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
	crq_msg.rsvd = 0;
	crq_msg.var3.rsvd = 0;

	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return 0;
}

/**
 * ibmvmc_send_msg - Signal Message
 *
 * @adapter: crq_server_adapter struct
 * @buffer: ibmvmc_buffer struct
 * @hmc: ibmvmc_hmc struct
 * @msg_len: message length field
 *
 * This command is sent between the management partition and the hypervisor
 * in order to signal the arrival of an HMC protocol message. The command
 * can be sent by both the management partition and the hypervisor. It is
 * used for all traffic between the management application and the hypervisor,
 * regardless of who initiated the communication.
 *
 * There is no response to this message.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
			   struct ibmvmc_buffer *buffer,
			   struct ibmvmc_hmc *hmc, int msg_len)
{
	struct ibmvmc_crq_msg crq_msg;
	__be64 *crq_as_u64 = (__be64 *)&crq_msg;
	int rc = 0;

	dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");

	rc = h_copy_rdma(msg_len,
			 adapter->liobn,
			 buffer->dma_addr_local,
			 adapter->riobn,
			 buffer->dma_addr_remote);
	if (rc) {
		dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
			rc);
		return rc;
	}

	crq_msg.valid = 0x80;
	crq_msg.type = VMC_MSG_SIGNAL;
	crq_msg.status = 0;
	crq_msg.var1.rsvd = 0;
	crq_msg.hmc_session = hmc->session;
	crq_msg.hmc_index = hmc->index;
	crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
	crq_msg.var3.msg_len = cpu_to_be32(msg_len);
	dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n",
		be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]));

	buffer->owner = VMC_BUF_OWNER_HV;
	ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
			be64_to_cpu(crq_as_u64[1]));

	return rc;
}

/**
 * ibmvmc_open - Open Session
 *
 * @inode: inode struct
 * @file: file struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_open(struct inode *inode, struct file *file)
{
	struct ibmvmc_file_session *session;

	pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
		 (unsigned long)inode, (unsigned long)file,
		 ibmvmc.state);

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return -ENOMEM;

	session->file = file;
	file->private_data = session;

	return 0;
}

/**
 * ibmvmc_close - Close Session
 *
 * @inode: inode struct
 * @file: file struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_close(struct inode *inode, struct file *file)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	int rc = 0;
	unsigned long flags;

	pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__,
		 (unsigned long)file, ibmvmc.state);

	session = file->private_data;
	if (!session)
		return -EIO;

	hmc = session->hmc;
	if (hmc) {
		if (!hmc->adapter)
			return -EIO;

		if (ibmvmc.state == ibmvmc_state_failed) {
			dev_warn(hmc->adapter->dev, "close: state_failed\n");
			return -EIO;
		}

		spin_lock_irqsave(&hmc->lock, flags);
		if (hmc->state >= ibmhmc_state_opening) {
			rc = ibmvmc_send_close(hmc);
			if (rc)
				dev_warn(hmc->adapter->dev, "close: send_close failed.\n");
		}
		spin_unlock_irqrestore(&hmc->lock, flags);
	}

	kzfree(session);

	return rc;
}

/**
 * ibmvmc_read - Read
 *
 * @file: file struct
 * @buf: Character buffer
 * @nbytes: Size in bytes
 * @ppos: Offset
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes,
			   loff_t *ppos)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	struct crq_server_adapter *adapter;
	struct ibmvmc_buffer *buffer;
	ssize_t n;
	ssize_t retval = 0;
	unsigned long flags;
	DEFINE_WAIT(wait);

	pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n",
		 (unsigned long)file, (unsigned long)buf,
		 (unsigned long)nbytes);

	if (nbytes == 0)
		return 0;

	if (nbytes > ibmvmc.max_mtu) {
		pr_warn("ibmvmc: read: nbytes invalid 0x%x\n",
			(unsigned int)nbytes);
		return -EINVAL;
	}

	session = file->private_data;
	if (!session) {
		pr_warn("ibmvmc: read: no session\n");
		return -EIO;
	}

	hmc = session->hmc;
	if (!hmc) {
		pr_warn("ibmvmc: read: no hmc\n");
		return -EIO;
	}

	adapter = hmc->adapter;
	if (!adapter) {
		pr_warn("ibmvmc: read: no adapter\n");
		return -EIO;
	}

	do {
		prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&hmc->lock, flags);
		if (hmc->queue_tail != hmc->queue_head)
			/* Data is available */
			break;

		spin_unlock_irqrestore(&hmc->lock, flags);

		if (!session->valid) {
			retval = -EBADFD;
			goto out;
		}
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		}

		schedule();

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
	} while (1);

	buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]);
	hmc->queue_tail++;
	if (hmc->queue_tail == ibmvmc_max_buf_pool_size)
		hmc->queue_tail = 0;
	spin_unlock_irqrestore(&hmc->lock, flags);

	nbytes = min_t(size_t, nbytes, buffer->msg_len);
	n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes);
	dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes);
	ibmvmc_free_hmc_buffer(hmc, buffer);
	retval = nbytes;

	if (n) {
		dev_warn(adapter->dev, "read: copy to user failed.\n");
		retval = -EFAULT;
	}

out:
	finish_wait(&ibmvmc_read_wait, &wait);
	dev_dbg(adapter->dev, "read: out %ld\n", retval);
	return retval;
}

/**
 * ibmvmc_poll - Poll
 *
 * @file: file struct
 * @wait: Poll Table
 *
 * Return:
 *	poll.h return values
 */
static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
{
	struct ibmvmc_file_session *session;
	struct ibmvmc_hmc *hmc;
	unsigned int mask = 0;

	session = file->private_data;
	if (!session)
		return 0;

	hmc = session->hmc;
	if (!hmc)
		return 0;

	poll_wait(file, &ibmvmc_read_wait, wait);

	if (hmc->queue_head != hmc->queue_tail)
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

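/*
 * Userspace note (illustrative): the device only ever reports readability;
 * there is no POLLOUT signaling, so writers retry on -EBUSY instead. A
 * minimal wait for inbound data, assuming a descriptor vmc_fd opened on
 * the VMC device, might look like:
 *
 *	struct pollfd pfd = { .fd = vmc_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(vmc_fd, buf, sizeof(buf));
 */
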
/**
 * ibmvmc_write - Write
 *
 * @file: file struct
 * @buffer: Character buffer
 * @count: Count field
 * @ppos: Offset
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static ssize_t ibmvmc_write(struct file *file, const char *buffer,
			    size_t count, loff_t *ppos)
{
	struct ibmvmc_buffer *vmc_buffer;
	struct ibmvmc_file_session *session;
	struct crq_server_adapter *adapter;
	struct ibmvmc_hmc *hmc;
	unsigned char *buf;
	unsigned long flags;
	size_t bytes;
	const char *p = buffer;
	size_t c = count;
	int ret = 0;

	session = file->private_data;
	if (!session)
		return -EIO;

	hmc = session->hmc;
	if (!hmc)
		return -EIO;

	spin_lock_irqsave(&hmc->lock, flags);
	if (hmc->state == ibmhmc_state_free) {
		/* HMC connection is not valid (possibly was reset under us). */
		ret = -EIO;
		goto out;
	}

	adapter = hmc->adapter;
	if (!adapter) {
		ret = -EIO;
		goto out;
	}

	if (count > ibmvmc.max_mtu) {
		dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
			 (unsigned long)count);
		ret = -EIO;
		goto out;
	}

	/* Waiting for the open resp message to the ioctl(1) - retry */
	if (hmc->state == ibmhmc_state_opening) {
		ret = -EBUSY;
		goto out;
	}

	/* Make sure the ioctl() was called & the open msg sent, and that
	 * the HMC connection has not failed.
	 */
	if (hmc->state != ibmhmc_state_ready) {
		ret = -EIO;
		goto out;
	}

	vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
	if (!vmc_buffer) {
		/* No buffer available for the msg send, or we have not yet
		 * completed the open/open_resp sequence. Retry until this is
		 * complete.
		 */
		ret = -EBUSY;
		goto out;
	}
	if (!vmc_buffer->real_addr_local) {
		dev_err(adapter->dev, "no buffer storage assigned\n");
		ret = -EIO;
		goto out;
	}
	buf = vmc_buffer->real_addr_local;

	while (c > 0) {
		bytes = min_t(size_t, c, vmc_buffer->size);

		bytes -= copy_from_user(buf, p, bytes);
		if (!bytes) {
			ret = -EFAULT;
			goto out;
		}
		c -= bytes;
		p += bytes;
	}
	if (p == buffer)
		goto out;

	file->f_path.dentry->d_inode->i_mtime = current_time(file_inode(file));
	mark_inode_dirty(file->f_path.dentry->d_inode);

	dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
		(unsigned long)file, (unsigned long)count);

	ibmvmc_send_msg(adapter, vmc_buffer, hmc, count);
	ret = p - buffer;
out:
	spin_unlock_irqrestore(&hmc->lock, flags);
	return (ssize_t)(ret);
}

/**
 * ibmvmc_setup_hmc - Setup the HMC
 *
 * @session: ibmvmc_file_session struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session)
{
	struct ibmvmc_hmc *hmc;
	unsigned int valid, free, index;

	if (ibmvmc.state == ibmvmc_state_failed) {
		pr_warn("ibmvmc: Reserve HMC: state_failed\n");
		return -EIO;
	}

	if (ibmvmc.state < ibmvmc_state_ready) {
		pr_warn("ibmvmc: Reserve HMC: not state_ready\n");
		return -EAGAIN;
	}

	/* Device is busy until capabilities have been exchanged and we
	 * have a generic buffer for each possible HMC connection.
	 */
	for (index = 0; index <= ibmvmc.max_hmc_index; index++) {
		valid = 0;
		ibmvmc_count_hmc_buffers(index, &valid, &free);
		if (valid == 0) {
			pr_warn("ibmvmc: buffers not ready for index %d\n",
				index);
			return -ENOBUFS;
		}
	}

	/* Get an hmc object, and transition to ibmhmc_state_initial */
	hmc = ibmvmc_get_free_hmc();
	if (!hmc) {
		pr_warn("%s: free hmc not found\n", __func__);
		return -EBUSY;
	}

	hmc->session = hmc->session + 1;
	if (hmc->session == 0xff)
		hmc->session = 1;

	session->hmc = hmc;
	hmc->adapter = &ibmvmc_adapter;
	hmc->file_session = session;
	session->valid = 1;

	return 0;
}

/**
 * ibmvmc_ioctl_sethmcid - IOCTL Set HMC ID
 *
 * @session: ibmvmc_file_session struct
 * @new_hmc_id: HMC id field
 *
 * IOCTL command to setup the hmc id
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session,
				  unsigned char __user *new_hmc_id)
{
	struct ibmvmc_hmc *hmc;
	struct ibmvmc_buffer *buffer;
	size_t bytes;
	char print_buffer[HMC_ID_LEN + 1];
	unsigned long flags;
	long rc = 0;

	/* Reserve HMC session */
	hmc = session->hmc;
	if (!hmc) {
		rc = ibmvmc_setup_hmc(session);
		if (rc)
			return rc;

		hmc = session->hmc;
		if (!hmc) {
			pr_err("ibmvmc: setup_hmc success but no hmc\n");
			return -EIO;
		}
	}

	if (hmc->state != ibmhmc_state_initial) {
		pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n",
			hmc->state);
		return -EIO;
	}

	bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN);
	if (bytes)
		return -EFAULT;

	/* Send Open Session command */
	spin_lock_irqsave(&hmc->lock, flags);
	buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
	spin_unlock_irqrestore(&hmc->lock, flags);

	if (!buffer || !buffer->real_addr_local) {
		pr_warn("ibmvmc: sethmcid: no buffer available\n");
		return -EIO;
	}

	/* Make sure buffer is NULL terminated before trying to print it */
	memset(print_buffer, 0, HMC_ID_LEN + 1);
	strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN);
	pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);

	memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN);
	/* RDMA over ID, send open msg, change state to ibmhmc_state_opening */
	rc = ibmvmc_send_open(buffer, hmc);

	return rc;
}

/**
 * ibmvmc_ioctl_query - IOCTL Query
 *
 * @session: ibmvmc_file_session struct
 * @ret_struct: ibmvmc_query_struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session,
			       struct ibmvmc_query_struct __user *ret_struct)
{
	struct ibmvmc_query_struct query_struct;
	size_t bytes;

	memset(&query_struct, 0, sizeof(query_struct));
	query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial);
	query_struct.state = ibmvmc.state;
	query_struct.vmc_drc_index = ibmvmc.vmc_drc_index;

	bytes = copy_to_user(ret_struct, &query_struct,
			     sizeof(query_struct));
	if (bytes)
		return -EFAULT;

	return 0;
}

/**
 * ibmvmc_ioctl_requestvmc - IOCTL Request VMC
 *
 * @session: ibmvmc_file_session struct
 * @ret_vmc_index: VMC Index
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
				    u32 __user *ret_vmc_index)
{
	/* TODO: (adreznec) Add locking to control multiple process access */
	size_t bytes;
	long rc;
	u32 vmc_drc_index;

	/* Call to request the VMC device from phyp */
	rc = h_request_vmc(&vmc_drc_index);
	pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc);

	if (rc == H_SUCCESS) {
		rc = 0;
	} else if (rc == H_FUNCTION) {
		pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n");
		return -EPERM;
	} else if (rc == H_AUTHORITY) {
		pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n");
		return -EPERM;
	} else if (rc == H_HARDWARE) {
		pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n");
		return -EIO;
	} else if (rc == H_RESOURCE) {
		pr_err("ibmvmc: requestvmc: vmc resource unavailable\n");
		return -ENODEV;
	} else if (rc == H_NOT_AVAILABLE) {
		pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n");
		return -EPERM;
	} else if (rc == H_PARAMETER) {
		pr_err("ibmvmc: requestvmc: invalid parameter\n");
		return -EINVAL;
	}

	/* Success, set the vmc index in global struct */
	ibmvmc.vmc_drc_index = vmc_drc_index;

	bytes = copy_to_user(ret_vmc_index, &vmc_drc_index,
			     sizeof(*ret_vmc_index));
	if (bytes) {
		pr_warn("ibmvmc: requestvmc: copy to user failed.\n");
		return -EFAULT;
	}
	return rc;
}

/**
 * ibmvmc_ioctl - IOCTL
 *
 * @file: file struct
 * @cmd: cmd field
 * @arg: Argument field
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static long ibmvmc_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	struct ibmvmc_file_session *session = file->private_data;

	pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n",
		 (unsigned long)file, cmd, arg,
		 (unsigned long)session);

	if (!session) {
		pr_warn("ibmvmc: ioctl: no session\n");
		return -EIO;
	}

	switch (cmd) {
	case VMC_IOCTL_SETHMCID:
		return ibmvmc_ioctl_sethmcid(session,
			(unsigned char __user *)arg);
	case VMC_IOCTL_QUERY:
		return ibmvmc_ioctl_query(session,
			(struct ibmvmc_query_struct __user *)arg);
	case VMC_IOCTL_REQUESTVMC:
		return ibmvmc_ioctl_requestvmc(session,
			(unsigned int __user *)arg);
	default:
		pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd);
		return -EINVAL;
	}
}

static const struct file_operations ibmvmc_fops = {
	.owner		= THIS_MODULE,
	.read		= ibmvmc_read,
	.write		= ibmvmc_write,
	.poll		= ibmvmc_poll,
	.unlocked_ioctl	= ibmvmc_ioctl,
	.open		= ibmvmc_open,
	.release	= ibmvmc_close,
};

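/*
 * Taken together, a management application session runs:
 * open -> VMC_IOCTL_QUERY -> VMC_IOCTL_SETHMCID -> write/poll/read -> close.
 * A hedged userspace sketch (error handling elided; the "/dev/ibmvmc"
 * path and the hmc_id contents are assumptions of this example):
 *
 *	int fd = open("/dev/ibmvmc", O_RDWR);
 *	struct ibmvmc_query_struct query;
 *	unsigned char hmc_id[HMC_ID_LEN] = "...";
 *
 *	ioctl(fd, VMC_IOCTL_QUERY, &query);	// VMC present and ready?
 *	ioctl(fd, VMC_IOCTL_SETHMCID, hmc_id);	// reserve HMC, send Open
 *	write(fd, req, req_len);		// HMC command to hypervisor
 *	read(fd, resp, resp_len);		// blocking read of response
 *	close(fd);
 */
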
/**
 * ibmvmc_add_buffer - Add Buffer
 *
 * @adapter: crq_server_adapter struct
 * @crq: ibmvmc_crq_msg struct
 *
 * This message transfers a buffer from hypervisor ownership to management
 * partition ownership. The LIOBA is obtained from the virtual TCE table
 * associated with the hypervisor side of the VMC device, and points to a
 * buffer of size MTU (as established in the capabilities exchange).
 *
 * Typical flow for adding buffers:
 * 1. A new management application connection is opened by the management
 *	partition.
 * 2. The hypervisor assigns new buffers for the traffic associated with
 *	that connection.
 * 3. The hypervisor sends VMC Add Buffer messages to the management
 *	partition, informing it of the new buffers.
 * 4. The hypervisor sends an HMC protocol message (to the management
 *	application) notifying it of the new buffers. This informs the
 *	application that it has buffers available for sending HMC
 *	commands.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_add_buffer(struct crq_server_adapter *adapter,
			     struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;
	buffer_id = be16_to_cpu(crq->var2.buffer_id);

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n",
			hmc_index);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
		dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n",
			buffer_id);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
	buffer = &hmcs[hmc_index].buffer[buffer_id];

	if (buffer->real_addr_local || buffer->dma_addr_local) {
		dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n",
			 (unsigned long)buffer_id);
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev),
						   ibmvmc.max_mtu,
						   &buffer->dma_addr_local);

	if (!buffer->real_addr_local) {
		dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n");
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
	buffer->size = ibmvmc.max_mtu;
	buffer->owner = crq->var1.owner;
	buffer->free = 1;
	/* Must ensure valid==1 is observable only after all other fields are */
	dma_wmb();
	buffer->valid = 1;
	buffer->id = buffer_id;

	dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n");
	dev_dbg(adapter->dev, "   index: %d, session: %d, buffer: 0x%x, owner: %d\n",
		hmc_index, hmc_session, buffer_id, buffer->owner);
	dev_dbg(adapter->dev, "   local: 0x%x, remote: 0x%x\n",
		(u32)buffer->dma_addr_local,
		(u32)buffer->dma_addr_remote);
	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);

	ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
				    hmc_index, buffer_id);

	return rc;
}

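/*
 * Buffer ownership from here on follows a simple handshake: a buffer owned
 * by VMC_BUF_OWNER_ALPHA (the management partition) can be claimed by the
 * write()/sethmcid paths via ibmvmc_get_valid_hmc_buffer(); sending it
 * flips the owner to VMC_BUF_OWNER_HV (see ibmvmc_send_msg()), and the
 * hypervisor's signal flips it back to ALPHA when inbound data arrives
 * (see ibmvmc_recv_msg() below).
 */
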
/**
 * ibmvmc_rem_buffer - Remove Buffer
 *
 * @adapter: crq_server_adapter struct
 * @crq: ibmvmc_crq_msg struct
 *
 * This message requests an HMC buffer to be transferred from management
 * partition ownership to hypervisor ownership. The management partition may
 * not be able to satisfy the request at a particular point in time if all its
 * buffers are in use. The management partition requires a depth of at least
 * one inbound buffer to allow management application commands to flow to the
 * hypervisor. It is, therefore, an interface error for the hypervisor to
 * attempt to remove the management partition's last buffer.
 *
 * The hypervisor is expected to manage buffer usage with the management
 * application directly and inform the management partition when buffers may be
 * removed. The typical flow for removing buffers:
 *
 * 1. The management application no longer needs a communication path to a
 *	particular hypervisor function. That function is closed.
 * 2. The hypervisor and the management application quiesce all traffic to that
 *	function. The hypervisor requests a reduction in buffer pool size.
 * 3. The management application acknowledges the reduction in buffer pool size.
 * 4. The hypervisor sends a Remove Buffer message to the management partition,
 *	informing it of the reduction in buffers.
 * 5. The management partition verifies it can remove the buffer. This is
 *	possible if buffers have been quiesced.
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
/*
 * The hypervisor requested that we pick an unused buffer, and return it.
 * Before sending the buffer back, we free any storage associated with the
 * buffer.
 */
static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter,
			     struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id = 0;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n",
			 hmc_index);
		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
	buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index);
	if (!buffer) {
		dev_info(adapter->dev, "rem_buffer: no buffer to remove\n");
		spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
		ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER,
					    hmc_session, hmc_index,
					    VMC_INVALID_BUFFER_ID);
		return -1;
	}

	buffer_id = buffer->id;

	if (buffer->valid)
		free_dma_buffer(to_vio_dev(adapter->dev),
				ibmvmc.max_mtu,
				buffer->real_addr_local,
				buffer->dma_addr_local);

	memset(buffer, 0, sizeof(struct ibmvmc_buffer));
	spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);

	dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id);
	ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
				    hmc_index, buffer_id);

	return rc;
}

static int ibmvmc_recv_msg(struct crq_server_adapter *adapter,
			   struct ibmvmc_crq_msg *crq)
{
	struct ibmvmc_buffer *buffer;
	struct ibmvmc_hmc *hmc;
	unsigned long msg_len;
	u8 hmc_index;
	u8 hmc_session;
	u16 buffer_id;
	unsigned long flags;
	int rc = 0;

	if (!crq)
		return -1;

	/* Hypervisor writes CRQs directly into our memory in big endian */
	dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n",
		be64_to_cpu(*((unsigned long *)crq)),
		be64_to_cpu(*(((unsigned long *)crq) + 1)));

	hmc_session = crq->hmc_session;
	hmc_index = crq->hmc_index;
	buffer_id = be16_to_cpu(crq->var2.buffer_id);
	msg_len = be32_to_cpu(crq->var3.msg_len);

	if (hmc_index > ibmvmc.max_hmc_index) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n",
			hmc_index);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	if (buffer_id >= ibmvmc.max_buffer_pool_size) {
		dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n",
			buffer_id);
		ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
					    hmc_session, hmc_index, buffer_id);
		return -1;
	}

	hmc = &hmcs[hmc_index];
	spin_lock_irqsave(&hmc->lock, flags);

	if (hmc->state == ibmhmc_state_free) {
		dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n",
			hmc->state);
		/* HMC connection is not valid (possibly was reset under us). */
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	buffer = &hmc->buffer[buffer_id];

	if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) {
		dev_err(adapter->dev, "Recv_msg: not valid, or not HV. 0x%x 0x%x\n",
			buffer->valid, buffer->owner);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* RDMA the data into the partition. */
	rc = h_copy_rdma(msg_len,
			 adapter->riobn,
			 buffer->dma_addr_remote,
			 adapter->liobn,
			 buffer->dma_addr_local);

	dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n",
		(unsigned int)msg_len, (unsigned int)buffer_id,
		(unsigned int)hmc->queue_head, (unsigned int)hmc_index);
	buffer->msg_len = msg_len;
	buffer->free = 0;
	buffer->owner = VMC_BUF_OWNER_ALPHA;

	if (rc) {
		dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n",
			rc);
		spin_unlock_irqrestore(&hmc->lock, flags);
		return -1;
	}

	/* Must be locked because read operates on the same data */
	hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id;
	hmc->queue_head++;
	if (hmc->queue_head == ibmvmc_max_buf_pool_size)
		hmc->queue_head = 0;

	if (hmc->queue_head == hmc->queue_tail)
		dev_err(adapter->dev, "outbound buffer queue wrapped.\n");

	spin_unlock_irqrestore(&hmc->lock, flags);

	wake_up_interruptible(&ibmvmc_read_wait);

	return 0;
}
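/*
 * Read-side sketch (illustrative only; the real consumer is the chardev read
 * path that sleeps on ibmvmc_read_wait): the buffer id queued above is meant
 * to be popped under the same hmc->lock from the tail of the ring, roughly:
 *
 *	spin_lock_irqsave(&hmc->lock, flags);
 *	if (hmc->queue_tail != hmc->queue_head) {
 *		buffer_id = hmc->queue_outbound_msgs[hmc->queue_tail];
 *		if (++hmc->queue_tail == ibmvmc_max_buf_pool_size)
 *			hmc->queue_tail = 0;
 *	}
 *	spin_unlock_irqrestore(&hmc->lock, flags);
 */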
/**
 * ibmvmc_process_capabilities - Process Capabilities
 *
 * @adapter: crq_server_adapter struct
 * @crqp: ibmvmc_crq_msg struct
 */
static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter,
					struct ibmvmc_crq_msg *crqp)
{
	struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp;

	if ((be16_to_cpu(crq->version) >> 8) !=
	    (IBMVMC_PROTOCOL_VERSION >> 8)) {
		dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n",
			be16_to_cpu(crq->version),
			IBMVMC_PROTOCOL_VERSION);
		ibmvmc.state = ibmvmc_state_failed;
		return;
	}

	ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu));
	ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size,
					    be16_to_cpu(crq->pool_size));
	ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1;
	ibmvmc.state = ibmvmc_state_ready;

	dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n",
		 ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size,
		 ibmvmc.max_hmc_index);
}
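/*
 * Worked example with hypothetical numbers: only the major version (the high
 * byte) must match, so a local 0x0102 against a remote 0x0101 passes the
 * check above. If the module allows a 4 MB MTU but the hypervisor advertises
 * 1 MB, min_t() settles on 1 MB; a local limit of 16 HMCs against an
 * advertised max_hmc of 2 gives max_hmc_index = min(16, 2) - 1 = 1, i.e.
 * valid indices 0 and 1.
 */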
/**
 * ibmvmc_validate_hmc_session - Validate HMC Session
 *
 * @adapter: crq_server_adapter struct
 * @crq: ibmvmc_crq_msg struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter,
				       struct ibmvmc_crq_msg *crq)
{
	unsigned char hmc_index;

	hmc_index = crq->hmc_index;

	/* A zero session id is not checked against the per-HMC session */
	if (crq->hmc_session == 0)
		return 0;

	if (hmc_index > ibmvmc.max_hmc_index)
		return -1;

	if (hmcs[hmc_index].session != crq->hmc_session) {
		dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n",
			 hmcs[hmc_index].session, crq->hmc_session);
		return -1;
	}

	return 0;
}
/**
 * ibmvmc_reset - Reset
 *
 * @adapter:	crq_server_adapter struct
 * @xport_event:	true if the partner closed their CRQ (transport event),
 *			in which case no local CRQ reset is needed; false if we
 *			are closing the CRQ ourselves and must schedule a reset
 *
 * Closes all HMC sessions and conditionally schedules a CRQ reset.
 */
static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event)
{
	int i;

	if (ibmvmc.state != ibmvmc_state_sched_reset) {
		dev_info(adapter->dev, "*** Reset to initial state.\n");
		for (i = 0; i < ibmvmc_max_hmcs; i++)
			ibmvmc_return_hmc(&hmcs[i], xport_event);

		if (xport_event) {
			/* CRQ was closed by the partner. We don't need to do
			 * anything except set ourselves to the correct state
			 * to handle init msgs.
			 */
			ibmvmc.state = ibmvmc_state_crqinit;
		} else {
			/* The partner did not close their CRQ - instead, we're
			 * closing the CRQ on our end. Need to schedule this
			 * for process context, because CRQ reset may require a
			 * sleep.
			 *
			 * Setting ibmvmc.state here immediately prevents
			 * ibmvmc_open from completing until the reset
			 * completes in process context.
			 */
			ibmvmc.state = ibmvmc_state_sched_reset;
			dev_dbg(adapter->dev, "Device reset scheduled");
			wake_up_interruptible(&adapter->reset_wait_queue);
		}
	}
}
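/*
 * Summary of the two paths through ibmvmc_reset():
 *
 *	xport_event == true:  the partner already closed its CRQ; every HMC is
 *			      returned and state goes straight to crqinit.
 *	xport_event == false: we are closing the CRQ ourselves; state goes to
 *			      sched_reset and the reset kthread below performs
 *			      ibmvmc_reset_crq_queue() in process context.
 */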
/**
 * ibmvmc_reset_task - Reset Task
 *
 * @data:	crq_server_adapter struct, as passed to kthread_run()
 *
 * Performs a CRQ reset of the VMC device in process context.
 * NOTE: This function should not be called directly; use ibmvmc_reset().
 */
static int ibmvmc_reset_task(void *data)
{
	struct crq_server_adapter *adapter = data;
	int rc;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		wait_event_interruptible(adapter->reset_wait_queue,
			(ibmvmc.state == ibmvmc_state_sched_reset) ||
			kthread_should_stop());

		if (kthread_should_stop())
			break;

		dev_dbg(adapter->dev, "CRQ resetting in process context");
		tasklet_disable(&adapter->work_task);

		rc = ibmvmc_reset_crq_queue(adapter);

		if (rc != H_SUCCESS && rc != H_RESOURCE) {
			dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
				rc);
			ibmvmc.state = ibmvmc_state_failed;
		} else {
			ibmvmc.state = ibmvmc_state_crqinit;

			if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0)
			    != 0 && rc != H_RESOURCE)
				dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
		}

		vio_enable_interrupts(to_vio_dev(adapter->dev));
		tasklet_enable(&adapter->work_task);
	}

	return 0;
}
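/*
 * Usage note: this kthread is started once from ibmvmc_probe() via
 * kthread_run() and then parks on adapter->reset_wait_queue. ibmvmc_reset()
 * wakes it with wake_up_interruptible() after setting
 * ibmvmc.state = ibmvmc_state_sched_reset.
 */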
/**
 * ibmvmc_process_open_resp - Process Open Response
 *
 * @crq: ibmvmc_crq_msg struct
 * @adapter: crq_server_adapter struct
 *
 * This command is sent by the hypervisor in response to the Interface
 * Open message. When this message is received, the indicated buffer is
 * again available for management partition use.
 */
static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq,
				     struct crq_server_adapter *adapter)
{
	unsigned char hmc_index;
	unsigned short buffer_id;

	hmc_index = crq->hmc_index;
	if (hmc_index > ibmvmc.max_hmc_index) {
		/* Why would PHYP give an index > max negotiated? */
		ibmvmc_reset(adapter, false);
		return;
	}

	if (crq->status) {
		dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n",
			 crq->status);
		ibmvmc_return_hmc(&hmcs[hmc_index], false);
		return;
	}

	if (hmcs[hmc_index].state == ibmhmc_state_opening) {
		buffer_id = be16_to_cpu(crq->var2.buffer_id);
		if (buffer_id >= ibmvmc.max_buffer_pool_size) {
			dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n",
				buffer_id);
			hmcs[hmc_index].state = ibmhmc_state_failed;
		} else {
			ibmvmc_free_hmc_buffer(&hmcs[hmc_index],
					       &hmcs[hmc_index].buffer[buffer_id]);
			hmcs[hmc_index].state = ibmhmc_state_ready;
			dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n");
		}
	} else {
		dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n",
			 hmcs[hmc_index].state);
	}
}
/**
 * ibmvmc_process_close_resp - Process Close Response
 *
 * @crq: ibmvmc_crq_msg struct
 * @adapter: crq_server_adapter struct
 *
 * This command is sent by the hypervisor in response to the management
 * application Interface Close message.
 *
 * If the close fails, simply reset the entire driver as the state of the VMC
 * must be in tough shape.
 */
static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq,
				      struct crq_server_adapter *adapter)
{
	unsigned char hmc_index;

	hmc_index = crq->hmc_index;
	if (hmc_index > ibmvmc.max_hmc_index) {
		ibmvmc_reset(adapter, false);
		return;
	}

	if (crq->status) {
		dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n",
			 crq->status);
		ibmvmc_reset(adapter, false);
		return;
	}

	ibmvmc_return_hmc(&hmcs[hmc_index], false);
}
/**
 * ibmvmc_crq_process - Process CRQ
 *
 * @adapter: crq_server_adapter struct
 * @crq: ibmvmc_crq_msg struct
 *
 * Process the CRQ message based upon the type of message received.
 */
static void ibmvmc_crq_process(struct crq_server_adapter *adapter,
			       struct ibmvmc_crq_msg *crq)
{
	switch (crq->type) {
	case VMC_MSG_CAP_RESP:
		dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n",
			crq->type);
		if (ibmvmc.state == ibmvmc_state_capabilities)
			ibmvmc_process_capabilities(adapter, crq);
		else
			dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n",
				 ibmvmc.state);
		break;
	case VMC_MSG_OPEN_RESP:
		dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_process_open_resp(crq, adapter);
		break;
	case VMC_MSG_ADD_BUF:
		dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_add_buffer(adapter, crq);
		break;
	case VMC_MSG_REM_BUF:
		dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_rem_buffer(adapter, crq);
		break;
	case VMC_MSG_SIGNAL:
		dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_recv_msg(adapter, crq);
		break;
	case VMC_MSG_CLOSE_RESP:
		dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n",
			crq->type);
		if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
			ibmvmc_process_close_resp(crq, adapter);
		break;
	case VMC_MSG_CAP:
	case VMC_MSG_OPEN:
	case VMC_MSG_CLOSE:
	case VMC_MSG_ADD_BUF_RESP:
	case VMC_MSG_REM_BUF_RESP:
		dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n",
			 crq->type);
		break;
	default:
		dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n",
			 crq->type);
		break;
	}
}
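/*
 * Note on the "unexpected" group above: VMC_MSG_CAP, VMC_MSG_OPEN,
 * VMC_MSG_CLOSE, VMC_MSG_ADD_BUF_RESP and VMC_MSG_REM_BUF_RESP are messages
 * this partition sends (or responses it issues), so receiving one of them
 * from the hypervisor indicates a confused partner and is only logged.
 */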
/**
 * ibmvmc_handle_crq_init - Handle CRQ Init
 *
 * @crq: ibmvmc_crq_msg struct
 * @adapter: crq_server_adapter struct
 *
 * Handle the type of crq initialization based on whether
 * it is a message or a response.
 */
static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq,
				   struct crq_server_adapter *adapter)
{
	switch (crq->type) {
	case 0x01:	/* Initialization message */
		dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n",
			ibmvmc.state);
		if (ibmvmc.state == ibmvmc_state_crqinit) {
			/* Send back a response */
			if (ibmvmc_send_crq(adapter, 0xC002000000000000,
					    0) == 0)
				ibmvmc_send_capabilities(adapter);
			else
				dev_err(adapter->dev, "Unable to send init rsp\n");
		} else {
			dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n",
				ibmvmc.state, ibmvmc.max_mtu);
		}
		break;
	case 0x02:	/* Initialization response */
		dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n",
			ibmvmc.state);
		if (ibmvmc.state == ibmvmc_state_crqinit)
			ibmvmc_send_capabilities(adapter);
		break;
	default:
		dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n",
			 (unsigned long)crq->type);
	}
}
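/*
 * The init handshake implemented above and in ibmvmc_probe(), in CRQ form
 * (valid byte 0xC0, type 0x01 = init, type 0x02 = init complete):
 *
 *	this partition               partner
 *	0xC001... (init)        -->
 *	                        <--  0xC002... (init complete)
 *	send capabilities       -->
 *
 * Whichever side comes up second sends the init message; the other answers
 * with init complete, and either event triggers ibmvmc_send_capabilities().
 */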
/**
 * ibmvmc_handle_crq - Handle CRQ
 *
 * @crq: ibmvmc_crq_msg struct
 * @adapter: crq_server_adapter struct
 *
 * Read the command elements from the command queue and execute the
 * requests based upon the type of crq message.
 */
static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq,
			      struct crq_server_adapter *adapter)
{
	switch (crq->valid) {
	case 0xC0:	/* initialization */
		ibmvmc_handle_crq_init(crq, adapter);
		break;
	case 0xFF:	/* Hypervisor telling us the connection is closed */
		dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n");
		ibmvmc_reset(adapter, true);
		break;
	case 0x80:	/* real payload */
		ibmvmc_crq_process(adapter, crq);
		break;
	default:
		dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n",
			 crq->valid);
		break;
	}
}
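/**
 * ibmvmc_task - CRQ Task
 *
 * @data: crq_server_adapter struct, cast through the tasklet's unsigned long
 *
 * Tasklet that drains the CRQ. Once the queue appears empty, interrupts are
 * re-enabled and the queue is polled one more time; this closes the window
 * where an entry could arrive after the last poll but before interrupts were
 * back on. Processing stops early if a CRQ reset has been scheduled.
 */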
static void ibmvmc_task(unsigned long data)
{
	struct crq_server_adapter *adapter =
		(struct crq_server_adapter *)data;
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct ibmvmc_crq_msg *crq;
	int done = 0;

	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
			ibmvmc_handle_crq(crq, adapter);
			crq->valid = 0x00;

			/* CRQ reset was requested, stop processing CRQs.
			 * Interrupts will be re-enabled by the reset task.
			 */
			if (ibmvmc.state == ibmvmc_state_sched_reset)
				return;
		}

		vio_enable_interrupts(vdev);
		crq = crq_queue_next_crq(&adapter->queue);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvmc_handle_crq(crq, adapter);
			crq->valid = 0x00;

			/* CRQ reset was requested, stop processing CRQs.
			 * Interrupts will be re-enabled by the reset task.
			 */
			if (ibmvmc.state == ibmvmc_state_sched_reset)
				return;
		} else {
			done = 1;
		}
	}
}
/**
 * ibmvmc_init_crq_queue - Init CRQ Queue
 *
 * @adapter: crq_server_adapter struct
 *
 * Return:
 *	0 - Success
 *	Non-zero - Failure
 */
static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
{
	struct vio_dev *vdev = to_vio_dev(adapter->dev);
	struct crq_queue *queue = &adapter->queue;
	int rc = 0;
	int retrc = 0;

	queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);

	if (!queue->msgs)
		goto malloc_failed;

	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(adapter->dev, queue->msg_token))
		goto map_failed;

	retrc = plpar_hcall_norets(H_REG_CRQ,
				   vdev->unit_address,
				   queue->msg_token, PAGE_SIZE);
	rc = retrc;

	if (rc == H_RESOURCE)
		rc = ibmvmc_reset_crq_queue(adapter);

	if (rc == 2) {	/* H_CLOSED */
		dev_warn(adapter->dev, "Partner adapter not ready\n");
		retrc = 0;
	} else if (rc != 0) {
		dev_err(adapter->dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter);

	if (request_irq(vdev->irq,
			ibmvmc_handle_event,
			0, "ibmvmc", (void *)adapter) != 0) {
		dev_err(adapter->dev, "couldn't register irq 0x%x\n",
			vdev->irq);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc != 0) {
		dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc);
		goto req_irq_failed;
	}

	return retrc;

req_irq_failed:
	/* Cannot have any work since we either never got our IRQ registered,
	 * or never got interrupts enabled
	 */
	tasklet_kill(&adapter->work_task);
	h_free_crq(vdev->unit_address);
reg_crq_failed:
	dma_unmap_single(adapter->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)queue->msgs);
malloc_failed:
	return -ENOMEM;
}
/* Fill in the liobn and riobn fields on the adapter */
static int read_dma_window(struct vio_dev *vdev,
			   struct crq_server_adapter *adapter)
{
	const __be32 *dma_window;
	const __be32 *prop;

	/* TODO Using of_parse_dma_window would be better, but it doesn't give
	 * a way to read multiple windows without already knowing the size of
	 * a window or the number of windows
	 */
	dma_window =
		(const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
						  NULL);
	if (!dma_window) {
		dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n");
		return -1;
	}

	adapter->liobn = be32_to_cpu(*dma_window);
	dma_window++;

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
						 NULL);
	if (!prop) {
		dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
						 NULL);
	if (!prop) {
		dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n");
		dma_window++;
	} else {
		dma_window += be32_to_cpu(*prop);
	}

	/* dma_window should point to the second window now */
	adapter->riobn = be32_to_cpu(*dma_window);

	return 0;
}
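/*
 * Illustrative (hypothetical) device-tree layout the walk above expects,
 * assuming two address cells and two size cells per window:
 *
 *	ibm,my-dma-window = <liobn a_hi a_lo s_hi s_lo
 *			     riobn a_hi a_lo s_hi s_lo>;
 *
 * liobn is taken from the first cell, the address/size cells of the first
 * window are skipped (defaulting to one cell each when the #cells properties
 * are missing), and riobn is read from the first cell of the second window.
 */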
static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct crq_server_adapter *adapter = &ibmvmc_adapter;
	int rc;

	dev_set_drvdata(&vdev->dev, NULL);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev = &vdev->dev;

	dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address);

	rc = read_dma_window(vdev, adapter);
	if (rc != 0) {
		ibmvmc.state = ibmvmc_state_failed;
		return -1;
	}

	dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
		adapter->liobn, adapter->riobn);

	init_waitqueue_head(&adapter->reset_wait_queue);
	adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc");
	if (IS_ERR(adapter->reset_task)) {
		dev_err(adapter->dev, "Failed to start reset thread\n");
		ibmvmc.state = ibmvmc_state_failed;
		rc = PTR_ERR(adapter->reset_task);
		adapter->reset_task = NULL;
		return rc;
	}

	rc = ibmvmc_init_crq_queue(adapter);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
			rc);
		ibmvmc.state = ibmvmc_state_failed;
		goto crq_failed;
	}

	ibmvmc.state = ibmvmc_state_crqinit;

	/* Try to send an initialization message. Note that this is allowed
	 * to fail if the other end is not active. In that case we just wait
	 * for the other side to initialize.
	 */
	if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
	    rc != H_RESOURCE)
		dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");

	dev_set_drvdata(&vdev->dev, adapter);

	return 0;

crq_failed:
	kthread_stop(adapter->reset_task);
	adapter->reset_task = NULL;
	return -EPERM;
}
static int ibmvmc_remove(struct vio_dev *vdev)
{
	struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev);

	dev_info(adapter->dev, "Entering remove for UA 0x%x\n",
		 vdev->unit_address);
	ibmvmc_release_crq_queue(adapter);

	return 0;
}

static struct vio_device_id ibmvmc_device_table[] = {
	{ "ibm,vmc", "IBM,vmc" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvmc_device_table);

static struct vio_driver ibmvmc_driver = {
	.name     = ibmvmc_driver_name,
	.id_table = ibmvmc_device_table,
	.probe    = ibmvmc_probe,
	.remove   = ibmvmc_remove,
};
static void __init ibmvmc_scrub_module_parms(void)
{
	if (ibmvmc_max_mtu > MAX_MTU) {
		pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU);
		ibmvmc_max_mtu = MAX_MTU;
	} else if (ibmvmc_max_mtu < MIN_MTU) {
		pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU);
		ibmvmc_max_mtu = MIN_MTU;
	}

	if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) {
		pr_warn("ibmvmc: Max buffer pool size reduced to %d\n",
			MAX_BUF_POOL_SIZE);
		ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE;
	} else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) {
		pr_warn("ibmvmc: Max buffer pool size increased to %d\n",
			MIN_BUF_POOL_SIZE);
		ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE;
	}

	if (ibmvmc_max_hmcs > MAX_HMCS) {
		pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS);
		ibmvmc_max_hmcs = MAX_HMCS;
	} else if (ibmvmc_max_hmcs < MIN_HMCS) {
		pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS);
		ibmvmc_max_hmcs = MIN_HMCS;
	}
}
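/*
 * Example (values are illustrative): loading with
 *
 *	modprobe ibmvmc max_mtu=131072 max_hmcs=2
 *
 * leaves values already inside [MIN_*, MAX_*] alone and clamps anything
 * outside, logging a pr_warn() per adjustment. The scrubbed values then seed
 * the defaults used during the capabilities exchange (see
 * ibmvmc_module_init() below).
 */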
static struct miscdevice ibmvmc_miscdev = {
	.name = ibmvmc_driver_name,
	.minor = MISC_DYNAMIC_MINOR,
	.fops = &ibmvmc_fops,
};

static int __init ibmvmc_module_init(void)
{
	int rc, i, j;

	ibmvmc.state = ibmvmc_state_initial;
	pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION);

	rc = misc_register(&ibmvmc_miscdev);
	if (rc) {
		pr_err("ibmvmc: misc registration failed\n");
		goto misc_register_failed;
	}
	pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR,
		ibmvmc_miscdev.minor);

	/* Initialize data structures */
	memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * MAX_HMCS);
	for (i = 0; i < MAX_HMCS; i++) {
		spin_lock_init(&hmcs[i].lock);
		hmcs[i].state = ibmhmc_state_free;
		for (j = 0; j < MAX_BUF_POOL_SIZE; j++)
			hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID;
	}

	/* Sanity check module parms */
	ibmvmc_scrub_module_parms();

	/*
	 * Initialize some reasonable values. Might be negotiated smaller
	 * values during the capabilities exchange.
	 */
	ibmvmc.max_mtu = ibmvmc_max_mtu;
	ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size;
	ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1;

	rc = vio_register_driver(&ibmvmc_driver);
	if (rc) {
		pr_err("ibmvmc: rc %d from vio_register_driver\n", rc);
		goto vio_reg_failed;
	}

	return 0;

vio_reg_failed:
	misc_deregister(&ibmvmc_miscdev);
misc_register_failed:
	return rc;
}
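/*
 * Ordering note: misc_register() runs before vio_register_driver(), so the
 * character device node already exists by the time ibmvmc_probe() can be
 * called; the error and exit paths unwind in the reverse order.
 */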
static void __exit ibmvmc_module_exit(void)
{
	pr_info("ibmvmc: module exit\n");
	vio_unregister_driver(&ibmvmc_driver);
	misc_deregister(&ibmvmc_miscdev);
}

module_init(ibmvmc_module_init);
module_exit(ibmvmc_module_exit);

module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size,
		   int, 0644);
MODULE_PARM_DESC(buf_pool_size, "Buffer pool size");
module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644);
MODULE_PARM_DESC(max_hmcs, "Max HMCs");
module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644);
MODULE_PARM_DESC(max_mtu, "Max MTU");

MODULE_AUTHOR("Steven Royer <seroyer@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM VMC");
MODULE_VERSION(IBMVMC_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");