/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commctrl.c
 *
 * Abstract: Contains all routines for control of the AFA comm layer
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* ssleep prototype */
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <linux/uaccess.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

#define AAC_DEBUG_PREAMBLE	KERN_INFO
#define AAC_DEBUG_POSTAMBLE

/**
 * ioctl_send_fib - send a FIB from userspace
 * @dev: adapter being processed
 * @arg: arguments to the ioctl call
 *
 * This routine sends a fib to the adapter on behalf of a user level
 * program.
 */
static int ioctl_send_fib(struct aac_dev *dev, void __user *arg)
{
	struct hw_fib *kfib;
	struct fib *fibptr;
	struct hw_fib *hw_fib = NULL;
	dma_addr_t hw_fib_pa = 0;
	unsigned int size, osize;
	int retval;

	if (dev->in_reset)
		return -EBUSY;

	fibptr = aac_fib_alloc(dev);
	if (fibptr == NULL)
		return -ENOMEM;

	kfib = fibptr->hw_fib_va;
	/*
	 * First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 * Since we copy based on the fib header size, make sure that we
	 * will not overrun the buffer when we copy the memory. Return
	 * an error if we would.
	 */
	osize = size = le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
	if (size > dev->max_fib_size) {
		dma_addr_t daddr;

		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}

		kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr,
					  GFP_KERNEL);
		if (!kfib) {
			retval = -ENOMEM;
			goto cleanup;
		}

		/* Hijack the hw_fib */
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib;
		fibptr->hw_fib_pa = daddr;
		memset(((char *)kfib) + dev->max_fib_size, 0,
		       size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	/* Sanity check the second copy */
	if ((osize != le16_to_cpu(kfib->header.Size) +
		sizeof(struct aac_fibhdr))
		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
		retval = -EINVAL;
		goto cleanup;
	}

	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		/*
		 * Since we didn't really send a fib, zero out the state to
		 * allow cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size), FsaNormal,
				1, 1, NULL, NULL);
		if (retval)
			goto cleanup;

		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 * Make sure that the size returned by the adapter (which includes
	 * the header) is less than or equal to the size of a fib, so we
	 * don't corrupt application data. Then copy that size to the user
	 * buffer. (Don't try to add the header information again, since it
	 * was already included by the adapter.)
	 */

	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		dma_free_coherent(&dev->pdev->dev, size, kfib,
				  fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return retval;
}
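
/*
 * A minimal, hypothetical userspace sketch of driving FSACTL_SENDFIB
 * through the adapter's management node. The "/dev/aac0" path is an
 * assumption for illustration, and fib_buf must already contain a fully
 * built hw_fib; the one requirement mirrored from ioctl_send_fib() above
 * is that header.Size and header.SenderSize honestly describe the
 * buffer, otherwise the driver answers -EINVAL.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *
 *	int send_fib_example(void *fib_buf)
 *	{
 *		int rc, fd = open("/dev/aac0", O_RDWR);
 *
 *		if (fd < 0)
 *			return -1;
 *		rc = ioctl(fd, FSACTL_SENDFIB, fib_buf);
 *		close(fd);
 *		return rc;
 *	}
 */
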
/**
 * open_getadapter_fib - create an adapter fib context
 * @dev: adapter to use
 * @arg: ioctl argument, userspace address that receives the context handle
 *
 * This routine creates and registers a new AdapterFibContext from which
 * the user can later retrieve Fibs.
 */
static int open_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head *entry;
		struct aac_fib_context *context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 * Yes yes, I know this could be an index, but we have a
		 * better guarantee of uniqueness for the locked loop below.
		 * Without the aid of a persistent history, this also helps
		 * reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 * Initialize the semaphore used to wait for the next AIF.
		 */
		sema_init(&fibctx->wait_sem, 0);
		fibctx->wait = 0;
		/*
		 * Initialize the fibs and set the count of fibs on
		 * the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
		fibctx->jiffies = jiffies/HZ;
		/*
		 * Now add this context onto the adapter's
		 * AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
				 sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}

/**
 * next_getadapter_fib - get the next fib
 * @dev: adapter to use
 * @arg: ioctl argument
 *
 * This routine will get the next Fib, if available, from the AdapterFibContext
 * passed in from the user.
 */
static int next_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head *entry;
	unsigned long flags;

	if (copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
	    (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 * If there are no fibs to send back, then either wait or return
	 * -EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev,
						  "%s", dev->name);
			ssleep(1);
		}
		if (f.wait) {
			if (down_interruptible(&fibctx->wait_sem) < 0) {
				status = -ERESTARTSYS;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	fibctx->jiffies = jiffies/HZ;
	return status;
}
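
/*
 * A hypothetical userspace sketch of the AIF retrieval cycle built from
 * FSACTL_OPEN_GET_ADAPTER_FIB, FSACTL_GET_NEXT_ADAPTER_FIB and
 * FSACTL_CLOSE_GET_ADAPTER_FIB (close_getadapter_fib below). It assumes
 * the driver's ioctl definitions (struct fib_ioctl and the FSACTL
 * numbers) are visible to userspace and that hw_fib_buf can hold a full
 * struct hw_fib. With f.wait == 0 the loop drains queued AIFs until the
 * driver answers -EAGAIN.
 *
 *	int drain_aifs_example(int fd, void *hw_fib_buf)
 *	{
 *		u32 ctx;
 *		struct fib_ioctl f;
 *
 *		if (ioctl(fd, FSACTL_OPEN_GET_ADAPTER_FIB, &ctx) < 0)
 *			return -1;
 *		f.fibctx = ctx;
 *		f.wait = 0;
 *		f.fib = hw_fib_buf;
 *		while (ioctl(fd, FSACTL_GET_NEXT_ADAPTER_FIB, &f) == 0)
 *			;
 *		return ioctl(fd, FSACTL_CLOSE_GET_ADAPTER_FIB,
 *			     (unsigned long)ctx);
 *	}
 */
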
int aac_close_fib_context(struct aac_dev *dev, struct aac_fib_context *fibctx)
{
	struct fib *fib;

	/*
	 * First free any FIBs that have not been consumed.
	 */
	while (!list_empty(&fibctx->fib_list)) {
		struct list_head *entry;
		/*
		 * Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);
		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		/*
		 * Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
	}
	/*
	 * Remove the Context from the AdapterFibContext List
	 */
	list_del(&fibctx->next);
	/*
	 * Invalidate context
	 */
	fibctx->type = 0;
	/*
	 * Free the space occupied by the Context
	 */
	kfree(fibctx);
	return 0;
}

/**
 * close_getadapter_fib - close down user fib context
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine will close down the fibctx passed in from the user.
 */
static int close_getadapter_fib(struct aac_dev *dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;
	unsigned long flags;
	struct list_head *entry;

	/*
	 * Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 * Search the list of AdapterFibContext addresses on the adapter
	 * to be sure this is a valid address
	 */
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 * Extract the fibctx from the input parameters
		 */
		if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}

	if (!fibctx)
		return 0; /* Already gone */

	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
	    (fibctx->size != sizeof(struct aac_fib_context)))
		return -EINVAL;
	spin_lock_irqsave(&dev->fib_lock, flags);
	status = aac_close_fib_context(dev, fibctx);
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	return status;
}

/**
 * check_revision - check driver version compatibility
 * @dev: adapter
 * @arg: ioctl arguments
 *
 * This routine returns the driver version.
 * Under Linux, there have been no version incompatibilities, so this is
 * simple!
 */
static int check_revision(struct aac_dev *dev, void __user *arg)
{
	struct revision response;
	char *driver_version = aac_driver_version;
	u32 version;

	response.compat = 1;
	version = (simple_strtol(driver_version,
				&driver_version, 10) << 24) | 0x00000400;
	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
	version += simple_strtol(driver_version + 1, NULL, 10);
	response.version = cpu_to_le32(version);
#ifdef AAC_DRIVER_BUILD
	response.build = cpu_to_le32(AAC_DRIVER_BUILD);
#else
	response.build = cpu_to_le32(9999);
#endif

	if (copy_to_user(arg, &response, sizeof(response)))
		return -EFAULT;
	return 0;
}
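
/*
 * Worked example of the version encoding above, assuming a hypothetical
 * aac_driver_version string of "1.2.30": the major number fills the top
 * byte, a constant 0x0400 is OR-ed in, and the minor and dot levels fill
 * the lower bytes.
 *
 *	version  = (1 << 24) | 0x00000400;	version == 0x01000400
 *	version += 2 << 16;			version == 0x01020400
 *	version += 30;				version == 0x0102041e
 */
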
/**
 * aac_send_raw_srb - send a raw SRB from userspace
 * @dev: adapter
 * @arg: ioctl arguments
 */
static int aac_send_raw_srb(struct aac_dev *dev, void __user *arg)
{
	struct fib *srbfib;
	int status;
	struct aac_srb *srbcmd = NULL;
	struct aac_hba_cmd_req *hbacmd = NULL;
	struct user_aac_srb *user_srbcmd = NULL;
	struct user_aac_srb __user *user_srb = arg;
	struct aac_srb_reply __user *user_reply;
	u32 chn;
	u32 fibsize = 0;
	u32 flags = 0;
	s32 rcode = 0;
	u32 data_dir;
	void __user *sg_user[HBA_MAX_SG_EMBEDDED];
	void *sg_list[HBA_MAX_SG_EMBEDDED];
	u32 sg_count[HBA_MAX_SG_EMBEDDED];
	u32 sg_indx = 0;
	u32 byte_count = 0;
	u32 actual_fibsize64, actual_fibsize = 0;
	int i;
	int is_native_device;
	u64 address;

	if (dev->in_reset) {
		dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
		return -EBUSY;
	}
	if (!capable(CAP_SYS_ADMIN)) {
		dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
		return -EPERM;
	}
	/*
	 * Allocate and initialize a Fib then setup a SRB command
	 */
	srbfib = aac_fib_alloc(dev);
	if (!srbfib)
		return -ENOMEM;

	memset(sg_list, 0, sizeof(sg_list)); /* so cleanup does not free garbage */
	if (copy_from_user(&fibsize, &user_srb->count, sizeof(u32))) {
		dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
	    (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
		rcode = -EINVAL;
		goto cleanup;
	}

	user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
	if (!user_srbcmd) {
		dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
		rcode = -ENOMEM;
		goto cleanup;
	}
	if (copy_from_user(user_srbcmd, user_srb, fibsize)) {
		dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	flags = user_srbcmd->flags; /* from user in cpu order */
	switch (flags & (SRB_DataIn | SRB_DataOut)) {
	case SRB_DataOut:
		data_dir = DMA_TO_DEVICE;
		break;
	case (SRB_DataIn | SRB_DataOut):
		data_dir = DMA_BIDIRECTIONAL;
		break;
	case SRB_DataIn:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		data_dir = DMA_NONE;
	}
	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
		dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
			 user_srbcmd->sg.count));
		rcode = -EINVAL;
		goto cleanup;
	}
	if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
		dprintk((KERN_DEBUG"aacraid: SG with no direction specified\n"));
		rcode = -EINVAL;
		goto cleanup;
	}
	actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
		((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
	actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
		(sizeof(struct sgentry64) - sizeof(struct sgentry));
	/* User made a mistake - should not continue */
	if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
		dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
			"Raw SRB command calculated fibsize=%lu;%lu "
			"user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
			"issued fibsize=%d\n",
			actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
			sizeof(struct aac_srb), sizeof(struct sgentry),
			sizeof(struct sgentry64), fibsize));
		rcode = -EINVAL;
		goto cleanup;
	}

	chn = user_srbcmd->channel;
	if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
	    dev->hba_map[chn][user_srbcmd->id].devtype ==
	    AAC_DEVTYPE_NATIVE_RAW) {
		is_native_device = 1;
		hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va;
		memset(hbacmd, 0, 96);	/* sizeof(*hbacmd) is not necessary */

		/* iu_type is a parameter of aac_hba_send */
		switch (data_dir) {
		case DMA_TO_DEVICE:
			hbacmd->byte1 = 2;
			break;
		case DMA_FROM_DEVICE:
		case DMA_BIDIRECTIONAL:
			hbacmd->byte1 = 1;
			break;
		case DMA_NONE:
		default:
			break;
		}
		hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
		hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;

		/*
		 * we fill in reply_qid later in aac_src_deliver_message
		 * we fill in iu_type, request_id later in aac_hba_send
		 * we fill in emb_data_desc_count, data_length later
		 * in sg list build
		 */

		memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb));

		address = (u64)srbfib->hw_error_pa;
		hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
		hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
		hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
		hbacmd->emb_data_desc_count =
					cpu_to_le32(user_srbcmd->sg.count);
		srbfib->hbacmd_size = 64 +
			user_srbcmd->sg.count * sizeof(struct aac_hba_sgl);

	} else {
		is_native_device = 0;
		aac_fib_init(srbfib);

		/* raw_srb FIB is not FastResponseCapable */
		srbfib->hw_fib_va->header.XferState &=
			~cpu_to_le32(FastResponseCapable);

		srbcmd = (struct aac_srb *)fib_data(srbfib);

		// Fix up srb for endian and force some values

		srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);	// Force this
		srbcmd->channel	 = cpu_to_le32(user_srbcmd->channel);
		srbcmd->id	 = cpu_to_le32(user_srbcmd->id);
		srbcmd->lun	 = cpu_to_le32(user_srbcmd->lun);
		srbcmd->timeout	 = cpu_to_le32(user_srbcmd->timeout);
		srbcmd->flags	 = cpu_to_le32(flags);
		srbcmd->retry_limit = 0; // Obsolete parameter
		srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
		memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
	}

	byte_count = 0;
	if (is_native_device) {
		struct user_sgmap *usg32 = &user_srbcmd->sg;
		struct user_sgmap64 *usg64 =
			(struct user_sgmap64 *)&user_srbcmd->sg;

		for (i = 0; i < usg32->count; i++) {
			void *p;
			u64 addr;

			sg_count[i] = (actual_fibsize64 == fibsize) ?
				usg64->sg[i].count : usg32->sg[i].count;
			if (sg_count[i] >
				(dev->scsi_host_ptr->max_sectors << 9)) {
				pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
					i, sg_count[i],
					dev->scsi_host_ptr->max_sectors << 9);
				rcode = -EINVAL;
				goto cleanup;
			}

			p = kmalloc(sg_count[i], GFP_KERNEL);
			if (!p) {
				rcode = -ENOMEM;
				goto cleanup;
			}

			if (actual_fibsize64 == fibsize) {
				addr = (u64)usg64->sg[i].addr[0];
				addr += ((u64)usg64->sg[i].addr[1]) << 32;
			} else {
				addr = (u64)usg32->sg[i].addr;
			}

			sg_user[i] = (void __user *)(uintptr_t)addr;
			sg_list[i] = p; // save so we can clean up later
			sg_indx = i;

			if (flags & SRB_DataOut) {
				if (copy_from_user(p, sg_user[i],
						   sg_count[i])) {
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			addr = pci_map_single(dev->pdev, p, sg_count[i],
					      data_dir);
			hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
			hbacmd->sge[i].addr_lo = cpu_to_le32(
						(u32)(addr & 0xffffffff));
			hbacmd->sge[i].len = cpu_to_le32(sg_count[i]);
			hbacmd->sge[i].flags = 0;
			byte_count += sg_count[i];
		}

		if (usg32->count > 0)	/* embedded sglist */
			hbacmd->sge[usg32->count-1].flags =
				cpu_to_le32(0x40000000);
		hbacmd->data_length = cpu_to_le32(byte_count);

		status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib,
					NULL, NULL);

	} else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
		struct user_sgmap64 *upsg = (struct user_sgmap64 *)&user_srbcmd->sg;
		struct sgmap64 *psg = (struct sgmap64 *)&srbcmd->sg;

		/*
		 * This should also catch if user used the 32 bit sgmap
		 */
		if (actual_fibsize64 == fibsize) {
			actual_fibsize = actual_fibsize64;
			for (i = 0; i < upsg->count; i++) {
				u64 addr;
				void *p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}

				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)upsg->sg[i].addr[0];
				addr += ((u64)upsg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)(uintptr_t)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
			struct user_sgmap *usg;

			usg = kmemdup(upsg,
				      actual_fibsize - sizeof(struct aac_srb)
				      + sizeof(struct sgmap), GFP_KERNEL);
			if (!usg) {
				dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
				rcode = -ENOMEM;
				goto cleanup;
			}
			actual_fibsize = actual_fibsize64;

			for (i = 0; i < usg->count; i++) {
				u64 addr;
				void *p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					kfree(usg);
					rcode = -EINVAL;
					goto cleanup;
				}

				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, usg->count));
					kfree(usg);
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						kfree(usg);
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
			kfree(usg);
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand64, srbfib,
				      actual_fibsize, FsaNormal, 1, 1,
				      NULL, NULL);
	} else {
		struct user_sgmap *upsg = &user_srbcmd->sg;
		struct sgmap *psg = &srbcmd->sg;

		if (actual_fibsize64 == fibsize) {
			struct user_sgmap64 *usg = (struct user_sgmap64 *)upsg;

			for (i = 0; i < upsg->count; i++) {
				uintptr_t addr;
				void *p;

				sg_count[i] = usg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}

				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, usg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)usg->sg[i].addr[0];
				addr += ((u64)usg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      usg->sg[i].count,
						      data_dir);

				psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		} else {
			for (i = 0; i < upsg->count; i++) {
				dma_addr_t addr;
				void *p;

				sg_count[i] = upsg->sg[i].count;
				if (sg_count[i] >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(sg_count[i], GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						 sg_count[i], i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   sg_count[i])) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      sg_count[i], data_dir);

				psg->sg[i].addr = cpu_to_le32(addr);
				byte_count += sg_count[i];
				psg->sg[i].count = cpu_to_le32(sg_count[i]);
			}
		}
		srbcmd->count = cpu_to_le32(byte_count);
		if (user_srbcmd->sg.count)
			psg->count = cpu_to_le32(sg_indx+1);
		else
			psg->count = 0;
		status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize,
				      FsaNormal, 1, 1, NULL, NULL);
	}

	if (status == -ERESTARTSYS) {
		rcode = -ERESTARTSYS;
		goto cleanup;
	}

	if (status != 0) {
		dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
		rcode = -ENXIO;
		goto cleanup;
	}

	if (flags & SRB_DataIn) {
		for (i = 0; i <= sg_indx; i++) {
			if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {
				dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
				rcode = -EFAULT;
				goto cleanup;
			}
		}
	}

	user_reply = arg + fibsize;
	if (is_native_device) {
		struct aac_hba_resp *err =
			&((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
		struct aac_srb_reply reply;

		memset(&reply, 0, sizeof(reply));
		reply.status = ST_OK;
		if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
			/* fast response */
			reply.srb_status = SRB_STATUS_SUCCESS;
			reply.scsi_status = 0;
			reply.data_xfer_length = byte_count;
			reply.sense_data_size = 0;
			memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
		} else {
			reply.srb_status = err->service_response;
			reply.scsi_status = err->status;
			reply.data_xfer_length = byte_count -
				le32_to_cpu(err->residual_count);
			reply.sense_data_size = err->sense_response_data_len;
			memcpy(reply.sense_data, err->sense_response_buf,
			       AAC_SENSE_BUFFERSIZE);
		}
		if (copy_to_user(user_reply, &reply,
				 sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	} else {
		struct aac_srb_reply *reply;

		reply = (struct aac_srb_reply *) fib_data(srbfib);
		if (copy_to_user(user_reply, reply,
				 sizeof(struct aac_srb_reply))) {
			dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
			rcode = -EFAULT;
			goto cleanup;
		}
	}

cleanup:
	kfree(user_srbcmd);
	if (rcode != -ERESTARTSYS) {
		for (i = 0; i <= sg_indx; i++)
			kfree(sg_list[i]);
		aac_fib_complete(srbfib);
		aac_fib_free(srbfib);
	}
	return rcode;
}
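
/*
 * A hypothetical userspace sketch of FSACTL_SEND_RAW_SRB issuing a
 * 6-byte INQUIRY through the 32-bit sgmap layout. It assumes the
 * driver's struct user_aac_srb definition is visible to userspace and
 * that the user and kernel SRB layouts match, as this interface expects.
 * Note the size handshake: count carries the fibsize on input, and the
 * driver recomputes the expected size from sg.count, rejecting any
 * mismatch with -EINVAL.
 *
 *	int inquiry_example(int fd, u32 chn, u32 id, void *buf, u8 len)
 *	{
 *		u32 fibsize = sizeof(struct user_aac_srb);
 *		struct user_aac_srb *srb = calloc(1, fibsize);
 *		int rc;
 *
 *		if (!srb)
 *			return -1;
 *		srb->count = fibsize;
 *		srb->channel = chn;
 *		srb->id = id;
 *		srb->flags = SRB_DataIn;
 *		srb->cdb_size = 6;
 *		srb->cdb[0] = 0x12;
 *		srb->cdb[4] = len;
 *		srb->sg.count = 1;
 *		srb->sg.sg[0].addr = (u32)(uintptr_t)buf;
 *		srb->sg.sg[0].count = len;
 *		rc = ioctl(fd, FSACTL_SEND_RAW_SRB, srb);
 *		free(srb);
 *		return rc;
 *	}
 */
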
struct aac_pci_info {
	u32 bus;
	u32 slot;
};

static int aac_get_pci_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_pci_info pci_info;

	pci_info.bus = dev->pdev->bus->number;
	pci_info.slot = PCI_SLOT(dev->pdev->devfn);

	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
		return -EFAULT;
	}
	return 0;
}

static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
{
	struct aac_hba_info hbainfo;

	memset(&hbainfo, 0, sizeof(hbainfo));
	hbainfo.adapter_number		= (u8) dev->id;
	hbainfo.system_io_bus_number	= dev->pdev->bus->number;
	hbainfo.device_number		= (dev->pdev->devfn >> 3);
	hbainfo.function_number		= (dev->pdev->devfn & 0x0007);

	hbainfo.vendor_id		= dev->pdev->vendor;
	hbainfo.device_id		= dev->pdev->device;
	hbainfo.sub_vendor_id		= dev->pdev->subsystem_vendor;
	hbainfo.sub_system_id		= dev->pdev->subsystem_device;

	if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
		return -EFAULT;
	}
	return 0;
}

struct aac_reset_iop {
	u8	reset_type;
};

static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
{
	struct aac_reset_iop reset;
	int retval;

	if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
		return -EFAULT;

	dev->adapter_shutdown = 1;

	mutex_unlock(&dev->ioctl_mutex);
	retval = aac_reset_adapter(dev, 0, reset.reset_type);
	mutex_lock(&dev->ioctl_mutex);

	return retval;
}

int aac_do_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
{
	int status;

	mutex_lock(&dev->ioctl_mutex);

	if (dev->adapter_shutdown) {
		status = -EACCES;
		goto cleanup;
	}

	/*
	 * HBA gets first crack
	 */
	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		goto cleanup;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
	case FSACTL_SENDFIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev, arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev, arg);
		break;
	case FSACTL_GET_HBA_INFO:
		status = aac_get_hba_info(dev, arg);
		break;
	case FSACTL_RESET_IOP:
		status = aac_send_reset_adapter(dev, arg);
		break;
	default:
		status = -ENOTTY;
		break;
	}

cleanup:
	mutex_unlock(&dev->ioctl_mutex);

	return status;
}
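
/*
 * A minimal sketch of how a character-device ioctl handler might forward
 * into aac_do_ioctl(). The real entry points live elsewhere in the
 * driver; the handler name and the file->private_data convention below
 * are assumptions for illustration only.
 *
 *	static long aac_example_unlocked_ioctl(struct file *file,
 *					       unsigned int cmd,
 *					       unsigned long arg)
 *	{
 *		struct aac_dev *dev = file->private_data;
 *
 *		if (!dev)
 *			return -ENODEV;
 *		return aac_do_ioctl(dev, cmd, (void __user *)arg);
 *	}
 */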