linit.c 62 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143
  1. /*
  2. * Adaptec AAC series RAID controller driver
  3. * (c) Copyright 2001 Red Hat Inc.
  4. *
  5. * based on the old aacraid driver that is..
  6. * Adaptec aacraid device driver for Linux.
  7. *
  8. * Copyright (c) 2000-2010 Adaptec, Inc.
  9. * 2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
  10. * 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation; either version 2, or (at your option)
  15. * any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful,
  18. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. * GNU General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License
  23. * along with this program; see the file COPYING. If not, write to
  24. * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  25. *
  26. * Module Name:
  27. * linit.c
  28. *
  29. * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
  30. */
  31. #include <linux/compat.h>
  32. #include <linux/blkdev.h>
  33. #include <linux/completion.h>
  34. #include <linux/init.h>
  35. #include <linux/interrupt.h>
  36. #include <linux/kernel.h>
  37. #include <linux/module.h>
  38. #include <linux/moduleparam.h>
  39. #include <linux/pci.h>
  40. #include <linux/aer.h>
  41. #include <linux/pci-aspm.h>
  42. #include <linux/slab.h>
  43. #include <linux/mutex.h>
  44. #include <linux/spinlock.h>
  45. #include <linux/syscalls.h>
  46. #include <linux/delay.h>
  47. #include <linux/kthread.h>
  48. #include <scsi/scsi.h>
  49. #include <scsi/scsi_cmnd.h>
  50. #include <scsi/scsi_device.h>
  51. #include <scsi/scsi_host.h>
  52. #include <scsi/scsi_tcq.h>
  53. #include <scsi/scsicam.h>
  54. #include <scsi/scsi_eh.h>
  55. #include "aacraid.h"
/*
 * Driver version string.  When AAC_DRIVER_BUILD is supplied at build time
 * the build number is appended in brackets (e.g. "1.2.1[12345]"), followed
 * by the optional branch tag; otherwise only version + branch are used.
 */
#define AAC_DRIVER_VERSION "1.2.1"
#ifndef AAC_DRIVER_BRANCH
#define AAC_DRIVER_BRANCH ""
#endif
#define AAC_DRIVERNAME "aacraid"

#ifdef AAC_DRIVER_BUILD
/* Two-level stringify so AAC_DRIVER_BUILD is macro-expanded before # */
#define _str(x) #x
#define str(x) _str(x)
#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
#else
#define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH
#endif
MODULE_AUTHOR("Red Hat Inc and Adaptec");
MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
		   "Adaptec Advanced Raid Products, "
		   "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(AAC_DRIVER_FULL_VERSION);

/* NOTE(review): presumably serializes the cfg char-device paths — confirm
 * against the ioctl/open handlers elsewhere in this file. */
static DEFINE_MUTEX(aac_mutex);
/* Global list of all adapters managed by this driver instance. */
static LIST_HEAD(aac_devices);
/* Char major for the config device; stays "unregistered" until assigned. */
static int aac_cfg_major = AAC_CHARDEV_UNREGISTERED;
/* Exported (non-static) so other compilation units can report the version. */
char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
  78. /*
  79. * Because of the way Linux names scsi devices, the order in this table has
  80. * become important. Check for on-board Raid first, add-in cards second.
  81. *
  82. * Note: The last field is used to index into aac_drivers below.
  83. */
/*
 * Entry layout: { vendor, device, subvendor, subdevice, class, class_mask,
 * driver_data }.  The final driver_data field is an index into the
 * aac_drivers[] identity table below (see the note above this table:
 * ordering matters — on-board RAID first, add-in cards second).
 */
static const struct pci_device_id aac_pci_tbl[] = {
	{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
	{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
	{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si */
	{ 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
	{ 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */
	{ 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */
	{ 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
	{ 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */
	{ 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */
	{ 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */
	{ 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */
	{ 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */
	{ 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */
	{ 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */
	{ 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */
	{ 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */
	{ 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */
	{ 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */
	{ 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
	{ 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
	{ 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
	{ 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024RO (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014RO (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x02a0, 0, 0, 27 }, /* ICP9047MA (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x02a1, 0, 0, 28 }, /* ICP9087MA (Lancer) */
	{ 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5445AU (Hurricane44) */
	{ 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
	{ 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
	{ 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
	{ 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
	{ 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
	{ 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
	{ 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
	{ 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
	{ 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
	{ 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
	{ 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
	{ 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
	{ 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
	{ 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
	{ 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
	{ 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005 */
	{ 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
	{ 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
	{ 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
	{ 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
	{ 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000 (BlackBird) */
	{ 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
	{ 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
	{ 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-3800 (Hurricane44) */
	{ 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
	{ 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
	{ 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
	{ 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
	{ 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
	/* Catch-all entries use PCI_ANY_ID wildcards and must stay last so the
	 * exact-match entries above take precedence. */
	{ 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
	{ 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
	{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
	{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
	{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
	{ 0,}	/* terminator */
};
MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
  155. /*
  156. * dmb - For now we add the number of channels to this structure.
  157. * In the future we should add a fib that reports the number of channels
  158. * for the card. At that time we can remove the channels from here
  159. */
/*
 * Controller identity table, indexed by the driver_data field of
 * aac_pci_tbl above.  Entry layout (positional): init function, driver
 * name, vendor string, model string, channel count, quirk flags.
 * NOTE(review): the vendor/model strings appear to be fixed-width,
 * space-padded INQUIRY-style fields; the padding may have been collapsed
 * by whitespace mangling in this copy — verify widths against the
 * original source before relying on them.
 */
static struct aac_driver_ident aac_drivers[] = {
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
	{ aac_rx_init, "percraid", "DELL ", "PERCRAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "catapult ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "tomcat ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2120S ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2120S (Crusader) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Adaptec 2200S (Vulcan) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 2200S ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
	{ aac_rx_init, "aacraid", "Legend ", "Legend S220 ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
	{ aac_rx_init, "aacraid", "Legend ", "Legend S230 ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3230S ", 2 }, /* Adaptec 3230S (Harrier) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "Adaptec 3240S ", 2 }, /* Adaptec 3240S (Tornado) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020ZCR ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025ZCR ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2820SA ", 1 }, /* AAR-2820SA (Intruder) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2620SA ", 1 }, /* AAR-2620SA (Intruder) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "AAR-2420SA ", 1 }, /* AAR-2420SA (Intruder) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9024RO ", 2 }, /* ICP9024RO (Lancer) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9014RO ", 1 }, /* ICP9014RO (Lancer) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9047MA ", 1 }, /* ICP9047MA (Lancer) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9087MA ", 1 }, /* ICP9087MA (Lancer) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP5445AU ", 1 }, /* ICP5445AU (Hurricane44) */
	{ aac_rx_init, "aacraid", "ICP ", "ICP9085LI ", 1 }, /* ICP9085LI (Marauder-X) */
	{ aac_rx_init, "aacraid", "ICP ", "ICP5085BR ", 1 }, /* ICP5085BR (Marauder-E) */
	{ aac_rkt_init, "aacraid", "ICP ", "ICP9067MA ", 1 }, /* ICP9067MA (Intruder-6) */
	{ NULL , "aacraid", "ADAPTEC ", "Themisto ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "Callisto ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2020SA ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2025SA ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2410SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
	{ aac_rx_init, "aacraid", "DELL ", "CERC SR2 ", 1, AAC_QUIRK_17SG }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2810SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-21610SA SATA", 1, AAC_QUIRK_17SG }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2026ZCR ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "AAR-2610SA ", 1 }, /* SATA 6Ch (Bearcat) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-2240S ", 1 }, /* ASR-2240S (SabreExpress) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4005 ", 1 }, /* ASR-4005 */
	{ aac_rx_init, "ServeRAID","IBM ", "ServeRAID 8i ", 1 }, /* IBM 8i (AvonPark) */
	{ aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
	{ aac_rkt_init, "ServeRAID","IBM ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4000 ", 1 }, /* ASR-4000 (BlackBird & AvonPark) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4800SAS ", 1 }, /* ASR-4800SAS (Marauder-X) */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "ASR-4805SAS ", 1 }, /* ASR-4805SAS (Marauder-E) */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "ASR-3800 ", 1 }, /* ASR-3800 (Hurricane44) */
	{ aac_rx_init, "percraid", "DELL ", "PERC 320/DC ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
	{ aac_sa_init, "aacraid", "ADAPTEC ", "Adaptec 5400S ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
	{ aac_sa_init, "aacraid", "ADAPTEC ", "AAC-364 ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
	{ aac_sa_init, "percraid", "DELL ", "PERCRAID ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
	{ aac_sa_init, "hpnraid", "HP ", "NetRAID ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
	/* Entries below back the PCI_ANY_ID catch-all rows of aac_pci_tbl. */
	{ aac_rx_init, "aacraid", "DELL ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
	{ aac_rx_init, "aacraid", "Legend ", "RAID ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
	{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
};
  227. /**
  228. * aac_queuecommand - queue a SCSI command
  229. * @cmd: SCSI command to queue
  230. * @done: Function to call on command completion
  231. *
  232. * Queues a command for execution by the associated Host Adapter.
  233. *
  234. * TODO: unify with aac_scsi_cmd().
  235. */
  236. static int aac_queuecommand(struct Scsi_Host *shost,
  237. struct scsi_cmnd *cmd)
  238. {
  239. int r = 0;
  240. cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
  241. r = (aac_scsi_cmd(cmd) ? FAILED : 0);
  242. return r;
  243. }
  244. /**
  245. * aac_info - Returns the host adapter name
  246. * @shost: Scsi host to report on
  247. *
  248. * Returns a static string describing the device in question
  249. */
  250. static const char *aac_info(struct Scsi_Host *shost)
  251. {
  252. struct aac_dev *dev = (struct aac_dev *)shost->hostdata;
  253. return aac_drivers[dev->cardtype].name;
  254. }
  255. /**
  256. * aac_get_driver_ident
  257. * @devtype: index into lookup table
  258. *
  259. * Returns a pointer to the entry in the driver lookup table.
  260. */
  261. struct aac_driver_ident* aac_get_driver_ident(int devtype)
  262. {
  263. return &aac_drivers[devtype];
  264. }
/**
 *	aac_biosparm	-	return BIOS parameters for disk
 *	@sdev: The scsi device corresponding to the disk
 *	@bdev: the block device corresponding to the disk
 *	@capacity: the sector capacity of the disk
 *	@geom: geometry block to fill in (heads/sectors/cylinders)
 *
 *	Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
 *	The default disk geometry is 64 heads, 32 sectors, and the appropriate
 *	number of cylinders so as not to exceed drive capacity. In order for
 *	disks equal to or larger than 1 GB to be addressable by the BIOS
 *	without exceeding the BIOS limitation of 1024 cylinders, Extended
 *	Translation should be enabled. With Extended Translation enabled,
 *	drives between 1 GB inclusive and 2 GB exclusive are given a disk
 *	geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
 *	are given a disk geometry of 255 heads and 63 sectors. However, if
 *	the BIOS detects that the Extended Translation setting does not match
 *	the geometry in the partition table, then the translation inferred
 *	from the partition table will be used by the BIOS, and a warning may
 *	be displayed.
 *
 *	Always returns 0; *geom is filled in even when no partition table
 *	can be read.
 */
static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
			sector_t capacity, int *geom)
{
	struct diskparm *param = (struct diskparm *)geom;
	unsigned char *buf;

	dprintk((KERN_DEBUG "aac_biosparm.\n"));

	/*
	 *	Assuming extended translation is enabled - #REVISIT#
	 *	Thresholds are in 512-byte sectors: 2M sectors = 1 GB,
	 *	4M sectors = 2 GB.
	 */
	if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */
		if(capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */
			param->heads = 255;
			param->sectors = 63;
		} else {
			param->heads = 128;
			param->sectors = 32;
		}
	} else {
		param->heads = 64;
		param->sectors = 32;
	}

	param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);

	/*
	 *	Read the first 1024 bytes from the disk device, if the boot
	 *	sector partition table is valid, search for a partition table
	 *	entry whose end_head matches one of the standard geometry
	 *	translations ( 64/32, 128/32, 255/63 ).
	 */
	buf = scsi_bios_ptable(bdev);
	if (!buf)
		return 0;	/* keep the computed default geometry */
	/* 0xaa55 MBR boot signature; buf evidently starts at the partition
	 * table (offset 0x1be of the boot sector), putting the signature at
	 * buf + 0x40 (= 0x1fe). */
	if(*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
		struct partition *first = (struct partition * )buf;
		struct partition *entry = first;
		int saved_cylinders = param->cylinders;
		int num;
		unsigned char end_head, end_sec;

		/* Scan the four primary partition entries for a known
		 * translation (end_head is 0-based, hence 63/127/254). */
		for(num = 0; num < 4; num++) {
			end_head = entry->end_head;
			end_sec = entry->end_sector & 0x3f;

			if(end_head == 63) {
				param->heads = 64;
				param->sectors = 32;
				break;
			} else if(end_head == 127) {
				param->heads = 128;
				param->sectors = 32;
				break;
			} else if(end_head == 254) {
				param->heads = 255;
				param->sectors = 63;
				break;
			}
			entry++;
		}

		/* No entry matched: fall back to the first entry's end CHS
		 * purely for the diagnostic messages below. */
		if (num == 4) {
			end_head = first->end_head;
			end_sec = first->end_sector & 0x3f;
		}

		param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
		if (num < 4 && end_sec == param->sectors) {
			if (param->cylinders != saved_cylinders)
				dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
					param->heads, param->sectors, num));
		} else if (end_head > 0 || end_sec > 0) {
			dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
				end_head + 1, end_sec, num));
			dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
					param->heads, param->sectors));
		}
	}
	kfree(buf);
	return 0;
}
/**
 *	aac_slave_configure	-	compute queue depths
 *	@sdev:	SCSI device we are considering
 *
 *	Selects queue depths for each target device based on the host adapter's
 *	total capacity and the queue depth supported by the target device.
 *	A queue depth of one automatically disables tagged queueing.
 *
 *	Returns 0 on success, -ENXIO to hide physical devices when
 *	expose_physicals == 0.
 */
static int aac_slave_configure(struct scsi_device *sdev)
{
	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
	int chn, tid;
	unsigned int depth = 0;
	unsigned int set_timeout = 0;
	bool set_qd_dev_type = false;
	u8 devtype = 0;

	chn = aac_logical_to_phys(sdev_channel(sdev));
	tid = sdev_id(sdev);

	/* SmartIOC (sa_firmware) path: the firmware's hba_map dictates the
	 * device type and, for native-raw devices, the queue-depth limit. */
	if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
		devtype = aac->hba_map[chn][tid].devtype;

		if (devtype == AAC_DEVTYPE_NATIVE_RAW) {
			depth = aac->hba_map[chn][tid].qd_limit;
			set_timeout = 1;
			goto common_config;
		}
		if (devtype == AAC_DEVTYPE_ARC_RAW) {
			set_qd_dev_type = true;
			set_timeout = 1;
			goto common_config;
		}
	}

	/* In JBOD mode physical disks are flagged removable so hotplug
	 * behaves sensibly. */
	if (aac->jbod && (sdev->type == TYPE_DISK))
		sdev->removable = 1;

	/* Physical (non-container) disk: honour the expose_physicals module
	 * policy — hide entirely (0) or keep visible without an upper-layer
	 * driver attached (<0). */
	if (sdev->type == TYPE_DISK
	 && sdev_channel(sdev) != CONTAINER_CHANNEL
	 && (!aac->jbod || sdev->inq_periph_qual)
	 && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {

		if (expose_physicals == 0)
			return -ENXIO;

		if (expose_physicals < 0)
			sdev->no_uld_attach = 1;
	}

	/* Tagged disk: divide the host's can_queue among the logical
	 * storage units (num_lsu), after reserving one slot apiece for
	 * untagged/other devices (num_one). */
	if (sdev->tagged_supported
	 && sdev->type == TYPE_DISK
	 && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
	 && !sdev->no_uld_attach) {
		struct scsi_device * dev;
		struct Scsi_Host *host = sdev->host;
		unsigned num_lsu = 0;
		unsigned num_one = 0;
		unsigned cid;

		set_timeout = 1;

		for (cid = 0; cid < aac->maximum_num_containers; ++cid)
			if (aac->fsa_dev[cid].valid)
				++num_lsu;

		__shost_for_each_device(dev, host) {
			/* NOTE(review): the raid_scsi_mode clause tests
			 * sdev_channel(sdev), not sdev_channel(dev) — looks
			 * suspicious but is preserved as-is; confirm intent
			 * before changing. */
			if (dev->tagged_supported
			 && dev->type == TYPE_DISK
			 && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
			 && !dev->no_uld_attach) {
				if ((sdev_channel(dev) != CONTAINER_CHANNEL)
				 || !aac->fsa_dev[sdev_id(dev)].valid) {
					++num_lsu;
				}
			} else {
				++num_one;
			}
		}

		/* Guard against divide-by-zero below. */
		if (num_lsu == 0)
			++num_lsu;

		depth = (host->can_queue - num_one) / num_lsu;

		if (sdev_channel(sdev) != NATIVE_CHANNEL)
			goto common_config;

		set_qd_dev_type = true;
	}

common_config:

	/*
	 * Check if SATA drive
	 */
	if (set_qd_dev_type) {
		if (strncmp(sdev->vendor, "ATA", 3) == 0)
			depth = 32;
		else
			depth = 64;
	}

	/*
	 * Firmware has an individual device recovery time typically
	 * of 35 seconds, give us a margin.
	 */
	if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ))
		blk_queue_rq_timeout(sdev->request_queue, 45*HZ);

	/* Clamp depth to [1, 256] before applying it. */
	if (depth > 256)
		depth = 256;
	else if (depth < 1)
		depth = 1;

	scsi_change_queue_depth(sdev, depth);

	sdev->tagged_supported = 1;

	return 0;
}
/**
 * aac_change_queue_depth - alter queue depths
 * @sdev: SCSI device we are considering
 * @depth: desired queue depth
 *
 * Alters queue depths for target device based on the host adapter's
 * total capacity and the queue depth supported by the target device.
 */
static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
{
	struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
	int chn, tid, is_native_device = 0;

	chn = aac_logical_to_phys(sdev_channel(sdev));
	tid = sdev_id(sdev);
	/* Native (HBA pass-through) devices use the firmware-reported limit. */
	if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
		aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW)
		is_native_device = 1;

	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
	    (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
		struct scsi_device * dev;
		struct Scsi_Host *host = sdev->host;
		unsigned num = 0;

		/*
		 * Count devices sharing the host queue: every device once,
		 * container disks a second time.
		 * NOTE(review): the unconditional ++num makes container
		 * disks count double -- presumably extra per-container
		 * headroom, but confirm this is not an accidental
		 * duplication of the guarded increment.
		 */
		__shost_for_each_device(dev, host) {
			if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
			    (sdev_channel(dev) == CONTAINER_CHANNEL))
				++num;
			++num;
		}
		/* Always leave at least one slot for this device. */
		if (num >= host->can_queue)
			num = host->can_queue - 1;
		/* Cap the request at the share left for this device. */
		if (depth > (host->can_queue - num))
			depth = host->can_queue - num;
		/* Clamp container depth to [2, 256]. */
		if (depth > 256)
			depth = 256;
		else if (depth < 2)
			depth = 2;
		return scsi_change_queue_depth(sdev, depth);
	} else if (is_native_device) {
		scsi_change_queue_depth(sdev, aac->hba_map[chn][tid].qd_limit);
	} else {
		/* Untagged / non-container devices get a single slot. */
		scsi_change_queue_depth(sdev, 1);
	}
	return sdev->queue_depth;
}
  503. static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
  504. {
  505. struct scsi_device *sdev = to_scsi_device(dev);
  506. struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
  507. if (sdev_channel(sdev) != CONTAINER_CHANNEL)
  508. return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
  509. ? "Hidden\n" :
  510. ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : ""));
  511. return snprintf(buf, PAGE_SIZE, "%s\n",
  512. get_container_type(aac->fsa_dev[sdev_id(sdev)].type));
  513. }
/* Per-device sysfs attribute: "level" -- RAID level / exposure state. */
static struct device_attribute aac_raid_level_attr = {
	.attr = {
		.name = "level",
		.mode = S_IRUGO,
	},
	.show = aac_show_raid_level
};
  521. static ssize_t aac_show_unique_id(struct device *dev,
  522. struct device_attribute *attr, char *buf)
  523. {
  524. struct scsi_device *sdev = to_scsi_device(dev);
  525. struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
  526. unsigned char sn[16];
  527. memset(sn, 0, sizeof(sn));
  528. if (sdev_channel(sdev) == CONTAINER_CHANNEL)
  529. memcpy(sn, aac->fsa_dev[sdev_id(sdev)].identifier, sizeof(sn));
  530. return snprintf(buf, 16 * 2 + 2,
  531. "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
  532. sn[0], sn[1], sn[2], sn[3],
  533. sn[4], sn[5], sn[6], sn[7],
  534. sn[8], sn[9], sn[10], sn[11],
  535. sn[12], sn[13], sn[14], sn[15]);
  536. }
/* Per-device sysfs attribute: "unique_id" -- container identifier. */
static struct device_attribute aac_unique_id_attr = {
	.attr = {
		.name = "unique_id",
		.mode = 0444,
	},
	.show = aac_show_unique_id
};
/* sysfs attributes attached to every aacraid SCSI device. */
static struct device_attribute *aac_dev_attrs[] = {
	&aac_raid_level_attr,
	&aac_unique_id_attr,
	NULL,
};
  549. static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
  550. {
  551. struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
  552. if (!capable(CAP_SYS_RAWIO))
  553. return -EPERM;
  554. return aac_do_ioctl(dev, cmd, arg);
  555. }
/*
 * Count commands still outstanding in each ownership phase and log the
 * breakdown.  Returns the number of commands not yet returned to the
 * kernel proper (midlevel + lowlevel + error handler + firmware);
 * kernel-owned (default phase) commands are excluded.
 */
static int get_num_of_incomplete_fibs(struct aac_dev *aac)
{
	unsigned long flags;
	struct scsi_device *sdev = NULL;
	struct Scsi_Host *shost = aac->scsi_host_ptr;
	struct scsi_cmnd *scmnd = NULL;
	struct device *ctrl_dev;

	int mlcnt = 0;
	int llcnt = 0;
	int ehcnt = 0;
	int fwcnt = 0;
	int krlcnt = 0;

	__shost_for_each_device(sdev, shost) {
		/* cmd_list is protected by the per-device list_lock. */
		spin_lock_irqsave(&sdev->list_lock, flags);
		list_for_each_entry(scmnd, &sdev->cmd_list, list) {
			/* SCp.phase records who currently owns the command. */
			switch (scmnd->SCp.phase) {
			case AAC_OWNER_FIRMWARE:
				fwcnt++;
				break;
			case AAC_OWNER_ERROR_HANDLER:
				ehcnt++;
				break;
			case AAC_OWNER_LOWLEVEL:
				llcnt++;
				break;
			case AAC_OWNER_MIDLEVEL:
				mlcnt++;
				break;
			default:
				krlcnt++;
				break;
			}
		}
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}

	ctrl_dev = &aac->pdev->dev;

	dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", mlcnt);
	dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", llcnt);
	dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", ehcnt);
	dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fwcnt);
	dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", krlcnt);

	return mlcnt + llcnt + ehcnt + fwcnt;
}
  599. static int aac_eh_abort(struct scsi_cmnd* cmd)
  600. {
  601. struct scsi_device * dev = cmd->device;
  602. struct Scsi_Host * host = dev->host;
  603. struct aac_dev * aac = (struct aac_dev *)host->hostdata;
  604. int count, found;
  605. u32 bus, cid;
  606. int ret = FAILED;
  607. if (aac_adapter_check_health(aac))
  608. return ret;
  609. bus = aac_logical_to_phys(scmd_channel(cmd));
  610. cid = scmd_id(cmd);
  611. if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
  612. struct fib *fib;
  613. struct aac_hba_tm_req *tmf;
  614. int status;
  615. u64 address;
  616. pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
  617. AAC_DRIVERNAME,
  618. host->host_no, sdev_channel(dev), sdev_id(dev), (int)dev->lun);
  619. found = 0;
  620. for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
  621. fib = &aac->fibs[count];
  622. if (*(u8 *)fib->hw_fib_va != 0 &&
  623. (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
  624. (fib->callback_data == cmd)) {
  625. found = 1;
  626. break;
  627. }
  628. }
  629. if (!found)
  630. return ret;
  631. /* start a HBA_TMF_ABORT_TASK TMF request */
  632. fib = aac_fib_alloc(aac);
  633. if (!fib)
  634. return ret;
  635. tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
  636. memset(tmf, 0, sizeof(*tmf));
  637. tmf->tmf = HBA_TMF_ABORT_TASK;
  638. tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
  639. tmf->lun[1] = cmd->device->lun;
  640. address = (u64)fib->hw_error_pa;
  641. tmf->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
  642. tmf->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
  643. tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
  644. fib->hbacmd_size = sizeof(*tmf);
  645. cmd->SCp.sent_command = 0;
  646. status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
  647. (fib_callback) aac_hba_callback,
  648. (void *) cmd);
  649. /* Wait up to 15 secs for completion */
  650. for (count = 0; count < 15; ++count) {
  651. if (cmd->SCp.sent_command) {
  652. ret = SUCCESS;
  653. break;
  654. }
  655. msleep(1000);
  656. }
  657. if (ret != SUCCESS)
  658. pr_err("%s: Host adapter abort request timed out\n",
  659. AAC_DRIVERNAME);
  660. } else {
  661. pr_err(
  662. "%s: Host adapter abort request.\n"
  663. "%s: Outstanding commands on (%d,%d,%d,%d):\n",
  664. AAC_DRIVERNAME, AAC_DRIVERNAME,
  665. host->host_no, sdev_channel(dev), sdev_id(dev),
  666. (int)dev->lun);
  667. switch (cmd->cmnd[0]) {
  668. case SERVICE_ACTION_IN_16:
  669. if (!(aac->raw_io_interface) ||
  670. !(aac->raw_io_64) ||
  671. ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
  672. break;
  673. case INQUIRY:
  674. case READ_CAPACITY:
  675. /*
  676. * Mark associated FIB to not complete,
  677. * eh handler does this
  678. */
  679. for (count = 0;
  680. count < (host->can_queue + AAC_NUM_MGT_FIB);
  681. ++count) {
  682. struct fib *fib = &aac->fibs[count];
  683. if (fib->hw_fib_va->header.XferState &&
  684. (fib->flags & FIB_CONTEXT_FLAG) &&
  685. (fib->callback_data == cmd)) {
  686. fib->flags |=
  687. FIB_CONTEXT_FLAG_TIMED_OUT;
  688. cmd->SCp.phase =
  689. AAC_OWNER_ERROR_HANDLER;
  690. ret = SUCCESS;
  691. }
  692. }
  693. break;
  694. case TEST_UNIT_READY:
  695. /*
  696. * Mark associated FIB to not complete,
  697. * eh handler does this
  698. */
  699. for (count = 0;
  700. count < (host->can_queue + AAC_NUM_MGT_FIB);
  701. ++count) {
  702. struct scsi_cmnd *command;
  703. struct fib *fib = &aac->fibs[count];
  704. command = fib->callback_data;
  705. if ((fib->hw_fib_va->header.XferState &
  706. cpu_to_le32
  707. (Async | NoResponseExpected)) &&
  708. (fib->flags & FIB_CONTEXT_FLAG) &&
  709. ((command)) &&
  710. (command->device == cmd->device)) {
  711. fib->flags |=
  712. FIB_CONTEXT_FLAG_TIMED_OUT;
  713. command->SCp.phase =
  714. AAC_OWNER_ERROR_HANDLER;
  715. if (command == cmd)
  716. ret = SUCCESS;
  717. }
  718. }
  719. break;
  720. }
  721. }
  722. return ret;
  723. }
  724. static u8 aac_eh_tmf_lun_reset_fib(struct aac_hba_map_info *info,
  725. struct fib *fib, u64 tmf_lun)
  726. {
  727. struct aac_hba_tm_req *tmf;
  728. u64 address;
  729. /* start a HBA_TMF_LUN_RESET TMF request */
  730. tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
  731. memset(tmf, 0, sizeof(*tmf));
  732. tmf->tmf = HBA_TMF_LUN_RESET;
  733. tmf->it_nexus = info->rmw_nexus;
  734. int_to_scsilun(tmf_lun, (struct scsi_lun *)tmf->lun);
  735. address = (u64)fib->hw_error_pa;
  736. tmf->error_ptr_hi = cpu_to_le32
  737. ((u32)(address >> 32));
  738. tmf->error_ptr_lo = cpu_to_le32
  739. ((u32)(address & 0xffffffff));
  740. tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
  741. fib->hbacmd_size = sizeof(*tmf);
  742. return HBA_IU_TYPE_SCSI_TM_REQ;
  743. }
  744. static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
  745. struct fib *fib)
  746. {
  747. struct aac_hba_reset_req *rst;
  748. u64 address;
  749. /* already tried, start a hard reset now */
  750. rst = (struct aac_hba_reset_req *)fib->hw_fib_va;
  751. memset(rst, 0, sizeof(*rst));
  752. rst->it_nexus = info->rmw_nexus;
  753. address = (u64)fib->hw_error_pa;
  754. rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
  755. rst->error_ptr_lo = cpu_to_le32
  756. ((u32)(address & 0xffffffff));
  757. rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
  758. fib->hbacmd_size = sizeof(*rst);
  759. return HBA_IU_TYPE_SATA_REQ;
  760. }
  761. void aac_tmf_callback(void *context, struct fib *fibptr)
  762. {
  763. struct aac_hba_resp *err =
  764. &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
  765. struct aac_hba_map_info *info = context;
  766. int res;
  767. switch (err->service_response) {
  768. case HBA_RESP_SVCRES_TMF_REJECTED:
  769. res = -1;
  770. break;
  771. case HBA_RESP_SVCRES_TMF_LUN_INVALID:
  772. res = 0;
  773. break;
  774. case HBA_RESP_SVCRES_TMF_COMPLETE:
  775. case HBA_RESP_SVCRES_TMF_SUCCEEDED:
  776. res = 0;
  777. break;
  778. default:
  779. res = -2;
  780. break;
  781. }
  782. aac_fib_complete(fibptr);
  783. info->reset_state = res;
  784. }
  785. /*
  786. * aac_eh_dev_reset - Device reset command handling
  787. * @scsi_cmd: SCSI command block causing the reset
  788. *
  789. */
  790. static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
  791. {
  792. struct scsi_device * dev = cmd->device;
  793. struct Scsi_Host * host = dev->host;
  794. struct aac_dev * aac = (struct aac_dev *)host->hostdata;
  795. struct aac_hba_map_info *info;
  796. int count;
  797. u32 bus, cid;
  798. struct fib *fib;
  799. int ret = FAILED;
  800. int status;
  801. u8 command;
  802. bus = aac_logical_to_phys(scmd_channel(cmd));
  803. cid = scmd_id(cmd);
  804. if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
  805. return FAILED;
  806. info = &aac->hba_map[bus][cid];
  807. if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
  808. info->reset_state > 0)
  809. return FAILED;
  810. pr_err("%s: Host adapter reset request. SCSI hang ?\n",
  811. AAC_DRIVERNAME);
  812. fib = aac_fib_alloc(aac);
  813. if (!fib)
  814. return ret;
  815. /* start a HBA_TMF_LUN_RESET TMF request */
  816. command = aac_eh_tmf_lun_reset_fib(info, fib, dev->lun);
  817. info->reset_state = 1;
  818. status = aac_hba_send(command, fib,
  819. (fib_callback) aac_tmf_callback,
  820. (void *) info);
  821. /* Wait up to 15 seconds for completion */
  822. for (count = 0; count < 15; ++count) {
  823. if (info->reset_state == 0) {
  824. ret = info->reset_state == 0 ? SUCCESS : FAILED;
  825. break;
  826. }
  827. msleep(1000);
  828. }
  829. return ret;
  830. }
/*
 * aac_eh_target_reset - Target reset command handling
 * @cmd: SCSI command block causing the reset
 *
 * Escalation after a LUN reset: sends a hard (IT nexus) reset to the
 * firmware and polls the result posted by aac_tmf_callback().
 */
static int aac_eh_target_reset(struct scsi_cmnd *cmd)
{
	struct scsi_device * dev = cmd->device;
	struct Scsi_Host * host = dev->host;
	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
	struct aac_hba_map_info *info;
	int count;
	u32 bus, cid;
	int ret = FAILED;
	struct fib *fib;
	int status;
	u8 command;

	bus = aac_logical_to_phys(scmd_channel(cmd));
	cid = scmd_id(cmd);

	/* Reject out-of-range addresses before touching hba_map. */
	if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
		return FAILED;

	info = &aac->hba_map[bus][cid];

	/* NOTE(review): the reset-in-progress check is skipped for native
	 * devices -- confirm that asymmetry is intentional. */
	if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
	    info->reset_state > 0)
		return FAILED;

	pr_err("%s: Host adapter reset request. SCSI hang ?\n",
	       AAC_DRIVERNAME);

	fib = aac_fib_alloc(aac);
	if (!fib)
		return ret;

	/* already tried, start a hard reset now */
	command = aac_eh_tmf_hard_reset_fib(info, fib);

	info->reset_state = 2;	/* in progress; callback lowers to <= 0 */

	status = aac_hba_send(command, fib,
			      (fib_callback) aac_tmf_callback,
			      (void *) info);

	/* Wait up to 15 seconds for completion */
	for (count = 0; count < 15; ++count) {
		/* <= 0: callback reported success (0) or failure (< 0). */
		if (info->reset_state <= 0) {
			ret = info->reset_state == 0 ? SUCCESS : FAILED;
			break;
		}
		msleep(1000);
	}
	return ret;
}
  877. /*
  878. * aac_eh_bus_reset - Bus reset command handling
  879. * @scsi_cmd: SCSI command block causing the reset
  880. *
  881. */
  882. static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
  883. {
  884. struct scsi_device * dev = cmd->device;
  885. struct Scsi_Host * host = dev->host;
  886. struct aac_dev * aac = (struct aac_dev *)host->hostdata;
  887. int count;
  888. u32 cmd_bus;
  889. int status = 0;
  890. cmd_bus = aac_logical_to_phys(scmd_channel(cmd));
  891. /* Mark the assoc. FIB to not complete, eh handler does this */
  892. for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
  893. struct fib *fib = &aac->fibs[count];
  894. if (fib->hw_fib_va->header.XferState &&
  895. (fib->flags & FIB_CONTEXT_FLAG) &&
  896. (fib->flags & FIB_CONTEXT_FLAG_SCSI_CMD)) {
  897. struct aac_hba_map_info *info;
  898. u32 bus, cid;
  899. cmd = (struct scsi_cmnd *)fib->callback_data;
  900. bus = aac_logical_to_phys(scmd_channel(cmd));
  901. if (bus != cmd_bus)
  902. continue;
  903. cid = scmd_id(cmd);
  904. info = &aac->hba_map[bus][cid];
  905. if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
  906. info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
  907. fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
  908. cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
  909. }
  910. }
  911. }
  912. pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);
  913. /*
  914. * Check the health of the controller
  915. */
  916. status = aac_adapter_check_health(aac);
  917. if (status)
  918. dev_err(&aac->pdev->dev, "Adapter health - %d\n", status);
  919. count = get_num_of_incomplete_fibs(aac);
  920. return (count == 0) ? SUCCESS : FAILED;
  921. }
/*
 * aac_eh_host_reset - Host reset command handling
 * @cmd: SCSI command block causing the reset
 *
 * Last-resort escalation: performs an IOP hard/soft reset of the whole
 * controller when the firmware advertises a register-based (blind)
 * reset, then clears per-device TMF state on success.
 */
int aac_eh_host_reset(struct scsi_cmnd *cmd)
{
	struct scsi_device * dev = cmd->device;
	struct Scsi_Host * host = dev->host;
	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
	int ret = FAILED;
	__le32 supported_options2 = 0;
	bool is_mu_reset;
	bool is_ignore_reset;
	bool is_doorbell_reset;

	/*
	 * Check if reset is supported by the firmware
	 */
	supported_options2 = aac->supplement_adapter_info.supported_options2;
	is_mu_reset = supported_options2 & AAC_OPTION_MU_RESET;
	is_doorbell_reset = supported_options2 & AAC_OPTION_DOORBELL_RESET;
	is_ignore_reset = supported_options2 & AAC_OPTION_IGNORE_RESET;
	/*
	 * This adapter needs a blind reset, only do so for
	 * Adapters that support a register, instead of a commanded,
	 * reset.
	 */
	if ((is_mu_reset || is_doorbell_reset)
	 && aac_check_reset
	 && (aac_check_reset != -1 || !is_ignore_reset)) {
		/* Bypass wait for command quiesce */
		if (aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET) == 0)
			ret = SUCCESS;
	}
	/*
	 * Reset EH state
	 */
	if (ret == SUCCESS) {
		int bus, cid;
		struct aac_hba_map_info *info;

		/* Clear pending TMF state on every native device. */
		for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
			for (cid = 0; cid < AAC_MAX_TARGETS; cid++) {
				info = &aac->hba_map[bus][cid];
				if (info->devtype == AAC_DEVTYPE_NATIVE_RAW)
					info->reset_state = 0;
			}
		}
	}
	return ret;
}
  972. /**
  973. * aac_cfg_open - open a configuration file
  974. * @inode: inode being opened
  975. * @file: file handle attached
  976. *
  977. * Called when the configuration device is opened. Does the needed
  978. * set up on the handle and then returns
  979. *
  980. * Bugs: This needs extending to check a given adapter is present
  981. * so we can support hot plugging, and to ref count adapters.
  982. */
  983. static int aac_cfg_open(struct inode *inode, struct file *file)
  984. {
  985. struct aac_dev *aac;
  986. unsigned minor_number = iminor(inode);
  987. int err = -ENODEV;
  988. mutex_lock(&aac_mutex); /* BKL pushdown: nothing else protects this list */
  989. list_for_each_entry(aac, &aac_devices, entry) {
  990. if (aac->id == minor_number) {
  991. file->private_data = aac;
  992. err = 0;
  993. break;
  994. }
  995. }
  996. mutex_unlock(&aac_mutex);
  997. return err;
  998. }
  999. /**
  1000. * aac_cfg_ioctl - AAC configuration request
  1001. * @inode: inode of device
  1002. * @file: file handle
  1003. * @cmd: ioctl command code
  1004. * @arg: argument
  1005. *
  1006. * Handles a configuration ioctl. Currently this involves wrapping it
  1007. * up and feeding it into the nasty windowsalike glue layer.
  1008. *
  1009. * Bugs: Needs locking against parallel ioctls lower down
  1010. * Bugs: Needs to handle hot plugging
  1011. */
  1012. static long aac_cfg_ioctl(struct file *file,
  1013. unsigned int cmd, unsigned long arg)
  1014. {
  1015. struct aac_dev *aac = (struct aac_dev *)file->private_data;
  1016. if (!capable(CAP_SYS_RAWIO))
  1017. return -EPERM;
  1018. return aac_do_ioctl(aac, cmd, (void __user *)arg);
  1019. }
  1020. #ifdef CONFIG_COMPAT
/*
 * 32-bit compat dispatcher for aacraid ioctls.  Most commands have
 * identical 32/64-bit layouts and pass straight through; only
 * FSACTL_GET_NEXT_ADAPTER_FIB needs its user structure repacked.
 */
static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
{
	long ret;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
	case FSACTL_SENDFIB:
	case FSACTL_OPEN_GET_ADAPTER_FIB:
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
	case FSACTL_SEND_RAW_SRB:
	case FSACTL_GET_PCI_INFO:
	case FSACTL_QUERY_DISK:
	case FSACTL_DELETE_DISK:
	case FSACTL_FORCE_DELETE_DISK:
	case FSACTL_GET_CONTAINERS:
	case FSACTL_SEND_LARGE_FIB:
		ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
		break;

	case FSACTL_GET_NEXT_ADAPTER_FIB: {
		struct fib_ioctl __user *f;

		/*
		 * Rebuild a native struct fib_ioctl on the compat user
		 * stack: copy everything except the trailing 32-bit word,
		 * which stays zeroed from clear_user().
		 */
		f = compat_alloc_user_space(sizeof(*f));
		ret = 0;
		if (clear_user(f, sizeof(*f)))
			ret = -EFAULT;
		if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
			ret = -EFAULT;
		if (!ret)
			ret = aac_do_ioctl(dev, cmd, f);
		break;
	}

	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	return ret;
}
  1056. static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
  1057. {
  1058. struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
  1059. if (!capable(CAP_SYS_RAWIO))
  1060. return -EPERM;
  1061. return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
  1062. }
  1063. static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
  1064. {
  1065. if (!capable(CAP_SYS_RAWIO))
  1066. return -EPERM;
  1067. return aac_compat_do_ioctl(file->private_data, cmd, arg);
  1068. }
  1069. #endif
  1070. static ssize_t aac_show_model(struct device *device,
  1071. struct device_attribute *attr, char *buf)
  1072. {
  1073. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1074. int len;
  1075. if (dev->supplement_adapter_info.adapter_type_text[0]) {
  1076. char *cp = dev->supplement_adapter_info.adapter_type_text;
  1077. while (*cp && *cp != ' ')
  1078. ++cp;
  1079. while (*cp == ' ')
  1080. ++cp;
  1081. len = snprintf(buf, PAGE_SIZE, "%s\n", cp);
  1082. } else
  1083. len = snprintf(buf, PAGE_SIZE, "%s\n",
  1084. aac_drivers[dev->cardtype].model);
  1085. return len;
  1086. }
  1087. static ssize_t aac_show_vendor(struct device *device,
  1088. struct device_attribute *attr, char *buf)
  1089. {
  1090. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1091. struct aac_supplement_adapter_info *sup_adap_info;
  1092. int len;
  1093. sup_adap_info = &dev->supplement_adapter_info;
  1094. if (sup_adap_info->adapter_type_text[0]) {
  1095. char *cp = sup_adap_info->adapter_type_text;
  1096. while (*cp && *cp != ' ')
  1097. ++cp;
  1098. len = snprintf(buf, PAGE_SIZE, "%.*s\n",
  1099. (int)(cp - (char *)sup_adap_info->adapter_type_text),
  1100. sup_adap_info->adapter_type_text);
  1101. } else
  1102. len = snprintf(buf, PAGE_SIZE, "%s\n",
  1103. aac_drivers[dev->cardtype].vname);
  1104. return len;
  1105. }
/* sysfs "flags": newline-separated list of optional driver features. */
static ssize_t aac_show_flags(struct device *cdev,
			      struct device_attribute *attr, char *buf)
{
	int len = 0;
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(cdev)->hostdata;

	/* nblank() is true only when dprintk() expands to real output. */
	if (nblank(dprintk(x)))
		len = snprintf(buf, PAGE_SIZE, "dprintk\n");
#ifdef AAC_DETAILED_STATUS_INFO
	len += snprintf(buf + len, PAGE_SIZE - len,
			"AAC_DETAILED_STATUS_INFO\n");
#endif
	if (dev->raw_io_interface && dev->raw_io_64)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"SAI_READ_CAPACITY_16\n");
	if (dev->jbod)
		len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
	if (dev->supplement_adapter_info.supported_options2 &
	    AAC_OPTION_POWER_MANAGEMENT)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"SUPPORTED_POWER_MANAGEMENT\n");
	if (dev->msi)
		len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");

	return len;
}
  1130. static ssize_t aac_show_kernel_version(struct device *device,
  1131. struct device_attribute *attr,
  1132. char *buf)
  1133. {
  1134. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1135. int len, tmp;
  1136. tmp = le32_to_cpu(dev->adapter_info.kernelrev);
  1137. len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
  1138. tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
  1139. le32_to_cpu(dev->adapter_info.kernelbuild));
  1140. return len;
  1141. }
  1142. static ssize_t aac_show_monitor_version(struct device *device,
  1143. struct device_attribute *attr,
  1144. char *buf)
  1145. {
  1146. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1147. int len, tmp;
  1148. tmp = le32_to_cpu(dev->adapter_info.monitorrev);
  1149. len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
  1150. tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
  1151. le32_to_cpu(dev->adapter_info.monitorbuild));
  1152. return len;
  1153. }
  1154. static ssize_t aac_show_bios_version(struct device *device,
  1155. struct device_attribute *attr,
  1156. char *buf)
  1157. {
  1158. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1159. int len, tmp;
  1160. tmp = le32_to_cpu(dev->adapter_info.biosrev);
  1161. len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
  1162. tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
  1163. le32_to_cpu(dev->adapter_info.biosbuild));
  1164. return len;
  1165. }
/* sysfs "driver_version": the static, build-time driver version string. */
static ssize_t aac_show_driver_version(struct device *device,
				       struct device_attribute *attr,
				       char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", aac_driver_version);
}
/*
 * sysfs "serial_number": prefer the adapter_info serial word (0xBAD0
 * marks an unprogrammed serial); if the tail of the manufacturing PCBA
 * serial matches it, show the full PCBA serial instead.
 */
static ssize_t aac_show_serial_number(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
	int len = 0;

	if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
		len = snprintf(buf, 16, "%06X\n",
		  le32_to_cpu(dev->adapter_info.serial[0]));
	/* len-1 excludes the trailing newline from the comparison. */
	if (len &&
	  !memcmp(&dev->supplement_adapter_info.mfg_pcba_serial_no[
	    sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no)-len],
	  buf, len-1))
		len = snprintf(buf, 16, "%.*s\n",
		  (int)sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no),
		  dev->supplement_adapter_info.mfg_pcba_serial_no);

	/* Output above was limited to 16 bytes. */
	return min(len, 16);
}
  1189. static ssize_t aac_show_max_channel(struct device *device,
  1190. struct device_attribute *attr, char *buf)
  1191. {
  1192. return snprintf(buf, PAGE_SIZE, "%d\n",
  1193. class_to_shost(device)->max_channel);
  1194. }
  1195. static ssize_t aac_show_max_id(struct device *device,
  1196. struct device_attribute *attr, char *buf)
  1197. {
  1198. return snprintf(buf, PAGE_SIZE, "%d\n",
  1199. class_to_shost(device)->max_id);
  1200. }
  1201. static ssize_t aac_store_reset_adapter(struct device *device,
  1202. struct device_attribute *attr,
  1203. const char *buf, size_t count)
  1204. {
  1205. int retval = -EACCES;
  1206. if (!capable(CAP_SYS_ADMIN))
  1207. return retval;
  1208. retval = aac_reset_adapter(shost_priv(class_to_shost(device)),
  1209. buf[0] == '!', IOP_HWSOFT_RESET);
  1210. if (retval >= 0)
  1211. retval = count;
  1212. return retval;
  1213. }
  1214. static ssize_t aac_show_reset_adapter(struct device *device,
  1215. struct device_attribute *attr,
  1216. char *buf)
  1217. {
  1218. struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
  1219. int len, tmp;
  1220. tmp = aac_adapter_check_health(dev);
  1221. if ((tmp == 0) && dev->in_reset)
  1222. tmp = -EBUSY;
  1223. len = snprintf(buf, PAGE_SIZE, "0x%x\n", tmp);
  1224. return len;
  1225. }
/* Host-level sysfs attributes, exposed via the scsi_host_template below. */
static struct device_attribute aac_model = {
	.attr = {
		.name = "model",
		.mode = S_IRUGO,
	},
	.show = aac_show_model,
};
static struct device_attribute aac_vendor = {
	.attr = {
		.name = "vendor",
		.mode = S_IRUGO,
	},
	.show = aac_show_vendor,
};
static struct device_attribute aac_flags = {
	.attr = {
		.name = "flags",
		.mode = S_IRUGO,
	},
	.show = aac_show_flags,
};
static struct device_attribute aac_kernel_version = {
	.attr = {
		.name = "hba_kernel_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_kernel_version,
};
static struct device_attribute aac_monitor_version = {
	.attr = {
		.name = "hba_monitor_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_monitor_version,
};
static struct device_attribute aac_bios_version = {
	.attr = {
		.name = "hba_bios_version",
		.mode = S_IRUGO,
	},
	.show = aac_show_bios_version,
};
static struct device_attribute aac_lld_version = {
	.attr = {
		.name = "driver_version",
		.mode = 0444,
	},
	.show = aac_show_driver_version,
};
static struct device_attribute aac_serial_number = {
	.attr = {
		.name = "serial_number",
		.mode = S_IRUGO,
	},
	.show = aac_show_serial_number,
};
static struct device_attribute aac_max_channel = {
	.attr = {
		.name = "max_channel",
		.mode = S_IRUGO,
	},
	.show = aac_show_max_channel,
};
static struct device_attribute aac_max_id = {
	.attr = {
		.name = "max_id",
		.mode = S_IRUGO,
	},
	.show = aac_show_max_id,
};
/* "reset_host" is the only writable attribute (store triggers a reset). */
static struct device_attribute aac_reset = {
	.attr = {
		.name = "reset_host",
		.mode = S_IWUSR|S_IRUGO,
	},
	.store = aac_store_reset_adapter,
	.show = aac_show_reset_adapter,
};

/* NULL-terminated list wired into aac_driver_template.shost_attrs. */
static struct device_attribute *aac_attrs[] = {
	&aac_model,
	&aac_vendor,
	&aac_flags,
	&aac_kernel_version,
	&aac_monitor_version,
	&aac_bios_version,
	&aac_lld_version,
	&aac_serial_number,
	&aac_max_channel,
	&aac_max_id,
	&aac_reset,
	NULL
};
/* Exported helper: render the adapter serial number via the sysfs show. */
ssize_t aac_get_serial_number(struct device *device, char *buf)
{
	return aac_show_serial_number(device, &aac_serial_number, buf);
}
/* file_operations for the "aac" management character device. */
static const struct file_operations aac_cfg_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= aac_cfg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= aac_compat_cfg_ioctl,
#endif
	.open		= aac_cfg_open,
	.llseek		= noop_llseek,
};
/* SCSI host template shared by all aacraid-managed adapters. */
static struct scsi_host_template aac_driver_template = {
	.module				= THIS_MODULE,
	.name				= "AAC",
	.proc_name			= AAC_DRIVERNAME,
	.info				= aac_info,
	.ioctl				= aac_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl			= aac_compat_ioctl,
#endif
	.queuecommand			= aac_queuecommand,
	.bios_param			= aac_biosparm,
	.shost_attrs			= aac_attrs,
	.slave_configure		= aac_slave_configure,
	.change_queue_depth		= aac_change_queue_depth,
	.sdev_attrs			= aac_dev_attrs,
	.eh_abort_handler		= aac_eh_abort,
	.eh_device_reset_handler	= aac_eh_dev_reset,
	.eh_target_reset_handler	= aac_eh_target_reset,
	.eh_bus_reset_handler		= aac_eh_bus_reset,
	.eh_host_reset_handler		= aac_eh_host_reset,
	.can_queue			= AAC_NUM_IO_FIB,
	.this_id			= MAXIMUM_NUM_CONTAINERS,
	.sg_tablesize			= 16,
	.max_sectors			= 128,
/* cmd_per_lun may not exceed the number of I/O fibs. */
#if (AAC_NUM_IO_FIB > 256)
	.cmd_per_lun			= 256,
#else
	.cmd_per_lun			= AAC_NUM_IO_FIB,
#endif
	.use_clustering			= ENABLE_CLUSTERING,
	.emulated			= 1,
	.no_write_same			= 1,
};
  1364. static void __aac_shutdown(struct aac_dev * aac)
  1365. {
  1366. int i;
  1367. mutex_lock(&aac->ioctl_mutex);
  1368. aac->adapter_shutdown = 1;
  1369. mutex_unlock(&aac->ioctl_mutex);
  1370. if (aac->aif_thread) {
  1371. int i;
  1372. /* Clear out events first */
  1373. for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) {
  1374. struct fib *fib = &aac->fibs[i];
  1375. if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
  1376. (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
  1377. up(&fib->event_wait);
  1378. }
  1379. kthread_stop(aac->thread);
  1380. aac->thread = NULL;
  1381. }
  1382. aac_send_shutdown(aac);
  1383. aac_adapter_disable_int(aac);
  1384. if (aac_is_src(aac)) {
  1385. if (aac->max_msix > 1) {
  1386. for (i = 0; i < aac->max_msix; i++) {
  1387. free_irq(pci_irq_vector(aac->pdev, i),
  1388. &(aac->aac_msix[i]));
  1389. }
  1390. } else {
  1391. free_irq(aac->pdev->irq,
  1392. &(aac->aac_msix[0]));
  1393. }
  1394. } else {
  1395. free_irq(aac->pdev->irq, aac);
  1396. }
  1397. if (aac->msi)
  1398. pci_disable_msi(aac->pdev);
  1399. else if (aac->max_msix > 1)
  1400. pci_disable_msix(aac->pdev);
  1401. }
  1402. static void aac_init_char(void)
  1403. {
  1404. aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops);
  1405. if (aac_cfg_major < 0) {
  1406. pr_err("aacraid: unable to register \"aac\" device.\n");
  1407. }
  1408. }
/*
 * aac_probe_one - PCI probe callback: bring up one aacraid adapter
 * @pdev: PCI device being probed
 * @id:   matching table entry; ->driver_data indexes aac_drivers[]
 *
 * Assigns a unique adapter id, configures DMA masks, allocates the
 * Scsi_Host and FIB array, runs the board-specific init routine,
 * starts the command thread, queries the firmware for limits and
 * containers, and finally registers the host with the SCSI midlayer.
 * Cleanup on failure unwinds in reverse order via the goto chain.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned index = id->driver_data;
	struct Scsi_Host *shost;
	struct aac_dev *aac;
	struct list_head *insert = &aac_devices;
	int error = -ENODEV;
	int unique_id = 0;
	u64 dmamask;
	int mask_bits = 0;
	extern int aac_sync_mode;

	/*
	 * Only series 7 needs freset.
	 */
	if (pdev->device == PMC_DEVICE_S7)
		pdev->needs_freset = 1;

	/* Walk the id-sorted adapter list to find the lowest free id
	 * and the position to insert this adapter at. */
	list_for_each_entry(aac, &aac_devices, entry) {
		if (aac->id > unique_id)
			break;
		insert = &aac->entry;
		unique_id++;
	}

	/* Disable ASPM link power management on this device.
	 * NOTE(review): presumably the controller misbehaves under
	 * L0s/L1/clock-PM — confirm against hardware errata. */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	error = pci_enable_device(pdev);
	if (error)
		goto out;
	error = -ENODEV;

	/* Non-SRC boards are limited to a 32-bit streaming DMA mask. */
	if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (error) {
			dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed");
			goto out_disable_pdev;
		}
	}

	/*
	 * If the quirk31 bit is set, the adapter needs adapter
	 * to driver communication memory to be allocated below 2gig
	 */
	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) {
		dmamask = DMA_BIT_MASK(31);
		mask_bits = 31;
	} else {
		dmamask = DMA_BIT_MASK(32);
		mask_bits = 32;
	}

	error = pci_set_consistent_dma_mask(pdev, dmamask);
	if (error) {
		dev_err(&pdev->dev, "PCI %d B consistent dma mask set failed\n"
				, mask_bits);
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
	if (!shost)
		goto out_disable_pdev;

	shost->irq = pdev->irq;
	shost->unique_id = unique_id;
	shost->max_cmd_len = 16;
	shost->use_cmd_list = 1;

	/* Re-register the char device if a prior remove tore it down. */
	if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT)
		aac_init_char();

	aac = (struct aac_dev *)shost->hostdata;
	aac->base_start = pci_resource_start(pdev, 0);
	aac->scsi_host_ptr = shost;
	aac->pdev = pdev;
	aac->name = aac_driver_template.name;
	aac->id = shost->unique_id;
	aac->cardtype = index;
	INIT_LIST_HEAD(&aac->entry);

	if (aac_reset_devices || reset_devices)
		aac->init_reset = true;

	/* One FIB per queued command plus the management FIB pool. */
	aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
			    sizeof(struct fib),
			    GFP_KERNEL);
	if (!aac->fibs)
		goto out_free_host;

	spin_lock_init(&aac->fib_lock);
	mutex_init(&aac->ioctl_mutex);
	mutex_init(&aac->scan_mutex);
	INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);

	/*
	 * Map in the registers from the adapter.
	 */
	aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
	if ((*aac_drivers[index].init)(aac)) {
		error = -ENODEV;
		goto out_unmap;
	}

	if (aac->sync_mode) {
		if (aac_sync_mode)
			printk(KERN_INFO "%s%d: Sync. mode enforced "
				"by driver parameter. This will cause "
				"a significant performance decrease!\n",
				aac->name,
				aac->id);
		else
			printk(KERN_INFO "%s%d: Async. mode not supported "
				"by current driver, sync. mode enforced."
				"\nPlease update driver to get full performance.\n",
				aac->name,
				aac->id);
	}

	/*
	 * Start any kernel threads needed
	 */
	aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
	if (IS_ERR(aac->thread)) {
		printk(KERN_ERR "aacraid: Unable to create command thread.\n");
		error = PTR_ERR(aac->thread);
		aac->thread = NULL;
		goto out_deinit;
	}

	aac->maximum_num_channels = aac_drivers[index].channels;
	error = aac_get_adapter_info(aac);
	if (error < 0)
		goto out_deinit;

	/*
	 * Lets override negotiations and drop the maximum SG limit to 34
	 */
	if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
			(shost->sg_tablesize > 34)) {
		shost->sg_tablesize = 34;
		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
	}

	if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
			(shost->sg_tablesize > 17)) {
		shost->sg_tablesize = 17;
		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
	}

	error = pci_set_dma_max_seg_size(pdev,
		(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
			(shost->max_sectors << 9) : 65536);
	if (error)
		goto out_deinit;

	/*
	 * Firmware printf works only with older firmware.
	 */
	if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
		aac->printf_enabled = 1;
	else
		aac->printf_enabled = 0;

	/*
	 * max channel will be the physical channels plus 1 virtual channel
	 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL)
	 * physical channels are address by their actual physical number+1
	 */
	if (aac->nondasd_support || expose_physicals || aac->jbod)
		shost->max_channel = aac->maximum_num_channels;
	else
		shost->max_channel = 0;

	aac_get_config_status(aac, 0);
	aac_get_containers(aac);
	list_add(&aac->entry, insert);

	/* max_id covers whichever is largest: containers, physicals,
	 * or the compile-time minimum. */
	shost->max_id = aac->maximum_num_containers;
	if (shost->max_id < aac->maximum_num_physicals)
		shost->max_id = aac->maximum_num_physicals;
	if (shost->max_id < MAXIMUM_NUM_CONTAINERS)
		shost->max_id = MAXIMUM_NUM_CONTAINERS;
	else
		shost->this_id = shost->max_id;

	/* NOTE(review): looks like this kicks a synthetic interrupt to
	 * flush pending AIFs on legacy-firmware SRC boards — confirm. */
	if (!aac->sa_firmware && aac_drivers[index].quirks & AAC_QUIRK_SRC)
		aac_intr_normal(aac, 0, 2, 0, NULL);

	/*
	 * dmb - we may need to move the setting of these parms somewhere else once
	 * we get a fib that can report the actual numbers
	 */
	shost->max_lun = AAC_MAX_LUN;

	pci_set_drvdata(pdev, shost);

	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_deinit;

	aac_scan_host(aac);

	pci_enable_pcie_error_reporting(pdev);
	pci_save_state(pdev);

	return 0;

 out_deinit:
	__aac_shutdown(aac);
 out_unmap:
	aac_fib_map_free(aac);
	if (aac->comm_addr)
		dma_free_coherent(&aac->pdev->dev, aac->comm_size,
				  aac->comm_addr, aac->comm_phys);
	kfree(aac->queues);
	aac_adapter_ioremap(aac, 0);
	kfree(aac->fibs);
	kfree(aac->fsa_dev);
 out_free_host:
	scsi_host_put(shost);
 out_disable_pdev:
	pci_disable_device(pdev);
 out:
	return error;
}
/*
 * aac_release_resources - mask adapter interrupts and free its IRQs
 * @aac: adapter instance
 *
 * Inverse of aac_acquire_resources(); used on suspend and during PCI
 * error recovery.
 */
static void aac_release_resources(struct aac_dev *aac)
{
	aac_adapter_disable_int(aac);
	aac_free_irq(aac);
}
/*
 * aac_acquire_resources - re-arm a quiesced adapter (resume / EEH)
 * @dev: adapter instance
 *
 * Waits for firmware to report KERNEL_UP_AND_RUNNING, reprograms the
 * interrupt mode, re-registers IRQ handlers, re-assigns MSI-X vectors
 * to FIBs and restarts the adapter.
 *
 * Return: 0 on success, -1 when IRQ acquisition fails.
 */
static int aac_acquire_resources(struct aac_dev *dev)
{
	unsigned long status;
	/*
	 * First clear out all interrupts.  Then enable the one's that we
	 * can handle.
	 */
	/* 0xffffffff means the register read itself failed (dead link),
	 * so keep polling in that case too. */
	while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)
		|| status == 0xffffffff)
		msleep(20);

	aac_adapter_disable_int(dev);
	aac_adapter_enable_int(dev);

	if (aac_is_src(dev))
		aac_define_int_mode(dev);

	if (dev->msi_enabled)
		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);

	if (aac_acquire_irq(dev))
		goto error_iounmap;

	/* NOTE(review): interrupts are enabled both before and after IRQ
	 * registration — appears intentional (mode setup vs. final
	 * unmask) but worth confirming. */
	aac_adapter_enable_int(dev);

	/*max msix may change after EEH
	 * Re-assign vectors to fibs
	 */
	aac_fib_vector_assign(dev);

	if (!dev->sync_mode) {
		/* After EEH recovery or suspend resume, max_msix count
		 * may change, therefore updating in init as well.
		 */
		dev->init->r7.no_of_msix_vectors = cpu_to_le32(dev->max_msix);
		aac_adapter_start(dev);
	}
	return 0;

error_iounmap:
	return -1;
}
  1642. #if (defined(CONFIG_PM))
/*
 * aac_suspend - legacy PM suspend callback
 * @pdev:  PCI device being suspended
 * @state: target system sleep state
 *
 * Blocks SCSI requests, cancels the rescan worker, shuts the firmware
 * down, releases IRQs, then saves PCI state and powers the device
 * down to the state chosen for @state.  Always returns 0.
 */
static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;

	scsi_block_requests(shost);
	aac_cancel_safw_rescan_worker(aac);
	aac_send_shutdown(aac);

	aac_release_resources(aac);

	pci_set_drvdata(pdev, shost);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
/*
 * aac_resume - legacy PM resume callback
 * @pdev: PCI device being resumed
 *
 * Restores PCI state, re-enables the device and re-acquires adapter
 * resources, then clears the shutdown flag set by aac_send_shutdown()
 * so ioctls flow again and unblocks SCSI requests.
 *
 * Return: 0 on success, -ENODEV on failure.
 */
static int aac_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
	int r;

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);
	r = pci_enable_device(pdev);

	if (r)
		goto fail_device;

	pci_set_master(pdev);
	if (aac_acquire_resources(aac))
		goto fail_device;
	/*
	* reset this flag to unblock ioctl() as it was set at
	* aac_send_shutdown() to block ioctls from upperlayer
	*/
	aac->adapter_shutdown = 0;
	scsi_unblock_requests(shost);

	return 0;

fail_device:
	printk(KERN_INFO "%s%d: resume failed.\n", aac->name, aac->id);
	/* NOTE(review): dropping the host reference here while the host
	 * is still registered looks suspicious — verify this teardown
	 * path against aac_remove_one(). */
	scsi_host_put(shost);
	pci_disable_device(pdev);
	return -ENODEV;
}
  1684. #endif
  1685. static void aac_shutdown(struct pci_dev *dev)
  1686. {
  1687. struct Scsi_Host *shost = pci_get_drvdata(dev);
  1688. scsi_block_requests(shost);
  1689. __aac_shutdown((struct aac_dev *)shost->hostdata);
  1690. }
/*
 * aac_remove_one - PCI remove callback: tear down one adapter
 * @pdev: PCI device being removed
 *
 * Unregisters the SCSI host, shuts the adapter down, frees FIB maps,
 * communication area, queues and per-adapter allocations, then drops
 * the host reference and disables the PCI device.  When the last
 * adapter goes away the "aac" char device is unregistered and marked
 * for re-init on the next probe.
 */
static void aac_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;

	aac_cancel_safw_rescan_worker(aac);
	scsi_remove_host(shost);

	__aac_shutdown(aac);
	aac_fib_map_free(aac);
	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
			  aac->comm_phys);
	kfree(aac->queues);

	aac_adapter_ioremap(aac, 0);

	kfree(aac->fibs);
	kfree(aac->fsa_dev);

	list_del(&aac->entry);
	scsi_host_put(shost);
	pci_disable_device(pdev);
	if (list_empty(&aac_devices)) {
		unregister_chrdev(aac_cfg_major, "aac");
		aac_cfg_major = AAC_CHARDEV_NEEDS_REINIT;
	}
}
  1713. static void aac_flush_ios(struct aac_dev *aac)
  1714. {
  1715. int i;
  1716. struct scsi_cmnd *cmd;
  1717. for (i = 0; i < aac->scsi_host_ptr->can_queue; i++) {
  1718. cmd = (struct scsi_cmnd *)aac->fibs[i].callback_data;
  1719. if (cmd && (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) {
  1720. scsi_dma_unmap(cmd);
  1721. if (aac->handle_pci_error)
  1722. cmd->result = DID_NO_CONNECT << 16;
  1723. else
  1724. cmd->result = DID_RESET << 16;
  1725. cmd->scsi_done(cmd);
  1726. }
  1727. }
  1728. }
/*
 * aac_pci_error_detected - AER callback: a PCI channel error occurred
 * @pdev:  affected PCI device
 * @error: channel state reported by the AER core
 *
 * io_normal: recoverable in place.  io_frozen: fail all in-flight I/O,
 * release resources and request a slot reset.  perm_failure: fail I/O
 * and disconnect the device.
 */
static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
					enum pci_channel_state error)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct aac_dev *aac = shost_priv(shost);

	dev_err(&pdev->dev, "aacraid: PCI error detected %x\n", error);

	switch (error) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		aac->handle_pci_error = 1;

		scsi_block_requests(aac->scsi_host_ptr);
		aac_cancel_safw_rescan_worker(aac);
		aac_flush_ios(aac);
		aac_release_resources(aac);

		pci_disable_pcie_error_reporting(pdev);
		aac_adapter_ioremap(aac, 0);

		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		aac->handle_pci_error = 1;

		aac_flush_ios(aac);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Unknown state: fall back to requesting a reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * aac_pci_mmio_enabled - AER callback: MMIO is accessible again
 *
 * No in-place recovery is attempted; always escalate to a slot reset.
 */
static pci_ers_result_t aac_pci_mmio_enabled(struct pci_dev *pdev)
{
	dev_err(&pdev->dev, "aacraid: PCI error - mmio enabled\n");
	return PCI_ERS_RESULT_NEED_RESET;
}
/*
 * aac_pci_slot_reset - AER callback: the slot has been reset
 * @pdev: PCI device that was reset
 *
 * Restores config space and re-enables the device; the actual adapter
 * re-initialisation happens later in aac_pci_resume().
 */
static pci_ers_result_t aac_pci_slot_reset(struct pci_dev *pdev)
{
	dev_err(&pdev->dev, "aacraid: PCI error - slot reset\n");
	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		dev_warn(&pdev->dev,
			"aacraid: failed to enable slave\n");
		goto fail_device;
	}

	pci_set_master(pdev);

	/* NOTE(review): pci_enable_device() above already enabled the
	 * device; this second, mem-only enable looks redundant —
	 * confirm whether one of the two can be dropped. */
	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev, "pci_enable_device_mem failed\n");
		goto fail_device;
	}

	return PCI_ERS_RESULT_RECOVERED;

fail_device:
	dev_err(&pdev->dev, "aacraid: PCI error - slot reset failed\n");
	return PCI_ERS_RESULT_DISCONNECT;
}
/*
 * aac_pci_resume - AER callback: resume normal operation after reset
 * @pdev: recovered PCI device
 *
 * Re-maps adapter registers (falling back to the minimal producer
 * interface if the full mapping fails), re-acquires IRQ resources,
 * clears the shutdown/error flags, brings offlined devices back to
 * RUNNING and restarts SCSI traffic.
 */
static void aac_pci_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct scsi_device *sdev = NULL;
	struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);

	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (aac_adapter_ioremap(aac, aac->base_size)) {

		dev_err(&pdev->dev, "aacraid: ioremap failed\n");
		/* remap failed, go back ... */
		aac->comm_interface = AAC_COMM_PRODUCER;
		if (aac_adapter_ioremap(aac, AAC_MIN_FOOTPRINT_SIZE)) {
			dev_warn(&pdev->dev,
				"aacraid: unable to map adapter.\n");

			return;
		}
	}

	/* NOTE(review): 10 s magic settle delay — presumably firmware
	 * boot time after the slot reset; confirm with vendor docs. */
	msleep(10000);

	aac_acquire_resources(aac);

	/*
	 * reset this flag to unblock ioctl() as it was set
	 * at aac_send_shutdown() to block ioctls from upperlayer
	 */
	aac->adapter_shutdown = 0;
	aac->handle_pci_error = 0;

	/* Bring any devices offlined by the error back online. */
	shost_for_each_device(sdev, shost)
		if (sdev->sdev_state == SDEV_OFFLINE)
			sdev->sdev_state = SDEV_RUNNING;
	scsi_unblock_requests(aac->scsi_host_ptr);
	aac_scan_host(aac);
	pci_save_state(pdev);

	dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
}
  1810. static struct pci_error_handlers aac_pci_err_handler = {
  1811. .error_detected = aac_pci_error_detected,
  1812. .mmio_enabled = aac_pci_mmio_enabled,
  1813. .slot_reset = aac_pci_slot_reset,
  1814. .resume = aac_pci_resume,
  1815. };
/*
 * PCI driver registration for all supported Adaptec/PMC adapters.
 * Legacy suspend/resume callbacks are only provided when CONFIG_PM
 * is enabled.
 */
static struct pci_driver aac_pci_driver = {
	.name		= AAC_DRIVERNAME,
	.id_table	= aac_pci_tbl,
	.probe		= aac_probe_one,
	.remove		= aac_remove_one,
#if (defined(CONFIG_PM))
	.suspend	= aac_suspend,
	.resume		= aac_resume,
#endif
	.shutdown	= aac_shutdown,
	.err_handler	= &aac_pci_err_handler,
};
/*
 * aac_init - module entry point
 *
 * Registers the PCI driver, then the management char device.  A char
 * device registration failure is only logged by aac_init_char(); the
 * module still loads so SCSI functionality remains available.
 *
 * Return: 0 on success, negative errno if PCI registration fails.
 */
static int __init aac_init(void)
{
	int error;

	printk(KERN_INFO "Adaptec %s driver %s\n",
	  AAC_DRIVERNAME, aac_driver_version);

	error = pci_register_driver(&aac_pci_driver);
	if (error < 0)
		return error;

	aac_init_char();

	return 0;
}
  1839. static void __exit aac_exit(void)
  1840. {
  1841. if (aac_cfg_major > -1)
  1842. unregister_chrdev(aac_cfg_major, "aac");
  1843. pci_unregister_driver(&aac_pci_driver);
  1844. }
/* Hook the driver's entry/exit routines into the module loader. */
module_init(aac_init);
module_exit(aac_exit);