core.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142
  1. /*
  2. * Filename: core.c
  3. *
  4. *
  5. * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
  6. * Philip Kelleher <pjk1939@linux.vnet.ibm.com>
  7. *
  8. * (C) Copyright 2013 IBM Corporation
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License as
  12. * published by the Free Software Foundation; either version 2 of the
  13. * License, or (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful, but
  16. * WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License
  21. * along with this program; if not, write to the Free Software Foundation,
  22. * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  23. */
  24. #include <linux/kernel.h>
  25. #include <linux/init.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/module.h>
  28. #include <linux/pci.h>
  29. #include <linux/reboot.h>
  30. #include <linux/slab.h>
  31. #include <linux/bitops.h>
  32. #include <linux/delay.h>
  33. #include <linux/debugfs.h>
  34. #include <linux/seq_file.h>
  35. #include <linux/genhd.h>
  36. #include <linux/idr.h>
  37. #include "rsxx_priv.h"
  38. #include "rsxx_cfg.h"
/* Default for force_legacy: use MSI interrupts, not legacy INTx. */
#define NO_LEGACY 0
/* Max time (in seconds) probe will wait for card startup when sync_start is set. */
#define SYNC_START_TIMEOUT (10 * 60) /* 10 minutes */

MODULE_DESCRIPTION("IBM Flash Adapter 900GB Full Height Device Driver");
MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);

/* Module parameter: non-zero forces legacy (INTx) PCI interrupts instead of MSI. */
static unsigned int force_legacy = NO_LEGACY;
module_param(force_legacy, uint, 0444);
MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");

/* Module parameter: when set (default), probe blocks until card startup completes. */
static unsigned int sync_start = 1;
module_param(sync_start, uint, 0444);
MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
"until the card startup has completed.");

/* Allocator for unique per-card disk ids; rsxx_ida_lock serializes access. */
static DEFINE_IDA(rsxx_disk_ida);
static DEFINE_SPINLOCK(rsxx_ida_lock);
  54. /* --------------------Debugfs Setup ------------------- */
  55. static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p)
  56. {
  57. struct rsxx_cardinfo *card = m->private;
  58. seq_printf(m, "HWID 0x%08x\n",
  59. ioread32(card->regmap + HWID));
  60. seq_printf(m, "SCRATCH 0x%08x\n",
  61. ioread32(card->regmap + SCRATCH));
  62. seq_printf(m, "IER 0x%08x\n",
  63. ioread32(card->regmap + IER));
  64. seq_printf(m, "IPR 0x%08x\n",
  65. ioread32(card->regmap + IPR));
  66. seq_printf(m, "CREG_CMD 0x%08x\n",
  67. ioread32(card->regmap + CREG_CMD));
  68. seq_printf(m, "CREG_ADD 0x%08x\n",
  69. ioread32(card->regmap + CREG_ADD));
  70. seq_printf(m, "CREG_CNT 0x%08x\n",
  71. ioread32(card->regmap + CREG_CNT));
  72. seq_printf(m, "CREG_STAT 0x%08x\n",
  73. ioread32(card->regmap + CREG_STAT));
  74. seq_printf(m, "CREG_DATA0 0x%08x\n",
  75. ioread32(card->regmap + CREG_DATA0));
  76. seq_printf(m, "CREG_DATA1 0x%08x\n",
  77. ioread32(card->regmap + CREG_DATA1));
  78. seq_printf(m, "CREG_DATA2 0x%08x\n",
  79. ioread32(card->regmap + CREG_DATA2));
  80. seq_printf(m, "CREG_DATA3 0x%08x\n",
  81. ioread32(card->regmap + CREG_DATA3));
  82. seq_printf(m, "CREG_DATA4 0x%08x\n",
  83. ioread32(card->regmap + CREG_DATA4));
  84. seq_printf(m, "CREG_DATA5 0x%08x\n",
  85. ioread32(card->regmap + CREG_DATA5));
  86. seq_printf(m, "CREG_DATA6 0x%08x\n",
  87. ioread32(card->regmap + CREG_DATA6));
  88. seq_printf(m, "CREG_DATA7 0x%08x\n",
  89. ioread32(card->regmap + CREG_DATA7));
  90. seq_printf(m, "INTR_COAL 0x%08x\n",
  91. ioread32(card->regmap + INTR_COAL));
  92. seq_printf(m, "HW_ERROR 0x%08x\n",
  93. ioread32(card->regmap + HW_ERROR));
  94. seq_printf(m, "DEBUG0 0x%08x\n",
  95. ioread32(card->regmap + PCI_DEBUG0));
  96. seq_printf(m, "DEBUG1 0x%08x\n",
  97. ioread32(card->regmap + PCI_DEBUG1));
  98. seq_printf(m, "DEBUG2 0x%08x\n",
  99. ioread32(card->regmap + PCI_DEBUG2));
  100. seq_printf(m, "DEBUG3 0x%08x\n",
  101. ioread32(card->regmap + PCI_DEBUG3));
  102. seq_printf(m, "DEBUG4 0x%08x\n",
  103. ioread32(card->regmap + PCI_DEBUG4));
  104. seq_printf(m, "DEBUG5 0x%08x\n",
  105. ioread32(card->regmap + PCI_DEBUG5));
  106. seq_printf(m, "DEBUG6 0x%08x\n",
  107. ioread32(card->regmap + PCI_DEBUG6));
  108. seq_printf(m, "DEBUG7 0x%08x\n",
  109. ioread32(card->regmap + PCI_DEBUG7));
  110. seq_printf(m, "RECONFIG 0x%08x\n",
  111. ioread32(card->regmap + PCI_RECONFIG));
  112. return 0;
  113. }
/*
 * debugfs "stats" show routine: print the per-channel (DMA target)
 * error, issue and queue-depth counters for every controller.
 */
static int rsxx_attr_stats_show(struct seq_file *m, void *p)
{
	struct rsxx_cardinfo *card = m->private;
	int i;

	for (i = 0; i < card->n_targets; i++) {
		seq_printf(m, "Ctrl %d CRC Errors = %d\n",
			   i, card->ctrl[i].stats.crc_errors);
		seq_printf(m, "Ctrl %d Hard Errors = %d\n",
			   i, card->ctrl[i].stats.hard_errors);
		seq_printf(m, "Ctrl %d Soft Errors = %d\n",
			   i, card->ctrl[i].stats.soft_errors);
		seq_printf(m, "Ctrl %d Writes Issued = %d\n",
			   i, card->ctrl[i].stats.writes_issued);
		seq_printf(m, "Ctrl %d Writes Failed = %d\n",
			   i, card->ctrl[i].stats.writes_failed);
		seq_printf(m, "Ctrl %d Reads Issued = %d\n",
			   i, card->ctrl[i].stats.reads_issued);
		seq_printf(m, "Ctrl %d Reads Failed = %d\n",
			   i, card->ctrl[i].stats.reads_failed);
		seq_printf(m, "Ctrl %d Reads Retried = %d\n",
			   i, card->ctrl[i].stats.reads_retried);
		seq_printf(m, "Ctrl %d Discards Issued = %d\n",
			   i, card->ctrl[i].stats.discards_issued);
		seq_printf(m, "Ctrl %d Discards Failed = %d\n",
			   i, card->ctrl[i].stats.discards_failed);
		seq_printf(m, "Ctrl %d DMA SW Errors = %d\n",
			   i, card->ctrl[i].stats.dma_sw_err);
		seq_printf(m, "Ctrl %d DMA HW Faults = %d\n",
			   i, card->ctrl[i].stats.dma_hw_fault);
		seq_printf(m, "Ctrl %d DMAs Cancelled = %d\n",
			   i, card->ctrl[i].stats.dma_cancelled);
		seq_printf(m, "Ctrl %d SW Queue Depth = %d\n",
			   i, card->ctrl[i].stats.sw_q_depth);
		/* hw_q_depth is updated from interrupt context, hence atomic. */
		seq_printf(m, "Ctrl %d HW Queue Depth = %d\n",
			   i, atomic_read(&card->ctrl[i].stats.hw_q_depth));
	}

	return 0;
}
  152. static int rsxx_attr_stats_open(struct inode *inode, struct file *file)
  153. {
  154. return single_open(file, rsxx_attr_stats_show, inode->i_private);
  155. }
  156. static int rsxx_attr_pci_regs_open(struct inode *inode, struct file *file)
  157. {
  158. return single_open(file, rsxx_attr_pci_regs_show, inode->i_private);
  159. }
  160. static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
  161. size_t cnt, loff_t *ppos)
  162. {
  163. struct rsxx_cardinfo *card = file_inode(fp)->i_private;
  164. char *buf;
  165. ssize_t st;
  166. buf = kzalloc(cnt, GFP_KERNEL);
  167. if (!buf)
  168. return -ENOMEM;
  169. st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
  170. if (!st)
  171. st = copy_to_user(ubuf, buf, cnt);
  172. kfree(buf);
  173. if (st)
  174. return st;
  175. *ppos += cnt;
  176. return cnt;
  177. }
  178. static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf,
  179. size_t cnt, loff_t *ppos)
  180. {
  181. struct rsxx_cardinfo *card = file_inode(fp)->i_private;
  182. char *buf;
  183. ssize_t st;
  184. buf = memdup_user(ubuf, cnt);
  185. if (IS_ERR(buf))
  186. return PTR_ERR(buf);
  187. st = rsxx_creg_write(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
  188. kfree(buf);
  189. if (st)
  190. return st;
  191. *ppos += cnt;
  192. return cnt;
  193. }
/* File operations for the debugfs "cram" file (raw read/write). */
static const struct file_operations debugfs_cram_fops = {
	.owner = THIS_MODULE,
	.read = rsxx_cram_read,
	.write = rsxx_cram_write,
};

/* File operations for the read-only debugfs "stats" file (seq_file based). */
static const struct file_operations debugfs_stats_fops = {
	.owner = THIS_MODULE,
	.open = rsxx_attr_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* File operations for the read-only debugfs "pci_regs" file (seq_file based). */
static const struct file_operations debugfs_pci_regs_fops = {
	.owner = THIS_MODULE,
	.open = rsxx_attr_pci_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * Create the card's debugfs directory (named after its gendisk) holding
 * "stats" (RO), "pci_regs" (RO) and "cram" (RW).  On any failure, tear
 * down whatever was created and leave card->debugfs_dir NULL so teardown
 * paths can treat debugfs as absent.  Failure here is non-fatal.
 */
static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card)
{
	struct dentry *debugfs_stats;
	struct dentry *debugfs_pci_regs;
	struct dentry *debugfs_cram;

	card->debugfs_dir = debugfs_create_dir(card->gendisk->disk_name, NULL);
	if (IS_ERR_OR_NULL(card->debugfs_dir))
		goto failed_debugfs_dir;

	debugfs_stats = debugfs_create_file("stats", S_IRUGO,
					    card->debugfs_dir, card,
					    &debugfs_stats_fops);
	if (IS_ERR_OR_NULL(debugfs_stats))
		goto failed_debugfs_stats;

	debugfs_pci_regs = debugfs_create_file("pci_regs", S_IRUGO,
					       card->debugfs_dir, card,
					       &debugfs_pci_regs_fops);
	if (IS_ERR_OR_NULL(debugfs_pci_regs))
		goto failed_debugfs_pci_regs;

	debugfs_cram = debugfs_create_file("cram", S_IRUGO | S_IWUSR,
					   card->debugfs_dir, card,
					   &debugfs_cram_fops);
	if (IS_ERR_OR_NULL(debugfs_cram))
		goto failed_debugfs_cram;

	return;

	/* Unwind in reverse creation order. */
failed_debugfs_cram:
	debugfs_remove(debugfs_pci_regs);
failed_debugfs_pci_regs:
	debugfs_remove(debugfs_stats);
failed_debugfs_stats:
	debugfs_remove(card->debugfs_dir);
failed_debugfs_dir:
	card->debugfs_dir = NULL;
}
  246. /*----------------- Interrupt Control & Handling -------------------*/
  247. static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
  248. {
  249. card->isr_mask = 0;
  250. card->ier_mask = 0;
  251. }
  252. static void __enable_intr(unsigned int *mask, unsigned int intr)
  253. {
  254. *mask |= intr;
  255. }
  256. static void __disable_intr(unsigned int *mask, unsigned int intr)
  257. {
  258. *mask &= ~intr;
  259. }
  260. /*
  261. * NOTE: Disabling the IER will disable the hardware interrupt.
  262. * Disabling the ISR will disable the software handling of the ISR bit.
  263. *
  264. * Enable/Disable interrupt functions assume the card->irq_lock
  265. * is held by the caller.
  266. */
  267. void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
  268. {
  269. if (unlikely(card->halt) ||
  270. unlikely(card->eeh_state))
  271. return;
  272. __enable_intr(&card->ier_mask, intr);
  273. iowrite32(card->ier_mask, card->regmap + IER);
  274. }
  275. void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
  276. {
  277. if (unlikely(card->eeh_state))
  278. return;
  279. __disable_intr(&card->ier_mask, intr);
  280. iowrite32(card->ier_mask, card->regmap + IER);
  281. }
  282. void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
  283. unsigned int intr)
  284. {
  285. if (unlikely(card->halt) ||
  286. unlikely(card->eeh_state))
  287. return;
  288. __enable_intr(&card->isr_mask, intr);
  289. __enable_intr(&card->ier_mask, intr);
  290. iowrite32(card->ier_mask, card->regmap + IER);
  291. }
  292. void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
  293. unsigned int intr)
  294. {
  295. if (unlikely(card->eeh_state))
  296. return;
  297. __disable_intr(&card->isr_mask, intr);
  298. __disable_intr(&card->ier_mask, intr);
  299. iowrite32(card->ier_mask, card->regmap + IER);
  300. }
/*
 * Primary interrupt handler.  Under card->irq_lock, read and dispatch
 * the pending ISR bits: per-channel DMA completions, creg completions
 * and card events are all deferred to their workqueues.  Whenever a DMA
 * interrupt is masked here, the ISR is re-read so a completion racing
 * with the mask write is not lost.
 */
static irqreturn_t rsxx_isr(int irq, void *pdata)
{
	struct rsxx_cardinfo *card = pdata;
	unsigned int isr;
	int handled = 0;
	int reread_isr;
	int i;

	spin_lock(&card->irq_lock);

	do {
		reread_isr = 0;

		/* During EEH recovery the register space must not be touched. */
		if (unlikely(card->eeh_state))
			break;

		isr = ioread32(card->regmap + ISR);
		if (isr == 0xffffffff) {
			/*
			 * A few systems seem to have an intermittent issue
			 * where PCI reads return all Fs, but retrying the read
			 * a little later will return as expected.
			 */
			dev_info(CARD_TO_DEV(card),
				 "ISR = 0xFFFFFFFF, retrying later\n");
			break;
		}

		/* Only service sources we have software-enabled. */
		isr &= card->isr_mask;
		if (!isr)
			break;

		for (i = 0; i < card->n_targets; i++) {
			if (isr & CR_INTR_DMA(i)) {
				if (card->ier_mask & CR_INTR_DMA(i)) {
					/*
					 * Mask this channel until its done
					 * work runs, and force an ISR
					 * re-read to close the race window.
					 */
					rsxx_disable_ier(card, CR_INTR_DMA(i));
					reread_isr = 1;
				}
				queue_work(card->ctrl[i].done_wq,
					   &card->ctrl[i].dma_done_work);
				handled++;
			}
		}

		if (isr & CR_INTR_CREG) {
			queue_work(card->creg_ctrl.creg_wq,
				   &card->creg_ctrl.done_work);
			handled++;
		}

		if (isr & CR_INTR_EVENT) {
			queue_work(card->event_wq, &card->event_work);
			/*
			 * Event interrupt stays off until the event handler
			 * re-enables it after processing (see
			 * card_event_handler()).
			 */
			rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
			handled++;
		}
	} while (reread_isr);

	spin_unlock(&card->irq_lock);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
  352. /*----------------- Card Event Handler -------------------*/
  353. static const char * const rsxx_card_state_to_str(unsigned int state)
  354. {
  355. static const char * const state_strings[] = {
  356. "Unknown", "Shutdown", "Starting", "Formatting",
  357. "Uninitialized", "Good", "Shutting Down",
  358. "Fault", "Read Only Fault", "dStroying"
  359. };
  360. return state_strings[ffs(state)];
  361. }
/*
 * React to a hardware state change reported by the card: record the new
 * state and, if the card config is valid, grow the gendisk capacity for
 * usable states or zero it for everything else.  The switch relies on
 * deliberate fallthrough in two places.
 */
static void card_state_change(struct rsxx_cardinfo *card,
			      unsigned int new_state)
{
	int st;

	dev_info(CARD_TO_DEV(card),
		 "card state change detected.(%s -> %s)\n",
		 rsxx_card_state_to_str(card->state),
		 rsxx_card_state_to_str(new_state));

	card->state = new_state;

	/* Don't attach DMA interfaces if the card has an invalid config */
	if (!card->config_valid)
		return;

	switch (new_state) {
	case CARD_STATE_RD_ONLY_FAULT:
		dev_crit(CARD_TO_DEV(card),
			 "Hardware has entered read-only mode!\n");
		/*
		 * Fall through so the DMA devices can be attached and
		 * the user can attempt to pull off their data.
		 */
	case CARD_STATE_GOOD:
		st = rsxx_get_card_size8(card, &card->size8);
		if (st)
			dev_err(CARD_TO_DEV(card),
				"Failed attaching DMA devices\n");

		if (card->config_valid)
			set_capacity(card->gendisk, card->size8 >> 9);
		break;

	case CARD_STATE_FAULT:
		dev_crit(CARD_TO_DEV(card),
			 "Hardware Fault reported!\n");
		/* Fall through. */

	/* Everything else, detach DMA interface if it's attached. */
	case CARD_STATE_SHUTDOWN:
	case CARD_STATE_STARTING:
	case CARD_STATE_FORMATTING:
	case CARD_STATE_UNINITIALIZED:
	case CARD_STATE_SHUTTING_DOWN:
	/*
	 * dStroy is a term coined by marketing to represent the low level
	 * secure erase.
	 */
	case CARD_STATE_DSTROYING:
		set_capacity(card->gendisk, 0);
		break;
	}
}
/*
 * Workqueue handler for card event interrupts (queued by rsxx_isr()).
 * Re-enables the event interrupt, reads the current card state, applies
 * any state change, and drains the hardware log if one is pending.
 */
static void card_event_handler(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	unsigned int state;
	unsigned long flags;
	int st;

	card = container_of(work, struct rsxx_cardinfo, event_work);

	if (unlikely(card->halt))
		return;

	/*
	 * Enable the interrupt now to avoid any weird race conditions where a
	 * state change might occur while rsxx_get_card_state() is
	 * processing a returned creg cmd.
	 */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	st = rsxx_get_card_state(card, &state);
	if (st) {
		dev_info(CARD_TO_DEV(card),
			 "Failed reading state after event.\n");
		return;
	}

	if (card->state != state)
		card_state_change(card, state);

	/* A pending-log flag in the creg status means HW log data is ready. */
	if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}
  437. /*----------------- Card Operations -------------------*/
/*
 * Cleanly shut the card down: wait (up to 120s) for any in-progress
 * startup to finish, issue the shutdown command if the card isn't
 * already shutting down, then wait (up to 120s) for it to reach the
 * SHUTDOWN state.  Returns 0, a creg error code, or -ETIMEDOUT.
 */
static int card_shutdown(struct rsxx_cardinfo *card)
{
	unsigned int state;
	signed long start;
	const int timeout = msecs_to_jiffies(120000);
	int st;

	/* We can't issue a shutdown if the card is in a transition state */
	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state == CARD_STATE_STARTING &&
		 (jiffies - start < timeout));

	if (state == CARD_STATE_STARTING)
		return -ETIMEDOUT;

	/* Only issue a shutdown if we need to */
	if ((state != CARD_STATE_SHUTTING_DOWN) &&
	    (state != CARD_STATE_SHUTDOWN)) {
		st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
		if (st)
			return st;
	}

	/* Poll until the hardware confirms the shutdown completed. */
	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state != CARD_STATE_SHUTDOWN &&
		 (jiffies - start < timeout));

	if (state != CARD_STATE_SHUTDOWN)
		return -ETIMEDOUT;

	return 0;
}
/*
 * EEH "frozen" preparation before a slot reset: flag eeh_state, mask
 * interrupts, disable the PCI device, save in-flight DMAs and creg
 * commands for later replay, and free each channel's status/command DMA
 * buffers (rsxx_slot_reset() re-allocates them).  Returns 0 or the
 * error from saving issued DMAs.
 */
static int rsxx_eeh_frozen(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	int i;
	int st;

	dev_warn(&dev->dev, "IBM Flash Adapter PCI: preparing for slot reset.\n");

	card->eeh_state = 1;
	rsxx_mask_interrupts(card);

	/*
	 * We need to guarantee that the write for eeh_state and masking
	 * interrupts does not become reordered. This will prevent a possible
	 * race condition with the EEH code.
	 */
	wmb();

	pci_disable_device(dev);

	st = rsxx_eeh_save_issued_dmas(card);
	if (st)
		return st;

	rsxx_eeh_save_issued_creg(card);

	for (i = 0; i < card->n_targets; i++) {
		if (card->ctrl[i].status.buf)
			pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
					    card->ctrl[i].status.buf,
					    card->ctrl[i].status.dma_addr);
		if (card->ctrl[i].cmd.buf)
			pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
					    card->ctrl[i].cmd.buf,
					    card->ctrl[i].cmd.dma_addr);
	}

	return 0;
}
/*
 * Permanent EEH failure path: mark the card halted/frozen and cancel
 * every queued and issued DMA on all channels, completing them as
 * failed so upper layers don't hang on outstanding I/O.
 */
static void rsxx_eeh_failure(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	int i;
	int cnt = 0;

	dev_err(&dev->dev, "IBM Flash Adapter PCI: disabling failed card.\n");

	card->eeh_state = 1;
	card->halt = 1;

	for (i = 0; i < card->n_targets; i++) {
		/* Drop queued DMAs under queue_lock, then cancel issued ones. */
		spin_lock_bh(&card->ctrl[i].queue_lock);
		cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
					     &card->ctrl[i].queue,
					     COMPLETE_DMA);
		spin_unlock_bh(&card->ctrl[i].queue_lock);

		cnt += rsxx_dma_cancel(&card->ctrl[i]);

		if (cnt)
			dev_info(CARD_TO_DEV(card),
				 "Freed %d queued DMAs on channel %d\n",
				 cnt, card->ctrl[i].id);
	}
}
  524. static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
  525. {
  526. unsigned int status;
  527. int iter = 0;
  528. /* We need to wait for the hardware to reset */
  529. while (iter++ < 10) {
  530. status = ioread32(card->regmap + PCI_RECONFIG);
  531. if (status & RSXX_FLUSH_BUSY) {
  532. ssleep(1);
  533. continue;
  534. }
  535. if (status & RSXX_FLUSH_TIMEOUT)
  536. dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
  537. return 0;
  538. }
  539. /* Hardware failed resetting itself. */
  540. return -1;
  541. }
  542. static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
  543. enum pci_channel_state error)
  544. {
  545. int st;
  546. if (dev->revision < RSXX_EEH_SUPPORT)
  547. return PCI_ERS_RESULT_NONE;
  548. if (error == pci_channel_io_perm_failure) {
  549. rsxx_eeh_failure(dev);
  550. return PCI_ERS_RESULT_DISCONNECT;
  551. }
  552. st = rsxx_eeh_frozen(dev);
  553. if (st) {
  554. dev_err(&dev->dev, "Slot reset setup failed\n");
  555. rsxx_eeh_failure(dev);
  556. return PCI_ERS_RESULT_DISCONNECT;
  557. }
  558. return PCI_ERS_RESULT_NEED_RESET;
  559. }
/*
 * PCI slot-reset recovery callback: re-enable the device, wait for the
 * hardware flush to finish, rebuild the per-channel DMA queues and
 * buffers, re-apply the DMA configuration, re-enable interrupts and
 * kick the creg/DMA machinery to replay saved work.  On failure the
 * card is disabled via rsxx_eeh_failure().
 */
static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int i;
	int st;

	dev_warn(&dev->dev,
		 "IBM Flash Adapter PCI: recovering from slot reset.\n");

	st = pci_enable_device(dev);
	if (st)
		goto failed_hw_setup;

	pci_set_master(dev);

	st = rsxx_eeh_fifo_flush_poll(card);
	if (st)
		goto failed_hw_setup;

	rsxx_dma_queue_reset(card);

	for (i = 0; i < card->n_targets; i++) {
		st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
		if (st)
			goto failed_hw_buffers_init;
	}

	if (card->config_valid)
		rsxx_dma_configure(card);

	/* Clears the ISR register from spurious interrupts */
	st = ioread32(card->regmap + ISR);

	card->eeh_state = 0;

	spin_lock_irqsave(&card->irq_lock, flags);
	/*
	 * NOTE(review): this bitwise AND looks like it was meant to be a
	 * comparison (n_targets == RSXX_MAX_TARGETS) to pick the G vs C
	 * interrupt set — confirm against the hardware spec before changing.
	 */
	if (card->n_targets & RSXX_MAX_TARGETS)
		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
	else
		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	rsxx_kick_creg_queue(card);

	/* Restart DMA issue work on any channel with pending DMAs. */
	for (i = 0; i < card->n_targets; i++) {
		spin_lock(&card->ctrl[i].queue_lock);
		if (list_empty(&card->ctrl[i].queue)) {
			spin_unlock(&card->ctrl[i].queue_lock);
			continue;
		}
		spin_unlock(&card->ctrl[i].queue_lock);

		queue_work(card->ctrl[i].issue_wq,
			   &card->ctrl[i].issue_dma_work);
	}

	dev_info(&dev->dev, "IBM Flash Adapter PCI: recovery complete.\n");

	return PCI_ERS_RESULT_RECOVERED;

failed_hw_buffers_init:
	/* Free whatever per-channel buffers were successfully allocated. */
	for (i = 0; i < card->n_targets; i++) {
		if (card->ctrl[i].status.buf)
			pci_free_consistent(card->dev,
					    STATUS_BUFFER_SIZE8,
					    card->ctrl[i].status.buf,
					    card->ctrl[i].status.dma_addr);
		if (card->ctrl[i].cmd.buf)
			pci_free_consistent(card->dev,
					    COMMAND_BUFFER_SIZE8,
					    card->ctrl[i].cmd.buf,
					    card->ctrl[i].cmd.dma_addr);
	}
failed_hw_setup:
	rsxx_eeh_failure(dev);
	return PCI_ERS_RESULT_DISCONNECT;
}
  622. /*----------------- Driver Initialization & Setup -------------------*/
  623. /* Returns: 0 if the driver is compatible with the device
  624. -1 if the driver is NOT compatible with the device */
  625. static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
  626. {
  627. unsigned char pci_rev;
  628. pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
  629. if (pci_rev > RS70_PCI_REV_SUPPORTED)
  630. return -1;
  631. return 0;
  632. }
/*
 * PCI probe: bring up one card.  Allocates the cardinfo, reserves a
 * disk id, maps BAR0, wires up interrupts, initializes the creg and DMA
 * engines, creates the block device, optionally waits for card startup
 * (sync_start), then attaches the device and its debugfs entries.
 * Errors unwind through the goto chain in reverse setup order.
 */
static int rsxx_pci_probe(struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	struct rsxx_cardinfo *card;
	int st;
	unsigned int sync_timeout;

	dev_info(&dev->dev, "PCI-Flash SSD discovered\n");

	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	card->dev = dev;
	pci_set_drvdata(dev, card);

	/* Pre-fill the IDA and retry while allocation races (-EAGAIN). */
	do {
		if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) {
			st = -ENOMEM;
			goto failed_ida_get;
		}

		spin_lock(&rsxx_ida_lock);
		st = ida_get_new(&rsxx_disk_ida, &card->disk_id);
		spin_unlock(&rsxx_ida_lock);
	} while (st == -EAGAIN);

	if (st)
		goto failed_ida_get;

	st = pci_enable_device(dev);
	if (st)
		goto failed_enable;

	pci_set_master(dev);
	pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);

	st = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"No usable DMA configuration,aborting\n");
		goto failed_dma_mask;
	}

	st = pci_request_regions(dev, DRIVER_NAME);
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"Failed to request memory region\n");
		goto failed_request_regions;
	}

	if (pci_resource_len(dev, 0) == 0) {
		dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
		st = -ENOMEM;
		goto failed_iomap;
	}

	card->regmap = pci_iomap(dev, 0, 0);
	if (!card->regmap) {
		dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
		st = -ENOMEM;
		goto failed_iomap;
	}

	spin_lock_init(&card->irq_lock);
	card->halt = 0;
	card->eeh_state = 0;

	/* Start with all interrupts masked until setup completes. */
	spin_lock_irq(&card->irq_lock);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irq(&card->irq_lock);

	if (!force_legacy) {
		st = pci_enable_msi(dev);
		if (st)
			dev_warn(CARD_TO_DEV(card),
				 "Failed to enable MSI\n");
	}

	st = request_irq(dev->irq, rsxx_isr, IRQF_SHARED,
			 DRIVER_NAME, card);
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"Failed requesting IRQ%d\n", dev->irq);
		goto failed_irq;
	}

	/************* Setup Processor Command Interface *************/
	st = rsxx_creg_setup(card);
	if (st) {
		dev_err(CARD_TO_DEV(card), "Failed to setup creg interface.\n");
		goto failed_creg_setup;
	}

	spin_lock_irq(&card->irq_lock);
	rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
	spin_unlock_irq(&card->irq_lock);

	st = rsxx_compatibility_check(card);
	if (st) {
		dev_warn(CARD_TO_DEV(card),
			 "Incompatible driver detected. Please update the driver.\n");
		st = -EINVAL;
		goto failed_compatiblity_check;
	}

	/************* Load Card Config *************/
	/* A config load failure is non-fatal: config_valid gates DMA attach. */
	st = rsxx_load_config(card);
	if (st)
		dev_err(CARD_TO_DEV(card),
			"Failed loading card config\n");

	/************* Setup DMA Engine *************/
	st = rsxx_get_num_targets(card, &card->n_targets);
	if (st)
		dev_info(CARD_TO_DEV(card),
			 "Failed reading the number of DMA targets\n");

	card->ctrl = kzalloc(card->n_targets * sizeof(*card->ctrl), GFP_KERNEL);
	if (!card->ctrl) {
		st = -ENOMEM;
		goto failed_dma_setup;
	}

	st = rsxx_dma_setup(card);
	if (st) {
		dev_info(CARD_TO_DEV(card),
			 "Failed to setup DMA engine\n");
		goto failed_dma_setup;
	}

	/************* Setup Card Event Handler *************/
	card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
	if (!card->event_wq) {
		dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
		goto failed_event_handler;
	}

	INIT_WORK(&card->event_work, card_event_handler);

	st = rsxx_setup_dev(card);
	if (st)
		goto failed_create_dev;

	rsxx_get_card_state(card, &card->state);

	dev_info(CARD_TO_DEV(card),
		 "card state: %s\n",
		 rsxx_card_state_to_str(card->state));

	/*
	 * Now that the DMA Engine and devices have been setup,
	 * we can enable the event interrupt(it kicks off actions in
	 * those layers so we couldn't enable it right away.)
	 */
	spin_lock_irq(&card->irq_lock);
	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irq(&card->irq_lock);

	if (card->state == CARD_STATE_SHUTDOWN) {
		st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
		if (st)
			dev_crit(CARD_TO_DEV(card),
				 "Failed issuing card startup\n");
		if (sync_start) {
			/* Poll once a second until startup finishes or times out. */
			sync_timeout = SYNC_START_TIMEOUT;

			dev_info(CARD_TO_DEV(card),
				 "Waiting for card to startup\n");

			do {
				ssleep(1);
				sync_timeout--;

				rsxx_get_card_state(card, &card->state);
			} while (sync_timeout &&
				 (card->state == CARD_STATE_STARTING));

			if (card->state == CARD_STATE_STARTING) {
				dev_warn(CARD_TO_DEV(card),
					 "Card startup timed out\n");
				card->size8 = 0;
			} else {
				dev_info(CARD_TO_DEV(card),
					 "card state: %s\n",
					 rsxx_card_state_to_str(card->state));
				st = rsxx_get_card_size8(card, &card->size8);
				if (st)
					card->size8 = 0;
			}
		}
	} else if (card->state == CARD_STATE_GOOD ||
		   card->state == CARD_STATE_RD_ONLY_FAULT) {
		st = rsxx_get_card_size8(card, &card->size8);
		if (st)
			card->size8 = 0;
	}

	rsxx_attach_dev(card);

	/************* Setup Debugfs *************/
	rsxx_debugfs_dev_new(card);

	return 0;

	/* Error unwind: reverse of the setup order above. */
failed_create_dev:
	destroy_workqueue(card->event_wq);
	card->event_wq = NULL;
failed_event_handler:
	rsxx_dma_destroy(card);
failed_dma_setup:
failed_compatiblity_check:
	destroy_workqueue(card->creg_ctrl.creg_wq);
	card->creg_ctrl.creg_wq = NULL;
failed_creg_setup:
	spin_lock_irq(&card->irq_lock);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irq(&card->irq_lock);
	free_irq(dev->irq, card);
	if (!force_legacy)
		pci_disable_msi(dev);
failed_irq:
	pci_iounmap(dev, card->regmap);
failed_iomap:
	pci_release_regions(dev);
failed_request_regions:
failed_dma_mask:
	pci_disable_device(dev);
failed_enable:
	spin_lock(&rsxx_ida_lock);
	ida_remove(&rsxx_disk_ida, card->disk_id);
	spin_unlock(&rsxx_ida_lock);
failed_ida_get:
	kfree(card);

	return st;
}
/*
 * rsxx_pci_remove() - tear down a card on driver unbind / hot-remove.
 *
 * Teardown order matters here:
 *   1. detach the block device so no new I/O arrives,
 *   2. mask the per-target DMA interrupts,
 *   3. issue the card shutdown command,
 *   4. quiesce the event interrupt, then flush the event work item,
 *   5. destroy the dev and DMA layers, mask all remaining interrupts,
 *   6. release IRQ/MSI, creg, MMIO and PCI resources and free the card.
 */
static void rsxx_pci_remove(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int st;
	int i;

	/* Nothing to undo if probe never attached driver data. */
	if (!card)
		return;

	dev_info(CARD_TO_DEV(card),
		"Removing PCI-Flash SSD.\n");

	rsxx_detach_dev(card);

	/* Mask every per-target DMA interrupt before shutting the card down. */
	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	st = card_shutdown(card);
	if (st)
		dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");

	/* Sync outstanding event handlers. */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	/* Must happen after the event interrupt is masked above. */
	cancel_work_sync(&card->event_work);

	rsxx_destroy_dev(card);
	rsxx_dma_destroy(card);

	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	/* Prevent work_structs from re-queuing themselves. */
	card->halt = 1;

	debugfs_remove_recursive(card->debugfs_dir);

	free_irq(dev->irq, card);

	/* MSI was only enabled when legacy interrupts were not forced. */
	if (!force_legacy)
		pci_disable_msi(dev);

	rsxx_creg_destroy(card);
	pci_iounmap(dev, card->regmap);

	pci_disable_device(dev);
	pci_release_regions(dev);

	kfree(card);
}
/*
 * rsxx_pci_suspend() - legacy PCI PM suspend hook.
 *
 * Suspend is deliberately unsupported; returning an error stops the PM
 * core from suspending a device this driver cannot restore.
 */
static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	/* We don't support suspend at this time. */
	return -ENOSYS;
}
  877. static void rsxx_pci_shutdown(struct pci_dev *dev)
  878. {
  879. struct rsxx_cardinfo *card = pci_get_drvdata(dev);
  880. unsigned long flags;
  881. int i;
  882. if (!card)
  883. return;
  884. dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");
  885. rsxx_detach_dev(card);
  886. for (i = 0; i < card->n_targets; i++) {
  887. spin_lock_irqsave(&card->irq_lock, flags);
  888. rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
  889. spin_unlock_irqrestore(&card->irq_lock, flags);
  890. }
  891. card_shutdown(card);
  892. }
/* PCI AER callbacks: report detected errors and attempt slot recovery. */
static const struct pci_error_handlers rsxx_err_handler = {
	.error_detected = rsxx_error_detected,
	.slot_reset = rsxx_slot_reset,
};
/* PCI IDs this driver binds to: IBM FS70/FS80 PCIe flash devices. */
static const struct pci_device_id rsxx_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
	{0,},	/* sentinel: terminates the table */
};

/* Export the ID table so module autoloading can match these devices. */
MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);
/* PCI driver ops: binds probe/remove/suspend/shutdown and AER handlers. */
static struct pci_driver rsxx_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = rsxx_pci_ids,
	.probe = rsxx_pci_probe,
	.remove = rsxx_pci_remove,
	.suspend = rsxx_pci_suspend,
	.shutdown = rsxx_pci_shutdown,
	.err_handler = &rsxx_err_handler,
};
  912. static int __init rsxx_core_init(void)
  913. {
  914. int st;
  915. st = rsxx_dev_init();
  916. if (st)
  917. return st;
  918. st = rsxx_dma_init();
  919. if (st)
  920. goto dma_init_failed;
  921. st = rsxx_creg_init();
  922. if (st)
  923. goto creg_init_failed;
  924. return pci_register_driver(&rsxx_pci_driver);
  925. creg_init_failed:
  926. rsxx_dma_cleanup();
  927. dma_init_failed:
  928. rsxx_dev_cleanup();
  929. return st;
  930. }
/*
 * rsxx_core_cleanup() - module exit point.
 *
 * Unregisters the PCI driver first (which detaches any bound cards),
 * then tears down the creg, DMA and dev sub-modules in reverse order
 * of their initialization in rsxx_core_init().
 */
static void __exit rsxx_core_cleanup(void)
{
	pci_unregister_driver(&rsxx_pci_driver);
	rsxx_creg_cleanup();
	rsxx_dma_cleanup();
	rsxx_dev_cleanup();
}
/* Register module load/unload entry points with the kernel. */
module_init(rsxx_core_init);
module_exit(rsxx_core_cleanup);