
/*
 * Filename: core.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <linux/genhd.h>
#include <linux/idr.h>

#include "rsxx_priv.h"
#include "rsxx_cfg.h"

#define NO_LEGACY 0
#define SYNC_START_TIMEOUT (10 * 60) /* 10 minutes */

MODULE_DESCRIPTION("IBM Flash Adapter 900GB Full Height Device Driver");
MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);

static unsigned int force_legacy = NO_LEGACY;
module_param(force_legacy, uint, 0444);
MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");

static unsigned int sync_start = 1;
module_param(sync_start, uint, 0444);
MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
			     "until the card startup has completed.");

static DEFINE_IDA(rsxx_disk_ida);
static DEFINE_SPINLOCK(rsxx_ida_lock);

/* --------------------Debugfs Setup ------------------- */

static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p)
{
	struct rsxx_cardinfo *card = m->private;

	seq_printf(m, "HWID 0x%08x\n", ioread32(card->regmap + HWID));
	seq_printf(m, "SCRATCH 0x%08x\n", ioread32(card->regmap + SCRATCH));
	seq_printf(m, "IER 0x%08x\n", ioread32(card->regmap + IER));
	seq_printf(m, "IPR 0x%08x\n", ioread32(card->regmap + IPR));
	seq_printf(m, "CREG_CMD 0x%08x\n", ioread32(card->regmap + CREG_CMD));
	seq_printf(m, "CREG_ADD 0x%08x\n", ioread32(card->regmap + CREG_ADD));
	seq_printf(m, "CREG_CNT 0x%08x\n", ioread32(card->regmap + CREG_CNT));
	seq_printf(m, "CREG_STAT 0x%08x\n", ioread32(card->regmap + CREG_STAT));
	seq_printf(m, "CREG_DATA0 0x%08x\n", ioread32(card->regmap + CREG_DATA0));
	seq_printf(m, "CREG_DATA1 0x%08x\n", ioread32(card->regmap + CREG_DATA1));
	seq_printf(m, "CREG_DATA2 0x%08x\n", ioread32(card->regmap + CREG_DATA2));
	seq_printf(m, "CREG_DATA3 0x%08x\n", ioread32(card->regmap + CREG_DATA3));
	seq_printf(m, "CREG_DATA4 0x%08x\n", ioread32(card->regmap + CREG_DATA4));
	seq_printf(m, "CREG_DATA5 0x%08x\n", ioread32(card->regmap + CREG_DATA5));
	seq_printf(m, "CREG_DATA6 0x%08x\n", ioread32(card->regmap + CREG_DATA6));
	seq_printf(m, "CREG_DATA7 0x%08x\n", ioread32(card->regmap + CREG_DATA7));
	seq_printf(m, "INTR_COAL 0x%08x\n", ioread32(card->regmap + INTR_COAL));
	seq_printf(m, "HW_ERROR 0x%08x\n", ioread32(card->regmap + HW_ERROR));
	seq_printf(m, "DEBUG0 0x%08x\n", ioread32(card->regmap + PCI_DEBUG0));
	seq_printf(m, "DEBUG1 0x%08x\n", ioread32(card->regmap + PCI_DEBUG1));
	seq_printf(m, "DEBUG2 0x%08x\n", ioread32(card->regmap + PCI_DEBUG2));
	seq_printf(m, "DEBUG3 0x%08x\n", ioread32(card->regmap + PCI_DEBUG3));
	seq_printf(m, "DEBUG4 0x%08x\n", ioread32(card->regmap + PCI_DEBUG4));
	seq_printf(m, "DEBUG5 0x%08x\n", ioread32(card->regmap + PCI_DEBUG5));
	seq_printf(m, "DEBUG6 0x%08x\n", ioread32(card->regmap + PCI_DEBUG6));
	seq_printf(m, "DEBUG7 0x%08x\n", ioread32(card->regmap + PCI_DEBUG7));
	seq_printf(m, "RECONFIG 0x%08x\n", ioread32(card->regmap + PCI_RECONFIG));

	return 0;
}

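/* Print the per-channel DMA statistics kept in each controller's stats block. */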
static int rsxx_attr_stats_show(struct seq_file *m, void *p)
{
	struct rsxx_cardinfo *card = m->private;
	int i;

	for (i = 0; i < card->n_targets; i++) {
		seq_printf(m, "Ctrl %d CRC Errors = %d\n",
			   i, card->ctrl[i].stats.crc_errors);
		seq_printf(m, "Ctrl %d Hard Errors = %d\n",
			   i, card->ctrl[i].stats.hard_errors);
		seq_printf(m, "Ctrl %d Soft Errors = %d\n",
			   i, card->ctrl[i].stats.soft_errors);
		seq_printf(m, "Ctrl %d Writes Issued = %d\n",
			   i, card->ctrl[i].stats.writes_issued);
		seq_printf(m, "Ctrl %d Writes Failed = %d\n",
			   i, card->ctrl[i].stats.writes_failed);
		seq_printf(m, "Ctrl %d Reads Issued = %d\n",
			   i, card->ctrl[i].stats.reads_issued);
		seq_printf(m, "Ctrl %d Reads Failed = %d\n",
			   i, card->ctrl[i].stats.reads_failed);
		seq_printf(m, "Ctrl %d Reads Retried = %d\n",
			   i, card->ctrl[i].stats.reads_retried);
		seq_printf(m, "Ctrl %d Discards Issued = %d\n",
			   i, card->ctrl[i].stats.discards_issued);
		seq_printf(m, "Ctrl %d Discards Failed = %d\n",
			   i, card->ctrl[i].stats.discards_failed);
		seq_printf(m, "Ctrl %d DMA SW Errors = %d\n",
			   i, card->ctrl[i].stats.dma_sw_err);
		seq_printf(m, "Ctrl %d DMA HW Faults = %d\n",
			   i, card->ctrl[i].stats.dma_hw_fault);
		seq_printf(m, "Ctrl %d DMAs Cancelled = %d\n",
			   i, card->ctrl[i].stats.dma_cancelled);
		seq_printf(m, "Ctrl %d SW Queue Depth = %d\n",
			   i, card->ctrl[i].stats.sw_q_depth);
		seq_printf(m, "Ctrl %d HW Queue Depth = %d\n",
			   i, atomic_read(&card->ctrl[i].stats.hw_q_depth));
	}

	return 0;
}

static int rsxx_attr_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, rsxx_attr_stats_show, inode->i_private);
}

static int rsxx_attr_pci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, rsxx_attr_pci_regs_show, inode->i_private);
}

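/*
 * The "cram" debugfs file reads and writes the card's CRAM region through
 * the creg command interface, using the file offset as the CRAM address.
 */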
static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
			      size_t cnt, loff_t *ppos)
{
	struct rsxx_cardinfo *card = file_inode(fp)->i_private;
	char *buf;
	ssize_t st;

	buf = kzalloc(cnt, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
	if (!st && copy_to_user(ubuf, buf, cnt))
		st = -EFAULT;
	kfree(buf);
	if (st)
		return st;
	*ppos += cnt;
	return cnt;
}

static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct rsxx_cardinfo *card = file_inode(fp)->i_private;
	char *buf;
	ssize_t st;

	buf = kzalloc(cnt, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, ubuf, cnt))
		st = -EFAULT;
	else
		st = rsxx_creg_write(card, CREG_ADD_CRAM + (u32)*ppos, cnt,
				     buf, 1);
	kfree(buf);
	if (st)
		return st;
	*ppos += cnt;
	return cnt;
}

static const struct file_operations debugfs_cram_fops = {
	.owner		= THIS_MODULE,
	.read		= rsxx_cram_read,
	.write		= rsxx_cram_write,
};

static const struct file_operations debugfs_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= rsxx_attr_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations debugfs_pci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= rsxx_attr_pci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

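/*
 * Create the per-card debugfs directory and its "stats", "pci_regs" and
 * "cram" entries. On any failure the entries created so far are removed
 * and debugfs is simply left disabled for this card.
 */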
static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card)
{
	struct dentry *debugfs_stats;
	struct dentry *debugfs_pci_regs;
	struct dentry *debugfs_cram;

	card->debugfs_dir = debugfs_create_dir(card->gendisk->disk_name, NULL);
	if (IS_ERR_OR_NULL(card->debugfs_dir))
		goto failed_debugfs_dir;

	debugfs_stats = debugfs_create_file("stats", S_IRUGO,
					    card->debugfs_dir, card,
					    &debugfs_stats_fops);
	if (IS_ERR_OR_NULL(debugfs_stats))
		goto failed_debugfs_stats;

	debugfs_pci_regs = debugfs_create_file("pci_regs", S_IRUGO,
					       card->debugfs_dir, card,
					       &debugfs_pci_regs_fops);
	if (IS_ERR_OR_NULL(debugfs_pci_regs))
		goto failed_debugfs_pci_regs;

	debugfs_cram = debugfs_create_file("cram", S_IRUGO | S_IWUSR,
					   card->debugfs_dir, card,
					   &debugfs_cram_fops);
	if (IS_ERR_OR_NULL(debugfs_cram))
		goto failed_debugfs_cram;

	return;

failed_debugfs_cram:
	debugfs_remove(debugfs_pci_regs);
failed_debugfs_pci_regs:
	debugfs_remove(debugfs_stats);
failed_debugfs_stats:
	debugfs_remove(card->debugfs_dir);
failed_debugfs_dir:
	card->debugfs_dir = NULL;
}

/*----------------- Interrupt Control & Handling -------------------*/

static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
{
	card->isr_mask = 0;
	card->ier_mask = 0;
}

static void __enable_intr(unsigned int *mask, unsigned int intr)
{
	*mask |= intr;
}

static void __disable_intr(unsigned int *mask, unsigned int intr)
{
	*mask &= ~intr;
}

/*
 * NOTE: Disabling the IER will disable the hardware interrupt.
 * Disabling the ISR will disable the software handling of the ISR bit.
 *
 * Enable/Disable interrupt functions assume the card->irq_lock
 * is held by the caller.
 */
void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
	if (unlikely(card->halt) ||
	    unlikely(card->eeh_state))
		return;

	__enable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
	if (unlikely(card->eeh_state))
		return;

	__disable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
			     unsigned int intr)
{
	if (unlikely(card->halt) ||
	    unlikely(card->eeh_state))
		return;

	__enable_intr(&card->isr_mask, intr);
	__enable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
			      unsigned int intr)
{
	if (unlikely(card->eeh_state))
		return;

	__disable_intr(&card->isr_mask, intr);
	__disable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

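/*
 * The ISR is re-read after a DMA interrupt is disabled so that a DMA
 * completion arriving while the handler runs is not lost.
 */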
static irqreturn_t rsxx_isr(int irq, void *pdata)
{
	struct rsxx_cardinfo *card = pdata;
	unsigned int isr;
	int handled = 0;
	int reread_isr;
	int i;

	spin_lock(&card->irq_lock);

	do {
		reread_isr = 0;

		if (unlikely(card->eeh_state))
			break;

		isr = ioread32(card->regmap + ISR);
		if (isr == 0xffffffff) {
			/*
			 * A few systems seem to have an intermittent issue
			 * where PCI reads return all Fs, but retrying the read
			 * a little later will return as expected.
			 */
			dev_info(CARD_TO_DEV(card),
				"ISR = 0xFFFFFFFF, retrying later\n");
			break;
		}

		isr &= card->isr_mask;
		if (!isr)
			break;

		for (i = 0; i < card->n_targets; i++) {
			if (isr & CR_INTR_DMA(i)) {
				if (card->ier_mask & CR_INTR_DMA(i)) {
					rsxx_disable_ier(card, CR_INTR_DMA(i));
					reread_isr = 1;
				}
				queue_work(card->ctrl[i].done_wq,
					   &card->ctrl[i].dma_done_work);
				handled++;
			}
		}

		if (isr & CR_INTR_CREG) {
			queue_work(card->creg_ctrl.creg_wq,
				   &card->creg_ctrl.done_work);
			handled++;
		}

		if (isr & CR_INTR_EVENT) {
			queue_work(card->event_wq, &card->event_work);
			rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
			handled++;
		}
	} while (reread_isr);

	spin_unlock(&card->irq_lock);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}

/*----------------- Card Event Handler -------------------*/

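/* Card states are single-bit flags; ffs() maps the set bit to its name. */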
static const char * const rsxx_card_state_to_str(unsigned int state)
{
	static const char * const state_strings[] = {
		"Unknown", "Shutdown", "Starting", "Formatting",
		"Uninitialized", "Good", "Shutting Down",
		"Fault", "Read Only Fault", "dStroying"
	};

	return state_strings[ffs(state)];
}

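/*
 * React to a state change reported by the card: update the cached state
 * and expose or zero the block device capacity as appropriate.
 */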
static void card_state_change(struct rsxx_cardinfo *card,
			      unsigned int new_state)
{
	int st;

	dev_info(CARD_TO_DEV(card),
		"card state change detected. (%s -> %s)\n",
		rsxx_card_state_to_str(card->state),
		rsxx_card_state_to_str(new_state));

	card->state = new_state;

	/* Don't attach DMA interfaces if the card has an invalid config */
	if (!card->config_valid)
		return;

	switch (new_state) {
	case CARD_STATE_RD_ONLY_FAULT:
		dev_crit(CARD_TO_DEV(card),
			"Hardware has entered read-only mode!\n");
		/*
		 * Fall through so the DMA devices can be attached and
		 * the user can attempt to pull off their data.
		 */
	case CARD_STATE_GOOD:
		st = rsxx_get_card_size8(card, &card->size8);
		if (st)
			dev_err(CARD_TO_DEV(card),
				"Failed attaching DMA devices\n");

		if (card->config_valid)
			set_capacity(card->gendisk, card->size8 >> 9);
		break;

	case CARD_STATE_FAULT:
		dev_crit(CARD_TO_DEV(card),
			"Hardware Fault reported!\n");
		/* Fall through. */

	/* Everything else, detach DMA interface if it's attached. */
	case CARD_STATE_SHUTDOWN:
	case CARD_STATE_STARTING:
	case CARD_STATE_FORMATTING:
	case CARD_STATE_UNINITIALIZED:
	case CARD_STATE_SHUTTING_DOWN:
	/*
	 * dStroy is a term coined by marketing to represent the low-level
	 * secure erase.
	 */
	case CARD_STATE_DSTROYING:
		set_capacity(card->gendisk, 0);
		break;
	}
}

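/*
 * Bottom half for CR_INTR_EVENT: re-enable the interrupt, then read the
 * card's new state and act on it.
 */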
static void card_event_handler(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	unsigned int state;
	unsigned long flags;
	int st;

	card = container_of(work, struct rsxx_cardinfo, event_work);

	if (unlikely(card->halt))
		return;

	/*
	 * Enable the interrupt now to avoid any weird race conditions where a
	 * state change might occur while rsxx_get_card_state() is
	 * processing a returned creg cmd.
	 */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	st = rsxx_get_card_state(card, &state);
	if (st) {
		dev_info(CARD_TO_DEV(card),
			"Failed reading state after event.\n");
		return;
	}

	if (card->state != state)
		card_state_change(card, state);

	if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}

/*----------------- Card Operations -------------------*/

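/*
 * Ask the card to shut down and wait (up to two minutes per phase) for it
 * to leave the STARTING state and then to reach SHUTDOWN.
 */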
static int card_shutdown(struct rsxx_cardinfo *card)
{
	unsigned int state;
	signed long start;
	const int timeout = msecs_to_jiffies(120000);
	int st;

	/* We can't issue a shutdown if the card is in a transition state */
	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state == CARD_STATE_STARTING &&
		 (jiffies - start < timeout));

	if (state == CARD_STATE_STARTING)
		return -ETIMEDOUT;

	/* Only issue a shutdown if we need to */
	if ((state != CARD_STATE_SHUTTING_DOWN) &&
	    (state != CARD_STATE_SHUTDOWN)) {
		st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
		if (st)
			return st;
	}

	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state != CARD_STATE_SHUTDOWN &&
		 (jiffies - start < timeout));

	if (state != CARD_STATE_SHUTDOWN)
		return -ETIMEDOUT;

	return 0;
}

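/*
 * EEH "frozen" handling: mask interrupts, save the DMAs and creg commands
 * that were in flight, and release the DMA-coherent buffers before the
 * slot is reset.
 */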
static int rsxx_eeh_frozen(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	int i;
	int st;

	dev_warn(&dev->dev, "IBM Flash Adapter PCI: preparing for slot reset.\n");

	card->eeh_state = 1;
	rsxx_mask_interrupts(card);

	/*
	 * We need to guarantee that the write for eeh_state and masking
	 * interrupts does not become reordered. This will prevent a possible
	 * race condition with the EEH code.
	 */
	wmb();

	pci_disable_device(dev);

	st = rsxx_eeh_save_issued_dmas(card);
	if (st)
		return st;

	rsxx_eeh_save_issued_creg(card);

	for (i = 0; i < card->n_targets; i++) {
		if (card->ctrl[i].status.buf)
			pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
					    card->ctrl[i].status.buf,
					    card->ctrl[i].status.dma_addr);
		if (card->ctrl[i].cmd.buf)
			pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
					    card->ctrl[i].cmd.buf,
					    card->ctrl[i].cmd.dma_addr);
	}

	return 0;
}

static void rsxx_eeh_failure(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	int i;
	int cnt = 0;

	dev_err(&dev->dev, "IBM Flash Adapter PCI: disabling failed card.\n");

	card->eeh_state = 1;
	card->halt = 1;

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_bh(&card->ctrl[i].queue_lock);
		cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
					     &card->ctrl[i].queue,
					     COMPLETE_DMA);
		spin_unlock_bh(&card->ctrl[i].queue_lock);

		cnt += rsxx_dma_cancel(&card->ctrl[i]);

		if (cnt)
			dev_info(CARD_TO_DEV(card),
				"Freed %d queued DMAs on channel %d\n",
				cnt, card->ctrl[i].id);
	}
}

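/*
 * Poll the reconfiguration register until the hardware finishes its
 * post-reset FIFO flush, giving up after roughly ten seconds.
 */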
static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
{
	unsigned int status;
	int iter = 0;

	/* We need to wait for the hardware to reset */
	while (iter++ < 10) {
		status = ioread32(card->regmap + PCI_RECONFIG);

		if (status & RSXX_FLUSH_BUSY) {
			ssleep(1);
			continue;
		}

		if (status & RSXX_FLUSH_TIMEOUT)
			dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
		return 0;
	}

	/* Hardware failed resetting itself. */
	return -1;
}

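/*
 * First step of PCI error recovery: decide whether the card can be reset
 * or must be disconnected. Cards older than RSXX_EEH_SUPPORT opt out of
 * recovery entirely.
 */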
static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
					    enum pci_channel_state error)
{
	int st;

	if (dev->revision < RSXX_EEH_SUPPORT)
		return PCI_ERS_RESULT_NONE;

	if (error == pci_channel_io_perm_failure) {
		rsxx_eeh_failure(dev);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	st = rsxx_eeh_frozen(dev);
	if (st) {
		dev_err(&dev->dev, "Slot reset setup failed\n");
		rsxx_eeh_failure(dev);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

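/*
 * Second step of PCI error recovery: re-enable the device, rebuild the
 * hardware buffers, restore interrupts, and re-issue any queued work.
 */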
static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int i;
	int st;

	dev_warn(&dev->dev,
		"IBM Flash Adapter PCI: recovering from slot reset.\n");

	st = pci_enable_device(dev);
	if (st)
		goto failed_hw_setup;

	pci_set_master(dev);

	st = rsxx_eeh_fifo_flush_poll(card);
	if (st)
		goto failed_hw_setup;

	rsxx_dma_queue_reset(card);

	for (i = 0; i < card->n_targets; i++) {
		st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
		if (st)
			goto failed_hw_buffers_init;
	}

	if (card->config_valid)
		rsxx_dma_configure(card);

	/* Clears the ISR register from spurious interrupts */
	st = ioread32(card->regmap + ISR);

	card->eeh_state = 0;

	spin_lock_irqsave(&card->irq_lock, flags);
	if (card->n_targets & RSXX_MAX_TARGETS)
		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
	else
		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	rsxx_kick_creg_queue(card);

	for (i = 0; i < card->n_targets; i++) {
		spin_lock(&card->ctrl[i].queue_lock);
		if (list_empty(&card->ctrl[i].queue)) {
			spin_unlock(&card->ctrl[i].queue_lock);
			continue;
		}
		spin_unlock(&card->ctrl[i].queue_lock);

		queue_work(card->ctrl[i].issue_wq,
			   &card->ctrl[i].issue_dma_work);
	}

	dev_info(&dev->dev, "IBM Flash Adapter PCI: recovery complete.\n");

	return PCI_ERS_RESULT_RECOVERED;

failed_hw_buffers_init:
	for (i = 0; i < card->n_targets; i++) {
		if (card->ctrl[i].status.buf)
			pci_free_consistent(card->dev,
					    STATUS_BUFFER_SIZE8,
					    card->ctrl[i].status.buf,
					    card->ctrl[i].status.dma_addr);
		if (card->ctrl[i].cmd.buf)
			pci_free_consistent(card->dev,
					    COMMAND_BUFFER_SIZE8,
					    card->ctrl[i].cmd.buf,
					    card->ctrl[i].cmd.dma_addr);
	}
failed_hw_setup:
	rsxx_eeh_failure(dev);
	return PCI_ERS_RESULT_DISCONNECT;
}

/*----------------- Driver Initialization & Setup -------------------*/

/* Returns:   0 if the driver is compatible with the device
 *	     -1 if the driver is NOT compatible with the device */
static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
{
	unsigned char pci_rev;

	pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);

	if (pci_rev > RS70_PCI_REV_SUPPORTED)
		return -1;
	return 0;
}

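/*
 * Probe: map BAR0, set up interrupts, the creg interface and the DMA
 * engine, then bring the card up (optionally waiting for startup when
 * sync_start is set) and attach the block device.
 */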
static int rsxx_pci_probe(struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	struct rsxx_cardinfo *card;
	int st;
	unsigned int sync_timeout;

	dev_info(&dev->dev, "PCI-Flash SSD discovered\n");

	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		return -ENOMEM;

	card->dev = dev;
	pci_set_drvdata(dev, card);

	do {
		if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) {
			st = -ENOMEM;
			goto failed_ida_get;
		}

		spin_lock(&rsxx_ida_lock);
		st = ida_get_new(&rsxx_disk_ida, &card->disk_id);
		spin_unlock(&rsxx_ida_lock);
	} while (st == -EAGAIN);

	if (st)
		goto failed_ida_get;

	st = pci_enable_device(dev);
	if (st)
		goto failed_enable;

	pci_set_master(dev);
	pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);

	st = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"No usable DMA configuration, aborting\n");
		goto failed_dma_mask;
	}

	st = pci_request_regions(dev, DRIVER_NAME);
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"Failed to request memory region\n");
		goto failed_request_regions;
	}

	if (pci_resource_len(dev, 0) == 0) {
		dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
		st = -ENOMEM;
		goto failed_iomap;
	}

	card->regmap = pci_iomap(dev, 0, 0);
	if (!card->regmap) {
		dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
		st = -ENOMEM;
		goto failed_iomap;
	}

	spin_lock_init(&card->irq_lock);
	card->halt = 0;
	card->eeh_state = 0;

	spin_lock_irq(&card->irq_lock);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irq(&card->irq_lock);

	if (!force_legacy) {
		st = pci_enable_msi(dev);
		if (st)
			dev_warn(CARD_TO_DEV(card),
				"Failed to enable MSI\n");
	}

	st = request_irq(dev->irq, rsxx_isr, IRQF_SHARED,
			 DRIVER_NAME, card);
	if (st) {
		dev_err(CARD_TO_DEV(card),
			"Failed requesting IRQ%d\n", dev->irq);
		goto failed_irq;
	}

	/************* Setup Processor Command Interface *************/
	st = rsxx_creg_setup(card);
	if (st) {
		dev_err(CARD_TO_DEV(card), "Failed to setup creg interface.\n");
		goto failed_creg_setup;
	}

	spin_lock_irq(&card->irq_lock);
	rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
	spin_unlock_irq(&card->irq_lock);

	st = rsxx_compatibility_check(card);
	if (st) {
		dev_warn(CARD_TO_DEV(card),
			"Incompatible driver detected. Please update the driver.\n");
		st = -EINVAL;
		goto failed_compatibility_check;
	}

	/************* Load Card Config *************/
	st = rsxx_load_config(card);
	if (st)
		dev_err(CARD_TO_DEV(card),
			"Failed loading card config\n");

	/************* Setup DMA Engine *************/
	st = rsxx_get_num_targets(card, &card->n_targets);
	if (st)
		dev_info(CARD_TO_DEV(card),
			"Failed reading the number of DMA targets\n");

	card->ctrl = kzalloc(card->n_targets * sizeof(*card->ctrl), GFP_KERNEL);
	if (!card->ctrl) {
		st = -ENOMEM;
		goto failed_dma_setup;
	}

	st = rsxx_dma_setup(card);
	if (st) {
		dev_info(CARD_TO_DEV(card),
			"Failed to setup DMA engine\n");
		goto failed_dma_setup;
	}

	/************* Setup Card Event Handler *************/
	card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
	if (!card->event_wq) {
		dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
		st = -ENOMEM;
		goto failed_event_handler;
	}

	INIT_WORK(&card->event_work, card_event_handler);

	st = rsxx_setup_dev(card);
	if (st)
		goto failed_create_dev;

	rsxx_get_card_state(card, &card->state);

	dev_info(CARD_TO_DEV(card),
		"card state: %s\n",
		rsxx_card_state_to_str(card->state));

	/*
	 * Now that the DMA engine and devices have been set up, we can
	 * enable the event interrupt (it kicks off actions in those
	 * layers, so we couldn't enable it right away).
	 */
	spin_lock_irq(&card->irq_lock);
	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irq(&card->irq_lock);

	if (card->state == CARD_STATE_SHUTDOWN) {
		st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
		if (st)
			dev_crit(CARD_TO_DEV(card),
				"Failed issuing card startup\n");
		if (sync_start) {
			sync_timeout = SYNC_START_TIMEOUT;

			dev_info(CARD_TO_DEV(card),
				"Waiting for card to startup\n");

			do {
				ssleep(1);
				sync_timeout--;

				rsxx_get_card_state(card, &card->state);
			} while (sync_timeout &&
				(card->state == CARD_STATE_STARTING));

			if (card->state == CARD_STATE_STARTING) {
				dev_warn(CARD_TO_DEV(card),
					"Card startup timed out\n");
				card->size8 = 0;
			} else {
				dev_info(CARD_TO_DEV(card),
					"card state: %s\n",
					rsxx_card_state_to_str(card->state));
				st = rsxx_get_card_size8(card, &card->size8);
				if (st)
					card->size8 = 0;
			}
		}
	} else if (card->state == CARD_STATE_GOOD ||
		   card->state == CARD_STATE_RD_ONLY_FAULT) {
		st = rsxx_get_card_size8(card, &card->size8);
		if (st)
			card->size8 = 0;
	}

	rsxx_attach_dev(card);

	/************* Setup Debugfs *************/
	rsxx_debugfs_dev_new(card);

	return 0;

failed_create_dev:
	destroy_workqueue(card->event_wq);
	card->event_wq = NULL;
failed_event_handler:
	rsxx_dma_destroy(card);
failed_dma_setup:
failed_compatibility_check:
	destroy_workqueue(card->creg_ctrl.creg_wq);
	card->creg_ctrl.creg_wq = NULL;
failed_creg_setup:
	spin_lock_irq(&card->irq_lock);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irq(&card->irq_lock);
	free_irq(dev->irq, card);
	if (!force_legacy)
		pci_disable_msi(dev);
failed_irq:
	pci_iounmap(dev, card->regmap);
failed_iomap:
	pci_release_regions(dev);
failed_request_regions:
failed_dma_mask:
	pci_disable_device(dev);
failed_enable:
	spin_lock(&rsxx_ida_lock);
	ida_remove(&rsxx_disk_ida, card->disk_id);
	spin_unlock(&rsxx_ida_lock);
failed_ida_get:
	kfree(card);

	return st;
}

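/* Tear down everything probe set up, shutting the card down first. */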
static void rsxx_pci_remove(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int st;
	int i;

	if (!card)
		return;

	dev_info(CARD_TO_DEV(card),
		"Removing PCI-Flash SSD.\n");

	rsxx_detach_dev(card);

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	st = card_shutdown(card);
	if (st)
		dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");

	/* Sync outstanding event handlers. */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	cancel_work_sync(&card->event_work);

	rsxx_destroy_dev(card);
	rsxx_dma_destroy(card);

	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	/* Prevent work_structs from re-queuing themselves. */
	card->halt = 1;

	debugfs_remove_recursive(card->debugfs_dir);

	free_irq(dev->irq, card);

	if (!force_legacy)
		pci_disable_msi(dev);

	rsxx_creg_destroy(card);
	pci_iounmap(dev, card->regmap);

	pci_disable_device(dev);
	pci_release_regions(dev);

	kfree(card);
}

static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	/* We don't support suspend at this time. */
	return -ENOSYS;
}

static void rsxx_pci_shutdown(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int i;

	if (!card)
		return;

	dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");

	rsxx_detach_dev(card);

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	card_shutdown(card);
}

static const struct pci_error_handlers rsxx_err_handler = {
	.error_detected	= rsxx_error_detected,
	.slot_reset	= rsxx_slot_reset,
};

static const struct pci_device_id rsxx_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
	{0,},
};

MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);

static struct pci_driver rsxx_pci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= rsxx_pci_ids,
	.probe		= rsxx_pci_probe,
	.remove		= rsxx_pci_remove,
	.suspend	= rsxx_pci_suspend,
	.shutdown	= rsxx_pci_shutdown,
	.err_handler	= &rsxx_err_handler,
};

static int __init rsxx_core_init(void)
{
	int st;

	st = rsxx_dev_init();
	if (st)
		return st;

	st = rsxx_dma_init();
	if (st)
		goto dma_init_failed;

	st = rsxx_creg_init();
	if (st)
		goto creg_init_failed;

	return pci_register_driver(&rsxx_pci_driver);

creg_init_failed:
	rsxx_dma_cleanup();
dma_init_failed:
	rsxx_dev_cleanup();

	return st;
}

static void __exit rsxx_core_cleanup(void)
{
	pci_unregister_driver(&rsxx_pci_driver);
	rsxx_creg_cleanup();
	rsxx_dma_cleanup();
	rsxx_dev_cleanup();
}

module_init(rsxx_core_init);
module_exit(rsxx_core_cleanup);