sun_esp.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659
  1. /* sun_esp.c: ESP front-end for Sparc SBUS systems.
  2. *
  3. * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/types.h>
  7. #include <linux/delay.h>
  8. #include <linux/module.h>
  9. #include <linux/mm.h>
  10. #include <linux/init.h>
  11. #include <linux/dma-mapping.h>
  12. #include <linux/of.h>
  13. #include <linux/of_device.h>
  14. #include <linux/gfp.h>
  15. #include <asm/irq.h>
  16. #include <asm/io.h>
  17. #include <asm/dma.h>
  18. #include <scsi/scsi_host.h>
  19. #include "esp_scsi.h"
  20. #define DRV_MODULE_NAME "sun_esp"
  21. #define PFX DRV_MODULE_NAME ": "
  22. #define DRV_VERSION "1.100"
  23. #define DRV_MODULE_RELDATE "August 27, 2008"
/* Accessors for the DVMA companion chip's registers.  NOTE: both macros
 * rely on a variable named "esp" (struct esp *) being in scope at the
 * call site.
 */
#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))

/* DVMA chip revisions */
enum dvma_rev {
	dvmarev0,	/* set for DMA_VERS0 in esp_sbus_setup_dma() */
	dvmaesc1,	/* DMA_ESCV1 */
	dvmarev1,	/* DMA_VERS1 */
	dvmarev2,	/* DMA_VERS2 */
	dvmarev3,	/* NOTE(review): never assigned by esp_sbus_setup_dma()
			 * in this file, though sbus_esp_reset_dma() handles
			 * it — presumably set elsewhere or historical; confirm.
			 */
	dvmarevplus,	/* DMA_VERSPLUS */
	dvmahme		/* DMA_VERHME (HME / FAS366) */
};
  38. static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
  39. {
  40. esp->dma = dma_of;
  41. esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
  42. resource_size(&dma_of->resource[0]),
  43. "espdma");
  44. if (!esp->dma_regs)
  45. return -ENOMEM;
  46. switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
  47. case DMA_VERS0:
  48. esp->dmarev = dvmarev0;
  49. break;
  50. case DMA_ESCV1:
  51. esp->dmarev = dvmaesc1;
  52. break;
  53. case DMA_VERS1:
  54. esp->dmarev = dvmarev1;
  55. break;
  56. case DMA_VERS2:
  57. esp->dmarev = dvmarev2;
  58. break;
  59. case DMA_VERHME:
  60. esp->dmarev = dvmahme;
  61. break;
  62. case DMA_VERSPLUS:
  63. esp->dmarev = dvmarevplus;
  64. break;
  65. }
  66. return 0;
  67. }
  68. static int esp_sbus_map_regs(struct esp *esp, int hme)
  69. {
  70. struct platform_device *op = esp->dev;
  71. struct resource *res;
  72. /* On HME, two reg sets exist, first is DVMA,
  73. * second is ESP registers.
  74. */
  75. if (hme)
  76. res = &op->resource[1];
  77. else
  78. res = &op->resource[0];
  79. esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
  80. if (!esp->regs)
  81. return -ENOMEM;
  82. return 0;
  83. }
  84. static int esp_sbus_map_command_block(struct esp *esp)
  85. {
  86. struct platform_device *op = esp->dev;
  87. esp->command_block = dma_alloc_coherent(&op->dev, 16,
  88. &esp->command_block_dma,
  89. GFP_ATOMIC);
  90. if (!esp->command_block)
  91. return -ENOMEM;
  92. return 0;
  93. }
  94. static int esp_sbus_register_irq(struct esp *esp)
  95. {
  96. struct Scsi_Host *host = esp->host;
  97. struct platform_device *op = esp->dev;
  98. host->irq = op->archdata.irqs[0];
  99. return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
  100. }
  101. static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
  102. {
  103. struct platform_device *op = esp->dev;
  104. struct device_node *dp;
  105. dp = op->dev.of_node;
  106. esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
  107. if (esp->scsi_id != 0xff)
  108. goto done;
  109. esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
  110. if (esp->scsi_id != 0xff)
  111. goto done;
  112. esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
  113. "scsi-initiator-id", 7);
  114. done:
  115. esp->host->this_id = esp->scsi_id;
  116. esp->scsi_id_mask = (1 << esp->scsi_id);
  117. }
  118. static void esp_get_differential(struct esp *esp)
  119. {
  120. struct platform_device *op = esp->dev;
  121. struct device_node *dp;
  122. dp = op->dev.of_node;
  123. if (of_find_property(dp, "differential", NULL))
  124. esp->flags |= ESP_FLAG_DIFFERENTIAL;
  125. else
  126. esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
  127. }
  128. static void esp_get_clock_params(struct esp *esp)
  129. {
  130. struct platform_device *op = esp->dev;
  131. struct device_node *bus_dp, *dp;
  132. int fmhz;
  133. dp = op->dev.of_node;
  134. bus_dp = dp->parent;
  135. fmhz = of_getintprop_default(dp, "clock-frequency", 0);
  136. if (fmhz == 0)
  137. fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);
  138. esp->cfreq = fmhz;
  139. }
  140. static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
  141. {
  142. struct device_node *dma_dp = dma_of->dev.of_node;
  143. struct platform_device *op = esp->dev;
  144. struct device_node *dp;
  145. u8 bursts, val;
  146. dp = op->dev.of_node;
  147. bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
  148. val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
  149. if (val != 0xff)
  150. bursts &= val;
  151. val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
  152. if (val != 0xff)
  153. bursts &= val;
  154. if (bursts == 0xff ||
  155. (bursts & DMA_BURST16) == 0 ||
  156. (bursts & DMA_BURST32) == 0)
  157. bursts = (DMA_BURST32 - 1);
  158. esp->bursts = bursts;
  159. }
/* Gather all OpenFirmware-provided configuration for this ESP instance:
 * initiator ID, differential signalling, clock frequency and burst sizes.
 */
static void esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
	esp_get_scsi_id(esp, espdma);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}
  167. static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
  168. {
  169. sbus_writeb(val, esp->regs + (reg * 4UL));
  170. }
  171. static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
  172. {
  173. return sbus_readb(esp->regs + (reg * 4UL));
  174. }
  175. static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
  176. size_t sz, int dir)
  177. {
  178. struct platform_device *op = esp->dev;
  179. return dma_map_single(&op->dev, buf, sz, dir);
  180. }
  181. static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
  182. int num_sg, int dir)
  183. {
  184. struct platform_device *op = esp->dev;
  185. return dma_map_sg(&op->dev, sg, num_sg, dir);
  186. }
  187. static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
  188. size_t sz, int dir)
  189. {
  190. struct platform_device *op = esp->dev;
  191. dma_unmap_single(&op->dev, addr, sz, dir);
  192. }
  193. static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
  194. int num_sg, int dir)
  195. {
  196. struct platform_device *op = esp->dev;
  197. dma_unmap_sg(&op->dev, sg, num_sg, dir);
  198. }
  199. static int sbus_esp_irq_pending(struct esp *esp)
  200. {
  201. if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
  202. return 1;
  203. return 0;
  204. }
/* Bring the DVMA engine to a known, quiescent configuration appropriate
 * for the detected chip revision, then (re-)enable its interrupt.
 * Invoked by the ESP core via esp_driver_ops.reset_dma.
 */
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	struct platform_device *op;
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	op = esp->dev;
	if (sbus_can_dma_64bit())
		can_do_sbus64 = 1;
	if (sbus_can_burst64())
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state. */
	if (esp->dmarev != dvmahme) {
		/* Pulse the SCSI-reset bit in the CSR; HME is reset
		 * separately below.
		 */
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dmarev) {
	case dvmahme:
		/* HME (FAS366): full chip reset, then rebuild the cached
		 * CSR image (prev_hme_dmacsr) with the negotiated burst
		 * size and, when available, 64-bit SBUS mode.
		 */
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);
		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);
		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);
		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;
		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(&op->dev, esp->bursts);
		}
		/* Bounded wait (~1ms) for any pending read to retire
		 * before reprogramming the CSR.
		 */
		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}
		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);
		dma_write32(0, DMA_ADDR);
		break;
	case dvmarev2:
		/* Chips newer than ESP100 get the 3-clock setting here. */
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;
	case dvmarev3:
		/* rev3: force 2-clock mode and, when 32-byte bursts are
		 * available, program that burst size.
		 */
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;
	case dvmaesc1:
		/* ESC: set DMA_ADD_ENABLE, clear byte-count mode, and use
		 * the ESC burst mode only when 16-byte bursts are the best
		 * available (exact bit semantics per DVMA documentation).
		 */
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;
	default:
		break;
	}

	/* Enable interrupts. */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}
/* Wait for the DVMA FIFO to drain out to memory after a transfer.
 * HME needs no help here, so it returns immediately.
 */
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;
	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;
	/* rev3 and ESC1 skip the explicit drain strobe (presumably they
	 * drain without it); everyone else needs DMA_FIFO_STDRAIN set.
	 * The bounded (~1ms) wait below applies to all of them.
	 */
	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);
	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}
/* Discard any state the DVMA is holding for the current transfer.
 * Invoked by the ESP core via esp_driver_ops.dma_invalidate.
 */
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		/* HME: reset the SCSI side, then restore the cached CSR
		 * image with transfers (ENABLE/ST_WRITE) masked off.
		 */
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		/* Bounded wait (~1ms) for any pending read to retire. */
		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		/* Pulse DMA_FIFO_INV with transfers disabled to flush
		 * the FIFO state, then clear it again.
		 */
		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}
/* Program a DMA transfer at bus address "addr" and issue ESP command
 * "cmd" (which must have ESP_CMD_DMA set).  esp_count is what the ESP
 * chip counts; dma_count is what the DVMA counts.  Note the ordering
 * difference: FASHME issues the command before arming the DVMA, all
 * other revisions arm the DVMA first.
 */
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	/* Low 16 bits of the transfer count always go to the chip. */
	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		/* FASHME carries a wider count in FAS_RLO/FAS_RHI; only
		 * the third byte is ever nonzero here.
		 */
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		/* Arm the DVMA from the cached CSR image, with the
		 * transfer direction filled in, and remember the result.
		 */
		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			/* ESC1 gets a byte count rounded up to the end of
			 * the containing page (with 16 bytes of slack).
			 */
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}
  381. static int sbus_esp_dma_error(struct esp *esp)
  382. {
  383. u32 csr = dma_read32(DMA_CSR);
  384. if (csr & DMA_HNDL_ERROR)
  385. return 1;
  386. return 0;
  387. }
/* Hooks handed to the generic ESP core (esp_scsi) for this front-end. */
static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	=	sbus_esp_write8,
	.esp_read8	=	sbus_esp_read8,
	.map_single	=	sbus_esp_map_single,
	.map_sg		=	sbus_esp_map_sg,
	.unmap_single	=	sbus_esp_unmap_single,
	.unmap_sg	=	sbus_esp_unmap_sg,
	.irq_pending	=	sbus_esp_irq_pending,
	.reset_dma	=	sbus_esp_reset_dma,
	.dma_drain	=	sbus_esp_dma_drain,
	.dma_invalidate	=	sbus_esp_dma_invalidate,
	.send_dma_cmd	=	sbus_esp_send_dma_cmd,
	.dma_error	=	sbus_esp_dma_error,
};
  402. static int esp_sbus_probe_one(struct platform_device *op,
  403. struct platform_device *espdma, int hme)
  404. {
  405. struct scsi_host_template *tpnt = &scsi_esp_template;
  406. struct Scsi_Host *host;
  407. struct esp *esp;
  408. int err;
  409. host = scsi_host_alloc(tpnt, sizeof(struct esp));
  410. err = -ENOMEM;
  411. if (!host)
  412. goto fail;
  413. host->max_id = (hme ? 16 : 8);
  414. esp = shost_priv(host);
  415. esp->host = host;
  416. esp->dev = op;
  417. esp->ops = &sbus_esp_ops;
  418. if (hme)
  419. esp->flags |= ESP_FLAG_WIDE_CAPABLE;
  420. err = esp_sbus_setup_dma(esp, espdma);
  421. if (err < 0)
  422. goto fail_unlink;
  423. err = esp_sbus_map_regs(esp, hme);
  424. if (err < 0)
  425. goto fail_unlink;
  426. err = esp_sbus_map_command_block(esp);
  427. if (err < 0)
  428. goto fail_unmap_regs;
  429. err = esp_sbus_register_irq(esp);
  430. if (err < 0)
  431. goto fail_unmap_command_block;
  432. esp_sbus_get_props(esp, espdma);
  433. /* Before we try to touch the ESP chip, ESC1 dma can
  434. * come up with the reset bit set, so make sure that
  435. * is clear first.
  436. */
  437. if (esp->dmarev == dvmaesc1) {
  438. u32 val = dma_read32(DMA_CSR);
  439. dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
  440. }
  441. dev_set_drvdata(&op->dev, esp);
  442. err = scsi_esp_register(esp, &op->dev);
  443. if (err)
  444. goto fail_free_irq;
  445. return 0;
  446. fail_free_irq:
  447. free_irq(host->irq, esp);
  448. fail_unmap_command_block:
  449. dma_free_coherent(&op->dev, 16,
  450. esp->command_block,
  451. esp->command_block_dma);
  452. fail_unmap_regs:
  453. of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
  454. fail_unlink:
  455. scsi_host_put(host);
  456. fail:
  457. return err;
  458. }
  459. static int esp_sbus_probe(struct platform_device *op)
  460. {
  461. struct device_node *dma_node = NULL;
  462. struct device_node *dp = op->dev.of_node;
  463. struct platform_device *dma_of = NULL;
  464. int hme = 0;
  465. if (dp->parent &&
  466. (!strcmp(dp->parent->name, "espdma") ||
  467. !strcmp(dp->parent->name, "dma")))
  468. dma_node = dp->parent;
  469. else if (!strcmp(dp->name, "SUNW,fas")) {
  470. dma_node = op->dev.of_node;
  471. hme = 1;
  472. }
  473. if (dma_node)
  474. dma_of = of_find_device_by_node(dma_node);
  475. if (!dma_of)
  476. return -ENODEV;
  477. return esp_sbus_probe_one(op, dma_of, hme);
  478. }
  479. static int esp_sbus_remove(struct platform_device *op)
  480. {
  481. struct esp *esp = dev_get_drvdata(&op->dev);
  482. struct platform_device *dma_of = esp->dma;
  483. unsigned int irq = esp->host->irq;
  484. bool is_hme;
  485. u32 val;
  486. scsi_esp_unregister(esp);
  487. /* Disable interrupts. */
  488. val = dma_read32(DMA_CSR);
  489. dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
  490. free_irq(irq, esp);
  491. is_hme = (esp->dmarev == dvmahme);
  492. dma_free_coherent(&op->dev, 16,
  493. esp->command_block,
  494. esp->command_block_dma);
  495. of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
  496. SBUS_ESP_REG_SIZE);
  497. of_iounmap(&dma_of->resource[0], esp->dma_regs,
  498. resource_size(&dma_of->resource[0]));
  499. scsi_host_put(esp->host);
  500. dev_set_drvdata(&op->dev, NULL);
  501. return 0;
  502. }
/* OpenFirmware node names this driver binds to. */
static const struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);
/* Platform driver glue: probe/remove plus the OF match table above. */
static struct platform_driver esp_sbus_driver = {
	.driver = {
		.name = "esp",
		.of_match_table = esp_match,
	},
	.probe		= esp_sbus_probe,
	.remove		= esp_sbus_remove,
};
/* Module entry point: register the platform driver. */
static int __init sunesp_init(void)
{
	return platform_driver_register(&esp_sbus_driver);
}
/* Module exit point: unregister the platform driver. */
static void __exit sunesp_exit(void)
{
	platform_driver_unregister(&esp_sbus_driver);
}
  532. MODULE_DESCRIPTION("Sun ESP SCSI driver");
  533. MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
  534. MODULE_LICENSE("GPL");
  535. MODULE_VERSION(DRV_VERSION);
  536. module_init(sunesp_init);
  537. module_exit(sunesp_exit);