zip_main.c

/***********************license start************************************
 * Copyright (c) 2003-2017 Cavium, Inc.
 * All rights reserved.
 *
 * License: one of 'Cavium License' or 'GNU General Public License Version 2'
 *
 * This file is provided under the terms of the Cavium License (see below)
 * or under the terms of GNU General Public License, Version 2, as
 * published by the Free Software Foundation. When using or redistributing
 * this file, you may do so under either license.
 *
 * Cavium License: Redistribution and use in source and binary forms, with
 * or without modification, are permitted provided that the following
 * conditions are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * * Neither the name of Cavium Inc. nor the names of its contributors may be
 *   used to endorse or promote products derived from this software without
 *   specific prior written permission.
 *
 * This Software, including technical data, may be subject to U.S. export
 * control laws, including the U.S. Export Administration Act and its
 * associated regulations, and may be subject to export or import
 * regulations in other countries.
 *
 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
 * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS
 * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH
 * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
 * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
 * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY)
 * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A
 * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET
 * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE
 * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES
 * WITH YOU.
 ***********************license end**************************************/
#include "common.h"
#include "zip_crypto.h"

#define DRV_NAME	"ThunderX-ZIP"

static struct zip_device *zip_dev[MAX_ZIP_DEVICES];

static const struct pci_device_id zip_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) },
	{ 0, }
};
void zip_reg_write(u64 val, u64 __iomem *addr)
{
	writeq(val, addr);
}

u64 zip_reg_read(u64 __iomem *addr)
{
	return readq(addr);
}
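
/*
 * Note (illustrative, not a functional change): all CSR accesses in this
 * file go through the two helpers above against the BAR0 mapping set up
 * in zip_probe(), e.g.
 *
 *	val = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
 *	zip_reg_write(val, zip->reg_base + ZIP_CMD_CTL);
 */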

/*
 * Allocates a new ZIP device structure
 * Returns zip_device pointer, or NULL if memory allocation fails or no
 * free slot is left in zip_dev[]
 */
static struct zip_device *zip_alloc_device(struct pci_dev *pdev)
{
	struct zip_device *zip = NULL;
	int idx;

	for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) {
		if (!zip_dev[idx])
			break;
	}

	/* Allocate only if a free slot was found within the limit */
	if (idx < MAX_ZIP_DEVICES)
		zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL);

	if (!zip)
		return NULL;

	zip_dev[idx] = zip;
	zip->index = idx;
	return zip;
}

/**
 * zip_get_device - Get ZIP device based on node id of cpu
 *
 * @node: Node id of the current cpu
 * Return: Pointer to Zip device structure
 */
struct zip_device *zip_get_device(int node)
{
	if ((node < MAX_ZIP_DEVICES) && (node >= 0))
		return zip_dev[node];

	zip_err("ZIP device not found for node id %d\n", node);
	return NULL;
}

/**
 * zip_get_node_id - Get the node id of the current cpu
 *
 * Return: Node id of the current cpu
 */
int zip_get_node_id(void)
{
	return cpu_to_node(raw_smp_processor_id());
}
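
/*
 * Illustrative usage sketch (the real call sites live in the compress and
 * decompress paths outside this file): callers are expected to pick the
 * ZIP device local to the current NUMA node with
 *
 *	struct zip_device *zip = zip_get_device(zip_get_node_id());
 *
 *	if (!zip)
 *		return -ENODEV;
 */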

/*
 * Initializes the ZIP h/w sub-system: enables the engine core clock,
 * programs the command buffer address/size for each instruction queue,
 * then enables the queues, maps them to the ZIP cores and sets their
 * round-robin priority.
 */
static int zip_init_hw(struct zip_device *zip)
{
	union zip_cmd_ctl cmd_ctl;
	union zip_constants constants;
	union zip_que_ena que_ena;
	union zip_quex_map que_map;
	union zip_que_pri que_pri;
	union zip_quex_sbuf_addr que_sbuf_addr;
	union zip_quex_sbuf_ctl que_sbuf_ctl;
	int q = 0;

	/* Enable the ZIP Engine(Core) Clock */
	cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL);
	cmd_ctl.s.forceclk = 1;
	zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL));

	zip_msg("ZIP_CMD_CTL : 0x%016llx",
		zip_reg_read(zip->reg_base + ZIP_CMD_CTL));

	constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS);
	zip->depth = constants.s.depth;
	zip->onfsize = constants.s.onfsize;
	zip->ctxsize = constants.s.ctxsize;

	zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx",
		zip->depth, zip->onfsize, zip->ctxsize);

	/*
	 * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to
	 * have the correct buffer pointer and size configured for each
	 * instruction queue.
	 */
	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
		que_sbuf_ctl.u_reg64 = 0ull;
		que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64));
		que_sbuf_ctl.s.inst_be = 0;
		que_sbuf_ctl.s.stream_id = 0;
		zip_reg_write(que_sbuf_ctl.u_reg64,
			      (zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));

		zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q,
			zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q)));
	}

	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
		memset(&zip->iq[q], 0x0, sizeof(struct zip_iq));

		spin_lock_init(&zip->iq[q].lock);

		if (zip_cmd_qbuf_alloc(zip, q)) {
			while (q != 0) {
				q--;
				zip_cmd_qbuf_free(zip, q);
			}
			return -ENOMEM;
		}

		/* Initialize tail ptr to head */
		zip->iq[q].sw_tail = zip->iq[q].sw_head;
		zip->iq[q].hw_tail = zip->iq[q].sw_head;

		/* Write the physical addr to register */
		que_sbuf_addr.u_reg64 = 0ull;
		que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >>
				       ZIP_128B_ALIGN);

		zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q,
			(u64)que_sbuf_addr.s.ptr);

		zip_reg_write(que_sbuf_addr.u_reg64,
			      (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));

		zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q,
			zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q)));

		zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx",
			zip->iq[q].sw_head, zip->iq[q].sw_tail,
			zip->iq[q].hw_tail);
		zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr);
	}

	/*
	 * Queue-to-ZIP core mapping
	 * If a queue is not mapped to a particular core, it is equivalent to
	 * the ZIP core being disabled.
	 */
	que_ena.u_reg64 = 0x0ull;
	/* Enabling queues based on ZIP_NUM_QUEUES */
	for (q = 0; q < ZIP_NUM_QUEUES; q++)
		que_ena.s.ena |= (0x1 << q);

	zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA));

	zip_msg("QUE_ENA : 0x%016llx",
		zip_reg_read(zip->reg_base + ZIP_QUE_ENA));

	for (q = 0; q < ZIP_NUM_QUEUES; q++) {
		que_map.u_reg64 = 0ull;
		/* Mapping each queue to two ZIP cores */
		que_map.s.zce = 0x3;
		zip_reg_write(que_map.u_reg64,
			      (zip->reg_base + ZIP_QUEX_MAP(q)));

		zip_msg("QUE_MAP(%d) : 0x%016llx", q,
			zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q)));
	}

	que_pri.u_reg64 = 0ull;
	for (q = 0; q < ZIP_NUM_QUEUES; q++)
		que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */
	zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI));

	zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI));

	return 0;
}

static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct zip_device *zip = NULL;
	int err;

	zip = zip_alloc_device(pdev);
	if (!zip)
		return -ENOMEM;

	dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index,
		 pdev->vendor, pdev->device, dev_to_node(dev));

	pci_set_drvdata(pdev, zip);
	zip->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device");
		goto err_free_device;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for allocations\n");
		goto err_release_regions;
	}

	/* MAP configuration registers */
	zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0);
	if (!zip->reg_base) {
		dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* Initialize ZIP Hardware */
	err = zip_init_hw(zip);
	if (err)
		goto err_release_regions;

	return 0;

err_release_regions:
	if (zip->reg_base)
		iounmap(zip->reg_base);
	pci_release_regions(pdev);

err_disable_device:
	pci_disable_device(pdev);

err_free_device:
	pci_set_drvdata(pdev, NULL);

	/* Remove zip_dev from zip_device list, free the zip_device memory */
	zip_dev[zip->index] = NULL;
	devm_kfree(dev, zip);

	return err;
}

static void zip_remove(struct pci_dev *pdev)
{
	struct zip_device *zip = pci_get_drvdata(pdev);
	union zip_cmd_ctl cmd_ctl;
	int q = 0;

	if (!zip)
		return;

	if (zip->reg_base) {
		cmd_ctl.u_reg64 = 0x0ull;
		cmd_ctl.s.reset = 1;  /* Forces ZIP cores to do reset */
		zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL));
		iounmap(zip->reg_base);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/*
	 * Free Command Queue buffers. This free should be called for all
	 * the enabled Queues.
	 */
	for (q = 0; q < ZIP_NUM_QUEUES; q++)
		zip_cmd_qbuf_free(zip, q);

	pci_set_drvdata(pdev, NULL);
	/* remove zip device from zip device list */
	zip_dev[zip->index] = NULL;
}

/* PCI Sub-System Interface */
static struct pci_driver zip_driver = {
	.name	  = DRV_NAME,
	.id_table = zip_id_table,
	.probe	  = zip_probe,
	.remove	  = zip_remove,
};

/* Kernel Crypto Subsystem Interface */
static struct crypto_alg zip_comp_deflate = {
	.cra_name	 = "deflate",
	.cra_driver_name = "deflate-cavium",
	.cra_flags	 = CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize	 = sizeof(struct zip_kernel_ctx),
	.cra_priority	 = 300,
	.cra_module	 = THIS_MODULE,
	.cra_init	 = zip_alloc_comp_ctx_deflate,
	.cra_exit	 = zip_free_comp_ctx,
	.cra_u		 = { .compress = {
		.coa_compress	= zip_comp_compress,
		.coa_decompress	= zip_comp_decompress
	} }
};

static struct crypto_alg zip_comp_lzs = {
	.cra_name	 = "lzs",
	.cra_driver_name = "lzs-cavium",
	.cra_flags	 = CRYPTO_ALG_TYPE_COMPRESS,
	.cra_ctxsize	 = sizeof(struct zip_kernel_ctx),
	.cra_priority	 = 300,
	.cra_module	 = THIS_MODULE,
	.cra_init	 = zip_alloc_comp_ctx_lzs,
	.cra_exit	 = zip_free_comp_ctx,
	.cra_u		 = { .compress = {
		.coa_compress	= zip_comp_compress,
		.coa_decompress	= zip_comp_decompress
	} }
};

static struct scomp_alg zip_scomp_deflate = {
	.alloc_ctx	= zip_alloc_scomp_ctx_deflate,
	.free_ctx	= zip_free_scomp_ctx,
	.compress	= zip_scomp_compress,
	.decompress	= zip_scomp_decompress,
	.base		= {
		.cra_name		= "deflate",
		.cra_driver_name	= "deflate-scomp-cavium",
		.cra_module		= THIS_MODULE,
		.cra_priority		= 300,
	}
};

static struct scomp_alg zip_scomp_lzs = {
	.alloc_ctx	= zip_alloc_scomp_ctx_lzs,
	.free_ctx	= zip_free_scomp_ctx,
	.compress	= zip_scomp_compress,
	.decompress	= zip_scomp_decompress,
	.base		= {
		.cra_name		= "lzs",
		.cra_driver_name	= "lzs-scomp-cavium",
		.cra_module		= THIS_MODULE,
		.cra_priority		= 300,
	}
};
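
/*
 * Illustrative only (not part of the driver): once the algorithms above are
 * registered, kernel users reach this hardware through the generic crypto
 * API by name, for example (src/slen/dst/dlen are the caller's buffers):
 *
 *	struct crypto_comp *tfm = crypto_alloc_comp("deflate", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_comp_compress(tfm, src, slen, dst, &dlen);
 *		crypto_free_comp(tfm);
 *	}
 *
 * Whether "deflate-cavium" is picked over other "deflate" providers
 * depends on the relative cra_priority values.
 */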

static int zip_register_compression_device(void)
{
	int ret;

	ret = crypto_register_alg(&zip_comp_deflate);
	if (ret < 0) {
		zip_err("Deflate algorithm registration failed\n");
		return ret;
	}

	ret = crypto_register_alg(&zip_comp_lzs);
	if (ret < 0) {
		zip_err("LZS algorithm registration failed\n");
		goto err_unregister_alg_deflate;
	}

	ret = crypto_register_scomp(&zip_scomp_deflate);
	if (ret < 0) {
		zip_err("Deflate scomp algorithm registration failed\n");
		goto err_unregister_alg_lzs;
	}

	ret = crypto_register_scomp(&zip_scomp_lzs);
	if (ret < 0) {
		zip_err("LZS scomp algorithm registration failed\n");
		goto err_unregister_scomp_deflate;
	}

	return ret;

err_unregister_scomp_deflate:
	crypto_unregister_scomp(&zip_scomp_deflate);
err_unregister_alg_lzs:
	crypto_unregister_alg(&zip_comp_lzs);
err_unregister_alg_deflate:
	crypto_unregister_alg(&zip_comp_deflate);

	return ret;
}

static void zip_unregister_compression_device(void)
{
	crypto_unregister_alg(&zip_comp_deflate);
	crypto_unregister_alg(&zip_comp_lzs);
	crypto_unregister_scomp(&zip_scomp_deflate);
	crypto_unregister_scomp(&zip_scomp_lzs);
}

/*
 * debugfs functions
 */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

/* Displays ZIP device statistics */
static int zip_show_stats(struct seq_file *s, void *unused)
{
	u64 val = 0ull;
	u64 avg_chunk = 0ull, avg_cr = 0ull;
	u32 q = 0;
	int index = 0;
	struct zip_device *zip;
	struct zip_stats *st;

	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
		u64 pending = 0;

		if (zip_dev[index]) {
			zip = zip_dev[index];
			st = &zip->stats;

			/* Get all the pending requests */
			for (q = 0; q < ZIP_NUM_QUEUES; q++) {
				val = zip_reg_read((zip->reg_base +
						    ZIP_DBG_QUEX_STA(q)));
				pending += val >> 32 & 0xffffff;
			}

			val = atomic64_read(&st->comp_req_complete);
			avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;

			val = atomic64_read(&st->comp_out_bytes);
			avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0;

			seq_printf(s, " ZIP Device %d Stats\n"
				   "-----------------------------------\n"
				   "Comp Req Submitted : \t%lld\n"
				   "Comp Req Completed : \t%lld\n"
				   "Compress In Bytes : \t%lld\n"
				   "Compressed Out Bytes : \t%lld\n"
				   "Average Chunk size : \t%llu\n"
				   "Average Compression ratio : \t%llu\n"
				   "Decomp Req Submitted : \t%lld\n"
				   "Decomp Req Completed : \t%lld\n"
				   "Decompress In Bytes : \t%lld\n"
				   "Decompressed Out Bytes : \t%lld\n"
				   "Decompress Bad requests : \t%lld\n"
				   "Pending Req : \t%lld\n"
				   "---------------------------------\n",
				   index,
				   (u64)atomic64_read(&st->comp_req_submit),
				   (u64)atomic64_read(&st->comp_req_complete),
				   (u64)atomic64_read(&st->comp_in_bytes),
				   (u64)atomic64_read(&st->comp_out_bytes),
				   avg_chunk,
				   avg_cr,
				   (u64)atomic64_read(&st->decomp_req_submit),
				   (u64)atomic64_read(&st->decomp_req_complete),
				   (u64)atomic64_read(&st->decomp_in_bytes),
				   (u64)atomic64_read(&st->decomp_out_bytes),
				   (u64)atomic64_read(&st->decomp_bad_reqs),
				   pending);
		}
	}
	return 0;
}

/* Clears stats data */
static int zip_clear_stats(struct seq_file *s, void *unused)
{
	int index = 0;

	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
		if (zip_dev[index]) {
			memset(&zip_dev[index]->stats, 0,
			       sizeof(struct zip_stats));
			seq_printf(s, "Cleared stats for zip %d\n", index);
		}
	}

	return 0;
}

static struct zip_registers zipregs[64] = {
	{"ZIP_CMD_CTL ",        0x0000ull},
	{"ZIP_THROTTLE ",       0x0010ull},
	{"ZIP_CONSTANTS ",      0x00A0ull},
	{"ZIP_QUE0_MAP ",       0x1400ull},
	{"ZIP_QUE1_MAP ",       0x1408ull},
	{"ZIP_QUE_ENA ",        0x0500ull},
	{"ZIP_QUE_PRI ",        0x0508ull},
	{"ZIP_QUE0_DONE ",      0x2000ull},
	{"ZIP_QUE1_DONE ",      0x2008ull},
	{"ZIP_QUE0_DOORBELL ",  0x4000ull},
	{"ZIP_QUE1_DOORBELL ",  0x4008ull},
	{"ZIP_QUE0_SBUF_ADDR ", 0x1000ull},
	{"ZIP_QUE1_SBUF_ADDR ", 0x1008ull},
	{"ZIP_QUE0_SBUF_CTL ",  0x1200ull},
	{"ZIP_QUE1_SBUF_CTL ",  0x1208ull},
	{ NULL, 0}
};
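
/*
 * The table above is deliberately NULL-terminated: zip_print_regs() below
 * walks it until the NULL sentinel, so additional registers can be dumped
 * by appending {name, offset} entries before the terminator.
 */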

/* Prints registers' contents */
static int zip_print_regs(struct seq_file *s, void *unused)
{
	u64 val = 0;
	int i = 0, index = 0;

	for (index = 0; index < MAX_ZIP_DEVICES; index++) {
		if (zip_dev[index]) {
			seq_printf(s, "--------------------------------\n"
				   " ZIP Device %d Registers\n"
				   "--------------------------------\n",
				   index);
			i = 0;
			while (zipregs[i].reg_name) {
				val = zip_reg_read((zip_dev[index]->reg_base +
						    zipregs[i].reg_offset));
				seq_printf(s, "%s: 0x%016llx\n",
					   zipregs[i].reg_name, val);
				i++;
			}
		}
	}
	return 0;
}

static int zip_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, zip_show_stats, NULL);
}

static const struct file_operations zip_stats_fops = {
	.owner	 = THIS_MODULE,
	.open	 = zip_stats_open,
	.read	 = seq_read,
	.release = single_release,
};

static int zip_clear_open(struct inode *inode, struct file *file)
{
	return single_open(file, zip_clear_stats, NULL);
}

static const struct file_operations zip_clear_fops = {
	.owner	 = THIS_MODULE,
	.open	 = zip_clear_open,
	.read	 = seq_read,
	.release = single_release,
};

static int zip_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, zip_print_regs, NULL);
}

static const struct file_operations zip_regs_fops = {
	.owner	 = THIS_MODULE,
	.open	 = zip_regs_open,
	.read	 = seq_read,
	.release = single_release,
};

/* Root directory for thunderx_zip debugfs entry */
static struct dentry *zip_debugfs_root;

static int __init zip_debugfs_init(void)
{
	struct dentry *zip_stats, *zip_clear, *zip_regs;

	if (!debugfs_initialized())
		return -ENODEV;

	zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL);
	if (!zip_debugfs_root)
		return -ENOMEM;

	/* Creating files for entries inside thunderx_zip directory */
	zip_stats = debugfs_create_file("zip_stats", 0444,
					zip_debugfs_root,
					NULL, &zip_stats_fops);
	if (!zip_stats)
		goto failed_to_create;

	zip_clear = debugfs_create_file("zip_clear", 0444,
					zip_debugfs_root,
					NULL, &zip_clear_fops);
	if (!zip_clear)
		goto failed_to_create;

	zip_regs = debugfs_create_file("zip_regs", 0444,
				       zip_debugfs_root,
				       NULL, &zip_regs_fops);
	if (!zip_regs)
		goto failed_to_create;

	return 0;

failed_to_create:
	debugfs_remove_recursive(zip_debugfs_root);
	return -ENOENT;
}
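
/*
 * With debugfs mounted in its usual location, the files created above can
 * be exercised from userspace, for example:
 *
 *	cat /sys/kernel/debug/thunderx_zip/zip_stats
 *	cat /sys/kernel/debug/thunderx_zip/zip_regs
 *	cat /sys/kernel/debug/thunderx_zip/zip_clear   (resets the counters)
 */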

static void __exit zip_debugfs_exit(void)
{
	debugfs_remove_recursive(zip_debugfs_root);
}

#else
static int __init zip_debugfs_init(void)
{
	return 0;
}

static void __exit zip_debugfs_exit(void) { }

#endif
/* debugfs - end */

static int __init zip_init_module(void)
{
	int ret;

	zip_msg("%s\n", DRV_NAME);

	ret = pci_register_driver(&zip_driver);
	if (ret < 0) {
		zip_err("ZIP: pci_register_driver() failed\n");
		return ret;
	}

	/* Register with the Kernel Crypto Interface */
	ret = zip_register_compression_device();
	if (ret < 0) {
		zip_err("ZIP: Kernel Crypto Registration failed\n");
		goto err_pci_unregister;
	}

	/* comp-decomp statistics are handled with debugfs interface */
	ret = zip_debugfs_init();
	if (ret < 0) {
		zip_err("ZIP: debugfs initialization failed\n");
		goto err_crypto_unregister;
	}

	return ret;

err_crypto_unregister:
	zip_unregister_compression_device();
err_pci_unregister:
	pci_unregister_driver(&zip_driver);

	return ret;
}

static void __exit zip_cleanup_module(void)
{
	zip_debugfs_exit();

	/* Unregister from the kernel crypto interface */
	zip_unregister_compression_device();

	/* Unregister this driver for pci zip devices */
	pci_unregister_driver(&zip_driver);
}

module_init(zip_init_module);
module_exit(zip_cleanup_module);

MODULE_AUTHOR("Cavium Inc");
MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, zip_id_table);