nfit.c

  1. /*
  2. * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of version 2 of the GNU General Public License as
  6. * published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/platform_device.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/libnvdimm.h>
  17. #include <linux/vmalloc.h>
  18. #include <linux/device.h>
  19. #include <linux/module.h>
  20. #include <linux/ndctl.h>
  21. #include <linux/sizes.h>
  22. #include <linux/slab.h>
  23. #include <nfit.h>
  24. #include <nd.h>
  25. #include "nfit_test.h"
  26. /*
  27. * Generate an NFIT table to describe the following topology:
  28. *
  29. * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
  30. *
  31. *                    (a)                       (b)   DIMM   BLK-REGION
  32. *           +----------+--------------+----------+---------+
  33. * +------+  |  blk2.0  |     pm0.0    |  blk2.1  |  pm1.0  |    0      region2
  34. * | imc0 +--+- - - - - region0 - - - -+----------+         +
  35. * +--+---+  |  blk3.0  |     pm0.0    |  blk3.1  |  pm1.0  |    1      region3
  36. *    |      +----------+--------------v----------v         v
  37. * +--+---+                            |                    |
  38. * | cpu0 |                                    region1
  39. * +--+---+                            |                    |
  40. *    |      +-------------------------^----------^         ^
  41. * +--+---+  |                 blk4.0             |  pm1.0  |    2      region4
  42. * | imc1 +--+-------------------------+----------+         +
  43. * +------+  |                 blk5.0             |  pm1.0  |    3      region5
  44. *           +-------------------------+----------+-+-------+
  45. *
  46. * *) In this layout we have four dimms and two memory controllers in one
  47. * socket. Each unique interface (BLK or PMEM) to DPA space
  48. * is identified by a region device with a dynamically assigned id.
  49. *
  50. * *) The first portions of dimm0 and dimm1 are interleaved as REGION0.
  51. * A single PMEM namespace "pm0.0" is created using half of the
  52. * REGION0 SPA-range. REGION0 spans dimm0 and dimm1. PMEM namespaces
  53. * allocate from the bottom of a region. The unallocated
  54. * portion of REGION0 aliases with REGION2 and REGION3. That
  55. * unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
  56. * "blk3.0") starting at the base of each DIMM to offset (a) in those
  57. * DIMMs. "pm0.0", "blk2.0" and "blk3.0" are free-form readable
  58. * names that can be assigned to a namespace.
  59. *
  60. * *) In the last portion of dimm0 and dimm1 we have an interleaved
  61. * SPA range, REGION1, that spans those two dimms as well as dimm2
  62. * and dimm3. Some of REGION1 is allocated to a PMEM namespace named
  63. * "pm1.0"; the rest is reclaimed as 4 BLK namespaces (one for each
  64. * dimm in the interleave set): "blk2.1", "blk3.1", "blk4.0", and
  65. * "blk5.0".
  66. *
  67. * *) The portions of dimm2 and dimm3 that do not participate in the
  68. * REGION1 interleaved SPA range (i.e. the DPA addresses below offset
  69. * (b)) are also included in the "blk4.0" and "blk5.0" namespaces.
  70. * Note, that BLK namespaces need not be contiguous in DPA-space, and
  71. * can consume aliased capacity from multiple interleave sets.
  72. *
  73. * BUS1: Legacy NVDIMM (single contiguous range)
  74. *
  75. * region2
  76. * +---------------------+
  77. * |---------------------|
  78. * ||       pm2.0       ||
  79. * |---------------------|
  80. * +---------------------+
  81. *
  82. * *) An NFIT table may describe a simple system-physical-address range
  83. * with no BLK aliasing. This type of region may optionally
  84. * reference an NVDIMM.
  85. */
  86. enum {
  87. NUM_PM = 2,
  88. NUM_DCR = 4,
  89. NUM_BDW = NUM_DCR,
  90. NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
  91. NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
  92. DIMM_SIZE = SZ_32M,
  93. LABEL_SIZE = SZ_128K,
  94. SPA0_SIZE = DIMM_SIZE,
  95. SPA1_SIZE = DIMM_SIZE*2,
  96. SPA2_SIZE = DIMM_SIZE,
  97. BDW_SIZE = 64 << 8,
  98. DCR_SIZE = 12,
  99. NUM_NFITS = 2, /* permit testing multiple NFITs per system */
  100. };
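/*
 * With NUM_PM = 2 and NUM_DCR = NUM_BDW = 4 the table built by
 * nfit_test0_setup() carries NUM_SPA = 10 system-physical-address
 * ranges (2 PMEM + 4 DCR + 4 BDW) and NUM_MEM = 14 memory-map entries
 * (2 for the spa0 interleave set, 4 for spa1, plus one per DCR and
 * per BDW range).
 */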
  101. struct nfit_test_dcr {
  102. __le64 bdw_addr;
  103. __le32 bdw_status;
  104. __u8 aperature[BDW_SIZE];
  105. };
  106. #define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
  107. (((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
  108. | ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))
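/*
 * Example: NFIT_DIMM_HANDLE(0, 0, 1, 0, 1) packs node 0, socket 0,
 * imc 1, channel 0, dimm 1 into the value 0x101, i.e. handle[3] below.
 */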
  109. static u32 handle[NUM_DCR] = {
  110. [0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
  111. [1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
  112. [2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
  113. [3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
  114. };
  115. struct nfit_test {
  116. struct acpi_nfit_desc acpi_desc;
  117. struct platform_device pdev;
  118. struct list_head resources;
  119. void *nfit_buf;
  120. dma_addr_t nfit_dma;
  121. size_t nfit_size;
  122. int num_dcr;
  123. int num_pm;
  124. void **dimm;
  125. dma_addr_t *dimm_dma;
  126. void **label;
  127. dma_addr_t *label_dma;
  128. void **spa_set;
  129. dma_addr_t *spa_set_dma;
  130. struct nfit_test_dcr **dcr;
  131. dma_addr_t *dcr_dma;
  132. int (*alloc)(struct nfit_test *t);
  133. void (*setup)(struct nfit_test *t);
  134. };
  135. static struct nfit_test *to_nfit_test(struct device *dev)
  136. {
  137. struct platform_device *pdev = to_platform_device(dev);
  138. return container_of(pdev, struct nfit_test, pdev);
  139. }
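/*
 * Emulate the label-space commands: ND_CMD_GET_CONFIG_SIZE,
 * ND_CMD_GET_CONFIG_DATA, and ND_CMD_SET_CONFIG_DATA are serviced
 * directly from the per-dimm label buffers allocated at probe time.
 */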
  140. static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
  141. struct nvdimm *nvdimm, unsigned int cmd, void *buf,
  142. unsigned int buf_len)
  143. {
  144. struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
  145. struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
  146. struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
  147. int i, rc;
  148. if (!nfit_mem || !test_bit(cmd, &nfit_mem->dsm_mask))
  149. return -ENXIO;
  150. /* lookup label space for the given dimm */
  151. for (i = 0; i < ARRAY_SIZE(handle); i++)
  152. if (__to_nfit_memdev(nfit_mem)->device_handle == handle[i])
  153. break;
  154. if (i >= ARRAY_SIZE(handle))
  155. return -ENXIO;
  156. switch (cmd) {
  157. case ND_CMD_GET_CONFIG_SIZE: {
  158. struct nd_cmd_get_config_size *nd_cmd = buf;
  159. if (buf_len < sizeof(*nd_cmd))
  160. return -EINVAL;
  161. nd_cmd->status = 0;
  162. nd_cmd->config_size = LABEL_SIZE;
  163. nd_cmd->max_xfer = SZ_4K;
  164. rc = 0;
  165. break;
  166. }
  167. case ND_CMD_GET_CONFIG_DATA: {
  168. struct nd_cmd_get_config_data_hdr *nd_cmd = buf;
  169. unsigned int len, offset = nd_cmd->in_offset;
  170. if (buf_len < sizeof(*nd_cmd))
  171. return -EINVAL;
  172. if (offset >= LABEL_SIZE)
  173. return -EINVAL;
  174. if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
  175. return -EINVAL;
  176. nd_cmd->status = 0;
  177. len = min(nd_cmd->in_length, LABEL_SIZE - offset);
  178. memcpy(nd_cmd->out_buf, t->label[i] + offset, len);
  179. rc = buf_len - sizeof(*nd_cmd) - len;
  180. break;
  181. }
  182. case ND_CMD_SET_CONFIG_DATA: {
  183. struct nd_cmd_set_config_hdr *nd_cmd = buf;
  184. unsigned int len, offset = nd_cmd->in_offset;
  185. u32 *status;
  186. if (buf_len < sizeof(*nd_cmd))
  187. return -EINVAL;
  188. if (offset >= LABEL_SIZE)
  189. return -EINVAL;
  190. if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
  191. return -EINVAL;
  192. status = buf + nd_cmd->in_length + sizeof(*nd_cmd);
  193. *status = 0;
  194. len = min(nd_cmd->in_length, LABEL_SIZE - offset);
  195. memcpy(t->label[i] + offset, nd_cmd->in_buf, len);
  196. rc = buf_len - sizeof(*nd_cmd) - (len + 4);
  197. break;
  198. }
  199. default:
  200. return -ENOTTY;
  201. }
  202. return rc;
  203. }
  204. static DEFINE_SPINLOCK(nfit_test_lock);
  205. static struct nfit_test *instances[NUM_NFITS];
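/*
 * release_nfit_res() is registered as a devm action in __test_alloc(),
 * so each emulated resource is unlinked from the lookup list and its
 * backing buffer freed when the test platform device goes away.
 */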
  206. static void release_nfit_res(void *data)
  207. {
  208. struct nfit_test_resource *nfit_res = data;
  209. struct resource *res = nfit_res->res;
  210. spin_lock(&nfit_test_lock);
  211. list_del(&nfit_res->list);
  212. spin_unlock(&nfit_test_lock);
  213. if (is_vmalloc_addr(nfit_res->buf))
  214. vfree(nfit_res->buf);
  215. else
  216. dma_free_coherent(nfit_res->dev, resource_size(res),
  217. nfit_res->buf, res->start);
  218. kfree(res);
  219. kfree(nfit_res);
  220. }
  221. static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
  222. void *buf)
  223. {
  224. struct device *dev = &t->pdev.dev;
  225. struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
  226. struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
  227. GFP_KERNEL);
  228. int rc;
  229. if (!res || !buf || !nfit_res)
  230. goto err;
  231. rc = devm_add_action(dev, release_nfit_res, nfit_res);
  232. if (rc)
  233. goto err;
  234. INIT_LIST_HEAD(&nfit_res->list);
  235. memset(buf, 0, size);
  236. nfit_res->dev = dev;
  237. nfit_res->buf = buf;
  238. nfit_res->res = res;
  239. res->start = *dma;
  240. res->end = *dma + size - 1;
  241. res->name = "NFIT";
  242. spin_lock(&nfit_test_lock);
  243. list_add(&nfit_res->list, &t->resources);
  244. spin_unlock(&nfit_test_lock);
  245. return nfit_res->buf;
  246. err:
  247. if (buf && !is_vmalloc_addr(buf))
  248. dma_free_coherent(dev, size, buf, *dma);
  249. else if (buf)
  250. vfree(buf);
  251. kfree(res);
  252. kfree(nfit_res);
  253. return NULL;
  254. }
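/*
 * test_alloc() backs an emulated physical range with vmalloc() memory
 * and reports the kernel virtual address as the range's "dma" address,
 * so nfit_test_lookup() can later resolve either the resource range or
 * the buffer address back to this allocation.
 */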
  255. static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
  256. {
  257. void *buf = vmalloc(size);
  258. *dma = (unsigned long) buf;
  259. return __test_alloc(t, size, dma, buf);
  260. }
  261. static void *test_alloc_coherent(struct nfit_test *t, size_t size,
  262. dma_addr_t *dma)
  263. {
  264. struct device *dev = &t->pdev.dev;
  265. void *buf = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
  266. return __test_alloc(t, size, dma, buf);
  267. }
  268. static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
  269. {
  270. int i;
  271. for (i = 0; i < ARRAY_SIZE(instances); i++) {
  272. struct nfit_test_resource *n, *nfit_res = NULL;
  273. struct nfit_test *t = instances[i];
  274. if (!t)
  275. continue;
  276. spin_lock(&nfit_test_lock);
  277. list_for_each_entry(n, &t->resources, list) {
  278. if (addr >= n->res->start && (addr < n->res->start
  279. + resource_size(n->res))) {
  280. nfit_res = n;
  281. break;
  282. } else if (addr >= (unsigned long) n->buf
  283. && (addr < (unsigned long) n->buf
  284. + resource_size(n->res))) {
  285. nfit_res = n;
  286. break;
  287. }
  288. }
  289. spin_unlock(&nfit_test_lock);
  290. if (nfit_res)
  291. return nfit_res;
  292. }
  293. return NULL;
  294. }
  295. static int nfit_test0_alloc(struct nfit_test *t)
  296. {
  297. size_t nfit_size = sizeof(struct acpi_table_nfit)
  298. + sizeof(struct acpi_nfit_system_address) * NUM_SPA
  299. + sizeof(struct acpi_nfit_memory_map) * NUM_MEM
  300. + sizeof(struct acpi_nfit_control_region) * NUM_DCR
  301. + sizeof(struct acpi_nfit_data_region) * NUM_BDW;
  302. int i;
  303. t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
  304. if (!t->nfit_buf)
  305. return -ENOMEM;
  306. t->nfit_size = nfit_size;
  307. t->spa_set[0] = test_alloc_coherent(t, SPA0_SIZE, &t->spa_set_dma[0]);
  308. if (!t->spa_set[0])
  309. return -ENOMEM;
  310. t->spa_set[1] = test_alloc_coherent(t, SPA1_SIZE, &t->spa_set_dma[1]);
  311. if (!t->spa_set[1])
  312. return -ENOMEM;
  313. for (i = 0; i < NUM_DCR; i++) {
  314. t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
  315. if (!t->dimm[i])
  316. return -ENOMEM;
  317. t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
  318. if (!t->label[i])
  319. return -ENOMEM;
  320. sprintf(t->label[i], "label%d", i);
  321. }
  322. for (i = 0; i < NUM_DCR; i++) {
  323. t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
  324. if (!t->dcr[i])
  325. return -ENOMEM;
  326. }
  327. return 0;
  328. }
  329. static int nfit_test1_alloc(struct nfit_test *t)
  330. {
  331. size_t nfit_size = sizeof(struct acpi_table_nfit)
  332. + sizeof(struct acpi_nfit_system_address)
  333. + sizeof(struct acpi_nfit_memory_map)
  334. + sizeof(struct acpi_nfit_control_region);
  335. t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
  336. if (!t->nfit_buf)
  337. return -ENOMEM;
  338. t->nfit_size = nfit_size;
  339. t->spa_set[0] = test_alloc_coherent(t, SPA2_SIZE, &t->spa_set_dma[0]);
  340. if (!t->spa_set[0])
  341. return -ENOMEM;
  342. return 0;
  343. }
  344. static void nfit_test_init_header(struct acpi_table_nfit *nfit, size_t size)
  345. {
  346. memcpy(nfit->header.signature, ACPI_SIG_NFIT, 4);
  347. nfit->header.length = size;
  348. nfit->header.revision = 1;
  349. memcpy(nfit->header.oem_id, "LIBND", 6);
  350. memcpy(nfit->header.oem_table_id, "TEST", 5);
  351. nfit->header.oem_revision = 1;
  352. memcpy(nfit->header.asl_compiler_id, "TST", 4);
  353. nfit->header.asl_compiler_revision = 1;
  354. }
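/*
 * nfit_test0_setup() lays the sub-tables out back to back in nfit_buf:
 * 10 SPA ranges, then 14 memory-map entries, then 4 control regions,
 * then 4 block-data-window regions, matching the size computed in
 * nfit_test0_alloc().
 */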
  355. static void nfit_test0_setup(struct nfit_test *t)
  356. {
  357. struct nvdimm_bus_descriptor *nd_desc;
  358. struct acpi_nfit_desc *acpi_desc;
  359. struct acpi_nfit_memory_map *memdev;
  360. void *nfit_buf = t->nfit_buf;
  361. size_t size = t->nfit_size;
  362. struct acpi_nfit_system_address *spa;
  363. struct acpi_nfit_control_region *dcr;
  364. struct acpi_nfit_data_region *bdw;
  365. unsigned int offset;
  366. nfit_test_init_header(nfit_buf, size);
  367. /*
  368. * spa0 (interleave first half of dimm0 and dimm1, note storage
  369. * does not actually alias the related block-data-window
  370. * regions)
  371. */
  372. spa = nfit_buf + sizeof(struct acpi_table_nfit);
  373. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  374. spa->header.length = sizeof(*spa);
  375. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  376. spa->range_index = 0+1;
  377. spa->address = t->spa_set_dma[0];
  378. spa->length = SPA0_SIZE;
  379. /*
  380. * spa1 (interleave last half of the 4 DIMMS, note storage
  381. * does not actually alias the related block-data-window
  382. * regions)
  383. */
  384. spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa);
  385. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  386. spa->header.length = sizeof(*spa);
  387. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  388. spa->range_index = 1+1;
  389. spa->address = t->spa_set_dma[1];
  390. spa->length = SPA1_SIZE;
  391. /* spa2 (dcr0) dimm0 */
  392. spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 2;
  393. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  394. spa->header.length = sizeof(*spa);
  395. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  396. spa->range_index = 2+1;
  397. spa->address = t->dcr_dma[0];
  398. spa->length = DCR_SIZE;
  399. /* spa3 (dcr1) dimm1 */
  400. spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 3;
  401. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  402. spa->header.length = sizeof(*spa);
  403. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  404. spa->range_index = 3+1;
  405. spa->address = t->dcr_dma[1];
  406. spa->length = DCR_SIZE;
  407. /* spa4 (dcr2) dimm2 */
  408. spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 4;
  409. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  410. spa->header.length = sizeof(*spa);
  411. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  412. spa->range_index = 4+1;
  413. spa->address = t->dcr_dma[2];
  414. spa->length = DCR_SIZE;
  415. /* spa5 (dcr3) dimm3 */
  416. spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 5;
  417. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  418. spa->header.length = sizeof(*spa);
  419. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
  420. spa->range_index = 5+1;
  421. spa->address = t->dcr_dma[3];
  422. spa->length = DCR_SIZE;
  423. /* spa6 (bdw for dcr0) dimm0 */
  424. spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 6;
  425. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  426. spa->header.length = sizeof(*spa);
  427. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  428. spa->range_index = 6+1;
  429. spa->address = t->dimm_dma[0];
  430. spa->length = DIMM_SIZE;
  431. /* spa7 (bdw for dcr1) dimm1 */
  432. spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 7;
  433. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  434. spa->header.length = sizeof(*spa);
  435. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  436. spa->range_index = 7+1;
  437. spa->address = t->dimm_dma[1];
  438. spa->length = DIMM_SIZE;
  439. /* spa8 (bdw for dcr2) dimm2 */
  440. spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 8;
  441. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  442. spa->header.length = sizeof(*spa);
  443. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  444. spa->range_index = 8+1;
  445. spa->address = t->dimm_dma[2];
  446. spa->length = DIMM_SIZE;
  447. /* spa9 (bdw for dcr3) dimm3 */
  448. spa = nfit_buf + sizeof(struct acpi_table_nfit) + sizeof(*spa) * 9;
  449. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  450. spa->header.length = sizeof(*spa);
  451. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
  452. spa->range_index = 9+1;
  453. spa->address = t->dimm_dma[3];
  454. spa->length = DIMM_SIZE;
  455. offset = sizeof(struct acpi_table_nfit) + sizeof(*spa) * 10;
  456. /* mem-region0 (spa0, dimm0) */
  457. memdev = nfit_buf + offset;
  458. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  459. memdev->header.length = sizeof(*memdev);
  460. memdev->device_handle = handle[0];
  461. memdev->physical_id = 0;
  462. memdev->region_id = 0;
  463. memdev->range_index = 0+1;
  464. memdev->region_index = 0+1;
  465. memdev->region_size = SPA0_SIZE/2;
  466. memdev->region_offset = t->spa_set_dma[0];
  467. memdev->address = 0;
  468. memdev->interleave_index = 0;
  469. memdev->interleave_ways = 2;
  470. /* mem-region1 (spa0, dimm1) */
  471. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
  472. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  473. memdev->header.length = sizeof(*memdev);
  474. memdev->device_handle = handle[1];
  475. memdev->physical_id = 1;
  476. memdev->region_id = 0;
  477. memdev->range_index = 0+1;
  478. memdev->region_index = 1+1;
  479. memdev->region_size = SPA0_SIZE/2;
  480. memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
  481. memdev->address = 0;
  482. memdev->interleave_index = 0;
  483. memdev->interleave_ways = 2;
  484. /* mem-region2 (spa1, dimm0) */
  485. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
  486. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  487. memdev->header.length = sizeof(*memdev);
  488. memdev->device_handle = handle[0];
  489. memdev->physical_id = 0;
  490. memdev->region_id = 1;
  491. memdev->range_index = 1+1;
  492. memdev->region_index = 0+1;
  493. memdev->region_size = SPA1_SIZE/4;
  494. memdev->region_offset = t->spa_set_dma[1];
  495. memdev->address = SPA0_SIZE/2;
  496. memdev->interleave_index = 0;
  497. memdev->interleave_ways = 4;
  498. /* mem-region3 (spa1, dimm1) */
  499. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
  500. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  501. memdev->header.length = sizeof(*memdev);
  502. memdev->device_handle = handle[1];
  503. memdev->physical_id = 1;
  504. memdev->region_id = 1;
  505. memdev->range_index = 1+1;
  506. memdev->region_index = 1+1;
  507. memdev->region_size = SPA1_SIZE/4;
  508. memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
  509. memdev->address = SPA0_SIZE/2;
  510. memdev->interleave_index = 0;
  511. memdev->interleave_ways = 4;
  512. /* mem-region4 (spa1, dimm2) */
  513. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
  514. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  515. memdev->header.length = sizeof(*memdev);
  516. memdev->device_handle = handle[2];
  517. memdev->physical_id = 2;
  518. memdev->region_id = 0;
  519. memdev->range_index = 1+1;
  520. memdev->region_index = 2+1;
  521. memdev->region_size = SPA1_SIZE/4;
  522. memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
  523. memdev->address = SPA0_SIZE/2;
  524. memdev->interleave_index = 0;
  525. memdev->interleave_ways = 4;
  526. /* mem-region5 (spa1, dimm3) */
  527. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
  528. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  529. memdev->header.length = sizeof(*memdev);
  530. memdev->device_handle = handle[3];
  531. memdev->physical_id = 3;
  532. memdev->region_id = 0;
  533. memdev->range_index = 1+1;
  534. memdev->region_index = 3+1;
  535. memdev->region_size = SPA1_SIZE/4;
  536. memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
  537. memdev->address = SPA0_SIZE/2;
  538. memdev->interleave_index = 0;
  539. memdev->interleave_ways = 4;
  540. /* mem-region6 (spa/dcr0, dimm0) */
  541. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
  542. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  543. memdev->header.length = sizeof(*memdev);
  544. memdev->device_handle = handle[0];
  545. memdev->physical_id = 0;
  546. memdev->region_id = 0;
  547. memdev->range_index = 2+1;
  548. memdev->region_index = 0+1;
  549. memdev->region_size = 0;
  550. memdev->region_offset = 0;
  551. memdev->address = 0;
  552. memdev->interleave_index = 0;
  553. memdev->interleave_ways = 1;
  554. /* mem-region7 (spa/dcr1, dimm1) */
  555. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
  556. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  557. memdev->header.length = sizeof(*memdev);
  558. memdev->device_handle = handle[1];
  559. memdev->physical_id = 1;
  560. memdev->region_id = 0;
  561. memdev->range_index = 3+1;
  562. memdev->region_index = 1+1;
  563. memdev->region_size = 0;
  564. memdev->region_offset = 0;
  565. memdev->address = 0;
  566. memdev->interleave_index = 0;
  567. memdev->interleave_ways = 1;
  568. /* mem-region8 (spa/dcr2, dimm2) */
  569. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
  570. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  571. memdev->header.length = sizeof(*memdev);
  572. memdev->device_handle = handle[2];
  573. memdev->physical_id = 2;
  574. memdev->region_id = 0;
  575. memdev->range_index = 4+1;
  576. memdev->region_index = 2+1;
  577. memdev->region_size = 0;
  578. memdev->region_offset = 0;
  579. memdev->address = 0;
  580. memdev->interleave_index = 0;
  581. memdev->interleave_ways = 1;
  582. /* mem-region9 (spa/dcr3, dimm3) */
  583. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
  584. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  585. memdev->header.length = sizeof(*memdev);
  586. memdev->device_handle = handle[3];
  587. memdev->physical_id = 3;
  588. memdev->region_id = 0;
  589. memdev->range_index = 5+1;
  590. memdev->region_index = 3+1;
  591. memdev->region_size = 0;
  592. memdev->region_offset = 0;
  593. memdev->address = 0;
  594. memdev->interleave_index = 0;
  595. memdev->interleave_ways = 1;
  596. /* mem-region10 (spa/bdw0, dimm0) */
  597. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
  598. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  599. memdev->header.length = sizeof(*memdev);
  600. memdev->device_handle = handle[0];
  601. memdev->physical_id = 0;
  602. memdev->region_id = 0;
  603. memdev->range_index = 6+1;
  604. memdev->region_index = 0+1;
  605. memdev->region_size = 0;
  606. memdev->region_offset = 0;
  607. memdev->address = 0;
  608. memdev->interleave_index = 0;
  609. memdev->interleave_ways = 1;
  610. /* mem-region11 (spa/bdw1, dimm1) */
  611. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
  612. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  613. memdev->header.length = sizeof(*memdev);
  614. memdev->device_handle = handle[1];
  615. memdev->physical_id = 1;
  616. memdev->region_id = 0;
  617. memdev->range_index = 7+1;
  618. memdev->region_index = 1+1;
  619. memdev->region_size = 0;
  620. memdev->region_offset = 0;
  621. memdev->address = 0;
  622. memdev->interleave_index = 0;
  623. memdev->interleave_ways = 1;
  624. /* mem-region12 (spa/bdw2, dimm2) */
  625. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
  626. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  627. memdev->header.length = sizeof(*memdev);
  628. memdev->device_handle = handle[2];
  629. memdev->physical_id = 2;
  630. memdev->region_id = 0;
  631. memdev->range_index = 8+1;
  632. memdev->region_index = 2+1;
  633. memdev->region_size = 0;
  634. memdev->region_offset = 0;
  635. memdev->address = 0;
  636. memdev->interleave_index = 0;
  637. memdev->interleave_ways = 1;
  639. /* mem-region13 (spa/bdw3, dimm3) */
  639. memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
  640. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  641. memdev->header.length = sizeof(*memdev);
  642. memdev->device_handle = handle[3];
  643. memdev->physical_id = 3;
  644. memdev->region_id = 0;
  645. memdev->range_index = 9+1;
  646. memdev->region_index = 3+1;
  647. memdev->region_size = 0;
  648. memdev->region_offset = 0;
  649. memdev->address = 0;
  650. memdev->interleave_index = 0;
  651. memdev->interleave_ways = 1;
  652. offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;
  653. /* dcr-descriptor0 */
  654. dcr = nfit_buf + offset;
  655. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  656. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  657. dcr->region_index = 0+1;
  658. dcr->vendor_id = 0xabcd;
  659. dcr->device_id = 0;
  660. dcr->revision_id = 1;
  661. dcr->serial_number = ~handle[0];
  662. dcr->windows = 1;
  663. dcr->window_size = DCR_SIZE;
  664. dcr->command_offset = 0;
  665. dcr->command_size = 8;
  666. dcr->status_offset = 8;
  667. dcr->status_size = 4;
  668. /* dcr-descriptor1 */
  669. dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
  670. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  671. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  672. dcr->region_index = 1+1;
  673. dcr->vendor_id = 0xabcd;
  674. dcr->device_id = 0;
  675. dcr->revision_id = 1;
  676. dcr->serial_number = ~handle[1];
  677. dcr->windows = 1;
  678. dcr->window_size = DCR_SIZE;
  679. dcr->command_offset = 0;
  680. dcr->command_size = 8;
  681. dcr->status_offset = 8;
  682. dcr->status_size = 4;
  683. /* dcr-descriptor2 */
  684. dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
  685. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  686. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  687. dcr->region_index = 2+1;
  688. dcr->vendor_id = 0xabcd;
  689. dcr->device_id = 0;
  690. dcr->revision_id = 1;
  691. dcr->serial_number = ~handle[2];
  692. dcr->windows = 1;
  693. dcr->window_size = DCR_SIZE;
  694. dcr->command_offset = 0;
  695. dcr->command_size = 8;
  696. dcr->status_offset = 8;
  697. dcr->status_size = 4;
  698. /* dcr-descriptor3 */
  699. dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
  700. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  701. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  702. dcr->region_index = 3+1;
  703. dcr->vendor_id = 0xabcd;
  704. dcr->device_id = 0;
  705. dcr->revision_id = 1;
  706. dcr->serial_number = ~handle[3];
  707. dcr->windows = 1;
  708. dcr->window_size = DCR_SIZE;
  709. dcr->command_offset = 0;
  710. dcr->command_size = 8;
  711. dcr->status_offset = 8;
  712. dcr->status_size = 4;
  713. offset = offset + sizeof(struct acpi_nfit_control_region) * 4;
  714. /* bdw0 (spa/dcr0, dimm0) */
  715. bdw = nfit_buf + offset;
  716. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  717. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  718. bdw->region_index = 0+1;
  719. bdw->windows = 1;
  720. bdw->offset = 0;
  721. bdw->size = BDW_SIZE;
  722. bdw->capacity = DIMM_SIZE;
  723. bdw->start_address = 0;
  724. /* bdw1 (spa/dcr1, dimm1) */
  725. bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
  726. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  727. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  728. bdw->region_index = 1+1;
  729. bdw->windows = 1;
  730. bdw->offset = 0;
  731. bdw->size = BDW_SIZE;
  732. bdw->capacity = DIMM_SIZE;
  733. bdw->start_address = 0;
  734. /* bdw2 (spa/dcr2, dimm2) */
  735. bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
  736. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  737. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  738. bdw->region_index = 2+1;
  739. bdw->windows = 1;
  740. bdw->offset = 0;
  741. bdw->size = BDW_SIZE;
  742. bdw->capacity = DIMM_SIZE;
  743. bdw->start_address = 0;
  744. /* bdw3 (spa/dcr3, dimm3) */
  745. bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
  746. bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
  747. bdw->header.length = sizeof(struct acpi_nfit_data_region);
  748. bdw->region_index = 3+1;
  749. bdw->windows = 1;
  750. bdw->offset = 0;
  751. bdw->size = BDW_SIZE;
  752. bdw->capacity = DIMM_SIZE;
  753. bdw->start_address = 0;
  754. acpi_desc = &t->acpi_desc;
  755. set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_dsm_force_en);
  756. set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
  757. set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_dsm_force_en);
  758. nd_desc = &acpi_desc->nd_desc;
  759. nd_desc->ndctl = nfit_test_ctl;
  760. }
  761. static void nfit_test1_setup(struct nfit_test *t)
  762. {
  763. size_t size = t->nfit_size, offset;
  764. void *nfit_buf = t->nfit_buf;
  765. struct acpi_nfit_memory_map *memdev;
  766. struct acpi_nfit_control_region *dcr;
  767. struct acpi_nfit_system_address *spa;
  768. nfit_test_init_header(nfit_buf, size);
  769. offset = sizeof(struct acpi_table_nfit);
  770. /* spa0 (flat range with no bdw aliasing) */
  771. spa = nfit_buf + offset;
  772. spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
  773. spa->header.length = sizeof(*spa);
  774. memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
  775. spa->range_index = 0+1;
  776. spa->address = t->spa_set_dma[0];
  777. spa->length = SPA2_SIZE;
  778. offset += sizeof(*spa);
  779. /* mem-region0 (spa0, dimm0) */
  780. memdev = nfit_buf + offset;
  781. memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
  782. memdev->header.length = sizeof(*memdev);
  783. memdev->device_handle = 0;
  784. memdev->physical_id = 0;
  785. memdev->region_id = 0;
  786. memdev->range_index = 0+1;
  787. memdev->region_index = 0+1;
  788. memdev->region_size = SPA2_SIZE;
  789. memdev->region_offset = 0;
  790. memdev->address = 0;
  791. memdev->interleave_index = 0;
  792. memdev->interleave_ways = 1;
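/* flag the lone bus1 dimm with every failure/health condition */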
  793. memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
  794. | ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
  795. | ACPI_NFIT_MEM_ARMED;
  796. offset += sizeof(*memdev);
  797. /* dcr-descriptor0 */
  798. dcr = nfit_buf + offset;
  799. dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
  800. dcr->header.length = sizeof(struct acpi_nfit_control_region);
  801. dcr->region_index = 0+1;
  802. dcr->vendor_id = 0xabcd;
  803. dcr->device_id = 0;
  804. dcr->revision_id = 1;
  805. dcr->serial_number = ~0;
  806. dcr->code = 0x201;
  807. dcr->windows = 0;
  808. dcr->window_size = 0;
  809. dcr->command_offset = 0;
  810. dcr->command_size = 0;
  811. dcr->status_offset = 0;
  812. dcr->status_size = 0;
  813. }
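/*
 * Stand-in for the BLK aperture I/O path: instead of programming a
 * real block-data-window, reads and writes are satisfied with memcpy()
 * against the mapping at mmio->base, while the region lane is still
 * acquired/released to mimic the real driver's locking.
 */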
  814. static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
  815. void *iobuf, u64 len, int rw)
  816. {
  817. struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
  818. struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
  819. struct nd_region *nd_region = &ndbr->nd_region;
  820. unsigned int lane;
  821. lane = nd_region_acquire_lane(nd_region);
  822. if (rw)
  823. memcpy(mmio->base + dpa, iobuf, len);
  824. else
  825. memcpy(iobuf, mmio->base + dpa, len);
  826. nd_region_release_lane(nd_region, lane);
  827. return 0;
  828. }
  829. static int nfit_test_probe(struct platform_device *pdev)
  830. {
  831. struct nvdimm_bus_descriptor *nd_desc;
  832. struct acpi_nfit_desc *acpi_desc;
  833. struct device *dev = &pdev->dev;
  834. struct nfit_test *nfit_test;
  835. int rc;
  836. nfit_test = to_nfit_test(&pdev->dev);
  837. /* common alloc */
  838. if (nfit_test->num_dcr) {
  839. int num = nfit_test->num_dcr;
  840. nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
  841. GFP_KERNEL);
  842. nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
  843. GFP_KERNEL);
  844. nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
  845. GFP_KERNEL);
  846. nfit_test->label_dma = devm_kcalloc(dev, num,
  847. sizeof(dma_addr_t), GFP_KERNEL);
  848. nfit_test->dcr = devm_kcalloc(dev, num,
  849. sizeof(struct nfit_test_dcr *), GFP_KERNEL);
  850. nfit_test->dcr_dma = devm_kcalloc(dev, num,
  851. sizeof(dma_addr_t), GFP_KERNEL);
  852. if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
  853. && nfit_test->label_dma && nfit_test->dcr
  854. && nfit_test->dcr_dma)
  855. /* pass */;
  856. else
  857. return -ENOMEM;
  858. }
  859. if (nfit_test->num_pm) {
  860. int num = nfit_test->num_pm;
  861. nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
  862. GFP_KERNEL);
  863. nfit_test->spa_set_dma = devm_kcalloc(dev, num,
  864. sizeof(dma_addr_t), GFP_KERNEL);
  865. if (nfit_test->spa_set && nfit_test->spa_set_dma)
  866. /* pass */;
  867. else
  868. return -ENOMEM;
  869. }
  870. /* per-nfit specific alloc */
  871. if (nfit_test->alloc(nfit_test))
  872. return -ENOMEM;
  873. nfit_test->setup(nfit_test);
  874. acpi_desc = &nfit_test->acpi_desc;
  875. acpi_desc->dev = &pdev->dev;
  876. acpi_desc->nfit = nfit_test->nfit_buf;
  877. acpi_desc->blk_do_io = nfit_test_blk_do_io;
  878. nd_desc = &acpi_desc->nd_desc;
  879. nd_desc->attr_groups = acpi_nfit_attribute_groups;
  880. acpi_desc->nvdimm_bus = nvdimm_bus_register(&pdev->dev, nd_desc);
  881. if (!acpi_desc->nvdimm_bus)
  882. return -ENXIO;
  883. rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_size);
  884. if (rc) {
  885. nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
  886. return rc;
  887. }
  888. return 0;
  889. }
  890. static int nfit_test_remove(struct platform_device *pdev)
  891. {
  892. struct nfit_test *nfit_test = to_nfit_test(&pdev->dev);
  893. struct acpi_nfit_desc *acpi_desc = &nfit_test->acpi_desc;
  894. nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
  895. return 0;
  896. }
  897. static void nfit_test_release(struct device *dev)
  898. {
  899. struct nfit_test *nfit_test = to_nfit_test(dev);
  900. kfree(nfit_test);
  901. }
  902. static const struct platform_device_id nfit_test_id[] = {
  903. { KBUILD_MODNAME },
  904. { },
  905. };
  906. static struct platform_driver nfit_test_driver = {
  907. .probe = nfit_test_probe,
  908. .remove = nfit_test_remove,
  909. .driver = {
  910. .name = KBUILD_MODNAME,
  911. },
  912. .id_table = nfit_test_id,
  913. };
  914. #ifdef CONFIG_CMA_SIZE_MBYTES
  915. #define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
  916. #else
  917. #define CMA_SIZE_MBYTES 0
  918. #endif
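/*
 * Register NUM_NFITS test platform devices.  A one-time 128M
 * dma_alloc_coherent() check below warns that 128M of free CMA is
 * needed and aborts module load if that allocation fails.
 */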
  919. static __init int nfit_test_init(void)
  920. {
  921. int rc, i;
  922. nfit_test_setup(nfit_test_lookup);
  923. for (i = 0; i < NUM_NFITS; i++) {
  924. struct nfit_test *nfit_test;
  925. struct platform_device *pdev;
  926. static int once;
  927. nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
  928. if (!nfit_test) {
  929. rc = -ENOMEM;
  930. goto err_register;
  931. }
  932. INIT_LIST_HEAD(&nfit_test->resources);
  933. switch (i) {
  934. case 0:
  935. nfit_test->num_pm = NUM_PM;
  936. nfit_test->num_dcr = NUM_DCR;
  937. nfit_test->alloc = nfit_test0_alloc;
  938. nfit_test->setup = nfit_test0_setup;
  939. break;
  940. case 1:
  941. nfit_test->num_pm = 1;
  942. nfit_test->alloc = nfit_test1_alloc;
  943. nfit_test->setup = nfit_test1_setup;
  944. break;
  945. default:
  946. rc = -EINVAL;
  947. goto err_register;
  948. }
  949. pdev = &nfit_test->pdev;
  950. pdev->name = KBUILD_MODNAME;
  951. pdev->id = i;
  952. pdev->dev.release = nfit_test_release;
  953. rc = platform_device_register(pdev);
  954. if (rc) {
  955. put_device(&pdev->dev);
  956. goto err_register;
  957. }
  958. rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  959. if (rc)
  960. goto err_register;
  961. instances[i] = nfit_test;
  962. if (!once++) {
  963. dma_addr_t dma;
  964. void *buf;
  965. buf = dma_alloc_coherent(&pdev->dev, SZ_128M, &dma,
  966. GFP_KERNEL);
  967. if (!buf) {
  968. rc = -ENOMEM;
  969. dev_warn(&pdev->dev, "need 128M of free cma\n");
  970. goto err_register;
  971. }
  972. dma_free_coherent(&pdev->dev, SZ_128M, buf, dma);
  973. }
  974. }
  975. rc = platform_driver_register(&nfit_test_driver);
  976. if (rc)
  977. goto err_register;
  978. return 0;
  979. err_register:
  980. for (i = 0; i < NUM_NFITS; i++)
  981. if (instances[i])
  982. platform_device_unregister(&instances[i]->pdev);
  983. nfit_test_teardown();
  984. return rc;
  985. }
  986. static __exit void nfit_test_exit(void)
  987. {
  988. int i;
  989. platform_driver_unregister(&nfit_test_driver);
  990. for (i = 0; i < NUM_NFITS; i++)
  991. platform_device_unregister(&instances[i]->pdev);
  992. nfit_test_teardown();
  993. }
  994. module_init(nfit_test_init);
  995. module_exit(nfit_test_exit);
  996. MODULE_LICENSE("GPL v2");
  997. MODULE_AUTHOR("Intel Corporation");