nfit.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955
  1. /*
  2. * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of version 2 of the GNU General Public License as
  6. * published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  11. * General Public License for more details.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/platform_device.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/workqueue.h>
  17. #include <linux/libnvdimm.h>
  18. #include <linux/vmalloc.h>
  19. #include <linux/device.h>
  20. #include <linux/module.h>
  21. #include <linux/mutex.h>
  22. #include <linux/ndctl.h>
  23. #include <linux/sizes.h>
  24. #include <linux/list.h>
  25. #include <linux/slab.h>
  26. #include <nd-core.h>
  27. #include <nfit.h>
  28. #include <nd.h>
  29. #include "nfit_test.h"
  30. /*
  31. * Generate an NFIT table to describe the following topology:
  32. *
  33. * BUS0: Interleaved PMEM regions, and aliasing with BLK regions
  34. *
  35. * (a) (b) DIMM BLK-REGION
  36. * +----------+--------------+----------+---------+
  37. * +------+ | blk2.0 | pm0.0 | blk2.1 | pm1.0 | 0 region2
  38. * | imc0 +--+- - - - - region0 - - - -+----------+ +
  39. * +--+---+ | blk3.0 | pm0.0 | blk3.1 | pm1.0 | 1 region3
  40. * | +----------+--------------v----------v v
  41. * +--+---+ | |
  42. * | cpu0 | region1
  43. * +--+---+ | |
  44. * | +-------------------------^----------^ ^
  45. * +--+---+ | blk4.0 | pm1.0 | 2 region4
  46. * | imc1 +--+-------------------------+----------+ +
  47. * +------+ | blk5.0 | pm1.0 | 3 region5
  48. * +-------------------------+----------+-+-------+
  49. *
  50. * +--+---+
  51. * | cpu1 |
  52. * +--+---+ (Hotplug DIMM)
  53. * | +----------------------------------------------+
  54. * +--+---+ | blk6.0/pm7.0 | 4 region6/7
  55. * | imc0 +--+----------------------------------------------+
  56. * +------+
  57. *
  58. *
  59. * *) In this layout we have four dimms and two memory controllers in one
  60. * socket. Each unique interface (BLK or PMEM) to DPA space
  61. * is identified by a region device with a dynamically assigned id.
  62. *
  63. * *) The first portion of dimm0 and dimm1 are interleaved as REGION0.
  64. * A single PMEM namespace "pm0.0" is created using half of the
  65. * REGION0 SPA-range. REGION0 spans dimm0 and dimm1. PMEM namespace
  66. * allocate from the bottom of a region. The unallocated
  67. * portion of REGION0 aliases with REGION2 and REGION3. That
  68. * unallocated capacity is reclaimed as BLK namespaces ("blk2.0" and
  69. * "blk3.0") starting at the base of each DIMM to offset (a) in those
  70. * DIMMs. "pm0.0", "blk2.0" and "blk3.0" are free-form readable
  71. * names that can be assigned to a namespace.
  72. *
  73. * *) In the last portion of dimm0 and dimm1 we have an interleaved
  74. * SPA range, REGION1, that spans those two dimms as well as dimm2
  75. * and dimm3. Some of REGION1 allocated to a PMEM namespace named
  76. * "pm1.0" the rest is reclaimed in 4 BLK namespaces (for each
  77. * dimm in the interleave set), "blk2.1", "blk3.1", "blk4.0", and
  78. * "blk5.0".
  79. *
  80. * *) The portion of dimm2 and dimm3 that do not participate in the
  81. * REGION1 interleaved SPA range (i.e. the DPA address below offset
  82. * (b) are also included in the "blk4.0" and "blk5.0" namespaces.
  83. * Note, that BLK namespaces need not be contiguous in DPA-space, and
  84. * can consume aliased capacity from multiple interleave sets.
  85. *
  86. * BUS1: Legacy NVDIMM (single contiguous range)
  87. *
  88. * region2
  89. * +---------------------+
  90. * |---------------------|
  91. * || pm2.0 ||
  92. * |---------------------|
  93. * +---------------------+
  94. *
  95. * *) A NFIT-table may describe a simple system-physical-address range
  96. * with no BLK aliasing. This type of region may optionally
  97. * reference an NVDIMM.
  98. */
/* Geometry constants for the emulated NFIT topology described above. */
enum {
	NUM_PM = 3,		/* PMEM SPA ranges */
	NUM_DCR = 5,		/* dimms / dimm-control-regions */
	NUM_HINTS = 8,		/* flush hint addresses per dimm */
	NUM_BDW = NUM_DCR,	/* one block-data-window per dimm */
	NUM_SPA = NUM_PM + NUM_DCR + NUM_BDW,
	NUM_MEM = NUM_DCR + NUM_BDW + 2 /* spa0 iset */ + 4 /* spa1 iset */,
	DIMM_SIZE = SZ_32M,
	LABEL_SIZE = SZ_128K,	/* emulated label storage area per dimm */
	SPA_VCD_SIZE = SZ_4M,
	SPA0_SIZE = DIMM_SIZE,
	SPA1_SIZE = DIMM_SIZE*2,
	SPA2_SIZE = DIMM_SIZE,
	BDW_SIZE = 64 << 8,	/* size of the aperture in nfit_test_dcr */
	DCR_SIZE = 12,
	NUM_NFITS = 2, /* permit testing multiple NFITs per system */
};
/*
 * Emulated dimm control region layout: block-window address/status
 * registers followed by the data aperture.
 */
struct nfit_test_dcr {
	__le64 bdw_addr;
	__le32 bdw_status;
	__u8 aperature[BDW_SIZE];	/* [sic] historical misspelling of "aperture" */
};
/*
 * Compose an NFIT device handle from its topology coordinates:
 * node, socket, memory controller, channel, dimm.
 */
#define NFIT_DIMM_HANDLE(node, socket, imc, chan, dimm) \
	(((node & 0xfff) << 16) | ((socket & 0xf) << 12) \
	| ((imc & 0xf) << 8) | ((chan & 0xf) << 4) | (dimm & 0xf))

/* Handles for the five emulated dimms (see the topology diagram above). */
static u32 handle[NUM_DCR] = {
	[0] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 0),
	[1] = NFIT_DIMM_HANDLE(0, 0, 0, 0, 1),
	[2] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 0),
	[3] = NFIT_DIMM_HANDLE(0, 0, 1, 0, 1),
	[4] = NFIT_DIMM_HANDLE(0, 1, 0, 0, 0),
};

/* Per-dimm bitmask of DSM function numbers forced to fail (set via sysfs). */
static unsigned long dimm_fail_cmd_flags[NUM_DCR];
/* Per-instance state for one emulated NVDIMM bus. */
struct nfit_test {
	struct acpi_nfit_desc acpi_desc;
	struct platform_device pdev;
	struct list_head resources;	/* nfit_test_resource list, under nfit_test_lock */
	void *nfit_buf;			/* the generated NFIT table */
	dma_addr_t nfit_dma;
	size_t nfit_size;
	int num_dcr;			/* dimms/control regions on this bus */
	int num_pm;			/* pmem SPA ranges on this bus */
	void **dimm;			/* per-dimm backing storage */
	dma_addr_t *dimm_dma;
	void **flush;			/* per-dimm flush hint buffers */
	dma_addr_t *flush_dma;
	void **label;			/* per-dimm label areas (LABEL_SIZE each) */
	dma_addr_t *label_dma;
	void **spa_set;			/* interleave-set backing buffers */
	dma_addr_t *spa_set_dma;
	struct nfit_test_dcr **dcr;	/* per-dimm control/status regions */
	dma_addr_t *dcr_dma;
	int (*alloc)(struct nfit_test *t);	/* per-flavor resource allocator */
	void (*setup)(struct nfit_test *t);	/* per-flavor NFIT table builder */
	int setup_hotplug;
	union acpi_object **_fit;	/* presumably a _FIT payload override -- confirm with callers */
	dma_addr_t _fit_dma;
	/* emulated address-range-scrub state */
	struct ars_state {
		struct nd_cmd_ars_status *ars_status;	/* posted scrub result */
		unsigned long deadline;	/* jiffies when the fake scrub completes */
		spinlock_t lock;
	} ars_state;
	struct device *dimm_dev[NUM_DCR];	/* sysfs "test_dimmN" devices */
};
  163. static struct nfit_test *to_nfit_test(struct device *dev)
  164. {
  165. struct platform_device *pdev = to_platform_device(dev);
  166. return container_of(pdev, struct nfit_test, pdev);
  167. }
  168. static int nfit_test_cmd_get_config_size(struct nd_cmd_get_config_size *nd_cmd,
  169. unsigned int buf_len)
  170. {
  171. if (buf_len < sizeof(*nd_cmd))
  172. return -EINVAL;
  173. nd_cmd->status = 0;
  174. nd_cmd->config_size = LABEL_SIZE;
  175. nd_cmd->max_xfer = SZ_4K;
  176. return 0;
  177. }
  178. static int nfit_test_cmd_get_config_data(struct nd_cmd_get_config_data_hdr
  179. *nd_cmd, unsigned int buf_len, void *label)
  180. {
  181. unsigned int len, offset = nd_cmd->in_offset;
  182. int rc;
  183. if (buf_len < sizeof(*nd_cmd))
  184. return -EINVAL;
  185. if (offset >= LABEL_SIZE)
  186. return -EINVAL;
  187. if (nd_cmd->in_length + sizeof(*nd_cmd) > buf_len)
  188. return -EINVAL;
  189. nd_cmd->status = 0;
  190. len = min(nd_cmd->in_length, LABEL_SIZE - offset);
  191. memcpy(nd_cmd->out_buf, label + offset, len);
  192. rc = buf_len - sizeof(*nd_cmd) - len;
  193. return rc;
  194. }
/*
 * Emulate the SET_CONFIG_DATA DSM: copy label bytes from the caller's
 * buffer into the emulated label area at in_offset.  A 4-byte status
 * word trails the input payload inside the command buffer.
 * Returns the count of residual buffer bytes, or -EINVAL on a short or
 * out-of-range request.
 */
static int nfit_test_cmd_set_config_data(struct nd_cmd_set_config_hdr *nd_cmd,
		unsigned int buf_len, void *label)
{
	unsigned int len, offset = nd_cmd->in_offset;
	u32 *status;
	int rc;

	if (buf_len < sizeof(*nd_cmd))
		return -EINVAL;
	if (offset >= LABEL_SIZE)
		return -EINVAL;
	/* header + payload + trailing 4-byte status word must all fit */
	if (nd_cmd->in_length + sizeof(*nd_cmd) + 4 > buf_len)
		return -EINVAL;

	/* the status word lives immediately after the input payload */
	status = (void *)nd_cmd + nd_cmd->in_length + sizeof(*nd_cmd);
	*status = 0;
	/* clamp so the write cannot run past the end of the label area */
	len = min(nd_cmd->in_length, LABEL_SIZE - offset);
	memcpy(label + offset, nd_cmd->in_buf, len);
	rc = buf_len - sizeof(*nd_cmd) - (len + 4);

	return rc;
}
  214. #define NFIT_TEST_ARS_RECORDS 4
  215. #define NFIT_TEST_CLEAR_ERR_UNIT 256
  216. static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
  217. unsigned int buf_len)
  218. {
  219. if (buf_len < sizeof(*nd_cmd))
  220. return -EINVAL;
  221. nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
  222. + NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
  223. nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
  224. nd_cmd->clear_err_unit = NFIT_TEST_CLEAR_ERR_UNIT;
  225. return 0;
  226. }
  227. /*
  228. * Initialize the ars_state to return an ars_result 1 second in the future with
  229. * a 4K error range in the middle of the requested address range.
  230. */
  231. static void post_ars_status(struct ars_state *ars_state, u64 addr, u64 len)
  232. {
  233. struct nd_cmd_ars_status *ars_status;
  234. struct nd_ars_record *ars_record;
  235. ars_state->deadline = jiffies + 1*HZ;
  236. ars_status = ars_state->ars_status;
  237. ars_status->status = 0;
  238. ars_status->out_length = sizeof(struct nd_cmd_ars_status)
  239. + sizeof(struct nd_ars_record);
  240. ars_status->address = addr;
  241. ars_status->length = len;
  242. ars_status->type = ND_ARS_PERSISTENT;
  243. ars_status->num_records = 1;
  244. ars_record = &ars_status->records[0];
  245. ars_record->handle = 0;
  246. ars_record->err_address = addr + len / 2;
  247. ars_record->length = SZ_4K;
  248. }
  249. static int nfit_test_cmd_ars_start(struct ars_state *ars_state,
  250. struct nd_cmd_ars_start *ars_start, unsigned int buf_len,
  251. int *cmd_rc)
  252. {
  253. if (buf_len < sizeof(*ars_start))
  254. return -EINVAL;
  255. spin_lock(&ars_state->lock);
  256. if (time_before(jiffies, ars_state->deadline)) {
  257. ars_start->status = NFIT_ARS_START_BUSY;
  258. *cmd_rc = -EBUSY;
  259. } else {
  260. ars_start->status = 0;
  261. ars_start->scrub_time = 1;
  262. post_ars_status(ars_state, ars_start->address,
  263. ars_start->length);
  264. *cmd_rc = 0;
  265. }
  266. spin_unlock(&ars_state->lock);
  267. return 0;
  268. }
  269. static int nfit_test_cmd_ars_status(struct ars_state *ars_state,
  270. struct nd_cmd_ars_status *ars_status, unsigned int buf_len,
  271. int *cmd_rc)
  272. {
  273. if (buf_len < ars_state->ars_status->out_length)
  274. return -EINVAL;
  275. spin_lock(&ars_state->lock);
  276. if (time_before(jiffies, ars_state->deadline)) {
  277. memset(ars_status, 0, buf_len);
  278. ars_status->status = NFIT_ARS_STATUS_BUSY;
  279. ars_status->out_length = sizeof(*ars_status);
  280. *cmd_rc = -EBUSY;
  281. } else {
  282. memcpy(ars_status, ars_state->ars_status,
  283. ars_state->ars_status->out_length);
  284. *cmd_rc = 0;
  285. }
  286. spin_unlock(&ars_state->lock);
  287. return 0;
  288. }
  289. static int nfit_test_cmd_clear_error(struct nd_cmd_clear_error *clear_err,
  290. unsigned int buf_len, int *cmd_rc)
  291. {
  292. const u64 mask = NFIT_TEST_CLEAR_ERR_UNIT - 1;
  293. if (buf_len < sizeof(*clear_err))
  294. return -EINVAL;
  295. if ((clear_err->address & mask) || (clear_err->length & mask))
  296. return -EINVAL;
  297. /*
  298. * Report 'all clear' success for all commands even though a new
  299. * scrub will find errors again. This is enough to have the
  300. * error removed from the 'badblocks' tracking in the pmem
  301. * driver.
  302. */
  303. clear_err->status = 0;
  304. clear_err->cleared = clear_err->length;
  305. *cmd_rc = 0;
  306. return 0;
  307. }
/*
 * Emulate the SMART health DSM: return a fixed payload with every
 * field flagged valid and a non-critical health state.
 * Returns 0, or -EINVAL if the buffer cannot hold the command.
 */
static int nfit_test_cmd_smart(struct nd_cmd_smart *smart, unsigned int buf_len)
{
	static const struct nd_smart_payload smart_data = {
		.flags = ND_SMART_HEALTH_VALID | ND_SMART_TEMP_VALID
			| ND_SMART_SPARES_VALID | ND_SMART_ALARM_VALID
			| ND_SMART_USED_VALID | ND_SMART_SHUTDOWN_VALID,
		.health = ND_SMART_NON_CRITICAL_HEALTH,
		.temperature = 23 * 16,	/* scaled by 16, presumably 1/16 degree units -- confirm */
		.spares = 75,
		.alarm_flags = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
		.life_used = 5,
		.shutdown_state = 0,
		.vendor_size = 0,
	};

	if (buf_len < sizeof(*smart))
		return -EINVAL;
	memcpy(smart->data, &smart_data, sizeof(smart_data));
	return 0;
}
/*
 * Emulate the SMART threshold DSM: fixed alarm thresholds for the
 * spares and temperature trip points.
 * Returns 0, or -EINVAL if the buffer cannot hold the command.
 */
static int nfit_test_cmd_smart_threshold(struct nd_cmd_smart_threshold *smart_t,
		unsigned int buf_len)
{
	static const struct nd_smart_threshold_payload smart_t_data = {
		.alarm_control = ND_SMART_SPARE_TRIP | ND_SMART_TEMP_TRIP,
		.temperature = 40 * 16,	/* same scaling as nfit_test_cmd_smart */
		.spares = 5,
	};

	if (buf_len < sizeof(*smart_t))
		return -EINVAL;
	memcpy(smart_t->data, &smart_t_data, sizeof(smart_t_data));
	return 0;
}
/*
 * Top-level DSM dispatcher for the emulated bus.  With a @nvdimm this
 * routes per-dimm commands (label access, SMART); without one it routes
 * bus-level ARS/clear-error commands.  ND_CMD_CALL packages are
 * unwrapped so @func/@buf/@buf_len describe the embedded command.
 * Returns 0 with the command result in *@cmd_rc, or a negative errno
 * for unsupported/malformed requests.
 */
static int nfit_test_ctl(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd, void *buf,
		unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
	struct nfit_test *t = container_of(acpi_desc, typeof(*t), acpi_desc);
	unsigned int func = cmd;
	int i, rc = 0, __cmd_rc;

	/* callers may not care about the firmware status; give it a home */
	if (!cmd_rc)
		cmd_rc = &__cmd_rc;
	*cmd_rc = 0;

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);

		if (!nfit_mem)
			return -ENOTTY;

		if (cmd == ND_CMD_CALL) {
			/* unwrap the passthrough package */
			struct nd_cmd_pkg *call_pkg = buf;

			buf_len = call_pkg->nd_size_in + call_pkg->nd_size_out;
			buf = (void *) call_pkg->nd_payload;
			func = call_pkg->nd_command;

			if (call_pkg->nd_family != nfit_mem->family)
				return -ENOTTY;
		}

		if (!test_bit(cmd, &cmd_mask)
				|| !test_bit(func, &nfit_mem->dsm_mask))
			return -ENOTTY;

		/* lookup label space for the given dimm */
		for (i = 0; i < ARRAY_SIZE(handle); i++)
			if (__to_nfit_memdev(nfit_mem)->device_handle ==
					handle[i])
				break;
		if (i >= ARRAY_SIZE(handle))
			return -ENXIO;

		/* honor the sysfs-configured fault injection mask */
		if ((1 << func) & dimm_fail_cmd_flags[i])
			return -EIO;

		switch (func) {
		case ND_CMD_GET_CONFIG_SIZE:
			rc = nfit_test_cmd_get_config_size(buf, buf_len);
			break;
		case ND_CMD_GET_CONFIG_DATA:
			rc = nfit_test_cmd_get_config_data(buf, buf_len,
				t->label[i]);
			break;
		case ND_CMD_SET_CONFIG_DATA:
			rc = nfit_test_cmd_set_config_data(buf, buf_len,
				t->label[i]);
			break;
		case ND_CMD_SMART:
			rc = nfit_test_cmd_smart(buf, buf_len);
			break;
		case ND_CMD_SMART_THRESHOLD:
			rc = nfit_test_cmd_smart_threshold(buf, buf_len);
			/* also fire a health-event notification (0x81) */
			device_lock(&t->pdev.dev);
			__acpi_nvdimm_notify(t->dimm_dev[i], 0x81);
			device_unlock(&t->pdev.dev);
			break;
		default:
			return -ENOTTY;
		}
	} else {
		/* bus-level commands: address range scrub machinery */
		struct ars_state *ars_state = &t->ars_state;

		if (!nd_desc || !test_bit(cmd, &nd_desc->cmd_mask))
			return -ENOTTY;

		switch (func) {
		case ND_CMD_ARS_CAP:
			rc = nfit_test_cmd_ars_cap(buf, buf_len);
			break;
		case ND_CMD_ARS_START:
			rc = nfit_test_cmd_ars_start(ars_state, buf, buf_len,
					cmd_rc);
			break;
		case ND_CMD_ARS_STATUS:
			rc = nfit_test_cmd_ars_status(ars_state, buf, buf_len,
					cmd_rc);
			break;
		case ND_CMD_CLEAR_ERROR:
			rc = nfit_test_cmd_clear_error(buf, buf_len, cmd_rc);
			break;
		default:
			return -ENOTTY;
		}
	}

	return rc;
}
/* Protects every instance's resources list (see nfit_test_lookup). */
static DEFINE_SPINLOCK(nfit_test_lock);
/* Registered test bus instances searched by nfit_test_lookup(). */
static struct nfit_test *instances[NUM_NFITS];
  427. static void release_nfit_res(void *data)
  428. {
  429. struct nfit_test_resource *nfit_res = data;
  430. spin_lock(&nfit_test_lock);
  431. list_del(&nfit_res->list);
  432. spin_unlock(&nfit_test_lock);
  433. vfree(nfit_res->buf);
  434. kfree(nfit_res);
  435. }
  436. static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
  437. void *buf)
  438. {
  439. struct device *dev = &t->pdev.dev;
  440. struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
  441. GFP_KERNEL);
  442. int rc;
  443. if (!buf || !nfit_res)
  444. goto err;
  445. rc = devm_add_action(dev, release_nfit_res, nfit_res);
  446. if (rc)
  447. goto err;
  448. INIT_LIST_HEAD(&nfit_res->list);
  449. memset(buf, 0, size);
  450. nfit_res->dev = dev;
  451. nfit_res->buf = buf;
  452. nfit_res->res.start = *dma;
  453. nfit_res->res.end = *dma + size - 1;
  454. nfit_res->res.name = "NFIT";
  455. spin_lock_init(&nfit_res->lock);
  456. INIT_LIST_HEAD(&nfit_res->requests);
  457. spin_lock(&nfit_test_lock);
  458. list_add(&nfit_res->list, &t->resources);
  459. spin_unlock(&nfit_test_lock);
  460. return nfit_res->buf;
  461. err:
  462. if (buf)
  463. vfree(buf);
  464. kfree(nfit_res);
  465. return NULL;
  466. }
/*
 * vmalloc() a buffer and register it as a fake NFIT resource; the
 * emulated "physical" address is simply the kernel virtual address.
 * Returns the buffer or NULL on allocation failure.
 */
static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
{
	void *buf = vmalloc(size);

	*dma = (unsigned long) buf;
	return __test_alloc(t, size, dma, buf);
}
  473. static struct nfit_test_resource *nfit_test_lookup(resource_size_t addr)
  474. {
  475. int i;
  476. for (i = 0; i < ARRAY_SIZE(instances); i++) {
  477. struct nfit_test_resource *n, *nfit_res = NULL;
  478. struct nfit_test *t = instances[i];
  479. if (!t)
  480. continue;
  481. spin_lock(&nfit_test_lock);
  482. list_for_each_entry(n, &t->resources, list) {
  483. if (addr >= n->res.start && (addr < n->res.start
  484. + resource_size(&n->res))) {
  485. nfit_res = n;
  486. break;
  487. } else if (addr >= (unsigned long) n->buf
  488. && (addr < (unsigned long) n->buf
  489. + resource_size(&n->res))) {
  490. nfit_res = n;
  491. break;
  492. }
  493. }
  494. spin_unlock(&nfit_test_lock);
  495. if (nfit_res)
  496. return nfit_res;
  497. }
  498. return NULL;
  499. }
  500. static int ars_state_init(struct device *dev, struct ars_state *ars_state)
  501. {
  502. ars_state->ars_status = devm_kzalloc(dev,
  503. sizeof(struct nd_cmd_ars_status)
  504. + sizeof(struct nd_ars_record) * NFIT_TEST_ARS_RECORDS,
  505. GFP_KERNEL);
  506. if (!ars_state->ars_status)
  507. return -ENOMEM;
  508. spin_lock_init(&ars_state->lock);
  509. return 0;
  510. }
  511. static void put_dimms(void *data)
  512. {
  513. struct device **dimm_dev = data;
  514. int i;
  515. for (i = 0; i < NUM_DCR; i++)
  516. if (dimm_dev[i])
  517. device_unregister(dimm_dev[i]);
  518. }
  519. static struct class *nfit_test_dimm;
  520. static int dimm_name_to_id(struct device *dev)
  521. {
  522. int dimm;
  523. if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1
  524. || dimm >= NUM_DCR || dimm < 0)
  525. return -ENXIO;
  526. return dimm;
  527. }
  528. static ssize_t handle_show(struct device *dev, struct device_attribute *attr,
  529. char *buf)
  530. {
  531. int dimm = dimm_name_to_id(dev);
  532. if (dimm < 0)
  533. return dimm;
  534. return sprintf(buf, "%#x", handle[dimm]);
  535. }
  536. DEVICE_ATTR_RO(handle);
  537. static ssize_t fail_cmd_show(struct device *dev, struct device_attribute *attr,
  538. char *buf)
  539. {
  540. int dimm = dimm_name_to_id(dev);
  541. if (dimm < 0)
  542. return dimm;
  543. return sprintf(buf, "%#lx\n", dimm_fail_cmd_flags[dimm]);
  544. }
  545. static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr,
  546. const char *buf, size_t size)
  547. {
  548. int dimm = dimm_name_to_id(dev);
  549. unsigned long val;
  550. ssize_t rc;
  551. if (dimm < 0)
  552. return dimm;
  553. rc = kstrtol(buf, 0, &val);
  554. if (rc)
  555. return rc;
  556. dimm_fail_cmd_flags[dimm] = val;
  557. return size;
  558. }
  559. static DEVICE_ATTR_RW(fail_cmd);
/* sysfs attributes attached to each test_dimm device. */
static struct attribute *nfit_test_dimm_attributes[] = {
	&dev_attr_fail_cmd.attr,
	&dev_attr_handle.attr,
	NULL,
};

static struct attribute_group nfit_test_dimm_attribute_group = {
	.attrs = nfit_test_dimm_attributes,
};

static const struct attribute_group *nfit_test_dimm_attribute_groups[] = {
	&nfit_test_dimm_attribute_group,
	NULL,
};
/*
 * Allocate every backing resource for the primary (fully featured) test
 * bus: the NFIT buffer, the interleave-set SPA buffers, per-dimm
 * storage/label/flush/control-region buffers, the sysfs test_dimm
 * devices, and the ARS state.  All allocations are devm-managed.
 * Returns 0 on success, -ENOMEM on any failure.
 */
static int nfit_test0_alloc(struct nfit_test *t)
{
	/* sized to hold every sub-table the setup routine will emit */
	size_t nfit_size = sizeof(struct acpi_nfit_system_address) * NUM_SPA
			+ sizeof(struct acpi_nfit_memory_map) * NUM_MEM
			+ sizeof(struct acpi_nfit_control_region) * NUM_DCR
			+ offsetof(struct acpi_nfit_control_region,
					window_size) * NUM_DCR
			+ sizeof(struct acpi_nfit_data_region) * NUM_BDW
			+ (sizeof(struct acpi_nfit_flush_address)
					+ sizeof(u64) * NUM_HINTS) * NUM_DCR;
	int i;

	t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
	if (!t->nfit_buf)
		return -ENOMEM;
	t->nfit_size = nfit_size;

	t->spa_set[0] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[0]);
	if (!t->spa_set[0])
		return -ENOMEM;

	t->spa_set[1] = test_alloc(t, SPA1_SIZE, &t->spa_set_dma[1]);
	if (!t->spa_set[1])
		return -ENOMEM;

	t->spa_set[2] = test_alloc(t, SPA0_SIZE, &t->spa_set_dma[2]);
	if (!t->spa_set[2])
		return -ENOMEM;

	for (i = 0; i < NUM_DCR; i++) {
		t->dimm[i] = test_alloc(t, DIMM_SIZE, &t->dimm_dma[i]);
		if (!t->dimm[i])
			return -ENOMEM;

		t->label[i] = test_alloc(t, LABEL_SIZE, &t->label_dma[i]);
		if (!t->label[i])
			return -ENOMEM;
		/* seed the label area with a recognizable marker */
		sprintf(t->label[i], "label%d", i);

		t->flush[i] = test_alloc(t, max(PAGE_SIZE,
					sizeof(u64) * NUM_HINTS),
				&t->flush_dma[i]);
		if (!t->flush[i])
			return -ENOMEM;
	}

	for (i = 0; i < NUM_DCR; i++) {
		t->dcr[i] = test_alloc(t, LABEL_SIZE, &t->dcr_dma[i]);
		if (!t->dcr[i])
			return -ENOMEM;
	}

	/* NOTE(review): allocates sizeof(a pointer), not an acpi_object --
	 * confirm this is the intended size for the _FIT buffer */
	t->_fit = test_alloc(t, sizeof(union acpi_object **), &t->_fit_dma);
	if (!t->_fit)
		return -ENOMEM;

	/* register teardown before creating any test_dimm device */
	if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev))
		return -ENOMEM;

	for (i = 0; i < NUM_DCR; i++) {
		t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm,
				&t->pdev.dev, 0, NULL,
				nfit_test_dimm_attribute_groups,
				"test_dimm%d", i);
		if (!t->dimm_dev[i])
			return -ENOMEM;
	}

	return ars_state_init(&t->pdev.dev, &t->ars_state);
}
  630. static int nfit_test1_alloc(struct nfit_test *t)
  631. {
  632. size_t nfit_size = sizeof(struct acpi_nfit_system_address) * 2
  633. + sizeof(struct acpi_nfit_memory_map)
  634. + offsetof(struct acpi_nfit_control_region, window_size);
  635. t->nfit_buf = test_alloc(t, nfit_size, &t->nfit_dma);
  636. if (!t->nfit_buf)
  637. return -ENOMEM;
  638. t->nfit_size = nfit_size;
  639. t->spa_set[0] = test_alloc(t, SPA2_SIZE, &t->spa_set_dma[0]);
  640. if (!t->spa_set[0])
  641. return -ENOMEM;
  642. t->spa_set[1] = test_alloc(t, SPA_VCD_SIZE, &t->spa_set_dma[1]);
  643. if (!t->spa_set[1])
  644. return -ENOMEM;
  645. return ars_state_init(&t->pdev.dev, &t->ars_state);
  646. }
/*
 * Fill the identification fields shared by every emulated dimm
 * control region (vendor/device/revision plus manufacturing info).
 */
static void dcr_common_init(struct acpi_nfit_control_region *dcr)
{
	dcr->vendor_id = 0xabcd;
	dcr->device_id = 0;
	dcr->revision_id = 1;
	dcr->valid_fields = 1;
	dcr->manufacturing_location = 0xa;
	/* NOTE(review): cpu_to_be16 for a field in an ACPI (little-endian)
	 * table looks suspicious -- confirm the intended byte order */
	dcr->manufacturing_date = cpu_to_be16(2016);
}
/*
 * Populate t->nfit_buf with NFIT test topology 0: four DIMMs, each
 * described by interleaved pmem SPA ranges, per-DIMM control-region
 * (DCR) SPA ranges, block-data-window (BDW) SPA ranges, memory-map
 * entries tying DIMMs to ranges, blk and pmem control-region
 * descriptors, data-region descriptors, and flush-hint tables.  When
 * t->setup_hotplug is set a fifth (hot-pluggable) DIMM is appended.
 * Structures are laid out back-to-back; 'offset' tracks the running
 * position in the buffer.
 */
static void nfit_test0_setup(struct nfit_test *t)
{
	const int flush_hint_size = sizeof(struct acpi_nfit_flush_address)
			+ (sizeof(u64) * NUM_HINTS);
	struct acpi_nfit_desc *acpi_desc;
	struct acpi_nfit_memory_map *memdev;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_data_region *bdw;
	struct acpi_nfit_flush_address *flush;
	unsigned int offset, i;

	/*
	 * spa0 (interleave first half of dimm0 and dimm1, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 */
	spa = nfit_buf;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA0_SIZE;

	/*
	 * spa1 (interleave last half of the 4 DIMMS, note storage
	 * does not actually alias the related block-data-window
	 * regions)
	 */
	spa = nfit_buf + sizeof(*spa);
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 1+1;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA1_SIZE;

	/* spa2 (dcr0) dimm0 */
	spa = nfit_buf + sizeof(*spa) * 2;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 2+1;
	spa->address = t->dcr_dma[0];
	spa->length = DCR_SIZE;

	/* spa3 (dcr1) dimm1 */
	spa = nfit_buf + sizeof(*spa) * 3;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 3+1;
	spa->address = t->dcr_dma[1];
	spa->length = DCR_SIZE;

	/* spa4 (dcr2) dimm2 */
	spa = nfit_buf + sizeof(*spa) * 4;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 4+1;
	spa->address = t->dcr_dma[2];
	spa->length = DCR_SIZE;

	/* spa5 (dcr3) dimm3 */
	spa = nfit_buf + sizeof(*spa) * 5;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
	spa->range_index = 5+1;
	spa->address = t->dcr_dma[3];
	spa->length = DCR_SIZE;

	/* spa6 (bdw for dcr0) dimm0 */
	spa = nfit_buf + sizeof(*spa) * 6;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 6+1;
	spa->address = t->dimm_dma[0];
	spa->length = DIMM_SIZE;

	/* spa7 (bdw for dcr1) dimm1 */
	spa = nfit_buf + sizeof(*spa) * 7;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 7+1;
	spa->address = t->dimm_dma[1];
	spa->length = DIMM_SIZE;

	/* spa8 (bdw for dcr2) dimm2 */
	spa = nfit_buf + sizeof(*spa) * 8;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 8+1;
	spa->address = t->dimm_dma[2];
	spa->length = DIMM_SIZE;

	/* spa9 (bdw for dcr3) dimm3 */
	spa = nfit_buf + sizeof(*spa) * 9;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
	spa->range_index = 9+1;
	spa->address = t->dimm_dma[3];
	spa->length = DIMM_SIZE;

	/* 10 SPA tables emitted; memdev entries follow */
	offset = sizeof(*spa) * 10;

	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 4+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = t->spa_set_dma[0];
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;

	/* mem-region1 (spa0, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map);
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 5+1;
	memdev->region_size = SPA0_SIZE/2;
	memdev->region_offset = t->spa_set_dma[0] + SPA0_SIZE/2;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 2;

	/* mem-region2 (spa1, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 2;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 4+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1];
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region3 (spa1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 3;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 1;
	memdev->range_index = 1+1;
	memdev->region_index = 5+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region4 (spa1, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 4;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 6+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + 2*SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region5 (spa1, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 5;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 1+1;
	memdev->region_index = 7+1;
	memdev->region_size = SPA1_SIZE/4;
	memdev->region_offset = t->spa_set_dma[1] + 3*SPA1_SIZE/4;
	memdev->address = SPA0_SIZE/2;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 4;

	/* mem-region6 (spa/dcr0, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 6;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 2+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region7 (spa/dcr1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 7;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 3+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region8 (spa/dcr2, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 8;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 4+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region9 (spa/dcr3, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 9;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 5+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region10 (spa/bdw0, dimm0) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 10;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[0];
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 6+1;
	memdev->region_index = 0+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region11 (spa/bdw1, dimm1) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 11;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[1];
	memdev->physical_id = 1;
	memdev->region_id = 0;
	memdev->range_index = 7+1;
	memdev->region_index = 1+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region12 (spa/bdw2, dimm2) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 12;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[2];
	memdev->physical_id = 2;
	memdev->region_id = 0;
	memdev->range_index = 8+1;
	memdev->region_index = 2+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* mem-region13 (spa/bdw3, dimm3) */
	memdev = nfit_buf + offset + sizeof(struct acpi_nfit_memory_map) * 13;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = handle[3];
	memdev->physical_id = 3;
	memdev->region_id = 0;
	memdev->range_index = 9+1;
	memdev->region_index = 3+1;
	memdev->region_size = 0;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;

	/* 14 memdev entries emitted; control regions follow */
	offset = offset + sizeof(struct acpi_nfit_memory_map) * 14;

	/* dcr-descriptor0: blk */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[0];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor1: blk */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region);
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 1+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[1];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor2: blk */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 2;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 2+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[2];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	/* dcr-descriptor3: blk */
	dcr = nfit_buf + offset + sizeof(struct acpi_nfit_control_region) * 3;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = sizeof(struct acpi_nfit_control_region);
	dcr->region_index = 3+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[3];
	dcr->code = NFIT_FIC_BLK;
	dcr->windows = 1;
	dcr->window_size = DCR_SIZE;
	dcr->command_offset = 0;
	dcr->command_size = 8;
	dcr->status_offset = 8;
	dcr->status_size = 4;

	offset = offset + sizeof(struct acpi_nfit_control_region) * 4;

	/*
	 * pmem-flavor control regions are truncated at window_size (no
	 * block windows), hence the offsetof()-sized headers below.
	 */
	/* dcr-descriptor0: pmem */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 4+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[0];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;

	/* dcr-descriptor1: pmem */
	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 5+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[1];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;

	/* dcr-descriptor2: pmem */
	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
			window_size) * 2;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 6+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[2];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;

	/* dcr-descriptor3: pmem */
	dcr = nfit_buf + offset + offsetof(struct acpi_nfit_control_region,
			window_size) * 3;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 7+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~handle[3];
	dcr->code = NFIT_FIC_BYTEN;
	dcr->windows = 0;

	offset = offset + offsetof(struct acpi_nfit_control_region,
			window_size) * 4;

	/* bdw0 (spa/dcr0, dimm0) */
	bdw = nfit_buf + offset;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 0+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw1 (spa/dcr1, dimm1) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region);
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 1+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw2 (spa/dcr2, dimm2) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 2;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 2+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	/* bdw3 (spa/dcr3, dimm3) */
	bdw = nfit_buf + offset + sizeof(struct acpi_nfit_data_region) * 3;
	bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
	bdw->header.length = sizeof(struct acpi_nfit_data_region);
	bdw->region_index = 3+1;
	bdw->windows = 1;
	bdw->offset = 0;
	bdw->size = BDW_SIZE;
	bdw->capacity = DIMM_SIZE;
	bdw->start_address = 0;

	offset = offset + sizeof(struct acpi_nfit_data_region) * 4;

	/* flush0 (dimm0) */
	flush = nfit_buf + offset;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[0];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[0] + i * sizeof(u64);

	/* flush1 (dimm1) */
	flush = nfit_buf + offset + flush_hint_size * 1;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[1];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[1] + i * sizeof(u64);

	/* flush2 (dimm2) */
	flush = nfit_buf + offset + flush_hint_size * 2;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[2];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[2] + i * sizeof(u64);

	/* flush3 (dimm3) */
	flush = nfit_buf + offset + flush_hint_size * 3;
	flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
	flush->header.length = flush_hint_size;
	flush->device_handle = handle[3];
	flush->hint_count = NUM_HINTS;
	for (i = 0; i < NUM_HINTS; i++)
		flush->hint_address[i] = t->flush_dma[3] + i * sizeof(u64);

	if (t->setup_hotplug) {
		/* extend the table with a fifth, hot-pluggable DIMM */
		offset = offset + flush_hint_size * 4;

		/* dcr-descriptor4: blk */
		dcr = nfit_buf + offset;
		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
		dcr->header.length = sizeof(struct acpi_nfit_control_region);
		dcr->region_index = 8+1;
		dcr_common_init(dcr);
		dcr->serial_number = ~handle[4];
		dcr->code = NFIT_FIC_BLK;
		dcr->windows = 1;
		dcr->window_size = DCR_SIZE;
		dcr->command_offset = 0;
		dcr->command_size = 8;
		dcr->status_offset = 8;
		dcr->status_size = 4;
		offset = offset + sizeof(struct acpi_nfit_control_region);

		/* dcr-descriptor4: pmem */
		dcr = nfit_buf + offset;
		dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
		dcr->header.length = offsetof(struct acpi_nfit_control_region,
				window_size);
		dcr->region_index = 9+1;
		dcr_common_init(dcr);
		dcr->serial_number = ~handle[4];
		dcr->code = NFIT_FIC_BYTEN;
		dcr->windows = 0;
		offset = offset + offsetof(struct acpi_nfit_control_region,
				window_size);

		/* bdw4 (spa/dcr4, dimm4) */
		bdw = nfit_buf + offset;
		bdw->header.type = ACPI_NFIT_TYPE_DATA_REGION;
		bdw->header.length = sizeof(struct acpi_nfit_data_region);
		bdw->region_index = 8+1;
		bdw->windows = 1;
		bdw->offset = 0;
		bdw->size = BDW_SIZE;
		bdw->capacity = DIMM_SIZE;
		bdw->start_address = 0;
		offset = offset + sizeof(struct acpi_nfit_data_region);

		/* spa10 (dcr4) dimm4 */
		spa = nfit_buf + offset;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_DCR), 16);
		spa->range_index = 10+1;
		spa->address = t->dcr_dma[4];
		spa->length = DCR_SIZE;

		/*
		 * spa11 (single-dimm interleave for hotplug, note storage
		 * does not actually alias the related block-data-window
		 * regions)
		 */
		spa = nfit_buf + offset + sizeof(*spa);
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
		spa->range_index = 11+1;
		spa->address = t->spa_set_dma[2];
		spa->length = SPA0_SIZE;

		/* spa12 (bdw for dcr4) dimm4 */
		spa = nfit_buf + offset + sizeof(*spa) * 2;
		spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
		spa->header.length = sizeof(*spa);
		memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_BDW), 16);
		spa->range_index = 12+1;
		spa->address = t->dimm_dma[4];
		spa->length = DIMM_SIZE;
		offset = offset + sizeof(*spa) * 3;

		/* mem-region14 (spa/dcr4, dimm4) */
		memdev = nfit_buf + offset;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 10+1;
		memdev->region_index = 8+1;
		memdev->region_size = 0;
		memdev->region_offset = 0;
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;

		/* mem-region15 (spa11, dimm4) */
		memdev = nfit_buf + offset +
				sizeof(struct acpi_nfit_memory_map);
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 11+1;
		memdev->region_index = 9+1;
		memdev->region_size = SPA0_SIZE;
		memdev->region_offset = t->spa_set_dma[2];
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;

		/* mem-region16 (spa/bdw4, dimm4) */
		memdev = nfit_buf + offset +
				sizeof(struct acpi_nfit_memory_map) * 2;
		memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
		memdev->header.length = sizeof(*memdev);
		memdev->device_handle = handle[4];
		memdev->physical_id = 4;
		memdev->region_id = 0;
		memdev->range_index = 12+1;
		memdev->region_index = 8+1;
		memdev->region_size = 0;
		memdev->region_offset = 0;
		memdev->address = 0;
		memdev->interleave_index = 0;
		memdev->interleave_ways = 1;
		offset = offset + sizeof(struct acpi_nfit_memory_map) * 3;

		/* flush4 (dimm4) */
		flush = nfit_buf + offset;
		flush->header.type = ACPI_NFIT_TYPE_FLUSH_ADDRESS;
		flush->header.length = flush_hint_size;
		flush->device_handle = handle[4];
		flush->hint_count = NUM_HINTS;
		for (i = 0; i < NUM_HINTS; i++)
			flush->hint_address[i] = t->flush_dma[4]
					+ i * sizeof(u64);
	}

	/* seed an address-range-scrub result covering spa0 */
	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA0_SIZE);

	/* force-enable the command set this emulated bus/dimm supports */
	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_SMART, &acpi_desc->dimm_cmd_force_en);
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_SMART_THRESHOLD, &acpi_desc->dimm_cmd_force_en);
}
/*
 * Populate t->nfit_buf with NFIT test topology 1: a single flat pmem
 * range (no block-data-window aliasing), a virtual-CD region, one
 * memory-map entry flagged with every failure condition, and one
 * pmem-flavor control region.
 */
static void nfit_test1_setup(struct nfit_test *t)
{
	size_t offset;
	void *nfit_buf = t->nfit_buf;
	struct acpi_nfit_memory_map *memdev;
	struct acpi_nfit_control_region *dcr;
	struct acpi_nfit_system_address *spa;
	struct acpi_nfit_desc *acpi_desc;

	offset = 0;
	/* spa0 (flat range with no bdw aliasing) */
	spa = nfit_buf + offset;
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_PM), 16);
	spa->range_index = 0+1;
	spa->address = t->spa_set_dma[0];
	spa->length = SPA2_SIZE;

	/* virtual cd region */
	spa = nfit_buf + sizeof(*spa);
	spa->header.type = ACPI_NFIT_TYPE_SYSTEM_ADDRESS;
	spa->header.length = sizeof(*spa);
	memcpy(spa->range_guid, to_nfit_uuid(NFIT_SPA_VCD), 16);
	/* range_index 0 == "not described by a memdev" */
	spa->range_index = 0;
	spa->address = t->spa_set_dma[1];
	spa->length = SPA_VCD_SIZE;

	offset += sizeof(*spa) * 2;
	/* mem-region0 (spa0, dimm0) */
	memdev = nfit_buf + offset;
	memdev->header.type = ACPI_NFIT_TYPE_MEMORY_MAP;
	memdev->header.length = sizeof(*memdev);
	memdev->device_handle = 0;
	memdev->physical_id = 0;
	memdev->region_id = 0;
	memdev->range_index = 0+1;
	memdev->region_index = 0+1;
	memdev->region_size = SPA2_SIZE;
	memdev->region_offset = 0;
	memdev->address = 0;
	memdev->interleave_index = 0;
	memdev->interleave_ways = 1;
	/* report every failure flag to exercise error handling */
	memdev->flags = ACPI_NFIT_MEM_SAVE_FAILED | ACPI_NFIT_MEM_RESTORE_FAILED
		| ACPI_NFIT_MEM_FLUSH_FAILED | ACPI_NFIT_MEM_HEALTH_OBSERVED
		| ACPI_NFIT_MEM_NOT_ARMED;

	offset += sizeof(*memdev);
	/* dcr-descriptor0 */
	dcr = nfit_buf + offset;
	dcr->header.type = ACPI_NFIT_TYPE_CONTROL_REGION;
	/* pmem flavor: truncated at window_size, no block windows */
	dcr->header.length = offsetof(struct acpi_nfit_control_region,
			window_size);
	dcr->region_index = 0+1;
	dcr_common_init(dcr);
	dcr->serial_number = ~0;
	dcr->code = NFIT_FIC_BYTE;
	dcr->windows = 0;

	/* seed an address-range-scrub result covering spa0 */
	post_ars_status(&t->ars_state, t->spa_set_dma[0], SPA2_SIZE);

	/* force-enable only the bus-level ARS commands */
	acpi_desc = &t->acpi_desc;
	set_bit(ND_CMD_ARS_CAP, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_START, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en);
	set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en);
}
  1326. static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa,
  1327. void *iobuf, u64 len, int rw)
  1328. {
  1329. struct nfit_blk *nfit_blk = ndbr->blk_provider_data;
  1330. struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
  1331. struct nd_region *nd_region = &ndbr->nd_region;
  1332. unsigned int lane;
  1333. lane = nd_region_acquire_lane(nd_region);
  1334. if (rw)
  1335. memcpy(mmio->addr.base + dpa, iobuf, len);
  1336. else {
  1337. memcpy(iobuf, mmio->addr.base + dpa, len);
  1338. /* give us some some coverage of the mmio_flush_range() API */
  1339. mmio_flush_range(mmio->addr.base + dpa, len);
  1340. }
  1341. nd_region_release_lane(nd_region, lane);
  1342. return 0;
  1343. }
  1344. static unsigned long nfit_ctl_handle;
  1345. union acpi_object *result;
  1346. static union acpi_object *nfit_test_evaluate_dsm(acpi_handle handle,
  1347. const u8 *uuid, u64 rev, u64 func, union acpi_object *argv4)
  1348. {
  1349. if (handle != &nfit_ctl_handle)
  1350. return ERR_PTR(-ENXIO);
  1351. return result;
  1352. }
  1353. static int setup_result(void *buf, size_t size)
  1354. {
  1355. result = kmalloc(sizeof(union acpi_object) + size, GFP_KERNEL);
  1356. if (!result)
  1357. return -ENOMEM;
  1358. result->package.type = ACPI_TYPE_BUFFER,
  1359. result->buffer.pointer = (void *) (result + 1);
  1360. result->buffer.length = size;
  1361. memcpy(result->buffer.pointer, buf, size);
  1362. memset(buf, 0, size);
  1363. return 0;
  1364. }
/*
 * Exercise acpi_nfit_ctl() against a mocked ACPI device and DIMM,
 * driving it through representative success and failure command
 * translations (get_config_size, ars_status variants, ars_cap, and an
 * extended-status failure).  Each step stages the expected firmware
 * response with setup_result() and checks both the ioctl return code
 * and the translated command result.
 *
 * Returns 0 if every case behaves as expected, -ENOMEM on allocation
 * failure, -EIO on the first mismatch.
 */
static int nfit_ctl_test(struct device *dev)
{
	int rc, cmd_rc;
	struct nvdimm *nvdimm;
	struct acpi_device *adev;
	struct nfit_mem *nfit_mem;
	struct nd_ars_record *record;
	struct acpi_nfit_desc *acpi_desc;
	const u64 test_val = 0x0123456789abcdefULL;
	unsigned long mask, cmd_size, offset;
	/* one buffer reused for every command payload shape */
	union {
		struct nd_cmd_get_config_size cfg_size;
		struct nd_cmd_ars_status ars_stat;
		struct nd_cmd_ars_cap ars_cap;
		char buf[sizeof(struct nd_cmd_ars_status)
			+ sizeof(struct nd_ars_record)];
	} cmds;

	/* mock ACPI device whose handle the fake DSM evaluator accepts */
	adev = devm_kzalloc(dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;
	*adev = (struct acpi_device) {
		.handle = &nfit_ctl_handle,
		.dev = {
			.init_name = "test-adev",
		},
	};

	/* bus descriptor advertising only the ARS command set */
	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	*acpi_desc = (struct acpi_nfit_desc) {
		.nd_desc = {
			.cmd_mask = 1UL << ND_CMD_ARS_CAP
				| 1UL << ND_CMD_ARS_START
				| 1UL << ND_CMD_ARS_STATUS
				| 1UL << ND_CMD_CLEAR_ERROR,
			.module = THIS_MODULE,
			.provider_name = "ACPI.NFIT",
			.ndctl = acpi_nfit_ctl,
		},
		.dev = &adev->dev,
	};

	/* mock DIMM with the full Intel-family DSM mask */
	nfit_mem = devm_kzalloc(dev, sizeof(*nfit_mem), GFP_KERNEL);
	if (!nfit_mem)
		return -ENOMEM;

	mask = 1UL << ND_CMD_SMART | 1UL << ND_CMD_SMART_THRESHOLD
		| 1UL << ND_CMD_DIMM_FLAGS | 1UL << ND_CMD_GET_CONFIG_SIZE
		| 1UL << ND_CMD_GET_CONFIG_DATA | 1UL << ND_CMD_SET_CONFIG_DATA
		| 1UL << ND_CMD_VENDOR;
	*nfit_mem = (struct nfit_mem) {
		.adev = adev,
		.family = NVDIMM_FAMILY_INTEL,
		.dsm_mask = mask,
	};

	nvdimm = devm_kzalloc(dev, sizeof(*nvdimm), GFP_KERNEL);
	if (!nvdimm)
		return -ENOMEM;
	*nvdimm = (struct nvdimm) {
		.provider_data = nfit_mem,
		.cmd_mask = mask,
		.dev = {
			.init_name = "test-dimm",
		},
	};

	/* basic checkout of a typical 'get config size' command */
	cmd_size = sizeof(cmds.cfg_size);
	cmds.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 0,
		.config_size = SZ_128K,
		.max_xfer = SZ_4K,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmds.buf, cmd_size, &cmd_rc);
	/* setup_result() zeroed cmds.buf, so these fields must round-trip */
	if (rc < 0 || cmd_rc || cmds.cfg_size.status != 0
			|| cmds.cfg_size.config_size != SZ_128K
			|| cmds.cfg_size.max_xfer != SZ_4K) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with zero output */
	cmd_size = offsetof(struct nd_cmd_ars_status, address);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = 0,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_cap with benign extended status */
	cmd_size = sizeof(cmds.ars_cap);
	cmds.ars_cap = (struct nd_cmd_ars_cap) {
		.status = ND_ARS_PERSISTENT << 16,
	};
	/* stage only the output portion, starting at 'status' */
	offset = offsetof(struct nd_cmd_ars_cap, status);
	rc = setup_result(cmds.buf + offset, cmd_size - offset);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_CAP,
			cmds.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with 'status' trimmed from 'out_length' */
	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size - 4,
	};
	record = &cmds.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);
	/* the record payload must survive the translation intact */
	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test ars_status with 'Output (Size)' including 'status' */
	cmd_size = sizeof(cmds.ars_stat) + sizeof(struct nd_ars_record);
	cmds.ars_stat = (struct nd_cmd_ars_status) {
		.out_length = cmd_size,
	};
	record = &cmds.ars_stat.records[0];
	*record = (struct nd_ars_record) {
		.length = test_val,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, NULL, ND_CMD_ARS_STATUS,
			cmds.buf, cmd_size, &cmd_rc);
	if (rc < 0 || cmd_rc || record->length != test_val) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	/* test extended status for get_config_size results in failure */
	cmd_size = sizeof(cmds.cfg_size);
	cmds.cfg_size = (struct nd_cmd_get_config_size) {
		.status = 1 << 16,
	};
	rc = setup_result(cmds.buf, cmd_size);
	if (rc)
		return rc;
	rc = acpi_nfit_ctl(&acpi_desc->nd_desc, nvdimm, ND_CMD_GET_CONFIG_SIZE,
			cmds.buf, cmd_size, &cmd_rc);
	/* here success means cmd_rc reports the firmware failure (< 0) */
	if (rc < 0 || cmd_rc >= 0) {
		dev_dbg(dev, "%s: failed at: %d rc: %d cmd_rc: %d\n",
				__func__, __LINE__, rc, cmd_rc);
		return -EIO;
	}

	return 0;
}
/*
 * nfit_test_probe() - bring up one emulated NFIT bus instance
 *
 * For instance 0 ("nfit_test.0") first run the acpi_nfit_ctl() unit tests,
 * then allocate the per-DIMM / per-SPA backing arrays, build the fake NFIT
 * tables via ->setup(), and register the bus with acpi_nfit_init().
 * Instance 0 is additionally re-run in hotplug mode and poked with a
 * notification so the FIT-update path is exercised.
 *
 * Return: 0 on success, negative errno on failure (all allocations are
 * devm-managed, so no explicit unwind is needed on error).
 */
static int nfit_test_probe(struct platform_device *pdev)
{
	struct nvdimm_bus_descriptor *nd_desc;
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &pdev->dev;
	struct nfit_test *nfit_test;
	struct nfit_mem *nfit_mem;
	union acpi_object *obj;
	int rc;

	/* the ctl self-test only needs to run once, against instance 0 */
	if (strcmp(dev_name(&pdev->dev), "nfit_test.0") == 0) {
		rc = nfit_ctl_test(&pdev->dev);
		if (rc)
			return rc;
	}

	nfit_test = to_nfit_test(&pdev->dev);

	/* common alloc */
	if (nfit_test->num_dcr) {
		int num = nfit_test->num_dcr;

		nfit_test->dimm = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->dimm_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->flush = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->flush_dma = devm_kcalloc(dev, num, sizeof(dma_addr_t),
				GFP_KERNEL);
		nfit_test->label = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->label_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		nfit_test->dcr = devm_kcalloc(dev, num,
				sizeof(struct nfit_test_dcr *), GFP_KERNEL);
		nfit_test->dcr_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		/* all-or-nothing: any NULL means allocation failed */
		if (nfit_test->dimm && nfit_test->dimm_dma && nfit_test->label
				&& nfit_test->label_dma && nfit_test->dcr
				&& nfit_test->dcr_dma && nfit_test->flush
				&& nfit_test->flush_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	if (nfit_test->num_pm) {
		int num = nfit_test->num_pm;

		nfit_test->spa_set = devm_kcalloc(dev, num, sizeof(void *),
				GFP_KERNEL);
		nfit_test->spa_set_dma = devm_kcalloc(dev, num,
				sizeof(dma_addr_t), GFP_KERNEL);
		if (nfit_test->spa_set && nfit_test->spa_set_dma)
			/* pass */;
		else
			return -ENOMEM;
	}

	/* per-nfit specific alloc */
	if (nfit_test->alloc(nfit_test))
		return -ENOMEM;

	/* populate the fake NFIT tables for this instance */
	nfit_test->setup(nfit_test);
	acpi_desc = &nfit_test->acpi_desc;
	acpi_nfit_desc_init(acpi_desc, &pdev->dev);
	acpi_desc->blk_do_io = nfit_test_blk_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = NULL;
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = nfit_test_ctl;

	rc = acpi_nfit_init(acpi_desc, nfit_test->nfit_buf,
			nfit_test->nfit_size);
	if (rc)
		return rc;

	/* only instance 0 exercises the hotplug/notification path */
	if (nfit_test->setup != nfit_test0_setup)
		return 0;

	/* rebuild the tables with the hotplug resources included */
	nfit_test->setup_hotplug = 1;
	nfit_test->setup(nfit_test);

	/* hand the updated FIT buffer to the notify handler as _FIT output */
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;
	obj->type = ACPI_TYPE_BUFFER;
	obj->buffer.length = nfit_test->nfit_size;
	obj->buffer.pointer = nfit_test->nfit_buf;
	*(nfit_test->_fit) = obj;
	__acpi_nfit_notify(&pdev->dev, nfit_test, 0x80);

	/* associate dimm devices with nfit_mem data for notification testing */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		int i;

		for (i = 0; i < NUM_DCR; i++)
			if (nfit_handle == handle[i])
				dev_set_drvdata(nfit_test->dimm_dev[i],
						nfit_mem);
	}
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}
/*
 * nfit_test_remove() - platform driver remove hook
 *
 * Intentionally a no-op: probe-time resources are devm-managed and the
 * nfit_test container itself is freed by nfit_test_release().
 */
static int nfit_test_remove(struct platform_device *pdev)
{
	return 0;
}
  1630. static void nfit_test_release(struct device *dev)
  1631. {
  1632. struct nfit_test *nfit_test = to_nfit_test(dev);
  1633. kfree(nfit_test);
  1634. }
/* id table matching the platform devices registered in nfit_test_init() */
static const struct platform_device_id nfit_test_id[] = {
	{ KBUILD_MODNAME },
	{ },
};
/* platform driver that binds to the emulated nfit_test devices */
static struct platform_driver nfit_test_driver = {
	.probe = nfit_test_probe,
	.remove = nfit_test_remove,
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.id_table = nfit_test_id,
};
  1647. static __init int nfit_test_init(void)
  1648. {
  1649. int rc, i;
  1650. nfit_test_setup(nfit_test_lookup, nfit_test_evaluate_dsm);
  1651. nfit_test_dimm = class_create(THIS_MODULE, "nfit_test_dimm");
  1652. if (IS_ERR(nfit_test_dimm)) {
  1653. rc = PTR_ERR(nfit_test_dimm);
  1654. goto err_register;
  1655. }
  1656. for (i = 0; i < NUM_NFITS; i++) {
  1657. struct nfit_test *nfit_test;
  1658. struct platform_device *pdev;
  1659. nfit_test = kzalloc(sizeof(*nfit_test), GFP_KERNEL);
  1660. if (!nfit_test) {
  1661. rc = -ENOMEM;
  1662. goto err_register;
  1663. }
  1664. INIT_LIST_HEAD(&nfit_test->resources);
  1665. switch (i) {
  1666. case 0:
  1667. nfit_test->num_pm = NUM_PM;
  1668. nfit_test->num_dcr = NUM_DCR;
  1669. nfit_test->alloc = nfit_test0_alloc;
  1670. nfit_test->setup = nfit_test0_setup;
  1671. break;
  1672. case 1:
  1673. nfit_test->num_pm = 1;
  1674. nfit_test->alloc = nfit_test1_alloc;
  1675. nfit_test->setup = nfit_test1_setup;
  1676. break;
  1677. default:
  1678. rc = -EINVAL;
  1679. goto err_register;
  1680. }
  1681. pdev = &nfit_test->pdev;
  1682. pdev->name = KBUILD_MODNAME;
  1683. pdev->id = i;
  1684. pdev->dev.release = nfit_test_release;
  1685. rc = platform_device_register(pdev);
  1686. if (rc) {
  1687. put_device(&pdev->dev);
  1688. goto err_register;
  1689. }
  1690. get_device(&pdev->dev);
  1691. rc = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
  1692. if (rc)
  1693. goto err_register;
  1694. instances[i] = nfit_test;
  1695. }
  1696. rc = platform_driver_register(&nfit_test_driver);
  1697. if (rc)
  1698. goto err_register;
  1699. return 0;
  1700. err_register:
  1701. for (i = 0; i < NUM_NFITS; i++)
  1702. if (instances[i])
  1703. platform_device_unregister(&instances[i]->pdev);
  1704. nfit_test_teardown();
  1705. for (i = 0; i < NUM_NFITS; i++)
  1706. if (instances[i])
  1707. put_device(&instances[i]->pdev.dev);
  1708. return rc;
  1709. }
/*
 * nfit_test_exit() - module exit: tear down in reverse of init
 *
 * Unregister all instance devices before the driver and the test hooks,
 * then drop the extra reference taken in nfit_test_init() so each
 * nfit_test container is released, and finally destroy the dimm class.
 */
static __exit void nfit_test_exit(void)
{
	int i;

	for (i = 0; i < NUM_NFITS; i++)
		platform_device_unregister(&instances[i]->pdev);
	platform_driver_unregister(&nfit_test_driver);
	nfit_test_teardown();
	for (i = 0; i < NUM_NFITS; i++)
		put_device(&instances[i]->pdev.dev);
	class_destroy(nfit_test_dimm);
}
/* standard module entry points and metadata */
module_init(nfit_test_init);
module_exit(nfit_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");