firmware.c 65 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306
  1. /*
  2. * Copyright(c) 2015 - 2017 Intel Corporation.
  3. *
  4. * This file is provided under a dual BSD/GPLv2 license. When using or
  5. * redistributing this file, you may do so under either license.
  6. *
  7. * GPL LICENSE SUMMARY
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * BSD LICENSE
  19. *
  20. * Redistribution and use in source and binary forms, with or without
  21. * modification, are permitted provided that the following conditions
  22. * are met:
  23. *
  24. * - Redistributions of source code must retain the above copyright
  25. * notice, this list of conditions and the following disclaimer.
  26. * - Redistributions in binary form must reproduce the above copyright
  27. * notice, this list of conditions and the following disclaimer in
  28. * the documentation and/or other materials provided with the
  29. * distribution.
  30. * - Neither the name of Intel Corporation nor the names of its
  31. * contributors may be used to endorse or promote products derived
  32. * from this software without specific prior written permission.
  33. *
  34. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45. *
  46. */
  47. #include <linux/firmware.h>
  48. #include <linux/mutex.h>
  49. #include <linux/module.h>
  50. #include <linux/delay.h>
  51. #include <linux/crc32.h>
  52. #include "hfi.h"
  53. #include "trace.h"
/*
 * Make it easy to toggle firmware file name and if it gets loaded by
 * editing the following. This may be something we do while in development
 * but not necessarily something a user would ever need to use.
 */
#define DEFAULT_FW_8051_NAME_FPGA "hfi_dc8051.bin"
#define DEFAULT_FW_8051_NAME_ASIC "hfi1_dc8051.fw"
#define DEFAULT_FW_FABRIC_NAME "hfi1_fabric.fw"
#define DEFAULT_FW_SBUS_NAME "hfi1_sbus.fw"
#define DEFAULT_FW_PCIE_NAME "hfi1_pcie.fw"
/* alternate names, used when obtaining the defaults fails (see __obtain_firmware()) */
#define ALT_FW_8051_NAME_ASIC "hfi1_dc8051_d.fw"
#define ALT_FW_FABRIC_NAME "hfi1_fabric_d.fw"
#define ALT_FW_SBUS_NAME "hfi1_sbus_d.fw"
#define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"

MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
MODULE_FIRMWARE(DEFAULT_FW_SBUS_NAME);
MODULE_FIRMWARE(DEFAULT_FW_PCIE_NAME);

/* nonzero means the corresponding firmware is obtained and downloaded */
static uint fw_8051_load = 1;
static uint fw_fabric_serdes_load = 1;
static uint fw_pcie_serdes_load = 1;
static uint fw_sbus_load = 1;

/* Firmware file names get set in hfi1_firmware_init() based on the above */
static char *fw_8051_name;
static char *fw_fabric_serdes_name;
static char *fw_sbus_name;
static char *fw_pcie_serdes_name;

/* maximum number of polls of the SBus counters before giving up */
#define SBUS_MAX_POLL_COUNT 100
/* extract the named counter field from an ASIC_STS_SBUS_COUNTERS value */
#define SBUS_COUNTER(reg, name) \
	(((reg) >> ASIC_STS_SBUS_COUNTERS_##name##_CNT_SHIFT) & \
	 ASIC_STS_SBUS_COUNTERS_##name##_CNT_MASK)
/*
 * Firmware security header.  Leads every firmware file; field values are
 * checked in verify_css_header().
 */
struct css_header {
	u32 module_type;	/* expected: CSS_MODULE_TYPE */
	u32 header_len;		/* in DWORDs; expected: sizeof(struct firmware_file)/4 */
	u32 header_version;	/* expected: CSS_HEADER_VERSION */
	u32 module_id;		/* not validated, only logged */
	u32 module_vendor;	/* expected: CSS_MODULE_VENDOR */
	u32 date;		/* BCD yyyymmdd */
	u32 size;		/* in DWORDs */
	u32 key_size;		/* in DWORDs */
	u32 modulus_size;	/* in DWORDs */
	u32 exponent_size;	/* in DWORDs */
	u32 reserved[22];
};
/* expected field values for struct css_header */
#define CSS_MODULE_TYPE 0x00000006
#define CSS_HEADER_LEN 0x000000a1
#define CSS_HEADER_VERSION 0x00010000
#define CSS_MODULE_VENDOR 0x00008086

/* sizes of the RSA pieces carried in the firmware files, in bytes */
#define KEY_SIZE 256
#define MU_SIZE 8
#define EXPONENT_SIZE 4

/* size of platform configuration partition */
#define MAX_PLATFORM_CONFIG_FILE_SIZE 4096

/* size of file of platform configuration encoded in format version 4 */
#define PLATFORM_CONFIG_FORMAT_4_FILE_SIZE 528
/* the file itself: CSS header, RSA pieces, then the firmware payload */
struct firmware_file {
	struct css_header css_header;
	u8 modulus[KEY_SIZE];
	u8 exponent[EXPONENT_SIZE];
	u8 signature[KEY_SIZE];
	u8 firmware[];		/* flexible array: rest of the file */
};
/*
 * Augmented firmware file: same layout as struct firmware_file, with r2
 * and mu inserted between the signature and the payload.  The two layouts
 * are distinguished by file size in obtain_one_firmware().
 */
struct augmented_firmware_file {
	struct css_header css_header;
	u8 modulus[KEY_SIZE];
	u8 exponent[EXPONENT_SIZE];
	u8 signature[KEY_SIZE];
	u8 r2[KEY_SIZE];
	u8 mu[MU_SIZE];
	u8 firmware[];		/* flexible array: rest of the file */
};

/* augmented file size difference */
#define AUGMENT_SIZE (sizeof(struct augmented_firmware_file) - \
		      sizeof(struct firmware_file))
/* all of the pieces extracted from one obtained firmware file */
struct firmware_details {
	/* Linux core piece */
	const struct firmware *fw;

	struct css_header *css_header;	/* points into fw->data */
	u8 *firmware_ptr;		/* pointer to binary data */
	u32 firmware_len;		/* length in bytes */
	u8 *modulus;			/* pointer to the modulus */
	u8 *exponent;			/* pointer to the exponent */
	u8 *signature;			/* pointer to the signature */
	u8 *r2;				/* pointer to r2 */
	u8 *mu;				/* pointer to mu */
	/* r2/mu point into here for non-augmented files that lack them */
	struct augmented_firmware_file dummy_header;
};
/*
 * The mutex protects fw_state, fw_err, and all of the firmware_details
 * variables.
 */
static DEFINE_MUTEX(fw_mutex);

/* acquisition state shared by all devices (see __obtain_firmware()) */
enum fw_state {
	FW_EMPTY,	/* nothing obtained yet */
	FW_TRY,		/* a device is trying a firmware set; may switch to alternates */
	FW_FINAL,	/* the firmware set is settled; no further attempts */
	FW_ERR		/* could not obtain a working set */
};

static enum fw_state fw_state = FW_EMPTY;
static int fw_err;	/* sticky error returned to subsequent callers */
static struct firmware_details fw_8051;
static struct firmware_details fw_fabric;
static struct firmware_details fw_pcie;
static struct firmware_details fw_sbus;
/* flags for turn_off_spicos() */
#define SPICO_SBUS 0x1
#define SPICO_FABRIC 0x2
#define ENABLE_SPICO_SMASK 0x1

/* security block commands */
#define RSA_CMD_INIT 0x1
#define RSA_CMD_START 0x2

/* security block status */
#define RSA_STATUS_IDLE 0x0
#define RSA_STATUS_ACTIVE 0x1
#define RSA_STATUS_DONE 0x2
#define RSA_STATUS_FAILED 0x3

/* RSA engine timeout, in ms */
#define RSA_ENGINE_TIMEOUT 100 /* ms */

/* hardware mutex timeout, in ms */
#define HM_TIMEOUT 10 /* ms */

/* 8051 memory access timeout, in us */
#define DC8051_ACCESS_TIMEOUT 100 /* us */

/* the number of fabric SerDes on the SBus */
#define NUM_FABRIC_SERDES 4

/* ASIC_STS_SBUS_RESULT.RESULT_CODE value */
#define SBUS_READ_COMPLETE 0x4

/* SBus fabric SerDes addresses, one set per HFI */
static const u8 fabric_serdes_addrs[2][NUM_FABRIC_SERDES] = {
	{ 0x01, 0x02, 0x03, 0x04 },
	{ 0x28, 0x29, 0x2a, 0x2b }
};

/* SBus PCIe SerDes addresses, one set per HFI */
static const u8 pcie_serdes_addrs[2][NUM_PCIE_SERDES] = {
	{ 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16,
	  0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26 },
	{ 0x2f, 0x31, 0x33, 0x35, 0x37, 0x39, 0x3b, 0x3d,
	  0x3f, 0x41, 0x43, 0x45, 0x47, 0x49, 0x4b, 0x4d }
};

/* SBus PCIe PCS addresses, one set per HFI */
const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES] = {
	{ 0x09, 0x0b, 0x0d, 0x0f, 0x11, 0x13, 0x15, 0x17,
	  0x19, 0x1b, 0x1d, 0x1f, 0x21, 0x23, 0x25, 0x27 },
	{ 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
	  0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e }
};

/* SBus fabric SerDes broadcast addresses, one per HFI */
static const u8 fabric_serdes_broadcast[2] = { 0xe4, 0xe5 };
static const u8 all_fabric_serdes_broadcast = 0xe1;

/* SBus PCIe SerDes broadcast addresses, one per HFI */
const u8 pcie_serdes_broadcast[2] = { 0xe2, 0xe3 };
static const u8 all_pcie_serdes_broadcast = 0xe0;

/* per-table limits, indexed by PLATFORM_CONFIG_TABLE_* type */
static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
	0,
	SYSTEM_TABLE_MAX,
	PORT_TABLE_MAX,
	RX_PRESET_TABLE_MAX,
	TX_PRESET_TABLE_MAX,
	QSFP_ATTEN_TABLE_MAX,
	VARIABLE_SETTINGS_TABLE_MAX
};

/* forwards */
static void dispose_one_firmware(struct firmware_details *fdet);
static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
				       struct firmware_details *fdet);
static void dump_fw_version(struct hfi1_devdata *dd);
/*
 * Read a single 64-bit value from 8051 data memory.
 *
 * Expects:
 * o caller to have already set up data read, no auto increment
 * o caller to turn off read enable when finished
 *
 * The address argument is a byte offset. Bits 0:2 in the address are
 * ignored - i.e. the hardware will always do aligned 8-byte reads as if
 * the lower bits are zero.
 *
 * Return 0 on success, -ENXIO on a read error (timeout).
 */
static int __read_8051_data(struct hfi1_devdata *dd, u32 addr, u64 *result)
{
	u64 reg;
	int count;

	/* step 1: set the address, clear enable */
	reg = (addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
			<< DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT;
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
	/* step 2: enable - the read starts when READ_ENA is raised */
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL,
		  reg | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK);

	/* wait until ACCESS_COMPLETED is set */
	count = 0;
	while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
		    & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
		    == 0) {
		count++;
		if (count > DC8051_ACCESS_TIMEOUT) {
			dd_dev_err(dd, "timeout reading 8051 data\n");
			return -ENXIO;
		}
		ndelay(10);
	}

	/* gather the data */
	*result = read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_RD_DATA);
	return 0;
}
/*
 * Read 8051 data starting at addr, for len bytes. Will read in 8-byte
 * chunks, so the caller's result buffer must hold at least
 * roundup(len, 8) / 8 u64 values.
 *
 * Return 0 on success, -errno on error.
 */
int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result)
{
	unsigned long flags;
	u32 done;
	int ret = 0;

	/* serialize all 8051 memory accesses on this device */
	spin_lock_irqsave(&dd->dc8051_memlock, flags);

	/* data read set-up, no auto-increment */
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);

	for (done = 0; done < len; addr += 8, done += 8, result++) {
		ret = __read_8051_data(dd, addr, result);
		if (ret)
			break;
	}

	/* turn off read enable */
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);

	spin_unlock_irqrestore(&dd->dc8051_memlock, flags);
	return ret;
}
/*
 * Write data or code to the 8051 code or data RAM.
 *
 * @code: nonzero selects the code RAM, zero the data RAM
 * @start: byte offset at which to begin writing
 * @data: source buffer; any alignment (handled below)
 * @len: number of bytes to write
 *
 * Writes in 8-byte chunks; a short final chunk is zero-padded.
 * Return 0 on success, -ENXIO on a write timeout.
 */
static int write_8051(struct hfi1_devdata *dd, int code, u32 start,
		      const u8 *data, u32 len)
{
	u64 reg;
	u32 offset;
	int aligned, count;

	/* check alignment */
	aligned = ((unsigned long)data & 0x7) == 0;

	/* write set-up: select RAM, use address auto-increment */
	reg = (code ? DC_DC8051_CFG_RAM_ACCESS_SETUP_RAM_SEL_SMASK : 0ull)
		| DC_DC8051_CFG_RAM_ACCESS_SETUP_AUTO_INCR_ADDR_SMASK;
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, reg);

	reg = ((start & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
		<< DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT)
		| DC_DC8051_CFG_RAM_ACCESS_CTRL_WRITE_ENA_SMASK;
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);

	/* write */
	for (offset = 0; offset < len; offset += 8) {
		int bytes = len - offset;

		if (bytes < 8) {
			/* short tail: zero-pad the final chunk */
			reg = 0;
			memcpy(&reg, &data[offset], bytes);
		} else if (aligned) {
			/* aligned source: read the u64 directly */
			reg = *(u64 *)&data[offset];
		} else {
			/* unaligned source: assemble via memcpy */
			memcpy(&reg, &data[offset], 8);
		}
		write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_WR_DATA, reg);

		/* wait until ACCESS_COMPLETED is set */
		count = 0;
		while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
		    & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
		    == 0) {
			count++;
			if (count > DC8051_ACCESS_TIMEOUT) {
				dd_dev_err(dd, "timeout writing 8051 data\n");
				return -ENXIO;
			}
			udelay(1);
		}
	}

	/* turn off write access, auto increment (also sets to data access) */
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
	write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);
	return 0;
}
  335. /* return 0 if values match, non-zero and complain otherwise */
  336. static int invalid_header(struct hfi1_devdata *dd, const char *what,
  337. u32 actual, u32 expected)
  338. {
  339. if (actual == expected)
  340. return 0;
  341. dd_dev_err(dd,
  342. "invalid firmware header field %s: expected 0x%x, actual 0x%x\n",
  343. what, expected, actual);
  344. return 1;
  345. }
  346. /*
  347. * Verify that the static fields in the CSS header match.
  348. */
  349. static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css)
  350. {
  351. /* verify CSS header fields (most sizes are in DW, so add /4) */
  352. if (invalid_header(dd, "module_type", css->module_type,
  353. CSS_MODULE_TYPE) ||
  354. invalid_header(dd, "header_len", css->header_len,
  355. (sizeof(struct firmware_file) / 4)) ||
  356. invalid_header(dd, "header_version", css->header_version,
  357. CSS_HEADER_VERSION) ||
  358. invalid_header(dd, "module_vendor", css->module_vendor,
  359. CSS_MODULE_VENDOR) ||
  360. invalid_header(dd, "key_size", css->key_size, KEY_SIZE / 4) ||
  361. invalid_header(dd, "modulus_size", css->modulus_size,
  362. KEY_SIZE / 4) ||
  363. invalid_header(dd, "exponent_size", css->exponent_size,
  364. EXPONENT_SIZE / 4)) {
  365. return -EINVAL;
  366. }
  367. return 0;
  368. }
  369. /*
  370. * Make sure there are at least some bytes after the prefix.
  371. */
  372. static int payload_check(struct hfi1_devdata *dd, const char *name,
  373. long file_size, long prefix_size)
  374. {
  375. /* make sure we have some payload */
  376. if (prefix_size >= file_size) {
  377. dd_dev_err(dd,
  378. "firmware \"%s\", size %ld, must be larger than %ld bytes\n",
  379. name, file_size, prefix_size);
  380. return -EINVAL;
  381. }
  382. return 0;
  383. }
/*
 * Request the firmware from the system. Extract the pieces and fill in
 * fdet. If successful, the caller will need to call dispose_one_firmware().
 * Returns 0 on success, -ERRNO on error.
 */
static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
			       struct firmware_details *fdet)
{
	struct css_header *css;
	int ret;

	memset(fdet, 0, sizeof(*fdet));

	ret = request_firmware(&fdet->fw, name, &dd->pcidev->dev);
	if (ret) {
		dd_dev_warn(dd, "cannot find firmware \"%s\", err %d\n",
			    name, ret);
		return ret;
	}

	/* verify the firmware: it must at least contain a full CSS header */
	if (fdet->fw->size < sizeof(struct css_header)) {
		dd_dev_err(dd, "firmware \"%s\" is too small\n", name);
		ret = -EINVAL;
		goto done;
	}
	css = (struct css_header *)fdet->fw->data;

	hfi1_cdbg(FIRMWARE, "Firmware %s details:", name);
	hfi1_cdbg(FIRMWARE, "file size: 0x%lx bytes", fdet->fw->size);
	hfi1_cdbg(FIRMWARE, "CSS structure:");
	hfi1_cdbg(FIRMWARE, " module_type 0x%x", css->module_type);
	hfi1_cdbg(FIRMWARE, " header_len 0x%03x (0x%03x bytes)",
		  css->header_len, 4 * css->header_len);
	hfi1_cdbg(FIRMWARE, " header_version 0x%x", css->header_version);
	hfi1_cdbg(FIRMWARE, " module_id 0x%x", css->module_id);
	hfi1_cdbg(FIRMWARE, " module_vendor 0x%x", css->module_vendor);
	hfi1_cdbg(FIRMWARE, " date 0x%x", css->date);
	hfi1_cdbg(FIRMWARE, " size 0x%03x (0x%03x bytes)",
		  css->size, 4 * css->size);
	hfi1_cdbg(FIRMWARE, " key_size 0x%03x (0x%03x bytes)",
		  css->key_size, 4 * css->key_size);
	hfi1_cdbg(FIRMWARE, " modulus_size 0x%03x (0x%03x bytes)",
		  css->modulus_size, 4 * css->modulus_size);
	hfi1_cdbg(FIRMWARE, " exponent_size 0x%03x (0x%03x bytes)",
		  css->exponent_size, 4 * css->exponent_size);
	hfi1_cdbg(FIRMWARE, "firmware size: 0x%lx bytes",
		  fdet->fw->size - sizeof(struct firmware_file));

	/*
	 * If the file does not have a valid CSS header, fail.
	 * Otherwise, check the CSS size field for an expected size.
	 * The augmented file has r2 and mu inserted after the header
	 * was generated, so there will be a known difference between
	 * the CSS header size and the actual file size. Use this
	 * difference to identify an augmented file.
	 *
	 * Note: css->size is in DWORDs, multiply by 4 to get bytes.
	 */
	ret = verify_css_header(dd, css);
	if (ret) {
		dd_dev_info(dd, "Invalid CSS header for \"%s\"\n", name);
	} else if ((css->size * 4) == fdet->fw->size) {
		/* non-augmented firmware file */
		struct firmware_file *ff = (struct firmware_file *)
						fdet->fw->data;

		/* make sure there are bytes in the payload */
		ret = payload_check(dd, name, fdet->fw->size,
				    sizeof(struct firmware_file));
		if (ret == 0) {
			fdet->css_header = css;
			fdet->modulus = ff->modulus;
			fdet->exponent = ff->exponent;
			fdet->signature = ff->signature;
			fdet->r2 = fdet->dummy_header.r2; /* use dummy space */
			fdet->mu = fdet->dummy_header.mu; /* use dummy space */
			fdet->firmware_ptr = ff->firmware;
			fdet->firmware_len = fdet->fw->size -
					     sizeof(struct firmware_file);
			/*
			 * Header does not include r2 and mu - generate here.
			 * For now, fail.
			 */
			dd_dev_err(dd, "driver is unable to validate firmware without r2 and mu (not in firmware file)\n");
			ret = -EINVAL;
		}
	} else if ((css->size * 4) + AUGMENT_SIZE == fdet->fw->size) {
		/* augmented firmware file */
		struct augmented_firmware_file *aff =
			(struct augmented_firmware_file *)fdet->fw->data;

		/* make sure there are bytes in the payload */
		ret = payload_check(dd, name, fdet->fw->size,
				    sizeof(struct augmented_firmware_file));
		if (ret == 0) {
			fdet->css_header = css;
			fdet->modulus = aff->modulus;
			fdet->exponent = aff->exponent;
			fdet->signature = aff->signature;
			fdet->r2 = aff->r2;
			fdet->mu = aff->mu;
			fdet->firmware_ptr = aff->firmware;
			fdet->firmware_len = fdet->fw->size -
					     sizeof(struct augmented_firmware_file);
		}
	} else {
		/* css->size check failed */
		dd_dev_err(dd,
			   "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n",
			   fdet->fw->size / 4,
			   (fdet->fw->size - AUGMENT_SIZE) / 4,
			   css->size);

		ret = -EINVAL;
	}

done:
	/* if returning an error, clean up after ourselves */
	if (ret)
		dispose_one_firmware(fdet);
	return ret;
}
/*
 * Release a firmware image obtained by obtain_one_firmware() and clear
 * fdet.  Safe on an already-cleared fdet: release_firmware(NULL) is a
 * no-op.
 */
static void dispose_one_firmware(struct firmware_details *fdet)
{
	release_firmware(fdet->fw);
	/* erase all previous information */
	memset(fdet, 0, sizeof(*fdet));
}
/*
 * Obtain the 4 firmwares from the OS. All must be obtained at once or not
 * at all. If called with the firmware state in FW_TRY, use alternate names.
 * On exit, this routine will have set the firmware state to one of FW_TRY,
 * FW_FINAL, or FW_ERR.
 *
 * Must be holding fw_mutex.
 */
static void __obtain_firmware(struct hfi1_devdata *dd)
{
	int err = 0;

	if (fw_state == FW_FINAL)	/* nothing more to obtain */
		return;
	if (fw_state == FW_ERR)		/* already in error */
		return;

	/* fw_state is FW_EMPTY or FW_TRY */
retry:
	if (fw_state == FW_TRY) {
		/*
		 * We tried the original and it failed. Move to the
		 * alternate.
		 */
		dd_dev_warn(dd, "using alternate firmware names\n");
		/*
		 * Let others run. Some systems, when missing firmware, does
		 * something that holds for 30 seconds. If we do that twice
		 * in a row it triggers task blocked warning.
		 */
		cond_resched();
		/* drop whatever was obtained under the original names */
		if (fw_8051_load)
			dispose_one_firmware(&fw_8051);
		if (fw_fabric_serdes_load)
			dispose_one_firmware(&fw_fabric);
		if (fw_sbus_load)
			dispose_one_firmware(&fw_sbus);
		if (fw_pcie_serdes_load)
			dispose_one_firmware(&fw_pcie);
		/* switch to the alternate names for this and later attempts */
		fw_8051_name = ALT_FW_8051_NAME_ASIC;
		fw_fabric_serdes_name = ALT_FW_FABRIC_NAME;
		fw_sbus_name = ALT_FW_SBUS_NAME;
		fw_pcie_serdes_name = ALT_FW_PCIE_NAME;
		/*
		 * Add a delay before obtaining and loading debug firmware.
		 * Authorization will fail if the delay between firmware
		 * authorization events is shorter than 50us. Add 100us to
		 * make a delay time safe.
		 */
		usleep_range(100, 120);
	}

	/* obtain each enabled image; first failure aborts the whole set */
	if (fw_sbus_load) {
		err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus);
		if (err)
			goto done;
	}

	if (fw_pcie_serdes_load) {
		err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie);
		if (err)
			goto done;
	}

	if (fw_fabric_serdes_load) {
		err = obtain_one_firmware(dd, fw_fabric_serdes_name,
					  &fw_fabric);
		if (err)
			goto done;
	}

	if (fw_8051_load) {
		err = obtain_one_firmware(dd, fw_8051_name, &fw_8051);
		if (err)
			goto done;
	}

done:
	if (err) {
		/* oops, had problems obtaining a firmware */
		if (fw_state == FW_EMPTY && dd->icode == ICODE_RTL_SILICON) {
			/* retry with alternate (RTL only) */
			fw_state = FW_TRY;
			goto retry;
		}
		dd_dev_err(dd, "unable to obtain working firmware\n");
		fw_state = FW_ERR;
		fw_err = -ENOENT;
	} else {
		/* success */
		if (fw_state == FW_EMPTY &&
		    dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
			fw_state = FW_TRY;	/* may retry later */
		else
			fw_state = FW_FINAL;	/* cannot try again */
	}
}
/*
 * Called by all HFIs when loading their firmware - i.e. device probe time.
 * The first one will do the actual firmware load. Use a mutex to resolve
 * any possible race condition.
 *
 * The call to this routine cannot be moved to driver load because the kernel
 * call request_firmware() requires a device which is only available after
 * the first device probe.
 *
 * Returns fw_err: 0 on success, the sticky -ERRNO otherwise.
 */
static int obtain_firmware(struct hfi1_devdata *dd)
{
	unsigned long timeout;

	mutex_lock(&fw_mutex);

	/* 40s delay due to long delay on missing firmware on some systems */
	timeout = jiffies + msecs_to_jiffies(40000);
	while (fw_state == FW_TRY) {
		/*
		 * Another device is trying the firmware. Wait until it
		 * decides what works (or not).
		 */
		if (time_after(jiffies, timeout)) {
			/* waited too long */
			dd_dev_err(dd, "Timeout waiting for firmware try");
			fw_state = FW_ERR;
			fw_err = -ETIMEDOUT;
			break;
		}
		/* drop the mutex while sleeping so the trying device can finish */
		mutex_unlock(&fw_mutex);
		msleep(20);	/* arbitrary delay */
		mutex_lock(&fw_mutex);
	}
	/* not in FW_TRY state */

	/* set fw_state to FW_TRY, FW_FINAL, or FW_ERR, and fw_err */
	if (fw_state == FW_EMPTY)
		__obtain_firmware(dd);

	mutex_unlock(&fw_mutex);
	return fw_err;
}
  632. /*
  633. * Called when the driver unloads. The timing is asymmetric with its
  634. * counterpart, obtain_firmware(). If called at device remove time,
  635. * then it is conceivable that another device could probe while the
  636. * firmware is being disposed. The mutexes can be moved to do that
  637. * safely, but then the firmware would be requested from the OS multiple
  638. * times.
  639. *
  640. * No mutex is needed as the driver is unloading and there cannot be any
  641. * other callers.
  642. */
  643. void dispose_firmware(void)
  644. {
  645. dispose_one_firmware(&fw_8051);
  646. dispose_one_firmware(&fw_fabric);
  647. dispose_one_firmware(&fw_pcie);
  648. dispose_one_firmware(&fw_sbus);
  649. /* retain the error state, otherwise revert to empty */
  650. if (fw_state != FW_ERR)
  651. fw_state = FW_EMPTY;
  652. }
  653. /*
  654. * Called with the result of a firmware download.
  655. *
  656. * Return 1 to retry loading the firmware, 0 to stop.
  657. */
  658. static int retry_firmware(struct hfi1_devdata *dd, int load_result)
  659. {
  660. int retry;
  661. mutex_lock(&fw_mutex);
  662. if (load_result == 0) {
  663. /*
  664. * The load succeeded, so expect all others to do the same.
  665. * Do not retry again.
  666. */
  667. if (fw_state == FW_TRY)
  668. fw_state = FW_FINAL;
  669. retry = 0; /* do NOT retry */
  670. } else if (fw_state == FW_TRY) {
  671. /* load failed, obtain alternate firmware */
  672. __obtain_firmware(dd);
  673. retry = (fw_state == FW_FINAL);
  674. } else {
  675. /* else in FW_FINAL or FW_ERR, no retry in either case */
  676. retry = 0;
  677. }
  678. mutex_unlock(&fw_mutex);
  679. return retry;
  680. }
  681. /*
  682. * Write a block of data to a given array CSR. All calls will be in
  683. * multiples of 8 bytes.
  684. */
  685. static void write_rsa_data(struct hfi1_devdata *dd, int what,
  686. const u8 *data, int nbytes)
  687. {
  688. int qw_size = nbytes / 8;
  689. int i;
  690. if (((unsigned long)data & 0x7) == 0) {
  691. /* aligned */
  692. u64 *ptr = (u64 *)data;
  693. for (i = 0; i < qw_size; i++, ptr++)
  694. write_csr(dd, what + (8 * i), *ptr);
  695. } else {
  696. /* not aligned */
  697. for (i = 0; i < qw_size; i++, data += 8) {
  698. u64 value;
  699. memcpy(&value, data, 8);
  700. write_csr(dd, what + (8 * i), value);
  701. }
  702. }
  703. }
  704. /*
  705. * Write a block of data to a given CSR as a stream of writes. All calls will
  706. * be in multiples of 8 bytes.
  707. */
  708. static void write_streamed_rsa_data(struct hfi1_devdata *dd, int what,
  709. const u8 *data, int nbytes)
  710. {
  711. u64 *ptr = (u64 *)data;
  712. int qw_size = nbytes / 8;
  713. for (; qw_size > 0; qw_size--, ptr++)
  714. write_csr(dd, what, *ptr);
  715. }
/*
 * Download the signature and start the RSA mechanism.  Wait for
 * RSA_ENGINE_TIMEOUT before giving up.
 *
 * @who: firmware name, used only in log messages
 * @signature: KEY_SIZE bytes of signature written to the engine
 *
 * Return: 0 on success, -EBUSY if the engine is not idle at entry,
 * -EINVAL on authorization failure or an unexpected idle state,
 * -ETIMEDOUT if the engine stays active past RSA_ENGINE_TIMEOUT.
 */
static int run_rsa(struct hfi1_devdata *dd, const char *who,
		   const u8 *signature)
{
	unsigned long timeout;
	u64 reg;
	u32 status;
	int ret = 0;

	/* write the signature */
	write_rsa_data(dd, MISC_CFG_RSA_SIGNATURE, signature, KEY_SIZE);

	/* initialize RSA */
	write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_INIT);

	/*
	 * Make sure the engine is idle and insert a delay between the two
	 * writes to MISC_CFG_RSA_CMD.
	 */
	status = (read_csr(dd, MISC_CFG_FW_CTRL)
		  & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
		  >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
	if (status != RSA_STATUS_IDLE) {
		dd_dev_err(dd, "%s security engine not idle - giving up\n",
			   who);
		return -EBUSY;
	}

	/* start RSA */
	write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_START);

	/*
	 * Look for the result.
	 *
	 * The RSA engine is hooked up to two MISC errors.  The driver
	 * masks these errors as they do not respond to the standard
	 * error "clear down" mechanism.  Look for these errors here and
	 * clear them when possible.  This routine will exit with the
	 * errors of the current run still set.
	 *
	 * MISC_FW_AUTH_FAILED_ERR
	 *	Firmware authorization failed.  This can be cleared by
	 *	re-initializing the RSA engine, then clearing the status bit.
	 *	Do not re-init the RSA engine immediately after a successful
	 *	run - this will reset the current authorization.
	 *
	 * MISC_KEY_MISMATCH_ERR
	 *	Key does not match.  The only way to clear this is to load
	 *	a matching key then clear the status bit.  If this error
	 *	is raised, it will persist outside of this routine until a
	 *	matching key is loaded.
	 */
	timeout = msecs_to_jiffies(RSA_ENGINE_TIMEOUT) + jiffies;
	while (1) {
		status = (read_csr(dd, MISC_CFG_FW_CTRL)
			  & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
			  >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;

		if (status == RSA_STATUS_IDLE) {
			/* should not happen - the engine was started above */
			dd_dev_err(dd, "%s firmware security bad idle state\n",
				   who);
			ret = -EINVAL;
			break;
		} else if (status == RSA_STATUS_DONE) {
			/* finished successfully */
			break;
		} else if (status == RSA_STATUS_FAILED) {
			/* finished unsuccessfully */
			ret = -EINVAL;
			break;
		}
		/* else still active */

		if (time_after(jiffies, timeout)) {
			/*
			 * Timed out while active.  We can't reset the engine
			 * if it is stuck active, but run through the
			 * error code to see what error bits are set.
			 */
			dd_dev_err(dd, "%s firmware security time out\n", who);
			ret = -ETIMEDOUT;
			break;
		}

		msleep(20);
	}

	/*
	 * Arrive here on success or failure.  Clear all RSA engine
	 * errors.  All current errors will stick - the RSA logic is keeping
	 * error high.  All previous errors will clear - the RSA logic
	 * is not keeping the error high.
	 */
	write_csr(dd, MISC_ERR_CLEAR,
		  MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK |
		  MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK);
	/*
	 * All that is left are the current errors.  Print warnings on
	 * authorization failure details, if any.  Firmware authorization
	 * can be retried, so these are only warnings.
	 */
	reg = read_csr(dd, MISC_ERR_STATUS);
	if (ret) {
		if (reg & MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK)
			dd_dev_warn(dd, "%s firmware authorization failed\n",
				    who);
		if (reg & MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK)
			dd_dev_warn(dd, "%s firmware key mismatch\n", who);
	}

	return ret;
}
/*
 * Write the firmware's RSA security variables (modulus, r2, mu, and the
 * CSS header) to the engine CSRs.  Done before starting the RSA engine
 * on the firmware's signature (see run_rsa()).
 */
static void load_security_variables(struct hfi1_devdata *dd,
				    struct firmware_details *fdet)
{
	/* Security variables a.  Write the modulus */
	write_rsa_data(dd, MISC_CFG_RSA_MODULUS, fdet->modulus, KEY_SIZE);
	/* Security variables b.  Write the r2 */
	write_rsa_data(dd, MISC_CFG_RSA_R2, fdet->r2, KEY_SIZE);
	/* Security variables c.  Write the mu */
	write_rsa_data(dd, MISC_CFG_RSA_MU, fdet->mu, MU_SIZE);
	/* Security variables d.  Write the header (streamed to one CSR) */
	write_streamed_rsa_data(dd, MISC_CFG_SHA_PRELOAD,
				(u8 *)fdet->css_header,
				sizeof(struct css_header));
}
  836. /* return the 8051 firmware state */
  837. static inline u32 get_firmware_state(struct hfi1_devdata *dd)
  838. {
  839. u64 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
  840. return (reg >> DC_DC8051_STS_CUR_STATE_FIRMWARE_SHIFT)
  841. & DC_DC8051_STS_CUR_STATE_FIRMWARE_MASK;
  842. }
  843. /*
  844. * Wait until the firmware is up and ready to take host requests.
  845. * Return 0 on success, -ETIMEDOUT on timeout.
  846. */
  847. int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
  848. {
  849. unsigned long timeout;
  850. /* in the simulator, the fake 8051 is always ready */
  851. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
  852. return 0;
  853. timeout = msecs_to_jiffies(mstimeout) + jiffies;
  854. while (1) {
  855. if (get_firmware_state(dd) == 0xa0) /* ready */
  856. return 0;
  857. if (time_after(jiffies, timeout)) /* timed out */
  858. return -ETIMEDOUT;
  859. usleep_range(1950, 2050); /* sleep 2ms-ish */
  860. }
  861. }
/*
 * Load the 8051 firmware.
 *
 * Performs the DC reset sequence, downloads the image, runs RSA
 * authorization on its signature, releases the 8051 from reset, then
 * waits for it to report ready and reads back its version.
 *
 * Return: 0 on success, -ETIMEDOUT if the 8051 does not come ready,
 * -EIO if setting the host interface version fails, or the error from
 * write_8051()/run_rsa().
 */
static int load_8051_firmware(struct hfi1_devdata *dd,
			      struct firmware_details *fdet)
{
	u64 reg;
	int ret;
	u8 ver_major;
	u8 ver_minor;
	u8 ver_patch;

	/*
	 * DC Reset sequence
	 * Load DC 8051 firmware
	 */
	/*
	 * DC reset step 1: Reset DC8051
	 */
	reg = DC_DC8051_CFG_RST_M8051W_SMASK
		| DC_DC8051_CFG_RST_CRAM_SMASK
		| DC_DC8051_CFG_RST_DRAM_SMASK
		| DC_DC8051_CFG_RST_IRAM_SMASK
		| DC_DC8051_CFG_RST_SFR_SMASK;
	write_csr(dd, DC_DC8051_CFG_RST, reg);

	/*
	 * DC reset step 2 (optional): Load 8051 data memory with link
	 * configuration
	 */

	/*
	 * DC reset step 3: Load DC8051 firmware
	 */
	/* release all but the core reset */
	reg = DC_DC8051_CFG_RST_M8051W_SMASK;
	write_csr(dd, DC_DC8051_CFG_RST, reg);

	/* Firmware load step 1 */
	load_security_variables(dd, fdet);

	/*
	 * Firmware load step 2.  Clear MISC_CFG_FW_CTRL.FW_8051_LOADED
	 */
	write_csr(dd, MISC_CFG_FW_CTRL, 0);

	/* Firmware load steps 3-5 */
	ret = write_8051(dd, 1/*code*/, 0, fdet->firmware_ptr,
			 fdet->firmware_len);
	if (ret)
		return ret;

	/*
	 * DC reset step 4.  Host starts the DC8051 firmware
	 */
	/*
	 * Firmware load step 6.  Set MISC_CFG_FW_CTRL.FW_8051_LOADED
	 */
	write_csr(dd, MISC_CFG_FW_CTRL, MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK);

	/* Firmware load steps 7-10 */
	ret = run_rsa(dd, "8051", fdet->signature);
	if (ret)
		return ret;

	/* clear all reset bits, releasing the 8051 */
	write_csr(dd, DC_DC8051_CFG_RST, 0ull);

	/*
	 * DC reset step 5.  Wait for firmware to be ready to accept host
	 * requests.
	 */
	ret = wait_fm_ready(dd, TIMEOUT_8051_START);
	if (ret) { /* timed out */
		dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
			   get_firmware_state(dd));
		return -ETIMEDOUT;
	}

	/* record the running firmware's version for later compatibility checks */
	read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
	dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
		    (int)ver_major, (int)ver_minor, (int)ver_patch);
	dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
	ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
	if (ret != HCMD_SUCCESS) {
		dd_dev_err(dd,
			   "Failed to set host interface version, return 0x%x\n",
			   ret);
		return -EIO;
	}

	return 0;
}
  943. /*
  944. * Write the SBus request register
  945. *
  946. * No need for masking - the arguments are sized exactly.
  947. */
  948. void sbus_request(struct hfi1_devdata *dd,
  949. u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
  950. {
  951. write_csr(dd, ASIC_CFG_SBUS_REQUEST,
  952. ((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT) |
  953. ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT) |
  954. ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT) |
  955. ((u64)receiver_addr <<
  956. ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT));
  957. }
  958. /*
  959. * Read a value from the SBus.
  960. *
  961. * Requires the caller to be in fast mode
  962. */
  963. static u32 sbus_read(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr,
  964. u32 data_in)
  965. {
  966. u64 reg;
  967. int retries;
  968. int success = 0;
  969. u32 result = 0;
  970. u32 result_code = 0;
  971. sbus_request(dd, receiver_addr, data_addr, READ_SBUS_RECEIVER, data_in);
  972. for (retries = 0; retries < 100; retries++) {
  973. usleep_range(1000, 1200); /* arbitrary */
  974. reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
  975. result_code = (reg >> ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT)
  976. & ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK;
  977. if (result_code != SBUS_READ_COMPLETE)
  978. continue;
  979. success = 1;
  980. result = (reg >> ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT)
  981. & ASIC_STS_SBUS_RESULT_DATA_OUT_MASK;
  982. break;
  983. }
  984. if (!success) {
  985. dd_dev_err(dd, "%s: read failed, result code 0x%x\n", __func__,
  986. result_code);
  987. }
  988. return result;
  989. }
/*
 * Turn off the SBus and fabric serdes spicos.
 *
 * + Must be called with Sbus fast mode turned on.
 * + Must be called after fabric serdes broadcast is set up.
 * + Must be called before the 8051 is loaded - assumes 8051 is not loaded
 *   when using MISC_CFG_FW_CTRL.
 *
 * @flags: mask of SPICO_SBUS and/or SPICO_FABRIC selecting which spicos
 *	   to disable.
 */
static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
{
	/* only needed on A0 */
	if (!is_ax(dd))
		return;

	dd_dev_info(dd, "Turning off spicos:%s%s\n",
		    flags & SPICO_SBUS ? " SBus" : "",
		    flags & SPICO_FABRIC ? " fabric" : "");

	/* NOTE(review): ENABLE_SPICO appears to gate the writes below;
	 * cleared again once done - confirm against the HW spec */
	write_csr(dd, MISC_CFG_FW_CTRL, ENABLE_SPICO_SMASK);
	/* disable SBus spico */
	if (flags & SPICO_SBUS)
		sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01,
			     WRITE_SBUS_RECEIVER, 0x00000040);

	/* disable the fabric serdes spicos */
	if (flags & SPICO_FABRIC)
		sbus_request(dd, fabric_serdes_broadcast[dd->hfi1_id],
			     0x07, WRITE_SBUS_RECEIVER, 0x00000000);
	write_csr(dd, MISC_CFG_FW_CTRL, 0);
}
/*
 * Reset all of the fabric serdes for this HFI in preparation to take the
 * link to Polling.
 *
 * To do a reset, we need to write to the serdes registers.  Unfortunately,
 * the fabric serdes download to the other HFI on the ASIC will have turned
 * off the firmware validation on this HFI.  This means we can't write to
 * the registers to reset the serdes.  Work around this by performing a
 * complete re-download and validation of the fabric serdes firmware.
 * This, as a by-product, will reset the serdes.  NOTE: the re-download
 * requires that the 8051 be in the Offline state.  I.e. not actively
 * trying to use the serdes.  This routine is called at the point where
 * the link is Offline and is getting ready to go to Polling.
 */
void fabric_serdes_reset(struct hfi1_devdata *dd)
{
	int ret;

	/* nothing to do if the fabric serdes firmware is not in use */
	if (!fw_fabric_serdes_load)
		return;

	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
	if (ret) {
		dd_dev_err(dd,
			   "Cannot acquire SBus resource to reset fabric SerDes - perhaps you should reboot\n");
		return;
	}
	set_sbus_fast_mode(dd);

	if (is_ax(dd)) {
		/* A0 serdes do not work with a re-download */
		u8 ra = fabric_serdes_broadcast[dd->hfi1_id];

		/* place SerDes in reset and disable SPICO */
		sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
		/* wait 100 refclk cycles @ 156.25MHz => 640ns */
		udelay(1);
		/* remove SerDes reset */
		sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
		/* turn SPICO enable on */
		sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
	} else {
		turn_off_spicos(dd, SPICO_FABRIC);
		/*
		 * No need for firmware retry - what to download has already
		 * been decided.
		 * No need to pay attention to the load return - the only
		 * failure is a validation failure, which has already been
		 * checked by the initial download.
		 */
		(void)load_fabric_serdes_firmware(dd, &fw_fabric);
	}

	clear_sbus_fast_mode(dd);
	release_chip_resource(dd, CR_SBUS);
}
/*
 * Issue an SBus request in slow (handshake) mode and wait for it to
 * complete.
 *
 * Access to the SBus in this routine should probably be serialized.
 *
 * Return: 0 on success, -ETIMEDOUT if the request never completed,
 * -ETIME if DONE failed to clear after EXECUTE was withdrawn.
 */
int sbus_request_slow(struct hfi1_devdata *dd,
		      u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
{
	u64 reg, count = 0;

	/* make sure fast mode is clear */
	clear_sbus_fast_mode(dd);

	sbus_request(dd, receiver_addr, data_addr, command, data_in);
	write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
		  ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK);
	/* Wait for both DONE and RCV_DATA_VALID to go high */
	reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
	while (!((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
		 (reg & ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK))) {
		if (count++ >= SBUS_MAX_POLL_COUNT) {
			u64 counts = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
			/*
			 * If the loop has timed out, we are OK if DONE bit
			 * is set and RCV_DATA_VALID and EXECUTE counters
			 * are the same.  If not, we cannot proceed.
			 */
			if ((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
			    (SBUS_COUNTER(counts, RCV_DATA_VALID) ==
			     SBUS_COUNTER(counts, EXECUTE)))
				break;
			return -ETIMEDOUT;
		}
		udelay(1);
		reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
	}
	/* reuse the poll counter for the second handshake phase */
	count = 0;
	write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
	/* Wait for DONE to clear after EXECUTE is cleared */
	reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
	while (reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) {
		if (count++ >= SBUS_MAX_POLL_COUNT)
			return -ETIME;
		udelay(1);
		reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
	}
	return 0;
}
/*
 * Download and authorize the fabric serdes firmware.
 *
 * Callers in this file acquire the SBus resource and enable SBus fast
 * mode before calling.
 *
 * Return: 0 on success, or the error from run_rsa().
 */
static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
				       struct firmware_details *fdet)
{
	int i, err;
	const u8 ra = fabric_serdes_broadcast[dd->hfi1_id]; /* receiver addr */

	dd_dev_info(dd, "Downloading fabric firmware\n");

	/* step 1: load security variables */
	load_security_variables(dd, fdet);
	/* step 2: place SerDes in reset and disable SPICO */
	sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
	/* wait 100 refclk cycles @ 156.25MHz => 640ns */
	udelay(1);
	/* step 3: remove SerDes reset */
	sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
	/* step 4: assert IMEM override */
	sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x40000000);
	/* step 5: download SerDes machine code */
	/* NOTE(review): assumes the image is a stream of CPU-order u32
	 * words and firmware_len is a multiple of 4 - confirm with the
	 * firmware header parsing code */
	for (i = 0; i < fdet->firmware_len; i += 4) {
		sbus_request(dd, ra, 0x0a, WRITE_SBUS_RECEIVER,
			     *(u32 *)&fdet->firmware_ptr[i]);
	}
	/* step 6: IMEM override off */
	sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x00000000);
	/* step 7: turn ECC on */
	sbus_request(dd, ra, 0x0b, WRITE_SBUS_RECEIVER, 0x000c0000);

	/* steps 8-11: run the RSA engine */
	err = run_rsa(dd, "fabric serdes", fdet->signature);
	if (err)
		return err;

	/* step 12: turn SPICO enable on */
	sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
	/* step 13: enable core hardware interrupts */
	sbus_request(dd, ra, 0x08, WRITE_SBUS_RECEIVER, 0x00000000);

	return 0;
}
/*
 * Download and authorize the SBus Master firmware.
 *
 * Return: 0 on success, or the error from run_rsa().
 */
static int load_sbus_firmware(struct hfi1_devdata *dd,
			      struct firmware_details *fdet)
{
	int i, err;
	const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */

	dd_dev_info(dd, "Downloading SBus firmware\n");

	/* step 1: load security variables */
	load_security_variables(dd, fdet);
	/* step 2: place SPICO into reset and enable off */
	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x000000c0);
	/* step 3: remove reset, enable off, IMEM_CNTRL_EN on */
	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000240);
	/* step 4: set starting IMEM address for burst download */
	sbus_request(dd, ra, 0x03, WRITE_SBUS_RECEIVER, 0x80000000);
	/* step 5: download the SBus Master machine code */
	/* NOTE(review): assumes the image is a stream of CPU-order u32
	 * words, as in load_fabric_serdes_firmware() - confirm */
	for (i = 0; i < fdet->firmware_len; i += 4) {
		sbus_request(dd, ra, 0x14, WRITE_SBUS_RECEIVER,
			     *(u32 *)&fdet->firmware_ptr[i]);
	}
	/* step 6: set IMEM_CNTL_EN off */
	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000040);
	/* step 7: turn ECC on */
	sbus_request(dd, ra, 0x16, WRITE_SBUS_RECEIVER, 0x000c0000);

	/* steps 8-11: run the RSA engine */
	err = run_rsa(dd, "SBus", fdet->signature);
	if (err)
		return err;

	/* step 12: set SPICO_ENABLE on */
	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);

	return 0;
}
/*
 * Download and authorize the PCIe serdes firmware via the SBus Master's
 * XDMEM.
 *
 * Return: 0 on success, or the error from run_rsa().
 */
static int load_pcie_serdes_firmware(struct hfi1_devdata *dd,
				     struct firmware_details *fdet)
{
	int i;
	const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */

	dd_dev_info(dd, "Downloading PCIe firmware\n");

	/* step 1: load security variables */
	load_security_variables(dd, fdet);
	/* step 2: assert single step (halts the SBus Master spico) */
	sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000001);
	/* step 3: enable XDMEM access */
	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000d40);
	/* step 4: load firmware into SBus Master XDMEM */
	/*
	 * NOTE: the dmem address, write_en, and wdata are all pre-packed,
	 * we only need to pick up the bytes and write them
	 */
	for (i = 0; i < fdet->firmware_len; i += 4) {
		sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER,
			     *(u32 *)&fdet->firmware_ptr[i]);
	}
	/* step 5: disable XDMEM access */
	sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
	/* step 6: allow SBus Spico to run */
	sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000000);

	/*
	 * steps 7-11: run RSA, if it succeeds, firmware is available to
	 * be swapped
	 */
	return run_rsa(dd, "PCIe serdes", fdet->signature);
}
  1207. /*
  1208. * Set the given broadcast values on the given list of devices.
  1209. */
  1210. static void set_serdes_broadcast(struct hfi1_devdata *dd, u8 bg1, u8 bg2,
  1211. const u8 *addrs, int count)
  1212. {
  1213. while (--count >= 0) {
  1214. /*
  1215. * Set BROADCAST_GROUP_1 and BROADCAST_GROUP_2, leave
  1216. * defaults for everything else. Do not read-modify-write,
  1217. * per instruction from the manufacturer.
  1218. *
  1219. * Register 0xfd:
  1220. * bits what
  1221. * ----- ---------------------------------
  1222. * 0 IGNORE_BROADCAST (default 0)
  1223. * 11:4 BROADCAST_GROUP_1 (default 0xff)
  1224. * 23:16 BROADCAST_GROUP_2 (default 0xff)
  1225. */
  1226. sbus_request(dd, addrs[count], 0xfd, WRITE_SBUS_RECEIVER,
  1227. (u32)bg1 << 4 | (u32)bg2 << 16);
  1228. }
  1229. }
/*
 * Acquire the ASIC-level hardware mutex by writing this HFI's mask to
 * ASIC_CFG_MUTEX and reading it back to see who owns it.  On timeout,
 * the mutex is broken (written to 0) once and the acquire is retried
 * before giving up.
 *
 * Return: 0 on success (including when already held), -EBUSY on failure.
 */
int acquire_hw_mutex(struct hfi1_devdata *dd)
{
	unsigned long timeout;
	int try = 0;
	u8 mask = 1 << dd->hfi1_id;
	u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);

	if (user == mask) {
		/* we already own it */
		dd_dev_info(dd,
			    "Hardware mutex already acquired, mutex mask %u\n",
			    (u32)mask);
		return 0;
	}

retry:
	timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies;
	while (1) {
		write_csr(dd, ASIC_CFG_MUTEX, mask);
		user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
		if (user == mask)
			return 0; /* success */
		if (time_after(jiffies, timeout))
			break; /* timed out */
		msleep(20);
	}

	/* timed out */
	dd_dev_err(dd,
		   "Unable to acquire hardware mutex, mutex mask %u, my mask %u (%s)\n",
		   (u32)user, (u32)mask, (try == 0) ? "retrying" : "giving up");

	if (try == 0) {
		/* break mutex and retry */
		write_csr(dd, ASIC_CFG_MUTEX, 0);
		try++;
		goto retry;
	}

	return -EBUSY;
}
  1265. void release_hw_mutex(struct hfi1_devdata *dd)
  1266. {
  1267. u8 mask = 1 << dd->hfi1_id;
  1268. u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
  1269. if (user != mask)
  1270. dd_dev_warn(dd,
  1271. "Unable to release hardware mutex, mutex mask %u, my mask %u\n",
  1272. (u32)user, (u32)mask);
  1273. else
  1274. write_csr(dd, ASIC_CFG_MUTEX, 0);
  1275. }
  1276. /* return the given resource bit(s) as a mask for the given HFI */
  1277. static inline u64 resource_mask(u32 hfi1_id, u32 resource)
  1278. {
  1279. return ((u64)resource) << (hfi1_id ? CR_DYN_SHIFT : 0);
  1280. }
/*
 * Log a hardware-mutex acquire failure on behalf of @func.  Per the
 * message, the only suggested recovery from a stuck mutex is a reboot.
 */
static void fail_mutex_acquire_message(struct hfi1_devdata *dd,
				       const char *func)
{
	dd_dev_err(dd,
		   "%s: hardware mutex stuck - suggest rebooting the machine\n",
		   func);
}
/*
 * Acquire access to a chip resource.
 *
 * Resource bookkeeping lives in ASIC_CFG_SCRATCH, shared by both HFIs;
 * the hardware mutex serializes access to it.
 *
 * Return 0 on success, -EBUSY if resource busy, -EIO if mutex acquire
 * failed.
 */
static int __acquire_chip_resource(struct hfi1_devdata *dd, u32 resource)
{
	u64 scratch0, all_bits, my_bit;
	int ret;

	if (resource & CR_DYN_MASK) {
		/* a dynamic resource is in use if either HFI has set the bit */
		if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0 &&
		    (resource & (CR_I2C1 | CR_I2C2))) {
			/* discrete devices must serialize across both chains */
			all_bits = resource_mask(0, CR_I2C1 | CR_I2C2) |
					resource_mask(1, CR_I2C1 | CR_I2C2);
		} else {
			all_bits = resource_mask(0, resource) |
					resource_mask(1, resource);
		}
		my_bit = resource_mask(dd->hfi1_id, resource);
	} else {
		/* non-dynamic resources are not split between HFIs */
		all_bits = resource;
		my_bit = resource;
	}

	/* lock against other callers within the driver wanting a resource */
	mutex_lock(&dd->asic_data->asic_resource_mutex);

	ret = acquire_hw_mutex(dd);
	if (ret) {
		fail_mutex_acquire_message(dd, __func__);
		ret = -EIO;
		goto done;
	}

	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
	if (scratch0 & all_bits) {
		/* some HFI already owns it */
		ret = -EBUSY;
	} else {
		write_csr(dd, ASIC_CFG_SCRATCH, scratch0 | my_bit);
		/* force write to be visible to other HFI on another OS */
		(void)read_csr(dd, ASIC_CFG_SCRATCH);
	}

	release_hw_mutex(dd);

done:
	mutex_unlock(&dd->asic_data->asic_resource_mutex);
	return ret;
}
  1335. /*
  1336. * Acquire access to a chip resource, wait up to mswait milliseconds for
  1337. * the resource to become available.
  1338. *
  1339. * Return 0 on success, -EBUSY if busy (even after wait), -EIO if mutex
  1340. * acquire failed.
  1341. */
  1342. int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait)
  1343. {
  1344. unsigned long timeout;
  1345. int ret;
  1346. timeout = jiffies + msecs_to_jiffies(mswait);
  1347. while (1) {
  1348. ret = __acquire_chip_resource(dd, resource);
  1349. if (ret != -EBUSY)
  1350. return ret;
  1351. /* resource is busy, check our timeout */
  1352. if (time_after_eq(jiffies, timeout))
  1353. return -EBUSY;
  1354. usleep_range(80, 120); /* arbitrary delay */
  1355. }
  1356. }
/*
 * Release access to a chip resource previously acquired with
 * acquire_chip_resource().  Only dynamic resources may be released.
 */
void release_chip_resource(struct hfi1_devdata *dd, u32 resource)
{
	u64 scratch0, bit;

	/* only dynamic resources should ever be cleared */
	if (!(resource & CR_DYN_MASK)) {
		dd_dev_err(dd, "%s: invalid resource 0x%x\n", __func__,
			   resource);
		return;
	}
	bit = resource_mask(dd->hfi1_id, resource);

	/* lock against other callers within the driver wanting a resource */
	mutex_lock(&dd->asic_data->asic_resource_mutex);

	if (acquire_hw_mutex(dd)) {
		fail_mutex_acquire_message(dd, __func__);
		goto done;
	}

	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
	if ((scratch0 & bit) != 0) {
		scratch0 &= ~bit;
		write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
		/* force write to be visible to other HFI on another OS */
		(void)read_csr(dd, ASIC_CFG_SCRATCH);
	} else {
		/* releasing a resource we did not hold - warn, do not clear */
		dd_dev_warn(dd, "%s: id %d, resource 0x%x: bit not set\n",
			    __func__, dd->hfi1_id, resource);
	}

	release_hw_mutex(dd);

done:
	mutex_unlock(&dd->asic_data->asic_resource_mutex);
}
  1390. /*
  1391. * Return true if resource is set, false otherwise. Print a warning
  1392. * if not set and a function is supplied.
  1393. */
  1394. bool check_chip_resource(struct hfi1_devdata *dd, u32 resource,
  1395. const char *func)
  1396. {
  1397. u64 scratch0, bit;
  1398. if (resource & CR_DYN_MASK)
  1399. bit = resource_mask(dd->hfi1_id, resource);
  1400. else
  1401. bit = resource;
  1402. scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
  1403. if ((scratch0 & bit) == 0) {
  1404. if (func)
  1405. dd_dev_warn(dd,
  1406. "%s: id %d, resource 0x%x, not acquired!\n",
  1407. func, dd->hfi1_id, resource);
  1408. return false;
  1409. }
  1410. return true;
  1411. }
/*
 * Clear all dynamic chip-resource bits held by this HFI in the scratch
 * register.  Used at init and shutdown to remove any stale holds.
 * @func is used only in the mutex-failure log message.
 */
static void clear_chip_resources(struct hfi1_devdata *dd, const char *func)
{
	u64 scratch0;

	/* lock against other callers within the driver wanting a resource */
	mutex_lock(&dd->asic_data->asic_resource_mutex);

	if (acquire_hw_mutex(dd)) {
		fail_mutex_acquire_message(dd, func);
		goto done;
	}

	/* clear all dynamic access bits for this HFI */
	scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
	scratch0 &= ~resource_mask(dd->hfi1_id, CR_DYN_MASK);
	write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
	/* force write to be visible to other HFI on another OS */
	(void)read_csr(dd, ASIC_CFG_SCRATCH);

	release_hw_mutex(dd);

done:
	mutex_unlock(&dd->asic_data->asic_resource_mutex);
}
/* Device init: remove any stale resource holds left by this HFI. */
void init_chip_resources(struct hfi1_devdata *dd)
{
	/* clear any holds left by us */
	clear_chip_resources(dd, __func__);
}
/* Device teardown: remove any resource holds still owned by this HFI. */
void finish_chip_resources(struct hfi1_devdata *dd)
{
	/* clear any holds left by us */
	clear_chip_resources(dd, __func__);
}
/*
 * Put the SBus into fast mode - contrast sbus_request_slow(), which
 * requires fast mode to be clear and drives EXECUTE explicitly.
 * Undo with clear_sbus_fast_mode().
 */
void set_sbus_fast_mode(struct hfi1_devdata *dd)
{
	write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
		  ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK);
}
  1446. void clear_sbus_fast_mode(struct hfi1_devdata *dd)
  1447. {
  1448. u64 reg, count = 0;
  1449. reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
  1450. while (SBUS_COUNTER(reg, EXECUTE) !=
  1451. SBUS_COUNTER(reg, RCV_DATA_VALID)) {
  1452. if (count++ >= SBUS_MAX_POLL_COUNT)
  1453. break;
  1454. udelay(1);
  1455. reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
  1456. }
  1457. write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
  1458. }
/*
 * Load and authorize the firmware pieces selected at init time: the
 * fabric serdes firmware (under the SBus resource, in fast mode) and
 * the 8051 firmware.  Each download is retried via retry_firmware()
 * when an alternate image can be obtained.
 *
 * Return: 0 on success, otherwise the first failing step's error.
 */
int load_firmware(struct hfi1_devdata *dd)
{
	int ret;

	if (fw_fabric_serdes_load) {
		ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
		if (ret)
			return ret;

		set_sbus_fast_mode(dd);

		set_serdes_broadcast(dd, all_fabric_serdes_broadcast,
				     fabric_serdes_broadcast[dd->hfi1_id],
				     fabric_serdes_addrs[dd->hfi1_id],
				     NUM_FABRIC_SERDES);
		turn_off_spicos(dd, SPICO_FABRIC);
		do {
			ret = load_fabric_serdes_firmware(dd, &fw_fabric);
		} while (retry_firmware(dd, ret));

		clear_sbus_fast_mode(dd);
		release_chip_resource(dd, CR_SBUS);
		if (ret)
			return ret;
	}

	if (fw_8051_load) {
		do {
			ret = load_8051_firmware(dd, &fw_8051);
		} while (retry_firmware(dd, ret));
		if (ret)
			return ret;
	}

	dump_fw_version(dd);
	return 0;
}
  1490. int hfi1_firmware_init(struct hfi1_devdata *dd)
  1491. {
  1492. /* only RTL can use these */
  1493. if (dd->icode != ICODE_RTL_SILICON) {
  1494. fw_fabric_serdes_load = 0;
  1495. fw_pcie_serdes_load = 0;
  1496. fw_sbus_load = 0;
  1497. }
  1498. /* no 8051 or QSFP on simulator */
  1499. if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
  1500. fw_8051_load = 0;
  1501. if (!fw_8051_name) {
  1502. if (dd->icode == ICODE_RTL_SILICON)
  1503. fw_8051_name = DEFAULT_FW_8051_NAME_ASIC;
  1504. else
  1505. fw_8051_name = DEFAULT_FW_8051_NAME_FPGA;
  1506. }
  1507. if (!fw_fabric_serdes_name)
  1508. fw_fabric_serdes_name = DEFAULT_FW_FABRIC_NAME;
  1509. if (!fw_sbus_name)
  1510. fw_sbus_name = DEFAULT_FW_SBUS_NAME;
  1511. if (!fw_pcie_serdes_name)
  1512. fw_pcie_serdes_name = DEFAULT_FW_PCIE_NAME;
  1513. return obtain_firmware(dd);
  1514. }
  1515. /*
  1516. * This function is a helper function for parse_platform_config(...) and
  1517. * does not check for validity of the platform configuration cache
  1518. * (because we know it is invalid as we are building up the cache).
  1519. * As such, this should not be called from anywhere other than
  1520. * parse_platform_config
  1521. */
  1522. static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
  1523. {
  1524. u32 meta_ver, meta_ver_meta, ver_start, ver_len, mask;
  1525. struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
  1526. if (!system_table)
  1527. return -EINVAL;
  1528. meta_ver_meta =
  1529. *(pcfgcache->config_tables[PLATFORM_CONFIG_SYSTEM_TABLE].table_metadata
  1530. + SYSTEM_TABLE_META_VERSION);
  1531. mask = ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
  1532. ver_start = meta_ver_meta & mask;
  1533. meta_ver_meta >>= METADATA_TABLE_FIELD_LEN_SHIFT;
  1534. mask = ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
  1535. ver_len = meta_ver_meta & mask;
  1536. ver_start /= 8;
  1537. meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1);
  1538. if (meta_ver < 4) {
  1539. dd_dev_info(
  1540. dd, "%s:Please update platform config\n", __func__);
  1541. return -EINVAL;
  1542. }
  1543. return 0;
  1544. }
/*
 * Parse the platform configuration image in dd->platform_config and
 * populate dd->pcfg_cache with pointers into it for each data/metadata
 * table.  On any validation failure the cache is zeroed and -EINVAL is
 * returned; on success pcfgcache->cache_valid is set and 0 is returned.
 */
int parse_platform_config(struct hfi1_devdata *dd)
{
	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
	struct hfi1_pportdata *ppd = dd->pport;
	u32 *ptr = NULL;
	u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0, file_length = 0;
	u32 record_idx = 0, table_type = 0, table_length_dwords = 0;
	int ret = -EINVAL; /* assume failure */

	/*
	 * For integrated devices that did not fall back to the default file,
	 * the SI tuning information for active channels is acquired from the
	 * scratch register bitmap, thus there is no platform config to parse.
	 * Skip parsing in these situations.
	 */
	if (ppd->config_from_scratch)
		return 0;

	if (!dd->platform_config.data) {
		dd_dev_err(dd, "%s: Missing config file\n", __func__);
		goto bail;
	}
	ptr = (u32 *)dd->platform_config.data;

	/* first DWORD: magic number */
	magic_num = *ptr;
	ptr++;
	if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
		dd_dev_err(dd, "%s: Bad config file\n", __func__);
		goto bail;
	}

	/* Field is file size in DWORDs */
	file_length = (*ptr) * 4;

	/*
	 * Length can't be larger than partition size. Assume platform
	 * config format version 4 is being used. Interpret the file size
	 * field as header instead by not moving the pointer.
	 */
	if (file_length > MAX_PLATFORM_CONFIG_FILE_SIZE) {
		dd_dev_info(dd,
			    "%s:File length out of bounds, using alternative format\n",
			    __func__);
		file_length = PLATFORM_CONFIG_FORMAT_4_FILE_SIZE;
	} else {
		ptr++;
	}

	if (file_length > dd->platform_config.size) {
		dd_dev_info(dd, "%s:File claims to be larger than read size\n",
			    __func__);
		goto bail;
	} else if (file_length < dd->platform_config.size) {
		dd_dev_info(dd,
			    "%s:File claims to be smaller than read size, continuing\n",
			    __func__);
	}
	/* exactly equal, perfection */

	/*
	 * In both cases where we proceed, using the self-reported file length
	 * is the safer option. In case of old format a predefined value is
	 * being used.
	 *
	 * Each iteration consumes one table: a 2-DWORD header pair, the
	 * table body (table_length_dwords DWORDs), then a trailing CRC
	 * DWORD.
	 *
	 * NOTE(review): the loop condition bounds only the first header
	 * DWORD; *(ptr + 1), the table body and the CRC DWORD are read on
	 * the strength of the header's self-reported length.  A corrupt
	 * image could make these reads land past file_length - confirm
	 * the buffer is always padded/sized to tolerate this.
	 */
	while (ptr < (u32 *)(dd->platform_config.data + file_length)) {
		header1 = *ptr;
		header2 = *(ptr + 1);
		/* header integrity: second DWORD is bitwise NOT of the first */
		if (header1 != ~header2) {
			/* NOTE(review): %ld for a ptrdiff_t; %td is stricter */
			dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
				   __func__, (ptr - (u32 *)
					      dd->platform_config.data));
			goto bail;
		}

		/* unpack the header: record index, body length, table type */
		record_idx = *ptr &
			((1 << PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS) - 1);

		table_length_dwords = (*ptr >>
				PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT) &
		      ((1 << PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS) - 1);

		table_type = (*ptr >> PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT) &
			((1 << PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS) - 1);

		/* Done with this set of headers */
		ptr += 2;

		if (record_idx) {
			/* data table */
			switch (table_type) {
			case PLATFORM_CONFIG_SYSTEM_TABLE:
				pcfgcache->config_tables[table_type].num_table =
					1;
				/* system table also carries the meta version */
				ret = check_meta_version(dd, ptr);
				if (ret)
					goto bail;
				break;
			case PLATFORM_CONFIG_PORT_TABLE:
				pcfgcache->config_tables[table_type].num_table =
					2;
				break;
			case PLATFORM_CONFIG_RX_PRESET_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_TX_PRESET_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
				/* these tables are one DWORD per instance */
				pcfgcache->config_tables[table_type].num_table =
					table_length_dwords;
				break;
			default:
				dd_dev_err(dd,
					   "%s: Unknown data table %d, offset %ld\n",
					   __func__, table_type,
					   (ptr - (u32 *)
					    dd->platform_config.data));
				goto bail; /* We don't trust this file now */
			}
			pcfgcache->config_tables[table_type].table = ptr;
		} else {
			/* metadata table */
			switch (table_type) {
			case PLATFORM_CONFIG_SYSTEM_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_PORT_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_RX_PRESET_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_TX_PRESET_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
				/* fall through */
			case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
				break;
			default:
				dd_dev_err(dd,
					   "%s: Unknown meta table %d, offset %ld\n",
					   __func__, table_type,
					   (ptr -
					    (u32 *)dd->platform_config.data));
				goto bail; /* We don't trust this file now */
			}
			pcfgcache->config_tables[table_type].table_metadata =
				ptr;
		}

		/* Calculate and check table crc */
		crc = crc32_le(~(u32)0, (unsigned char const *)ptr,
			       (table_length_dwords * 4));
		crc ^= ~(u32)0;

		/* Jump the table */
		ptr += table_length_dwords;
		if (crc != *ptr) {
			dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
				   __func__, (ptr -
					      (u32 *)dd->platform_config.data));
			goto bail;
		}
		/* Jump the CRC DWORD */
		ptr++;
	}

	pcfgcache->cache_valid = 1;
	return 0;
bail:
	memset(pcfgcache, 0, sizeof(struct platform_config_cache));
	return ret;
}
  1700. static void get_integrated_platform_config_field(
  1701. struct hfi1_devdata *dd,
  1702. enum platform_config_table_type_encoding table_type,
  1703. int field_index, u32 *data)
  1704. {
  1705. struct hfi1_pportdata *ppd = dd->pport;
  1706. u8 *cache = ppd->qsfp_info.cache;
  1707. u32 tx_preset = 0;
  1708. switch (table_type) {
  1709. case PLATFORM_CONFIG_SYSTEM_TABLE:
  1710. if (field_index == SYSTEM_TABLE_QSFP_POWER_CLASS_MAX)
  1711. *data = ppd->max_power_class;
  1712. else if (field_index == SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G)
  1713. *data = ppd->default_atten;
  1714. break;
  1715. case PLATFORM_CONFIG_PORT_TABLE:
  1716. if (field_index == PORT_TABLE_PORT_TYPE)
  1717. *data = ppd->port_type;
  1718. else if (field_index == PORT_TABLE_LOCAL_ATTEN_25G)
  1719. *data = ppd->local_atten;
  1720. else if (field_index == PORT_TABLE_REMOTE_ATTEN_25G)
  1721. *data = ppd->remote_atten;
  1722. break;
  1723. case PLATFORM_CONFIG_RX_PRESET_TABLE:
  1724. if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR_APPLY)
  1725. *data = (ppd->rx_preset & QSFP_RX_CDR_APPLY_SMASK) >>
  1726. QSFP_RX_CDR_APPLY_SHIFT;
  1727. else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP_APPLY)
  1728. *data = (ppd->rx_preset & QSFP_RX_EMP_APPLY_SMASK) >>
  1729. QSFP_RX_EMP_APPLY_SHIFT;
  1730. else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP_APPLY)
  1731. *data = (ppd->rx_preset & QSFP_RX_AMP_APPLY_SMASK) >>
  1732. QSFP_RX_AMP_APPLY_SHIFT;
  1733. else if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR)
  1734. *data = (ppd->rx_preset & QSFP_RX_CDR_SMASK) >>
  1735. QSFP_RX_CDR_SHIFT;
  1736. else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP)
  1737. *data = (ppd->rx_preset & QSFP_RX_EMP_SMASK) >>
  1738. QSFP_RX_EMP_SHIFT;
  1739. else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP)
  1740. *data = (ppd->rx_preset & QSFP_RX_AMP_SMASK) >>
  1741. QSFP_RX_AMP_SHIFT;
  1742. break;
  1743. case PLATFORM_CONFIG_TX_PRESET_TABLE:
  1744. if (cache[QSFP_EQ_INFO_OFFS] & 0x4)
  1745. tx_preset = ppd->tx_preset_eq;
  1746. else
  1747. tx_preset = ppd->tx_preset_noeq;
  1748. if (field_index == TX_PRESET_TABLE_PRECUR)
  1749. *data = (tx_preset & TX_PRECUR_SMASK) >>
  1750. TX_PRECUR_SHIFT;
  1751. else if (field_index == TX_PRESET_TABLE_ATTN)
  1752. *data = (tx_preset & TX_ATTN_SMASK) >>
  1753. TX_ATTN_SHIFT;
  1754. else if (field_index == TX_PRESET_TABLE_POSTCUR)
  1755. *data = (tx_preset & TX_POSTCUR_SMASK) >>
  1756. TX_POSTCUR_SHIFT;
  1757. else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR_APPLY)
  1758. *data = (tx_preset & QSFP_TX_CDR_APPLY_SMASK) >>
  1759. QSFP_TX_CDR_APPLY_SHIFT;
  1760. else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ_APPLY)
  1761. *data = (tx_preset & QSFP_TX_EQ_APPLY_SMASK) >>
  1762. QSFP_TX_EQ_APPLY_SHIFT;
  1763. else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR)
  1764. *data = (tx_preset & QSFP_TX_CDR_SMASK) >>
  1765. QSFP_TX_CDR_SHIFT;
  1766. else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ)
  1767. *data = (tx_preset & QSFP_TX_EQ_SMASK) >>
  1768. QSFP_TX_EQ_SHIFT;
  1769. break;
  1770. case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
  1771. case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
  1772. default:
  1773. break;
  1774. }
  1775. }
  1776. static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
  1777. int field, u32 *field_len_bits,
  1778. u32 *field_start_bits)
  1779. {
  1780. struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
  1781. u32 *src_ptr = NULL;
  1782. if (!pcfgcache->cache_valid)
  1783. return -EINVAL;
  1784. switch (table) {
  1785. case PLATFORM_CONFIG_SYSTEM_TABLE:
  1786. /* fall through */
  1787. case PLATFORM_CONFIG_PORT_TABLE:
  1788. /* fall through */
  1789. case PLATFORM_CONFIG_RX_PRESET_TABLE:
  1790. /* fall through */
  1791. case PLATFORM_CONFIG_TX_PRESET_TABLE:
  1792. /* fall through */
  1793. case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
  1794. /* fall through */
  1795. case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
  1796. if (field && field < platform_config_table_limits[table])
  1797. src_ptr =
  1798. pcfgcache->config_tables[table].table_metadata + field;
  1799. break;
  1800. default:
  1801. dd_dev_info(dd, "%s: Unknown table\n", __func__);
  1802. break;
  1803. }
  1804. if (!src_ptr)
  1805. return -EINVAL;
  1806. if (field_start_bits)
  1807. *field_start_bits = *src_ptr &
  1808. ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
  1809. if (field_len_bits)
  1810. *field_len_bits = (*src_ptr >> METADATA_TABLE_FIELD_LEN_SHIFT)
  1811. & ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
  1812. return 0;
  1813. }
  1814. /* This is the central interface to getting data out of the platform config
  1815. * file. It depends on parse_platform_config() having populated the
  1816. * platform_config_cache in hfi1_devdata, and checks the cache_valid member to
  1817. * validate the sanity of the cache.
  1818. *
  1819. * The non-obvious parameters:
  1820. * @table_index: Acts as a look up key into which instance of the tables the
  1821. * relevant field is fetched from.
  1822. *
  1823. * This applies to the data tables that have multiple instances. The port table
  1824. * is an exception to this rule as each HFI only has one port and thus the
  1825. * relevant table can be distinguished by hfi_id.
  1826. *
  1827. * @data: pointer to memory that will be populated with the field requested.
  1828. * @len: length of memory pointed by @data in bytes.
  1829. */
/*
 * Fetch one field from the cached platform configuration into *data
 * (caller buffer of @len bytes, zeroed before use).  For integrated
 * platforms the value comes from ppd instead of the cached file.
 * Returns 0 on success or -EINVAL on any validation failure.
 */
int get_platform_config_field(struct hfi1_devdata *dd,
			      enum platform_config_table_type_encoding
			      table_type, int table_index, int field_index,
			      u32 *data, u32 len)
{
	int ret = 0, wlen = 0, seek = 0;
	u32 field_len_bits = 0, field_start_bits = 0, *src_ptr = NULL;
	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
	struct hfi1_pportdata *ppd = dd->pport;

	/* zero the output up front so callers see 0 on any early exit */
	if (data)
		memset(data, 0, len);
	else
		return -EINVAL;

	if (ppd->config_from_scratch) {
		/*
		 * Use saved configuration from ppd for integrated platforms
		 */
		get_integrated_platform_config_field(dd, table_type,
						     field_index, data);
		return 0;
	}

	ret = get_platform_fw_field_metadata(dd, table_type, field_index,
					     &field_len_bits,
					     &field_start_bits);
	if (ret)
		return -EINVAL;

	/* Convert length to bits */
	len *= 8;

	/* Our metadata function checked cache_valid and field_index for us */
	switch (table_type) {
	case PLATFORM_CONFIG_SYSTEM_TABLE:
		src_ptr = pcfgcache->config_tables[table_type].table;

		/*
		 * System-table fields other than the power class are copied
		 * byte-wise rather than bit-extracted.
		 */
		if (field_index != SYSTEM_TABLE_QSFP_POWER_CLASS_MAX) {
			if (len < field_len_bits)
				return -EINVAL;

			seek = field_start_bits / 8;
			wlen = field_len_bits / 8;

			src_ptr = (u32 *)((u8 *)src_ptr + seek);

			/*
			 * We expect the field to be byte aligned and whole byte
			 * lengths if we are here
			 */
			memcpy(data, src_ptr, wlen);
			return 0;
		}
		break;
	case PLATFORM_CONFIG_PORT_TABLE:
		/* Port table is 4 DWORDS */
		src_ptr = dd->hfi1_id ?
			pcfgcache->config_tables[table_type].table + 4 :
			pcfgcache->config_tables[table_type].table;
		break;
	case PLATFORM_CONFIG_RX_PRESET_TABLE:
		/* fall through */
	case PLATFORM_CONFIG_TX_PRESET_TABLE:
		/* fall through */
	case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
		/* fall through */
	case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
		/* one DWORD per table instance; select by table_index */
		src_ptr = pcfgcache->config_tables[table_type].table;

		if (table_index <
			pcfgcache->config_tables[table_type].num_table)
			src_ptr += table_index;
		else
			src_ptr = NULL;
		break;
	default:
		dd_dev_info(dd, "%s: Unknown table\n", __func__);
		break;
	}

	if (!src_ptr || len < field_len_bits)
		return -EINVAL;

	/*
	 * Generic bit extraction from a single DWORD.
	 * NOTE(review): this assumes a field never straddles a 32-bit
	 * boundary and that field_len_bits < 32 (a 32-bit shift would be
	 * undefined) - confirm against the metadata generator.
	 */
	src_ptr += (field_start_bits / 32);
	*data = (*src_ptr >> (field_start_bits % 32)) &
		((1 << field_len_bits) - 1);

	return 0;
}
  1907. /*
  1908. * Download the firmware needed for the Gen3 PCIe SerDes. An update
  1909. * to the SBus firmware is needed before updating the PCIe firmware.
  1910. *
  1911. * Note: caller must be holding the SBus resource.
  1912. */
  1913. int load_pcie_firmware(struct hfi1_devdata *dd)
  1914. {
  1915. int ret = 0;
  1916. /* both firmware loads below use the SBus */
  1917. set_sbus_fast_mode(dd);
  1918. if (fw_sbus_load) {
  1919. turn_off_spicos(dd, SPICO_SBUS);
  1920. do {
  1921. ret = load_sbus_firmware(dd, &fw_sbus);
  1922. } while (retry_firmware(dd, ret));
  1923. if (ret)
  1924. goto done;
  1925. }
  1926. if (fw_pcie_serdes_load) {
  1927. dd_dev_info(dd, "Setting PCIe SerDes broadcast\n");
  1928. set_serdes_broadcast(dd, all_pcie_serdes_broadcast,
  1929. pcie_serdes_broadcast[dd->hfi1_id],
  1930. pcie_serdes_addrs[dd->hfi1_id],
  1931. NUM_PCIE_SERDES);
  1932. do {
  1933. ret = load_pcie_serdes_firmware(dd, &fw_pcie);
  1934. } while (retry_firmware(dd, ret));
  1935. if (ret)
  1936. goto done;
  1937. }
  1938. done:
  1939. clear_sbus_fast_mode(dd);
  1940. return ret;
  1941. }
/*
 * Read the GUID from the hardware, store it in dd.
 */
void read_guid(struct hfi1_devdata *dd)
{
	/* Take the DC out of reset to get a valid GUID value */
	write_csr(dd, CCE_DC_CTRL, 0);
	/* read back - presumably forces the write to complete before the
	 * GUID read below, matching the flush pattern used elsewhere
	 */
	(void)read_csr(dd, CCE_DC_CTRL);

	dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID);
	dd_dev_info(dd, "GUID %llx",
		    (unsigned long long)dd->base_guid);
}
  1954. /* read and display firmware version info */
  1955. static void dump_fw_version(struct hfi1_devdata *dd)
  1956. {
  1957. u32 pcie_vers[NUM_PCIE_SERDES];
  1958. u32 fabric_vers[NUM_FABRIC_SERDES];
  1959. u32 sbus_vers;
  1960. int i;
  1961. int all_same;
  1962. int ret;
  1963. u8 rcv_addr;
  1964. ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
  1965. if (ret) {
  1966. dd_dev_err(dd, "Unable to acquire SBus to read firmware versions\n");
  1967. return;
  1968. }
  1969. /* set fast mode */
  1970. set_sbus_fast_mode(dd);
  1971. /* read version for SBus Master */
  1972. sbus_request(dd, SBUS_MASTER_BROADCAST, 0x02, WRITE_SBUS_RECEIVER, 0);
  1973. sbus_request(dd, SBUS_MASTER_BROADCAST, 0x07, WRITE_SBUS_RECEIVER, 0x1);
  1974. /* wait for interrupt to be processed */
  1975. usleep_range(10000, 11000);
  1976. sbus_vers = sbus_read(dd, SBUS_MASTER_BROADCAST, 0x08, 0x1);
  1977. dd_dev_info(dd, "SBus Master firmware version 0x%08x\n", sbus_vers);
  1978. /* read version for PCIe SerDes */
  1979. all_same = 1;
  1980. pcie_vers[0] = 0;
  1981. for (i = 0; i < NUM_PCIE_SERDES; i++) {
  1982. rcv_addr = pcie_serdes_addrs[dd->hfi1_id][i];
  1983. sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
  1984. /* wait for interrupt to be processed */
  1985. usleep_range(10000, 11000);
  1986. pcie_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
  1987. if (i > 0 && pcie_vers[0] != pcie_vers[i])
  1988. all_same = 0;
  1989. }
  1990. if (all_same) {
  1991. dd_dev_info(dd, "PCIe SerDes firmware version 0x%x\n",
  1992. pcie_vers[0]);
  1993. } else {
  1994. dd_dev_warn(dd, "PCIe SerDes do not have the same firmware version\n");
  1995. for (i = 0; i < NUM_PCIE_SERDES; i++) {
  1996. dd_dev_info(dd,
  1997. "PCIe SerDes lane %d firmware version 0x%x\n",
  1998. i, pcie_vers[i]);
  1999. }
  2000. }
  2001. /* read version for fabric SerDes */
  2002. all_same = 1;
  2003. fabric_vers[0] = 0;
  2004. for (i = 0; i < NUM_FABRIC_SERDES; i++) {
  2005. rcv_addr = fabric_serdes_addrs[dd->hfi1_id][i];
  2006. sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
  2007. /* wait for interrupt to be processed */
  2008. usleep_range(10000, 11000);
  2009. fabric_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
  2010. if (i > 0 && fabric_vers[0] != fabric_vers[i])
  2011. all_same = 0;
  2012. }
  2013. if (all_same) {
  2014. dd_dev_info(dd, "Fabric SerDes firmware version 0x%x\n",
  2015. fabric_vers[0]);
  2016. } else {
  2017. dd_dev_warn(dd, "Fabric SerDes do not have the same firmware version\n");
  2018. for (i = 0; i < NUM_FABRIC_SERDES; i++) {
  2019. dd_dev_info(dd,
  2020. "Fabric SerDes lane %d firmware version 0x%x\n",
  2021. i, fabric_vers[i]);
  2022. }
  2023. }
  2024. clear_sbus_fast_mode(dd);
  2025. release_chip_resource(dd, CR_SBUS);
  2026. }