// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */

/* e1000_i210
 * e1000_i211
 */

#include <linux/types.h>
#include <linux/if_ether.h>

#include "e1000_hw.h"
#include "e1000_i210.h"

static s32 igb_update_flash_i210(struct e1000_hw *hw);

/**
 * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
 * @hw: pointer to the HW structure
 *
 * Acquire the HW semaphore to access the PHY or NVM
 **/
static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;
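
	/* Acquisition is a two-stage handshake: first wait for the SMBI
	 * (software) bit to clear, then set SWESMBI and read it back to
	 * claim the software/firmware semaphore.  The timeout scales with
	 * the NVM word size.
	 */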
	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		/* In rare circumstances, the SW semaphore may already be held
		 * unintentionally. Clear the semaphore once before giving up.
		 */
		if (hw->dev_spec._82575.clear_semaphore_once) {
			hw->dev_spec._82575.clear_semaphore_once = false;
			igb_put_hw_semaphore(hw);
			for (i = 0; i < timeout; i++) {
				swsm = rd32(E1000_SWSM);
				if (!(swsm & E1000_SWSM_SMBI))
					break;

				udelay(50);
			}
		}

		/* If we do not have the semaphore here, we have to give up. */
		if (i == timeout) {
			hw_dbg("Driver can't access device - SMBI bit is set.\n");
			return -E1000_ERR_NVM;
		}
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}

/**
 * igb_acquire_nvm_i210 - Request for access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Acquire the necessary semaphores for exclusive access to the EEPROM.
 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
 * Return successful if access grant bit set, else clear the request for
 * EEPROM access and return -E1000_ERR_NVM (-1).
 **/
static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 * igb_release_nvm_i210 - Release exclusive access to EEPROM
 * @hw: pointer to the HW structure
 *
 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
 * then release the semaphores acquired.
 **/
static void igb_release_nvm_i210(struct e1000_hw *hw)
{
	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to acquire
 *
 * Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 * will also specify which port we're acquiring the lock for.
 **/
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;
	u32 swmask = mask;
	u32 fwmask = mask << 16;
	s32 ret_val = 0;
	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

	while (i < timeout) {
		if (igb_get_hw_semaphore_i210(hw)) {
			ret_val = -E1000_ERR_SWFW_SYNC;
			goto out;
		}

		swfw_sync = rd32(E1000_SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;

		/* Firmware currently using resource (fwmask)
		 * or other software thread using resource (swmask)
		 */
		igb_put_hw_semaphore(hw);
		mdelay(5);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
		ret_val = -E1000_ERR_SWFW_SYNC;
		goto out;
	}
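
	/* Take ownership of the requested resource bit, then drop the
	 * arbitration semaphore: the SW_FW_SYNC bit itself now guards
	 * the resource.
	 */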
	swfw_sync |= swmask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
out:
	return ret_val;
}

/**
 * igb_release_swfw_sync_i210 - Release SW/FW semaphore
 * @hw: pointer to the HW structure
 * @mask: specifies which semaphore to release
 *
 * Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 * will also specify which port we're releasing the lock for.
 **/
void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	while (igb_get_hw_semaphore_i210(hw))
		; /* Empty */

	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}

/**
 * igb_read_nvm_srrd_i210 - Reads Shadow RAM using EERD register
 * @hw: pointer to the HW structure
 * @offset: offset of word in the Shadow RAM to read
 * @words: number of words to read
 * @data: word read from the Shadow RAM
 *
 * Reads a 16 bit word from the Shadow RAM using the EERD register.
 * Uses necessary synchronization semaphores.
 **/
static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
				  u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to read in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			status = igb_read_nvm_eerd(hw, offset, count,
						   data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}

/**
 * igb_write_nvm_srwr - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to Shadow RAM at offset using EEWR register.
 *
 * If igb_update_nvm_checksum is not called after this function, the
 * Shadow RAM will most likely contain an invalid checksum.
 **/
static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
			      u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, k, eewr = 0;
	u32 attempts = 100000;
	s32 ret_val = 0;

	/* A check for invalid values: offset too large, too many words,
	 * too many words for the offset, and not enough words.
	 */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg("nvm parameter(s) out of bounds\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}
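
	/* Each write goes through the SRWR register: the word address and
	 * data are packed into one command dword with the START bit set,
	 * and the DONE bit is polled before the next word is issued.
	 */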
	for (i = 0; i < words; i++) {
		eewr = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
			(data[i] << E1000_NVM_RW_REG_DATA) |
			E1000_NVM_RW_REG_START;

		wr32(E1000_SRWR, eewr);

		ret_val = -E1000_ERR_NVM;
		for (k = 0; k < attempts; k++) {
			if (E1000_NVM_RW_REG_DONE &
			    rd32(E1000_SRWR)) {
				ret_val = 0;
				break;
			}
			udelay(5);
		}

		if (ret_val) {
			hw_dbg("Shadow RAM write EEWR timed out\n");
			break;
		}
	}
out:
	return ret_val;
}

/**
 * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 * @hw: pointer to the HW structure
 * @offset: offset within the Shadow RAM to be written to
 * @words: number of words to write
 * @data: 16 bit word(s) to be written to the Shadow RAM
 *
 * Writes data to Shadow RAM at offset using EEWR register.
 *
 * If igb_update_nvm_checksum is not called after this function, the
 * data will not be committed to FLASH and also Shadow RAM will most likely
 * contain an invalid checksum.
 *
 * If error code is returned, data and Shadow RAM may be inconsistent - buffer
 * partially written.
 **/
static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
				   u16 *data)
{
	s32 status = 0;
	u16 i, count;

	/* We cannot hold synchronization semaphores for too long,
	 * because of forceful takeover procedure. However it is more efficient
	 * to write in bursts than synchronizing access for each word.
	 */
	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
			E1000_EERD_EEWR_MAX_COUNT : (words - i);
		if (!(hw->nvm.ops.acquire(hw))) {
			status = igb_write_nvm_srwr(hw, offset, count,
						    data + i);
			hw->nvm.ops.release(hw);
		} else {
			status = E1000_ERR_SWFW_SYNC;
		}

		if (status)
			break;
	}

	return status;
}

/**
 * igb_read_invm_word_i210 - Reads OTP
 * @hw: pointer to the HW structure
 * @address: the word address (aka eeprom offset) to read
 * @data: pointer to the data read
 *
 * Reads 16-bit words from the OTP. Return error when the word is not
 * stored in OTP.
 **/
static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
{
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u32 invm_dword;
	u16 i;
	u8 record_type, word_address;
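
	/* iNVM (OTP) is a flat array of typed dword records.  Walk it
	 * sequentially, skipping the multi-dword CSR-autoload and RSA-key
	 * structures, until a word-autoload record matching the requested
	 * address is found.
	 */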
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		/* Get record type */
		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
			break;
		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
				hw_dbg("Read INVM Word 0x%02x = %x\n",
				       address, *data);
				status = 0;
				break;
			}
		}
	}
	if (status)
		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
	return status;
}

/**
 * igb_read_invm_i210 - Read invm wrapper function for I210/I211
 * @hw: pointer to the HW structure
 * @offset: offset of the word to read
 * @words: number of words to read (unused)
 * @data: pointer to the data read
 *
 * Wrapper function to return data formerly found in the NVM.
 **/
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
			      u16 words __always_unused, u16 *data)
{
	s32 ret_val = 0;

	/* Only the MAC addr is required to be present in the iNVM */
	switch (offset) {
	case NVM_MAC_ADDR:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset + 1,
						   &data[1]);
		ret_val |= igb_read_invm_word_i210(hw, (u8)offset + 2,
						   &data[2]);
		if (ret_val)
			hw_dbg("MAC Addr not found in iNVM\n");
		break;
	case NVM_INIT_CTRL_2:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_INIT_CTRL_4:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_1_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_1_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_LED_0_2_CFG:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
			ret_val = 0;
		}
		break;
	case NVM_ID_LED_SETTINGS:
		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
		if (ret_val) {
			*data = ID_LED_RESERVED_FFFF;
			ret_val = 0;
		}
		break;
	case NVM_SUB_DEV_ID:
		*data = hw->subsystem_device_id;
		break;
	case NVM_SUB_VEN_ID:
		*data = hw->subsystem_vendor_id;
		break;
	case NVM_DEV_ID:
		*data = hw->device_id;
		break;
	case NVM_VEN_ID:
		*data = hw->vendor_id;
		break;
	default:
		hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
		*data = NVM_RESERVED_WORD;
		break;
	}
	return ret_val;
}

/**
 * igb_read_invm_version - Reads iNVM version and image type
 * @hw: pointer to the HW structure
 * @invm_ver: version structure for the version read
 *
 * Reads iNVM version and image type.
 **/
s32 igb_read_invm_version(struct e1000_hw *hw,
			  struct e1000_fw_version *invm_ver)
{
	u32 *record = NULL;
	u32 *next_record = NULL;
	u32 i = 0;
	u32 invm_dword = 0;
	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
					     E1000_INVM_RECORD_SIZE_IN_BYTES);
	u32 buffer[E1000_INVM_SIZE];
	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	u16 version = 0;

	/* Read iNVM memory */
	for (i = 0; i < E1000_INVM_SIZE; i++) {
		invm_dword = rd32(E1000_INVM_DATA_REG(i));
		buffer[i] = invm_dword;
	}
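
	/* The version words sit near the end of the record area, so the
	 * buffer is scanned backwards.  A version value may occupy either
	 * the low bit-field (VER_FIELD_ONE) or the high bit-field
	 * (VER_FIELD_TWO) of a dword, so both halves of each candidate
	 * record are checked.
	 */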
	/* Read version number */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have first version location used */
		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
			version = 0;
			status = 0;
			break;
		}
		/* Check if we have second version location used */
		else if ((i == 1) &&
			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
		/* Check if we have odd version location
		 * used and it is the last one used
		 */
		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
			  ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
			 (i != 1))) {
			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
				  >> 13;
			status = 0;
			break;
		}
		/* Check if we have even version location
		 * used and it is the last one used
		 */
		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
			 ((*record & 0x3) == 0)) {
			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
			status = 0;
			break;
		}
	}

	if (!status) {
		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
					>> E1000_INVM_MAJOR_SHIFT;
		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
	}
	/* Read Image Type */
	for (i = 1; i < invm_blocks; i++) {
		record = &buffer[invm_blocks - i];
		next_record = &buffer[invm_blocks - i + 1];

		/* Check if we have image type in first location used */
		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
			invm_ver->invm_img_type = 0;
			status = 0;
			break;
		}
		/* Check if we have image type in the last location used */
		else if ((((*record & 0x3) == 0) &&
			  ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
			 ((((*record & 0x3) != 0) && (i != 1)))) {
			invm_ver->invm_img_type =
				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
			status = 0;
			break;
		}
	}
	return status;
}

/**
 * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 status = 0;
	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

	if (!(hw->nvm.ops.acquire(hw))) {
		/* Replace the semaphore-grabbing read function with one
		 * that skips the semaphore, since we already hold it here.
		 */
		read_op_ptr = hw->nvm.ops.read;
		hw->nvm.ops.read = igb_read_nvm_eerd;

		status = igb_validate_nvm_checksum(hw);

		/* Revert original read operation. */
		hw->nvm.ops.read = read_op_ptr;

		hw->nvm.ops.release(hw);
	} else {
		status = E1000_ERR_SWFW_SYNC;
	}

	return status;
}

/**
 * igb_update_nvm_checksum_i210 - Update EEPROM checksum
 * @hw: pointer to the HW structure
 *
 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
 * up to the checksum.  Then calculates the EEPROM checksum and writes the
 * value to the EEPROM.  Next commit EEPROM data onto the Flash.
 **/
static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 checksum = 0;
	u16 i, nvm_data;

	/* Read the first word from the EEPROM. If this times out or fails, do
	 * not continue or we could be in for a very long wait while every
	 * EEPROM read fails
	 */
	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg("EEPROM read failed\n");
		goto out;
	}

	if (!(hw->nvm.ops.acquire(hw))) {
		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
		 * because we do not want to take the synchronization
		 * semaphores twice here.
		 */
		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
			if (ret_val) {
				hw->nvm.ops.release(hw);
				hw_dbg("NVM Read Error while updating checksum.\n");
				goto out;
			}
			checksum += nvm_data;
		}
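
		/* All words before the checksum word must sum to NVM_SUM
		 * (0xBABA), so the stored checksum is NVM_SUM minus the
		 * running total.
		 */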
		checksum = (u16)NVM_SUM - checksum;
		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
					     &checksum);
		if (ret_val) {
			hw->nvm.ops.release(hw);
			hw_dbg("NVM Write Error while updating checksum.\n");
			goto out;
		}

		hw->nvm.ops.release(hw);

		ret_val = igb_update_flash_i210(hw);
	} else {
		ret_val = -E1000_ERR_SWFW_SYNC;
	}
out:
	return ret_val;
}

/**
 * igb_pool_flash_update_done_i210 - Poll FLUDONE status
 * @hw: pointer to the HW structure
 *
 * Polls the EECD register until the flash-update-done (FLUDONE) bit is
 * set or the attempt limit is reached.
 **/
static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
{
	s32 ret_val = -E1000_ERR_NVM;
	u32 i, reg;

	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
		reg = rd32(E1000_EECD);
		if (reg & E1000_EECD_FLUDONE_I210) {
			ret_val = 0;
			break;
		}
		udelay(5);
	}

	return ret_val;
}

/**
 * igb_get_flash_presence_i210 - Check if flash device is detected
 * @hw: pointer to the HW structure
 *
 * Returns true if the EECD register reports an attached flash device.
 **/
bool igb_get_flash_presence_i210(struct e1000_hw *hw)
{
	u32 eec = 0;
	bool ret_val = false;

	eec = rd32(E1000_EECD);
	if (eec & E1000_EECD_FLASH_DETECTED_I210)
		ret_val = true;

	return ret_val;
}

/**
 * igb_update_flash_i210 - Commit EEPROM to the flash
 * @hw: pointer to the HW structure
 *
 * Commits the Shadow RAM contents to flash by setting the flash-update
 * (FLUPD) bit in EECD and polling for completion.
 **/
static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 flup;

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val == -E1000_ERR_NVM) {
		hw_dbg("Flash update time out\n");
		goto out;
	}

	flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
	wr32(E1000_EECD, flup);

	ret_val = igb_pool_flash_update_done_i210(hw);
	if (ret_val)
		hw_dbg("Flash update time out\n");
	else
		hw_dbg("Flash update complete\n");

out:
	return ret_val;
}

/**
 * igb_valid_led_default_i210 - Verify a valid default LED config
 * @hw: pointer to the HW structure
 * @data: pointer to the LED settings read from the NVM (EEPROM)
 *
 * Read the EEPROM for the current default LED configuration.  If the
 * LED configuration is not valid, set to a valid LED configuration.
 **/
s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_I210_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT_I210;
			break;
		}
	}
out:
	return ret_val;
}

/**
 * __igb_access_xmdio_reg - Read/write XMDIO register
 * @hw: pointer to the HW structure
 * @address: XMDIO address to program
 * @dev_addr: device address to program
 * @data: pointer to value to read/write from/to the XMDIO address
 * @read: boolean flag to indicate read or write
 **/
static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
				  u8 dev_addr, u16 *data, bool read)
{
	s32 ret_val;
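
	/* Indirect MMD access sequence: select the MMD device in MMDAC,
	 * write the register address to MMDAAD, switch MMDAC to data mode,
	 * then read or write the data through MMDAAD.
	 */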
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
	if (ret_val)
		return ret_val;

	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
					dev_addr);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
	if (ret_val)
		return ret_val;

	/* Recalibrate the device back to 0 */
	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);

	return ret_val;
}

/**
 * igb_read_xmdio_reg - Read XMDIO register
 * @hw: pointer to the HW structure
 * @addr: XMDIO address to program
 * @dev_addr: device address to program
 * @data: value to be read from the XMDIO address
 **/
s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
}

/**
 * igb_write_xmdio_reg - Write XMDIO register
 * @hw: pointer to the HW structure
 * @addr: XMDIO address to program
 * @dev_addr: device address to program
 * @data: value to be written to the XMDIO address
 **/
s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
	return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}

/**
 * igb_init_nvm_params_i210 - Init NVM func ptrs.
 * @hw: pointer to the HW structure
 **/
s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	struct e1000_nvm_info *nvm = &hw->nvm;

	nvm->ops.acquire = igb_acquire_nvm_i210;
	nvm->ops.release = igb_release_nvm_i210;
	nvm->ops.valid_led_default = igb_valid_led_default_i210;

	/* NVM Function Pointers */
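	/* Parts with an external flash use the Shadow RAM read/write path;
	 * flash-less parts fall back to the read-only iNVM, so the write,
	 * validate and update hooks stay NULL.
	 */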
	if (igb_get_flash_presence_i210(hw)) {
		hw->nvm.type = e1000_nvm_flash_hw;
		nvm->ops.read = igb_read_nvm_srrd_i210;
		nvm->ops.write = igb_write_nvm_srwr_i210;
		nvm->ops.validate = igb_validate_nvm_checksum_i210;
		nvm->ops.update = igb_update_nvm_checksum_i210;
	} else {
		hw->nvm.type = e1000_nvm_invm;
		nvm->ops.read = igb_read_invm_i210;
		nvm->ops.write = NULL;
		nvm->ops.validate = NULL;
		nvm->ops.update = NULL;
	}
	return ret_val;
}

/**
 * igb_pll_workaround_i210 - Work around the I210 PLL errata
 * @hw: pointer to the HW structure
 *
 * Works around an errata in the PLL circuit where it occasionally
 * provides the wrong clock frequency after power up.
 **/
s32 igb_pll_workaround_i210(struct e1000_hw *hw)
{
	s32 ret_val;
	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
	u16 nvm_word, phy_word, pci_word, tmp_nvm;
	int i;

	/* Get and set needed register values */
	wuc = rd32(E1000_WUC);
	mdicnfg = rd32(E1000_MDICNFG);
	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
	wr32(E1000_MDICNFG, reg_val);

	/* Get data from NVM, or set default */
	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
					  &nvm_word);
	if (ret_val)
		nvm_word = E1000_INVM_DEFAULT_AL;
	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
	phy_word = E1000_PHY_PLL_UNCONF;
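
	/* Retry up to E1000_MAX_PLL_TRIES times: while the PHY still
	 * reports an unconfigured PLL, reset the internal PHY, cycle the
	 * device through D3hot and back with a modified autoload word,
	 * then re-read the PLL state.
	 */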
	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
		/* check current state directly from internal PHY */
		igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
		if ((phy_word & E1000_PHY_PLL_UNCONF)
		    != E1000_PHY_PLL_UNCONF) {
			ret_val = 0;
			break;
		} else {
			ret_val = -E1000_ERR_PHY;
		}
		/* directly reset the internal PHY */
		ctrl = rd32(E1000_CTRL);
		wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);

		ctrl_ext = rd32(E1000_CTRL_EXT);
		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
		wr32(E1000_CTRL_EXT, ctrl_ext);

		wr32(E1000_WUC, 0);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		pci_word |= E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		usleep_range(1000, 2000);
		pci_word &= ~E1000_PCI_PMCSR_D3;
		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
		wr32(E1000_EEARBC_I210, reg_val);

		/* restore WUC register */
		wr32(E1000_WUC, wuc);
	}
	igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
	/* restore MDICNFG setting */
	wr32(E1000_MDICNFG, mdicnfg);
	return ret_val;
}
  786. /**
  787. * igb_get_cfg_done_i210 - Read config done bit
  788. * @hw: pointer to the HW structure
  789. *
  790. * Read the management control register for the config done bit for
  791. * completion status. NOTE: silicon which is EEPROM-less will fail trying
  792. * to read the config done bit, so an error is *ONLY* logged and returns
  793. * 0. If we were to return with error, EEPROM-less silicon
  794. * would not be able to be reset or change link.
  795. **/
  796. s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
  797. {
  798. s32 timeout = PHY_CFG_TIMEOUT;
  799. u32 mask = E1000_NVM_CFG_DONE_PORT_0;
  800. while (timeout) {
  801. if (rd32(E1000_EEMNGCTL_I210) & mask)
  802. break;
  803. usleep_range(1000, 2000);
  804. timeout--;
  805. }
  806. if (!timeout)
  807. hw_dbg("MNG configuration cycle has not completed.\n");
  808. return 0;
  809. }