platform.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078
  1. /*
  2. * Copyright(c) 2015, 2016 Intel Corporation.
  3. *
  4. * This file is provided under a dual BSD/GPLv2 license. When using or
  5. * redistributing this file, you may do so under either license.
  6. *
  7. * GPL LICENSE SUMMARY
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of version 2 of the GNU General Public License as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * General Public License for more details.
  17. *
  18. * BSD LICENSE
  19. *
  20. * Redistribution and use in source and binary forms, with or without
  21. * modification, are permitted provided that the following conditions
  22. * are met:
  23. *
  24. * - Redistributions of source code must retain the above copyright
  25. * notice, this list of conditions and the following disclaimer.
  26. * - Redistributions in binary form must reproduce the above copyright
  27. * notice, this list of conditions and the following disclaimer in
  28. * the documentation and/or other materials provided with the
  29. * distribution.
  30. * - Neither the name of Intel Corporation nor the names of its
  31. * contributors may be used to endorse or promote products derived
  32. * from this software without specific prior written permission.
  33. *
  34. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  35. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  36. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  37. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  38. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  39. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  40. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  41. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  42. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  43. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  44. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  45. *
  46. */
  47. #include <linux/firmware.h>
  48. #include "hfi.h"
  49. #include "efivar.h"
  50. #include "eprom.h"
  51. #define DEFAULT_PLATFORM_CONFIG_NAME "hfi1_platform.dat"
/*
 * Validate the configuration bitmap the BIOS wrote into the ASIC
 * scratch registers.
 *
 * Scratch register 0 holds a version field and a 16-bit checksum; the
 * remaining ASIC_NUM_SCRATCH registers hold the configuration data.
 * The checksum is a 16-bit end-around-carry sum whose stored value is
 * the complement of the computed sum.
 *
 * Return: 1 if the bitmap checksum is valid, 0 if the bitmap is
 * uninitialized (version == 0) or corrupted.
 */
static int validate_scratch_checksum(struct hfi1_devdata *dd)
{
	u64 checksum = 0, temp_scratch = 0;
	int i, j, version;

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	version = (temp_scratch & BITMAP_VERSION_SMASK) >> BITMAP_VERSION_SHIFT;

	/* Prevent power on default of all zeroes from passing checksum */
	if (!version) {
		dd_dev_err(dd, "%s: Config bitmap uninitialized\n", __func__);
		dd_dev_err(dd,
			   "%s: Please update your BIOS to support active channels\n",
			   __func__);
		return 0;
	}

	/*
	 * ASIC scratch 0 only contains the checksum and bitmap version as
	 * fields of interest, both of which are handled separately from the
	 * loop below, so skip it
	 */
	checksum += version;
	for (i = 1; i < ASIC_NUM_SCRATCH; i++) {
		temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH + (8 * i));
		/* sum each 64-bit register as four 16-bit words */
		for (j = sizeof(u64); j != 0; j -= 2) {
			checksum += (temp_scratch & 0xFFFF);
			temp_scratch >>= 16;
		}
	}

	/* fold carries back into the low 16 bits (end-around carry) */
	while (checksum >> 16)
		checksum = (checksum & CHECKSUM_MASK) + (checksum >> 16);

	temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH);
	temp_scratch &= CHECKSUM_SMASK;
	temp_scratch >>= CHECKSUM_SHIFT;

	/* stored value is the complement: sum + stored must equal 0xFFFF */
	if (checksum + temp_scratch == 0xFFFF)
		return 1;

	dd_dev_err(dd, "%s: Configuration bitmap corrupted\n", __func__);
	return 0;
}
  89. static void save_platform_config_fields(struct hfi1_devdata *dd)
  90. {
  91. struct hfi1_pportdata *ppd = dd->pport;
  92. u64 temp_scratch = 0, temp_dest = 0;
  93. temp_scratch = read_csr(dd, ASIC_CFG_SCRATCH_1);
  94. temp_dest = temp_scratch &
  95. (dd->hfi1_id ? PORT1_PORT_TYPE_SMASK :
  96. PORT0_PORT_TYPE_SMASK);
  97. ppd->port_type = temp_dest >>
  98. (dd->hfi1_id ? PORT1_PORT_TYPE_SHIFT :
  99. PORT0_PORT_TYPE_SHIFT);
  100. temp_dest = temp_scratch &
  101. (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SMASK :
  102. PORT0_LOCAL_ATTEN_SMASK);
  103. ppd->local_atten = temp_dest >>
  104. (dd->hfi1_id ? PORT1_LOCAL_ATTEN_SHIFT :
  105. PORT0_LOCAL_ATTEN_SHIFT);
  106. temp_dest = temp_scratch &
  107. (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SMASK :
  108. PORT0_REMOTE_ATTEN_SMASK);
  109. ppd->remote_atten = temp_dest >>
  110. (dd->hfi1_id ? PORT1_REMOTE_ATTEN_SHIFT :
  111. PORT0_REMOTE_ATTEN_SHIFT);
  112. temp_dest = temp_scratch &
  113. (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SMASK :
  114. PORT0_DEFAULT_ATTEN_SMASK);
  115. ppd->default_atten = temp_dest >>
  116. (dd->hfi1_id ? PORT1_DEFAULT_ATTEN_SHIFT :
  117. PORT0_DEFAULT_ATTEN_SHIFT);
  118. temp_scratch = read_csr(dd, dd->hfi1_id ? ASIC_CFG_SCRATCH_3 :
  119. ASIC_CFG_SCRATCH_2);
  120. ppd->tx_preset_eq = (temp_scratch & TX_EQ_SMASK) >> TX_EQ_SHIFT;
  121. ppd->tx_preset_noeq = (temp_scratch & TX_NO_EQ_SMASK) >> TX_NO_EQ_SHIFT;
  122. ppd->rx_preset = (temp_scratch & RX_SMASK) >> RX_SHIFT;
  123. ppd->max_power_class = (temp_scratch & QSFP_MAX_POWER_SMASK) >>
  124. QSFP_MAX_POWER_SHIFT;
  125. ppd->config_from_scratch = true;
  126. }
  127. void get_platform_config(struct hfi1_devdata *dd)
  128. {
  129. int ret = 0;
  130. u8 *temp_platform_config = NULL;
  131. u32 esize;
  132. const struct firmware *platform_config_file = NULL;
  133. if (is_integrated(dd)) {
  134. if (validate_scratch_checksum(dd)) {
  135. save_platform_config_fields(dd);
  136. return;
  137. }
  138. } else {
  139. ret = eprom_read_platform_config(dd,
  140. (void **)&temp_platform_config,
  141. &esize);
  142. if (!ret) {
  143. /* success */
  144. dd->platform_config.data = temp_platform_config;
  145. dd->platform_config.size = esize;
  146. return;
  147. }
  148. }
  149. dd_dev_err(dd,
  150. "%s: Failed to get platform config, falling back to sub-optimal default file\n",
  151. __func__);
  152. ret = request_firmware(&platform_config_file,
  153. DEFAULT_PLATFORM_CONFIG_NAME,
  154. &dd->pcidev->dev);
  155. if (ret) {
  156. dd_dev_err(dd,
  157. "%s: No default platform config file found\n",
  158. __func__);
  159. return;
  160. }
  161. /*
  162. * Allocate separate memory block to store data and free firmware
  163. * structure. This allows free_platform_config to treat EPROM and
  164. * fallback configs in the same manner.
  165. */
  166. dd->platform_config.data = kmemdup(platform_config_file->data,
  167. platform_config_file->size,
  168. GFP_KERNEL);
  169. dd->platform_config.size = platform_config_file->size;
  170. release_firmware(platform_config_file);
  171. }
/*
 * Release the platform configuration buffer allocated by
 * get_platform_config() (EPROM read or firmware-file copy).
 * kfree(NULL) is a no-op; clearing the pointer guards against
 * double free.
 */
void free_platform_config(struct hfi1_devdata *dd)
{
	/* Release memory allocated for eprom or fallback file read. */
	kfree(dd->platform_config.data);
	dd->platform_config.data = NULL;
}
  178. void get_port_type(struct hfi1_pportdata *ppd)
  179. {
  180. int ret;
  181. u32 temp;
  182. ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
  183. PORT_TABLE_PORT_TYPE, &temp,
  184. 4);
  185. if (ret) {
  186. ppd->port_type = PORT_TYPE_UNKNOWN;
  187. return;
  188. }
  189. ppd->port_type = temp;
  190. }
  191. int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
  192. {
  193. u8 tx_ctrl_byte = on ? 0x0 : 0xF;
  194. int ret = 0;
  195. ret = qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_TX_CTRL_BYTE_OFFS,
  196. &tx_ctrl_byte, 1);
  197. /* we expected 1, so consider 0 an error */
  198. if (ret == 0)
  199. ret = -EIO;
  200. else if (ret == 1)
  201. ret = 0;
  202. return ret;
  203. }
/*
 * Qualify the cable's power class against the system-table maximum and
 * disable the port when the cable exceeds it.
 *
 * Return: 0 on success, -EPERM when the port is disabled for power
 * policy, or the error from the platform-config lookup.
 */
static int qual_power(struct hfi1_pportdata *ppd)
{
	u32 cable_power_class = 0, power_class_max = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret = 0;

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_SYSTEM_TABLE, 0,
		SYSTEM_TABLE_QSFP_POWER_CLASS_MAX, &power_class_max, 4);
	if (ret)
		return ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > power_class_max)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY);

	/*
	 * Checked against the stored reason (not just the comparison above)
	 * so a power-policy disable set elsewhere is also reported.
	 */
	if (ppd->offline_disabled_reason ==
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_POWER_POLICY)) {
		dd_dev_err(
			ppd->dd,
			"%s: Port disabled due to system power restrictions\n",
			__func__);
		ret = -EPERM;
	}
	return ret;
}
/*
 * Qualify the module's advertised nominal bit rate against the enabled
 * link speeds; disable the port when the cable is too slow.
 *
 * Return: 0 on success, -EPERM when the port is disabled for link
 * speed policy.
 */
static int qual_bitrate(struct hfi1_pportdata *ppd)
{
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	/*
	 * 25G: nominal bit rate byte presumably in units of 250 Mbps, so
	 * values below 0x64 (100 -> 25 Gbps) fail — TODO confirm against
	 * SFF-8636.
	 */
	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G) &&
	    cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	/*
	 * 12.5G: presumably units of 100 Mbps, so values below 0x7D
	 * (125 -> 12.5 Gbps) fail — TODO confirm.
	 */
	if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G) &&
	    cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D)
		ppd->offline_disabled_reason =
			HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY);

	if (ppd->offline_disabled_reason ==
	    HFI1_ODR_MASK(OPA_LINKDOWN_REASON_LINKSPEED_POLICY)) {
		dd_dev_err(
			ppd->dd,
			"%s: Cable failed bitrate check, disabling port\n",
			__func__);
		return -EPERM;
	}
	return 0;
}
/*
 * Move a QSFP module above power class 1 into its full power mode by
 * writing the module's power control byte, then wait for the module to
 * come out of low power mode.
 *
 * Return: 0 on success, -EIO when a QSFP write fails.
 */
static int set_qsfp_high_power(struct hfi1_pportdata *ppd)
{
	u8 cable_power_class = 0, power_ctrl_byte = 0;
	u8 *cache = ppd->qsfp_info.cache;
	int ret;

	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class > QSFP_POWER_CLASS_1) {
		power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS];

		/*
		 * Set bit 0, clear bit 1 — presumably power override on and
		 * power set/LPMode off per SFF-8636 byte 93; TODO confirm.
		 */
		power_ctrl_byte |= 1;
		power_ctrl_byte &= ~(0x2);

		ret = qsfp_write(ppd, ppd->dd->hfi1_id,
				 QSFP_PWR_CTRL_BYTE_OFFS,
				 &power_ctrl_byte, 1);
		/* qsfp_write returns bytes written; expect exactly 1 */
		if (ret != 1)
			return -EIO;

		if (cable_power_class > QSFP_POWER_CLASS_4) {
			/* classes 5+ need the high-power class enable bit */
			power_ctrl_byte |= (1 << 2);
			ret = qsfp_write(ppd, ppd->dd->hfi1_id,
					 QSFP_PWR_CTRL_BYTE_OFFS,
					 &power_ctrl_byte, 1);
			if (ret != 1)
				return -EIO;
		}

		/* SFF 8679 rev 1.7 LPMode Deassert time */
		msleep(300);
	}
	return 0;
}
/*
 * Compute the RX CDR (clock/data recovery) enable bits — the low nibble
 * of *cdr_ctrl_byte, one bit per lane — from the platform RX preset
 * table. Only modules that have an RX CDR with bypass support are
 * touched; low-power modules always get the CDR enabled.
 */
static void apply_rx_cdr(struct hfi1_pportdata *ppd,
			 u32 rx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 rx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	/* nothing to do unless RX CDR is present AND bypass is supported */
	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x40)))
		return;

	/* RX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn RX CDR on */
		*cdr_ctrl_byte |= 0xF;
		return;
	}

	/*
	 * NOTE(review): the lookup's return value is ignored; on failure
	 * rx_preset is read uninitialized — verify intent.
	 */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR_APPLY,
		&rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_CDR,
		&rx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	rx_preset = (rx_preset | (rx_preset << 1) |
		     (rx_preset << 2) | (rx_preset << 3));

	if (rx_preset) {
		*cdr_ctrl_byte |= rx_preset;
	} else {
		/* rx_preset is 0 here, so this clears the whole byte ... */
		*cdr_ctrl_byte &= rx_preset;
		/* Preserve current TX CDR status */
		*cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0);
	}
}
/*
 * Compute the TX CDR enable bits — the high nibble of *cdr_ctrl_byte,
 * one bit per lane — from the platform TX preset table. Mirror image of
 * apply_rx_cdr(): only modules with a bypassable TX CDR are touched,
 * and low-power modules always get the CDR enabled.
 */
static void apply_tx_cdr(struct hfi1_pportdata *ppd,
			 u32 tx_preset_index,
			 u8 *cdr_ctrl_byte)
{
	u32 tx_preset;
	u8 *cache = ppd->qsfp_info.cache;
	int cable_power_class;

	/* nothing to do unless TX CDR is present AND bypass is supported */
	if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) &&
	      (cache[QSFP_CDR_INFO_OFFS] & 0x80)))
		return;

	/* TX CDR present, bypass supported */
	cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);

	if (cable_power_class <= QSFP_POWER_CLASS_3) {
		/* Power class <= 3, ignore config & turn TX CDR on */
		*cdr_ctrl_byte |= 0xF0;
		return;
	}

	/*
	 * NOTE(review): lookup return ignored; on failure tx_preset is
	 * read uninitialized — same pattern as apply_rx_cdr().
	 */
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR_APPLY, &tx_preset, 4);

	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_CDR_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd,
		PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index,
		TX_PRESET_TABLE_QSFP_TX_CDR, &tx_preset, 4);

	/* Expand cdr setting to all 4 lanes */
	tx_preset = (tx_preset | (tx_preset << 1) |
		     (tx_preset << 2) | (tx_preset << 3));

	if (tx_preset)
		*cdr_ctrl_byte |= (tx_preset << 4);
	else
		/* Preserve current/determined RX CDR status */
		*cdr_ctrl_byte &= ((tx_preset << 4) | 0xF);
}
  363. static void apply_cdr_settings(
  364. struct hfi1_pportdata *ppd, u32 rx_preset_index,
  365. u32 tx_preset_index)
  366. {
  367. u8 *cache = ppd->qsfp_info.cache;
  368. u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS];
  369. apply_rx_cdr(ppd, rx_preset_index, &cdr_ctrl_byte);
  370. apply_tx_cdr(ppd, tx_preset_index, &cdr_ctrl_byte);
  371. qsfp_write(ppd, ppd->dd->hfi1_id, QSFP_CDR_CTRL_BYTE_OFFS,
  372. &cdr_ctrl_byte, 1);
  373. }
/*
 * Disable a module's adaptive (automatic) TX equalization when the
 * module advertises that capability.
 */
static void apply_tx_eq_auto(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	u8 tx_eq;

	/* bit 3 of the EQ info byte: adaptive TX EQ capability */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8))
		return;

	/* Disable adaptive TX EQ if present */
	/*
	 * The cache is addressed in 128-byte pages ((128 * 3) + 241 is
	 * page-3 byte 241) while the i2c write uses the flat address
	 * ((256 * 3) + 241). Clearing the low nibble presumably clears the
	 * per-lane adaptive-enable bits — TODO confirm against SFF-8636.
	 */
	tx_eq = cache[(128 * 3) + 241];
	tx_eq &= 0xF0;
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 241, &tx_eq, 1);
}
/*
 * Apply the programmed (fixed) TX equalization from the platform TX
 * preset table, clamping the requested value to the maximum the module
 * advertises, then write it to both TX EQ control bytes (two lanes per
 * byte).
 */
static void apply_tx_eq_prog(struct hfi1_pportdata *ppd, u32 tx_preset_index)
{
	u8 *cache = ppd->qsfp_info.cache;
	u32 tx_preset;
	u8 tx_eq;

	/* bit 2 of the EQ info byte: programmable TX EQ capability */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4))
		return;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ_APPLY,
		&tx_preset, 4);
	if (!tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX_EQ_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_QSFP_TX_EQ,
		&tx_preset, 4);

	/*
	 * (128 * 3) + 224 == 608: page-3 byte whose high nibble holds the
	 * module's maximum supported TX EQ; clamp the request to it.
	 */
	if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: TX EQ %x unsupported\n",
			__func__, tx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying EQ %x\n",
			__func__, cache[608] & 0xF0);

		tx_preset = (cache[608] & 0xF0) >> 4;
	}

	/* replicate the nibble: each control byte covers two lanes */
	tx_eq = tx_preset | (tx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 234, &tx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 235, &tx_eq, 1);
}
/*
 * Apply RX emphasis from the platform RX preset table, clamping the
 * requested value to the maximum the module advertises, then write it
 * to both RX emphasis control bytes (two lanes per byte).
 */
static void apply_rx_eq_emp(struct hfi1_pportdata *ppd, u32 rx_preset_index)
{
	u32 rx_preset;
	u8 rx_eq, *cache = ppd->qsfp_info.cache;

	/* bit 1 of the EQ info byte: RX emphasis capability */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2))
		return;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP_APPLY,
		&rx_preset, 4);
	if (!rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: RX_EMP_APPLY is set to disabled\n",
			__func__);
		return;
	}
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_RX_PRESET_TABLE,
		rx_preset_index, RX_PRESET_TABLE_QSFP_RX_EMP,
		&rx_preset, 4);

	/*
	 * (128 * 3) + 224 == 608: page-3 byte whose low nibble holds the
	 * module's maximum supported RX emphasis; clamp to it.
	 */
	if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) {
		dd_dev_info(
			ppd->dd,
			"%s: Requested RX EMP %x\n",
			__func__, rx_preset);

		dd_dev_info(
			ppd->dd,
			"%s: Applying supported EMP %x\n",
			__func__, cache[608] & 0xF);

		rx_preset = cache[608] & 0xF;
	}

	/* replicate the nibble: each control byte covers two lanes */
	rx_eq = rx_preset | (rx_preset << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 236, &rx_eq, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 237, &rx_eq, 1);
}
  458. static void apply_eq_settings(struct hfi1_pportdata *ppd,
  459. u32 rx_preset_index, u32 tx_preset_index)
  460. {
  461. u8 *cache = ppd->qsfp_info.cache;
  462. /* no point going on w/o a page 3 */
  463. if (cache[2] & 4) {
  464. dd_dev_info(ppd->dd,
  465. "%s: Upper page 03 not present\n",
  466. __func__);
  467. return;
  468. }
  469. apply_tx_eq_auto(ppd);
  470. apply_tx_eq_prog(ppd, tx_preset_index);
  471. apply_rx_eq_emp(ppd, rx_preset_index);
  472. }
/*
 * Apply RX output amplitude from the platform RX preset table, limited
 * to the amplitude values the module advertises as supported.
 */
static void apply_rx_amplitude_settings(
	struct hfi1_pportdata *ppd, u32 rx_preset_index,
	u32 tx_preset_index)
{
	u32 rx_preset;
	u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache;

	/* no point going on w/o a page 3 */
	if (cache[2] & 4) {
		dd_dev_info(ppd->dd,
			    "%s: Upper page 03 not present\n",
			    __func__);
		return;
	}

	/*
	 * bit 0 of the EQ info byte: RX amplitude programming capability.
	 * NOTE(review): this guard checks the module's capability yet
	 * reuses the "RX_AMP_APPLY is set to disabled" text of the config
	 * check below — message likely misleading; verify intent.
	 */
	if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}

	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP_APPLY,
				  &rx_preset, 4);

	if (!rx_preset) {
		dd_dev_info(ppd->dd,
			    "%s: RX_AMP_APPLY is set to disabled\n",
			    __func__);
		return;
	}
	get_platform_config_field(ppd->dd,
				  PLATFORM_CONFIG_RX_PRESET_TABLE,
				  rx_preset_index,
				  RX_PRESET_TABLE_QSFP_RX_AMP,
				  &rx_preset, 4);

	dd_dev_info(ppd->dd,
		    "%s: Requested RX AMP %x\n",
		    __func__,
		    rx_preset);

	/*
	 * Walk the supported-amplitude bits ((128 * 3) + 225 is page-3
	 * byte 225): remember the highest supported value, stopping early
	 * on an exact match with the request.
	 */
	for (i = 0; i < 4; i++) {
		if (cache[(128 * 3) + 225] & (1 << i)) {
			preferred = i;
			if (preferred == rx_preset)
				break;
		}
	}

	/*
	 * Verify that preferred RX amplitude is not just a
	 * fall through of the default
	 */
	if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) {
		dd_dev_info(ppd->dd, "No supported RX AMP, not applying\n");
		return;
	}

	dd_dev_info(ppd->dd,
		    "%s: Applying RX AMP %x\n", __func__, preferred);

	/* replicate the nibble: each control byte covers two lanes */
	rx_amp = preferred | (preferred << 4);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 238, &rx_amp, 1);
	qsfp_write(ppd, ppd->dd->hfi1_id, (256 * 3) + 239, &rx_amp, 1);
}
  533. #define OPA_INVALID_INDEX 0xFFF
  534. static void apply_tx_lanes(struct hfi1_pportdata *ppd, u8 field_id,
  535. u32 config_data, const char *message)
  536. {
  537. u8 i;
  538. int ret = HCMD_SUCCESS;
  539. for (i = 0; i < 4; i++) {
  540. ret = load_8051_config(ppd->dd, field_id, i, config_data);
  541. if (ret != HCMD_SUCCESS) {
  542. dd_dev_err(
  543. ppd->dd,
  544. "%s: %s for lane %u failed\n",
  545. message, __func__, i);
  546. }
  547. }
  548. }
  549. /*
  550. * Return a special SerDes setting for low power AOC cables. The power class
  551. * threshold and setting being used were all found by empirical testing.
  552. *
  553. * Summary of the logic:
  554. *
  555. * if (QSFP and QSFP_TYPE == AOC and QSFP_POWER_CLASS < 4)
  556. * return 0xe
  557. * return 0; // leave at default
  558. */
static u8 aoc_low_power_setting(struct hfi1_pportdata *ppd)
{
	u8 *cache = ppd->qsfp_info.cache;
	int power_class;

	/* QSFP only */
	if (ppd->port_type != PORT_TYPE_QSFP)
		return 0; /* leave at default */

	/* active optical cables only */
	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0x0 ... 0x9: /* fallthrough */
	case 0xC: /* fallthrough */
	case 0xE:
		/* active AOC */
		power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]);
		if (power_class < QSFP_POWER_CLASS_4)
			return 0xe;
		/* power class >= 4 falls out of the switch to the default */
	}
	return 0; /* leave at default */
}
/*
 * Program the 8051 firmware with this port's tuning parameters: the
 * tuning method, channel loss (total attenuation), the cable's
 * capability bits, and — for limiting active cables with a valid TX
 * preset — the TX precursor/attenuation/postcursor settings.
 */
static void apply_tunings(
	struct hfi1_pportdata *ppd, u32 tx_preset_index,
	u8 tuning_method, u32 total_atten, u8 limiting_active)
{
	int ret = 0;
	u32 config_data = 0, tx_preset = 0;
	u8 precur = 0, attn = 0, postcur = 0, external_device_config = 0;
	u8 *cache = ppd->qsfp_info.cache;

	/* Pass tuning method to 8051 */
	read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			 &config_data);
	/* read-modify-write: replace only the tuning method byte */
	config_data &= ~(0xff << TUNING_METHOD_SHIFT);
	config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
	ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
			       config_data);
	if (ret != HCMD_SUCCESS)
		dd_dev_err(ppd->dd, "%s: Failed to set tuning method\n",
			   __func__);

	/* Set same channel loss for both TX and RX */
	config_data = 0 | (total_atten << 16) | (total_atten << 24);
	apply_tx_lanes(ppd, CHANNEL_LOSS_SETTINGS, config_data,
		       "Setting channel loss");

	/* Inform 8051 of cable capabilities */
	if (ppd->qsfp_info.cache_valid) {
		/* pack CDR presence and EQ capability bits into one byte */
		external_device_config =
			((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) |
			((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) |
			((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) |
			(cache[QSFP_EQ_INFO_OFFS] & 0x4);
		ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, &config_data);
		/* Clear, then set the external device config field */
		config_data &= ~(u32)0xFF;
		config_data |= external_device_config;
		ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
				       GENERAL_CONFIG, config_data);
		if (ret != HCMD_SUCCESS)
			dd_dev_err(ppd->dd,
				   "%s: Failed set ext device config params\n",
				   __func__);
	}

	if (tx_preset_index == OPA_INVALID_INDEX) {
		/* only an error for limiting-active QSFP cables */
		if (ppd->port_type == PORT_TYPE_QSFP && limiting_active)
			dd_dev_err(ppd->dd, "%s: Invalid Tx preset index\n",
				   __func__);
		return;
	}

	/* Following for limiting active channels only */
	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE, tx_preset_index,
		TX_PRESET_TABLE_PRECUR, &tx_preset, 4);
	precur = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_ATTN, &tx_preset, 4);
	attn = tx_preset;

	get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_TX_PRESET_TABLE,
		tx_preset_index, TX_PRESET_TABLE_POSTCUR, &tx_preset, 4);
	postcur = tx_preset;

	/*
	 * NOTES:
	 * o The aoc_low_power_setting is applied to all lanes even
	 *   though only lane 0's value is examined by the firmware.
	 * o A lingering low power setting after a cable swap does
	 *   not occur. On cable unplug the 8051 is reset and
	 *   restarted on cable insert. This resets all settings to
	 *   their default, erasing any previous low power setting.
	 */
	config_data = precur | (attn << 8) | (postcur << 16) |
		      (aoc_low_power_setting(ppd) << 24);

	apply_tx_lanes(ppd, TX_EQ_SETTINGS, config_data,
		       "Applying TX settings");
}
/* Must be holding the QSFP i2c resource */
/*
 * Tune an active QSFP cable: quiesce TX, qualify power class and bit
 * rate, reset the module on a revisit, raise its power class, look up
 * the TX/RX preset indices and local attenuation from the platform
 * config, apply CDR/EQ/amplitude settings, then re-enable TX.
 *
 * Return: 0 on success or a negative errno; a failed preset lookup sets
 * the corresponding output to OPA_INVALID_INDEX before returning.
 */
static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
			    u32 *ptr_rx_preset, u32 *ptr_total_atten)
{
	int ret;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	u8 *cache = ppd->qsfp_info.cache;

	ppd->qsfp_info.limiting_active = 1;

	ret = set_qsfp_tx(ppd, 0);
	if (ret)
		return ret;

	ret = qual_power(ppd);
	if (ret)
		return ret;

	ret = qual_bitrate(ppd);
	if (ret)
		return ret;

	/*
	 * We'll change the QSFP memory contents from here on out, thus we set a
	 * flag here to remind ourselves to reset the QSFP module. This prevents
	 * reuse of stale settings established in our previous pass through.
	 */
	if (ppd->qsfp_info.reset_needed) {
		ret = reset_qsfp(ppd);
		if (ret)
			return ret;
		refresh_qsfp_cache(ppd, &ppd->qsfp_info);
	} else {
		ppd->qsfp_info.reset_needed = 1;
	}

	ret = set_qsfp_high_power(ppd);
	if (ret)
		return ret;

	/* choose the EQ or no-EQ TX preset based on the module's TX EQ cap */
	if (cache[QSFP_EQ_INFO_OFFS] & 0x4) {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	} else {
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_TX_PRESET_IDX_ACTIVE_NO_EQ,
			ptr_tx_preset, 4);
		if (ret) {
			*ptr_tx_preset = OPA_INVALID_INDEX;
			return ret;
		}
	}

	ret = get_platform_config_field(
		ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
		PORT_TABLE_RX_PRESET_IDX, ptr_rx_preset, 4);
	if (ret) {
		*ptr_rx_preset = OPA_INVALID_INDEX;
		return ret;
	}

	/* active cables: only the local attenuation applies */
	if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G, ptr_total_atten, 4);
	else if ((lss & OPA_LINK_SPEED_12_5G) && (lse & OPA_LINK_SPEED_12_5G))
		get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_12G, ptr_total_atten, 4);

	apply_cdr_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_eq_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	apply_rx_amplitude_settings(ppd, *ptr_rx_preset, *ptr_tx_preset);

	ret = set_qsfp_tx(ppd, 1);

	return ret;
}
/*
 * Dispatch QSFP tuning by cable technology (upper nibble of the module
 * technology byte). Passive copper cables get their total attenuation
 * summed from platform, cable, and remote values; active cables go
 * through tune_active_qsfp(); anything else is reported unsupported.
 *
 * Return: 0 on success or the first error encountered.
 */
static int tune_qsfp(struct hfi1_pportdata *ppd,
		     u32 *ptr_tx_preset, u32 *ptr_rx_preset,
		     u8 *ptr_tuning_method, u32 *ptr_total_atten)
{
	u32 cable_atten = 0, remote_atten = 0, platform_atten = 0;
	u16 lss = ppd->link_speed_supported, lse = ppd->link_speed_enabled;
	int ret = 0;
	u8 *cache = ppd->qsfp_info.cache;

	switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) {
	case 0xA ... 0xB: /* passive copper */
		ret = get_platform_config_field(
			ppd->dd,
			PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_LOCAL_ATTEN_25G,
			&platform_atten, 4);
		if (ret)
			return ret;

		if ((lss & OPA_LINK_SPEED_25G) && (lse & OPA_LINK_SPEED_25G))
			cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS];
		else if ((lss & OPA_LINK_SPEED_12_5G) &&
			 (lse & OPA_LINK_SPEED_12_5G))
			cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS];

		/* Fallback to configured attenuation if cable memory is bad */
		if (cable_atten == 0 || cable_atten > 36) {
			ret = get_platform_config_field(
				ppd->dd,
				PLATFORM_CONFIG_SYSTEM_TABLE, 0,
				SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G,
				&cable_atten, 4);
			if (ret)
				return ret;
		}

		ret = get_platform_config_field(
			ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
			PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
		if (ret)
			return ret;

		*ptr_total_atten = platform_atten + cable_atten + remote_atten;

		*ptr_tuning_method = OPA_PASSIVE_TUNING;
		break;
	case 0x0 ... 0x9: /* fallthrough */
	case 0xC: /* fallthrough */
	case 0xE: /* active cable technologies */
		ret = tune_active_qsfp(ppd, ptr_tx_preset, ptr_rx_preset,
				       ptr_total_atten);
		if (ret)
			return ret;

		*ptr_tuning_method = OPA_ACTIVE_TUNING;
		break;
	case 0xD: /* fallthrough */
	case 0xF:
	default:
		dd_dev_warn(ppd->dd, "%s: Unknown/unsupported cable\n",
			    __func__);
		break;
	}
	return ret;
}
  785. /*
  786. * This function communicates its success or failure via ppd->driver_link_ready
  787. * Thus, it depends on its association with start_link(...) which checks
  788. * driver_link_ready before proceeding with the link negotiation and
  789. * initialization process.
  790. */
  791. void tune_serdes(struct hfi1_pportdata *ppd)
  792. {
  793. int ret = 0;
  794. u32 total_atten = 0;
  795. u32 remote_atten = 0, platform_atten = 0;
  796. u32 rx_preset_index, tx_preset_index;
  797. u8 tuning_method = 0, limiting_active = 0;
  798. struct hfi1_devdata *dd = ppd->dd;
  799. rx_preset_index = OPA_INVALID_INDEX;
  800. tx_preset_index = OPA_INVALID_INDEX;
  801. /* the link defaults to enabled */
  802. ppd->link_enabled = 1;
  803. /* the driver link ready state defaults to not ready */
  804. ppd->driver_link_ready = 0;
  805. ppd->offline_disabled_reason = HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
  806. /* Skip the tuning for testing (loopback != none) and simulations */
  807. if (loopback != LOOPBACK_NONE ||
  808. ppd->dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
  809. ppd->driver_link_ready = 1;
  810. if (qsfp_mod_present(ppd)) {
  811. ret = acquire_chip_resource(ppd->dd,
  812. qsfp_resource(ppd->dd),
  813. QSFP_WAIT);
  814. if (ret) {
  815. dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
  816. __func__, (int)ppd->dd->hfi1_id);
  817. goto bail;
  818. }
  819. refresh_qsfp_cache(ppd, &ppd->qsfp_info);
  820. release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
  821. }
  822. return;
  823. }
  824. switch (ppd->port_type) {
  825. case PORT_TYPE_DISCONNECTED:
  826. ppd->offline_disabled_reason =
  827. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_DISCONNECTED);
  828. dd_dev_warn(dd, "%s: Port disconnected, disabling port\n",
  829. __func__);
  830. goto bail;
  831. case PORT_TYPE_FIXED:
  832. /* platform_atten, remote_atten pre-zeroed to catch error */
  833. get_platform_config_field(
  834. ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
  835. PORT_TABLE_LOCAL_ATTEN_25G, &platform_atten, 4);
  836. get_platform_config_field(
  837. ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
  838. PORT_TABLE_REMOTE_ATTEN_25G, &remote_atten, 4);
  839. total_atten = platform_atten + remote_atten;
  840. tuning_method = OPA_PASSIVE_TUNING;
  841. break;
  842. case PORT_TYPE_VARIABLE:
  843. if (qsfp_mod_present(ppd)) {
  844. /*
  845. * platform_atten, remote_atten pre-zeroed to
  846. * catch error
  847. */
  848. get_platform_config_field(
  849. ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
  850. PORT_TABLE_LOCAL_ATTEN_25G,
  851. &platform_atten, 4);
  852. get_platform_config_field(
  853. ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
  854. PORT_TABLE_REMOTE_ATTEN_25G,
  855. &remote_atten, 4);
  856. total_atten = platform_atten + remote_atten;
  857. tuning_method = OPA_PASSIVE_TUNING;
  858. } else {
  859. ppd->offline_disabled_reason =
  860. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_CHASSIS_CONFIG);
  861. goto bail;
  862. }
  863. break;
  864. case PORT_TYPE_QSFP:
  865. if (qsfp_mod_present(ppd)) {
  866. ret = acquire_chip_resource(ppd->dd,
  867. qsfp_resource(ppd->dd),
  868. QSFP_WAIT);
  869. if (ret) {
  870. dd_dev_err(ppd->dd, "%s: hfi%d: cannot lock i2c chain\n",
  871. __func__, (int)ppd->dd->hfi1_id);
  872. goto bail;
  873. }
  874. refresh_qsfp_cache(ppd, &ppd->qsfp_info);
  875. if (ppd->qsfp_info.cache_valid) {
  876. ret = tune_qsfp(ppd,
  877. &tx_preset_index,
  878. &rx_preset_index,
  879. &tuning_method,
  880. &total_atten);
  881. /*
  882. * We may have modified the QSFP memory, so
  883. * update the cache to reflect the changes
  884. */
  885. refresh_qsfp_cache(ppd, &ppd->qsfp_info);
  886. limiting_active =
  887. ppd->qsfp_info.limiting_active;
  888. } else {
  889. dd_dev_err(dd,
  890. "%s: Reading QSFP memory failed\n",
  891. __func__);
  892. ret = -EINVAL; /* a fail indication */
  893. }
  894. release_chip_resource(ppd->dd, qsfp_resource(ppd->dd));
  895. if (ret)
  896. goto bail;
  897. } else {
  898. ppd->offline_disabled_reason =
  899. HFI1_ODR_MASK(
  900. OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
  901. goto bail;
  902. }
  903. break;
  904. default:
  905. dd_dev_warn(ppd->dd, "%s: Unknown port type\n", __func__);
  906. ppd->port_type = PORT_TYPE_UNKNOWN;
  907. tuning_method = OPA_UNKNOWN_TUNING;
  908. total_atten = 0;
  909. limiting_active = 0;
  910. tx_preset_index = OPA_INVALID_INDEX;
  911. break;
  912. }
  913. if (ppd->offline_disabled_reason ==
  914. HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
  915. apply_tunings(ppd, tx_preset_index, tuning_method,
  916. total_atten, limiting_active);
  917. if (!ret)
  918. ppd->driver_link_ready = 1;
  919. return;
  920. bail:
  921. ppd->driver_link_ready = 0;
  922. }