/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"

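/*
 * The print helpers below dump one firmware header type each. All
 * multi-byte fields in the ucode image are stored little-endian, hence
 * the le32_to_cpu()/le16_to_cpu() conversions before printing.
 */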
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
	DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
	DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
	DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
	DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
	DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
	DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
	DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
	DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
	DRM_DEBUG("ucode_array_offset_bytes: %u\n",
		  le32_to_cpu(hdr->ucode_array_offset_bytes));
	DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
}

void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("MC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct mc_firmware_header_v1_0 *mc_hdr =
			container_of(hdr, struct mc_firmware_header_v1_0, header);

		DRM_DEBUG("io_debug_size_bytes: %u\n",
			  le32_to_cpu(mc_hdr->io_debug_size_bytes));
		DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
			  le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
	} else {
		DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
	}
}

void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("SMC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct smc_firmware_header_v1_0 *smc_hdr =
			container_of(hdr, struct smc_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
	} else {
		DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
	}
}

void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("GFX\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct gfx_firmware_header_v1_0 *gfx_hdr =
			container_of(hdr, struct gfx_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(gfx_hdr->ucode_feature_version));
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
	} else {
		DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
	}
}

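/*
 * RLC headers come in three layouts: v1.0, v2.0, and v2.1, where v2.1
 * extends v2.0 with metadata for the save/restore lists (cntl/gpm/srm)
 * handled in amdgpu_ucode_init_single_fw() below.
 */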
void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("RLC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct rlc_firmware_header_v1_0 *rlc_hdr =
			container_of(hdr, struct rlc_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(rlc_hdr->ucode_feature_version));
		DRM_DEBUG("save_and_restore_offset: %u\n",
			  le32_to_cpu(rlc_hdr->save_and_restore_offset));
		DRM_DEBUG("clear_state_descriptor_offset: %u\n",
			  le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
		DRM_DEBUG("avail_scratch_ram_locations: %u\n",
			  le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
		DRM_DEBUG("master_pkt_description_offset: %u\n",
			  le32_to_cpu(rlc_hdr->master_pkt_description_offset));
	} else if (version_major == 2) {
		const struct rlc_firmware_header_v2_0 *rlc_hdr =
			container_of(hdr, struct rlc_firmware_header_v2_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(rlc_hdr->ucode_feature_version));
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
		DRM_DEBUG("save_and_restore_offset: %u\n",
			  le32_to_cpu(rlc_hdr->save_and_restore_offset));
		DRM_DEBUG("clear_state_descriptor_offset: %u\n",
			  le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
		DRM_DEBUG("avail_scratch_ram_locations: %u\n",
			  le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
		DRM_DEBUG("reg_restore_list_size: %u\n",
			  le32_to_cpu(rlc_hdr->reg_restore_list_size));
		DRM_DEBUG("reg_list_format_start: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_start));
		DRM_DEBUG("reg_list_format_separate_start: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
		DRM_DEBUG("starting_offsets_start: %u\n",
			  le32_to_cpu(rlc_hdr->starting_offsets_start));
		DRM_DEBUG("reg_list_format_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
		DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
		DRM_DEBUG("reg_list_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_size_bytes));
		DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
		DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
		DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
		DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
		DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
		if (version_minor == 1) {
			const struct rlc_firmware_header_v2_1 *v2_1 =
				container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);

			DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
				  le32_to_cpu(v2_1->reg_list_format_direct_reg_list_length));
			DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_cntl_ucode_ver));
			DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_cntl_feature_ver));
			DRM_DEBUG("save_restore_list_cntl_size_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_cntl_size_bytes));
			DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_cntl_offset_bytes));
			DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_gpm_ucode_ver));
			DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_gpm_feature_ver));
			DRM_DEBUG("save_restore_list_gpm_size_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_gpm_size_bytes));
			DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_gpm_offset_bytes));
			DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_srm_ucode_ver));
			DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_srm_feature_ver));
			DRM_DEBUG("save_restore_list_srm_size_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_srm_size_bytes));
			DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes));
		}
	} else {
		DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
	}
}

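/*
 * SDMA v1.x headers are backward compatible: any minor version >= 1
 * carries the appended digest_size field introduced with v1.1.
 */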
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("SDMA\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct sdma_firmware_header_v1_0 *sdma_hdr =
			container_of(hdr, struct sdma_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(sdma_hdr->ucode_feature_version));
		DRM_DEBUG("ucode_change_version: %u\n",
			  le32_to_cpu(sdma_hdr->ucode_change_version));
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
		if (version_minor >= 1) {
			const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr =
				container_of(sdma_hdr, struct sdma_firmware_header_v1_1, v1_0);
			DRM_DEBUG("digest_size: %u\n", le32_to_cpu(sdma_v1_1_hdr->digest_size));
		}
	} else {
		DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
			  version_major, version_minor);
	}
}

void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("GPU_INFO\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct gpu_info_firmware_header_v1_0 *gpu_info_hdr =
			container_of(hdr, struct gpu_info_firmware_header_v1_0, header);

		DRM_DEBUG("version_major: %u\n",
			  le16_to_cpu(gpu_info_hdr->version_major));
		DRM_DEBUG("version_minor: %u\n",
			  le16_to_cpu(gpu_info_hdr->version_minor));
	} else {
		DRM_ERROR("Unknown gpu_info ucode version: %u.%u\n", version_major, version_minor);
	}
}

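/*
 * A loaded image is considered valid only if the size recorded in its
 * common header matches the size of the blob request_firmware() handed
 * back.
 */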
int amdgpu_ucode_validate(const struct firmware *fw)
{
	const struct common_firmware_header *hdr =
		(const struct common_firmware_header *)fw->data;

	if (fw->size == le32_to_cpu(hdr->size_bytes))
		return 0;

	return -EINVAL;
}

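/*
 * Note the inverted sense: this returns true when the header does NOT
 * match the requested major.minor version, so callers treat a true
 * result as "unsupported header".
 */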
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
				uint16_t hdr_major, uint16_t hdr_minor)
{
	if ((hdr->common.header_version_major == hdr_major) &&
	    (hdr->common.header_version_minor == hdr_minor))
		return false;

	return true;
}

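/*
 * Pick how ucode reaches the hardware for a given ASIC: written
 * directly by the driver (DIRECT), handed to the SMU, or staged through
 * the PSP. In the usual call path load_type comes from the fw_load_type
 * module parameter; a value of 0 forces direct loading on chips that
 * would otherwise use SMU or PSP loading.
 */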
enum amdgpu_firmware_load_type
amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		return AMDGPU_FW_LOAD_DIRECT;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		return AMDGPU_FW_LOAD_DIRECT;
#endif
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		if (!load_type)
			return AMDGPU_FW_LOAD_DIRECT;
		else
			return AMDGPU_FW_LOAD_SMU;
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_VEGA12:
		if (!load_type)
			return AMDGPU_FW_LOAD_DIRECT;
		else
			return AMDGPU_FW_LOAD_PSP;
	case CHIP_VEGA20:
		return AMDGPU_FW_LOAD_DIRECT;
	default:
		DRM_ERROR("Unknown firmware load type\n");
	}

	return AMDGPU_FW_LOAD_DIRECT;
}

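/*
 * Copy one firmware image into its slot in the shared firmware buffer.
 * With PSP loading, the MEC, MEC jump-table and RLC save/restore list
 * images are carved out of their parent blobs individually; every other
 * image is copied verbatim from the ucode array in the file.
 */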
static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
				       struct amdgpu_firmware_info *ucode,
				       uint64_t mc_addr, void *kptr)
{
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr = NULL;

	if (NULL == ucode->fw)
		return 0;

	ucode->mc_addr = mc_addr;
	ucode->kaddr = kptr;

	if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
		return 0;

	header = (const struct common_firmware_header *)ucode->fw->data;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP ||
	    (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
					      le32_to_cpu(header->ucode_array_offset_bytes)),
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1 ||
		   ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2) {
		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
			le32_to_cpu(cp_hdr->jt_size) * 4;
		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
					      le32_to_cpu(header->ucode_array_offset_bytes)),
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
		   ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) {
		ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4;
		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
					      le32_to_cpu(header->ucode_array_offset_bytes) +
					      le32_to_cpu(cp_hdr->jt_offset) * 4),
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
		ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
		memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) {
		ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
		memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm,
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
		ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
		memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
		       ucode->ucode_size);
	}

	return 0;
}

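/*
 * Copy the CP jump table to the page-aligned region that follows the
 * main ucode image in the same firmware buffer slot. jt_offset and
 * jt_size are expressed in dwords, hence the multiplication by 4.
 */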
static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
				 uint64_t mc_addr, void *kptr)
{
	const struct gfx_firmware_header_v1_0 *header = NULL;
	const struct common_firmware_header *comm_hdr = NULL;
	uint8_t *src_addr = NULL;
	uint8_t *dst_addr = NULL;

	if (NULL == ucode->fw)
		return 0;

	comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	dst_addr = ucode->kaddr +
		   ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
			 PAGE_SIZE);
	src_addr = (uint8_t *)ucode->fw->data +
		   le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
		   (le32_to_cpu(header->jt_offset) * 4);
	memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);

	return 0;
}

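/*
 * Allocate (except during GPU reset, when it already exists) one kernel
 * BO large enough for every image, then pack the images into it back to
 * back, each slot aligned to PAGE_SIZE. SR-IOV VFs place the buffer in
 * VRAM; bare metal uses GTT.
 */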
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
	uint64_t fw_offset = 0;
	int i, err;
	struct amdgpu_firmware_info *ucode = NULL;
	const struct common_firmware_header *header = NULL;

	if (!adev->firmware.fw_size) {
		dev_warn(adev->dev, "No IP firmware to load\n");
		return 0;
	}

	if (!adev->in_gpu_reset) {
		err = amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
					amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
					&adev->firmware.fw_buf,
					&adev->firmware.fw_buf_mc,
					&adev->firmware.fw_buf_ptr);
		if (err) {
			dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n");
			goto failed;
		}
	}

	memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);

	/*
	 * If the SMU loads the firmware, the SMC, UVD and VCE ucode info
	 * does not need to be added here.
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (amdgpu_sriov_vf(adev))
			adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 3;
		else
			adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 4;
	} else {
		adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
	}

	for (i = 0; i < adev->firmware.max_ucodes; i++) {
		ucode = &adev->firmware.ucode[i];
		if (ucode->fw) {
			header = (const struct common_firmware_header *)ucode->fw->data;
			amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
						    adev->firmware.fw_buf_ptr + fw_offset);
			if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
			    adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				const struct gfx_firmware_header_v1_0 *cp_hdr;

				cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
				amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset,
						      adev->firmware.fw_buf_ptr + fw_offset);
				fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
			}
			fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);
		}
	}
	return 0;

failed:
	if (err)
		adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;

	return err;
}

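/*
 * Counterpart of amdgpu_ucode_init_bo(): clear the per-ucode addresses
 * and release the shared firmware buffer.
 */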
int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_firmware_info *ucode = NULL;

	if (!adev->firmware.fw_size)
		return 0;

	for (i = 0; i < adev->firmware.max_ucodes; i++) {
		ucode = &adev->firmware.ucode[i];
		if (ucode->fw) {
			ucode->mc_addr = 0;
			ucode->kaddr = NULL;
		}
	}

	amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
			      &adev->firmware.fw_buf_mc,
			      &adev->firmware.fw_buf_ptr);

	return 0;
}