/* drivers/firmware/qcom_scm-32.c — Qualcomm SCM legacy (32-bit) interface */
  1. /* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
  2. * Copyright (C) 2015 Linaro Ltd.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. *
  13. * You should have received a copy of the GNU General Public License
  14. * along with this program; if not, write to the Free Software
  15. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  16. * 02110-1301, USA.
  17. */
  18. #include <linux/slab.h>
  19. #include <linux/io.h>
  20. #include <linux/module.h>
  21. #include <linux/mutex.h>
  22. #include <linux/errno.h>
  23. #include <linux/err.h>
  24. #include <linux/qcom_scm.h>
  25. #include <linux/dma-mapping.h>
  26. #include "qcom_scm.h"
/*
 * Per-CPU flag bits used when registering cold/warm boot addresses with
 * the secure world.  The bit assignment is not sequential, and CPU0's
 * cold-boot "flag" is 0 (no bit required).
 */
#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20
#define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40

/*
 * Tracks, per CPU, the warm-boot entry point most recently registered
 * with the secure world (see __qcom_scm_set_warm_boot_addr()).
 */
struct qcom_scm_entry {
	int flag;	/* QCOM_SCM_FLAG_WARMBOOT_CPUx bit for this CPU */
	void *entry;	/* currently registered entry point, NULL if none */
};

static struct qcom_scm_entry qcom_scm_wb[] = {
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
	{ .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

/* Serializes all buffered SCM calls and the version query. */
static DEFINE_MUTEX(qcom_scm_lock);
  46. /**
  47. * struct qcom_scm_command - one SCM command buffer
  48. * @len: total available memory for command and response
  49. * @buf_offset: start of command buffer
  50. * @resp_hdr_offset: start of response buffer
  51. * @id: command to be executed
  52. * @buf: buffer returned from qcom_scm_get_command_buffer()
  53. *
  54. * An SCM command is laid out in memory as follows:
  55. *
  56. * ------------------- <--- struct qcom_scm_command
  57. * | command header |
  58. * ------------------- <--- qcom_scm_get_command_buffer()
  59. * | command buffer |
  60. * ------------------- <--- struct qcom_scm_response and
  61. * | response header | qcom_scm_command_to_response()
  62. * ------------------- <--- qcom_scm_get_response_buffer()
  63. * | response buffer |
  64. * -------------------
  65. *
  66. * There can be arbitrary padding between the headers and buffers so
  67. * you should always use the appropriate qcom_scm_get_*_buffer() routines
  68. * to access the buffers in a safe manner.
  69. */
  70. struct qcom_scm_command {
  71. __le32 len;
  72. __le32 buf_offset;
  73. __le32 resp_hdr_offset;
  74. __le32 id;
  75. __le32 buf[0];
  76. };
/**
 * struct qcom_scm_response - one SCM response buffer
 * @len: total available memory for response
 * @buf_offset: start of response data relative to start of qcom_scm_response
 * @is_complete: indicates if the command has finished processing
 */
struct qcom_scm_response {
	__le32 len;
	__le32 buf_offset;
	__le32 is_complete;	/* polled by qcom_scm_call() until non-zero */
};
  88. /**
  89. * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
  90. * @cmd: command
  91. *
  92. * Returns a pointer to a response for a command.
  93. */
  94. static inline struct qcom_scm_response *qcom_scm_command_to_response(
  95. const struct qcom_scm_command *cmd)
  96. {
  97. return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
  98. }
  99. /**
  100. * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
  101. * @cmd: command
  102. *
  103. * Returns a pointer to the command buffer of a command.
  104. */
  105. static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
  106. {
  107. return (void *)cmd->buf;
  108. }
  109. /**
  110. * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
  111. * @rsp: response
  112. *
  113. * Returns a pointer to a response buffer of a response.
  114. */
  115. static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
  116. {
  117. return (void *)rsp + le32_to_cpu(rsp->buf_offset);
  118. }
/*
 * smc() - issue one buffered SCM call via the ARM SMC instruction.
 * @cmd_addr: physical address of the marshalled qcom_scm_command
 *
 * Legacy calling convention: r0 = 1 (command call), r1 = pointer to a
 * context-id word the secure world may write, r2 = physical address of
 * the command buffer.  If the secure world returns QCOM_SCM_INTERRUPTED
 * the call is simply re-issued.  Returns the final value left in r0.
 */
static u32 smc(u32 cmd_addr)
{
	int context_id;
	register u32 r0 asm("r0") = 1;
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = cmd_addr;

	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r0")
			__asmeq("%2", "r1")
			__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0)
			: "r" (r0), "r" (r1), "r" (r2)
			: "r3", "r12");	/* r3/r12 may be clobbered by the monitor */
	} while (r0 == QCOM_SCM_INTERRUPTED);

	return r0;
}
/**
 * qcom_scm_call() - Send an SCM command
 * @dev: struct device
 * @svc_id: service identifier
 * @cmd_id: command identifier
 * @cmd_buf: command buffer
 * @cmd_len: length of the command buffer
 * @resp_buf: response buffer
 * @resp_len: length of the response buffer
 *
 * Sends a command to the SCM and waits for the command to finish processing.
 *
 * A note on cache maintenance:
 * Note that any buffers that are expected to be accessed by the secure world
 * must be flushed before invoking qcom_scm_call and invalidated in the cache
 * immediately after qcom_scm_call returns. Cache maintenance on the command
 * and response buffers is taken care of by qcom_scm_call; however, callers are
 * responsible for any other cached buffers passed over to the secure world.
 */
static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
			 const void *cmd_buf, size_t cmd_len, void *resp_buf,
			 size_t resp_len)
{
	int ret;
	struct qcom_scm_command *cmd;
	struct qcom_scm_response *rsp;
	/* One allocation holds both headers and both payloads (see the
	 * layout diagram above struct qcom_scm_command). */
	size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
	dma_addr_t cmd_phys;

	cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* Marshal the header; all on-wire fields are little-endian. */
	cmd->len = cpu_to_le32(alloc_len);
	cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
	cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
	cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
	if (cmd_buf)
		memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);

	rsp = qcom_scm_command_to_response(cmd);

	/* The secure world is handed the buffer's bus/physical address. */
	cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, cmd_phys)) {
		kfree(cmd);
		return -ENOMEM;
	}

	/* Only one buffered SCM call may be outstanding at a time. */
	mutex_lock(&qcom_scm_lock);
	ret = smc(cmd_phys);
	if (ret < 0)
		ret = qcom_scm_remap_error(ret);
	mutex_unlock(&qcom_scm_lock);
	if (ret)
		goto out;

	/* Busy-wait for completion, re-fetching the response header from
	 * memory on every iteration.  NOTE(review): no timeout here —
	 * presumably the firmware guarantees completion; confirm. */
	do {
		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
					sizeof(*rsp), DMA_FROM_DEVICE);
	} while (!rsp->is_complete);

	if (resp_buf) {
		/* Pull the response payload back to the CPU and copy it out. */
		dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
					le32_to_cpu(rsp->buf_offset),
					resp_len, DMA_FROM_DEVICE);
		memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
		       resp_len);
	}
out:
	dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
	kfree(cmd);
	return ret;
}
  207. #define SCM_CLASS_REGISTER (0x2 << 8)
  208. #define SCM_MASK_IRQS BIT(5)
  209. #define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
  210. SCM_CLASS_REGISTER | \
  211. SCM_MASK_IRQS | \
  212. (n & 0xf))
/**
 * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptable, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
	int context_id;
	/* r0 encodes service, command and argument count (register class). */
	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = arg1;

	asm volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
		".arch_extension sec\n"
#endif
		"smc #0 @ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2)
		: "r3", "r12");	/* clobbered by the secure monitor */

	return r0;
}
/**
 * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 * @arg2: second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptable, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
{
	int context_id;
	/* r0 encodes service, command and argument count (register class). */
	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
	register u32 r1 asm("r1") = (u32)&context_id;
	register u32 r2 asm("r2") = arg1;
	register u32 r3 asm("r3") = arg2;

	asm volatile(
		__asmeq("%0", "r0")
		__asmeq("%1", "r0")
		__asmeq("%2", "r1")
		__asmeq("%3", "r2")
		__asmeq("%4", "r3")
#ifdef REQUIRES_SEC
		".arch_extension sec\n"
#endif
		"smc #0 @ switch to secure world\n"
		: "=r" (r0)
		: "r" (r0), "r" (r1), "r" (r2), "r" (r3)
		: "r12");	/* r3 carries arg2, so only r12 remains clobbered */

	return r0;
}
/**
 * qcom_scm_get_version() - Query the SCM interface version
 *
 * The version is fetched from the secure world once and cached in a
 * static; subsequent calls return the cached value.
 *
 * NOTE(review): the cached-value fast path reads @version outside
 * qcom_scm_lock; benign only if concurrent u32 loads/stores are atomic
 * on this architecture — confirm.
 */
u32 qcom_scm_get_version(void)
{
	int context_id;
	static u32 version = -1;	/* -1 == not yet queried */
	register u32 r0 asm("r0");
	register u32 r1 asm("r1");

	if (version != -1)
		return version;

	mutex_lock(&qcom_scm_lock);

	r0 = 0x1 << 8;	/* version-query call id */
	r1 = (u32)&context_id;
	do {
		asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r0")
			__asmeq("%3", "r1")
#ifdef REQUIRES_SEC
			".arch_extension sec\n"
#endif
			"smc #0 @ switch to secure world\n"
			: "=r" (r0), "=r" (r1)
			: "r" (r0), "r" (r1)
			: "r2", "r3", "r12");
	} while (r0 == QCOM_SCM_INTERRUPTED);

	version = r1;	/* secure world returns the version in r1 */
	mutex_unlock(&qcom_scm_lock);

	return version;
}
EXPORT_SYMBOL(qcom_scm_get_version);
  304. /**
  305. * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
  306. * @entry: Entry point function for the cpus
  307. * @cpus: The cpumask of cpus that will use the entry point
  308. *
  309. * Set the cold boot address of the cpus. Any cpu outside the supported
  310. * range would be removed from the cpu present mask.
  311. */
  312. int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
  313. {
  314. int flags = 0;
  315. int cpu;
  316. int scm_cb_flags[] = {
  317. QCOM_SCM_FLAG_COLDBOOT_CPU0,
  318. QCOM_SCM_FLAG_COLDBOOT_CPU1,
  319. QCOM_SCM_FLAG_COLDBOOT_CPU2,
  320. QCOM_SCM_FLAG_COLDBOOT_CPU3,
  321. };
  322. if (!cpus || (cpus && cpumask_empty(cpus)))
  323. return -EINVAL;
  324. for_each_cpu(cpu, cpus) {
  325. if (cpu < ARRAY_SIZE(scm_cb_flags))
  326. flags |= scm_cb_flags[cpu];
  327. else
  328. set_cpu_present(cpu, false);
  329. }
  330. return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
  331. flags, virt_to_phys(entry));
  332. }
/**
 * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @dev: device passed through to qcom_scm_call()
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 *
 * NOTE(review): qcom_scm_wb[] has exactly four entries and @cpus is
 * indexed into it unchecked — assumes all cpu ids are < 4; confirm for
 * larger SMP configurations.
 */
int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
				  const cpumask_t *cpus)
{
	int ret;
	int flags = 0;
	int cpu;
	struct {
		__le32 flags;
		__le32 addr;
	} cmd;

	/*
	 * Reassign only if we are switching from hotplug entry point
	 * to cpuidle entry point or vice versa.
	 */
	for_each_cpu(cpu, cpus) {
		if (entry == qcom_scm_wb[cpu].entry)
			continue;
		flags |= qcom_scm_wb[cpu].flag;
	}

	/* No change in entry function */
	if (!flags)
		return 0;

	cmd.addr = cpu_to_le32(virt_to_phys(entry));
	cmd.flags = cpu_to_le32(flags);
	ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
			    &cmd, sizeof(cmd), NULL, 0);
	if (!ret) {
		/* Remember the registered entry so later calls short-cut. */
		for_each_cpu(cpu, cpus)
			qcom_scm_wb[cpu].entry = entry;
	}

	return ret;
}
  373. /**
  374. * qcom_scm_cpu_power_down() - Power down the cpu
  375. * @flags - Flags to flush cache
  376. *
  377. * This is an end point to power down cpu. If there was a pending interrupt,
  378. * the control would return from this function, otherwise, the cpu jumps to the
  379. * warm boot entry point set for this cpu upon reset.
  380. */
  381. void __qcom_scm_cpu_power_down(u32 flags)
  382. {
  383. qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
  384. flags & QCOM_SCM_FLUSH_FLAG_MASK);
  385. }
  386. int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
  387. {
  388. int ret;
  389. __le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
  390. __le32 ret_val = 0;
  391. ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
  392. &svc_cmd, sizeof(svc_cmd), &ret_val,
  393. sizeof(ret_val));
  394. if (ret)
  395. return ret;
  396. return le32_to_cpu(ret_val);
  397. }
  398. int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
  399. u32 req_cnt, u32 *resp)
  400. {
  401. if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
  402. return -ERANGE;
  403. return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
  404. req, req_cnt * sizeof(*req), resp, sizeof(*resp));
  405. }
/* No one-time setup is required for the legacy (32-bit) convention. */
void __qcom_scm_init(void)
{
}
  409. bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
  410. {
  411. __le32 out;
  412. __le32 in;
  413. int ret;
  414. in = cpu_to_le32(peripheral);
  415. ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
  416. QCOM_SCM_PAS_IS_SUPPORTED_CMD,
  417. &in, sizeof(in),
  418. &out, sizeof(out));
  419. return ret ? false : !!out;
  420. }
  421. int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
  422. dma_addr_t metadata_phys)
  423. {
  424. __le32 scm_ret;
  425. int ret;
  426. struct {
  427. __le32 proc;
  428. __le32 image_addr;
  429. } request;
  430. request.proc = cpu_to_le32(peripheral);
  431. request.image_addr = cpu_to_le32(metadata_phys);
  432. ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
  433. QCOM_SCM_PAS_INIT_IMAGE_CMD,
  434. &request, sizeof(request),
  435. &scm_ret, sizeof(scm_ret));
  436. return ret ? : le32_to_cpu(scm_ret);
  437. }
  438. int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
  439. phys_addr_t addr, phys_addr_t size)
  440. {
  441. __le32 scm_ret;
  442. int ret;
  443. struct {
  444. __le32 proc;
  445. __le32 addr;
  446. __le32 len;
  447. } request;
  448. request.proc = cpu_to_le32(peripheral);
  449. request.addr = cpu_to_le32(addr);
  450. request.len = cpu_to_le32(size);
  451. ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
  452. QCOM_SCM_PAS_MEM_SETUP_CMD,
  453. &request, sizeof(request),
  454. &scm_ret, sizeof(scm_ret));
  455. return ret ? : le32_to_cpu(scm_ret);
  456. }
  457. int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
  458. {
  459. __le32 out;
  460. __le32 in;
  461. int ret;
  462. in = cpu_to_le32(peripheral);
  463. ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
  464. QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
  465. &in, sizeof(in),
  466. &out, sizeof(out));
  467. return ret ? : le32_to_cpu(out);
  468. }
  469. int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
  470. {
  471. __le32 out;
  472. __le32 in;
  473. int ret;
  474. in = cpu_to_le32(peripheral);
  475. ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
  476. QCOM_SCM_PAS_SHUTDOWN_CMD,
  477. &in, sizeof(in),
  478. &out, sizeof(out));
  479. return ret ? : le32_to_cpu(out);
  480. }
  481. int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
  482. {
  483. __le32 out;
  484. __le32 in = cpu_to_le32(reset);
  485. int ret;
  486. ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
  487. &in, sizeof(in),
  488. &out, sizeof(out));
  489. return ret ? : le32_to_cpu(out);
  490. }
/* Enable or disable download (crash-dump) mode in the secure world. */
int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
	/*
	 * NOTE(review): the "enabled" value deliberately reuses the
	 * QCOM_SCM_SET_DLOAD_MODE command-id constant as the magic cookie
	 * — looks intentional, but verify against the firmware interface.
	 */
	return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
				     enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0);
}
  496. int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
  497. {
  498. struct {
  499. __le32 state;
  500. __le32 id;
  501. } req;
  502. __le32 scm_ret = 0;
  503. int ret;
  504. req.state = cpu_to_le32(state);
  505. req.id = cpu_to_le32(id);
  506. ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
  507. &req, sizeof(req), &scm_ret, sizeof(scm_ret));
  508. return ret ? : le32_to_cpu(scm_ret);
  509. }
/*
 * The operations below are not available through this (32-bit/legacy)
 * convention; each simply reports -ENODEV so callers can fail cleanly.
 */
int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
			  size_t mem_sz, phys_addr_t src, size_t src_sz,
			  phys_addr_t dest, size_t dest_sz)
{
	return -ENODEV;
}

int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
			       u32 spare)
{
	return -ENODEV;
}

int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
				      size_t *size)
{
	return -ENODEV;
}

int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
				      u32 spare)
{
	return -ENODEV;
}
  531. int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
  532. unsigned int *val)
  533. {
  534. int ret;
  535. ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr);
  536. if (ret >= 0)
  537. *val = ret;
  538. return ret < 0 ? ret : 0;
  539. }
/* Write @val to @addr via the secure world; returns the SCM call result. */
int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
{
	return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
				     addr, val);
}