- /* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
- * Copyright (C) 2015 Linaro Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
- #include <linux/slab.h>
- #include <linux/io.h>
- #include <linux/module.h>
- #include <linux/mutex.h>
- #include <linux/errno.h>
- #include <linux/err.h>
- #include <linux/qcom_scm.h>
- #include <linux/dma-mapping.h>
- #include "qcom_scm.h"
- #define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
- #define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
- #define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
- #define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20
- #define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04
- #define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02
- #define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
- #define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
- struct qcom_scm_entry {
- int flag;
- void *entry;
- };
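- /*
- * Per-cpu record of the warm boot entry point most recently registered with
- * the secure world, used to skip redundant SCM calls when the entry point
- * has not changed.
- */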
- static struct qcom_scm_entry qcom_scm_wb[] = {
- { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
- { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
- { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
- { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
- };
- static DEFINE_MUTEX(qcom_scm_lock);
- /**
- * struct qcom_scm_command - one SCM command buffer
- * @len: total available memory for command and response
- * @buf_offset: start of command buffer
- * @resp_hdr_offset: start of response buffer
- * @id: command to be executed
- * @buf: buffer returned from qcom_scm_get_command_buffer()
- *
- * An SCM command is laid out in memory as follows:
- *
- * ------------------- <--- struct qcom_scm_command
- * | command header |
- * ------------------- <--- qcom_scm_get_command_buffer()
- * | command buffer |
- * ------------------- <--- struct qcom_scm_response and
- * | response header | qcom_scm_command_to_response()
- * ------------------- <--- qcom_scm_get_response_buffer()
- * | response buffer |
- * -------------------
- *
- * There can be arbitrary padding between the headers and buffers so
- * you should always use the appropriate qcom_scm_get_*_buffer() routines
- * to access the buffers in a safe manner.
- */
- struct qcom_scm_command {
- __le32 len;
- __le32 buf_offset;
- __le32 resp_hdr_offset;
- __le32 id;
- __le32 buf[0];
- };
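- /*
- * Worked example of the layout above (illustrative only): for an 8-byte
- * command payload and a 4-byte response payload, qcom_scm_call() builds one
- * allocation where
- *
- * buf_offset      = sizeof(struct qcom_scm_command)                  = 16
- * resp_hdr_offset = buf_offset + cmd_len                             = 24
- * len             = resp_hdr_offset + sizeof(struct qcom_scm_response)
- *                   + resp_len                                       = 40
- *
- * The response's own buf_offset is expected to be filled in by the secure
- * world once the command completes.
- */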
- /**
- * struct qcom_scm_response - one SCM response buffer
- * @len: total available memory for response
- * @buf_offset: start of response data relative to start of qcom_scm_response
- * @is_complete: indicates if the command has finished processing
- */
- struct qcom_scm_response {
- __le32 len;
- __le32 buf_offset;
- __le32 is_complete;
- };
- /**
- * qcom_scm_command_to_response() - Get a pointer to a qcom_scm_response
- * @cmd: command
- *
- * Returns a pointer to a response for a command.
- */
- static inline struct qcom_scm_response *qcom_scm_command_to_response(
- const struct qcom_scm_command *cmd)
- {
- return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
- }
- /**
- * qcom_scm_get_command_buffer() - Get a pointer to a command buffer
- * @cmd: command
- *
- * Returns a pointer to the command buffer of a command.
- */
- static inline void *qcom_scm_get_command_buffer(const struct qcom_scm_command *cmd)
- {
- return (void *)cmd->buf;
- }
- /**
- * qcom_scm_get_response_buffer() - Get a pointer to a response buffer
- * @rsp: response
- *
- * Returns a pointer to the data buffer of a response.
- */
- static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response *rsp)
- {
- return (void *)rsp + le32_to_cpu(rsp->buf_offset);
- }
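- /*
- * Issue one legacy "buffered" SCM call: r0 = 1 selects the buffered call
- * method, r1 points at a local context id word and r2 holds the physical
- * address of the marshalled command. The call is reissued for as long as
- * the secure world reports it was interrupted.
- */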
- static u32 smc(u32 cmd_addr)
- {
- int context_id;
- register u32 r0 asm("r0") = 1;
- register u32 r1 asm("r1") = (u32)&context_id;
- register u32 r2 asm("r2") = cmd_addr;
- do {
- asm volatile(
- __asmeq("%0", "r0")
- __asmeq("%1", "r0")
- __asmeq("%2", "r1")
- __asmeq("%3", "r2")
- #ifdef REQUIRES_SEC
- ".arch_extension sec\n"
- #endif
- "smc #0 @ switch to secure world\n"
- : "=r" (r0)
- : "r" (r0), "r" (r1), "r" (r2)
- : "r3", "r12");
- } while (r0 == QCOM_SCM_INTERRUPTED);
- return r0;
- }
- /**
- * qcom_scm_call() - Send an SCM command
- * @dev: device used to DMA map the shared command/response buffer
- * @svc_id: service identifier
- * @cmd_id: command identifier
- * @cmd_buf: command buffer
- * @cmd_len: length of the command buffer
- * @resp_buf: response buffer
- * @resp_len: length of the response buffer
- *
- * Sends a command to the SCM and waits for the command to finish processing.
- *
- * A note on cache maintenance:
- * Any buffers that are expected to be accessed by the secure world
- * must be flushed before invoking qcom_scm_call and invalidated in the cache
- * immediately after qcom_scm_call returns. Cache maintenance on the command
- * and response buffers is taken care of by qcom_scm_call; however, callers are
- * responsible for any other cached buffers passed over to the secure world.
- */
- static int qcom_scm_call(struct device *dev, u32 svc_id, u32 cmd_id,
- const void *cmd_buf, size_t cmd_len, void *resp_buf,
- size_t resp_len)
- {
- int ret;
- struct qcom_scm_command *cmd;
- struct qcom_scm_response *rsp;
- size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
- dma_addr_t cmd_phys;
- cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
- cmd->len = cpu_to_le32(alloc_len);
- cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
- cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
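- /* The call id carries the service number above bit 10 and the command in the low 10 bits */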
- cmd->id = cpu_to_le32((svc_id << 10) | cmd_id);
- if (cmd_buf)
- memcpy(qcom_scm_get_command_buffer(cmd), cmd_buf, cmd_len);
- rsp = qcom_scm_command_to_response(cmd);
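- /* Hand the buffer to the secure world; the streaming DMA mapping flushes it from the CPU caches */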
- cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, cmd_phys)) {
- kfree(cmd);
- return -ENOMEM;
- }
- mutex_lock(&qcom_scm_lock);
- ret = smc(cmd_phys);
- if (ret < 0)
- ret = qcom_scm_remap_error(ret);
- mutex_unlock(&qcom_scm_lock);
- if (ret)
- goto out;
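- /*
- * The response header is written by the secure world behind the CPU's back,
- * so re-sync it from "device" memory on every pass and poll until it
- * reports completion.
- */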
- do {
- dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
- sizeof(*rsp), DMA_FROM_DEVICE);
- } while (!rsp->is_complete);
- if (resp_buf) {
- dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
- le32_to_cpu(rsp->buf_offset),
- resp_len, DMA_FROM_DEVICE);
- memcpy(resp_buf, qcom_scm_get_response_buffer(rsp),
- resp_len);
- }
- out:
- dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
- kfree(cmd);
- return ret;
- }
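- /*
- * Register-based ("atomic") calls pass their arguments in registers rather
- * than through a marshalled command buffer. The call id encodes the service
- * and command, marks the call as register class with interrupts masked, and
- * carries the argument count in the low nibble. Illustrative example:
- * SCM_ATOMIC(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, 1) expands to
- * (((QCOM_SCM_SVC_IO << 10) | (QCOM_SCM_IO_READ & 0x3ff)) << 12) |
- * SCM_CLASS_REGISTER | SCM_MASK_IRQS | 1.
- */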
- #define SCM_CLASS_REGISTER (0x2 << 8)
- #define SCM_MASK_IRQS BIT(5)
- #define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
- SCM_CLASS_REGISTER | \
- SCM_MASK_IRQS | \
- ((n) & 0xf))
- /**
- * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
- * @svc: service identifier
- * @cmd: command identifier
- * @arg1: first argument
- *
- * This shall only be used with commands that are guaranteed to be
- * uninterruptible, atomic and SMP safe.
- */
- static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
- {
- int context_id;
- register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
- register u32 r1 asm("r1") = (u32)&context_id;
- register u32 r2 asm("r2") = arg1;
- asm volatile(
- __asmeq("%0", "r0")
- __asmeq("%1", "r0")
- __asmeq("%2", "r1")
- __asmeq("%3", "r2")
- #ifdef REQUIRES_SEC
- ".arch_extension sec\n"
- #endif
- "smc #0 @ switch to secure world\n"
- : "=r" (r0)
- : "r" (r0), "r" (r1), "r" (r2)
- : "r3", "r12");
- return r0;
- }
- /**
- * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
- * @svc: service identifier
- * @cmd: command identifier
- * @arg1: first argument
- * @arg2: second argument
- *
- * This shall only be used with commands that are guaranteed to be
- * uninterruptible, atomic and SMP safe.
- */
- static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
- {
- int context_id;
- register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
- register u32 r1 asm("r1") = (u32)&context_id;
- register u32 r2 asm("r2") = arg1;
- register u32 r3 asm("r3") = arg2;
- asm volatile(
- __asmeq("%0", "r0")
- __asmeq("%1", "r0")
- __asmeq("%2", "r1")
- __asmeq("%3", "r2")
- __asmeq("%4", "r3")
- #ifdef REQUIRES_SEC
- ".arch_extension sec\n"
- #endif
- "smc #0 @ switch to secure world\n"
- : "=r" (r0)
- : "r" (r0), "r" (r1), "r" (r2), "r" (r3)
- : "r12");
- return r0;
- }
- u32 qcom_scm_get_version(void)
- {
- int context_id;
- static u32 version = -1;
- register u32 r0 asm("r0");
- register u32 r1 asm("r1");
- if (version != -1)
- return version;
- mutex_lock(&qcom_scm_lock);
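- /* Issue the firmware version query, retrying while the call is interrupted */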
- r0 = 0x1 << 8;
- r1 = (u32)&context_id;
- do {
- asm volatile(
- __asmeq("%0", "r0")
- __asmeq("%1", "r1")
- __asmeq("%2", "r0")
- __asmeq("%3", "r1")
- #ifdef REQUIRES_SEC
- ".arch_extension sec\n"
- #endif
- "smc #0 @ switch to secure world\n"
- : "=r" (r0), "=r" (r1)
- : "r" (r0), "r" (r1)
- : "r2", "r3", "r12");
- } while (r0 == QCOM_SCM_INTERRUPTED);
- version = r1;
- mutex_unlock(&qcom_scm_lock);
- return version;
- }
- EXPORT_SYMBOL(qcom_scm_get_version);
- /**
- * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the cold boot address of the cpus. Any cpu outside the supported
- * range is removed from the cpu present mask.
- */
- int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
- {
- int flags = 0;
- int cpu;
- int scm_cb_flags[] = {
- QCOM_SCM_FLAG_COLDBOOT_CPU0,
- QCOM_SCM_FLAG_COLDBOOT_CPU1,
- QCOM_SCM_FLAG_COLDBOOT_CPU2,
- QCOM_SCM_FLAG_COLDBOOT_CPU3,
- };
- if (!cpus || cpumask_empty(cpus))
- return -EINVAL;
- for_each_cpu(cpu, cpus) {
- if (cpu < ARRAY_SIZE(scm_cb_flags))
- flags |= scm_cb_flags[cpu];
- else
- set_cpu_present(cpu, false);
- }
- return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
- flags, virt_to_phys(entry));
- }
- /**
- * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
- * @dev: device used for the SCM call
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the Linux entry point for the SCM to transfer control to when coming
- * out of a power down. A cpu power down may be triggered by cpuidle or hotplug.
- */
- int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
- const cpumask_t *cpus)
- {
- int ret;
- int flags = 0;
- int cpu;
- struct {
- __le32 flags;
- __le32 addr;
- } cmd;
- /*
- * Reassign only if we are switching from hotplug entry point
- * to cpuidle entry point or vice versa.
- */
- for_each_cpu(cpu, cpus) {
- if (entry == qcom_scm_wb[cpu].entry)
- continue;
- flags |= qcom_scm_wb[cpu].flag;
- }
- /* No change in entry function */
- if (!flags)
- return 0;
- cmd.addr = cpu_to_le32(virt_to_phys(entry));
- cmd.flags = cpu_to_le32(flags);
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR,
- &cmd, sizeof(cmd), NULL, 0);
- if (!ret) {
- for_each_cpu(cpu, cpus)
- qcom_scm_wb[cpu].entry = entry;
- }
- return ret;
- }
- /**
- * __qcom_scm_cpu_power_down() - Power down the cpu
- * @flags: Flags to flush cache
- *
- * This is the terminating call used to power down the cpu. If a pending
- * interrupt is detected, control returns from this function; otherwise the
- * cpu jumps to the warm boot entry point set for this cpu on the next reset.
- */
- void __qcom_scm_cpu_power_down(u32 flags)
- {
- qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC,
- flags & QCOM_SCM_FLUSH_FLAG_MASK);
- }
- int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
- {
- int ret;
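- /* Same service/command packing as the buffered call ids built in qcom_scm_call() */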
- __le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id);
- __le32 ret_val = 0;
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD,
- &svc_cmd, sizeof(svc_cmd), &ret_val,
- sizeof(ret_val));
- if (ret)
- return ret;
- return le32_to_cpu(ret_val);
- }
- int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
- u32 req_cnt, u32 *resp)
- {
- if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
- return -ERANGE;
- return qcom_scm_call(dev, QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
- req, req_cnt * sizeof(*req), resp, sizeof(*resp));
- }
- void __qcom_scm_init(void)
- {
- }
- bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
- {
- __le32 out;
- __le32 in;
- int ret;
- in = cpu_to_le32(peripheral);
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PAS_IS_SUPPORTED_CMD,
- &in, sizeof(in),
- &out, sizeof(out));
- return ret ? false : !!out;
- }
- int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
- dma_addr_t metadata_phys)
- {
- __le32 scm_ret;
- int ret;
- struct {
- __le32 proc;
- __le32 image_addr;
- } request;
- request.proc = cpu_to_le32(peripheral);
- request.image_addr = cpu_to_le32(metadata_phys);
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PAS_INIT_IMAGE_CMD,
- &request, sizeof(request),
- &scm_ret, sizeof(scm_ret));
- return ret ? : le32_to_cpu(scm_ret);
- }
- int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
- phys_addr_t addr, phys_addr_t size)
- {
- __le32 scm_ret;
- int ret;
- struct {
- __le32 proc;
- __le32 addr;
- __le32 len;
- } request;
- request.proc = cpu_to_le32(peripheral);
- request.addr = cpu_to_le32(addr);
- request.len = cpu_to_le32(size);
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PAS_MEM_SETUP_CMD,
- &request, sizeof(request),
- &scm_ret, sizeof(scm_ret));
- return ret ? : le32_to_cpu(scm_ret);
- }
- int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
- {
- __le32 out;
- __le32 in;
- int ret;
- in = cpu_to_le32(peripheral);
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
- &in, sizeof(in),
- &out, sizeof(out));
- return ret ? : le32_to_cpu(out);
- }
- int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
- {
- __le32 out;
- __le32 in;
- int ret;
- in = cpu_to_le32(peripheral);
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL,
- QCOM_SCM_PAS_SHUTDOWN_CMD,
- &in, sizeof(in),
- &out, sizeof(out));
- return ret ? : le32_to_cpu(out);
- }
- int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
- {
- __le32 out;
- __le32 in = cpu_to_le32(reset);
- int ret;
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MSS_RESET,
- &in, sizeof(in),
- &out, sizeof(out));
- return ret ? : le32_to_cpu(out);
- }
- int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
- {
- return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_DLOAD_MODE,
- enable ? QCOM_SCM_SET_DLOAD_MODE : 0, 0);
- }
- int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
- {
- struct {
- __le32 state;
- __le32 id;
- } req;
- __le32 scm_ret = 0;
- int ret;
- req.state = cpu_to_le32(state);
- req.id = cpu_to_le32(id);
- ret = qcom_scm_call(dev, QCOM_SCM_SVC_BOOT, QCOM_SCM_SET_REMOTE_STATE,
- &req, sizeof(req), &scm_ret, sizeof(scm_ret));
- return ret ? : le32_to_cpu(scm_ret);
- }
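- /*
- * Memory assignment, secure-configuration restore and the secure page table
- * operations are not implemented for this legacy convention (they are
- * presumably only reachable through the newer SMC interface), so the stubs
- * below simply report -ENODEV.
- */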
- int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
- size_t mem_sz, phys_addr_t src, size_t src_sz,
- phys_addr_t dest, size_t dest_sz)
- {
- return -ENODEV;
- }
- int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
- u32 spare)
- {
- return -ENODEV;
- }
- int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
- size_t *size)
- {
- return -ENODEV;
- }
- int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
- u32 spare)
- {
- return -ENODEV;
- }
- int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
- unsigned int *val)
- {
- int ret;
- ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr);
- if (ret >= 0)
- *val = ret;
- return ret < 0 ? ret : 0;
- }
- int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
- {
- return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
- addr, val);
- }