Diffstat (limited to 'drivers/tee')
28 files changed, 5581 insertions, 124 deletions
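The heaps this series registers through tee_device_register_dma_heap() are exposed to userspace via the standard DMA-BUF heap chardev interface. As orientation for the diffs below, here is a minimal userspace sketch of allocating a protected buffer from such a heap; the heap device path is a placeholder, since the name registered for TEE_DMA_HEAP_SECURE_VIDEO_PLAY is not visible in this diff:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

/*
 * Allocate a DMA-buf from a (hypothetical) TEE-backed heap; returns the
 * dma-buf fd on success or -1 on error. The CPU must not map the returned
 * buffer since the memory is protected from normal world access.
 */
static int alloc_protected_dmabuf(const char *heap_path, size_t len)
{
	struct dma_heap_allocation_data data;
	int heap_fd, ret;

	heap_fd = open(heap_path, O_RDONLY | O_CLOEXEC);
	if (heap_fd < 0)
		return -1;

	memset(&data, 0, sizeof(data));
	data.len = len;				/* requested buffer size */
	data.fd_flags = O_RDWR | O_CLOEXEC;	/* flags for the new dma-buf fd */

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	close(heap_fd);
	if (ret < 0)
		return -1;

	return data.fd;
}

The returned fd can then be passed to a device (for example a video decoder) like any other DMA-buf; only the heap name under /dev/dma_heap/ depends on what the driver registers.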
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig index 61b507c18780..98c3ad083940 100644 --- a/drivers/tee/Kconfig +++ b/drivers/tee/Kconfig @@ -3,8 +3,7 @@ menuconfig TEE tristate "Trusted Execution Environment support" depends on HAVE_ARM_SMCCC || COMPILE_TEST || CPU_SUP_AMD - select CRYPTO - select CRYPTO_SHA1 + select CRYPTO_LIB_SHA1 select DMA_SHARED_BUFFER select GENERIC_ALLOCATOR help @@ -13,8 +12,14 @@ menuconfig TEE if TEE +config TEE_DMABUF_HEAPS + bool + depends on HAS_DMA && DMABUF_HEAPS + default y + source "drivers/tee/optee/Kconfig" source "drivers/tee/amdtee/Kconfig" source "drivers/tee/tstee/Kconfig" +source "drivers/tee/qcomtee/Kconfig" endif diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile index 5488cba30bd2..3239b91dee96 100644 --- a/drivers/tee/Makefile +++ b/drivers/tee/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_TEE) += tee.o tee-objs += tee_core.o +tee-objs += tee_heap.o tee-objs += tee_shm.o tee-objs += tee_shm_pool.o obj-$(CONFIG_OPTEE) += optee/ obj-$(CONFIG_AMDTEE) += amdtee/ obj-$(CONFIG_ARM_TSTEE) += tstee/ +obj-$(CONFIG_QCOMTEE) += qcomtee/ diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig index 7bb7990d0b07..50d2051f7f20 100644 --- a/drivers/tee/optee/Kconfig +++ b/drivers/tee/optee/Kconfig @@ -25,3 +25,8 @@ config OPTEE_INSECURE_LOAD_IMAGE Additional documentation on kernel security risks are at Documentation/tee/op-tee.rst. + +config OPTEE_STATIC_PROTMEM_POOL + bool + depends on HAS_IOMEM && TEE_DMABUF_HEAPS + default y diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile index a6eff388d300..ad7049c1c107 100644 --- a/drivers/tee/optee/Makefile +++ b/drivers/tee/optee/Makefile @@ -4,6 +4,7 @@ optee-objs += core.o optee-objs += call.o optee-objs += notif.o optee-objs += rpc.o +optee-objs += protmem.o optee-objs += supp.o optee-objs += device.o optee-objs += smc_abi.o diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index c75fddc83576..5b62139714ce 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -56,6 +56,13 @@ int optee_rpmb_intf_rdev(struct notifier_block *intf, unsigned long action, return 0; } +int optee_set_dma_mask(struct optee *optee, u_int pa_width) +{ + u64 mask = DMA_BIT_MASK(min(64, pa_width)); + + return dma_coerce_mask_and_coherent(&optee->teedev->dev, mask); +} + static void optee_bus_scan(struct work_struct *work) { WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP)); @@ -72,7 +79,7 @@ static ssize_t rpmb_routing_model_show(struct device *dev, else s = "user"; - return scnprintf(buf, PAGE_SIZE, "%s\n", s); + return sysfs_emit(buf, "%s\n", s); } static DEVICE_ATTR_RO(rpmb_routing_model); diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index a963eed70c1d..bf8390789ecf 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -649,6 +649,124 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx, return optee_ffa_yielding_call(ctx, &data, rpc_arg, system_thread); } +static int do_call_lend_protmem(struct optee *optee, u64 cookie, u32 use_case) +{ + struct optee_shm_arg_entry *entry; + struct optee_msg_arg *msg_arg; + struct tee_shm *shm; + u_int offs; + int rc; + + msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs); + if (IS_ERR(msg_arg)) + return PTR_ERR(msg_arg); + + msg_arg->cmd = OPTEE_MSG_CMD_ASSIGN_PROTMEM; + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; + msg_arg->params[0].u.value.a = cookie; + msg_arg->params[0].u.value.b = use_case; + + 
rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false); + if (rc) + goto out; + if (msg_arg->ret != TEEC_SUCCESS) { + rc = -EINVAL; + goto out; + } + +out: + optee_free_msg_arg(optee->ctx, entry, offs); + return rc; +} + +static int optee_ffa_lend_protmem(struct optee *optee, struct tee_shm *protmem, + u32 *mem_attrs, unsigned int ma_count, + u32 use_case) +{ + struct ffa_device *ffa_dev = optee->ffa.ffa_dev; + const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops; + const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops; + struct ffa_send_direct_data data; + struct ffa_mem_region_attributes *mem_attr; + struct ffa_mem_ops_args args = { + .use_txbuf = true, + .tag = use_case, + }; + struct page *page; + struct scatterlist sgl; + unsigned int n; + int rc; + + mem_attr = kcalloc(ma_count, sizeof(*mem_attr), GFP_KERNEL); + if (!mem_attr) + return -ENOMEM; + + for (n = 0; n < ma_count; n++) { + mem_attr[n].receiver = mem_attrs[n] & U16_MAX; + mem_attr[n].attrs = mem_attrs[n] >> 16; + } + args.attrs = mem_attr; + args.nattrs = ma_count; + + page = phys_to_page(protmem->paddr); + sg_init_table(&sgl, 1); + sg_set_page(&sgl, page, protmem->size, 0); + + args.sg = &sgl; + rc = mem_ops->memory_lend(&args); + kfree(mem_attr); + if (rc) + return rc; + + rc = do_call_lend_protmem(optee, args.g_handle, use_case); + if (rc) + goto err_reclaim; + + rc = optee_shm_add_ffa_handle(optee, protmem, args.g_handle); + if (rc) + goto err_unreg; + + protmem->sec_world_id = args.g_handle; + + return 0; + +err_unreg: + data = (struct ffa_send_direct_data){ + .data0 = OPTEE_FFA_RELEASE_PROTMEM, + .data1 = (u32)args.g_handle, + .data2 = (u32)(args.g_handle >> 32), + }; + msg_ops->sync_send_receive(ffa_dev, &data); +err_reclaim: + mem_ops->memory_reclaim(args.g_handle, 0); + return rc; +} + +static int optee_ffa_reclaim_protmem(struct optee *optee, + struct tee_shm *protmem) +{ + struct ffa_device *ffa_dev = optee->ffa.ffa_dev; + const struct ffa_msg_ops *msg_ops = ffa_dev->ops->msg_ops; + const struct ffa_mem_ops *mem_ops = ffa_dev->ops->mem_ops; + u64 global_handle = protmem->sec_world_id; + struct ffa_send_direct_data data = { + .data0 = OPTEE_FFA_RELEASE_PROTMEM, + .data1 = (u32)global_handle, + .data2 = (u32)(global_handle >> 32) + }; + int rc; + + optee_shm_rem_ffa_handle(optee, global_handle); + protmem->sec_world_id = 0; + + rc = msg_ops->sync_send_receive(ffa_dev, &data); + if (rc) + pr_err("Release SHM id 0x%llx rc %d\n", global_handle, rc); + + rc = mem_ops->memory_reclaim(global_handle, 0); + if (rc) + pr_err("mem_reclaim: 0x%llx %d\n", global_handle, rc); + + return rc; +} + /* * 6.
Driver initialization * @@ -819,6 +937,8 @@ static const struct optee_ops optee_ffa_ops = { .do_call_with_arg = optee_ffa_do_call_with_arg, .to_msg_param = optee_ffa_to_msg_param, .from_msg_param = optee_ffa_from_msg_param, + .lend_protmem = optee_ffa_lend_protmem, + .reclaim_protmem = optee_ffa_reclaim_protmem, }; static void optee_ffa_remove(struct ffa_device *ffa_dev) @@ -891,6 +1011,25 @@ err: return rc; } +static int optee_ffa_protmem_pool_init(struct optee *optee, u32 sec_caps) +{ + enum tee_dma_heap_id id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY; + struct tee_protmem_pool *pool; + int rc = 0; + + if (sec_caps & OPTEE_FFA_SEC_CAP_PROTMEM) { + pool = optee_protmem_alloc_dyn_pool(optee, id); + if (IS_ERR(pool)) + return PTR_ERR(pool); + + rc = tee_device_register_dma_heap(optee->teedev, id, pool); + if (rc) + pool->ops->destroy_pool(pool); + } + + return rc; +} + static int optee_ffa_probe(struct ffa_device *ffa_dev) { const struct ffa_notifier_ops *notif_ops; @@ -941,7 +1080,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) optee); if (IS_ERR(teedev)) { rc = PTR_ERR(teedev); - goto err_free_pool; + goto err_free_shm_pool; } optee->teedev = teedev; @@ -988,6 +1127,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev) rc); } + if (optee_ffa_protmem_pool_init(optee, sec_caps)) + pr_info("Protected memory service not available\n"); + rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES); if (rc) goto err_unregister_devices; @@ -1018,7 +1160,7 @@ err_unreg_supp_teedev: tee_device_unregister(optee->supp_teedev); err_unreg_teedev: tee_device_unregister(optee->teedev); -err_free_pool: +err_free_shm_pool: tee_shm_pool_free(pool); err_free_optee: kfree(optee); diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h index 257735ae5b56..cc257e7956a3 100644 --- a/drivers/tee/optee/optee_ffa.h +++ b/drivers/tee/optee/optee_ffa.h @@ -81,7 +81,7 @@ * as the second MSG arg struct for * OPTEE_FFA_YIELDING_CALL_WITH_ARG. * Bit[31:8]: Reserved (MBZ) - * w5: Bitfield of secure world capabilities OPTEE_FFA_SEC_CAP_* below, + * w5: Bitfield of OP-TEE capabilities OPTEE_FFA_SEC_CAP_* * w6: The maximum secure world notification number * w7: Not used (MBZ) */ @@ -94,6 +94,8 @@ #define OPTEE_FFA_SEC_CAP_ASYNC_NOTIF BIT(1) /* OP-TEE supports probing for RPMB device if needed */ #define OPTEE_FFA_SEC_CAP_RPMB_PROBE BIT(2) +/* OP-TEE supports Protected Memory for secure data path */ +#define OPTEE_FFA_SEC_CAP_PROTMEM BIT(3) #define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2) @@ -108,7 +110,7 @@ * * Return register usage: * w3: Error code, 0 on success - * w4-w7: Note used (MBZ) + * w4-w7: Not used (MBZ) */ #define OPTEE_FFA_UNREGISTER_SHM OPTEE_FFA_BLOCKING_CALL(3) @@ -119,16 +121,31 @@ * Call register usage: * w3: Service ID, OPTEE_FFA_ENABLE_ASYNC_NOTIF * w4: Notification value to request bottom half processing, should be - * less than OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE. 
+ * less than OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE * w5-w7: Not used (MBZ) * * Return register usage: * w3: Error code, 0 on success - * w4-w7: Note used (MBZ) + * w4-w7: Not used (MBZ) */ #define OPTEE_FFA_ENABLE_ASYNC_NOTIF OPTEE_FFA_BLOCKING_CALL(5) -#define OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE 64 +#define OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE 64 + +/* + * Release protected memory + * + * Call register usage: + * w3: Service ID, OPTEE_FFA_RELEASE_PROTMEM + * w4: Shared memory handle, lower bits + * w5: Shared memory handle, higher bits + * w6-w7: Not used (MBZ) + * + * Return register usage: + * w3: Error code, 0 on success + * w4-w7: Not used (MBZ) + */ +#define OPTEE_FFA_RELEASE_PROTMEM OPTEE_FFA_BLOCKING_CALL(8) /* * Call with struct optee_msg_arg as argument in the supplied shared memory diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h index e8840a82b983..838e1d4a22f0 100644 --- a/drivers/tee/optee/optee_msg.h +++ b/drivers/tee/optee/optee_msg.h @@ -133,13 +133,13 @@ struct optee_msg_param_rmem { }; /** - * struct optee_msg_param_fmem - ffa memory reference parameter + * struct optee_msg_param_fmem - FF-A memory reference parameter * @offs_lower: Lower bits of offset into shared memory reference * @offs_upper: Upper bits of offset into shared memory reference * @internal_offs: Internal offset into the first page of shared memory * reference * @size: Size of the buffer - * @global_id: Global identifier of Shared memory + * @global_id: Global identifier of the shared memory */ struct optee_msg_param_fmem { u32 offs_low; @@ -165,7 +165,7 @@ struct optee_msg_param_value { * @attr: attributes * @tmem: parameter by temporary memory reference * @rmem: parameter by registered memory reference - * @fmem: parameter by ffa registered memory reference + * @fmem: parameter by FF-A registered memory reference * @value: parameter by opaque value * @octets: parameter by octet string * @@ -297,6 +297,18 @@ struct optee_msg_arg { #define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001 /* + * Values used in OPTEE_MSG_CMD_LEND_PROTMEM below + * OPTEE_MSG_PROTMEM_RESERVED Reserved + * OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY Secure Video Playback + * OPTEE_MSG_PROTMEM_TRUSTED_UI Trusted UI + * OPTEE_MSG_PROTMEM_SECURE_VIDEO_RECORD Secure Video Recording + */ +#define OPTEE_MSG_PROTMEM_RESERVED 0 +#define OPTEE_MSG_PROTMEM_SECURE_VIDEO_PLAY 1 +#define OPTEE_MSG_PROTMEM_TRUSTED_UI 2 +#define OPTEE_MSG_PROTMEM_SECURE_VIDEO_RECORD 3 + +/* * Do a secure call with struct optee_msg_arg as argument * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd * @@ -337,15 +349,63 @@ struct optee_msg_arg { * OPTEE_MSG_CMD_STOP_ASYNC_NOTIF informs secure world that from now is * normal world unable to process asynchronous notifications. Typically * used when the driver is shut down. + * + * OPTEE_MSG_CMD_LEND_PROTMEM lends protected memory. The passed normal + * physical memory is protected from normal world access. The memory + * should be unmapped prior to this call since it becomes inaccessible + * during the request. + * Parameters are passed as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + * [in] param[0].u.value.a OPTEE_MSG_PROTMEM_* defined above + * [in] param[1].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + * [in] param[1].u.tmem.buf_ptr physical address + * [in] param[1].u.tmem.size size + * [in] param[1].u.tmem.shm_ref holds protected memory reference + * + * OPTEE_MSG_CMD_RECLAIM_PROTMEM reclaims a previously lent protected + * memory reference.
The physical memory is accessible by the normal world + * after this function has returned and can be mapped again. The information + * is passed as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + * [in] param[0].u.value.a holds protected memory cookie + * + * OPTEE_MSG_CMD_GET_PROTMEM_CONFIG gets the configuration for a specific + * protected memory use case. Parameters are passed as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INOUT + * [in] param[0].value.a OPTEE_MSG_PROTMEM_* + * [in] param[1].attr OPTEE_MSG_ATTR_TYPE_{R,F}MEM_OUTPUT + * [in] param[1].u.{r,f}mem Buffer or NULL + * [in] param[1].u.{r,f}mem.size Provided size of buffer or 0 for query + * output for the protected use case: + * [out] param[0].value.a Minimal size of protected memory + * [out] param[0].value.b Required alignment of size and start of + * protected memory + * [out] param[0].value.c PA width, max 64 + * [out] param[1].{r,f}mem.size Size of output data + * [out] param[1].{r,f}mem If non-NULL, contains an array of + * uint32_t memory attributes that must be + * included when lending memory for this + * use case + * + * OPTEE_MSG_CMD_ASSIGN_PROTMEM assigns a use case to protected memory + * previously lent using the FFA_LEND framework ABI. Parameters are passed + * as: + * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + * [in] param[0].u.value.a holds protected memory cookie + * [in] param[0].u.value.b OPTEE_MSG_PROTMEM_* defined above */ -#define OPTEE_MSG_CMD_OPEN_SESSION 0 -#define OPTEE_MSG_CMD_INVOKE_COMMAND 1 -#define OPTEE_MSG_CMD_CLOSE_SESSION 2 -#define OPTEE_MSG_CMD_CANCEL 3 -#define OPTEE_MSG_CMD_REGISTER_SHM 4 -#define OPTEE_MSG_CMD_UNREGISTER_SHM 5 -#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6 -#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7 -#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004 +#define OPTEE_MSG_CMD_OPEN_SESSION 0 +#define OPTEE_MSG_CMD_INVOKE_COMMAND 1 +#define OPTEE_MSG_CMD_CLOSE_SESSION 2 +#define OPTEE_MSG_CMD_CANCEL 3 +#define OPTEE_MSG_CMD_REGISTER_SHM 4 +#define OPTEE_MSG_CMD_UNREGISTER_SHM 5 +#define OPTEE_MSG_CMD_DO_BOTTOM_HALF 6 +#define OPTEE_MSG_CMD_STOP_ASYNC_NOTIF 7 +#define OPTEE_MSG_CMD_LEND_PROTMEM 8 +#define OPTEE_MSG_CMD_RECLAIM_PROTMEM 9 +#define OPTEE_MSG_CMD_GET_PROTMEM_CONFIG 10 +#define OPTEE_MSG_CMD_ASSIGN_PROTMEM 11 +#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004 #endif /* _OPTEE_MSG_H */ diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index 9526087f0e68..db9ea673fbca 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -176,9 +176,14 @@ struct optee; * @do_call_with_arg: enters OP-TEE in secure world * @to_msg_param: converts from struct tee_param to OPTEE_MSG parameters * @from_msg_param: converts from OPTEE_MSG parameters to struct tee_param + * @lend_protmem: lends physically contiguous memory as restricted + * memory, inaccessible by the kernel + * @reclaim_protmem: reclaims restricted memory previously lent with + * @lend_protmem() and makes it accessible by the + * kernel again * * These OPs are only supposed to be used internally in the OP-TEE driver - * as a way of abstracting the different methogs of entering OP-TEE in + * as a way of abstracting the different methods of entering OP-TEE in secure world.
*/ struct optee_ops { @@ -191,6 +196,10 @@ struct optee_ops { int (*from_msg_param)(struct optee *optee, struct tee_param *params, size_t num_params, const struct optee_msg_param *msg_params); + int (*lend_protmem)(struct optee *optee, struct tee_shm *protmem, + u32 *mem_attr, unsigned int ma_count, + u32 use_case); + int (*reclaim_protmem)(struct optee *optee, struct tee_shm *protmem); }; /** @@ -274,6 +283,8 @@ struct optee_call_ctx { extern struct blocking_notifier_head optee_rpmb_intf_added; +int optee_set_dma_mask(struct optee *optee, u_int pa_width); + int optee_notif_init(struct optee *optee, u_int max_key); void optee_notif_uninit(struct optee *optee); int optee_notif_wait(struct optee *optee, u_int key, u32 timeout); @@ -285,6 +296,8 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params, void optee_supp_init(struct optee_supp *supp); void optee_supp_uninit(struct optee_supp *supp); void optee_supp_release(struct optee_supp *supp); +struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee, + enum tee_dma_heap_id id); int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params, struct tee_param *param); diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h index 879426300821..accf76a99288 100644 --- a/drivers/tee/optee/optee_smc.h +++ b/drivers/tee/optee/optee_smc.h @@ -264,7 +264,6 @@ struct optee_smc_get_shm_config_result { #define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0) /* Secure world can communicate via previously unregistered shared memory */ #define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1) - /* * Secure world supports commands "register/unregister shared memory", * secure world accepts command buffers located in any parts of non-secure RAM @@ -280,6 +279,10 @@ struct optee_smc_get_shm_config_result { #define OPTEE_SMC_SEC_CAP_RPC_ARG BIT(6) /* Secure world supports probing for RPMB device if needed */ #define OPTEE_SMC_SEC_CAP_RPMB_PROBE BIT(7) +/* Secure world supports protected memory */ +#define OPTEE_SMC_SEC_CAP_PROTMEM BIT(8) +/* Secure world supports dynamic protected memory */ +#define OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM BIT(9) #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \ @@ -451,6 +454,38 @@ struct optee_smc_disable_shm_cache_result { /* See OPTEE_SMC_CALL_WITH_REGD_ARG above */ #define OPTEE_SMC_FUNCID_CALL_WITH_REGD_ARG 19 +/* + * Get protected memory config + * + * Returns the protected memory config. 
+ * + * Call register usage: + * a0 SMC Function ID, OPTEE_SMC_GET_PROTMEM_CONFIG + * a2-6 Not used, must be zero + * a7 Hypervisor Client ID register + * + * Have config return register usage: + * a0 OPTEE_SMC_RETURN_OK + * a1 Physical address of start of protected memory + * a2 Size of protected memory + * a3 PA width, max 64 + * a4-7 Preserved + * + * Not available register usage: + * a0 OPTEE_SMC_RETURN_ENOTAVAIL + * a1-3 Not used + * a4-7 Preserved + */ +#define OPTEE_SMC_FUNCID_GET_PROTMEM_CONFIG 20 +#define OPTEE_SMC_GET_PROTMEM_CONFIG \ + OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_PROTMEM_CONFIG) + +struct optee_smc_get_protmem_config_result { + unsigned long status; + unsigned long start; + unsigned long size; + unsigned long pa_width; +}; /* * Resume from RPC (for example after processing a foreign interrupt) diff --git a/drivers/tee/optee/protmem.c b/drivers/tee/optee/protmem.c new file mode 100644 index 000000000000..2eba48d5ac73 --- /dev/null +++ b/drivers/tee/optee/protmem.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2025, Linaro Limited + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/errno.h> +#include <linux/genalloc.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/tee_core.h> +#include <linux/types.h> +#include "optee_private.h" + +struct optee_protmem_dyn_pool { + struct tee_protmem_pool pool; + struct gen_pool *gen_pool; + struct optee *optee; + size_t page_count; + u32 *mem_attrs; + u_int mem_attr_count; + refcount_t refcount; + u32 use_case; + struct tee_shm *protmem; + /* Protects when initializing and tearing down this struct */ + struct mutex mutex; +}; + +static struct optee_protmem_dyn_pool * +to_protmem_dyn_pool(struct tee_protmem_pool *pool) +{ + return container_of(pool, struct optee_protmem_dyn_pool, pool); +} + +static int init_dyn_protmem(struct optee_protmem_dyn_pool *rp) +{ + int rc; + + rp->protmem = tee_shm_alloc_dma_mem(rp->optee->ctx, rp->page_count); + if (IS_ERR(rp->protmem)) { + rc = PTR_ERR(rp->protmem); + goto err_null_protmem; + } + + /* + * TODO unmap the memory range since the physical memory will + * become inaccessible after the lend_protmem() call. + * + * If the platform supports a hypervisor at EL2, it will unmap the + * intermediate physical memory for us and stop cache pre-fetch of + * the memory. + */ + rc = rp->optee->ops->lend_protmem(rp->optee, rp->protmem, + rp->mem_attrs, + rp->mem_attr_count, rp->use_case); + if (rc) + goto err_put_shm; + rp->protmem->flags |= TEE_SHM_DYNAMIC; + + rp->gen_pool = gen_pool_create(PAGE_SHIFT, -1); + if (!rp->gen_pool) { + rc = -ENOMEM; + goto err_reclaim; + } + + rc = gen_pool_add(rp->gen_pool, rp->protmem->paddr, + rp->protmem->size, -1); + if (rc) + goto err_free_pool; + + refcount_set(&rp->refcount, 1); + return 0; + +err_free_pool: + gen_pool_destroy(rp->gen_pool); + rp->gen_pool = NULL; +err_reclaim: + rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem); +err_put_shm: + tee_shm_put(rp->protmem); +err_null_protmem: + rp->protmem = NULL; + return rc; +} + +static int get_dyn_protmem(struct optee_protmem_dyn_pool *rp) +{ + int rc = 0; + + if (!refcount_inc_not_zero(&rp->refcount)) { + mutex_lock(&rp->mutex); + if (rp->gen_pool) { + /* + * Another thread has already initialized the pool + * before us, or the pool was just about to be torn + * down. Either way we only need to increase the + * refcount and we're done.
+ */ + refcount_inc(&rp->refcount); + } else { + rc = init_dyn_protmem(rp); + } + mutex_unlock(&rp->mutex); + } + + return rc; +} + +static void release_dyn_protmem(struct optee_protmem_dyn_pool *rp) +{ + gen_pool_destroy(rp->gen_pool); + rp->gen_pool = NULL; + + rp->optee->ops->reclaim_protmem(rp->optee, rp->protmem); + rp->protmem->flags &= ~TEE_SHM_DYNAMIC; + + WARN(refcount_read(&rp->protmem->refcount) != 1, "Unexpected refcount"); + tee_shm_put(rp->protmem); + rp->protmem = NULL; +} + +static void put_dyn_protmem(struct optee_protmem_dyn_pool *rp) +{ + if (refcount_dec_and_test(&rp->refcount)) { + mutex_lock(&rp->mutex); + if (rp->gen_pool) + release_dyn_protmem(rp); + mutex_unlock(&rp->mutex); + } +} + +static int protmem_pool_op_dyn_alloc(struct tee_protmem_pool *pool, + struct sg_table *sgt, size_t size, + size_t *offs) +{ + struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool); + size_t sz = ALIGN(size, PAGE_SIZE); + phys_addr_t pa; + int rc; + + rc = get_dyn_protmem(rp); + if (rc) + return rc; + + pa = gen_pool_alloc(rp->gen_pool, sz); + if (!pa) { + rc = -ENOMEM; + goto err_put; + } + + rc = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (rc) + goto err_free; + + sg_set_page(sgt->sgl, phys_to_page(pa), size, 0); + *offs = pa - rp->protmem->paddr; + + return 0; +err_free: + gen_pool_free(rp->gen_pool, pa, size); +err_put: + put_dyn_protmem(rp); + + return rc; +} + +static void protmem_pool_op_dyn_free(struct tee_protmem_pool *pool, + struct sg_table *sgt) +{ + struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool); + struct scatterlist *sg; + int i; + + for_each_sgtable_sg(sgt, sg, i) + gen_pool_free(rp->gen_pool, sg_phys(sg), sg->length); + sg_free_table(sgt); + put_dyn_protmem(rp); +} + +static int protmem_pool_op_dyn_update_shm(struct tee_protmem_pool *pool, + struct sg_table *sgt, size_t offs, + struct tee_shm *shm, + struct tee_shm **parent_shm) +{ + struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool); + + *parent_shm = rp->protmem; + + return 0; +} + +static void pool_op_dyn_destroy_pool(struct tee_protmem_pool *pool) +{ + struct optee_protmem_dyn_pool *rp = to_protmem_dyn_pool(pool); + + mutex_destroy(&rp->mutex); + kfree(rp); +} + +static struct tee_protmem_pool_ops protmem_pool_ops_dyn = { + .alloc = protmem_pool_op_dyn_alloc, + .free = protmem_pool_op_dyn_free, + .update_shm = protmem_pool_op_dyn_update_shm, + .destroy_pool = pool_op_dyn_destroy_pool, +}; + +static int get_protmem_config(struct optee *optee, u32 use_case, + size_t *min_size, u_int *pa_width, + u32 *mem_attrs, u_int *ma_count) +{ + struct tee_param params[2] = { + [0] = { + .attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT, + .u.value.a = use_case, + }, + [1] = { + .attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT, + }, + }; + struct optee_shm_arg_entry *entry; + struct tee_shm *shm_param = NULL; + struct optee_msg_arg *msg_arg; + struct tee_shm *shm; + u_int offs; + int rc; + + if (mem_attrs && *ma_count) { + params[1].u.memref.size = *ma_count * sizeof(*mem_attrs); + shm_param = tee_shm_alloc_priv_buf(optee->ctx, + params[1].u.memref.size); + if (IS_ERR(shm_param)) + return PTR_ERR(shm_param); + params[1].u.memref.shm = shm_param; + } + + msg_arg = optee_get_msg_arg(optee->ctx, ARRAY_SIZE(params), &entry, + &shm, &offs); + if (IS_ERR(msg_arg)) { + rc = PTR_ERR(msg_arg); + goto out_free_shm; + } + msg_arg->cmd = OPTEE_MSG_CMD_GET_PROTMEM_CONFIG; + + rc = optee->ops->to_msg_param(optee, msg_arg->params, + ARRAY_SIZE(params), params); + if (rc) + goto out_free_msg; + + rc = 
optee->ops->do_call_with_arg(optee->ctx, shm, offs, false); + if (rc) + goto out_free_msg; + if (msg_arg->ret && msg_arg->ret != TEEC_ERROR_SHORT_BUFFER) { + rc = -EINVAL; + goto out_free_msg; + } + + rc = optee->ops->from_msg_param(optee, params, ARRAY_SIZE(params), + msg_arg->params); + if (rc) + goto out_free_msg; + + if (!msg_arg->ret && mem_attrs && + *ma_count < params[1].u.memref.size / sizeof(*mem_attrs)) { + rc = -EINVAL; + goto out_free_msg; + } + + *min_size = params[0].u.value.a; + *pa_width = params[0].u.value.c; + *ma_count = params[1].u.memref.size / sizeof(*mem_attrs); + + if (msg_arg->ret == TEEC_ERROR_SHORT_BUFFER) { + rc = -ENOSPC; + goto out_free_msg; + } + + if (mem_attrs) + memcpy(mem_attrs, tee_shm_get_va(shm_param, 0), + params[1].u.memref.size); + +out_free_msg: + optee_free_msg_arg(optee->ctx, entry, offs); +out_free_shm: + if (shm_param) + tee_shm_free(shm_param); + return rc; +} + +struct tee_protmem_pool *optee_protmem_alloc_dyn_pool(struct optee *optee, + enum tee_dma_heap_id id) +{ + struct optee_protmem_dyn_pool *rp; + size_t min_size; + u_int pa_width; + int rc; + + rp = kzalloc(sizeof(*rp), GFP_KERNEL); + if (!rp) + return ERR_PTR(-ENOMEM); + rp->use_case = id; + + rc = get_protmem_config(optee, id, &min_size, &pa_width, NULL, + &rp->mem_attr_count); + if (rc) { + if (rc != -ENOSPC) + goto err; + rp->mem_attrs = kcalloc(rp->mem_attr_count, + sizeof(*rp->mem_attrs), GFP_KERNEL); + if (!rp->mem_attrs) { + rc = -ENOMEM; + goto err; + } + rc = get_protmem_config(optee, id, &min_size, &pa_width, + rp->mem_attrs, &rp->mem_attr_count); + if (rc) + goto err_kfree_eps; + } + + rc = optee_set_dma_mask(optee, pa_width); + if (rc) + goto err_kfree_eps; + + rp->pool.ops = &protmem_pool_ops_dyn; + rp->optee = optee; + rp->page_count = min_size / PAGE_SIZE; + mutex_init(&rp->mutex); + + return &rp->pool; + +err_kfree_eps: + kfree(rp->mem_attrs); +err: + kfree(rp); + return ERR_PTR(rc); +} diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 26f8f7bbbe56..0be663fcd52b 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -965,6 +965,70 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx, return rc; } +static int optee_smc_lend_protmem(struct optee *optee, struct tee_shm *protmem, + u32 *mem_attrs, unsigned int ma_count, + u32 use_case) +{ + struct optee_shm_arg_entry *entry; + struct optee_msg_arg *msg_arg; + struct tee_shm *shm; + u_int offs; + int rc; + + msg_arg = optee_get_msg_arg(optee->ctx, 2, &entry, &shm, &offs); + if (IS_ERR(msg_arg)) + return PTR_ERR(msg_arg); + + msg_arg->cmd = OPTEE_MSG_CMD_LEND_PROTMEM; + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT; + msg_arg->params[0].u.value.a = use_case; + msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT; + msg_arg->params[1].u.tmem.buf_ptr = protmem->paddr; + msg_arg->params[1].u.tmem.size = protmem->size; + msg_arg->params[1].u.tmem.shm_ref = (u_long)protmem; + + rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false); + if (rc) + goto out; + if (msg_arg->ret != TEEC_SUCCESS) { + rc = -EINVAL; + goto out; + } + protmem->sec_world_id = (u_long)protmem; + +out: + optee_free_msg_arg(optee->ctx, entry, offs); + return rc; +} + +static int optee_smc_reclaim_protmem(struct optee *optee, + struct tee_shm *protmem) +{ + struct optee_shm_arg_entry *entry; + struct optee_msg_arg *msg_arg; + struct tee_shm *shm; + u_int offs; + int rc; + + msg_arg = optee_get_msg_arg(optee->ctx, 1, &entry, &shm, &offs); + if (IS_ERR(msg_arg)) + return 
PTR_ERR(msg_arg); + + msg_arg->cmd = OPTEE_MSG_CMD_RECLAIM_PROTMEM; + msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT; + msg_arg->params[0].u.rmem.shm_ref = (u_long)protmem; + + rc = optee->ops->do_call_with_arg(optee->ctx, shm, offs, false); + if (rc) + goto out; + if (msg_arg->ret != TEEC_SUCCESS) + rc = -EINVAL; + +out: + optee_free_msg_arg(optee->ctx, entry, offs); + return rc; +} + /* * 5. Asynchronous notification */ @@ -1216,6 +1280,8 @@ static const struct optee_ops optee_ops = { .do_call_with_arg = optee_smc_do_call_with_arg, .to_msg_param = optee_to_msg_param, .from_msg_param = optee_from_msg_param, + .lend_protmem = optee_smc_lend_protmem, + .reclaim_protmem = optee_smc_reclaim_protmem, }; static int enable_async_notif(optee_invoke_fn *invoke_fn) @@ -1583,6 +1649,74 @@ static inline int optee_load_fw(struct platform_device *pdev, } #endif +static struct tee_protmem_pool *static_protmem_pool_init(struct optee *optee) +{ +#if IS_ENABLED(CONFIG_OPTEE_STATIC_PROTMEM_POOL) + union { + struct arm_smccc_res smccc; + struct optee_smc_get_protmem_config_result result; + } res; + struct tee_protmem_pool *pool; + void *p; + int rc; + + optee->smc.invoke_fn(OPTEE_SMC_GET_PROTMEM_CONFIG, 0, 0, 0, 0, + 0, 0, 0, &res.smccc); + if (res.result.status != OPTEE_SMC_RETURN_OK) + return ERR_PTR(-EINVAL); + + rc = optee_set_dma_mask(optee, res.result.pa_width); + if (rc) + return ERR_PTR(rc); + + /* + * Map the memory as uncached to make sure the kernel can work with + * __pfn_to_page() and friends since that's needed when passing the + * protected DMA-buf to a device. The memory should otherwise not + * be touched by the kernel since it's likely to cause an external + * abort due to the protection status. + */ + p = devm_memremap(&optee->teedev->dev, res.result.start, + res.result.size, MEMREMAP_WC); + if (IS_ERR(p)) + return p; + + pool = tee_protmem_static_pool_alloc(res.result.start, res.result.size); + if (IS_ERR(pool)) + devm_memunmap(&optee->teedev->dev, p); + + return pool; +#else + return ERR_PTR(-EINVAL); +#endif +} + +static int optee_protmem_pool_init(struct optee *optee) +{ + bool protm = optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_PROTMEM; + bool dyn_protm = optee->smc.sec_caps & + OPTEE_SMC_SEC_CAP_DYNAMIC_PROTMEM; + enum tee_dma_heap_id heap_id = TEE_DMA_HEAP_SECURE_VIDEO_PLAY; + struct tee_protmem_pool *pool = ERR_PTR(-EINVAL); + int rc = -EINVAL; + + if (!protm && !dyn_protm) + return 0; + + if (protm) + pool = static_protmem_pool_init(optee); + if (dyn_protm && IS_ERR(pool)) + pool = optee_protmem_alloc_dyn_pool(optee, heap_id); + if (IS_ERR(pool)) + return PTR_ERR(pool); + + rc = tee_device_register_dma_heap(optee->teedev, heap_id, pool); + if (rc) + pool->ops->destroy_pool(pool); + + return rc; +} + static int optee_probe(struct platform_device *pdev) { optee_invoke_fn *invoke_fn; @@ -1678,7 +1812,7 @@ static int optee_probe(struct platform_device *pdev) optee = kzalloc(sizeof(*optee), GFP_KERNEL); if (!optee) { rc = -ENOMEM; - goto err_free_pool; + goto err_free_shm_pool; } optee->ops = &optee_ops; @@ -1751,6 +1885,9 @@ static int optee_probe(struct platform_device *pdev) pr_info("Asynchronous notifications enabled\n"); } + if (optee_protmem_pool_init(optee)) + pr_info("Protected memory service not available\n"); + /* * Ensure that there are no pre-existing shm objects before enabling * the shm cache so that there's no chance of receiving an invalid @@ -1802,7 +1939,7 @@ err_unreg_teedev: tee_device_unregister(optee->teedev); err_free_optee: kfree(optee); -err_free_pool: 
+err_free_shm_pool: tee_shm_pool_free(pool); if (memremaped_shm) memunmap(memremaped_shm); diff --git a/drivers/tee/qcomtee/Kconfig b/drivers/tee/qcomtee/Kconfig new file mode 100644 index 000000000000..927686abceb1 --- /dev/null +++ b/drivers/tee/qcomtee/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Qualcomm Trusted Execution Environment Configuration +config QCOMTEE + tristate "Qualcomm TEE Support" + depends on !CPU_BIG_ENDIAN + select QCOM_SCM + select QCOM_TZMEM_MODE_SHMBRIDGE + help + This option enables the Qualcomm Trusted Execution Environment (QTEE) + driver. It provides an API to access services offered by QTEE and + its loaded Trusted Applications (TAs). Additionally, it facilitates + the export of userspace services provided by supplicants to QTEE. diff --git a/drivers/tee/qcomtee/Makefile b/drivers/tee/qcomtee/Makefile new file mode 100644 index 000000000000..7c466c9f32af --- /dev/null +++ b/drivers/tee/qcomtee/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_QCOMTEE) += qcomtee.o +qcomtee-objs += async.o +qcomtee-objs += call.o +qcomtee-objs += core.o +qcomtee-objs += mem_obj.o +qcomtee-objs += primordial_obj.o +qcomtee-objs += shm.o +qcomtee-objs += user_obj.o diff --git a/drivers/tee/qcomtee/async.c b/drivers/tee/qcomtee/async.c new file mode 100644 index 000000000000..31bff4309e67 --- /dev/null +++ b/drivers/tee/qcomtee/async.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include "qcomtee.h" + +#define QCOMTEE_ASYNC_VERSION_1_0 0x00010000U /* Maj: 0x0001, Min: 0x0000. */ +#define QCOMTEE_ASYNC_VERSION_1_1 0x00010001U /* Maj: 0x0001, Min: 0x0001. */ +#define QCOMTEE_ASYNC_VERSION_1_2 0x00010002U /* Maj: 0x0001, Min: 0x0002. */ +#define QCOMTEE_ASYNC_VERSION_CURRENT QCOMTEE_ASYNC_VERSION_1_2 + +#define QCOMTEE_ASYNC_VERSION_MAJOR(n) upper_16_bits(n) +#define QCOMTEE_ASYNC_VERSION_MINOR(n) lower_16_bits(n) + +#define QCOMTEE_ASYNC_VERSION_CURRENT_MAJOR \ + QCOMTEE_ASYNC_VERSION_MAJOR(QCOMTEE_ASYNC_VERSION_CURRENT) +#define QCOMTEE_ASYNC_VERSION_CURRENT_MINOR \ + QCOMTEE_ASYNC_VERSION_MINOR(QCOMTEE_ASYNC_VERSION_CURRENT) + +/** + * struct qcomtee_async_msg_hdr - Asynchronous message header format. + * @version: current async protocol version of the remote endpoint. + * @op: async operation. + * + * @version specifies the endpoint's (QTEE or driver) supported async protocol. + * For example, if QTEE sets @version to %QCOMTEE_ASYNC_VERSION_1_1, QTEE + * handles operations supported in %QCOMTEE_ASYNC_VERSION_1_1 or + * %QCOMTEE_ASYNC_VERSION_1_0. @op determines the message format. + */ +struct qcomtee_async_msg_hdr { + u32 version; + u32 op; +}; + +/* Size of an empty async message. */ +#define QCOMTEE_ASYNC_MSG_ZERO sizeof(struct qcomtee_async_msg_hdr) + +/** + * struct qcomtee_async_release_msg - Release asynchronous message. + * @hdr: message header as &struct qcomtee_async_msg_hdr. + * @counts: number of objects in @object_ids. + * @object_ids: array of object IDs that should be released. + * + * Available in Maj = 0x0001, Min >= 0x0000. + */ +struct qcomtee_async_release_msg { + struct qcomtee_async_msg_hdr hdr; + u32 counts; + u32 object_ids[] __counted_by(counts); +}; + +/** + * qcomtee_get_async_buffer() - Get the start of the asynchronous message. + * @oic: context used for the current invocation. + * @async_buffer: return buffer to extract from or fill in async messages. 
+ * + * If @oic is used for direct object invocation, the whole outbound buffer + * is available for the async message. If @oic is used for a callback request, + * the tail of the outbound buffer (after the callback request message) is + * available for the async message. + * + * The start of the async buffer is aligned, see qcomtee_msg_offset_align(). + */ +static void qcomtee_get_async_buffer(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_buffer *async_buffer) +{ + struct qcomtee_msg_callback *msg; + unsigned int offset; + int i; + + if (!(oic->flags & QCOMTEE_OIC_FLAG_BUSY)) { + /* The outbound buffer is empty. Using the whole buffer. */ + offset = 0; + } else { + msg = (struct qcomtee_msg_callback *)oic->out_msg.addr; + + /* Start offset in a message for buffer arguments. */ + offset = qcomtee_msg_buffer_args(struct qcomtee_msg_callback, + qcomtee_msg_args(msg)); + + /* Add size of IB arguments. */ + qcomtee_msg_for_each_input_buffer(i, msg) + offset += qcomtee_msg_offset_align(msg->args[i].b.size); + + /* Add size of OB arguments. */ + qcomtee_msg_for_each_output_buffer(i, msg) + offset += qcomtee_msg_offset_align(msg->args[i].b.size); + } + + async_buffer->addr = oic->out_msg.addr + offset; + async_buffer->size = oic->out_msg.size - offset; +} + +/** + * async_release() - Process QTEE async release requests. + * @oic: context used for the current invocation. + * @async_msg: async message for object release. + * @size: size of the async buffer available. + * + * Return: Size of the outbound buffer used when processing @async_msg. + */ +static size_t async_release(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_async_msg_hdr *async_msg, + size_t size) +{ + struct qcomtee_async_release_msg *msg; + struct qcomtee_object *object; + int i; + + msg = (struct qcomtee_async_release_msg *)async_msg; + + for (i = 0; i < msg->counts; i++) { + object = qcomtee_idx_erase(oic, msg->object_ids[i]); + qcomtee_object_put(object); + } + + return struct_size(msg, object_ids, msg->counts); +} + +/** + * qcomtee_fetch_async_reqs() - Fetch and process asynchronous messages. + * @oic: context used for the current invocation. + * + * Calls handlers to process the requested operations in the async message. + * Currently, only supports async release requests. + */ +void qcomtee_fetch_async_reqs(struct qcomtee_object_invoke_ctx *oic) +{ + struct qcomtee_async_msg_hdr *async_msg; + struct qcomtee_buffer async_buffer; + size_t consumed, used = 0; + u16 major_ver; + + qcomtee_get_async_buffer(oic, &async_buffer); + + while (async_buffer.size - used > QCOMTEE_ASYNC_MSG_ZERO) { + async_msg = (struct qcomtee_async_msg_hdr *)(async_buffer.addr + + used); + /* + * QTEE assumes that the unused space of the async buffer is + * zeroed; so if version is zero, the buffer is unused. + */ + if (async_msg->version == 0) + goto out; + + major_ver = QCOMTEE_ASYNC_VERSION_MAJOR(async_msg->version); + /* Major version mismatch is a compatibility break. */ + if (major_ver != QCOMTEE_ASYNC_VERSION_CURRENT_MAJOR) { + pr_err("Async message version mismatch (%u != %u)\n", + major_ver, QCOMTEE_ASYNC_VERSION_CURRENT_MAJOR); + + goto out; + } + + switch (async_msg->op) { + case QCOMTEE_MSG_OBJECT_OP_RELEASE: + consumed = async_release(oic, async_msg, + async_buffer.size - used); + break; + default: + pr_err("Unsupported async message %u\n", async_msg->op); + goto out; + } + + /* Supported operation but unable to parse the message.
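A handler reports this by returning zero.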
*/ + if (!consumed) { + pr_err("Unable to parse async message for op %u\n", + async_msg->op); + goto out; + } + + /* Next async message. */ + used += qcomtee_msg_offset_align(consumed); + } + +out: + /* Reset the async buffer so async requests do not loop to QTEE. */ + memzero_explicit(async_buffer.addr, async_buffer.size); +} diff --git a/drivers/tee/qcomtee/call.c b/drivers/tee/qcomtee/call.c new file mode 100644 index 000000000000..cc17a48d0ab7 --- /dev/null +++ b/drivers/tee/qcomtee/call.c @@ -0,0 +1,820 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/slab.h> +#include <linux/tee.h> +#include <linux/platform_device.h> +#include <linux/xarray.h> + +#include "qcomtee.h" + +static int find_qtee_object(struct qcomtee_object **object, unsigned long id, + struct qcomtee_context_data *ctxdata) +{ + int err = 0; + + guard(rcu)(); + /* Object release is RCU protected. */ + *object = idr_find(&ctxdata->qtee_objects_idr, id); + if (!qcomtee_object_get(*object)) + err = -EINVAL; + + return err; +} + +static void del_qtee_object(unsigned long id, + struct qcomtee_context_data *ctxdata) +{ + struct qcomtee_object *object; + + scoped_guard(mutex, &ctxdata->qtee_lock) + object = idr_remove(&ctxdata->qtee_objects_idr, id); + + qcomtee_object_put(object); +} + +/** + * qcomtee_context_add_qtee_object() - Add a QTEE object to the context. + * @param: TEE parameter representing @object. + * @object: QTEE object. + * @ctx: context to add the object. + * + * It assumes @object is %QCOMTEE_OBJECT_TYPE_TEE and the caller has already + * issued qcomtee_object_get() for @object. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +int qcomtee_context_add_qtee_object(struct tee_param *param, + struct qcomtee_object *object, + struct tee_context *ctx) +{ + int ret; + struct qcomtee_context_data *ctxdata = ctx->data; + + scoped_guard(mutex, &ctxdata->qtee_lock) + ret = idr_alloc(&ctxdata->qtee_objects_idr, object, 0, 0, + GFP_KERNEL); + if (ret < 0) + return ret; + + param->u.objref.id = ret; + /* QTEE Object: QCOMTEE_OBJREF_FLAG_TEE set. */ + param->u.objref.flags = QCOMTEE_OBJREF_FLAG_TEE; + + return 0; +} + +/* Retrieve the QTEE object added with qcomtee_context_add_qtee_object(). */ +int qcomtee_context_find_qtee_object(struct qcomtee_object **object, + struct tee_param *param, + struct tee_context *ctx) +{ + struct qcomtee_context_data *ctxdata = ctx->data; + + return find_qtee_object(object, param->u.objref.id, ctxdata); +} + +/** + * qcomtee_context_del_qtee_object() - Delete a QTEE object from the context. + * @param: TEE parameter representing @object. + * @ctx: context for deleting the object. + * + * The @param has been initialized by qcomtee_context_add_qtee_object(). + */ +void qcomtee_context_del_qtee_object(struct tee_param *param, + struct tee_context *ctx) +{ + struct qcomtee_context_data *ctxdata = ctx->data; + /* 'qtee_objects_idr' stores QTEE objects only. */ + if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_TEE) + del_qtee_object(param->u.objref.id, ctxdata); +} + +/** + * qcomtee_objref_to_arg() - Convert OBJREF parameter to QTEE argument. + * @arg: QTEE argument. + * @param: TEE parameter. + * @ctx: context in which the conversion should happen. + * + * It assumes @param is an OBJREF. + * It does not set @arg.type; the caller should initialize it to a correct + * &enum qcomtee_arg_type value. 
It gets the object's refcount in @arg; + * the caller should manage to put it afterward. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +int qcomtee_objref_to_arg(struct qcomtee_arg *arg, struct tee_param *param, + struct tee_context *ctx) +{ + int err = -EINVAL; + + arg->o = NULL_QCOMTEE_OBJECT; + /* param is a NULL object: */ + if (param->u.objref.id == TEE_OBJREF_NULL) + return 0; + + /* param is a callback object: */ + if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_USER) + err = qcomtee_user_param_to_object(&arg->o, param, ctx); + /* param is a QTEE object: */ + else if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_TEE) + err = qcomtee_context_find_qtee_object(&arg->o, param, ctx); + /* param is a memory object: */ + else if (param->u.objref.flags & QCOMTEE_OBJREF_FLAG_MEM) + err = qcomtee_memobj_param_to_object(&arg->o, param, ctx); + + /* + * For callback objects, call qcomtee_object_get() to keep a temporary + * copy for the driver, as these objects are released asynchronously + * and may disappear even before returning from QTEE. + * + * - For direct object invocations, the matching put is called in + * qcomtee_object_invoke() when parsing the QTEE response. + * - For callback responses, put is called in qcomtee_user_object_notify() + * after QTEE has received its copies. + */ + + if (!err && (typeof_qcomtee_object(arg->o) == QCOMTEE_OBJECT_TYPE_CB)) + qcomtee_object_get(arg->o); + + return err; +} + +/** + * qcomtee_objref_from_arg() - Convert QTEE argument to OBJREF param. + * @param: TEE parameter. + * @arg: QTEE argument. + * @ctx: context in which the conversion should happen. + * + * It assumes @arg is of %QCOMTEE_ARG_TYPE_IO or %QCOMTEE_ARG_TYPE_OO. + * It does not set @param.attr; the caller should initialize it to a + * correct type. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +int qcomtee_objref_from_arg(struct tee_param *param, struct qcomtee_arg *arg, + struct tee_context *ctx) +{ + struct qcomtee_object *object = arg->o; + + switch (typeof_qcomtee_object(object)) { + case QCOMTEE_OBJECT_TYPE_NULL: + param->u.objref.id = TEE_OBJREF_NULL; + + return 0; + case QCOMTEE_OBJECT_TYPE_CB: + /* object is a callback object: */ + if (is_qcomtee_user_object(object)) + return qcomtee_user_param_from_object(param, object, + ctx); + /* object is a memory object: */ + else if (is_qcomtee_memobj_object(object)) + return qcomtee_memobj_param_from_object(param, object, + ctx); + + break; + case QCOMTEE_OBJECT_TYPE_TEE: + return qcomtee_context_add_qtee_object(param, object, ctx); + + case QCOMTEE_OBJECT_TYPE_ROOT: + default: + break; + } + + return -EINVAL; +} + +/** + * qcomtee_params_to_args() - Convert TEE parameters to QTEE arguments. + * @u: QTEE arguments. + * @params: TEE parameters. + * @num_params: number of elements in the parameter array. + * @ctx: context in which the conversion should happen. + * + * It assumes @u has at least @num_params + 1 entries and has been initialized + * with %QCOMTEE_ARG_TYPE_INV as &struct qcomtee_arg.type. + * + * Return: On success, returns 0; on failure, returns < 0. 
+ */ +static int qcomtee_params_to_args(struct qcomtee_arg *u, + struct tee_param *params, int num_params, + struct tee_context *ctx) +{ + int i; + + for (i = 0; i < num_params; i++) { + switch (params[i].attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT: + u[i].flags = QCOMTEE_ARG_FLAGS_UADDR; + u[i].b.uaddr = params[i].u.ubuf.uaddr; + u[i].b.size = params[i].u.ubuf.size; + + if (params[i].attr == + TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT) + u[i].type = QCOMTEE_ARG_TYPE_IB; + else /* TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT */ + u[i].type = QCOMTEE_ARG_TYPE_OB; + + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT: + u[i].type = QCOMTEE_ARG_TYPE_IO; + if (qcomtee_objref_to_arg(&u[i], &params[i], ctx)) + goto out_failed; + + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT: + u[i].type = QCOMTEE_ARG_TYPE_OO; + u[i].o = NULL_QCOMTEE_OBJECT; + break; + default: + goto out_failed; + } + } + + return 0; + +out_failed: + /* Undo qcomtee_objref_to_arg(). */ + for (i--; i >= 0; i--) { + if (u[i].type != QCOMTEE_ARG_TYPE_IO) + continue; + + qcomtee_user_object_set_notify(u[i].o, false); + /* See docs for qcomtee_objref_to_arg() for double put. */ + if (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB) + qcomtee_object_put(u[i].o); + + qcomtee_object_put(u[i].o); + } + + return -EINVAL; +} + +/** + * qcomtee_params_from_args() - Convert QTEE arguments to TEE parameters. + * @params: TEE parameters. + * @u: QTEE arguments. + * @num_params: number of elements in the parameter array. + * @ctx: context in which the conversion should happen. + * + * @u should have already been initialized by qcomtee_params_to_args(). + * This also represents the end of a QTEE invocation that started with + * qcomtee_params_to_args() by releasing %QCOMTEE_ARG_TYPE_IO objects. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +static int qcomtee_params_from_args(struct tee_param *params, + struct qcomtee_arg *u, int num_params, + struct tee_context *ctx) +{ + int i, np; + + qcomtee_arg_for_each(np, u) { + switch (u[np].type) { + case QCOMTEE_ARG_TYPE_OB: + /* TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT */ + params[np].u.ubuf.size = u[np].b.size; + + break; + case QCOMTEE_ARG_TYPE_IO: + /* TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT */ + qcomtee_object_put(u[np].o); + + break; + case QCOMTEE_ARG_TYPE_OO: + /* TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT */ + if (qcomtee_objref_from_arg(&params[np], &u[np], ctx)) + goto out_failed; + + break; + case QCOMTEE_ARG_TYPE_IB: + default: + break; + } + } + + return 0; + +out_failed: + /* Undo qcomtee_objref_from_arg(). */ + for (i = 0; i < np; i++) { + if (params[i].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT) + qcomtee_context_del_qtee_object(&params[i], ctx); + } + + /* Release any IO and OO objects not processed. */ + for (; u[i].type && i < num_params; i++) { + if (u[i].type == QCOMTEE_ARG_TYPE_OO || + u[i].type == QCOMTEE_ARG_TYPE_IO) + qcomtee_object_put(u[i].o); + } + + return -EINVAL; +} + +/* TEE Device Ops. */ + +static int qcomtee_params_check(struct tee_param *params, int num_params) +{ + int io = 0, oo = 0, ib = 0, ob = 0; + int i; + + /* QTEE can accept 64 arguments. */ + if (num_params > QCOMTEE_ARGS_MAX) + return -EINVAL; + + /* Supported parameter types.
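Only user buffers (UBUF) and object references (OBJREF) are accepted; anything else is rejected with -EINVAL.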
*/ + for (i = 0; i < num_params; i++) { + switch (params[i].attr) { + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT: + ib++; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT: + ob++; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT: + io++; + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT: + oo++; + break; + default: + return -EINVAL; + } + } + + /* QTEE can accept 16 arguments of each supported type. */ + if (io > QCOMTEE_ARGS_PER_TYPE || oo > QCOMTEE_ARGS_PER_TYPE || + ib > QCOMTEE_ARGS_PER_TYPE || ob > QCOMTEE_ARGS_PER_TYPE) + return -EINVAL; + + return 0; +} + +/* Check if an operation on ROOT_QCOMTEE_OBJECT from userspace is permitted. */ +static int qcomtee_root_object_check(u32 op, struct tee_param *params, + int num_params) +{ + /* Some privileged operations recognized by QTEE. */ + if (op == QCOMTEE_ROOT_OP_NOTIFY_DOMAIN_CHANGE || + op == QCOMTEE_ROOT_OP_ADCI_ACCEPT || + op == QCOMTEE_ROOT_OP_ADCI_SHUTDOWN) + return -EINVAL; + + /* + * QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS is to register with QTEE + * by passing a credential object as input OBJREF. TEE_OBJREF_NULL as a + * credential object represents a privileged client for QTEE and + * is used by the kernel only. + */ + if (op == QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS && num_params == 2) { + if (params[0].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT && + params[1].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT) { + if (params[0].u.objref.id == TEE_OBJREF_NULL) + return -EINVAL; + } + } + + return 0; +} + +/** + * qcomtee_object_invoke() - Invoke a QTEE object. + * @ctx: TEE context. + * @arg: ioctl arguments. + * @params: parameters for the object. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +static int qcomtee_object_invoke(struct tee_context *ctx, + struct tee_ioctl_object_invoke_arg *arg, + struct tee_param *params) +{ + struct qcomtee_object_invoke_ctx *oic __free(kfree) = NULL; + struct qcomtee_context_data *ctxdata = ctx->data; + struct qcomtee_arg *u __free(kfree) = NULL; + struct qcomtee_object *object; + int i, ret, result; + + if (qcomtee_params_check(params, arg->num_params)) + return -EINVAL; + + /* First, handle reserved operations: */ + if (arg->op == QCOMTEE_MSG_OBJECT_OP_RELEASE) { + del_qtee_object(arg->id, ctxdata); + + return 0; + } + + /* Otherwise, invoke a QTEE object: */ + oic = qcomtee_object_invoke_ctx_alloc(ctx); + if (!oic) + return -ENOMEM; + + /* +1 for ending QCOMTEE_ARG_TYPE_INV. */ + u = kcalloc(arg->num_params + 1, sizeof(*u), GFP_KERNEL); + if (!u) + return -ENOMEM; + + /* Get an object to invoke. */ + if (arg->id == TEE_OBJREF_NULL) { + /* Use ROOT if TEE_OBJREF_NULL is invoked. */ + if (qcomtee_root_object_check(arg->op, params, arg->num_params)) + return -EINVAL; + + object = ROOT_QCOMTEE_OBJECT; + } else if (find_qtee_object(&object, arg->id, ctxdata)) { + return -EINVAL; + } + + ret = qcomtee_params_to_args(u, params, arg->num_params, ctx); + if (ret) + goto out; + + ret = qcomtee_object_do_invoke(oic, object, arg->op, u, &result); + if (ret) { + qcomtee_arg_for_each_input_object(i, u) { + qcomtee_user_object_set_notify(u[i].o, false); + qcomtee_object_put(u[i].o); + } + + goto out; + } + + /* Parse QTEE response and put driver's object copies: */ + + if (!result) { + /* Assume service is UNAVAIL if unable to process the result.
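QCOMTEE_MSG_ERROR_UNAVAIL is then reported back to userspace in arg->ret.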
*/ + if (qcomtee_params_from_args(params, u, arg->num_params, ctx)) + result = QCOMTEE_MSG_ERROR_UNAVAIL; + } else { + /* + * qcomtee_params_to_args() gets a copy of IO for the driver to + * make sure they do not get released while in the middle of + * invocation. On success (!result), qcomtee_params_from_args() + * puts them; Otherwise, put them here. + */ + qcomtee_arg_for_each_input_object(i, u) + qcomtee_object_put(u[i].o); + } + + arg->ret = result; +out: + qcomtee_object_put(object); + + return ret; +} + +/** + * qcomtee_supp_recv() - Wait for a request for the supplicant. + * @ctx: TEE context. + * @op: requested operation on the object. + * @num_params: number of elements in the parameter array. + * @params: parameters for @op. + * + * The first parameter is a meta %TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT. + * On input, it provides a user buffer. This buffer is used for parameters of + * type %TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT in qcomtee_cb_params_from_args(). + * On output, the object ID and request ID are stored in the meta parameter. + * + * @num_params is updated to the number of parameters that actually exist + * in @params on return. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +static int qcomtee_supp_recv(struct tee_context *ctx, u32 *op, u32 *num_params, + struct tee_param *params) +{ + struct qcomtee_user_object_request_data data; + void __user *uaddr; + size_t ubuf_size; + int i, ret; + + if (!*num_params) + return -EINVAL; + + /* First parameter should be an INOUT + meta parameter. */ + if (params->attr != + (TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT | TEE_IOCTL_PARAM_ATTR_META)) + return -EINVAL; + + /* Other parameters are none. */ + for (i = 1; i < *num_params; i++) + if (params[i].attr) + return -EINVAL; + + if (!IS_ALIGNED(params->u.value.a, 8)) + return -EINVAL; + + /* User buffer and size from meta parameter. */ + uaddr = u64_to_user_ptr(params->u.value.a); + ubuf_size = params->u.value.b; + /* Process TEE parameters. +/-1 to ignore the meta parameter. */ + ret = qcomtee_user_object_select(ctx, params + 1, *num_params - 1, + uaddr, ubuf_size, &data); + if (ret) + return ret; + + params->u.value.a = data.object_id; + params->u.value.b = data.id; + params->u.value.c = 0; + *op = data.op; + *num_params = data.np + 1; + + return 0; +} + +/** + * qcomtee_supp_send() - Submit a response for a request. + * @ctx: TEE context. + * @errno: return value for the request. + * @num_params: number of elements in the parameter array. + * @params: returned parameters. + * + * The first parameter is a meta %TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT. + * It specifies the request ID this response belongs to. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +static int qcomtee_supp_send(struct tee_context *ctx, u32 errno, u32 num_params, + struct tee_param *params) +{ + int req_id; + + if (!num_params) + return -EINVAL; + + /* First parameter should be an OUTPUT + meta parameter. */ + if (params->attr != (TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT | + TEE_IOCTL_PARAM_ATTR_META)) + return -EINVAL; + + req_id = params->u.value.a; + /* Process TEE parameters. +/-1 to ignore the meta parameter. 
*/ + return qcomtee_user_object_submit(ctx, params + 1, num_params - 1, + req_id, errno); +} + +static int qcomtee_open(struct tee_context *ctx) +{ + struct qcomtee_context_data *ctxdata __free(kfree) = NULL; + + ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL); + if (!ctxdata) + return -ENOMEM; + + /* + * In the QTEE driver, the same context is used to refcount resources + * shared by QTEE. For example, teedev_ctx_get() is called for any + * instance of callback objects (see qcomtee_user_param_to_object()). + * + * Maintain a copy of teedev for QTEE as it serves as a direct user of + * this context. The teedev will be released in the context's release(). + * + * tee_device_unregister() will remain blocked until all contexts + * are released. This includes contexts owned by the user, which are + * closed by teedev_close_context(), as well as those owned by QTEE + * closed by teedev_ctx_put() in object's release(). + */ + if (!tee_device_get(ctx->teedev)) + return -EINVAL; + + idr_init(&ctxdata->qtee_objects_idr); + mutex_init(&ctxdata->qtee_lock); + idr_init(&ctxdata->reqs_idr); + INIT_LIST_HEAD(&ctxdata->reqs_list); + mutex_init(&ctxdata->reqs_lock); + init_completion(&ctxdata->req_c); + + ctx->data = no_free_ptr(ctxdata); + + return 0; +} + +/* Gets called when the user closes the device */ +static void qcomtee_close_context(struct tee_context *ctx) +{ + struct qcomtee_context_data *ctxdata = ctx->data; + struct qcomtee_object *object; + int id; + + /* Process QUEUED or PROCESSING requests. */ + qcomtee_requests_destroy(ctxdata); + /* Release QTEE objects. */ + idr_for_each_entry(&ctxdata->qtee_objects_idr, object, id) + qcomtee_object_put(object); +} + +/* Gets called when the final reference to the context goes away. */ +static void qcomtee_release(struct tee_context *ctx) +{ + struct qcomtee_context_data *ctxdata = ctx->data; + + idr_destroy(&ctxdata->qtee_objects_idr); + idr_destroy(&ctxdata->reqs_idr); + kfree(ctxdata); + + /* There is nothing shared in this context with QTEE. */ + tee_device_put(ctx->teedev); +} + +static void qcomtee_get_version(struct tee_device *teedev, + struct tee_ioctl_version_data *vers) +{ + struct tee_ioctl_version_data v = { + .impl_id = TEE_IMPL_ID_QTEE, + .gen_caps = TEE_GEN_CAP_OBJREF, + }; + + *vers = v; +} + +/** + * qcomtee_get_qtee_feature_list() - Query QTEE feature versions. + * @ctx: TEE context. + * @id: ID of the feature to query. + * @version: version of the feature. + * + * Used to query the version of features supported by QTEE. + */ +static void qcomtee_get_qtee_feature_list(struct tee_context *ctx, u32 id, + u32 *version) +{ + struct qcomtee_object_invoke_ctx *oic __free(kfree); + struct qcomtee_object *client_env, *service; + struct qcomtee_arg u[3] = { 0 }; + int result; + + oic = qcomtee_object_invoke_ctx_alloc(ctx); + if (!oic) + return; + + client_env = qcomtee_object_get_client_env(oic); + if (client_env == NULL_QCOMTEE_OBJECT) + return; + + /* Get ''FeatureVersions Service'' object. */ + service = qcomtee_object_get_service(oic, client_env, + QCOMTEE_FEATURE_VER_UID); + if (service == NULL_QCOMTEE_OBJECT) + goto out_failed; + + /* IB: Feature to query. */ + u[0].b.addr = &id; + u[0].b.size = sizeof(id); + u[0].type = QCOMTEE_ARG_TYPE_IB; + + /* OB: Version returned.
*/ + u[1].b.addr = version; + u[1].b.size = sizeof(*version); + u[1].type = QCOMTEE_ARG_TYPE_OB; + + qcomtee_object_do_invoke(oic, service, QCOMTEE_FEATURE_VER_OP_GET, u, + &result); + +out_failed: + qcomtee_object_put(service); + qcomtee_object_put(client_env); +} + +static const struct tee_driver_ops qcomtee_ops = { + .get_version = qcomtee_get_version, + .open = qcomtee_open, + .close_context = qcomtee_close_context, + .release = qcomtee_release, + .object_invoke_func = qcomtee_object_invoke, + .supp_recv = qcomtee_supp_recv, + .supp_send = qcomtee_supp_send, +}; + +static const struct tee_desc qcomtee_desc = { + .name = "qcomtee", + .ops = &qcomtee_ops, + .owner = THIS_MODULE, +}; + +static int qcomtee_probe(struct platform_device *pdev) +{ + struct workqueue_struct *async_wq; + struct tee_device *teedev; + struct tee_shm_pool *pool; + struct tee_context *ctx; + struct qcomtee *qcomtee; + int err; + + qcomtee = kzalloc(sizeof(*qcomtee), GFP_KERNEL); + if (!qcomtee) + return -ENOMEM; + + pool = qcomtee_shm_pool_alloc(); + if (IS_ERR(pool)) { + err = PTR_ERR(pool); + goto err_free_qcomtee; + } + + teedev = tee_device_alloc(&qcomtee_desc, NULL, pool, qcomtee); + if (IS_ERR(teedev)) { + err = PTR_ERR(teedev); + goto err_pool_destroy; + } + + qcomtee->teedev = teedev; + qcomtee->pool = pool; + err = tee_device_register(qcomtee->teedev); + if (err) + goto err_unreg_teedev; + + platform_set_drvdata(pdev, qcomtee); + /* Start async wq. */ + async_wq = alloc_ordered_workqueue("qcomtee_wq", 0); + if (!async_wq) { + err = -ENOMEM; + goto err_unreg_teedev; + } + + qcomtee->wq = async_wq; + /* Driver context used for async operations of teedev. */ + ctx = teedev_open(qcomtee->teedev); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto err_dest_wq; + } + + qcomtee->ctx = ctx; + /* Init object table. */ + qcomtee->xa_last_id = 0; + xa_init_flags(&qcomtee->xa_local_objects, XA_FLAGS_ALLOC); + /* Get QTEE version. */ + qcomtee_get_qtee_feature_list(qcomtee->ctx, + QCOMTEE_FEATURE_VER_OP_GET_QTEE_ID, + &qcomtee->qtee_version); + + pr_info("QTEE version %u.%u.%u\n", + QTEE_VERSION_GET_MAJOR(qcomtee->qtee_version), + QTEE_VERSION_GET_MINOR(qcomtee->qtee_version), + QTEE_VERSION_GET_PATCH(qcomtee->qtee_version)); + + return 0; + +err_dest_wq: + destroy_workqueue(qcomtee->wq); +err_unreg_teedev: + tee_device_unregister(qcomtee->teedev); +err_pool_destroy: + tee_shm_pool_free(pool); +err_free_qcomtee: + kfree(qcomtee); + + return err; +} + +/** + * qcomtee_remove() - Device Removal Routine. + * @pdev: platform device information struct. + * + * It is called by the platform subsystem to alert the driver that it should + * release the device. + * + * QTEE does not provide an API to inform it about a callback object going away. + * However, when the driver's QTEE objects are released, QTEE also releases any + * callback object previously sent to it as part of the object release. + */ +static void qcomtee_remove(struct platform_device *pdev) +{ + struct qcomtee *qcomtee = platform_get_drvdata(pdev); + + teedev_close_context(qcomtee->ctx); + /* Wait for RELEASE operations to be processed for QTEE objects.
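+ * tee_device_unregister() below blocks until all contexts are released, + * including those held on behalf of QTEE objects; see the comment in + * qcomtee_open().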
*/ + tee_device_unregister(qcomtee->teedev); + destroy_workqueue(qcomtee->wq); + tee_shm_pool_free(qcomtee->pool); + kfree(qcomtee); +} + +static const struct platform_device_id qcomtee_ids[] = { { "qcomtee", 0 }, {} }; +MODULE_DEVICE_TABLE(platform, qcomtee_ids); + +static struct platform_driver qcomtee_platform_driver = { + .probe = qcomtee_probe, + .remove = qcomtee_remove, + .driver = { + .name = "qcomtee", + }, + .id_table = qcomtee_ids, +}; + +module_platform_driver(qcomtee_platform_driver); + +MODULE_AUTHOR("Qualcomm"); +MODULE_DESCRIPTION("QTEE driver"); +MODULE_VERSION("1.0"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tee/qcomtee/core.c b/drivers/tee/qcomtee/core.c new file mode 100644 index 000000000000..783acc59cfa9 --- /dev/null +++ b/drivers/tee/qcomtee/core.c @@ -0,0 +1,915 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/firmware/qcom/qcom_scm.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/xarray.h> + +#include "qcomtee.h" + +/* QTEE root object. */ +struct qcomtee_object qcomtee_object_root = { + .name = "root", + .object_type = QCOMTEE_OBJECT_TYPE_ROOT, + .info.qtee_id = QCOMTEE_MSG_OBJECT_ROOT, +}; + +/* Next argument of type @type after index @i. */ +int qcomtee_next_arg_type(struct qcomtee_arg *u, int i, + enum qcomtee_arg_type type) +{ + while (u[i].type != QCOMTEE_ARG_TYPE_INV && u[i].type != type) + i++; + return i; +} + +/* + * QTEE expects IDs with QCOMTEE_MSG_OBJECT_NS_BIT set for objects of + * QCOMTEE_OBJECT_TYPE_CB type. The first ID with QCOMTEE_MSG_OBJECT_NS_BIT + * set is reserved for the primordial object. + */ +#define QCOMTEE_OBJECT_PRIMORDIAL (QCOMTEE_MSG_OBJECT_NS_BIT) +#define QCOMTEE_OBJECT_ID_START (QCOMTEE_OBJECT_PRIMORDIAL + 1) +#define QCOMTEE_OBJECT_ID_END (U32_MAX) + +#define QCOMTEE_OBJECT_SET(p, type, ...) \ + __QCOMTEE_OBJECT_SET(p, type, ##__VA_ARGS__, 0UL) +#define __QCOMTEE_OBJECT_SET(p, type, optr, ...) \ + do { \ + (p)->object_type = (type); \ + (p)->info.qtee_id = (unsigned long)(optr); \ + } while (0) + +static struct qcomtee_object * +qcomtee_qtee_object_alloc(struct qcomtee_object_invoke_ctx *oic, + unsigned int object_id) +{ + struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev); + struct qcomtee_object *object; + + object = kzalloc(sizeof(*object), GFP_KERNEL); + if (!object) + return NULL_QCOMTEE_OBJECT; + + /* If failed, "no-name". */ + object->name = kasprintf(GFP_KERNEL, "qcomtee-%u", object_id); + QCOMTEE_OBJECT_SET(object, QCOMTEE_OBJECT_TYPE_TEE, object_id); + kref_init(&object->refcount); + /* A QTEE object requires a context for async operations. */ + object->info.qcomtee_async_ctx = qcomtee->ctx; + teedev_ctx_get(object->info.qcomtee_async_ctx); + + return object; +} + +static void qcomtee_qtee_object_free(struct qcomtee_object *object) +{ + /* See qcomtee_qtee_object_alloc(). */ + teedev_ctx_put(object->info.qcomtee_async_ctx); + + kfree(object->name); + kfree(object); +} + +static void qcomtee_do_release_qtee_object(struct work_struct *work) +{ + struct qcomtee_object *object; + struct qcomtee *qcomtee; + int ret, result; + + /* RELEASE does not require any argument. 
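+ * The argument array below carries only the %QCOMTEE_ARG_TYPE_INV terminator.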
*/ + struct qcomtee_arg args[] = { { .type = QCOMTEE_ARG_TYPE_INV } }; + + object = container_of(work, struct qcomtee_object, work); + qcomtee = tee_get_drvdata(object->info.qcomtee_async_ctx->teedev); + /* Get the TEE context used for asynchronous operations. */ + qcomtee->oic.ctx = object->info.qcomtee_async_ctx; + + ret = qcomtee_object_do_invoke_internal(&qcomtee->oic, object, + QCOMTEE_MSG_OBJECT_OP_RELEASE, + args, &result); + + /* Is it safe to retry the release? */ + if (ret && ret != -ENODEV) { + queue_work(qcomtee->wq, &object->work); + } else { + if (ret || result) + pr_err("%s release failed, ret = %d (%x)\n", + qcomtee_object_name(object), ret, result); + qcomtee_qtee_object_free(object); + } +} + +static void qcomtee_release_qtee_object(struct qcomtee_object *object) +{ + struct qcomtee *qcomtee = + tee_get_drvdata(object->info.qcomtee_async_ctx->teedev); + + INIT_WORK(&object->work, qcomtee_do_release_qtee_object); + queue_work(qcomtee->wq, &object->work); +} + +static void qcomtee_object_release(struct kref *refcount) +{ + struct qcomtee_object *object; + const char *name; + + object = container_of(refcount, struct qcomtee_object, refcount); + + /* + * qcomtee_object_get() is called in a RCU read lock. synchronize_rcu() + * to avoid releasing the object while it is being accessed in + * qcomtee_object_get(). + */ + synchronize_rcu(); + + switch (typeof_qcomtee_object(object)) { + case QCOMTEE_OBJECT_TYPE_TEE: + qcomtee_release_qtee_object(object); + + break; + case QCOMTEE_OBJECT_TYPE_CB: + name = object->name; + + if (object->ops->release) + object->ops->release(object); + + kfree_const(name); + + break; + case QCOMTEE_OBJECT_TYPE_ROOT: + case QCOMTEE_OBJECT_TYPE_NULL: + default: + break; + } +} + +/** + * qcomtee_object_get() - Increase the object's reference count. + * @object: object to increase the reference count. + * + * Context: The caller should hold RCU read lock. + */ +int qcomtee_object_get(struct qcomtee_object *object) +{ + if (object != &qcomtee_primordial_object && + object != NULL_QCOMTEE_OBJECT && + object != ROOT_QCOMTEE_OBJECT) + return kref_get_unless_zero(&object->refcount); + + return 0; +} + +/** + * qcomtee_object_put() - Decrease the object's reference count. + * @object: object to decrease the reference count. + */ +void qcomtee_object_put(struct qcomtee_object *object) +{ + if (object != &qcomtee_primordial_object && + object != NULL_QCOMTEE_OBJECT && + object != ROOT_QCOMTEE_OBJECT) + kref_put(&object->refcount, qcomtee_object_release); +} + +static int qcomtee_idx_alloc(struct qcomtee_object_invoke_ctx *oic, u32 *idx, + struct qcomtee_object *object) +{ + struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev); + + /* Every ID allocated here has QCOMTEE_MSG_OBJECT_NS_BIT set. */ + return xa_alloc_cyclic(&qcomtee->xa_local_objects, idx, object, + XA_LIMIT(QCOMTEE_OBJECT_ID_START, + QCOMTEE_OBJECT_ID_END), + &qcomtee->xa_last_id, GFP_KERNEL); +} + +struct qcomtee_object *qcomtee_idx_erase(struct qcomtee_object_invoke_ctx *oic, + u32 idx) +{ + struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev); + + if (idx < QCOMTEE_OBJECT_ID_START || idx > QCOMTEE_OBJECT_ID_END) + return NULL_QCOMTEE_OBJECT; + + return xa_erase(&qcomtee->xa_local_objects, idx); +} + +/** + * qcomtee_object_id_get() - Get an ID for an object to send to QTEE. + * @oic: context to use for the invocation. + * @object: object to assign an ID. + * @object_id: object ID. 
+ * + * Called on the path to QTEE to construct the message; see + * qcomtee_prepare_msg() and qcomtee_update_msg(). + * + * Return: On success, returns 0; on failure, returns < 0. + */ +static int qcomtee_object_id_get(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object *object, + unsigned int *object_id) +{ + u32 idx; + + switch (typeof_qcomtee_object(object)) { + case QCOMTEE_OBJECT_TYPE_CB: + if (qcomtee_idx_alloc(oic, &idx, object) < 0) + return -ENOSPC; + + *object_id = idx; + + break; + case QCOMTEE_OBJECT_TYPE_ROOT: + case QCOMTEE_OBJECT_TYPE_TEE: + *object_id = object->info.qtee_id; + + break; + case QCOMTEE_OBJECT_TYPE_NULL: + *object_id = QCOMTEE_MSG_OBJECT_NULL; + + break; + } + + return 0; +} + +/* Release an object ID assigned in qcomtee_object_id_get(). */ +static void qcomtee_object_id_put(struct qcomtee_object_invoke_ctx *oic, + unsigned int object_id) +{ + qcomtee_idx_erase(oic, object_id); +} + +/** + * qcomtee_local_object_get() - Get the object referenced by the ID. + * @oic: context to use for the invocation. + * @object_id: object ID. + * + * It is called on the path from QTEE, on behalf of QTEE, to obtain an + * instance of an object for a given ID. It increases the object's reference + * count on success. + * + * Return: On error, returns %NULL_QCOMTEE_OBJECT. + * On success, returns the object. + */ +static struct qcomtee_object * +qcomtee_local_object_get(struct qcomtee_object_invoke_ctx *oic, + unsigned int object_id) +{ + struct qcomtee *qcomtee = tee_get_drvdata(oic->ctx->teedev); + struct qcomtee_object *object; + + if (object_id == QCOMTEE_OBJECT_PRIMORDIAL) + return &qcomtee_primordial_object; + + guard(rcu)(); + object = xa_load(&qcomtee->xa_local_objects, object_id); + /* qcomtee_object_get() already handles %NULL_QCOMTEE_OBJECT. */ + qcomtee_object_get(object); + + return object; +} + +/** + * qcomtee_object_user_init() - Initialize an object for the user. + * @object: object to initialize. + * @ot: type of object as &enum qcomtee_object_type. + * @ops: instance of callbacks. + * @fmt: name assigned to the object. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +int qcomtee_object_user_init(struct qcomtee_object *object, + enum qcomtee_object_type ot, + struct qcomtee_object_operations *ops, + const char *fmt, ...) +{ + va_list ap; + int ret; + + kref_init(&object->refcount); + QCOMTEE_OBJECT_SET(object, QCOMTEE_OBJECT_TYPE_NULL); + + va_start(ap, fmt); + switch (ot) { + case QCOMTEE_OBJECT_TYPE_NULL: + ret = 0; + + break; + case QCOMTEE_OBJECT_TYPE_CB: + object->ops = ops; + /* Break instead of returning so va_end() below runs. */ + if (!object->ops->dispatch) { + ret = -EINVAL; + break; + } + + /* If allocation fails, the object is reported as "no-name". */ + object->name = kvasprintf_const(GFP_KERNEL, fmt, ap); + QCOMTEE_OBJECT_SET(object, QCOMTEE_OBJECT_TYPE_CB); + + ret = 0; + break; + case QCOMTEE_OBJECT_TYPE_ROOT: + case QCOMTEE_OBJECT_TYPE_TEE: + default: + ret = -EINVAL; + } + va_end(ap); + + return ret; +} + +/** + * qcomtee_object_type() - Returns the type of object represented by an ID. + * @object_id: object ID for the object. + * + * Similar to typeof_qcomtee_object(), but instead of receiving an object as + * an argument, it receives an object ID. It is used internally on the return + * path from QTEE. + * + * Return: Returns the type of object referenced by @object_id.
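+ * + * For illustration: %QCOMTEE_MSG_OBJECT_NULL (0) maps to + * %QCOMTEE_OBJECT_TYPE_NULL, %QCOMTEE_MSG_OBJECT_ROOT (1) maps to + * %QCOMTEE_OBJECT_TYPE_TEE, and any ID with %QCOMTEE_MSG_OBJECT_NS_BIT set + * (e.g., the arbitrary value BIT(31) | 2) maps to %QCOMTEE_OBJECT_TYPE_CB.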
+ */ +static enum qcomtee_object_type qcomtee_object_type(unsigned int object_id) +{ + if (object_id == QCOMTEE_MSG_OBJECT_NULL) + return QCOMTEE_OBJECT_TYPE_NULL; + + if (object_id & QCOMTEE_MSG_OBJECT_NS_BIT) + return QCOMTEE_OBJECT_TYPE_CB; + + return QCOMTEE_OBJECT_TYPE_TEE; +} + +/** + * qcomtee_object_qtee_init() - Initialize an object for QTEE. + * @oic: context to use for the invocation. + * @object: object returned. + * @object_id: object ID received from QTEE. + * + * Return: On failure, returns < 0 and sets @object to %NULL_QCOMTEE_OBJECT. + * On success, returns 0 + */ +static int qcomtee_object_qtee_init(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object **object, + unsigned int object_id) +{ + int ret = 0; + + switch (qcomtee_object_type(object_id)) { + case QCOMTEE_OBJECT_TYPE_NULL: + *object = NULL_QCOMTEE_OBJECT; + + break; + case QCOMTEE_OBJECT_TYPE_CB: + *object = qcomtee_local_object_get(oic, object_id); + if (*object == NULL_QCOMTEE_OBJECT) + ret = -EINVAL; + + break; + + default: /* QCOMTEE_OBJECT_TYPE_TEE */ + *object = qcomtee_qtee_object_alloc(oic, object_id); + if (*object == NULL_QCOMTEE_OBJECT) + ret = -ENOMEM; + + break; + } + + return ret; +} + +/* + * ''Marshaling API'' + * qcomtee_prepare_msg - Prepare the inbound buffer for sending to QTEE + * qcomtee_update_args - Parse the QTEE response in the inbound buffer + * qcomtee_prepare_args - Parse the QTEE request from the outbound buffer + * qcomtee_update_msg - Update the outbound buffer with the response for QTEE + */ + +static int qcomtee_prepare_msg(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object *object, u32 op, + struct qcomtee_arg *u) +{ + struct qcomtee_msg_object_invoke *msg; + unsigned int object_id; + int i, ib, ob, io, oo; + size_t offset; + + /* Use the input message buffer in 'oic'. */ + msg = oic->in_msg.addr; + + /* Start offset in a message for buffer arguments. */ + offset = qcomtee_msg_buffer_args(struct qcomtee_msg_object_invoke, + qcomtee_args_len(u)); + + /* Get the ID of the object being invoked. */ + if (qcomtee_object_id_get(oic, object, &object_id)) + return -ENOSPC; + + ib = 0; + qcomtee_arg_for_each_input_buffer(i, u) { + void *msgptr; /* Address of buffer payload: */ + /* Overflow already checked in qcomtee_msg_buffers_alloc(). */ + msg->args[ib].b.offset = offset; + msg->args[ib].b.size = u[i].b.size; + + msgptr = qcomtee_msg_offset_to_ptr(msg, offset); + /* Userspace client or kernel client!? */ + if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR)) + memcpy(msgptr, u[i].b.addr, u[i].b.size); + else if (copy_from_user(msgptr, u[i].b.uaddr, u[i].b.size)) + return -EINVAL; + + offset += qcomtee_msg_offset_align(u[i].b.size); + ib++; + } + + ob = ib; + qcomtee_arg_for_each_output_buffer(i, u) { + /* Overflow already checked in qcomtee_msg_buffers_alloc(). */ + msg->args[ob].b.offset = offset; + msg->args[ob].b.size = u[i].b.size; + + offset += qcomtee_msg_offset_align(u[i].b.size); + ob++; + } + + io = ob; + qcomtee_arg_for_each_input_object(i, u) { + if (qcomtee_object_id_get(oic, u[i].o, &msg->args[io].o)) { + qcomtee_object_id_put(oic, object_id); + for (io--; io >= ob; io--) + qcomtee_object_id_put(oic, msg->args[io].o); + + return -ENOSPC; + } + + io++; + } + + oo = io; + qcomtee_arg_for_each_output_object(i, u) + oo++; + + /* Set object, operation, and argument counts. */ + qcomtee_msg_init(msg, object_id, op, ib, ob, io, oo); + + return 0; +} + +/** + * qcomtee_update_args() - Parse the QTEE response in the inbound buffer. 
+ * @u: array of arguments for the invocation. + * @oic: context to use for the invocation. + * + * @u must be the same as the one used in qcomtee_prepare_msg() when + * initializing the inbound buffer. + * + * On failure, it continues processing the QTEE message. The caller should + * do the necessary cleanup, including calling qcomtee_object_put() + * on the output objects. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +static int qcomtee_update_args(struct qcomtee_arg *u, + struct qcomtee_object_invoke_ctx *oic) +{ + struct qcomtee_msg_object_invoke *msg; + int i, ib, ob, io, oo; + int ret = 0; + + /* Use the input message buffer in 'oic'. */ + msg = oic->in_msg.addr; + + ib = 0; + qcomtee_arg_for_each_input_buffer(i, u) + ib++; + + ob = ib; + qcomtee_arg_for_each_output_buffer(i, u) { + void *msgptr; /* Address of buffer payload: */ + /* QTEE can override the size to a smaller value. */ + u[i].b.size = msg->args[ob].b.size; + + msgptr = qcomtee_msg_offset_to_ptr(msg, msg->args[ob].b.offset); + /* Userspace client or kernel client!? */ + if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR)) + memcpy(u[i].b.addr, msgptr, u[i].b.size); + else if (copy_to_user(u[i].b.uaddr, msgptr, u[i].b.size)) + ret = -EINVAL; + + ob++; + } + + io = ob; + qcomtee_arg_for_each_input_object(i, u) + io++; + + oo = io; + qcomtee_arg_for_each_output_object(i, u) { + if (qcomtee_object_qtee_init(oic, &u[i].o, msg->args[oo].o)) + ret = -EINVAL; + + oo++; + } + + return ret; +} + +/** + * qcomtee_prepare_args() - Parse the QTEE request from the outbound buffer. + * @oic: context to use for the invocation. + * + * It initializes &qcomtee_object_invoke_ctx->u based on the QTEE request in + * the outbound buffer. It sets %QCOMTEE_ARG_TYPE_INV at the end of the array. + * + * On failure, it continues processing the QTEE message. The caller should + * do the necessary cleanup, including calling qcomtee_object_put() + * on the input objects. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +static int qcomtee_prepare_args(struct qcomtee_object_invoke_ctx *oic) +{ + struct qcomtee_msg_callback *msg; + int i, ret = 0; + + /* Use the output message buffer in 'oic'. */ + msg = oic->out_msg.addr; + + qcomtee_msg_for_each_input_buffer(i, msg) { + oic->u[i].b.addr = + qcomtee_msg_offset_to_ptr(msg, msg->args[i].b.offset); + oic->u[i].b.size = msg->args[i].b.size; + oic->u[i].type = QCOMTEE_ARG_TYPE_IB; + } + + qcomtee_msg_for_each_output_buffer(i, msg) { + oic->u[i].b.addr = + qcomtee_msg_offset_to_ptr(msg, msg->args[i].b.offset); + oic->u[i].b.size = msg->args[i].b.size; + oic->u[i].type = QCOMTEE_ARG_TYPE_OB; + } + + qcomtee_msg_for_each_input_object(i, msg) { + if (qcomtee_object_qtee_init(oic, &oic->u[i].o, msg->args[i].o)) + ret = -EINVAL; + + oic->u[i].type = QCOMTEE_ARG_TYPE_IO; + } + + qcomtee_msg_for_each_output_object(i, msg) + oic->u[i].type = QCOMTEE_ARG_TYPE_OO; + + /* End of Arguments. */ + oic->u[i].type = QCOMTEE_ARG_TYPE_INV; + + return ret; +} + +static int qcomtee_update_msg(struct qcomtee_object_invoke_ctx *oic) +{ + struct qcomtee_msg_callback *msg; + int i, ib, ob, io, oo; + + /* Use the output message buffer in 'oic'. */ + msg = oic->out_msg.addr; + + ib = 0; + qcomtee_arg_for_each_input_buffer(i, oic->u) + ib++; + + ob = ib; + qcomtee_arg_for_each_output_buffer(i, oic->u) { + /* Only reduce size; never increase it. 
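+ * Growing it would overrun the payload area reserved for this argument in + * the outbound buffer.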
*/ + if (msg->args[ob].b.size < oic->u[i].b.size) + return -EINVAL; + + msg->args[ob].b.size = oic->u[i].b.size; + ob++; + } + + io = ob; + qcomtee_arg_for_each_input_object(i, oic->u) + io++; + + oo = io; + qcomtee_arg_for_each_output_object(i, oic->u) { + if (qcomtee_object_id_get(oic, oic->u[i].o, &msg->args[oo].o)) { + for (oo--; oo >= io; oo--) + qcomtee_object_id_put(oic, msg->args[oo].o); + + return -ENOSPC; + } + + oo++; + } + + return 0; +} + +/* Invoke a callback object. */ +static void qcomtee_cb_object_invoke(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_msg_callback *msg) +{ + int i, errno; + u32 op; + + /* Get the object being invoked. */ + unsigned int object_id = msg->cxt; + struct qcomtee_object *object; + + /* QTEE cannot invoke a NULL object or objects it hosts. */ + if (qcomtee_object_type(object_id) == QCOMTEE_OBJECT_TYPE_NULL || + qcomtee_object_type(object_id) == QCOMTEE_OBJECT_TYPE_TEE) { + errno = -EINVAL; + goto out; + } + + object = qcomtee_local_object_get(oic, object_id); + if (object == NULL_QCOMTEE_OBJECT) { + errno = -EINVAL; + goto out; + } + + oic->object = object; + + /* Filter bits used by transport. */ + op = msg->op & QCOMTEE_MSG_OBJECT_OP_MASK; + + switch (op) { + case QCOMTEE_MSG_OBJECT_OP_RELEASE: + qcomtee_object_id_put(oic, object_id); + qcomtee_object_put(object); + errno = 0; + + break; + case QCOMTEE_MSG_OBJECT_OP_RETAIN: + qcomtee_object_get(object); + errno = 0; + + break; + default: + errno = qcomtee_prepare_args(oic); + if (errno) { + /* Release any object that arrived as input. */ + qcomtee_arg_for_each_input_object(i, oic->u) + qcomtee_object_put(oic->u[i].o); + + break; + } + + errno = object->ops->dispatch(oic, object, op, oic->u); + if (!errno) { + /* On success, notify at the appropriate time. */ + oic->flags |= QCOMTEE_OIC_FLAG_NOTIFY; + } + } + +out: + oic->errno = errno; +} + +static int +qcomtee_object_invoke_ctx_invoke(struct qcomtee_object_invoke_ctx *oic, + int *result, u64 *res_type) +{ + phys_addr_t out_msg_paddr; + phys_addr_t in_msg_paddr; + int ret; + u64 res; + + tee_shm_get_pa(oic->out_shm, 0, &out_msg_paddr); + tee_shm_get_pa(oic->in_shm, 0, &in_msg_paddr); + if (!(oic->flags & QCOMTEE_OIC_FLAG_BUSY)) + ret = qcom_scm_qtee_invoke_smc(in_msg_paddr, oic->in_msg.size, + out_msg_paddr, oic->out_msg.size, + &res, res_type); + else + ret = qcom_scm_qtee_callback_response(out_msg_paddr, + oic->out_msg.size, + &res, res_type); + + if (ret) + pr_err("QTEE returned with %d.\n", ret); + else + *result = (int)res; + + return ret; +} + +/** + * qcomtee_qtee_objects_put() - Put the callback objects in the argument array. + * @u: array of arguments. + * + * When qcomtee_object_do_invoke_internal() is successfully invoked, + * QTEE takes ownership of the callback objects. If the invocation fails, + * qcomtee_object_do_invoke_internal() calls qcomtee_qtee_objects_put() + * to mimic the release of callback objects by QTEE. + */ +static void qcomtee_qtee_objects_put(struct qcomtee_arg *u) +{ + int i; + + qcomtee_arg_for_each_input_object(i, u) { + if (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB) + qcomtee_object_put(u[i].o); + } +} + +/** + * qcomtee_object_do_invoke_internal() - Submit an invocation for an object. + * @oic: context to use for the current invocation. + * @object: object being invoked. + * @op: requested operation on the object. + * @u: array of arguments for the current invocation. + * @result: result returned from QTEE.
+ * + * The caller is responsible for keeping track of the refcount for each + * object, including @object. On return, the caller loses ownership of all + * input objects of type %QCOMTEE_OBJECT_TYPE_CB. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +int qcomtee_object_do_invoke_internal(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object *object, u32 op, + struct qcomtee_arg *u, int *result) +{ + struct qcomtee_msg_callback *cb_msg; + struct qcomtee_object *qto; + int i, ret, errno; + u64 res_type; + + /* Allocate inbound and outbound buffers. */ + ret = qcomtee_msg_buffers_alloc(oic, u); + if (ret) { + qcomtee_qtee_objects_put(u); + + return ret; + } + + ret = qcomtee_prepare_msg(oic, object, op, u); + if (ret) { + qcomtee_qtee_objects_put(u); + + goto out; + } + + /* Use the output message buffer in 'oic'. */ + cb_msg = oic->out_msg.addr; + + while (1) { + if (oic->flags & QCOMTEE_OIC_FLAG_BUSY) { + errno = oic->errno; + if (!errno) + errno = qcomtee_update_msg(oic); + qcomtee_msg_set_result(cb_msg, errno); + } + + /* Invoke the remote object. */ + ret = qcomtee_object_invoke_ctx_invoke(oic, result, &res_type); + /* Returned from a callback object's result submission: */ + if (oic->flags & QCOMTEE_OIC_FLAG_BUSY) { + qto = oic->object; + if (qto) { + if (oic->flags & QCOMTEE_OIC_FLAG_NOTIFY) { + if (qto->ops->notify) + qto->ops->notify(oic, qto, + errno || ret); + } + + /* Get is in qcomtee_cb_object_invoke(). */ + qcomtee_object_put(qto); + } + + oic->object = NULL_QCOMTEE_OBJECT; + oic->flags &= ~(QCOMTEE_OIC_FLAG_BUSY | + QCOMTEE_OIC_FLAG_NOTIFY); + } + + if (ret) { + /* + * Unable to finish the invocation. + * If QCOMTEE_OIC_FLAG_SHARED is not set, put + * QCOMTEE_OBJECT_TYPE_CB input objects. + */ + if (!(oic->flags & QCOMTEE_OIC_FLAG_SHARED)) + qcomtee_qtee_objects_put(u); + else + ret = -ENODEV; + + goto out; + } else { + /* + * QTEE obtained ownership of QCOMTEE_OBJECT_TYPE_CB + * input objects in 'u'. On further failure, QTEE is + * responsible for releasing them. + */ + oic->flags |= QCOMTEE_OIC_FLAG_SHARED; + } + + /* Is it a callback request? */ + if (res_type != QCOMTEE_RESULT_INBOUND_REQ_NEEDED) { + /* + * Parse results. If parsing fails, assume the service + * was unavailable (i.e. QCOMTEE_MSG_ERROR_UNAVAIL) + * and put output objects to initiate cleanup. + */ + if (!*result && qcomtee_update_args(u, oic)) { + *result = QCOMTEE_MSG_ERROR_UNAVAIL; + qcomtee_arg_for_each_output_object(i, u) + qcomtee_object_put(u[i].o); + } + + break; + } else { + oic->flags |= QCOMTEE_OIC_FLAG_BUSY; + qcomtee_fetch_async_reqs(oic); + qcomtee_cb_object_invoke(oic, cb_msg); + } + } + + qcomtee_fetch_async_reqs(oic); +out: + qcomtee_msg_buffers_free(oic); + + return ret; +} + +int qcomtee_object_do_invoke(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object *object, u32 op, + struct qcomtee_arg *u, int *result) +{ + /* User cannot set bits used by transport. */ + if (op & ~QCOMTEE_MSG_OBJECT_OP_MASK) + return -EINVAL; + + /* User can only invoke QTEE-hosted objects. */ + if (typeof_qcomtee_object(object) != QCOMTEE_OBJECT_TYPE_TEE && + typeof_qcomtee_object(object) != QCOMTEE_OBJECT_TYPE_ROOT) + return -EINVAL; + + /* User cannot directly issue these operations to QTEE. */ + if (op == QCOMTEE_MSG_OBJECT_OP_RELEASE || + op == QCOMTEE_MSG_OBJECT_OP_RETAIN) + return -EINVAL; + + return qcomtee_object_do_invoke_internal(oic, object, op, u, result); +} + +/** + * qcomtee_object_get_client_env() - Get a privileged client env. object.
+ * @oic: context to use for the current invocation. + * + * The caller should call qcomtee_object_put() on the returned object + * to release it. + * + * Return: On error, returns %NULL_QCOMTEE_OBJECT. + * On success, returns the object. + */ +struct qcomtee_object * +qcomtee_object_get_client_env(struct qcomtee_object_invoke_ctx *oic) +{ + struct qcomtee_arg u[3] = { 0 }; + int ret, result; + + u[0].o = NULL_QCOMTEE_OBJECT; + u[0].type = QCOMTEE_ARG_TYPE_IO; + u[1].type = QCOMTEE_ARG_TYPE_OO; + ret = qcomtee_object_do_invoke(oic, ROOT_QCOMTEE_OBJECT, + QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS, u, + &result); + if (ret || result) + return NULL_QCOMTEE_OBJECT; + + return u[1].o; +} + +struct qcomtee_object * +qcomtee_object_get_service(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object *client_env, u32 uid) +{ + struct qcomtee_arg u[3] = { 0 }; + int ret, result; + + u[0].b.addr = &uid; + u[0].b.size = sizeof(uid); + u[0].type = QCOMTEE_ARG_TYPE_IB; + u[1].type = QCOMTEE_ARG_TYPE_OO; + ret = qcomtee_object_do_invoke(oic, client_env, QCOMTEE_CLIENT_ENV_OPEN, + u, &result); + + if (ret || result) + return NULL_QCOMTEE_OBJECT; + + return u[1].o; +} diff --git a/drivers/tee/qcomtee/mem_obj.c b/drivers/tee/qcomtee/mem_obj.c new file mode 100644 index 000000000000..228a3e30a31b --- /dev/null +++ b/drivers/tee/qcomtee/mem_obj.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/firmware/qcom/qcom_scm.h> +#include <linux/mm.h> + +#include "qcomtee.h" + +/** + * DOC: Memory and Mapping Objects + * + * QTEE uses memory objects for memory sharing with Linux. + * A memory object can be a standard dma_buf or a contiguous memory range, + * e.g., tee_shm. A memory object should support one operation: map. When + * invoked by QTEE, a mapping object is generated. A mapping object supports + * one operation: unmap. + * + * (1) To map a memory object, QTEE invokes the primordial object with + * the %QCOMTEE_OBJECT_OP_MAP_REGION operation; see + * qcomtee_primordial_obj_dispatch(). + * (2) To unmap a memory object, QTEE releases the mapping object, which + * calls qcomtee_mem_object_release(). + * + * The map operation is implemented in the primordial object as a privileged + * operation instead of qcomtee_mem_object_dispatch(). Otherwise, on + * platforms without shm_bridge, a user could trick QTEE into writing to + * kernel memory by passing a user object as a memory object and returning a + * random physical address as the result of the mapping request. + */ + +struct qcomtee_mem_object { + struct qcomtee_object object; + struct tee_shm *shm; + /* QTEE requires these fields to be page aligned. */ + phys_addr_t paddr; /* Physical address of range. */ + size_t size; /* Size of the range. */ +}; + +#define to_qcomtee_mem_object(o) \ + container_of((o), struct qcomtee_mem_object, object) + +static struct qcomtee_object_operations qcomtee_mem_object_ops; + +/* Is it a memory object using tee_shm?
*/ +int is_qcomtee_memobj_object(struct qcomtee_object *object) +{ + return object != NULL_QCOMTEE_OBJECT && + typeof_qcomtee_object(object) == QCOMTEE_OBJECT_TYPE_CB && + object->ops == &qcomtee_mem_object_ops; +} + +static int qcomtee_mem_object_dispatch(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object *object, u32 op, + struct qcomtee_arg *args) +{ + return -EINVAL; +} + +static void qcomtee_mem_object_release(struct qcomtee_object *object) +{ + struct qcomtee_mem_object *mem_object = to_qcomtee_mem_object(object); + + /* Matching get is in qcomtee_memobj_param_to_object(). */ + tee_shm_put(mem_object->shm); + kfree(mem_object); +} + +static struct qcomtee_object_operations qcomtee_mem_object_ops = { + .release = qcomtee_mem_object_release, + .dispatch = qcomtee_mem_object_dispatch, +}; + +/** + * qcomtee_memobj_param_to_object() - OBJREF parameter to &struct qcomtee_object. + * @object: object returned. + * @param: TEE parameter. + * @ctx: context in which the conversion should happen. + * + * @param is an OBJREF with the %QCOMTEE_OBJREF_FLAG_MEM flag set. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +int qcomtee_memobj_param_to_object(struct qcomtee_object **object, + struct tee_param *param, + struct tee_context *ctx) +{ + struct qcomtee_mem_object *mem_object __free(kfree) = NULL; + struct tee_shm *shm; + int err; + + mem_object = kzalloc(sizeof(*mem_object), GFP_KERNEL); + if (!mem_object) + return -ENOMEM; + + shm = tee_shm_get_from_id(ctx, param->u.objref.id); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + /* mem-object wrapping the memref. */ + err = qcomtee_object_user_init(&mem_object->object, + QCOMTEE_OBJECT_TYPE_CB, + &qcomtee_mem_object_ops, "tee-shm-%d", + shm->id); + if (err) { + tee_shm_put(shm); + + return err; + } + + mem_object->paddr = shm->paddr; + mem_object->size = shm->size; + mem_object->shm = shm; + + *object = &no_free_ptr(mem_object)->object; + + return 0; +} + +/* Reverse what qcomtee_memobj_param_to_object() does. */ +int qcomtee_memobj_param_from_object(struct tee_param *param, + struct qcomtee_object *object, + struct tee_context *ctx) +{ + struct qcomtee_mem_object *mem_object; + + mem_object = to_qcomtee_mem_object(object); + /* Make sure the memory object is used in the same context it originated from. */ + if (mem_object->shm->ctx != ctx) + return -EINVAL; + + param->u.objref.id = mem_object->shm->id; + param->u.objref.flags = QCOMTEE_OBJREF_FLAG_MEM; + + /* Passing shm->id to userspace; drop the reference. */ + qcomtee_object_put(object); + + return 0; +} + +/** + * qcomtee_mem_object_map() - Map a memory object. + * @object: memory object. + * @map_object: created mapping object. + * @mem_paddr: physical address of the memory. + * @mem_size: size of the memory. + * @perms: QTEE access permissions. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +int qcomtee_mem_object_map(struct qcomtee_object *object, + struct qcomtee_object **map_object, u64 *mem_paddr, + u64 *mem_size, u32 *perms) +{ + struct qcomtee_mem_object *mem_object = to_qcomtee_mem_object(object); + + /* Reuses the memory object as a mapping object by re-sharing it.
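+ * The reference taken here keeps the backing tee_shm alive until QTEE + * releases the mapping object; see qcomtee_mem_object_release().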
*/ + qcomtee_object_get(&mem_object->object); + + *map_object = &mem_object->object; + *mem_paddr = mem_object->paddr; + *mem_size = mem_object->size; + *perms = QCOM_SCM_PERM_RW; + + return 0; +} diff --git a/drivers/tee/qcomtee/primordial_obj.c b/drivers/tee/qcomtee/primordial_obj.c new file mode 100644 index 000000000000..b6f811e83b11 --- /dev/null +++ b/drivers/tee/qcomtee/primordial_obj.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + +#include <linux/delay.h> +#include "qcomtee.h" + +/** + * DOC: Primordial Object + * + * After boot, the kernel provides a static object of type + * %QCOMTEE_OBJECT_TYPE_CB called the primordial object. This object is used + * for native kernel services or privileged operations. + * + * We support: + * - %QCOMTEE_OBJECT_OP_MAP_REGION to map a memory object and return mapping + * object and mapping information (see qcomtee_mem_object_map()). + * - %QCOMTEE_OBJECT_OP_YIELD to yield by the thread running in QTEE. + * - %QCOMTEE_OBJECT_OP_SLEEP to wait for a period of time. + */ + +#define QCOMTEE_OBJECT_OP_MAP_REGION 0 +#define QCOMTEE_OBJECT_OP_YIELD 1 +#define QCOMTEE_OBJECT_OP_SLEEP 2 + +/* Mapping information format as expected by QTEE. */ +struct qcomtee_mapping_info { + u64 paddr; + u64 len; + u32 perms; +} __packed; + +static int +qcomtee_primordial_obj_dispatch(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object *primordial_object_unused, + u32 op, struct qcomtee_arg *args) +{ + struct qcomtee_mapping_info *map_info; + struct qcomtee_object *mem_object; + struct qcomtee_object *map_object; + int err = 0; + + switch (op) { + case QCOMTEE_OBJECT_OP_YIELD: + cond_resched(); + /* No output object. */ + oic->data = NULL; + + break; + case QCOMTEE_OBJECT_OP_SLEEP: + /* Check message format matched QCOMTEE_OBJECT_OP_SLEEP op. */ + if (qcomtee_args_len(args) != 1 || + args[0].type != QCOMTEE_ARG_TYPE_IB || + args[0].b.size < sizeof(u32)) + return -EINVAL; + + msleep(*(u32 *)(args[0].b.addr)); + /* No output object. */ + oic->data = NULL; + + break; + case QCOMTEE_OBJECT_OP_MAP_REGION: + if (qcomtee_args_len(args) != 3 || + args[0].type != QCOMTEE_ARG_TYPE_OB || + args[1].type != QCOMTEE_ARG_TYPE_IO || + args[2].type != QCOMTEE_ARG_TYPE_OO || + args[0].b.size < sizeof(struct qcomtee_mapping_info)) + return -EINVAL; + + map_info = args[0].b.addr; + mem_object = args[1].o; + + qcomtee_mem_object_map(mem_object, &map_object, + &map_info->paddr, &map_info->len, + &map_info->perms); + + args[2].o = map_object; + /* One output object; pass it for cleanup to notify. */ + oic->data = map_object; + + qcomtee_object_put(mem_object); + + break; + default: + err = -EINVAL; + } + + return err; +} + +/* Called after submitting the callback response. */ +static void qcomtee_primordial_obj_notify(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object *unused, + int err) +{ + struct qcomtee_object *object = oic->data; + + /* If err, QTEE did not obtain mapping object. Drop it. 
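+ * On success, QTEE holds the reference and drops it later through an + * asynchronous release message (see the DOC comment in qcomtee_msg.h).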
*/ + if (object && err) + qcomtee_object_put(object); +} + +static struct qcomtee_object_operations qcomtee_primordial_obj_ops = { + .dispatch = qcomtee_primordial_obj_dispatch, + .notify = qcomtee_primordial_obj_notify, +}; + +struct qcomtee_object qcomtee_primordial_object = { + .name = "primordial", + .object_type = QCOMTEE_OBJECT_TYPE_CB, + .ops = &qcomtee_primordial_obj_ops +}; diff --git a/drivers/tee/qcomtee/qcomtee.h b/drivers/tee/qcomtee/qcomtee.h new file mode 100644 index 000000000000..f39bf63fd1c2 --- /dev/null +++ b/drivers/tee/qcomtee/qcomtee.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + +#ifndef QCOMTEE_H +#define QCOMTEE_H + +#include <linux/kobject.h> +#include <linux/tee_core.h> + +#include "qcomtee_msg.h" +#include "qcomtee_object.h" + +/* Flags relating to object reference. */ +#define QCOMTEE_OBJREF_FLAG_TEE BIT(0) +#define QCOMTEE_OBJREF_FLAG_USER BIT(1) +#define QCOMTEE_OBJREF_FLAG_MEM BIT(2) + +/** + * struct qcomtee - Main service struct. + * @teedev: client device. + * @pool: shared memory pool. + * @ctx: driver private context. + * @oic: context to use for the current driver invocation. + * @wq: workqueue for QTEE async operations. + * @xa_local_objects: xarray of local objects exported to QTEE. + * @xa_last_id: next ID to allocate. + * @qtee_version: QTEE version. + */ +struct qcomtee { + struct tee_device *teedev; + struct tee_shm_pool *pool; + struct tee_context *ctx; + struct qcomtee_object_invoke_ctx oic; + struct workqueue_struct *wq; + struct xarray xa_local_objects; + u32 xa_last_id; + u32 qtee_version; +}; + +void qcomtee_fetch_async_reqs(struct qcomtee_object_invoke_ctx *oic); +struct qcomtee_object *qcomtee_idx_erase(struct qcomtee_object_invoke_ctx *oic, + u32 idx); + +struct tee_shm_pool *qcomtee_shm_pool_alloc(void); +void qcomtee_msg_buffers_free(struct qcomtee_object_invoke_ctx *oic); +int qcomtee_msg_buffers_alloc(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_arg *u); + +/** + * qcomtee_object_do_invoke_internal() - Submit an invocation for an object. + * @oic: context to use for the current invocation. + * @object: object being invoked. + * @op: requested operation on the object. + * @u: array of arguments for the current invocation. + * @result: result returned from QTEE. + * + * The caller is responsible for keeping track of the refcount for each + * object, including @object. On return, the caller loses ownership of all + * input objects of type %QCOMTEE_OBJECT_TYPE_CB. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +int qcomtee_object_do_invoke_internal(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object *object, u32 op, + struct qcomtee_arg *u, int *result); + +/** + * struct qcomtee_context_data - Clients' or supplicants' context. + * @qtee_objects_idr: QTEE objects in this context. + * @qtee_lock: mutex for @qtee_objects_idr. + * @reqs_idr: requests in this context that hold an ID. + * @reqs_list: FIFO for requests in PROCESSING or QUEUED state. + * @reqs_lock: mutex for @reqs_idr, @reqs_list and request states. + * @req_c: completion used when the supplicant is waiting for requests. + * @released: state of this context. + */ +struct qcomtee_context_data { + struct idr qtee_objects_idr; + /* Synchronize access to @qtee_objects_idr. */ + struct mutex qtee_lock; + + struct idr reqs_idr; + struct list_head reqs_list; + /* Synchronize access to @reqs_idr, @reqs_list, and request state updates.
*/ + struct mutex reqs_lock; + + struct completion req_c; + + bool released; +}; + +int qcomtee_context_add_qtee_object(struct tee_param *param, + struct qcomtee_object *object, + struct tee_context *ctx); +int qcomtee_context_find_qtee_object(struct qcomtee_object **object, + struct tee_param *param, + struct tee_context *ctx); +void qcomtee_context_del_qtee_object(struct tee_param *param, + struct tee_context *ctx); + +int qcomtee_objref_to_arg(struct qcomtee_arg *arg, struct tee_param *param, + struct tee_context *ctx); +int qcomtee_objref_from_arg(struct tee_param *param, struct qcomtee_arg *arg, + struct tee_context *ctx); + +/* OBJECTS: */ + +/* (1) User Object API. */ + +int is_qcomtee_user_object(struct qcomtee_object *object); +void qcomtee_user_object_set_notify(struct qcomtee_object *object, bool notify); +void qcomtee_requests_destroy(struct qcomtee_context_data *ctxdata); +int qcomtee_user_param_to_object(struct qcomtee_object **object, + struct tee_param *param, + struct tee_context *ctx); +int qcomtee_user_param_from_object(struct tee_param *param, + struct qcomtee_object *object, + struct tee_context *ctx); + +/** + * struct qcomtee_user_object_request_data - Data for user object request. + * @id: ID assigned to the request. + * @object_id: Object ID being invoked by QTEE. + * @op: Requested operation on object. + * @np: Number of parameters in the request. + */ +struct qcomtee_user_object_request_data { + int id; + u64 object_id; + u32 op; + int np; +}; + +int qcomtee_user_object_select(struct tee_context *ctx, + struct tee_param *params, int num_params, + void __user *uaddr, size_t size, + struct qcomtee_user_object_request_data *data); +int qcomtee_user_object_submit(struct tee_context *ctx, + struct tee_param *params, int num_params, + int req_id, int errno); + +/* (2) Primordial Object. */ +extern struct qcomtee_object qcomtee_primordial_object; + +/* (3) Memory Object API. */ + +/* Is it a memory object using tee_shm? */ +int is_qcomtee_memobj_object(struct qcomtee_object *object); + +/** + * qcomtee_memobj_param_to_object() - OBJREF parameter to &struct qcomtee_object. + * @object: object returned. + * @param: TEE parameter. + * @ctx: context in which the conversion should happen. + * + * @param is an OBJREF with %QCOMTEE_OBJREF_FLAG_MEM flags. + * + * Return: On success return 0 or <0 on failure. + */ +int qcomtee_memobj_param_to_object(struct qcomtee_object **object, + struct tee_param *param, + struct tee_context *ctx); + +/* Reverse what qcomtee_memobj_param_to_object() does. */ +int qcomtee_memobj_param_from_object(struct tee_param *param, + struct qcomtee_object *object, + struct tee_context *ctx); + +/** + * qcomtee_mem_object_map() - Map a memory object. + * @object: memory object. + * @map_object: created mapping object. + * @mem_paddr: physical address of the memory. + * @mem_size: size of the memory. + * @perms: QTEE access permissions. + * + * Return: On success return 0 or <0 on failure. + */ +int qcomtee_mem_object_map(struct qcomtee_object *object, + struct qcomtee_object **map_object, u64 *mem_paddr, + u64 *mem_size, u32 *perms); + +#endif /* QCOMTEE_H */ diff --git a/drivers/tee/qcomtee/qcomtee_msg.h b/drivers/tee/qcomtee/qcomtee_msg.h new file mode 100644 index 000000000000..878f70178a5b --- /dev/null +++ b/drivers/tee/qcomtee/qcomtee_msg.h @@ -0,0 +1,304 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+ */ + +#ifndef QCOMTEE_MSG_H +#define QCOMTEE_MSG_H + +#include <linux/bitfield.h> + +/** + * DOC: ''Qualcomm TEE'' (QTEE) Transport Message + * + * There are two buffers shared with QTEE: inbound and outbound buffers. + * The inbound buffer is used for direct object invocation, and the outbound + * buffer is used to make a request from QTEE to the kernel; i.e., a callback + * request. + * + * The unused tail of the outbound buffer is also used for sending and + * receiving asynchronous messages. An asynchronous message is independent of + * the current object invocation (i.e., contents of the inbound buffer) or + * callback request (i.e., the head of the outbound buffer); see + * qcomtee_get_async_buffer(). It is used by endpoints (QTEE or kernel) as an + * optimization to reduce the number of context switches between the secure and + * non-secure worlds. + * + * For instance, QTEE never sends an explicit callback request to release an + * object in the kernel. Instead, it sends asynchronous release messages in the + * outbound buffer when QTEE returns from the previous direct object invocation, + * or appends asynchronous release messages after the current callback request. + * + * QTEE supports two types of arguments in a message: buffer and object + * arguments. Depending on the direction of data flow, they could be input + * buffer (IB) to QTEE, output buffer (OB) from QTEE, input object (IO) to QTEE, + * or output object (OO) from QTEE. Object arguments hold object IDs. Buffer + * arguments hold (offset, size) pairs into the inbound or outbound buffers. + * + * QTEE holds an object table for objects it hosts and exposes to the kernel. + * An object ID is an index to the object table in QTEE. + * + * For the direct object invocation message format in the inbound buffer, see + * &struct qcomtee_msg_object_invoke. For the callback request message format + * in the outbound buffer, see &struct qcomtee_msg_callback. For the message + * format for asynchronous messages in the outbound buffer, see + * &struct qcomtee_async_msg_hdr. + */ + +/** + * define QCOMTEE_MSG_OBJECT_NS_BIT - Non-secure bit + * + * Object ID is a globally unique 32-bit number. IDs referencing objects + * in the kernel should have %QCOMTEE_MSG_OBJECT_NS_BIT set. + */ +#define QCOMTEE_MSG_OBJECT_NS_BIT BIT(31) + +/* Static object IDs recognized by QTEE. */ +#define QCOMTEE_MSG_OBJECT_NULL (0U) +#define QCOMTEE_MSG_OBJECT_ROOT (1U) + +/* Definitions from QTEE as part of the transport protocol. */ + +/* qcomtee_msg_arg is an argument as recognized by QTEE. */ +union qcomtee_msg_arg { + struct { + u32 offset; + u32 size; + } b; + u32 o; +}; + +/* IB and OB payloads in QTEE messages should be aligned to 64-bit boundaries. */ +#define qcomtee_msg_offset_align(o) ALIGN((o), sizeof(u64)) + +/* Operations for objects are 32-bit. Transport uses the upper 16 bits. */ +#define QCOMTEE_MSG_OBJECT_OP_MASK GENMASK(15, 0) + +/* Reserved Operation IDs sent to QTEE: */ +/* QCOMTEE_MSG_OBJECT_OP_RELEASE - Reduces the refcount and releases the object. + * QCOMTEE_MSG_OBJECT_OP_RETAIN - Increases the refcount. + * + * These operation IDs are valid for all objects. + */ + +#define QCOMTEE_MSG_OBJECT_OP_RELEASE (QCOMTEE_MSG_OBJECT_OP_MASK - 0) +#define QCOMTEE_MSG_OBJECT_OP_RETAIN (QCOMTEE_MSG_OBJECT_OP_MASK - 1) + +/* Subset of operations supported by QTEE root object.
*/ + +#define QCOMTEE_ROOT_OP_REG_WITH_CREDENTIALS 5 +#define QCOMTEE_ROOT_OP_NOTIFY_DOMAIN_CHANGE 4 +#define QCOMTEE_ROOT_OP_ADCI_ACCEPT 8 +#define QCOMTEE_ROOT_OP_ADCI_SHUTDOWN 9 + +/* Subset of operations supported by client_env object. */ + +#define QCOMTEE_CLIENT_ENV_OPEN 0 + +/* List of available QTEE service UIDs and subset of operations. */ + +#define QCOMTEE_FEATURE_VER_UID 2033 +#define QCOMTEE_FEATURE_VER_OP_GET 0 +/* Get QTEE version number. */ +#define QCOMTEE_FEATURE_VER_OP_GET_QTEE_ID 10 +#define QTEE_VERSION_GET_MAJOR(x) (((x) >> 22) & 0xffU) +#define QTEE_VERSION_GET_MINOR(x) (((x) >> 12) & 0xffU) +#define QTEE_VERSION_GET_PATCH(x) ((x) >> 0 & 0xfffU) + +/* Response types as returned from qcomtee_object_invoke_ctx_invoke(). */ + +/* The message contains a callback request. */ +#define QCOMTEE_RESULT_INBOUND_REQ_NEEDED 3 + +/** + * struct qcomtee_msg_object_invoke - Direct object invocation message. + * @cxt: object ID hosted in QTEE. + * @op: operation for the object. + * @counts: number of different types of arguments in @args. + * @args: array of arguments. + * + * @counts consists of 4 * 4-bit fields. Bits 0 - 3 represent the number of + * input buffers, bits 4 - 7 represent the number of output buffers, + * bits 8 - 11 represent the number of input objects, and bits 12 - 15 + * represent the number of output objects. The remaining bits should be zero. + * + * 15 12 11 8 7 4 3 0 + * +----------------+----------------+----------------+----------------+ + * | #OO objects | #IO objects | #OB buffers | #IB buffers | + * +----------------+----------------+----------------+----------------+ + * + * The maximum number of arguments of each type is defined by + * %QCOMTEE_ARGS_PER_TYPE. + */ +struct qcomtee_msg_object_invoke { + u32 cxt; + u32 op; + u32 counts; + union qcomtee_msg_arg args[]; +}; + +/* Bit masks for the four 4-bit nibbles holding the counts. */ +#define QCOMTEE_MASK_IB GENMASK(3, 0) +#define QCOMTEE_MASK_OB GENMASK(7, 4) +#define QCOMTEE_MASK_IO GENMASK(11, 8) +#define QCOMTEE_MASK_OO GENMASK(15, 12) + +/** + * struct qcomtee_msg_callback - Callback request message. + * @result: result of operation @op on the object referenced by @cxt. + * @cxt: object ID hosted in the kernel. + * @op: operation for the object. + * @counts: number of different types of arguments in @args. + * @args: array of arguments. + * + * For details of @counts, see &qcomtee_msg_object_invoke.counts. + */ +struct qcomtee_msg_callback { + u32 result; + u32 cxt; + u32 op; + u32 counts; + union qcomtee_msg_arg args[]; +}; + +/* Offset in the message for the beginning of the buffer argument's contents. */ +#define qcomtee_msg_buffer_args(t, n) \ + qcomtee_msg_offset_align(struct_size_t(t, args, n)) +/* Pointer to the beginning of a buffer argument's content at an offset. */ +#define qcomtee_msg_offset_to_ptr(m, off) ((void *)&((char *)(m))[(off)]) + +/* Some helpers to manage msg.counts.
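+ * + * For example, a hypothetical counts value of 0x1012 describes two input + * buffers, one output buffer, no input objects, and one output object; the + * args[] array is then laid out as IB0, IB1, OB0, OO0, and + * qcomtee_msg_idx_oo(0x1012) returns 3.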
*/ + +static inline unsigned int qcomtee_msg_num_ib(u32 counts) +{ + return FIELD_GET(QCOMTEE_MASK_IB, counts); +} + +static inline unsigned int qcomtee_msg_num_ob(u32 counts) +{ + return FIELD_GET(QCOMTEE_MASK_OB, counts); +} + +static inline unsigned int qcomtee_msg_num_io(u32 counts) +{ + return FIELD_GET(QCOMTEE_MASK_IO, counts); +} + +static inline unsigned int qcomtee_msg_num_oo(u32 counts) +{ + return FIELD_GET(QCOMTEE_MASK_OO, counts); +} + +static inline unsigned int qcomtee_msg_idx_ib(u32 counts) +{ + return 0; +} + +static inline unsigned int qcomtee_msg_idx_ob(u32 counts) +{ + return qcomtee_msg_num_ib(counts); +} + +static inline unsigned int qcomtee_msg_idx_io(u32 counts) +{ + return qcomtee_msg_idx_ob(counts) + qcomtee_msg_num_ob(counts); +} + +static inline unsigned int qcomtee_msg_idx_oo(u32 counts) +{ + return qcomtee_msg_idx_io(counts) + qcomtee_msg_num_io(counts); +} + +#define qcomtee_msg_for_each(i, first, num) \ + for ((i) = (first); (i) < (first) + (num); (i)++) + +#define qcomtee_msg_for_each_input_buffer(i, m) \ + qcomtee_msg_for_each(i, qcomtee_msg_idx_ib((m)->counts), \ + qcomtee_msg_num_ib((m)->counts)) + +#define qcomtee_msg_for_each_output_buffer(i, m) \ + qcomtee_msg_for_each(i, qcomtee_msg_idx_ob((m)->counts), \ + qcomtee_msg_num_ob((m)->counts)) + +#define qcomtee_msg_for_each_input_object(i, m) \ + qcomtee_msg_for_each(i, qcomtee_msg_idx_io((m)->counts), \ + qcomtee_msg_num_io((m)->counts)) + +#define qcomtee_msg_for_each_output_object(i, m) \ + qcomtee_msg_for_each(i, qcomtee_msg_idx_oo((m)->counts), \ + qcomtee_msg_num_oo((m)->counts)) + +/* Sum of arguments in a message. */ +#define qcomtee_msg_args(m) \ + (qcomtee_msg_idx_oo((m)->counts) + qcomtee_msg_num_oo((m)->counts)) + +static inline void qcomtee_msg_init(struct qcomtee_msg_object_invoke *msg, + u32 cxt, u32 op, int in_buffer, + int out_buffer, int in_object, + int out_object) +{ + u32 counts = 0; + + counts |= (in_buffer & 0xfU); + counts |= ((out_buffer - in_buffer) & 0xfU) << 4; + counts |= ((in_object - out_buffer) & 0xfU) << 8; + counts |= ((out_object - in_object) & 0xfU) << 12; + + msg->cxt = cxt; + msg->op = op; + msg->counts = counts; +} + +/* Generic error codes. */ +#define QCOMTEE_MSG_OK 0 /* non-specific success code. */ +#define QCOMTEE_MSG_ERROR 1 /* non-specific error. */ +#define QCOMTEE_MSG_ERROR_INVALID 2 /* unsupported/unrecognized request. */ +#define QCOMTEE_MSG_ERROR_SIZE_IN 3 /* supplied buffer/string too large. */ +#define QCOMTEE_MSG_ERROR_SIZE_OUT 4 /* supplied output buffer too small. */ +#define QCOMTEE_MSG_ERROR_USERBASE 10 /* start of user-defined error range. */ + +/* Transport layer error codes. */ +#define QCOMTEE_MSG_ERROR_DEFUNCT -90 /* object no longer exists. */ +#define QCOMTEE_MSG_ERROR_ABORT -91 /* calling thread must exit. */ +#define QCOMTEE_MSG_ERROR_BADOBJ -92 /* invalid object context. */ +#define QCOMTEE_MSG_ERROR_NOSLOTS -93 /* caller's object table full. */ +#define QCOMTEE_MSG_ERROR_MAXARGS -94 /* too many args. */ +#define QCOMTEE_MSG_ERROR_MAXDATA -95 /* buffers too large. */ +#define QCOMTEE_MSG_ERROR_UNAVAIL -96 /* the request could not be processed. */ +#define QCOMTEE_MSG_ERROR_KMEM -97 /* kernel out of memory. */ +#define QCOMTEE_MSG_ERROR_REMOTE -98 /* local method sent to remote object. */ +#define QCOMTEE_MSG_ERROR_BUSY -99 /* Object is busy. */ +#define QCOMTEE_MSG_ERROR_TIMEOUT -103 /* Call Back Object invocation timed out. 
*/ + +static inline void qcomtee_msg_set_result(struct qcomtee_msg_callback *cb_msg, + int err) +{ + if (!err) { + cb_msg->result = QCOMTEE_MSG_OK; + } else if (err < 0) { + /* If err < 0, then it is a transport error. */ + switch (err) { + case -ENOMEM: + cb_msg->result = QCOMTEE_MSG_ERROR_KMEM; + break; + case -ENODEV: + cb_msg->result = QCOMTEE_MSG_ERROR_DEFUNCT; + break; + case -ENOSPC: + case -EBUSY: + cb_msg->result = QCOMTEE_MSG_ERROR_BUSY; + break; + case -EBADF: + case -EINVAL: + cb_msg->result = QCOMTEE_MSG_ERROR_UNAVAIL; + break; + default: + cb_msg->result = QCOMTEE_MSG_ERROR; + } + } else { + /* If err > 0, then it is user defined error, pass it as is. */ + cb_msg->result = err; + } +} + +#endif /* QCOMTEE_MSG_H */ diff --git a/drivers/tee/qcomtee/qcomtee_object.h b/drivers/tee/qcomtee/qcomtee_object.h new file mode 100644 index 000000000000..5221449be7db --- /dev/null +++ b/drivers/tee/qcomtee/qcomtee_object.h @@ -0,0 +1,316 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. + */ + +#ifndef QCOMTEE_OBJECT_H +#define QCOMTEE_OBJECT_H + +#include <linux/completion.h> +#include <linux/kref.h> +#include <linux/slab.h> +#include <linux/workqueue.h> + +struct qcomtee_object; + +/** + * DOC: Overview + * + * qcomtee_object provides object refcounting, ID allocation for objects hosted + * in the kernel, and necessary message marshaling for Qualcomm TEE (QTEE). + * + * To invoke an object in QTEE, the user calls qcomtee_object_do_invoke() + * while passing an instance of &struct qcomtee_object and the requested + * operation + arguments. + * + * After boot, QTEE provides a static object %ROOT_QCOMTEE_OBJECT (type of + * %QCOMTEE_OBJECT_TYPE_ROOT). The root object is invoked to pass the user's + * credentials and obtain other instances of &struct qcomtee_object (type of + * %QCOMTEE_OBJECT_TYPE_TEE) that represent services and TAs in QTEE; + * see &enum qcomtee_object_type. + * + * The objects received from QTEE are refcounted. So the owner of these objects + * can issue qcomtee_object_get() to increase the refcount and pass objects + * to other clients, or issue qcomtee_object_put() to decrease the refcount + * and release the resources in QTEE. + * + * The kernel can host services accessible to QTEE. A driver should embed + * an instance of &struct qcomtee_object in the struct it wants to export to + * QTEE (this is called a callback object). It issues qcomtee_object_user_init() + * to set the dispatch() operation for the callback object and set its type + * to %QCOMTEE_OBJECT_TYPE_CB. + * + * core.c holds an object table for callback objects. An object ID is assigned + * to each callback object, which is an index to the object table. QTEE uses + * these IDs to reference or invoke callback objects. + * + * If QTEE invokes a callback object in the kernel, the dispatch() operation is + * called in the context of the thread that originally called + * qcomtee_object_do_invoke(). + */ + +/** + * enum qcomtee_object_type - Object types. + * @QCOMTEE_OBJECT_TYPE_TEE: object hosted on QTEE. + * @QCOMTEE_OBJECT_TYPE_CB: object hosted on kernel. + * @QCOMTEE_OBJECT_TYPE_ROOT: 'primordial' object. + * @QCOMTEE_OBJECT_TYPE_NULL: NULL object. + * + * The primordial object is used for bootstrapping the IPC connection between + * the kernel and QTEE. It is invoked by the kernel when it wants to get a + * 'client env'. 
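+ * + * A minimal invocation sketch (the operation ID SOME_OP and the request + * buffer req are hypothetical; error handling is elided). On success, + * u[1].o holds the object returned by QTEE: + * + *	struct qcomtee_arg u[3] = { 0 }; + *	int ret, result; + * + *	u[0].b.addr = &req; + *	u[0].b.size = sizeof(req); + *	u[0].type = QCOMTEE_ARG_TYPE_IB; + *	u[1].type = QCOMTEE_ARG_TYPE_OO; + *	ret = qcomtee_object_do_invoke(oic, object, SOME_OP, u, &result);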
+ */ +enum qcomtee_object_type { + QCOMTEE_OBJECT_TYPE_TEE, + QCOMTEE_OBJECT_TYPE_CB, + QCOMTEE_OBJECT_TYPE_ROOT, + QCOMTEE_OBJECT_TYPE_NULL, +}; + +/** + * enum qcomtee_arg_type - Type of QTEE argument. + * @QCOMTEE_ARG_TYPE_INV: invalid type. + * @QCOMTEE_ARG_TYPE_OB: output buffer (OB). + * @QCOMTEE_ARG_TYPE_OO: output object (OO). + * @QCOMTEE_ARG_TYPE_IB: input buffer (IB). + * @QCOMTEE_ARG_TYPE_IO: input object (IO). + * + * Use the invalid type to specify the end of the argument array. + */ +enum qcomtee_arg_type { + QCOMTEE_ARG_TYPE_INV = 0, + QCOMTEE_ARG_TYPE_OB, + QCOMTEE_ARG_TYPE_OO, + QCOMTEE_ARG_TYPE_IB, + QCOMTEE_ARG_TYPE_IO, + QCOMTEE_ARG_TYPE_NR, +}; + +/** + * define QCOMTEE_ARGS_PER_TYPE - Maximum arguments of a specific type. + * + * The QTEE transport protocol limits the maximum number of arguments of + * a specific type (i.e., IB, OB, IO, and OO). + */ +#define QCOMTEE_ARGS_PER_TYPE 16 + +/* Maximum arguments that can fit in a QTEE message, ignoring the type. */ +#define QCOMTEE_ARGS_MAX (QCOMTEE_ARGS_PER_TYPE * (QCOMTEE_ARG_TYPE_NR - 1)) + +struct qcomtee_buffer { + union { + void *addr; + void __user *uaddr; + }; + size_t size; +}; + +/** + * struct qcomtee_arg - Argument for QTEE object invocation. + * @type: type of argument as &enum qcomtee_arg_type. + * @flags: extra flags. + * @b: address and size if the type of argument is a buffer. + * @o: object instance if the type of argument is an object. + * + * &qcomtee_arg.flags only accepts %QCOMTEE_ARG_FLAGS_UADDR for now, which + * states that &qcomtee_arg.b contains a userspace address in uaddr. + */ +struct qcomtee_arg { + enum qcomtee_arg_type type; +/* 'b.uaddr' holds a __user address. */ +#define QCOMTEE_ARG_FLAGS_UADDR BIT(0) + unsigned int flags; + union { + struct qcomtee_buffer b; + struct qcomtee_object *o; + }; +}; + +static inline int qcomtee_args_len(struct qcomtee_arg *args) +{ + int i = 0; + + while (args[i].type != QCOMTEE_ARG_TYPE_INV) + i++; + return i; +} + +/* Context is busy (callback is in progress). */ +#define QCOMTEE_OIC_FLAG_BUSY BIT(1) +/* Context needs to notify the current object. */ +#define QCOMTEE_OIC_FLAG_NOTIFY BIT(2) +/* Context has shared state with QTEE. */ +#define QCOMTEE_OIC_FLAG_SHARED BIT(3) + +/** + * struct qcomtee_object_invoke_ctx - QTEE context for object invocation. + * @ctx: TEE context for this invocation. + * @flags: flags for the invocation context. + * @errno: error code for the invocation. + * @object: current object invoked in this callback context. + * @u: array of arguments for the current invocation (+1 for ending arg). + * @in_msg: inbound buffer shared with QTEE. + * @out_msg: outbound buffer shared with QTEE. + * @in_shm: TEE shm allocated for inbound buffer. + * @out_shm: TEE shm allocated for outbound buffer. + * @data: extra data attached to this context. + */ +struct qcomtee_object_invoke_ctx { + struct tee_context *ctx; + unsigned long flags; + int errno; + + struct qcomtee_object *object; + struct qcomtee_arg u[QCOMTEE_ARGS_MAX + 1]; + + struct qcomtee_buffer in_msg; + struct qcomtee_buffer out_msg; + struct tee_shm *in_shm; + struct tee_shm *out_shm; + + void *data; +}; + +static inline struct qcomtee_object_invoke_ctx * +qcomtee_object_invoke_ctx_alloc(struct tee_context *ctx) +{ + struct qcomtee_object_invoke_ctx *oic; + + oic = kzalloc(sizeof(*oic), GFP_KERNEL); + if (oic) + oic->ctx = ctx; + return oic; +} + +/** + * qcomtee_object_do_invoke() - Submit an invocation for an object. + * @oic: context to use for the current invocation. 
+ * @object: object being invoked. + * @op: requested operation on the object. + * @u: array of arguments for the current invocation. + * @result: result returned from QTEE. + * + * The caller is responsible for keeping track of the refcount for each object, + * including @object. On return, the caller loses ownership of all input + * objects of type %QCOMTEE_OBJECT_TYPE_CB. + * + * @object can be of %QCOMTEE_OBJECT_TYPE_ROOT or %QCOMTEE_OBJECT_TYPE_TEE. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +int qcomtee_object_do_invoke(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object *object, u32 op, + struct qcomtee_arg *u, int *result); + +/** + * struct qcomtee_object_operations - Callback object operations. + * @release: release the object if QTEE is not using it. + * @dispatch: dispatch the operation requested by QTEE. + * @notify: report the status of any pending response submitted by @dispatch. + */ +struct qcomtee_object_operations { + void (*release)(struct qcomtee_object *object); + int (*dispatch)(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object *object, u32 op, + struct qcomtee_arg *args); + void (*notify)(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object *object, int err); +}; + +/** + * struct qcomtee_object - QTEE or kernel object. + * @name: object name. + * @refcount: reference counter. + * @object_type: object type as &enum qcomtee_object_type. + * @info: extra information for the object. + * @ops: callback operations for objects of type %QCOMTEE_OBJECT_TYPE_CB. + * @work: work for async operations on the object. + * + * @work is used for releasing objects of %QCOMTEE_OBJECT_TYPE_TEE type. + */ +struct qcomtee_object { + const char *name; + struct kref refcount; + + enum qcomtee_object_type object_type; + struct object_info { + unsigned long qtee_id; + /* TEE context for QTEE object async requests. */ + struct tee_context *qcomtee_async_ctx; + } info; + + struct qcomtee_object_operations *ops; + struct work_struct work; +}; + +/* Static instances of qcomtee_object objects. */ +#define NULL_QCOMTEE_OBJECT ((struct qcomtee_object *)(0)) +extern struct qcomtee_object qcomtee_object_root; +#define ROOT_QCOMTEE_OBJECT (&qcomtee_object_root) + +static inline enum qcomtee_object_type +typeof_qcomtee_object(struct qcomtee_object *object) +{ + if (object == NULL_QCOMTEE_OBJECT) + return QCOMTEE_OBJECT_TYPE_NULL; + return object->object_type; +} + +static inline const char *qcomtee_object_name(struct qcomtee_object *object) +{ + if (object == NULL_QCOMTEE_OBJECT) + return "null"; + + if (!object->name) + return "no-name"; + return object->name; +} + +/** + * qcomtee_object_user_init() - Initialize an object for the user. + * @object: object to initialize. + * @ot: type of object as &enum qcomtee_object_type. + * @ops: instance of callbacks. + * @fmt: name assigned to the object. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +int qcomtee_object_user_init(struct qcomtee_object *object, + enum qcomtee_object_type ot, + struct qcomtee_object_operations *ops, + const char *fmt, ...) __printf(4, 5); + +/* Object release is RCU protected. */ +int qcomtee_object_get(struct qcomtee_object *object); +void qcomtee_object_put(struct qcomtee_object *object); + +#define qcomtee_arg_for_each(i, args) \ + for (i = 0; args[i].type != QCOMTEE_ARG_TYPE_INV; i++) + +/* Next argument of type @type after index @i. 
 */
+int qcomtee_next_arg_type(struct qcomtee_arg *u, int i,
+			  enum qcomtee_arg_type type);
+
+/* Iterate over arguments of a given type. */
+#define qcomtee_arg_for_each_type(i, args, at) \
+	for (i = qcomtee_next_arg_type(args, 0, at); \
+	     args[i].type != QCOMTEE_ARG_TYPE_INV; \
+	     i = qcomtee_next_arg_type(args, i + 1, at))
+
+#define qcomtee_arg_for_each_input_buffer(i, args) \
+	qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_IB)
+#define qcomtee_arg_for_each_output_buffer(i, args) \
+	qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_OB)
+#define qcomtee_arg_for_each_input_object(i, args) \
+	qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_IO)
+#define qcomtee_arg_for_each_output_object(i, args) \
+	qcomtee_arg_for_each_type(i, args, QCOMTEE_ARG_TYPE_OO)
+
+struct qcomtee_object *
+qcomtee_object_get_client_env(struct qcomtee_object_invoke_ctx *oic);
+
+struct qcomtee_object *
+qcomtee_object_get_service(struct qcomtee_object_invoke_ctx *oic,
+			   struct qcomtee_object *client_env, u32 uid);
+
+#endif /* QCOMTEE_OBJECT_H */
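As an illustration of how the declarations in this header compose (not part of the patch; EXAMPLE_OP and the service object are hypothetical), a kernel-side caller could invoke a QTEE object with one input buffer and one output object roughly as below. Note the QCOMTEE_ARG_TYPE_INV terminator and that, per the kernel-doc above, the caller owns (and must put) returned output objects:

static int example_invoke(struct tee_context *teectx,
			  struct qcomtee_object *service,
			  void *data, size_t len)
{
	struct qcomtee_object_invoke_ctx *oic;
	struct qcomtee_arg u[] = {
		{ .type = QCOMTEE_ARG_TYPE_IB,
		  .b = { .addr = data, .size = len } },
		{ .type = QCOMTEE_ARG_TYPE_OO },
		{ .type = QCOMTEE_ARG_TYPE_INV },	/* end of arguments */
	};
	int result, ret;

	oic = qcomtee_object_invoke_ctx_alloc(teectx);
	if (!oic)
		return -ENOMEM;

	/* EXAMPLE_OP is a hypothetical operation code understood by QTEE. */
	ret = qcomtee_object_do_invoke(oic, service, EXAMPLE_OP, u, &result);
	if (!ret && !result)
		qcomtee_object_put(u[1].o);	/* done with the output object */

	kfree(oic);
	return ret ?: result;
}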
diff --git a/drivers/tee/qcomtee/shm.c b/drivers/tee/qcomtee/shm.c
new file mode 100644
index 000000000000..580bd25f98ed
--- /dev/null
+++ b/drivers/tee/qcomtee/shm.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/firmware/qcom/qcom_tzmem.h>
+#include <linux/mm.h>
+
+#include "qcomtee.h"
+
+/**
+ * define MAX_OUTBOUND_BUFFER_SIZE - Maximum size of outbound buffers.
+ *
+ * The size of the outbound buffer depends on QTEE callback requests.
+ */
+#define MAX_OUTBOUND_BUFFER_SIZE SZ_4K
+
+/**
+ * define MAX_INBOUND_BUFFER_SIZE - Maximum size of the inbound buffer.
+ *
+ * The size of the inbound buffer depends on the user's requests,
+ * specifically the number of IB and OB arguments. If an invocation
+ * requires a size larger than %MAX_INBOUND_BUFFER_SIZE, the user should
+ * consider using another form of shared memory with QTEE.
+ */
+#define MAX_INBOUND_BUFFER_SIZE SZ_4M
+
+/**
+ * qcomtee_msg_buffers_alloc() - Allocate inbound and outbound buffers.
+ * @oic: context to use for the current invocation.
+ * @u: array of arguments for the current invocation.
+ *
+ * It calculates the size of the inbound and outbound buffers based on the
+ * arguments in @u and allocates the buffers from the teedev pool.
+ *
+ * Return: On success, returns 0. On error, returns < 0.
+ */
+int qcomtee_msg_buffers_alloc(struct qcomtee_object_invoke_ctx *oic,
+			      struct qcomtee_arg *u)
+{
+	struct tee_context *ctx = oic->ctx;
+	struct tee_shm *shm;
+	size_t size;
+	int i;
+
+	/* Start offset in a message for buffer arguments. */
+	size = qcomtee_msg_buffer_args(struct qcomtee_msg_object_invoke,
+				       qcomtee_args_len(u));
+	if (size > MAX_INBOUND_BUFFER_SIZE)
+		return -EINVAL;
+
+	/* Add the size of IB arguments. */
+	qcomtee_arg_for_each_input_buffer(i, u) {
+		size = size_add(size, qcomtee_msg_offset_align(u[i].b.size));
+		if (size > MAX_INBOUND_BUFFER_SIZE)
+			return -EINVAL;
+	}
+
+	/* Add the size of OB arguments. */
+	qcomtee_arg_for_each_output_buffer(i, u) {
+		size = size_add(size, qcomtee_msg_offset_align(u[i].b.size));
+		if (size > MAX_INBOUND_BUFFER_SIZE)
+			return -EINVAL;
+	}
+
+	/* Allocate the inbound buffer. */
+	shm = tee_shm_alloc_priv_buf(ctx, size);
+	if (IS_ERR(shm))
+		return PTR_ERR(shm);
+	oic->in_shm = shm;
+
+	/* Allocate the outbound buffer. */
+	shm = tee_shm_alloc_priv_buf(ctx, MAX_OUTBOUND_BUFFER_SIZE);
+	if (IS_ERR(shm)) {
+		tee_shm_free(oic->in_shm);
+
+		return PTR_ERR(shm);
+	}
+	oic->out_shm = shm;
+
+	oic->in_msg.addr = tee_shm_get_va(oic->in_shm, 0);
+	oic->in_msg.size = tee_shm_get_size(oic->in_shm);
+	oic->out_msg.addr = tee_shm_get_va(oic->out_shm, 0);
+	oic->out_msg.size = tee_shm_get_size(oic->out_shm);
+	/* QTEE assumes unused buffers are zeroed. */
+	memzero_explicit(oic->in_msg.addr, oic->in_msg.size);
+	memzero_explicit(oic->out_msg.addr, oic->out_msg.size);
+
+	return 0;
+}
+
+void qcomtee_msg_buffers_free(struct qcomtee_object_invoke_ctx *oic)
+{
+	tee_shm_free(oic->in_shm);
+	tee_shm_free(oic->out_shm);
+}
+
+/* Dynamic shared memory pool based on tee_dyn_shm_alloc_helper(). */
+
+static int qcomtee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+				struct page **pages, size_t num_pages,
+				unsigned long start)
+{
+	return qcom_tzmem_shm_bridge_create(shm->paddr, shm->size,
+					    &shm->sec_world_id);
+}
+
+static int qcomtee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+	qcom_tzmem_shm_bridge_delete(shm->sec_world_id);
+
+	return 0;
+}
+
+static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
+			 size_t size, size_t align)
+{
+	return tee_dyn_shm_alloc_helper(shm, size, align, qcomtee_shm_register);
+}
+
+static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
+{
+	tee_dyn_shm_free_helper(shm, qcomtee_shm_unregister);
+}
+
+static void pool_op_destroy_pool(struct tee_shm_pool *pool)
+{
+	kfree(pool);
+}
+
+static const struct tee_shm_pool_ops pool_ops = {
+	.alloc = pool_op_alloc,
+	.free = pool_op_free,
+	.destroy_pool = pool_op_destroy_pool,
+};
+
+struct tee_shm_pool *qcomtee_shm_pool_alloc(void)
+{
+	struct tee_shm_pool *pool;
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return ERR_PTR(-ENOMEM);
+
+	pool->ops = &pool_ops;
+
+	return pool;
+}
diff --git a/drivers/tee/qcomtee/user_obj.c b/drivers/tee/qcomtee/user_obj.c
new file mode 100644
index 000000000000..0139905f2684
--- /dev/null
+++ b/drivers/tee/qcomtee/user_obj.c
@@ -0,0 +1,692 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "qcomtee.h"
+
+/**
+ * DOC: User Objects aka Supplicants
+ *
+ * Any userspace process with access to the TEE device file can behave as a
+ * supplicant by creating a user object. Any TEE parameter of type OBJREF
+ * with the %QCOMTEE_OBJREF_FLAG_USER flag set is considered a user object.
+ *
+ * A supplicant uses qcomtee_user_object_select() (i.e. TEE_IOC_SUPPL_RECV) to
+ * receive a QTEE user object request and qcomtee_user_object_submit()
+ * (i.e. TEE_IOC_SUPPL_SEND) to submit a response. QTEE expects to receive the
+ * response, including OB and OO parameters, in a specific order in the
+ * message; parameters submitted with qcomtee_user_object_submit() must
+ * maintain this order.
+ */
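Illustrative only (not part of the patch): a rough userspace sketch of the generic supplicant loop around the TEE_IOC_SUPPL_RECV/TEE_IOC_SUPPL_SEND ioctls named above. The qcomtee-specific meaning of the received parameters (object ID, operation, request ID) is whatever qcomtee_user_object_select() fills in; this sketch only shows the ioctl mechanics and assumes the caller has already opened the TEE device fd:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

#define NPARAMS 4	/* arbitrary for this sketch */

static int serve_one_request(int tee_fd)
{
	/* One buffer serves both directions; both headers are u64 + u32. */
	char buf[sizeof(struct tee_iocl_supp_recv_arg) +
		 NPARAMS * sizeof(struct tee_ioctl_param)];
	struct tee_ioctl_buf_data bdata = {
		.buf_ptr = (uintptr_t)buf,
		.buf_len = sizeof(buf),
	};
	struct tee_iocl_supp_recv_arg *recv = (void *)buf;
	struct tee_iocl_supp_send_arg *send = (void *)buf;

	memset(buf, 0, sizeof(buf));
	recv->num_params = NPARAMS;
	if (ioctl(tee_fd, TEE_IOC_SUPPL_RECV, &bdata))	/* blocks for a request */
		return -1;

	/* ...dispatch recv->func with its params, fill OB/OO results... */

	send->ret = 0;			/* errno-style result of the operation */
	send->num_params = NPARAMS;	/* must match what the request expects */
	return ioctl(tee_fd, TEE_IOC_SUPPL_SEND, &bdata);
}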
+/**
+ * struct qcomtee_user_object - User object.
+ * @object: &struct qcomtee_object representing the user object.
+ * @ctx: context for which the user object is defined.
+ * @object_id: object ID in @ctx.
+ * @notify: notify on release.
+ *
+ * Any object managed in userspace is represented by this struct.
+ * If @notify is set, a notification message is sent back to userspace
+ * upon release.
+ */
+struct qcomtee_user_object {
+	struct qcomtee_object object;
+	struct tee_context *ctx;
+	u64 object_id;
+	bool notify;
+};
+
+#define to_qcomtee_user_object(o) \
+	container_of((o), struct qcomtee_user_object, object)
+
+static struct qcomtee_object_operations qcomtee_user_object_ops;
+
+/* Is it a user object? */
+int is_qcomtee_user_object(struct qcomtee_object *object)
+{
+	return object != NULL_QCOMTEE_OBJECT &&
+	       typeof_qcomtee_object(object) == QCOMTEE_OBJECT_TYPE_CB &&
+	       object->ops == &qcomtee_user_object_ops;
+}
+
+/* Set the user object's 'notify on release' flag. */
+void qcomtee_user_object_set_notify(struct qcomtee_object *object, bool notify)
+{
+	if (is_qcomtee_user_object(object))
+		to_qcomtee_user_object(object)->notify = notify;
+}
+
+/* Supplicant Requests: */
+
+/**
+ * enum qcomtee_req_state - Current state of a request.
+ * @QCOMTEE_REQ_QUEUED: Request is waiting for the supplicant.
+ * @QCOMTEE_REQ_PROCESSING: Request has been picked up by the supplicant.
+ * @QCOMTEE_REQ_PROCESSED: Response has been submitted for the request.
+ */
+enum qcomtee_req_state {
+	QCOMTEE_REQ_QUEUED = 1,
+	QCOMTEE_REQ_PROCESSING,
+	QCOMTEE_REQ_PROCESSED,
+};
+
+/* User requests sent to supplicants. */
+struct qcomtee_ureq {
+	enum qcomtee_req_state state;
+
+	/* User Request: */
+	int req_id;
+	u64 object_id;
+	u32 op;
+	struct qcomtee_arg *args;
+	int errno;
+
+	struct list_head node;
+	struct completion c; /* Completion for whoever waits. */
+};
+
+/*
+ * Placeholder for a PROCESSING request in qcomtee_context.reqs_idr.
+ *
+ * If the thread that calls qcomtee_object_invoke() dies and the supplicant
+ * is processing the request, replace the entry in qcomtee_context.reqs_idr
+ * with empty_ureq. This ensures that (1) the req_id remains busy and is not
+ * reused, and (2) the supplicant fails to submit the response and performs
+ * the necessary rollback.
+ */
+static struct qcomtee_ureq empty_ureq = { .state = QCOMTEE_REQ_PROCESSING };
+
+/* Enqueue a user request for a context and assign a request ID. */
+static int ureq_enqueue(struct qcomtee_context_data *ctxdata,
+			struct qcomtee_ureq *ureq)
+{
+	int ret;
+
+	guard(mutex)(&ctxdata->reqs_lock);
+	/* The supplicant is dying. */
+	if (ctxdata->released)
+		return -ENODEV;
+
+	/* Allocate an ID and queue the request. */
+	ret = idr_alloc(&ctxdata->reqs_idr, ureq, 0, 0, GFP_KERNEL);
+	if (ret < 0)
+		return ret;
+
+	ureq->req_id = ret;
+	ureq->state = QCOMTEE_REQ_QUEUED;
+	list_add_tail(&ureq->node, &ctxdata->reqs_list);
+
+	return 0;
+}
+
+/**
+ * ureq_dequeue() - Dequeue a user request from a context.
+ * @ctxdata: context data for a context to dequeue the request.
+ * @req_id: ID of the request to be dequeued.
+ *
+ * It dequeues a user request and releases its request ID.
+ *
+ * Context: The caller should hold &qcomtee_context_data->reqs_lock.
+ * Return: Returns the user request associated with this ID; otherwise, NULL.
+ */
+static struct qcomtee_ureq *ureq_dequeue(struct qcomtee_context_data *ctxdata,
+					 int req_id)
+{
+	struct qcomtee_ureq *ureq;
+
+	ureq = idr_remove(&ctxdata->reqs_idr, req_id);
+	if (ureq == &empty_ureq || !ureq)
+		return NULL;
+
+	list_del(&ureq->node);
+
+	return ureq;
+}
+
+/**
+ * ureq_select() - Select the next request in a context.
+ * @ctxdata: context data for a context to pop a request.
+ * @ubuf_size: size of the available buffer for UBUF parameters.
+ * @num_params: number of entries for the TEE parameter array.
+ *
+ * It checks if @num_params is large enough to fit the next request arguments.
+ * It checks if @ubuf_size is large enough to fit IB buffer arguments.
+ *
+ * Context: The caller should hold &qcomtee_context_data->reqs_lock.
+ * Return: On success, returns a request; returns NULL if no request is
+ * queued; on failure, returns an ERR_PTR.
+ */
+static struct qcomtee_ureq *ureq_select(struct qcomtee_context_data *ctxdata,
+					size_t ubuf_size, int num_params)
+{
+	struct qcomtee_ureq *req, *ureq = NULL;
+	struct qcomtee_arg *u;
+	int i;
+
+	/* Find the first queued request. */
+	list_for_each_entry(req, &ctxdata->reqs_list, node) {
+		if (req->state == QCOMTEE_REQ_QUEUED) {
+			ureq = req;
+			break;
+		}
+	}
+
+	if (!ureq)
+		return NULL;
+
+	u = ureq->args;
+	/* (1) Are there enough TEE parameters? */
+	if (num_params < qcomtee_args_len(u))
+		return ERR_PTR(-EINVAL);
+	/* (2) Is there enough space to pass input buffers? */
+	qcomtee_arg_for_each_input_buffer(i, u) {
+		ubuf_size = size_sub(ubuf_size, u[i].b.size);
+		if (ubuf_size == SIZE_MAX)
+			return ERR_PTR(-EINVAL);
+
+		ubuf_size = round_down(ubuf_size, 8);
+	}
+
+	return ureq;
+}
+
+/* Gets called when the user closes the device. */
+void qcomtee_requests_destroy(struct qcomtee_context_data *ctxdata)
+{
+	struct qcomtee_ureq *req, *ureq;
+
+	guard(mutex)(&ctxdata->reqs_lock);
+	/* So ureq_enqueue() refuses new requests from QTEE. */
+	ctxdata->released = true;
+	/* ureqs in reqs_list are in QUEUED or PROCESSING (!= empty_ureq) state. */
+	list_for_each_entry_safe(ureq, req, &ctxdata->reqs_list, node) {
+		ureq_dequeue(ctxdata, ureq->req_id);
+
+		if (ureq->op != QCOMTEE_MSG_OBJECT_OP_RELEASE) {
+			ureq->state = QCOMTEE_REQ_PROCESSED;
+			ureq->errno = -ENODEV;
+
+			complete(&ureq->c);
+		} else {
+			kfree(ureq);
+		}
+	}
+}
+
+/* User Object API. */
+
+/* User object dispatcher. */
+static int qcomtee_user_object_dispatch(struct qcomtee_object_invoke_ctx *oic,
+					struct qcomtee_object *object, u32 op,
+					struct qcomtee_arg *args)
+{
+	struct qcomtee_user_object *uo = to_qcomtee_user_object(object);
+	struct qcomtee_context_data *ctxdata = uo->ctx->data;
+	struct qcomtee_ureq *ureq __free(kfree) = NULL;
+	int errno;
+
+	ureq = kzalloc(sizeof(*ureq), GFP_KERNEL);
+	if (!ureq)
+		return -ENOMEM;
+
+	init_completion(&ureq->c);
+	ureq->object_id = uo->object_id;
+	ureq->op = op;
+	ureq->args = args;
+
+	/* Queue the request. */
+	if (ureq_enqueue(ctxdata, ureq))
+		return -ENODEV;
+	/* Wake up the supplicant to process it. */
+	complete(&ctxdata->req_c);
+
+	/*
+	 * Wait for the supplicant to process the request. Wait as KILLABLE
+	 * in case the supplicant and the invoke thread run in the same
+	 * process and the supplicant crashes, or the shutdown sequence
+	 * kills the supplicant first; otherwise, the wait would get stuck
+	 * indefinitely.
+	 *
+	 * If the supplicant processes long-running requests, also use
+	 * TASK_FREEZABLE to allow the device to safely suspend if needed.
+	 */
+	if (!wait_for_completion_state(&ureq->c,
+				       TASK_KILLABLE | TASK_FREEZABLE)) {
+		errno = ureq->errno;
+		if (!errno)
+			oic->data = no_free_ptr(ureq);
+	} else {
+		enum qcomtee_req_state prev_state;
+
+		errno = -ENODEV;
+
+		scoped_guard(mutex, &ctxdata->reqs_lock) {
+			prev_state = ureq->state;
+			/* Replace with empty_ureq to keep req_id reserved. */
+			if (prev_state == QCOMTEE_REQ_PROCESSING) {
+				list_del(&ureq->node);
+				idr_replace(&ctxdata->reqs_idr,
+					    &empty_ureq, ureq->req_id);
+
+			/* Remove it; the supplicant has never seen this request. */
+			} else if (prev_state == QCOMTEE_REQ_QUEUED) {
+				ureq_dequeue(ctxdata, ureq->req_id);
+			}
+		}
+
+		/* Supplicant did some work, do not discard it.
*/ + if (prev_state == QCOMTEE_REQ_PROCESSED) { + errno = ureq->errno; + if (!errno) + oic->data = no_free_ptr(ureq); + } + } + + return errno; +} + +/* Gets called after submitting the dispatcher response. */ +static void qcomtee_user_object_notify(struct qcomtee_object_invoke_ctx *oic, + struct qcomtee_object *unused_object, + int err) +{ + struct qcomtee_ureq *ureq = oic->data; + struct qcomtee_arg *u = ureq->args; + int i; + + /* + * If err, there was a transport issue, and QTEE did not receive the + * response for the dispatcher. Release the callback object created for + * QTEE, in addition to the copies of objects kept for the drivers. + */ + qcomtee_arg_for_each_output_object(i, u) { + if (err && + (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB)) + qcomtee_object_put(u[i].o); + qcomtee_object_put(u[i].o); + } + + kfree(ureq); +} + +static void qcomtee_user_object_release(struct qcomtee_object *object) +{ + struct qcomtee_user_object *uo = to_qcomtee_user_object(object); + struct qcomtee_context_data *ctxdata = uo->ctx->data; + struct qcomtee_ureq *ureq; + + /* RELEASE does not require any argument. */ + static struct qcomtee_arg args[] = { { .type = QCOMTEE_ARG_TYPE_INV } }; + + if (!uo->notify) + goto out_no_notify; + + ureq = kzalloc(sizeof(*ureq), GFP_KERNEL); + if (!ureq) + goto out_no_notify; + + /* QUEUE a release request: */ + ureq->object_id = uo->object_id; + ureq->op = QCOMTEE_MSG_OBJECT_OP_RELEASE; + ureq->args = args; + if (ureq_enqueue(ctxdata, ureq)) { + kfree(ureq); + /* Ignore the notification if it cannot be queued. */ + goto out_no_notify; + } + + complete(&ctxdata->req_c); + +out_no_notify: + teedev_ctx_put(uo->ctx); + kfree(uo); +} + +static struct qcomtee_object_operations qcomtee_user_object_ops = { + .release = qcomtee_user_object_release, + .notify = qcomtee_user_object_notify, + .dispatch = qcomtee_user_object_dispatch, +}; + +/** + * qcomtee_user_param_to_object() - OBJREF parameter to &struct qcomtee_object. + * @object: object returned. + * @param: TEE parameter. + * @ctx: context in which the conversion should happen. + * + * @param is an OBJREF with %QCOMTEE_OBJREF_FLAG_USER flags. + * + * Return: On success, returns 0; on failure, returns < 0. + */ +int qcomtee_user_param_to_object(struct qcomtee_object **object, + struct tee_param *param, + struct tee_context *ctx) +{ + struct qcomtee_user_object *user_object __free(kfree) = NULL; + int err; + + user_object = kzalloc(sizeof(*user_object), GFP_KERNEL); + if (!user_object) + return -ENOMEM; + + user_object->ctx = ctx; + user_object->object_id = param->u.objref.id; + /* By default, always notify userspace upon release. */ + user_object->notify = true; + err = qcomtee_object_user_init(&user_object->object, + QCOMTEE_OBJECT_TYPE_CB, + &qcomtee_user_object_ops, "uo-%llu", + param->u.objref.id); + if (err) + return err; + /* Matching teedev_ctx_put() is in qcomtee_user_object_release(). */ + teedev_ctx_get(ctx); + + *object = &no_free_ptr(user_object)->object; + + return 0; +} + +/* Reverse what qcomtee_user_param_to_object() does. */ +int qcomtee_user_param_from_object(struct tee_param *param, + struct qcomtee_object *object, + struct tee_context *ctx) +{ + struct qcomtee_user_object *uo; + + uo = to_qcomtee_user_object(object); + /* Ensure the object is in the same context as the caller. */ + if (uo->ctx != ctx) + return -EINVAL; + + param->u.objref.id = uo->object_id; + param->u.objref.flags = QCOMTEE_OBJREF_FLAG_USER; + + /* User objects are valid in userspace; do not keep a copy. 
+	 */
+	qcomtee_object_put(object);
+
+	return 0;
+}
+
+/**
+ * qcomtee_cb_params_from_args() - Convert QTEE arguments to TEE parameters.
+ * @params: TEE parameters.
+ * @u: QTEE arguments.
+ * @num_params: number of elements in the parameter array.
+ * @ubuf_addr: user buffer for arguments of type %QCOMTEE_ARG_TYPE_IB.
+ * @ubuf_size: size of the user buffer.
+ * @ctx: context in which the conversion should happen.
+ *
+ * It expects @params to have enough entries for @u. Entries in @params are
+ * initially of type %TEE_IOCTL_PARAM_ATTR_TYPE_NONE.
+ *
+ * Return: On success, returns the number of input parameters;
+ * on failure, returns < 0.
+ */
+static int qcomtee_cb_params_from_args(struct tee_param *params,
+				       struct qcomtee_arg *u, int num_params,
+				       void __user *ubuf_addr, size_t ubuf_size,
+				       struct tee_context *ctx)
+{
+	int i, np;
+	void __user *uaddr;
+
+	qcomtee_arg_for_each(i, u) {
+		switch (u[i].type) {
+		case QCOMTEE_ARG_TYPE_IB:
+			params[i].attr = TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT;
+
+			/* Underflow already checked in ureq_select(). */
+			ubuf_size = round_down(ubuf_size - u[i].b.size, 8);
+			uaddr = (void __user *)(ubuf_addr + ubuf_size);
+
+			params[i].u.ubuf.uaddr = uaddr;
+			params[i].u.ubuf.size = u[i].b.size;
+			if (copy_to_user(params[i].u.ubuf.uaddr, u[i].b.addr,
+					 u[i].b.size))
+				goto out_failed;
+
+			break;
+		case QCOMTEE_ARG_TYPE_OB:
+			params[i].attr = TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT;
+			/* Let the user know the maximum size QTEE expects. */
+			params[i].u.ubuf.size = u[i].b.size;
+
+			break;
+		case QCOMTEE_ARG_TYPE_IO:
+			params[i].attr = TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT;
+			if (qcomtee_objref_from_arg(&params[i], &u[i], ctx))
+				goto out_failed;
+
+			break;
+		case QCOMTEE_ARG_TYPE_OO:
+			params[i].attr =
+				TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT;
+
+			break;
+		default: /* Never get here! */
+			goto out_failed;
+		}
+	}
+
+	return i;
+
+out_failed:
+	/* Undo qcomtee_objref_from_arg(). */
+	for (np = i; np >= 0; np--) {
+		if (params[np].attr == TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT)
+			qcomtee_context_del_qtee_object(&params[np], ctx);
+	}
+
+	/* Release any IO objects not processed. */
+	for (; u[i].type; i++) {
+		if (u[i].type == QCOMTEE_ARG_TYPE_IO)
+			qcomtee_object_put(u[i].o);
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * qcomtee_cb_params_to_args() - Convert TEE parameters to QTEE arguments.
+ * @u: QTEE arguments.
+ * @params: TEE parameters.
+ * @num_params: number of elements in the parameter array.
+ * @ctx: context in which the conversion should happen.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+static int qcomtee_cb_params_to_args(struct qcomtee_arg *u,
+				     struct tee_param *params, int num_params,
+				     struct tee_context *ctx)
+{
+	int i;
+
+	qcomtee_arg_for_each(i, u) {
+		switch (u[i].type) {
+		case QCOMTEE_ARG_TYPE_IB:
+			if (params[i].attr !=
+			    TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT)
+				goto out_failed;
+
+			break;
+		case QCOMTEE_ARG_TYPE_OB:
+			if (params[i].attr !=
+			    TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT)
+				goto out_failed;
+
+			/* The client cannot send more data than requested. */
+			if (params[i].u.ubuf.size > u[i].b.size)
+				goto out_failed;
+
+			if (copy_from_user(u[i].b.addr, params[i].u.ubuf.uaddr,
+					   params[i].u.ubuf.size))
+				goto out_failed;
+
+			u[i].b.size = params[i].u.ubuf.size;
+
+			break;
+		case QCOMTEE_ARG_TYPE_IO:
+			if (params[i].attr !=
+			    TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT)
+				goto out_failed;
+
+			break;
+		case QCOMTEE_ARG_TYPE_OO:
+			if (params[i].attr !=
+			    TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT)
+				goto out_failed;
+
+			if (qcomtee_objref_to_arg(&u[i], &params[i], ctx))
+				goto out_failed;
+
+			break;
+		default: /* Never get here! */
+			goto out_failed;
+		}
+	}
+
+	return 0;
+
+out_failed:
+	/* Undo qcomtee_objref_to_arg(). */
+	for (i--; i >= 0; i--) {
+		if (u[i].type != QCOMTEE_ARG_TYPE_OO)
+			continue;
+
+		qcomtee_user_object_set_notify(u[i].o, false);
+		if (typeof_qcomtee_object(u[i].o) == QCOMTEE_OBJECT_TYPE_CB)
+			qcomtee_object_put(u[i].o);
+
+		qcomtee_object_put(u[i].o);
+	}
+
+	return -EINVAL;
+}
+
+/**
+ * qcomtee_user_object_select() - Select a request for a user object.
+ * @ctx: context to look for a user object.
+ * @params: parameters for the selected operation.
+ * @num_params: number of elements in the parameter array.
+ * @uaddr: user buffer for output UBUF parameters.
+ * @size: size of the user buffer @uaddr.
+ * @data: information for the selected request.
+ *
+ * @params is filled along with @data for the selected request.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */
+int qcomtee_user_object_select(struct tee_context *ctx,
+			       struct tee_param *params, int num_params,
+			       void __user *uaddr, size_t size,
+			       struct qcomtee_user_object_request_data *data)
+{
+	struct qcomtee_context_data *ctxdata = ctx->data;
+	struct qcomtee_ureq *ureq;
+	int ret;
+
+	/*
+	 * Hold the reqs_lock not only for ureq_select() and updating the ureq
+	 * state to PROCESSING but for the entire duration of ureq access.
+	 * This prevents qcomtee_user_object_dispatch() from freeing ureq
+	 * while it is still in use, if the client dies.
+	 */
+
+	while (1) {
+		scoped_guard(mutex, &ctxdata->reqs_lock) {
+			ureq = ureq_select(ctxdata, size, num_params);
+			if (!ureq)
+				goto wait_for_request;
+
+			if (IS_ERR(ureq))
+				return PTR_ERR(ureq);
+
+			/* Processing the request 'QUEUED -> PROCESSING'. */
+			ureq->state = QCOMTEE_REQ_PROCESSING;
+			/* Prepare the user request: */
+			data->id = ureq->req_id;
+			data->object_id = ureq->object_id;
+			data->op = ureq->op;
+			ret = qcomtee_cb_params_from_args(params, ureq->args,
+							  num_params, uaddr,
+							  size, ctx);
+			if (ret >= 0)
+				goto done_request;
+
+			/* Something is wrong with the request: */
+			ureq_dequeue(ctxdata, data->id);
+			/* Send the error to QTEE. */
+			ureq->state = QCOMTEE_REQ_PROCESSED;
+			ureq->errno = ret;
+
+			complete(&ureq->c);
+		}
+
+		continue;
+wait_for_request:
+		/* Wait for a new QUEUED request. */
+		if (wait_for_completion_interruptible(&ctxdata->req_c))
+			return -ERESTARTSYS;
+	}
+
+done_request:
+	/* No one is waiting for the response. */
+	if (data->op == QCOMTEE_MSG_OBJECT_OP_RELEASE) {
+		scoped_guard(mutex, &ctxdata->reqs_lock)
+			ureq_dequeue(ctxdata, data->id);
+		kfree(ureq);
+	}
+
+	data->np = ret;
+
+	return 0;
+}
+
+/**
+ * qcomtee_user_object_submit() - Submit a response for a user object.
+ * @ctx: context to look for a user object.
+ * @params: returned parameters.
+ * @num_params: number of elements in the parameter array.
+ * @req_id: request ID for the response.
+ * @errno: result of the user object invocation.
+ *
+ * Return: On success, returns 0; on failure, returns < 0.
+ */ +int qcomtee_user_object_submit(struct tee_context *ctx, + struct tee_param *params, int num_params, + int req_id, int errno) +{ + struct qcomtee_context_data *ctxdata = ctx->data; + struct qcomtee_ureq *ureq; + + /* See comments for reqs_lock in qcomtee_user_object_select(). */ + guard(mutex)(&ctxdata->reqs_lock); + + ureq = ureq_dequeue(ctxdata, req_id); + if (!ureq) + return -EINVAL; + + ureq->state = QCOMTEE_REQ_PROCESSED; + + if (!errno) + ureq->errno = qcomtee_cb_params_to_args(ureq->args, params, + num_params, ctx); + else + ureq->errno = errno; + /* Return errno if qcomtee_cb_params_to_args() failed; otherwise 0. */ + if (!errno && ureq->errno) + errno = ureq->errno; + else + errno = 0; + + /* Send result to QTEE. */ + complete(&ureq->c); + + return errno; +} diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index acc7998758ad..d65d47cc154e 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -14,7 +14,6 @@ #include <linux/slab.h> #include <linux/tee_core.h> #include <linux/uaccess.h> -#include <crypto/hash.h> #include <crypto/sha1.h> #include "tee_private.h" @@ -80,6 +79,7 @@ void teedev_ctx_get(struct tee_context *ctx) kref_get(&ctx->refcount); } +EXPORT_SYMBOL_GPL(teedev_ctx_get); static void teedev_ctx_release(struct kref *ref) { @@ -97,11 +97,15 @@ void teedev_ctx_put(struct tee_context *ctx) kref_put(&ctx->refcount, teedev_ctx_release); } +EXPORT_SYMBOL_GPL(teedev_ctx_put); void teedev_close_context(struct tee_context *ctx) { struct tee_device *teedev = ctx->teedev; + if (teedev->desc->ops->close_context) + teedev->desc->ops->close_context(ctx); + teedev_ctx_put(ctx); tee_device_put(teedev); } @@ -142,58 +146,22 @@ static int tee_release(struct inode *inode, struct file *filp) * This implements section (for SHA-1): * 4.3. 
Algorithm for Creating a Name-Based UUID
  */
-static int uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
-		   size_t size)
+static void uuid_v5(uuid_t *uuid, const uuid_t *ns, const void *name,
+		    size_t size)
 {
 	unsigned char hash[SHA1_DIGEST_SIZE];
-	struct crypto_shash *shash = NULL;
-	struct shash_desc *desc = NULL;
-	int rc;
-
-	shash = crypto_alloc_shash("sha1", 0, 0);
-	if (IS_ERR(shash)) {
-		rc = PTR_ERR(shash);
-		pr_err("shash(sha1) allocation failed\n");
-		return rc;
-	}
-
-	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash),
-		       GFP_KERNEL);
-	if (!desc) {
-		rc = -ENOMEM;
-		goto out_free_shash;
-	}
-
-	desc->tfm = shash;
-
-	rc = crypto_shash_init(desc);
-	if (rc < 0)
-		goto out_free_desc;
+	struct sha1_ctx ctx;
 
-	rc = crypto_shash_update(desc, (const u8 *)ns, sizeof(*ns));
-	if (rc < 0)
-		goto out_free_desc;
-
-	rc = crypto_shash_update(desc, (const u8 *)name, size);
-	if (rc < 0)
-		goto out_free_desc;
-
-	rc = crypto_shash_final(desc, hash);
-	if (rc < 0)
-		goto out_free_desc;
+	sha1_init(&ctx);
+	sha1_update(&ctx, (const u8 *)ns, sizeof(*ns));
+	sha1_update(&ctx, (const u8 *)name, size);
+	sha1_final(&ctx, hash);
 
 	memcpy(uuid->b, hash, UUID_SIZE);
 
 	/* Tag for version 5 */
 	uuid->b[6] = (hash[6] & 0x0F) | 0x50;
 	uuid->b[8] = (hash[8] & 0x3F) | 0x80;
-
-out_free_desc:
-	kfree(desc);
-
-out_free_shash:
-	crypto_free_shash(shash);
-	return rc;
 }
 
 int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method,
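(Illustrative aside, not in the patch: the rewritten helper is a plain RFC 4122 §4.3 name-based UUID. A caller in this file derives a client UUID roughly like below; the name string is hypothetical, while tee_client_uuid_ns is the namespace constant the surrounding code already passes in.)

	uuid_t uuid;
	static const char name[] = "uid=3e8";	/* hypothetical client name */

	/*
	 * SHA-1 over namespace || name, truncated to 128 bits, with the
	 * version (5) and variant bits patched in afterwards.
	 */
	uuid_v5(&uuid, &tee_client_uuid_ns, name, sizeof(name) - 1);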
It's the callers + * responibility to do tee_shm_put() on all + * resolved pointers. + */ + shm = tee_shm_get_from_id(ctx, ip->c); + if (IS_ERR(shm)) + return PTR_ERR(shm); + + /* + * Ensure offset + size does not overflow + * offset and does not overflow the size of + * the referred shared memory object. + */ + if ((ip->a + ip->b) < ip->a || + (ip->a + ip->b) > shm->size) { + tee_shm_put(shm); + return -EINVAL; + } + + if (shm->flags & TEE_SHM_DMA_BUF) { + struct tee_shm_dmabuf_ref *ref; + + ref = container_of(shm, struct tee_shm_dmabuf_ref, shm); + if (ref->parent_shm) { + /* + * The shm already has one reference to + * ref->parent_shm so we are clear of 0. + * We're getting another reference since + * this shm will be used in the parameter + * list instead of the shm we got with + * tee_shm_get_from_id() above. + */ + refcount_inc(&ref->parent_shm->refcount); + tee_shm_put(shm); + shm = ref->parent_shm; + offs = ref->offset; + } + } + } else if (ctx->cap_memref_null) { + /* Pass NULL pointer to OP-TEE */ + shm = NULL; + } else { + return -EINVAL; + } + + memref->shm_offs = ip->a + offs; + memref->size = ip->b; + memref->shm = shm; + + return 0; +} + static int params_from_user(struct tee_context *ctx, struct tee_param *params, size_t num_params, struct tee_ioctl_param __user *uparams) @@ -361,8 +436,8 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params, size_t n; for (n = 0; n < num_params; n++) { - struct tee_shm *shm; struct tee_ioctl_param ip; + int rc; if (copy_from_user(&ip, uparams + n, sizeof(ip))) return -EFAULT; @@ -375,6 +450,7 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params, switch (ip.attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { case TEE_IOCTL_PARAM_ATTR_TYPE_NONE: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT: break; case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT: @@ -382,48 +458,29 @@ static int params_from_user(struct tee_context *ctx, struct tee_param *params, params[n].u.value.b = ip.b; params[n].u.value.c = ip.c; break; + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT: + params[n].u.ubuf.uaddr = u64_to_user_ptr(ip.a); + params[n].u.ubuf.size = ip.b; + + if (!access_ok(params[n].u.ubuf.uaddr, + params[n].u.ubuf.size)) + return -EFAULT; + + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT: + params[n].u.objref.id = ip.a; + params[n].u.objref.flags = ip.b; + break; case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: - /* - * If a NULL pointer is passed to a TA in the TEE, - * the ip.c IOCTL parameters is set to TEE_MEMREF_NULL - * indicating a NULL memory reference. - */ - if (ip.c != TEE_MEMREF_NULL) { - /* - * If we fail to get a pointer to a shared - * memory object (and increase the ref count) - * from an identifier we return an error. All - * pointers that has been added in params have - * an increased ref count. It's the callers - * responibility to do tee_shm_put() on all - * resolved pointers. - */ - shm = tee_shm_get_from_id(ctx, ip.c); - if (IS_ERR(shm)) - return PTR_ERR(shm); - - /* - * Ensure offset + size does not overflow - * offset and does not overflow the size of - * the referred shared memory object. 
-				 */
-				if ((ip.a + ip.b) < ip.a ||
-				    (ip.a + ip.b) > shm->size) {
-					tee_shm_put(shm);
-					return -EINVAL;
-				}
-			} else if (ctx->cap_memref_null) {
-				/* Pass NULL pointer to OP-TEE */
-				shm = NULL;
-			} else {
-				return -EINVAL;
-			}
-
-			params[n].u.memref.shm_offs = ip.a;
-			params[n].u.memref.size = ip.b;
-			params[n].u.memref.shm = shm;
+			rc = param_from_user_memref(ctx, &params[n].u.memref,
+						    &ip);
+			if (rc)
+				return rc;
 			break;
 		default:
 			/* Unknown attribute */
@@ -450,6 +507,17 @@ static int params_to_user(struct tee_ioctl_param __user *uparams,
 			    put_user(p->u.value.c, &up->c))
 				return -EFAULT;
 			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
+			if (put_user((u64)p->u.ubuf.size, &up->b))
+				return -EFAULT;
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
+			if (put_user(p->u.objref.id, &up->a) ||
+			    put_user(p->u.objref.flags, &up->b))
+				return -EFAULT;
+			break;
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
 			if (put_user((u64)p->u.memref.size, &up->b))
@@ -602,6 +670,66 @@ out:
 	return rc;
 }
 
+static int tee_ioctl_object_invoke(struct tee_context *ctx,
+				   struct tee_ioctl_buf_data __user *ubuf)
+{
+	int rc;
+	size_t n;
+	struct tee_ioctl_buf_data buf;
+	struct tee_ioctl_object_invoke_arg __user *uarg;
+	struct tee_ioctl_object_invoke_arg arg;
+	struct tee_ioctl_param __user *uparams = NULL;
+	struct tee_param *params = NULL;
+
+	if (!ctx->teedev->desc->ops->object_invoke_func)
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, sizeof(buf)))
+		return -EFAULT;
+
+	if (buf.buf_len > TEE_MAX_ARG_SIZE ||
+	    buf.buf_len < sizeof(struct tee_ioctl_object_invoke_arg))
+		return -EINVAL;
+
+	uarg = u64_to_user_ptr(buf.buf_ptr);
+	if (copy_from_user(&arg, uarg, sizeof(arg)))
+		return -EFAULT;
+
+	if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
+		return -EINVAL;
+
+	if (arg.num_params) {
+		params = kcalloc(arg.num_params, sizeof(struct tee_param),
+				 GFP_KERNEL);
+		if (!params)
+			return -ENOMEM;
+		uparams = uarg->params;
+		rc = params_from_user(ctx, params, arg.num_params, uparams);
+		if (rc)
+			goto out;
+	}
+
+	rc = ctx->teedev->desc->ops->object_invoke_func(ctx, &arg, params);
+	if (rc)
+		goto out;
+
+	if (put_user(arg.ret, &uarg->ret)) {
+		rc = -EFAULT;
+		goto out;
+	}
+	rc = params_to_user(uparams, arg.num_params, params);
+out:
+	if (params) {
+		/* Decrease ref count for all valid shared memory pointers */
+		for (n = 0; n < arg.num_params; n++)
+			if (tee_param_is_memref(params + n) &&
+			    params[n].u.memref.shm)
+				tee_shm_put(params[n].u.memref.shm);
+		kfree(params);
+	}
+	return rc;
+}
+
 static int tee_ioctl_cancel(struct tee_context *ctx,
 			    struct tee_ioctl_cancel_arg __user *uarg)
 {
@@ -650,6 +778,19 @@ static int params_to_supp(struct tee_context *ctx,
 			ip.b = p->u.value.b;
 			ip.c = p->u.value.c;
 			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT:
+			ip.a = (__force unsigned long)p->u.ubuf.uaddr;
+			ip.b = p->u.ubuf.size;
+			ip.c = 0;
+			break;
+		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INPUT:
+		case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT:
+			ip.a = p->u.objref.id;
+			ip.b = p->u.objref.flags;
+			ip.c = 0;
+			break;
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
@@ -752,6 +893,21 @@ static int params_from_supp(struct tee_param *params, size_t num_params,
 			p->u.value.b = ip.b;
p->u.value.c = ip.c; break; + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_UBUF_INOUT: + p->u.ubuf.uaddr = u64_to_user_ptr(ip.a); + p->u.ubuf.size = ip.b; + + if (!access_ok(params[n].u.ubuf.uaddr, + params[n].u.ubuf.size)) + return -EFAULT; + + break; + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_OBJREF_INOUT: + p->u.objref.id = ip.a; + p->u.objref.flags = ip.b; + break; case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: /* @@ -828,10 +984,14 @@ static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return tee_ioctl_shm_alloc(ctx, uarg); case TEE_IOC_SHM_REGISTER: return tee_ioctl_shm_register(ctx, uarg); + case TEE_IOC_SHM_REGISTER_FD: + return tee_ioctl_shm_register_fd(ctx, uarg); case TEE_IOC_OPEN_SESSION: return tee_ioctl_open_session(ctx, uarg); case TEE_IOC_INVOKE: return tee_ioctl_invoke(ctx, uarg); + case TEE_IOC_OBJECT_INVOKE: + return tee_ioctl_object_invoke(ctx, uarg); case TEE_IOC_CANCEL: return tee_ioctl_cancel(ctx, uarg); case TEE_IOC_CLOSE_SESSION: @@ -889,7 +1049,7 @@ struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, if (!teedesc || !teedesc->name || !teedesc->ops || !teedesc->ops->get_version || !teedesc->ops->open || - !teedesc->ops->release || !pool) + !teedesc->ops->release) return ERR_PTR(-EINVAL); teedev = kzalloc(sizeof(*teedev), GFP_KERNEL); @@ -977,7 +1137,7 @@ static ssize_t implementation_id_show(struct device *dev, struct tee_ioctl_version_data vers; teedev->desc->ops->get_version(teedev, &vers); - return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id); + return sysfs_emit(buf, "%d\n", vers.impl_id); } static DEVICE_ATTR_RO(implementation_id); @@ -1038,6 +1198,7 @@ void tee_device_put(struct tee_device *teedev) } mutex_unlock(&teedev->mutex); } +EXPORT_SYMBOL_GPL(tee_device_put); bool tee_device_get(struct tee_device *teedev) { @@ -1050,6 +1211,7 @@ bool tee_device_get(struct tee_device *teedev) mutex_unlock(&teedev->mutex); return true; } +EXPORT_SYMBOL_GPL(tee_device_get); /** * tee_device_unregister() - Removes a TEE device @@ -1064,6 +1226,8 @@ void tee_device_unregister(struct tee_device *teedev) if (!teedev) return; + tee_device_put_all_dma_heaps(teedev); + if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) cdev_device_del(&teedev->cdev, &teedev->dev); @@ -1287,3 +1451,5 @@ MODULE_AUTHOR("Linaro"); MODULE_DESCRIPTION("TEE Driver"); MODULE_VERSION("1.0"); MODULE_LICENSE("GPL v2"); +MODULE_IMPORT_NS("DMA_BUF"); +MODULE_IMPORT_NS("DMA_BUF_HEAP"); diff --git a/drivers/tee/tee_heap.c b/drivers/tee/tee_heap.c new file mode 100644 index 000000000000..d8d7735cdffb --- /dev/null +++ b/drivers/tee/tee_heap.c @@ -0,0 +1,500 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2025, Linaro Limited + */ + +#include <linux/dma-buf.h> +#include <linux/dma-heap.h> +#include <linux/genalloc.h> +#include <linux/module.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/tee_core.h> +#include <linux/xarray.h> + +#include "tee_private.h" + +struct tee_dma_heap { + struct dma_heap *heap; + enum tee_dma_heap_id id; + struct kref kref; + struct tee_protmem_pool *pool; + struct tee_device *teedev; + bool shutting_down; + /* Protects pool, teedev, and shutting_down above */ + struct mutex mu; +}; + +struct tee_heap_buffer { + struct tee_dma_heap *heap; + size_t size; + size_t offs; + struct sg_table table; +}; + +struct tee_heap_attachment { + struct sg_table table; + struct device *dev; +}; + 
+struct tee_protmem_static_pool { + struct tee_protmem_pool pool; + struct gen_pool *gen_pool; + phys_addr_t pa_base; +}; + +#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS) +static DEFINE_XARRAY_ALLOC(tee_dma_heap); + +static void tee_heap_release(struct kref *kref) +{ + struct tee_dma_heap *h = container_of(kref, struct tee_dma_heap, kref); + + h->pool->ops->destroy_pool(h->pool); + tee_device_put(h->teedev); + h->pool = NULL; + h->teedev = NULL; +} + +static void put_tee_heap(struct tee_dma_heap *h) +{ + kref_put(&h->kref, tee_heap_release); +} + +static void get_tee_heap(struct tee_dma_heap *h) +{ + kref_get(&h->kref); +} + +static int copy_sg_table(struct sg_table *dst, struct sg_table *src) +{ + struct scatterlist *dst_sg; + struct scatterlist *src_sg; + int ret; + int i; + + ret = sg_alloc_table(dst, src->orig_nents, GFP_KERNEL); + if (ret) + return ret; + + dst_sg = dst->sgl; + for_each_sgtable_sg(src, src_sg, i) { + sg_set_page(dst_sg, sg_page(src_sg), src_sg->length, + src_sg->offset); + dst_sg = sg_next(dst_sg); + } + + return 0; +} + +static int tee_heap_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct tee_heap_buffer *buf = dmabuf->priv; + struct tee_heap_attachment *a; + int ret; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; + + ret = copy_sg_table(&a->table, &buf->table); + if (ret) { + kfree(a); + return ret; + } + + a->dev = attachment->dev; + attachment->priv = a; + + return 0; +} + +static void tee_heap_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct tee_heap_attachment *a = attachment->priv; + + sg_free_table(&a->table); + kfree(a); +} + +static struct sg_table * +tee_heap_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct tee_heap_attachment *a = attachment->priv; + int ret; + + ret = dma_map_sgtable(attachment->dev, &a->table, direction, + DMA_ATTR_SKIP_CPU_SYNC); + if (ret) + return ERR_PTR(ret); + + return &a->table; +} + +static void tee_heap_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *table, + enum dma_data_direction direction) +{ + struct tee_heap_attachment *a = attachment->priv; + + WARN_ON(&a->table != table); + + dma_unmap_sgtable(attachment->dev, table, direction, + DMA_ATTR_SKIP_CPU_SYNC); +} + +static void tee_heap_buf_free(struct dma_buf *dmabuf) +{ + struct tee_heap_buffer *buf = dmabuf->priv; + + buf->heap->pool->ops->free(buf->heap->pool, &buf->table); + mutex_lock(&buf->heap->mu); + put_tee_heap(buf->heap); + mutex_unlock(&buf->heap->mu); + kfree(buf); +} + +static const struct dma_buf_ops tee_heap_buf_ops = { + .attach = tee_heap_attach, + .detach = tee_heap_detach, + .map_dma_buf = tee_heap_map_dma_buf, + .unmap_dma_buf = tee_heap_unmap_dma_buf, + .release = tee_heap_buf_free, +}; + +static struct dma_buf *tee_dma_heap_alloc(struct dma_heap *heap, + unsigned long len, u32 fd_flags, + u64 heap_flags) +{ + struct tee_dma_heap *h = dma_heap_get_drvdata(heap); + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + struct tee_device *teedev = NULL; + struct tee_heap_buffer *buf; + struct tee_protmem_pool *pool; + struct dma_buf *dmabuf; + int rc; + + mutex_lock(&h->mu); + if (h->teedev) { + teedev = h->teedev; + pool = h->pool; + get_tee_heap(h); + } + mutex_unlock(&h->mu); + + if (!teedev) + return ERR_PTR(-EINVAL); + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) { + dmabuf = ERR_PTR(-ENOMEM); + goto err; + } + buf->size = len; + buf->heap = h; + + rc = pool->ops->alloc(pool, &buf->table, len, 
&buf->offs); + if (rc) { + dmabuf = ERR_PTR(rc); + goto err_kfree; + } + + exp_info.ops = &tee_heap_buf_ops; + exp_info.size = len; + exp_info.priv = buf; + exp_info.flags = fd_flags; + dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(dmabuf)) + goto err_protmem_free; + + return dmabuf; + +err_protmem_free: + pool->ops->free(pool, &buf->table); +err_kfree: + kfree(buf); +err: + mutex_lock(&h->mu); + put_tee_heap(h); + mutex_unlock(&h->mu); + return dmabuf; +} + +static const struct dma_heap_ops tee_dma_heap_ops = { + .allocate = tee_dma_heap_alloc, +}; + +static const char *heap_id_2_name(enum tee_dma_heap_id id) +{ + switch (id) { + case TEE_DMA_HEAP_SECURE_VIDEO_PLAY: + return "protected,secure-video"; + case TEE_DMA_HEAP_TRUSTED_UI: + return "protected,trusted-ui"; + case TEE_DMA_HEAP_SECURE_VIDEO_RECORD: + return "protected,secure-video-record"; + default: + return NULL; + } +} + +static int alloc_dma_heap(struct tee_device *teedev, enum tee_dma_heap_id id, + struct tee_protmem_pool *pool) +{ + struct dma_heap_export_info exp_info = { + .ops = &tee_dma_heap_ops, + .name = heap_id_2_name(id), + }; + struct tee_dma_heap *h; + int rc; + + if (!exp_info.name) + return -EINVAL; + + if (xa_reserve(&tee_dma_heap, id, GFP_KERNEL)) { + if (!xa_load(&tee_dma_heap, id)) + return -EEXIST; + return -ENOMEM; + } + + h = kzalloc(sizeof(*h), GFP_KERNEL); + if (!h) + return -ENOMEM; + h->id = id; + kref_init(&h->kref); + h->teedev = teedev; + h->pool = pool; + mutex_init(&h->mu); + + exp_info.priv = h; + h->heap = dma_heap_add(&exp_info); + if (IS_ERR(h->heap)) { + rc = PTR_ERR(h->heap); + kfree(h); + + return rc; + } + + /* "can't fail" due to the call to xa_reserve() above */ + return WARN_ON(xa_is_err(xa_store(&tee_dma_heap, id, h, GFP_KERNEL))); +} + +int tee_device_register_dma_heap(struct tee_device *teedev, + enum tee_dma_heap_id id, + struct tee_protmem_pool *pool) +{ + struct tee_dma_heap *h; + int rc; + + if (!tee_device_get(teedev)) + return -EINVAL; + + h = xa_load(&tee_dma_heap, id); + if (h) { + mutex_lock(&h->mu); + if (h->teedev) { + rc = -EBUSY; + } else { + kref_init(&h->kref); + h->shutting_down = false; + h->teedev = teedev; + h->pool = pool; + rc = 0; + } + mutex_unlock(&h->mu); + } else { + rc = alloc_dma_heap(teedev, id, pool); + } + + if (rc) { + tee_device_put(teedev); + dev_err(&teedev->dev, "can't register DMA heap id %d (%s)\n", + id, heap_id_2_name(id)); + } + + return rc; +} +EXPORT_SYMBOL_GPL(tee_device_register_dma_heap); + +void tee_device_put_all_dma_heaps(struct tee_device *teedev) +{ + struct tee_dma_heap *h; + u_long i; + + xa_for_each(&tee_dma_heap, i, h) { + if (h) { + mutex_lock(&h->mu); + if (h->teedev == teedev && !h->shutting_down) { + h->shutting_down = true; + put_tee_heap(h); + } + mutex_unlock(&h->mu); + } + } +} +EXPORT_SYMBOL_GPL(tee_device_put_all_dma_heaps); + +int tee_heap_update_from_dma_buf(struct tee_device *teedev, + struct dma_buf *dmabuf, size_t *offset, + struct tee_shm *shm, + struct tee_shm **parent_shm) +{ + struct tee_heap_buffer *buf; + int rc; + + /* The DMA-buf must be from our heap */ + if (dmabuf->ops != &tee_heap_buf_ops) + return -EINVAL; + + buf = dmabuf->priv; + /* The buffer must be from the same teedev */ + if (buf->heap->teedev != teedev) + return -EINVAL; + + shm->size = buf->size; + + rc = buf->heap->pool->ops->update_shm(buf->heap->pool, &buf->table, + buf->offs, shm, parent_shm); + if (!rc && *parent_shm) + *offset = buf->offs; + + return rc; +} +#else +int tee_device_register_dma_heap(struct tee_device *teedev 
__always_unused, + enum tee_dma_heap_id id __always_unused, + struct tee_protmem_pool *pool __always_unused) +{ + return -EINVAL; +} +EXPORT_SYMBOL_GPL(tee_device_register_dma_heap); + +void +tee_device_put_all_dma_heaps(struct tee_device *teedev __always_unused) +{ +} +EXPORT_SYMBOL_GPL(tee_device_put_all_dma_heaps); + +int tee_heap_update_from_dma_buf(struct tee_device *teedev __always_unused, + struct dma_buf *dmabuf __always_unused, + size_t *offset __always_unused, + struct tee_shm *shm __always_unused, + struct tee_shm **parent_shm __always_unused) +{ + return -EINVAL; +} +#endif + +static struct tee_protmem_static_pool * +to_protmem_static_pool(struct tee_protmem_pool *pool) +{ + return container_of(pool, struct tee_protmem_static_pool, pool); +} + +static int protmem_pool_op_static_alloc(struct tee_protmem_pool *pool, + struct sg_table *sgt, size_t size, + size_t *offs) +{ + struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool); + phys_addr_t pa; + int ret; + + pa = gen_pool_alloc(stp->gen_pool, size); + if (!pa) + return -ENOMEM; + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (ret) { + gen_pool_free(stp->gen_pool, pa, size); + return ret; + } + + sg_set_page(sgt->sgl, phys_to_page(pa), size, 0); + *offs = pa - stp->pa_base; + + return 0; +} + +static void protmem_pool_op_static_free(struct tee_protmem_pool *pool, + struct sg_table *sgt) +{ + struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool); + struct scatterlist *sg; + int i; + + for_each_sgtable_sg(sgt, sg, i) + gen_pool_free(stp->gen_pool, sg_phys(sg), sg->length); + sg_free_table(sgt); +} + +static int protmem_pool_op_static_update_shm(struct tee_protmem_pool *pool, + struct sg_table *sgt, size_t offs, + struct tee_shm *shm, + struct tee_shm **parent_shm) +{ + struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool); + + shm->paddr = stp->pa_base + offs; + *parent_shm = NULL; + + return 0; +} + +static void protmem_pool_op_static_destroy_pool(struct tee_protmem_pool *pool) +{ + struct tee_protmem_static_pool *stp = to_protmem_static_pool(pool); + + gen_pool_destroy(stp->gen_pool); + kfree(stp); +} + +static struct tee_protmem_pool_ops protmem_pool_ops_static = { + .alloc = protmem_pool_op_static_alloc, + .free = protmem_pool_op_static_free, + .update_shm = protmem_pool_op_static_update_shm, + .destroy_pool = protmem_pool_op_static_destroy_pool, +}; + +struct tee_protmem_pool *tee_protmem_static_pool_alloc(phys_addr_t paddr, + size_t size) +{ + const size_t page_mask = PAGE_SIZE - 1; + struct tee_protmem_static_pool *stp; + int rc; + + /* Check it's page aligned */ + if ((paddr | size) & page_mask) + return ERR_PTR(-EINVAL); + + if (!pfn_valid(PHYS_PFN(paddr))) + return ERR_PTR(-EINVAL); + + stp = kzalloc(sizeof(*stp), GFP_KERNEL); + if (!stp) + return ERR_PTR(-ENOMEM); + + stp->gen_pool = gen_pool_create(PAGE_SHIFT, -1); + if (!stp->gen_pool) { + rc = -ENOMEM; + goto err_free; + } + + rc = gen_pool_add(stp->gen_pool, paddr, size, -1); + if (rc) + goto err_free_pool; + + stp->pool.ops = &protmem_pool_ops_static; + stp->pa_base = paddr; + return &stp->pool; + +err_free_pool: + gen_pool_destroy(stp->gen_pool); +err_free: + kfree(stp); + + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(tee_protmem_static_pool_alloc); diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h index 9bc50605227c..6bde688bfcb1 100644 --- a/drivers/tee/tee_private.h +++ b/drivers/tee/tee_private.h @@ -8,20 +8,28 @@ #include <linux/cdev.h> #include <linux/completion.h> #include <linux/device.h> +#include 
<linux/dma-buf.h> #include <linux/kref.h> #include <linux/mutex.h> #include <linux/types.h> -int tee_shm_get_fd(struct tee_shm *shm); - -bool tee_device_get(struct tee_device *teedev); -void tee_device_put(struct tee_device *teedev); +/* extra references appended to shm object for registered shared memory */ +struct tee_shm_dmabuf_ref { + struct tee_shm shm; + size_t offset; + struct dma_buf *dmabuf; + struct tee_shm *parent_shm; +}; -void teedev_ctx_get(struct tee_context *ctx); -void teedev_ctx_put(struct tee_context *ctx); +int tee_shm_get_fd(struct tee_shm *shm); struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size); struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx, unsigned long addr, size_t length); +int tee_heap_update_from_dma_buf(struct tee_device *teedev, + struct dma_buf *dmabuf, size_t *offset, + struct tee_shm *shm, + struct tee_shm **parent_shm); + #endif /*TEE_PRIVATE_H*/ diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 2a7d253d9c55..4a47de4bb2e5 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -4,6 +4,9 @@ */ #include <linux/anon_inodes.h> #include <linux/device.h> +#include <linux/dma-buf.h> +#include <linux/dma-mapping.h> +#include <linux/highmem.h> #include <linux/idr.h> #include <linux/io.h> #include <linux/mm.h> @@ -12,9 +15,14 @@ #include <linux/tee_core.h> #include <linux/uaccess.h> #include <linux/uio.h> -#include <linux/highmem.h> #include "tee_private.h" +struct tee_shm_dma_mem { + struct tee_shm shm; + dma_addr_t dma_addr; + struct page *page; +}; + static void shm_put_kernel_pages(struct page **pages, size_t page_count) { size_t n; @@ -45,7 +53,24 @@ static void release_registered_pages(struct tee_shm *shm) static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) { - if (shm->flags & TEE_SHM_POOL) { + void *p = shm; + + if (shm->flags & TEE_SHM_DMA_MEM) { +#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS) + struct tee_shm_dma_mem *dma_mem; + + dma_mem = container_of(shm, struct tee_shm_dma_mem, shm); + p = dma_mem; + dma_free_pages(&teedev->dev, shm->size, dma_mem->page, + dma_mem->dma_addr, DMA_BIDIRECTIONAL); +#endif + } else if (shm->flags & TEE_SHM_DMA_BUF) { + struct tee_shm_dmabuf_ref *ref; + + ref = container_of(shm, struct tee_shm_dmabuf_ref, shm); + p = ref; + dma_buf_put(ref->dmabuf); + } else if (shm->flags & TEE_SHM_POOL) { teedev->pool->ops->free(teedev->pool, shm); } else if (shm->flags & TEE_SHM_DYNAMIC) { int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm); @@ -59,7 +84,7 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm) teedev_ctx_put(shm->ctx); - kfree(shm); + kfree(p); tee_device_put(teedev); } @@ -169,7 +194,7 @@ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size) * tee_client_invoke_func(). The memory allocated is later freed with a * call to tee_shm_free(). 
@@ -169,7 +194,7 @@ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
  * tee_client_invoke_func(). The memory allocated is later freed with a
  * call to tee_shm_free().
  *
- * @returns a pointer to 'struct tee_shm'
+ * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
  */
 struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
 {
@@ -179,6 +204,62 @@ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
 }
 EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
 
+struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd)
+{
+	struct tee_shm_dmabuf_ref *ref;
+	int rc;
+
+	if (!tee_device_get(ctx->teedev))
+		return ERR_PTR(-EINVAL);
+
+	teedev_ctx_get(ctx);
+
+	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	if (!ref) {
+		rc = -ENOMEM;
+		goto err_put_tee;
+	}
+
+	refcount_set(&ref->shm.refcount, 1);
+	ref->shm.ctx = ctx;
+	ref->shm.id = -1;
+	ref->shm.flags = TEE_SHM_DMA_BUF;
+
+	ref->dmabuf = dma_buf_get(fd);
+	if (IS_ERR(ref->dmabuf)) {
+		rc = PTR_ERR(ref->dmabuf);
+		goto err_kfree_ref;
+	}
+
+	rc = tee_heap_update_from_dma_buf(ctx->teedev, ref->dmabuf,
+					  &ref->offset, &ref->shm,
+					  &ref->parent_shm);
+	if (rc)
+		goto err_put_dmabuf;
+
+	mutex_lock(&ref->shm.ctx->teedev->mutex);
+	ref->shm.id = idr_alloc(&ref->shm.ctx->teedev->idr, &ref->shm,
+				1, 0, GFP_KERNEL);
+	mutex_unlock(&ref->shm.ctx->teedev->mutex);
+	if (ref->shm.id < 0) {
+		rc = ref->shm.id;
+		goto err_put_dmabuf;
+	}
+
+	return &ref->shm;
+
+err_put_dmabuf:
+	dma_buf_put(ref->dmabuf);
+err_kfree_ref:
+	kfree(ref);
+err_put_tee:
+	teedev_ctx_put(ctx);
+	tee_device_put(ctx->teedev);
+
+	return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tee_shm_register_fd);
+
 /**
  * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
  *			      kernel buffer
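A rough sketch (illustrative only, not part of this patch) of a kernel client wrapping a dma-buf file descriptor with tee_shm_register_fd(). The fd is assumed to reference a buffer backed by a TEE heap, since tee_heap_update_from_dma_buf() is expected to reject other dma-bufs, and the header choice is an assumption:

#include <linux/err.h>
#include <linux/tee_drv.h>	/* assumed location of the declarations */

static int example_import_dmabuf(struct tee_context *ctx, int fd)
{
	struct tee_shm *shm;

	/* Takes a reference on the dma-buf and builds a shm around it */
	shm = tee_shm_register_fd(ctx, fd);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... pass shm in a memref parameter to the TEE here ... */

	/* Drops the final reference; tee_shm_release() puts the dma-buf */
	tee_shm_put(shm);
	return 0;
}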
@@ -203,6 +284,71 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
 }
 EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
 
+#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
+/**
+ * tee_shm_alloc_dma_mem() - Allocate DMA memory as a shared memory object
+ * @ctx: Context that allocates the shared memory
+ * @page_count: Number of pages
+ *
+ * The allocated memory is expected to be lent (made inaccessible to the
+ * kernel) to the TEE while it's used and returned (made accessible to the
+ * kernel again) before it's freed.
+ *
+ * This function should normally only be used internally in the TEE
+ * drivers.
+ *
+ * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+				      size_t page_count)
+{
+	struct tee_device *teedev = ctx->teedev;
+	struct tee_shm_dma_mem *dma_mem;
+	dma_addr_t dma_addr;
+	struct page *page;
+
+	if (!tee_device_get(teedev))
+		return ERR_PTR(-EINVAL);
+
+	page = dma_alloc_pages(&teedev->dev, page_count * PAGE_SIZE,
+			       &dma_addr, DMA_BIDIRECTIONAL, GFP_KERNEL);
+	if (!page)
+		goto err_put_teedev;
+
+	dma_mem = kzalloc(sizeof(*dma_mem), GFP_KERNEL);
+	if (!dma_mem)
+		goto err_free_pages;
+
+	refcount_set(&dma_mem->shm.refcount, 1);
+	dma_mem->shm.ctx = ctx;
+	dma_mem->shm.paddr = page_to_phys(page);
+	dma_mem->dma_addr = dma_addr;
+	dma_mem->page = page;
+	dma_mem->shm.size = page_count * PAGE_SIZE;
+	dma_mem->shm.flags = TEE_SHM_DMA_MEM;
+
+	teedev_ctx_get(ctx);
+
+	return &dma_mem->shm;
+
+err_free_pages:
+	dma_free_pages(&teedev->dev, page_count * PAGE_SIZE, page, dma_addr,
+		       DMA_BIDIRECTIONAL);
+err_put_teedev:
+	tee_device_put(teedev);
+
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
+#else
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+				      size_t page_count)
+{
+	return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
+#endif
+
 int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
 			     int (*shm_register)(struct tee_context *ctx,
 						 struct tee_shm *shm,
@@ -321,6 +467,14 @@ register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
 	if (unlikely(len <= 0)) {
 		ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
 		goto err_free_shm_pages;
+	} else if (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) {
+		/*
+		 * We got fewer pages than requested; update num_pages so
+		 * that the correct amount is released below.
+		 */
+		shm->num_pages = len / PAGE_SIZE;
+		ret = ERR_PTR(-ENOMEM);
+		goto err_put_shm_pages;
 	}
 
 	/*
@@ -444,6 +598,9 @@ static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
 	/* Refuse sharing shared memory provided by application */
 	if (shm->flags & TEE_SHM_USER_MAPPED)
 		return -EINVAL;
+	/* Refuse sharing registered dma-bufs with the application */
+	if (shm->flags & TEE_SHM_DMA_BUF)
+		return -EINVAL;
 
 	/* check for overflowing the buffer's size */
 	if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)