Diffstat (limited to 'drivers/tee/tee_shm.c')
-rw-r--r--	drivers/tee/tee_shm.c	165
1 file changed, 161 insertions, 4 deletions
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 2a7d253d9c55..4a47de4bb2e5 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -4,6 +4,9 @@
*/
#include <linux/anon_inodes.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/mm.h>
@@ -12,9 +15,14 @@
#include <linux/tee_core.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
-#include <linux/highmem.h>
#include "tee_private.h"
+struct tee_shm_dma_mem {
+ struct tee_shm shm;
+ dma_addr_t dma_addr;
+ struct page *page;
+};
+
static void shm_put_kernel_pages(struct page **pages, size_t page_count)
{
size_t n;
@@ -45,7 +53,24 @@ static void release_registered_pages(struct tee_shm *shm)
static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
{
- if (shm->flags & TEE_SHM_POOL) {
+ void *p = shm;
+
+ if (shm->flags & TEE_SHM_DMA_MEM) {
+#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
+ struct tee_shm_dma_mem *dma_mem;
+
+ dma_mem = container_of(shm, struct tee_shm_dma_mem, shm);
+ p = dma_mem;
+ dma_free_pages(&teedev->dev, shm->size, dma_mem->page,
+ dma_mem->dma_addr, DMA_BIDIRECTIONAL);
+#endif
+ } else if (shm->flags & TEE_SHM_DMA_BUF) {
+ struct tee_shm_dmabuf_ref *ref;
+
+ ref = container_of(shm, struct tee_shm_dmabuf_ref, shm);
+ p = ref;
+ dma_buf_put(ref->dmabuf);
+ } else if (shm->flags & TEE_SHM_POOL) {
teedev->pool->ops->free(teedev->pool, shm);
} else if (shm->flags & TEE_SHM_DYNAMIC) {
int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
@@ -59,7 +84,7 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
teedev_ctx_put(shm->ctx);
- kfree(shm);
+ kfree(p);
tee_device_put(teedev);
}
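
The release path above uses container_of() to step from the embedded struct tee_shm back to the enclosing wrapper so the whole allocation can be freed. A minimal sketch of that pattern (illustrative only, not part of the patch; 'example_wrapper' and 'example_release' are hypothetical names):

#include <linux/slab.h>
#include <linux/tee_core.h>

struct example_wrapper {
	struct tee_shm shm;	/* embedded object handed out to callers */
	void *private_data;	/* extra state owned by the wrapper */
};

static void example_release(struct tee_shm *shm)
{
	/* Recover the outer allocation from the embedded member */
	struct example_wrapper *w = container_of(shm, struct example_wrapper, shm);

	kfree(w);		/* frees the wrapper together with the embedded shm */
}
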
@@ -169,7 +194,7 @@ struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
* tee_client_invoke_func(). The memory allocated is later freed with a
* call to tee_shm_free().
*
- * @returns a pointer to 'struct tee_shm'
+ * @returns a pointer to 'struct tee_shm' on success, and ERR_PTR on failure
*/
struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
{
@@ -179,6 +204,62 @@ struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
+struct tee_shm *tee_shm_register_fd(struct tee_context *ctx, int fd)
+{
+ struct tee_shm_dmabuf_ref *ref;
+ int rc;
+
+ if (!tee_device_get(ctx->teedev))
+ return ERR_PTR(-EINVAL);
+
+ teedev_ctx_get(ctx);
+
+ ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!ref) {
+ rc = -ENOMEM;
+ goto err_put_tee;
+ }
+
+ refcount_set(&ref->shm.refcount, 1);
+ ref->shm.ctx = ctx;
+ ref->shm.id = -1;
+ ref->shm.flags = TEE_SHM_DMA_BUF;
+
+ ref->dmabuf = dma_buf_get(fd);
+ if (IS_ERR(ref->dmabuf)) {
+ rc = PTR_ERR(ref->dmabuf);
+ goto err_kfree_ref;
+ }
+
+ rc = tee_heap_update_from_dma_buf(ctx->teedev, ref->dmabuf,
+ &ref->offset, &ref->shm,
+ &ref->parent_shm);
+ if (rc)
+ goto err_put_dmabuf;
+
+ mutex_lock(&ref->shm.ctx->teedev->mutex);
+ ref->shm.id = idr_alloc(&ref->shm.ctx->teedev->idr, &ref->shm,
+ 1, 0, GFP_KERNEL);
+ mutex_unlock(&ref->shm.ctx->teedev->mutex);
+ if (ref->shm.id < 0) {
+ rc = ref->shm.id;
+ goto err_put_dmabuf;
+ }
+
+ return &ref->shm;
+
+err_put_dmabuf:
+ dma_buf_put(ref->dmabuf);
+err_kfree_ref:
+ kfree(ref);
+err_put_tee:
+ teedev_ctx_put(ctx);
+ tee_device_put(ctx->teedev);
+
+ return ERR_PTR(rc);
+}
+EXPORT_SYMBOL_GPL(tee_shm_register_fd);
+
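
For reference, a minimal sketch of how a client driver might call the new helper (illustrative only, not part of the patch; 'example_share_dmabuf' is a hypothetical caller, and the declarations are assumed to come from the TEE headers updated elsewhere in this series):

static int example_share_dmabuf(struct tee_context *ctx, int fd)
{
	struct tee_shm *shm;

	/* Takes a reference on the dma-buf behind 'fd' and wraps it as shm */
	shm = tee_shm_register_fd(ctx, fd);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... pass 'shm' as a memref parameter in an invoke_func() call ... */

	/* Drop the initial reference; the dma-buf is released when unused */
	tee_shm_put(shm);
	return 0;
}
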
/**
* tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
* kernel buffer
@@ -203,6 +284,71 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
}
EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
+#if IS_ENABLED(CONFIG_TEE_DMABUF_HEAPS)
+/**
+ * tee_shm_alloc_dma_mem() - Allocate DMA memory as shared memory object
+ * @ctx: Context that allocates the shared memory
+ * @page_count: Number of pages
+ *
+ * The allocated memory is expected to be lent (made inaccessible to the
+ * kernel) to the TEE while it's used and returned (accessible to the
+ * kernel again) before it's freed.
+ *
+ * This function should normally only be used internally in the TEE
+ * drivers.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+ size_t page_count)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct tee_shm_dma_mem *dma_mem;
+ dma_addr_t dma_addr;
+ struct page *page;
+
+ if (!tee_device_get(teedev))
+ return ERR_PTR(-EINVAL);
+
+ page = dma_alloc_pages(&teedev->dev, page_count * PAGE_SIZE,
+ &dma_addr, DMA_BIDIRECTIONAL, GFP_KERNEL);
+ if (!page)
+ goto err_put_teedev;
+
+ dma_mem = kzalloc(sizeof(*dma_mem), GFP_KERNEL);
+ if (!dma_mem)
+ goto err_free_pages;
+
+ refcount_set(&dma_mem->shm.refcount, 1);
+ dma_mem->shm.ctx = ctx;
+ dma_mem->shm.paddr = page_to_phys(page);
+ dma_mem->dma_addr = dma_addr;
+ dma_mem->page = page;
+ dma_mem->shm.size = page_count * PAGE_SIZE;
+ dma_mem->shm.flags = TEE_SHM_DMA_MEM;
+
+ teedev_ctx_get(ctx);
+
+ return &dma_mem->shm;
+
+err_free_pages:
+ dma_free_pages(&teedev->dev, page_count * PAGE_SIZE, page, dma_addr,
+ DMA_BIDIRECTIONAL);
+err_put_teedev:
+ tee_device_put(teedev);
+
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
+#else
+struct tee_shm *tee_shm_alloc_dma_mem(struct tee_context *ctx,
+ size_t page_count)
+{
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_dma_mem);
+#endif
+
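
A minimal sketch of the intended in-driver usage (illustrative only, not part of the patch; 'example_fw_lend' stands in for whatever firmware call a real driver would use to lend the region to the TEE):

static struct tee_shm *example_alloc_protected(struct tee_context *ctx,
					       size_t page_count)
{
	struct tee_shm *shm;

	shm = tee_shm_alloc_dma_mem(ctx, page_count);
	if (IS_ERR(shm))
		return shm;

	/*
	 * shm->paddr and shm->size describe the contiguous region;
	 * a real driver would now lend it to the secure world, e.g.
	 * example_fw_lend(shm->paddr, shm->size).
	 */
	return shm;
}

/* Once the TEE has handed the memory back to the kernel: */
static void example_free_protected(struct tee_shm *shm)
{
	tee_shm_free(shm);	/* ends up in dma_free_pages() via tee_shm_release() */
}
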
int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
int (*shm_register)(struct tee_context *ctx,
struct tee_shm *shm,
@@ -321,6 +467,14 @@ register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
if (unlikely(len <= 0)) {
ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM);
goto err_free_shm_pages;
+ } else if (DIV_ROUND_UP(len + off, PAGE_SIZE) != num_pages) {
+ /*
+ * If we only got a few pages, update to release the
+ * correct amount below.
+ */
+ shm->num_pages = len / PAGE_SIZE;
+ ret = ERR_PTR(-ENOMEM);
+ goto err_put_shm_pages;
}
/*
@@ -444,6 +598,9 @@ static int tee_shm_fop_mmap(struct file *filp, struct vm_area_struct *vma)
/* Refuse sharing shared memory provided by application */
if (shm->flags & TEE_SHM_USER_MAPPED)
return -EINVAL;
+ /* Refuse sharing registered DMA_bufs with the application */
+ if (shm->flags & TEE_SHM_DMA_BUF)
+ return -EINVAL;
/* check for overflowing the buffer's size */
if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)