From 909f4abd1097769d024c3a9c2e59c2fbe5d2d0c0 Mon Sep 17 00:00:00 2001
From: Yi Liu
Date: Thu, 28 Sep 2023 00:15:23 -0700
Subject: iommu: Add new iommu op to create domains owned by userspace

Introduce a new iommu_domain op to create domains owned by userspace, e.g.
through IOMMUFD. These domains have a few different properties compared to
kernel-owned domains:

 - They may be PAGING domains, but created with special parameters. For
   instance aperture size changes, number of levels, different IOPTE
   formats, or other things necessary to make a vIOMMU work

 - We have to track all the memory allocations with GFP_KERNEL_ACCOUNT to
   make the cgroup sandboxing stronger

 - Device-specialized domains, such as NESTED domains, can be created by
   IOMMUFD

The new op clearly says the domain is being created by IOMMUFD, that the
domain is intended for userspace use, and it provides a way to pass user
flags or a driver-specific uAPI structure to customize the created domain
to exactly what the vIOMMU userspace driver requires.

iommu drivers that cannot support VFIO/IOMMUFD should not support this op.
This includes any driver that cannot provide a fully functional PAGING
domain.

For now this new op is only intended to be used by IOMMUFD, hence there is
no wrapper for it; IOMMUFD calls the callback directly. As for domain
freeing, IOMMUFD uses iommu_domain_free().

Link: https://lore.kernel.org/r/20230928071528.26258-2-yi.l.liu@intel.com
Suggested-by: Jason Gunthorpe
Signed-off-by: Lu Baolu
Co-developed-by: Nicolin Chen
Signed-off-by: Nicolin Chen
Signed-off-by: Yi Liu
Reviewed-by: Kevin Tian
Signed-off-by: Jason Gunthorpe
---
 include/linux/iommu.h | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index c50a769d569a..3861d66b65c1 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -234,7 +234,15 @@ struct iommu_iotlb_gather {
  *           op is allocated in the iommu driver and freed by the caller after
  *           use. The information type is one of enum iommu_hw_info_type defined
  *           in include/uapi/linux/iommufd.h.
- * @domain_alloc: allocate iommu domain
+ * @domain_alloc: allocate and return an iommu domain on success, otherwise
+ *                return NULL. The domain is not fully initialized until
+ *                its caller, iommu_domain_alloc(), returns.
+ * @domain_alloc_user: Allocate an iommu domain corresponding to the input
+ *                     parameters as defined in include/uapi/linux/iommufd.h.
+ *                     Unlike @domain_alloc, it is called only by IOMMUFD and
+ *                     must fully initialize the new domain before return.
+ *                     Upon success, a domain is returned. Upon failure,
+ *                     ERR_PTR must be returned.
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
@@ -267,6 +275,7 @@ struct iommu_ops {

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
+	struct iommu_domain *(*domain_alloc_user)(struct device *dev, u32 flags);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
--
cgit v1.2.3

From 8c9c727b6142325ed5697240fceb99cbeb4ac2ec Mon Sep 17 00:00:00 2001
From: Joao Martins
Date: Tue, 24 Oct 2023 14:50:53 +0100
Subject: vfio: Move iova_bitmap into iommufd

Both VFIO and IOMMUFD will need the iova bitmap for storing dirties and
walking the user bitmaps, so move the common dependency into IOMMUFD.
In doing so, create the symbol IOMMUFD_DRIVER, which designates the builtin
code that will be used by drivers when selected. Today this means
MLX5_VFIO_PCI and PDS_VFIO_PCI. IOMMU drivers will do the same (in future
patches) when supporting dirty tracking, selecting IOMMUFD_DRIVER
accordingly.

Given that the symbol may be disabled, add header definitions in
iova_bitmap.h for when IOMMUFD_DRIVER=n.

Link: https://lore.kernel.org/r/20231024135109.73787-3-joao.m.martins@oracle.com
Signed-off-by: Joao Martins
Reviewed-by: Jason Gunthorpe
Reviewed-by: Brett Creeley
Reviewed-by: Kevin Tian
Reviewed-by: Alex Williamson
Signed-off-by: Jason Gunthorpe
---
 drivers/iommu/Kconfig               |   4 +
 drivers/iommu/iommufd/Makefile      |   1 +
 drivers/iommu/iommufd/iova_bitmap.c | 426 ++++++++++++++++++++++++++++++++++++
 drivers/vfio/Makefile               |   3 +-
 drivers/vfio/iova_bitmap.c          | 426 ------------------------------------
 drivers/vfio/pci/mlx5/Kconfig       |   1 +
 drivers/vfio/pci/pds/Kconfig        |   1 +
 include/linux/iova_bitmap.h         |  26 +++
 8 files changed, 460 insertions(+), 428 deletions(-)
 create mode 100644 drivers/iommu/iommufd/iova_bitmap.c
 delete mode 100644 drivers/vfio/iova_bitmap.c

(limited to 'include/linux')

diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 2b12b583ef4b..5cc869db1b79 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -7,6 +7,10 @@ config IOMMU_IOVA
 config IOMMU_API
	bool

+config IOMMUFD_DRIVER
+	bool
+	default n
+
 menuconfig IOMMU_SUPPORT
	bool "IOMMU Hardware Support"
	depends on MMU

diff --git a/drivers/iommu/iommufd/Makefile b/drivers/iommu/iommufd/Makefile
index 8aeba81800c5..34b446146961 100644
--- a/drivers/iommu/iommufd/Makefile
+++ b/drivers/iommu/iommufd/Makefile
@@ -11,3 +11,4 @@ iommufd-y := \
 iommufd-$(CONFIG_IOMMUFD_TEST) += selftest.o

 obj-$(CONFIG_IOMMUFD) += iommufd.o
+obj-$(CONFIG_IOMMUFD_DRIVER) += iova_bitmap.o

diff --git a/drivers/iommu/iommufd/iova_bitmap.c b/drivers/iommu/iommufd/iova_bitmap.c
new file mode 100644
index 000000000000..f54b56388e00
--- /dev/null
+++ b/drivers/iommu/iommufd/iova_bitmap.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022, Oracle and/or its affiliates.
+ * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved
+ */
+#include <linux/iova_bitmap.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+
+#define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
+
+/*
+ * struct iova_bitmap_map - A bitmap representing an IOVA range
+ *
+ * Main data structure for tracking mapped user pages of bitmap data.
+ *
+ * For example, for something recording dirty IOVAs, it will be provided a
+ * struct iova_bitmap structure, as a general structure for iterating the
+ * total IOVA range. The struct iova_bitmap_map, though, represents the
+ * subset of said IOVA space that is pinned by its parent structure (struct
+ * iova_bitmap).
+ *
+ * The user does not need to know the exact location of the bits in the
+ * bitmap. From the user's perspective the only API available is
+ * iova_bitmap_set(), which records the IOVA *range* in the bitmap by
+ * setting the corresponding bits.
+ *
+ * The bitmap is an array of u64 where each bit represents an IOVA range of
+ * (1 << pgshift).
Thus formula for the bitmap data to be set is: + * + * data[(iova / page_size) / 64] & (1ULL << (iova % 64)) + */ +struct iova_bitmap_map { + /* base IOVA representing bit 0 of the first page */ + unsigned long iova; + + /* page size order that each bit granules to */ + unsigned long pgshift; + + /* page offset of the first user page pinned */ + unsigned long pgoff; + + /* number of pages pinned */ + unsigned long npages; + + /* pinned pages representing the bitmap data */ + struct page **pages; +}; + +/* + * struct iova_bitmap - The IOVA bitmap object + * + * Main data structure for iterating over the bitmap data. + * + * Abstracts the pinning work and iterates in IOVA ranges. + * It uses a windowing scheme and pins the bitmap in relatively + * big ranges e.g. + * + * The bitmap object uses one base page to store all the pinned pages + * pointers related to the bitmap. For sizeof(struct page*) == 8 it stores + * 512 struct page pointers which, if the base page size is 4K, it means + * 2M of bitmap data is pinned at a time. If the iova_bitmap page size is + * also 4K then the range window to iterate is 64G. + * + * For example iterating on a total IOVA range of 4G..128G, it will walk + * through this set of ranges: + * + * 4G - 68G-1 (64G) + * 68G - 128G-1 (64G) + * + * An example of the APIs on how to use/iterate over the IOVA bitmap: + * + * bitmap = iova_bitmap_alloc(iova, length, page_size, data); + * if (IS_ERR(bitmap)) + * return PTR_ERR(bitmap); + * + * ret = iova_bitmap_for_each(bitmap, arg, dirty_reporter_fn); + * + * iova_bitmap_free(bitmap); + * + * Each iteration of the @dirty_reporter_fn is called with a unique @iova + * and @length argument, indicating the current range available through the + * iova_bitmap. The @dirty_reporter_fn uses iova_bitmap_set() to mark dirty + * areas (@iova_length) within that provided range, as following: + * + * iova_bitmap_set(bitmap, iova, iova_length); + * + * The internals of the object uses an index @mapped_base_index that indexes + * which u64 word of the bitmap is mapped, up to @mapped_total_index. + * Those keep being incremented until @mapped_total_index is reached while + * mapping up to PAGE_SIZE / sizeof(struct page*) maximum of pages. + * + * The IOVA bitmap is usually located on what tracks DMA mapped ranges or + * some form of IOVA range tracking that co-relates to the user passed + * bitmap. + */ +struct iova_bitmap { + /* IOVA range representing the currently mapped bitmap data */ + struct iova_bitmap_map mapped; + + /* userspace address of the bitmap */ + u64 __user *bitmap; + + /* u64 index that @mapped points to */ + unsigned long mapped_base_index; + + /* how many u64 can we walk in total */ + unsigned long mapped_total_index; + + /* base IOVA of the whole bitmap */ + unsigned long iova; + + /* length of the IOVA range for the whole bitmap */ + size_t length; +}; + +/* + * Converts a relative IOVA to a bitmap index. + * This function provides the index into the u64 array (bitmap::bitmap) + * for a given IOVA offset. + * Relative IOVA means relative to the bitmap::mapped base IOVA + * (stored in mapped::iova). All computations in this file are done using + * relative IOVAs and thus avoid an extra subtraction against mapped::iova. + * The user API iova_bitmap_set() always uses a regular absolute IOVAs. 
+ */ +static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap, + unsigned long iova) +{ + unsigned long pgsize = 1 << bitmap->mapped.pgshift; + + return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize); +} + +/* + * Converts a bitmap index to a *relative* IOVA. + */ +static unsigned long iova_bitmap_index_to_offset(struct iova_bitmap *bitmap, + unsigned long index) +{ + unsigned long pgshift = bitmap->mapped.pgshift; + + return (index * BITS_PER_TYPE(*bitmap->bitmap)) << pgshift; +} + +/* + * Returns the base IOVA of the mapped range. + */ +static unsigned long iova_bitmap_mapped_iova(struct iova_bitmap *bitmap) +{ + unsigned long skip = bitmap->mapped_base_index; + + return bitmap->iova + iova_bitmap_index_to_offset(bitmap, skip); +} + +/* + * Pins the bitmap user pages for the current range window. + * This is internal to IOVA bitmap and called when advancing the + * index (@mapped_base_index) or allocating the bitmap. + */ +static int iova_bitmap_get(struct iova_bitmap *bitmap) +{ + struct iova_bitmap_map *mapped = &bitmap->mapped; + unsigned long npages; + u64 __user *addr; + long ret; + + /* + * @mapped_base_index is the index of the currently mapped u64 words + * that we have access. Anything before @mapped_base_index is not + * mapped. The range @mapped_base_index .. @mapped_total_index-1 is + * mapped but capped at a maximum number of pages. + */ + npages = DIV_ROUND_UP((bitmap->mapped_total_index - + bitmap->mapped_base_index) * + sizeof(*bitmap->bitmap), PAGE_SIZE); + + /* + * We always cap at max number of 'struct page' a base page can fit. + * This is, for example, on x86 means 2M of bitmap data max. + */ + npages = min(npages, PAGE_SIZE / sizeof(struct page *)); + + /* + * Bitmap address to be pinned is calculated via pointer arithmetic + * with bitmap u64 word index. + */ + addr = bitmap->bitmap + bitmap->mapped_base_index; + + ret = pin_user_pages_fast((unsigned long)addr, npages, + FOLL_WRITE, mapped->pages); + if (ret <= 0) + return -EFAULT; + + mapped->npages = (unsigned long)ret; + /* Base IOVA where @pages point to i.e. bit 0 of the first page */ + mapped->iova = iova_bitmap_mapped_iova(bitmap); + + /* + * offset of the page where pinned pages bit 0 is located. + * This handles the case where the bitmap is not PAGE_SIZE + * aligned. + */ + mapped->pgoff = offset_in_page(addr); + return 0; +} + +/* + * Unpins the bitmap user pages and clears @npages + * (un)pinning is abstracted from API user and it's done when advancing + * the index or freeing the bitmap. + */ +static void iova_bitmap_put(struct iova_bitmap *bitmap) +{ + struct iova_bitmap_map *mapped = &bitmap->mapped; + + if (mapped->npages) { + unpin_user_pages(mapped->pages, mapped->npages); + mapped->npages = 0; + } +} + +/** + * iova_bitmap_alloc() - Allocates an IOVA bitmap object + * @iova: Start address of the IOVA range + * @length: Length of the IOVA range + * @page_size: Page size of the IOVA bitmap. It defines what each bit + * granularity represents + * @data: Userspace address of the bitmap + * + * Allocates an IOVA object and initializes all its fields including the + * first user pages of @data. + * + * Return: A pointer to a newly allocated struct iova_bitmap + * or ERR_PTR() on error. 
+ */ +struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, + unsigned long page_size, u64 __user *data) +{ + struct iova_bitmap_map *mapped; + struct iova_bitmap *bitmap; + int rc; + + bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); + if (!bitmap) + return ERR_PTR(-ENOMEM); + + mapped = &bitmap->mapped; + mapped->pgshift = __ffs(page_size); + bitmap->bitmap = data; + bitmap->mapped_total_index = + iova_bitmap_offset_to_index(bitmap, length - 1) + 1; + bitmap->iova = iova; + bitmap->length = length; + mapped->iova = iova; + mapped->pages = (struct page **)__get_free_page(GFP_KERNEL); + if (!mapped->pages) { + rc = -ENOMEM; + goto err; + } + + rc = iova_bitmap_get(bitmap); + if (rc) + goto err; + return bitmap; + +err: + iova_bitmap_free(bitmap); + return ERR_PTR(rc); +} +EXPORT_SYMBOL_GPL(iova_bitmap_alloc); + +/** + * iova_bitmap_free() - Frees an IOVA bitmap object + * @bitmap: IOVA bitmap to free + * + * It unpins and releases pages array memory and clears any leftover + * state. + */ +void iova_bitmap_free(struct iova_bitmap *bitmap) +{ + struct iova_bitmap_map *mapped = &bitmap->mapped; + + iova_bitmap_put(bitmap); + + if (mapped->pages) { + free_page((unsigned long)mapped->pages); + mapped->pages = NULL; + } + + kfree(bitmap); +} +EXPORT_SYMBOL_GPL(iova_bitmap_free); + +/* + * Returns the remaining bitmap indexes from mapped_total_index to process for + * the currently pinned bitmap pages. + */ +static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap) +{ + unsigned long remaining, bytes; + + bytes = (bitmap->mapped.npages << PAGE_SHIFT) - bitmap->mapped.pgoff; + + remaining = bitmap->mapped_total_index - bitmap->mapped_base_index; + remaining = min_t(unsigned long, remaining, + bytes / sizeof(*bitmap->bitmap)); + + return remaining; +} + +/* + * Returns the length of the mapped IOVA range. + */ +static unsigned long iova_bitmap_mapped_length(struct iova_bitmap *bitmap) +{ + unsigned long max_iova = bitmap->iova + bitmap->length - 1; + unsigned long iova = iova_bitmap_mapped_iova(bitmap); + unsigned long remaining; + + /* + * iova_bitmap_mapped_remaining() returns a number of indexes which + * when converted to IOVA gives us a max length that the bitmap + * pinned data can cover. Afterwards, that is capped to + * only cover the IOVA range in @bitmap::iova .. @bitmap::length. + */ + remaining = iova_bitmap_index_to_offset(bitmap, + iova_bitmap_mapped_remaining(bitmap)); + + if (iova + remaining - 1 > max_iova) + remaining -= ((iova + remaining - 1) - max_iova); + + return remaining; +} + +/* + * Returns true if there's not more data to iterate. + */ +static bool iova_bitmap_done(struct iova_bitmap *bitmap) +{ + return bitmap->mapped_base_index >= bitmap->mapped_total_index; +} + +/* + * Advances to the next range, releases the current pinned + * pages and pins the next set of bitmap pages. + * Returns 0 on success or otherwise errno. 
+ */ +static int iova_bitmap_advance(struct iova_bitmap *bitmap) +{ + unsigned long iova = iova_bitmap_mapped_length(bitmap) - 1; + unsigned long count = iova_bitmap_offset_to_index(bitmap, iova) + 1; + + bitmap->mapped_base_index += count; + + iova_bitmap_put(bitmap); + if (iova_bitmap_done(bitmap)) + return 0; + + /* When advancing the index we pin the next set of bitmap pages */ + return iova_bitmap_get(bitmap); +} + +/** + * iova_bitmap_for_each() - Iterates over the bitmap + * @bitmap: IOVA bitmap to iterate + * @opaque: Additional argument to pass to the callback + * @fn: Function that gets called for each IOVA range + * + * Helper function to iterate over bitmap data representing a portion of IOVA + * space. It hides the complexity of iterating bitmaps and translating the + * mapped bitmap user pages into IOVA ranges to process. + * + * Return: 0 on success, and an error on failure either upon + * iteration or when the callback returns an error. + */ +int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, + iova_bitmap_fn_t fn) +{ + int ret = 0; + + for (; !iova_bitmap_done(bitmap) && !ret; + ret = iova_bitmap_advance(bitmap)) { + ret = fn(bitmap, iova_bitmap_mapped_iova(bitmap), + iova_bitmap_mapped_length(bitmap), opaque); + if (ret) + break; + } + + return ret; +} +EXPORT_SYMBOL_GPL(iova_bitmap_for_each); + +/** + * iova_bitmap_set() - Records an IOVA range in bitmap + * @bitmap: IOVA bitmap + * @iova: IOVA to start + * @length: IOVA range length + * + * Set the bits corresponding to the range [iova .. iova+length-1] in + * the user bitmap. + * + */ +void iova_bitmap_set(struct iova_bitmap *bitmap, + unsigned long iova, size_t length) +{ + struct iova_bitmap_map *mapped = &bitmap->mapped; + unsigned long cur_bit = ((iova - mapped->iova) >> + mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; + unsigned long last_bit = (((iova + length - 1) - mapped->iova) >> + mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; + + do { + unsigned int page_idx = cur_bit / BITS_PER_PAGE; + unsigned int offset = cur_bit % BITS_PER_PAGE; + unsigned int nbits = min(BITS_PER_PAGE - offset, + last_bit - cur_bit + 1); + void *kaddr; + + kaddr = kmap_local_page(mapped->pages[page_idx]); + bitmap_set(kaddr, offset, nbits); + kunmap_local(kaddr); + cur_bit += nbits; + } while (cur_bit <= last_bit); +} +EXPORT_SYMBOL_GPL(iova_bitmap_set); diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile index c82ea032d352..68c05705200f 100644 --- a/drivers/vfio/Makefile +++ b/drivers/vfio/Makefile @@ -1,8 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_VFIO) += vfio.o -vfio-y += vfio_main.o \ - iova_bitmap.o +vfio-y += vfio_main.o vfio-$(CONFIG_VFIO_DEVICE_CDEV) += device_cdev.o vfio-$(CONFIG_VFIO_GROUP) += group.o vfio-$(CONFIG_IOMMUFD) += iommufd.o diff --git a/drivers/vfio/iova_bitmap.c b/drivers/vfio/iova_bitmap.c deleted file mode 100644 index f54b56388e00..000000000000 --- a/drivers/vfio/iova_bitmap.c +++ /dev/null @@ -1,426 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2022, Oracle and/or its affiliates. - * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved - */ -#include -#include -#include -#include - -#define BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE) - -/* - * struct iova_bitmap_map - A bitmap representing an IOVA range - * - * Main data structure for tracking mapped user pages of bitmap data. 
- * - * For example, for something recording dirty IOVAs, it will be provided a - * struct iova_bitmap structure, as a general structure for iterating the - * total IOVA range. The struct iova_bitmap_map, though, represents the - * subset of said IOVA space that is pinned by its parent structure (struct - * iova_bitmap). - * - * The user does not need to exact location of the bits in the bitmap. - * From user perspective the only API available is iova_bitmap_set() which - * records the IOVA *range* in the bitmap by setting the corresponding - * bits. - * - * The bitmap is an array of u64 whereas each bit represents an IOVA of - * range of (1 << pgshift). Thus formula for the bitmap data to be set is: - * - * data[(iova / page_size) / 64] & (1ULL << (iova % 64)) - */ -struct iova_bitmap_map { - /* base IOVA representing bit 0 of the first page */ - unsigned long iova; - - /* page size order that each bit granules to */ - unsigned long pgshift; - - /* page offset of the first user page pinned */ - unsigned long pgoff; - - /* number of pages pinned */ - unsigned long npages; - - /* pinned pages representing the bitmap data */ - struct page **pages; -}; - -/* - * struct iova_bitmap - The IOVA bitmap object - * - * Main data structure for iterating over the bitmap data. - * - * Abstracts the pinning work and iterates in IOVA ranges. - * It uses a windowing scheme and pins the bitmap in relatively - * big ranges e.g. - * - * The bitmap object uses one base page to store all the pinned pages - * pointers related to the bitmap. For sizeof(struct page*) == 8 it stores - * 512 struct page pointers which, if the base page size is 4K, it means - * 2M of bitmap data is pinned at a time. If the iova_bitmap page size is - * also 4K then the range window to iterate is 64G. - * - * For example iterating on a total IOVA range of 4G..128G, it will walk - * through this set of ranges: - * - * 4G - 68G-1 (64G) - * 68G - 128G-1 (64G) - * - * An example of the APIs on how to use/iterate over the IOVA bitmap: - * - * bitmap = iova_bitmap_alloc(iova, length, page_size, data); - * if (IS_ERR(bitmap)) - * return PTR_ERR(bitmap); - * - * ret = iova_bitmap_for_each(bitmap, arg, dirty_reporter_fn); - * - * iova_bitmap_free(bitmap); - * - * Each iteration of the @dirty_reporter_fn is called with a unique @iova - * and @length argument, indicating the current range available through the - * iova_bitmap. The @dirty_reporter_fn uses iova_bitmap_set() to mark dirty - * areas (@iova_length) within that provided range, as following: - * - * iova_bitmap_set(bitmap, iova, iova_length); - * - * The internals of the object uses an index @mapped_base_index that indexes - * which u64 word of the bitmap is mapped, up to @mapped_total_index. - * Those keep being incremented until @mapped_total_index is reached while - * mapping up to PAGE_SIZE / sizeof(struct page*) maximum of pages. - * - * The IOVA bitmap is usually located on what tracks DMA mapped ranges or - * some form of IOVA range tracking that co-relates to the user passed - * bitmap. 
- */ -struct iova_bitmap { - /* IOVA range representing the currently mapped bitmap data */ - struct iova_bitmap_map mapped; - - /* userspace address of the bitmap */ - u64 __user *bitmap; - - /* u64 index that @mapped points to */ - unsigned long mapped_base_index; - - /* how many u64 can we walk in total */ - unsigned long mapped_total_index; - - /* base IOVA of the whole bitmap */ - unsigned long iova; - - /* length of the IOVA range for the whole bitmap */ - size_t length; -}; - -/* - * Converts a relative IOVA to a bitmap index. - * This function provides the index into the u64 array (bitmap::bitmap) - * for a given IOVA offset. - * Relative IOVA means relative to the bitmap::mapped base IOVA - * (stored in mapped::iova). All computations in this file are done using - * relative IOVAs and thus avoid an extra subtraction against mapped::iova. - * The user API iova_bitmap_set() always uses a regular absolute IOVAs. - */ -static unsigned long iova_bitmap_offset_to_index(struct iova_bitmap *bitmap, - unsigned long iova) -{ - unsigned long pgsize = 1 << bitmap->mapped.pgshift; - - return iova / (BITS_PER_TYPE(*bitmap->bitmap) * pgsize); -} - -/* - * Converts a bitmap index to a *relative* IOVA. - */ -static unsigned long iova_bitmap_index_to_offset(struct iova_bitmap *bitmap, - unsigned long index) -{ - unsigned long pgshift = bitmap->mapped.pgshift; - - return (index * BITS_PER_TYPE(*bitmap->bitmap)) << pgshift; -} - -/* - * Returns the base IOVA of the mapped range. - */ -static unsigned long iova_bitmap_mapped_iova(struct iova_bitmap *bitmap) -{ - unsigned long skip = bitmap->mapped_base_index; - - return bitmap->iova + iova_bitmap_index_to_offset(bitmap, skip); -} - -/* - * Pins the bitmap user pages for the current range window. - * This is internal to IOVA bitmap and called when advancing the - * index (@mapped_base_index) or allocating the bitmap. - */ -static int iova_bitmap_get(struct iova_bitmap *bitmap) -{ - struct iova_bitmap_map *mapped = &bitmap->mapped; - unsigned long npages; - u64 __user *addr; - long ret; - - /* - * @mapped_base_index is the index of the currently mapped u64 words - * that we have access. Anything before @mapped_base_index is not - * mapped. The range @mapped_base_index .. @mapped_total_index-1 is - * mapped but capped at a maximum number of pages. - */ - npages = DIV_ROUND_UP((bitmap->mapped_total_index - - bitmap->mapped_base_index) * - sizeof(*bitmap->bitmap), PAGE_SIZE); - - /* - * We always cap at max number of 'struct page' a base page can fit. - * This is, for example, on x86 means 2M of bitmap data max. - */ - npages = min(npages, PAGE_SIZE / sizeof(struct page *)); - - /* - * Bitmap address to be pinned is calculated via pointer arithmetic - * with bitmap u64 word index. - */ - addr = bitmap->bitmap + bitmap->mapped_base_index; - - ret = pin_user_pages_fast((unsigned long)addr, npages, - FOLL_WRITE, mapped->pages); - if (ret <= 0) - return -EFAULT; - - mapped->npages = (unsigned long)ret; - /* Base IOVA where @pages point to i.e. bit 0 of the first page */ - mapped->iova = iova_bitmap_mapped_iova(bitmap); - - /* - * offset of the page where pinned pages bit 0 is located. - * This handles the case where the bitmap is not PAGE_SIZE - * aligned. - */ - mapped->pgoff = offset_in_page(addr); - return 0; -} - -/* - * Unpins the bitmap user pages and clears @npages - * (un)pinning is abstracted from API user and it's done when advancing - * the index or freeing the bitmap. 
- */ -static void iova_bitmap_put(struct iova_bitmap *bitmap) -{ - struct iova_bitmap_map *mapped = &bitmap->mapped; - - if (mapped->npages) { - unpin_user_pages(mapped->pages, mapped->npages); - mapped->npages = 0; - } -} - -/** - * iova_bitmap_alloc() - Allocates an IOVA bitmap object - * @iova: Start address of the IOVA range - * @length: Length of the IOVA range - * @page_size: Page size of the IOVA bitmap. It defines what each bit - * granularity represents - * @data: Userspace address of the bitmap - * - * Allocates an IOVA object and initializes all its fields including the - * first user pages of @data. - * - * Return: A pointer to a newly allocated struct iova_bitmap - * or ERR_PTR() on error. - */ -struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length, - unsigned long page_size, u64 __user *data) -{ - struct iova_bitmap_map *mapped; - struct iova_bitmap *bitmap; - int rc; - - bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL); - if (!bitmap) - return ERR_PTR(-ENOMEM); - - mapped = &bitmap->mapped; - mapped->pgshift = __ffs(page_size); - bitmap->bitmap = data; - bitmap->mapped_total_index = - iova_bitmap_offset_to_index(bitmap, length - 1) + 1; - bitmap->iova = iova; - bitmap->length = length; - mapped->iova = iova; - mapped->pages = (struct page **)__get_free_page(GFP_KERNEL); - if (!mapped->pages) { - rc = -ENOMEM; - goto err; - } - - rc = iova_bitmap_get(bitmap); - if (rc) - goto err; - return bitmap; - -err: - iova_bitmap_free(bitmap); - return ERR_PTR(rc); -} -EXPORT_SYMBOL_GPL(iova_bitmap_alloc); - -/** - * iova_bitmap_free() - Frees an IOVA bitmap object - * @bitmap: IOVA bitmap to free - * - * It unpins and releases pages array memory and clears any leftover - * state. - */ -void iova_bitmap_free(struct iova_bitmap *bitmap) -{ - struct iova_bitmap_map *mapped = &bitmap->mapped; - - iova_bitmap_put(bitmap); - - if (mapped->pages) { - free_page((unsigned long)mapped->pages); - mapped->pages = NULL; - } - - kfree(bitmap); -} -EXPORT_SYMBOL_GPL(iova_bitmap_free); - -/* - * Returns the remaining bitmap indexes from mapped_total_index to process for - * the currently pinned bitmap pages. - */ -static unsigned long iova_bitmap_mapped_remaining(struct iova_bitmap *bitmap) -{ - unsigned long remaining, bytes; - - bytes = (bitmap->mapped.npages << PAGE_SHIFT) - bitmap->mapped.pgoff; - - remaining = bitmap->mapped_total_index - bitmap->mapped_base_index; - remaining = min_t(unsigned long, remaining, - bytes / sizeof(*bitmap->bitmap)); - - return remaining; -} - -/* - * Returns the length of the mapped IOVA range. - */ -static unsigned long iova_bitmap_mapped_length(struct iova_bitmap *bitmap) -{ - unsigned long max_iova = bitmap->iova + bitmap->length - 1; - unsigned long iova = iova_bitmap_mapped_iova(bitmap); - unsigned long remaining; - - /* - * iova_bitmap_mapped_remaining() returns a number of indexes which - * when converted to IOVA gives us a max length that the bitmap - * pinned data can cover. Afterwards, that is capped to - * only cover the IOVA range in @bitmap::iova .. @bitmap::length. - */ - remaining = iova_bitmap_index_to_offset(bitmap, - iova_bitmap_mapped_remaining(bitmap)); - - if (iova + remaining - 1 > max_iova) - remaining -= ((iova + remaining - 1) - max_iova); - - return remaining; -} - -/* - * Returns true if there's not more data to iterate. 
- */ -static bool iova_bitmap_done(struct iova_bitmap *bitmap) -{ - return bitmap->mapped_base_index >= bitmap->mapped_total_index; -} - -/* - * Advances to the next range, releases the current pinned - * pages and pins the next set of bitmap pages. - * Returns 0 on success or otherwise errno. - */ -static int iova_bitmap_advance(struct iova_bitmap *bitmap) -{ - unsigned long iova = iova_bitmap_mapped_length(bitmap) - 1; - unsigned long count = iova_bitmap_offset_to_index(bitmap, iova) + 1; - - bitmap->mapped_base_index += count; - - iova_bitmap_put(bitmap); - if (iova_bitmap_done(bitmap)) - return 0; - - /* When advancing the index we pin the next set of bitmap pages */ - return iova_bitmap_get(bitmap); -} - -/** - * iova_bitmap_for_each() - Iterates over the bitmap - * @bitmap: IOVA bitmap to iterate - * @opaque: Additional argument to pass to the callback - * @fn: Function that gets called for each IOVA range - * - * Helper function to iterate over bitmap data representing a portion of IOVA - * space. It hides the complexity of iterating bitmaps and translating the - * mapped bitmap user pages into IOVA ranges to process. - * - * Return: 0 on success, and an error on failure either upon - * iteration or when the callback returns an error. - */ -int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque, - iova_bitmap_fn_t fn) -{ - int ret = 0; - - for (; !iova_bitmap_done(bitmap) && !ret; - ret = iova_bitmap_advance(bitmap)) { - ret = fn(bitmap, iova_bitmap_mapped_iova(bitmap), - iova_bitmap_mapped_length(bitmap), opaque); - if (ret) - break; - } - - return ret; -} -EXPORT_SYMBOL_GPL(iova_bitmap_for_each); - -/** - * iova_bitmap_set() - Records an IOVA range in bitmap - * @bitmap: IOVA bitmap - * @iova: IOVA to start - * @length: IOVA range length - * - * Set the bits corresponding to the range [iova .. iova+length-1] in - * the user bitmap. - * - */ -void iova_bitmap_set(struct iova_bitmap *bitmap, - unsigned long iova, size_t length) -{ - struct iova_bitmap_map *mapped = &bitmap->mapped; - unsigned long cur_bit = ((iova - mapped->iova) >> - mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; - unsigned long last_bit = (((iova + length - 1) - mapped->iova) >> - mapped->pgshift) + mapped->pgoff * BITS_PER_BYTE; - - do { - unsigned int page_idx = cur_bit / BITS_PER_PAGE; - unsigned int offset = cur_bit % BITS_PER_PAGE; - unsigned int nbits = min(BITS_PER_PAGE - offset, - last_bit - cur_bit + 1); - void *kaddr; - - kaddr = kmap_local_page(mapped->pages[page_idx]); - bitmap_set(kaddr, offset, nbits); - kunmap_local(kaddr); - cur_bit += nbits; - } while (cur_bit <= last_bit); -} -EXPORT_SYMBOL_GPL(iova_bitmap_set); diff --git a/drivers/vfio/pci/mlx5/Kconfig b/drivers/vfio/pci/mlx5/Kconfig index 7088edc4fb28..c3ced56b7787 100644 --- a/drivers/vfio/pci/mlx5/Kconfig +++ b/drivers/vfio/pci/mlx5/Kconfig @@ -3,6 +3,7 @@ config MLX5_VFIO_PCI tristate "VFIO support for MLX5 PCI devices" depends on MLX5_CORE select VFIO_PCI_CORE + select IOMMUFD_DRIVER help This provides migration support for MLX5 devices using the VFIO framework. diff --git a/drivers/vfio/pci/pds/Kconfig b/drivers/vfio/pci/pds/Kconfig index 407b3fd32733..fff368a8183b 100644 --- a/drivers/vfio/pci/pds/Kconfig +++ b/drivers/vfio/pci/pds/Kconfig @@ -5,6 +5,7 @@ config PDS_VFIO_PCI tristate "VFIO support for PDS PCI devices" depends on PDS_CORE select VFIO_PCI_CORE + select IOMMUFD_DRIVER help This provides generic PCI support for PDS devices using the VFIO framework. 
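For context before the header changes below, a typical consumer of the
relocated iova_bitmap API (for example, a VFIO migration driver reporting
device-dirtied IOVAs) follows the pattern sketched here. This is a minimal
sketch, not part of the patch: struct my_dev, my_dirty_reporter() and
device_query_dirty() are hypothetical names.

static int my_dirty_reporter(struct iova_bitmap *bitmap, unsigned long iova,
			     size_t length, void *opaque)
{
	struct my_dev *mdev = opaque;
	unsigned long dirty_iova, dirty_len;
	int ret;

	/* Ask the device which part of [iova, iova + length - 1] is dirty */
	ret = device_query_dirty(mdev, iova, length, &dirty_iova, &dirty_len);
	if (ret)
		return ret;

	/* Sets the user bitmap bits covering the dirtied sub-range */
	if (dirty_len)
		iova_bitmap_set(bitmap, dirty_iova, dirty_len);
	return 0;
}

	bitmap = iova_bitmap_alloc(iova, length, page_size, user_bitmap);
	if (IS_ERR(bitmap))
		return PTR_ERR(bitmap);

	/* Walks the IOVA range window by window, pinning the user bitmap */
	ret = iova_bitmap_for_each(bitmap, mdev, my_dirty_reporter);

	iova_bitmap_free(bitmap);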
diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h
index c006cf0a25f3..1c338f5e5b7a 100644
--- a/include/linux/iova_bitmap.h
+++ b/include/linux/iova_bitmap.h
@@ -7,6 +7,7 @@
 #define _IOVA_BITMAP_H_

 #include <linux/types.h>
+#include <linux/errno.h>

 struct iova_bitmap;

@@ -14,6 +15,7 @@ typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap,
				unsigned long iova, size_t length,
				void *opaque);

+#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER)
 struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
				      unsigned long page_size,
				      u64 __user *data);
@@ -22,5 +24,29 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
			 iova_bitmap_fn_t fn);
 void iova_bitmap_set(struct iova_bitmap *bitmap,
		     unsigned long iova, size_t length);
+#else
+static inline struct iova_bitmap *iova_bitmap_alloc(unsigned long iova,
+						    size_t length,
+						    unsigned long page_size,
+						    u64 __user *data)
+{
+	return NULL;
+}
+
+static inline void iova_bitmap_free(struct iova_bitmap *bitmap)
+{
+}
+
+static inline int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+				       iova_bitmap_fn_t fn)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline void iova_bitmap_set(struct iova_bitmap *bitmap,
+				   unsigned long iova, size_t length)
+{
+}
+#endif

 #endif
--
cgit v1.2.3

From 750e2e902b7180cb82d2f9b1e372e32087bb8b1b Mon Sep 17 00:00:00 2001
From: Joao Martins
Date: Tue, 24 Oct 2023 14:50:55 +0100
Subject: iommu: Add iommu_domain ops for dirty tracking

Add to the iommu domain operations a set of callbacks to perform dirty
tracking, particularly to start and stop tracking and to read and clear the
dirty data. Drivers are generally expected to dynamically change their
translation structures to toggle the tracking and flush some form of control
state structure that stands in the IOVA translation path. Though it's not
mandatory, as drivers can also enable dirty tracking at boot and just clear
the dirty bits before setting dirty tracking.

For each of the newly added IOMMU core APIs:

iommu_cap::IOMMU_CAP_DIRTY_TRACKING: new device iommu_capable value when
probing for capabilities of the device.

.set_dirty_tracking(): an iommu driver is expected to change its translation
structures and enable dirty tracking for the devices in the iommu_domain.
For drivers making dirty tracking always-enabled, it should just return 0.

.read_and_clear_dirty(): an iommu driver is expected to walk the pagetables
for the iova range passed in and use iommu_dirty_bitmap_record() to record
dirty info per IOVA. When detecting that a given IOVA is dirty it should
also clear its dirty state from the PTE, *unless* the flag
IOMMU_DIRTY_NO_CLEAR is passed in -- flushing is steered from the caller of
the domain_op via iotlb_gather. The iommu core APIs use the same data
structure already in use for VFIO device dirty tracking (struct
iova_bitmap), abstracted by the iommu_dirty_bitmap_record() helper function.

domain::dirty_ops: IOMMU domains will store the dirty ops depending on
whether the iommu device supports dirty tracking or not. iommu drivers can
then use this field to determine whether dirty tracking is supported and
enforced on attach. The enforcement is enabled via domain_alloc_user(),
which is done via an IOMMUFD hwpt flag introduced later.
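To make the expected driver contract concrete, here is a minimal sketch of
the two callbacks described above (the struct and helpers are added in the
diff that follows); everything prefixed mock_, plus the MOCK_PTE_DIRTY bit,
is a hypothetical stand-in, not code from this patch:

static int mock_set_dirty_tracking(struct iommu_domain *domain, bool enable)
{
	/*
	 * Flip the dirty-bit enable in whatever control structure stands
	 * in the IOVA translation path, then flush that state.
	 */
	return mock_toggle_dirty_bit(to_mock_domain(domain), enable);
}

static int mock_read_and_clear_dirty(struct iommu_domain *domain,
				     unsigned long iova, size_t size,
				     unsigned long flags,
				     struct iommu_dirty_bitmap *dirty)
{
	unsigned long end = iova + size - 1;

	for (; iova <= end; iova += PAGE_SIZE) {
		u64 *pte = mock_find_pte(to_mock_domain(domain), iova);

		if (!pte || !(*pte & MOCK_PTE_DIRTY))
			continue;

		/*
		 * Records the IOVA both in the user bitmap and in the
		 * iotlb_gather used for the caller-steered flush.
		 */
		iommu_dirty_bitmap_record(dirty, iova, PAGE_SIZE);
		if (!(flags & IOMMU_DIRTY_NO_CLEAR))
			*pte &= ~MOCK_PTE_DIRTY;
	}
	return 0;
}

static const struct iommu_dirty_ops mock_dirty_ops = {
	.set_dirty_tracking = mock_set_dirty_tracking,
	.read_and_clear_dirty = mock_read_and_clear_dirty,
};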
Link: https://lore.kernel.org/r/20231024135109.73787-5-joao.m.martins@oracle.com
Signed-off-by: Joao Martins
Reviewed-by: Jason Gunthorpe
Reviewed-by: Lu Baolu
Reviewed-by: Kevin Tian
Signed-off-by: Jason Gunthorpe
---
 include/linux/io-pgtable.h |  4 +++
 include/linux/iommu.h      | 70 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 74 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 1b7a44b35616..25142a0e2fc2 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -166,6 +166,10 @@ struct io_pgtable_ops {
			struct iommu_iotlb_gather *gather);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
+	int (*read_and_clear_dirty)(struct io_pgtable_ops *ops,
+				    unsigned long iova, size_t size,
+				    unsigned long flags,
+				    struct iommu_dirty_bitmap *dirty);
 };

 /**

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3861d66b65c1..1d42bdb37cbc 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -13,6 +13,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/of.h>
+#include <linux/iova_bitmap.h>
 #include <uapi/linux/iommu.h>

 #define IOMMU_READ	(1 << 0)
@@ -37,6 +38,7 @@ struct bus_type;
 struct device;
 struct iommu_domain;
 struct iommu_domain_ops;
+struct iommu_dirty_ops;
 struct notifier_block;
 struct iommu_sva;
 struct iommu_fault_event;
@@ -95,6 +97,8 @@ struct iommu_domain_geometry {
 struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
+	const struct iommu_dirty_ops *dirty_ops;
+
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
@@ -133,6 +137,7 @@ enum iommu_cap {
	 * usefully support the non-strict DMA flush queue.
	 */
	IOMMU_CAP_DEFERRED_FLUSH,
+	IOMMU_CAP_DIRTY_TRACKING,	/* IOMMU supports dirty tracking */
 };

 /* These are the possible reserved region types */
@@ -227,6 +232,35 @@ struct iommu_iotlb_gather {
	bool queued;
 };

+/**
+ * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
+ * @bitmap: IOVA bitmap
+ * @gather: Range information for a pending IOTLB flush
+ */
+struct iommu_dirty_bitmap {
+	struct iova_bitmap *bitmap;
+	struct iommu_iotlb_gather *gather;
+};
+
+/* Read but do not clear any dirty bits */
+#define IOMMU_DIRTY_NO_CLEAR (1 << 0)
+
+/**
+ * struct iommu_dirty_ops - domain specific dirty tracking operations
+ * @set_dirty_tracking: Enable or disable dirty tracking on the iommu domain
+ * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
+ *                        into a bitmap, with a bit represented as a page.
+ *                        Reads the dirty PTE bits and clears them from the IO
+ *                        pagetables.
+ */
+struct iommu_dirty_ops {
+	int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
+	int (*read_and_clear_dirty)(struct iommu_domain *domain,
+				    unsigned long iova, size_t size,
+				    unsigned long flags,
+				    struct iommu_dirty_bitmap *dirty);
+};
+
 /**
  * struct iommu_ops - iommu ops and capabilities
  * @capable: check capability
@@ -641,6 +675,28 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
	return gather && gather->queued;
 }

+static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
+					   struct iova_bitmap *bitmap,
+					   struct iommu_iotlb_gather *gather)
+{
+	if (gather)
+		iommu_iotlb_gather_init(gather);
+
+	dirty->bitmap = bitmap;
+	dirty->gather = gather;
+}
+
+static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
+					     unsigned long iova,
+					     unsigned long length)
+{
+	if (dirty->bitmap)
+		iova_bitmap_set(dirty->bitmap, iova, length);
+
+	if (dirty->gather)
+		iommu_iotlb_gather_add_range(dirty->gather, iova, length);
+}
+
 /* PCI device grouping function */
 extern struct iommu_group *pci_device_group(struct device *dev);
 /* Generic device grouping function */
@@ -746,6 +802,8 @@ struct iommu_fwspec {};
 struct iommu_device {};
 struct iommu_fault_param {};
 struct iommu_iotlb_gather {};
+struct iommu_dirty_bitmap {};
+struct iommu_dirty_ops {};

 static inline bool iommu_present(const struct bus_type *bus)
 {
@@ -978,6 +1036,18 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
	return false;
 }

+static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
+					   struct iova_bitmap *bitmap,
+					   struct iommu_iotlb_gather *gather)
+{
+}
+
+static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
+					     unsigned long iova,
+					     unsigned long length)
+{
+}
+
 static inline void iommu_device_unregister(struct iommu_device *iommu)
 {
 }
--
cgit v1.2.3

From 54d606816b32401de5431f6776a78b1de135bfa2 Mon Sep 17 00:00:00 2001
From: Lu Baolu
Date: Wed, 25 Oct 2023 21:39:29 -0700
Subject: iommu: Add IOMMU_DOMAIN_NESTED

Introduce a new domain type for a user I/O page table, which is nested on
top of another user space address space represented by a PAGING domain. This
new domain can be allocated by the domain_alloc_user op, and attached to a
device through the existing iommu_attach_device/group() interfaces. The
mappings of a nested domain are managed by user space software, so it is
not necessary to have map/unmap callbacks.
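As an illustration of what such a domain might look like on the driver
side, consider the following sketch; the mock_* names are hypothetical, and
the parent pointer is only passed through domain_alloc_user in the next
patch:

struct mock_nested_domain {
	struct iommu_domain domain;
	struct mock_paging_domain *s2_parent;	/* stage-2 walked after stage-1 */
};

static const struct iommu_domain_ops mock_nested_ops = {
	.attach_dev	= mock_nested_attach_dev,
	.free		= mock_nested_domain_free,
	/* no .map_pages/.unmap_pages: mappings are managed by user space */
};

static struct iommu_domain *mock_alloc_nested(struct iommu_domain *parent)
{
	struct mock_nested_domain *nested;

	/* Userspace-owned allocation, so account it to the cgroup */
	nested = kzalloc(sizeof(*nested), GFP_KERNEL_ACCOUNT);
	if (!nested)
		return ERR_PTR(-ENOMEM);

	nested->domain.type = IOMMU_DOMAIN_NESTED;
	nested->domain.ops = &mock_nested_ops;
	nested->s2_parent = to_mock_paging(parent);
	return &nested->domain;
}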
Link: https://lore.kernel.org/r/20231026043938.63898-2-yi.l.liu@intel.com
Signed-off-by: Lu Baolu
Signed-off-by: Nicolin Chen
Signed-off-by: Yi Liu
Reviewed-by: Kevin Tian
Reviewed-by: Jason Gunthorpe
Signed-off-by: Jason Gunthorpe
---
 include/linux/iommu.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 1d42bdb37cbc..bc303cb2af37 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -67,6 +67,9 @@ struct iommu_domain_geometry {

 #define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */

+#define __IOMMU_DOMAIN_NESTED	(1U << 6)  /* User-managed address space nested
+					      on a stage-2 translation */
+
 #define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
 /*
  * These are the possible domain-types
@@ -93,6 +96,7 @@ struct iommu_domain_geometry {
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
 #define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
+#define IOMMU_DOMAIN_NESTED	(__IOMMU_DOMAIN_NESTED)

 struct iommu_domain {
	unsigned type;
--
cgit v1.2.3

From 2bdabb8e82f564d19eeeb7c83e6b2467af0707cb Mon Sep 17 00:00:00 2001
From: Yi Liu
Date: Wed, 25 Oct 2023 21:39:34 -0700
Subject: iommu: Pass in parent domain with user_data to domain_alloc_user op

The domain_alloc_user op already accepts user flags for domain allocation;
add a parent domain pointer and driver-specific user data support as well.
The user data is tagged with a type so iommu drivers can add their own
driver-specific user data per hw_pagetable.

Add a struct iommu_user_data as a bundle of data_ptr/data_len/type from an
iommufd core uAPI structure. Make the user data opaque to the core, since a
userspace driver must match the kernel driver. In the future, if drivers
share some common parameter, there could be a generic parameter as well.
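On the iommufd core side, the op invocation is then expected to bundle the
uAPI fields roughly as sketched below. This is an assumption-laden sketch:
the cmd field names follow the IOMMU_HWPT_ALLOC uAPI, and the nested
allocation wiring only lands in later iommufd patches.

	/* Hypothetical caller-side sketch; 'cmd' is the parsed ioctl payload */
	const struct iommu_user_data user_data = {
		.type = cmd->data_type,
		.uptr = u64_to_user_ptr(cmd->data_uptr),
		.len = cmd->data_len,
	};

	hwpt->domain = ops->domain_alloc_user(idev->dev, cmd->flags, parent,
					      cmd->data_len ? &user_data : NULL);
	if (IS_ERR(hwpt->domain)) {
		rc = PTR_ERR(hwpt->domain);
		hwpt->domain = NULL;
		goto out_abort;
	}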
Link: https://lore.kernel.org/r/20231026043938.63898-7-yi.l.liu@intel.com Signed-off-by: Lu Baolu Co-developed-by: Nicolin Chen Signed-off-by: Nicolin Chen Signed-off-by: Yi Liu Reviewed-by: Kevin Tian Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- drivers/iommu/amd/iommu.c | 9 ++++++--- drivers/iommu/intel/iommu.c | 7 ++++++- drivers/iommu/iommufd/hw_pagetable.c | 3 ++- drivers/iommu/iommufd/selftest.c | 7 ++++++- include/linux/iommu.h | 27 ++++++++++++++++++++++++--- 5 files changed, 44 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index caad10f9cee3..b399c5741378 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2219,12 +2219,15 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned int type) return domain; } -static struct iommu_domain *amd_iommu_domain_alloc_user(struct device *dev, - u32 flags) +static struct iommu_domain * +amd_iommu_domain_alloc_user(struct device *dev, u32 flags, + struct iommu_domain *parent, + const struct iommu_user_data *user_data) + { unsigned int type = IOMMU_DOMAIN_UNMANAGED; - if (flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) + if ((flags & ~IOMMU_HWPT_ALLOC_DIRTY_TRACKING) || parent || user_data) return ERR_PTR(-EOPNOTSUPP); return do_iommu_domain_alloc(type, dev, flags); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index eb92a201cc0b..fe67f8d77b09 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4076,7 +4076,9 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) } static struct iommu_domain * -intel_iommu_domain_alloc_user(struct device *dev, u32 flags) +intel_iommu_domain_alloc_user(struct device *dev, u32 flags, + struct iommu_domain *parent, + const struct iommu_user_data *user_data) { struct iommu_domain *domain; struct intel_iommu *iommu; @@ -4086,6 +4088,9 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags) (~(IOMMU_HWPT_ALLOC_NEST_PARENT | IOMMU_HWPT_ALLOC_DIRTY_TRACKING))) return ERR_PTR(-EOPNOTSUPP); + if (parent || user_data) + return ERR_PTR(-EOPNOTSUPP); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) return ERR_PTR(-ENODEV); diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c index 6bce9af0cb8d..198ecbd536f7 100644 --- a/drivers/iommu/iommufd/hw_pagetable.c +++ b/drivers/iommu/iommufd/hw_pagetable.c @@ -108,7 +108,8 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, hwpt_paging->ioas = ioas; if (ops->domain_alloc_user) { - hwpt->domain = ops->domain_alloc_user(idev->dev, flags); + hwpt->domain = + ops->domain_alloc_user(idev->dev, flags, NULL, NULL); if (IS_ERR(hwpt->domain)) { rc = PTR_ERR(hwpt->domain); hwpt->domain = NULL; diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c index 068928ba7950..d71007234896 100644 --- a/drivers/iommu/iommufd/selftest.c +++ b/drivers/iommu/iommufd/selftest.c @@ -241,7 +241,9 @@ static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type) } static struct iommu_domain * -mock_domain_alloc_user(struct device *dev, u32 flags) +mock_domain_alloc_user(struct device *dev, u32 flags, + struct iommu_domain *parent, + const struct iommu_user_data *user_data) { struct mock_dev *mdev = container_of(dev, struct mock_dev, dev); struct iommu_domain *domain; @@ -250,6 +252,9 @@ mock_domain_alloc_user(struct device *dev, u32 flags) (~(IOMMU_HWPT_ALLOC_NEST_PARENT | 
IOMMU_HWPT_ALLOC_DIRTY_TRACKING)))
		return ERR_PTR(-EOPNOTSUPP);

+	if (parent || user_data)
+		return ERR_PTR(-EOPNOTSUPP);
+
	if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
	    (mdev->flags & MOCK_FLAGS_DEVICE_NO_DIRTY))
		return ERR_PTR(-EOPNOTSUPP);

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index bc303cb2af37..2ddd99f55471 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -265,6 +265,21 @@ struct iommu_dirty_ops {
			struct iommu_dirty_bitmap *dirty);
 };

+/**
+ * struct iommu_user_data - iommu driver specific user space data info
+ * @type: The data type of the user buffer
+ * @uptr: Pointer to the user buffer for copy_from_user()
+ * @len: The length of the user buffer in bytes
+ *
+ * The user space data is defined by an uAPI in include/uapi/linux/iommufd.h;
+ * @type, @uptr and @len should simply be copied from the corresponding
+ * iommufd core uAPI structure.
+ */
+struct iommu_user_data {
+	unsigned int type;
+	void __user *uptr;
+	size_t len;
+};
+
 /**
  * struct iommu_ops - iommu ops and capabilities
  * @capable: check capability
@@ -279,8 +294,12 @@ struct iommu_dirty_ops {
  *                     parameters as defined in include/uapi/linux/iommufd.h.
  *                     Unlike @domain_alloc, it is called only by IOMMUFD and
  *                     must fully initialize the new domain before return.
- *                     Upon success, a domain is returned. Upon failure,
- *                     ERR_PTR must be returned.
+ *                     Upon success, if the @user_data is valid and the @parent
+ *                     points to a kernel-managed domain, the new domain must
+ *                     be of type IOMMU_DOMAIN_NESTED; otherwise, the @parent
+ *                     must be NULL, the @user_data is optional, and the new
+ *                     domain must support __IOMMU_DOMAIN_PAGING.
+ *                     Upon failure, ERR_PTR must be returned.
  * @probe_device: Add device to iommu driver handling
  * @release_device: Remove device from iommu driver handling
  * @probe_finalize: Do final setup work after the device is added to an IOMMU
@@ -313,7 +332,9 @@ struct iommu_ops {

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
-	struct iommu_domain *(*domain_alloc_user)(struct device *dev, u32 flags);
+	struct iommu_domain *(*domain_alloc_user)(
+		struct device *dev, u32 flags, struct iommu_domain *parent,
+		const struct iommu_user_data *user_data);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
--
cgit v1.2.3

From e9d36c07bb787840e4813fb09a929a17d522a69f Mon Sep 17 00:00:00 2001
From: Nicolin Chen
Date: Wed, 25 Oct 2023 21:39:36 -0700
Subject: iommu: Add iommu_copy_struct_from_user helper

Wrap up the data type/pointer/length sanity checks and a
copy_struct_from_user() call for iommu drivers to copy driver-specific data
via struct iommu_user_data. It is expected to be used in the
domain_alloc_user op, for example.
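Because the helper is built on copy_struct_from_user(), it is backward
compatible by construction: a shorter structure from an older userspace is
accepted and its missing tail is zero-filled, while a longer structure with
non-zero trailing bytes fails with -E2BIG. A hypothetical driver structure
illustrates the @min_last convention (the struct, IOMMU_HWPT_DATA_MOCK and
field names are all invented for illustration):

	/* Hypothetical driver uAPI struct; 'iotlb' ended its first version */
	struct iommu_hwpt_mock_data {
		__u64 s1_pgtbl;
		__u32 iotlb;		/* last member of the initial version */
		__u32 new_flags;	/* added in a later kernel */
	};

	struct iommu_hwpt_mock_data data;
	int rc;

	/* @min_last stays 'iotlb', so initial-version buffers keep working */
	rc = iommu_copy_struct_from_user(&data, user_data,
					 IOMMU_HWPT_DATA_MOCK, iotlb);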
Link: https://lore.kernel.org/r/20231026043938.63898-9-yi.l.liu@intel.com
Signed-off-by: Nicolin Chen
Co-developed-by: Yi Liu
Signed-off-by: Yi Liu
Reviewed-by: Kevin Tian
Reviewed-by: Jason Gunthorpe
Signed-off-by: Jason Gunthorpe
---
 include/linux/iommu.h | 40 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 40 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2ddd99f55471..8fb1b41b4d15 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -280,6 +280,46 @@ struct iommu_user_data {
	size_t len;
 };

+/**
+ * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
+ * @dst_data: Pointer to an iommu driver specific user data that is defined in
+ *            include/uapi/linux/iommufd.h
+ * @src_data: Pointer to a struct iommu_user_data for user space data info
+ * @data_type: The data type of the @dst_data. Must match with @src_data.type
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ *           This should be offsetofend using the last member in the user data
+ *           struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int __iommu_copy_struct_from_user(
+	void *dst_data, const struct iommu_user_data *src_data,
+	unsigned int data_type, size_t data_len, size_t min_len)
+{
+	if (WARN_ON(!dst_data || !src_data))
+		return -EINVAL;
+	if (src_data->type != data_type)
+		return -EINVAL;
+	if (src_data->len < min_len || data_len < src_data->len)
+		return -EINVAL;
+	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
+				     src_data->len);
+}
+
+/**
+ * iommu_copy_struct_from_user - Copy iommu driver specific user space data
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ *        include/uapi/linux/iommufd.h
+ * @user_data: Pointer to a struct iommu_user_data for user space data info
+ * @data_type: The data type of the @kdst. Must match with @user_data->type
+ * @min_last: The last member of the data structure @kdst points to in the
+ *            initial version.
+ * Return 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
+	__iommu_copy_struct_from_user(kdst, user_data, data_type,          \
+				      sizeof(*kdst),                       \
+				      offsetofend(typeof(*kdst), min_last))
+
 /**
  * struct iommu_ops - iommu ops and capabilities
  * @capable: check capability
--
cgit v1.2.3
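Putting the series together, a driver's domain_alloc_user might be
structured as below. This is a hedged end-to-end sketch, not code from any
patch above: mock_alloc_paging(), mock_alloc_nested() (extending the
allocation sketch shown after the IOMMU_DOMAIN_NESTED patch), struct
iommu_hwpt_mock_data and IOMMU_HWPT_DATA_MOCK are hypothetical, while the
flags, the parent/user_data contract and the copy helper are the ones
introduced in this series.

static struct iommu_domain *
mock_domain_alloc_user(struct device *dev, u32 flags,
		       struct iommu_domain *parent,
		       const struct iommu_user_data *user_data)
{
	struct iommu_hwpt_mock_data data;
	int rc;

	if (!parent) {
		/* PAGING path: must be fully initialized before returning */
		if (flags & ~(IOMMU_HWPT_ALLOC_NEST_PARENT |
			      IOMMU_HWPT_ALLOC_DIRTY_TRACKING))
			return ERR_PTR(-EOPNOTSUPP);
		return mock_alloc_paging(dev, flags);
	}

	/* Nested path: parent must be a kernel-managed PAGING domain */
	if (flags || parent->type != IOMMU_DOMAIN_UNMANAGED || !user_data)
		return ERR_PTR(-EOPNOTSUPP);

	rc = iommu_copy_struct_from_user(&data, user_data,
					 IOMMU_HWPT_DATA_MOCK, iotlb);
	if (rc)
		return ERR_PTR(rc);

	return mock_alloc_nested(parent, &data);
}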