 drivers/virtio/virtio_ring.c | 463 ++++++++++++++++++++++++++++-------------
 1 file changed, 292 insertions(+), 171 deletions(-)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index f5062061c408..7b6205253b46 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -166,7 +166,7 @@ struct vring_virtqueue {
bool packed_ring;
/* Is DMA API used? */
- bool use_dma_api;
+ bool use_map_api;
/* Can we use weak barriers? */
bool weak_barriers;
@@ -210,8 +210,7 @@ struct vring_virtqueue {
/* DMA, allocation, and size information */
bool we_own_ring;
- /* Device used for doing DMA */
- struct device *dma_dev;
+ union virtio_map map;
#ifdef DEBUG
/* They're supposed to lock for us. */
@@ -268,7 +267,7 @@ static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
* unconditionally on data path.
*/
-static bool vring_use_dma_api(const struct virtio_device *vdev)
+static bool vring_use_map_api(const struct virtio_device *vdev)
{
if (!virtio_has_dma_quirk(vdev))
return true;
@@ -291,33 +290,39 @@ static bool vring_use_dma_api(const struct virtio_device *vdev)
static bool vring_need_unmap_buffer(const struct vring_virtqueue *vring,
const struct vring_desc_extra *extra)
{
- return vring->use_dma_api && (extra->addr != DMA_MAPPING_ERROR);
+ return vring->use_map_api && (extra->addr != DMA_MAPPING_ERROR);
}
size_t virtio_max_dma_size(const struct virtio_device *vdev)
{
size_t max_segment_size = SIZE_MAX;
- if (vring_use_dma_api(vdev))
- max_segment_size = dma_max_mapping_size(vdev->dev.parent);
+ if (vring_use_map_api(vdev)) {
+ if (vdev->map) {
+ max_segment_size =
+ vdev->map->max_mapping_size(vdev->vmap);
+ } else
+ max_segment_size =
+ dma_max_mapping_size(vdev->dev.parent);
+ }
return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);
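[Illustrative only, not part of this patch: a driver can clamp its own segment limit to what the transport can map via the helper above. "max_seg" and the SZ_4M starting value are hypothetical.]

        size_t max_seg = SZ_4M;

        /* Never ask the device for segments larger than the transport can map. */
        max_seg = min_t(size_t, max_seg, virtio_max_dma_size(vdev));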
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag,
- struct device *dma_dev)
+ dma_addr_t *map_handle, gfp_t flag,
+ union virtio_map map)
{
- if (vring_use_dma_api(vdev)) {
- return dma_alloc_coherent(dma_dev, size,
- dma_handle, flag);
+ if (vring_use_map_api(vdev)) {
+ return virtqueue_map_alloc_coherent(vdev, map, size,
+ map_handle, flag);
} else {
void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
if (queue) {
phys_addr_t phys_addr = virt_to_phys(queue);
- *dma_handle = (dma_addr_t)phys_addr;
+ *map_handle = (dma_addr_t)phys_addr;
/*
* Sanity check: make sure we didn't truncate
@@ -330,7 +335,7 @@ static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
* warning and abort if we end up with an
* unrepresentable address.
*/
- if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
+ if (WARN_ON_ONCE(*map_handle != phys_addr)) {
free_pages_exact(queue, PAGE_ALIGN(size));
return NULL;
}
@@ -340,11 +345,12 @@ static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
}
static void vring_free_queue(struct virtio_device *vdev, size_t size,
- void *queue, dma_addr_t dma_handle,
- struct device *dma_dev)
+ void *queue, dma_addr_t map_handle,
+ union virtio_map map)
{
- if (vring_use_dma_api(vdev))
- dma_free_coherent(dma_dev, size, queue, dma_handle);
+ if (vring_use_map_api(vdev))
+ virtqueue_map_free_coherent(vdev, map, size,
+ queue, map_handle);
else
free_pages_exact(queue, PAGE_ALIGN(size));
}
@@ -356,7 +362,21 @@ static void vring_free_queue(struct virtio_device *vdev, size_t size,
*/
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
- return vq->dma_dev;
+ return vq->map.dma_dev;
+}
+
+static int vring_mapping_error(const struct vring_virtqueue *vq,
+ dma_addr_t addr)
+{
+ struct virtio_device *vdev = vq->vq.vdev;
+
+ if (!vq->use_map_api)
+ return 0;
+
+ if (vdev->map)
+ return vdev->map->mapping_error(vq->map, addr);
+ else
+ return dma_mapping_error(vring_dma_dev(vq), addr);
}
/* Map one sg entry. */
@@ -372,13 +392,13 @@ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist
*len = sg->length;
- if (!vq->use_dma_api) {
+ if (!vq->use_map_api) {
/*
* If DMA is not used, KMSAN doesn't know that the scatterlist
* is initialized by the hardware. Explicitly check/unpoison it
* depending on the direction.
*/
- kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
+ kmsan_handle_dma(sg_phys(sg), sg->length, direction);
*addr = (dma_addr_t)sg_phys(sg);
return 0;
}
@@ -388,11 +408,11 @@ static int vring_map_one_sg(const struct vring_virtqueue *vq, struct scatterlist
* the way it expects (we don't guarantee that the scatterlist
* will exist for the lifetime of the mapping).
*/
- *addr = dma_map_page(vring_dma_dev(vq),
- sg_page(sg), sg->offset, sg->length,
- direction);
+ *addr = virtqueue_map_page_attrs(&vq->vq, sg_page(sg),
+ sg->offset, sg->length,
+ direction, 0);
- if (dma_mapping_error(vring_dma_dev(vq), *addr))
+ if (vring_mapping_error(vq, *addr))
return -ENOMEM;
return 0;
@@ -402,20 +422,11 @@ static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
void *cpu_addr, size_t size,
enum dma_data_direction direction)
{
- if (!vq->use_dma_api)
+ if (!vq->use_map_api)
return (dma_addr_t)virt_to_phys(cpu_addr);
- return dma_map_single(vring_dma_dev(vq),
- cpu_addr, size, direction);
-}
-
-static int vring_mapping_error(const struct vring_virtqueue *vq,
- dma_addr_t addr)
-{
- if (!vq->use_dma_api)
- return 0;
-
- return dma_mapping_error(vring_dma_dev(vq), addr);
+ return virtqueue_map_single_attrs(&vq->vq, cpu_addr,
+ size, direction, 0);
}
static void virtqueue_init(struct vring_virtqueue *vq, u32 num)
@@ -449,24 +460,17 @@ static unsigned int vring_unmap_one_split(const struct vring_virtqueue *vq,
flags = extra->flags;
if (flags & VRING_DESC_F_INDIRECT) {
- if (!vq->use_dma_api)
- goto out;
-
- dma_unmap_single(vring_dma_dev(vq),
- extra->addr,
- extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- } else {
- if (!vring_need_unmap_buffer(vq, extra))
+ if (!vq->use_map_api)
goto out;
+ } else if (!vring_need_unmap_buffer(vq, extra))
+ goto out;
- dma_unmap_page(vring_dma_dev(vq),
- extra->addr,
- extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- }
+ virtqueue_unmap_page_attrs(&vq->vq,
+ extra->addr,
+ extra->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE,
+ 0);
out:
return extra->next;
@@ -790,7 +794,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
extra = (struct vring_desc_extra *)&indir_desc[num];
- if (vq->use_dma_api) {
+ if (vq->use_map_api) {
for (j = 0; j < num; j++)
vring_unmap_one_split(vq, &extra[j]);
}
@@ -1064,12 +1068,13 @@ err_state:
}
static void vring_free_split(struct vring_virtqueue_split *vring_split,
- struct virtio_device *vdev, struct device *dma_dev)
+ struct virtio_device *vdev,
+ union virtio_map map)
{
vring_free_queue(vdev, vring_split->queue_size_in_bytes,
vring_split->vring.desc,
vring_split->queue_dma_addr,
- dma_dev);
+ map);
kfree(vring_split->desc_state);
kfree(vring_split->desc_extra);
@@ -1080,7 +1085,7 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
u32 num,
unsigned int vring_align,
bool may_reduce_num,
- struct device *dma_dev)
+ union virtio_map map)
{
void *queue = NULL;
dma_addr_t dma_addr;
@@ -1096,7 +1101,7 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
&dma_addr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
- dma_dev);
+ map);
if (queue)
break;
if (!may_reduce_num)
@@ -1110,7 +1115,7 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
/* Try to get a single page. You are my only hope! */
queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
&dma_addr, GFP_KERNEL | __GFP_ZERO,
- dma_dev);
+ map);
}
if (!queue)
return -ENOMEM;
@@ -1134,7 +1139,7 @@ static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
- struct device *dma_dev)
+ union virtio_map map)
{
struct vring_virtqueue *vq;
int err;
@@ -1157,8 +1162,8 @@ static struct virtqueue *__vring_new_virtqueue_split(unsigned int index,
#else
vq->broken = false;
#endif
- vq->dma_dev = dma_dev;
- vq->use_dma_api = vring_use_dma_api(vdev);
+ vq->map = map;
+ vq->use_map_api = vring_use_map_api(vdev);
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -1195,21 +1200,21 @@ static struct virtqueue *vring_create_virtqueue_split(
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
- struct device *dma_dev)
+ union virtio_map map)
{
struct vring_virtqueue_split vring_split = {};
struct virtqueue *vq;
int err;
err = vring_alloc_queue_split(&vring_split, vdev, num, vring_align,
- may_reduce_num, dma_dev);
+ may_reduce_num, map);
if (err)
return NULL;
vq = __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
- context, notify, callback, name, dma_dev);
+ context, notify, callback, name, map);
if (!vq) {
- vring_free_split(&vring_split, vdev, dma_dev);
+ vring_free_split(&vring_split, vdev, map);
return NULL;
}
@@ -1228,7 +1233,7 @@ static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
err = vring_alloc_queue_split(&vring_split, vdev, num,
vq->split.vring_align,
vq->split.may_reduce_num,
- vring_dma_dev(vq));
+ vq->map);
if (err)
goto err;
@@ -1246,7 +1251,7 @@ static int virtqueue_resize_split(struct virtqueue *_vq, u32 num)
return 0;
err_state_extra:
- vring_free_split(&vring_split, vdev, vring_dma_dev(vq));
+ vring_free_split(&vring_split, vdev, vq->map);
err:
virtqueue_reinit_split(vq);
return -ENOMEM;
@@ -1274,22 +1279,16 @@ static void vring_unmap_extra_packed(const struct vring_virtqueue *vq,
flags = extra->flags;
if (flags & VRING_DESC_F_INDIRECT) {
- if (!vq->use_dma_api)
- return;
-
- dma_unmap_single(vring_dma_dev(vq),
- extra->addr, extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- } else {
- if (!vring_need_unmap_buffer(vq, extra))
+ if (!vq->use_map_api)
return;
+ } else if (!vring_need_unmap_buffer(vq, extra))
+ return;
- dma_unmap_page(vring_dma_dev(vq),
- extra->addr, extra->len,
- (flags & VRING_DESC_F_WRITE) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
- }
+ virtqueue_unmap_page_attrs(&vq->vq,
+ extra->addr, extra->len,
+ (flags & VRING_DESC_F_WRITE) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE,
+ 0);
}
static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
@@ -1366,7 +1365,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
desc[i].addr = cpu_to_le64(addr);
desc[i].len = cpu_to_le32(len);
- if (unlikely(vq->use_dma_api)) {
+ if (unlikely(vq->use_map_api)) {
extra[i].addr = premapped ? DMA_MAPPING_ERROR : addr;
extra[i].len = len;
extra[i].flags = n < out_sgs ? 0 : VRING_DESC_F_WRITE;
@@ -1388,7 +1387,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
sizeof(struct vring_packed_desc));
vq->packed.vring.desc[head].id = cpu_to_le16(id);
- if (vq->use_dma_api) {
+ if (vq->use_map_api) {
vq->packed.desc_extra[id].addr = addr;
vq->packed.desc_extra[id].len = total_sg *
sizeof(struct vring_packed_desc);
@@ -1530,7 +1529,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
desc[i].len = cpu_to_le32(len);
desc[i].id = cpu_to_le16(id);
- if (unlikely(vq->use_dma_api)) {
+ if (unlikely(vq->use_map_api)) {
vq->packed.desc_extra[curr].addr = premapped ?
DMA_MAPPING_ERROR : addr;
vq->packed.desc_extra[curr].len = len;
@@ -1665,7 +1664,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
vq->free_head = id;
vq->vq.num_free += state->num;
- if (unlikely(vq->use_dma_api)) {
+ if (unlikely(vq->use_map_api)) {
curr = id;
for (i = 0; i < state->num; i++) {
vring_unmap_extra_packed(vq,
@@ -1683,7 +1682,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
if (!desc)
return;
- if (vq->use_dma_api) {
+ if (vq->use_map_api) {
len = vq->packed.desc_extra[id].len;
num = len / sizeof(struct vring_packed_desc);
@@ -1962,25 +1961,25 @@ static struct vring_desc_extra *vring_alloc_desc_extra(unsigned int num)
static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
struct virtio_device *vdev,
- struct device *dma_dev)
+ union virtio_map map)
{
if (vring_packed->vring.desc)
vring_free_queue(vdev, vring_packed->ring_size_in_bytes,
vring_packed->vring.desc,
vring_packed->ring_dma_addr,
- dma_dev);
+ map);
if (vring_packed->vring.driver)
vring_free_queue(vdev, vring_packed->event_size_in_bytes,
vring_packed->vring.driver,
vring_packed->driver_event_dma_addr,
- dma_dev);
+ map);
if (vring_packed->vring.device)
vring_free_queue(vdev, vring_packed->event_size_in_bytes,
vring_packed->vring.device,
vring_packed->device_event_dma_addr,
- dma_dev);
+ map);
kfree(vring_packed->desc_state);
kfree(vring_packed->desc_extra);
@@ -1988,7 +1987,7 @@ static void vring_free_packed(struct vring_virtqueue_packed *vring_packed,
static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
struct virtio_device *vdev,
- u32 num, struct device *dma_dev)
+ u32 num, union virtio_map map)
{
struct vring_packed_desc *ring;
struct vring_packed_desc_event *driver, *device;
@@ -2000,7 +1999,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
ring = vring_alloc_queue(vdev, ring_size_in_bytes,
&ring_dma_addr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
- dma_dev);
+ map);
if (!ring)
goto err;
@@ -2013,7 +2012,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
driver = vring_alloc_queue(vdev, event_size_in_bytes,
&driver_event_dma_addr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
- dma_dev);
+ map);
if (!driver)
goto err;
@@ -2024,7 +2023,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
device = vring_alloc_queue(vdev, event_size_in_bytes,
&device_event_dma_addr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
- dma_dev);
+ map);
if (!device)
goto err;
@@ -2036,7 +2035,7 @@ static int vring_alloc_queue_packed(struct vring_virtqueue_packed *vring_packed,
return 0;
err:
- vring_free_packed(vring_packed, vdev, dma_dev);
+ vring_free_packed(vring_packed, vdev, map);
return -ENOMEM;
}
@@ -2112,7 +2111,7 @@ static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
- struct device *dma_dev)
+ union virtio_map map)
{
struct vring_virtqueue *vq;
int err;
@@ -2135,8 +2134,8 @@ static struct virtqueue *__vring_new_virtqueue_packed(unsigned int index,
vq->broken = false;
#endif
vq->packed_ring = true;
- vq->dma_dev = dma_dev;
- vq->use_dma_api = vring_use_dma_api(vdev);
+ vq->map = map;
+ vq->use_map_api = vring_use_map_api(vdev);
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
!context;
@@ -2173,18 +2172,18 @@ static struct virtqueue *vring_create_virtqueue_packed(
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
- struct device *dma_dev)
+ union virtio_map map)
{
struct vring_virtqueue_packed vring_packed = {};
struct virtqueue *vq;
- if (vring_alloc_queue_packed(&vring_packed, vdev, num, dma_dev))
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num, map))
return NULL;
vq = __vring_new_virtqueue_packed(index, &vring_packed, vdev, weak_barriers,
- context, notify, callback, name, dma_dev);
+ context, notify, callback, name, map);
if (!vq) {
- vring_free_packed(&vring_packed, vdev, dma_dev);
+ vring_free_packed(&vring_packed, vdev, map);
return NULL;
}
@@ -2200,7 +2199,7 @@ static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
struct virtio_device *vdev = _vq->vdev;
int err;
- if (vring_alloc_queue_packed(&vring_packed, vdev, num, vring_dma_dev(vq)))
+ if (vring_alloc_queue_packed(&vring_packed, vdev, num, vq->map))
goto err_ring;
err = vring_alloc_state_extra_packed(&vring_packed);
@@ -2217,7 +2216,7 @@ static int virtqueue_resize_packed(struct virtqueue *_vq, u32 num)
return 0;
err_state_extra:
- vring_free_packed(&vring_packed, vdev, vring_dma_dev(vq));
+ vring_free_packed(&vring_packed, vdev, vq->map);
err_ring:
virtqueue_reinit_packed(vq);
return -ENOMEM;
@@ -2448,8 +2447,8 @@ struct device *virtqueue_dma_dev(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
- if (vq->use_dma_api)
- return vring_dma_dev(vq);
+ if (vq->use_map_api && !_vq->vdev->map)
+ return vq->map.dma_dev;
else
return NULL;
}
@@ -2734,19 +2733,20 @@ struct virtqueue *vring_create_virtqueue(
void (*callback)(struct virtqueue *),
const char *name)
{
+ union virtio_map map = {.dma_dev = vdev->dev.parent};
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return vring_create_virtqueue_packed(index, num, vring_align,
vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name, vdev->dev.parent);
+ context, notify, callback, name, map);
return vring_create_virtqueue_split(index, num, vring_align,
vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name, vdev->dev.parent);
+ context, notify, callback, name, map);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
-struct virtqueue *vring_create_virtqueue_dma(
+struct virtqueue *vring_create_virtqueue_map(
unsigned int index,
unsigned int num,
unsigned int vring_align,
@@ -2757,19 +2757,19 @@ struct virtqueue *vring_create_virtqueue_dma(
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name,
- struct device *dma_dev)
+ union virtio_map map)
{
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
return vring_create_virtqueue_packed(index, num, vring_align,
vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name, dma_dev);
+ context, notify, callback, name, map);
return vring_create_virtqueue_split(index, num, vring_align,
vdev, weak_barriers, may_reduce_num,
- context, notify, callback, name, dma_dev);
+ context, notify, callback, name, map);
}
-EXPORT_SYMBOL_GPL(vring_create_virtqueue_dma);
+EXPORT_SYMBOL_GPL(vring_create_virtqueue_map);
/**
* virtqueue_resize - resize the vring of vq
@@ -2880,6 +2880,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
const char *name)
{
struct vring_virtqueue_split vring_split = {};
+ union virtio_map map = {.dma_dev = vdev->dev.parent};
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
struct vring_virtqueue_packed vring_packed = {};
@@ -2889,13 +2890,13 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
return __vring_new_virtqueue_packed(index, &vring_packed,
vdev, weak_barriers,
context, notify, callback,
- name, vdev->dev.parent);
+ name, map);
}
vring_init(&vring_split.vring, num, pages, vring_align);
return __vring_new_virtqueue_split(index, &vring_split, vdev, weak_barriers,
context, notify, callback, name,
- vdev->dev.parent);
+ map);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
@@ -2909,19 +2910,19 @@ static void vring_free(struct virtqueue *_vq)
vq->packed.ring_size_in_bytes,
vq->packed.vring.desc,
vq->packed.ring_dma_addr,
- vring_dma_dev(vq));
+ vq->map);
vring_free_queue(vq->vq.vdev,
vq->packed.event_size_in_bytes,
vq->packed.vring.driver,
vq->packed.driver_event_dma_addr,
- vring_dma_dev(vq));
+ vq->map);
vring_free_queue(vq->vq.vdev,
vq->packed.event_size_in_bytes,
vq->packed.vring.device,
vq->packed.device_event_dma_addr,
- vring_dma_dev(vq));
+ vq->map);
kfree(vq->packed.desc_state);
kfree(vq->packed.desc_extra);
@@ -2930,7 +2931,7 @@ static void vring_free(struct virtqueue *_vq)
vq->split.queue_size_in_bytes,
vq->split.vring.desc,
vq->split.queue_dma_addr,
- vring_dma_dev(vq));
+ vq->map);
}
}
if (!vq->packed_ring) {
@@ -3137,7 +3138,108 @@ const struct vring *virtqueue_get_vring(const struct virtqueue *vq)
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
/**
- * virtqueue_dma_map_single_attrs - map DMA for _vq
+ * virtqueue_map_alloc_coherent - alloc coherent mapping
+ * @vdev: the virtio device we are talking to
+ * @map: metadata for performing mapping
+ * @size: the size of the buffer
+ * @map_handle: the pointer to the mapped address
+ * @gfp: allocation flag (GFP_XXX)
+ *
+ * return virtual address or NULL on error
+ */
+void *virtqueue_map_alloc_coherent(struct virtio_device *vdev,
+ union virtio_map map,
+ size_t size, dma_addr_t *map_handle,
+ gfp_t gfp)
+{
+ if (vdev->map)
+ return vdev->map->alloc(map, size,
+ map_handle, gfp);
+ else
+ return dma_alloc_coherent(map.dma_dev, size,
+ map_handle, gfp);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_alloc_coherent);
+
+/**
+ * virtqueue_map_free_coherent - free coherent mapping
+ * @vdev: the virtio device we are talking to
+ * @map: metadata for performing mapping
+ * @size: the size of the buffer
+ * @vaddr: the virtual address of the buffer to free
+ * @map_handle: the mapped address that needs to be freed
+ *
+ */
+void virtqueue_map_free_coherent(struct virtio_device *vdev,
+ union virtio_map map, size_t size, void *vaddr,
+ dma_addr_t map_handle)
+{
+ if (vdev->map)
+ vdev->map->free(map, size, vaddr,
+ map_handle, 0);
+ else
+ dma_free_coherent(map.dma_dev, size, vaddr, map_handle);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_free_coherent);
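[Usage sketch, not part of the patch: a transport would pair the two coherent helpers much like vring_alloc_queue()/vring_free_queue() above. The buf/handle locals and GFP flags are illustrative; a device that sets vdev->map would supply its own union virtio_map rather than a dma_dev.]

        union virtio_map map = { .dma_dev = vdev->dev.parent };
        dma_addr_t handle;
        void *buf;

        buf = virtqueue_map_alloc_coherent(vdev, map, PAGE_SIZE, &handle,
                                           GFP_KERNEL | __GFP_ZERO);
        if (!buf)
                return -ENOMEM;
        /* ... program "handle" into the device ... */
        virtqueue_map_free_coherent(vdev, map, PAGE_SIZE, buf, handle);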
+
+/**
+ * virtqueue_map_page_attrs - map a page to the device
+ * @_vq: the virtqueue we are talking to
+ * @page: the page that will be mapped by the device
+ * @offset: the offset in the page for a buffer
+ * @size: the buffer size
+ * @dir: mapping direction
+ * @attrs: mapping attributes
+ *
+ * Returns the mapped address. The caller should check it with virtqueue_map_mapping_error().
+ */
+dma_addr_t virtqueue_map_page_attrs(const struct virtqueue *_vq,
+ struct page *page,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+
+ if (vdev->map)
+ return vdev->map->map_page(vq->map,
+ page, offset, size,
+ dir, attrs);
+
+ return dma_map_page_attrs(vring_dma_dev(vq),
+ page, offset, size,
+ dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_map_page_attrs);
+
+/**
+ * virtqueue_unmap_page_attrs - unmap a page mapped to the device
+ * @_vq: the virtqueue we are talking to
+ * @map_handle: the mapped address
+ * @size: the buffer size
+ * @dir: mapping direction
+ * @attrs: unmapping attributes
+ */
+void virtqueue_unmap_page_attrs(const struct virtqueue *_vq,
+ dma_addr_t map_handle,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
+
+ if (vdev->map)
+ vdev->map->unmap_page(vq->map,
+ map_handle, size, dir, attrs);
+ else
+ dma_unmap_page_attrs(vring_dma_dev(vq), map_handle,
+ size, dir, attrs);
+}
+EXPORT_SYMBOL_GPL(virtqueue_unmap_page_attrs);
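[A minimal sketch of the map/unmap pair above, illustrative only; "vq", "page" and the chosen direction are placeholders.]

        dma_addr_t addr;

        addr = virtqueue_map_page_attrs(vq, page, 0, PAGE_SIZE,
                                        DMA_TO_DEVICE, 0);
        if (virtqueue_map_mapping_error(vq, addr))
                return -ENOMEM;
        /* ... put "addr" into a descriptor for the device to read ... */
        virtqueue_unmap_page_attrs(vq, addr, PAGE_SIZE, DMA_TO_DEVICE, 0);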
+
+/**
+ * virtqueue_map_single_attrs - map DMA for _vq
* @_vq: the struct virtqueue we're talking about.
* @ptr: the pointer of the buffer to do dma
* @size: the size of the buffer to do dma
@@ -3147,139 +3249,158 @@ EXPORT_SYMBOL_GPL(virtqueue_get_vring);
* The caller calls this to do dma mapping in advance. The DMA address can be
* passed to this _vq when it is in pre-mapped mode.
*
- * return DMA address. Caller should check that by virtqueue_dma_mapping_error().
+ * Returns the mapped address. The caller should check it with virtqueue_map_mapping_error().
*/
-dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr,
- size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+dma_addr_t virtqueue_map_single_attrs(const struct virtqueue *_vq, void *ptr,
+ size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
- if (!vq->use_dma_api) {
- kmsan_handle_dma(virt_to_page(ptr), offset_in_page(ptr), size, dir);
+ if (!vq->use_map_api) {
+ kmsan_handle_dma(virt_to_phys(ptr), size, dir);
return (dma_addr_t)virt_to_phys(ptr);
}
- return dma_map_single_attrs(vring_dma_dev(vq), ptr, size, dir, attrs);
+ /* DMA must never operate on areas that might be remapped. */
+ if (dev_WARN_ONCE(&_vq->vdev->dev, is_vmalloc_addr(ptr),
+ "rejecting DMA map of vmalloc memory\n"))
+ return DMA_MAPPING_ERROR;
+
+ return virtqueue_map_page_attrs(&vq->vq, virt_to_page(ptr),
+ offset_in_page(ptr), size, dir, attrs);
}
-EXPORT_SYMBOL_GPL(virtqueue_dma_map_single_attrs);
+EXPORT_SYMBOL_GPL(virtqueue_map_single_attrs);
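[A minimal pre-mapping sketch, not part of the patch; "vq", "buf" and "len" are hypothetical driver state, and buf must be linearly mapped since the helper rejects vmalloc memory.]

        dma_addr_t addr;

        addr = virtqueue_map_single_attrs(vq, buf, len, DMA_TO_DEVICE, 0);
        if (virtqueue_map_mapping_error(vq, addr))
                return -ENOMEM;
        /* ... hand "addr" to the ring while the vq is in pre-mapped mode ... */
        virtqueue_unmap_single_attrs(vq, addr, len, DMA_TO_DEVICE, 0);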
/**
- * virtqueue_dma_unmap_single_attrs - unmap DMA for _vq
+ * virtqueue_unmap_single_attrs - unmap a mapping for _vq
* @_vq: the struct virtqueue we're talking about.
* @addr: the dma address to unmap
* @size: the size of the buffer
* @dir: DMA direction
* @attrs: DMA Attrs
*
- * Unmap the address that is mapped by the virtqueue_dma_map_* APIs.
+ * Unmap the address that is mapped by the virtqueue_map_* APIs.
*
*/
-void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+void virtqueue_unmap_single_attrs(const struct virtqueue *_vq,
+ dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
- if (!vq->use_dma_api)
+ if (!vq->use_map_api)
return;
- dma_unmap_single_attrs(vring_dma_dev(vq), addr, size, dir, attrs);
+ virtqueue_unmap_page_attrs(_vq, addr, size, dir, attrs);
}
-EXPORT_SYMBOL_GPL(virtqueue_dma_unmap_single_attrs);
+EXPORT_SYMBOL_GPL(virtqueue_unmap_single_attrs);
/**
- * virtqueue_dma_mapping_error - check dma address
+ * virtqueue_map_mapping_error - check dma address
* @_vq: the struct virtqueue we're talking about.
* @addr: DMA address
*
* Returns 0 if the mapped address is valid, and a non-zero value otherwise.
*/
-int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr)
+int virtqueue_map_mapping_error(const struct virtqueue *_vq, dma_addr_t addr)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
-
- if (!vq->use_dma_api)
- return 0;
+ const struct vring_virtqueue *vq = to_vvq(_vq);
- return dma_mapping_error(vring_dma_dev(vq), addr);
+ return vring_mapping_error(vq, addr);
}
-EXPORT_SYMBOL_GPL(virtqueue_dma_mapping_error);
+EXPORT_SYMBOL_GPL(virtqueue_map_mapping_error);
/**
- * virtqueue_dma_need_sync - check a dma address needs sync
+ * virtqueue_map_need_sync - check a dma address needs sync
* @_vq: the struct virtqueue we're talking about.
* @addr: DMA address
*
- * Check if the dma address mapped by the virtqueue_dma_map_* APIs needs to be
+ * Check if the dma address mapped by the virtqueue_map_* APIs needs to be
* synchronized
*
* return bool
*/
-bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr)
+bool virtqueue_map_need_sync(const struct virtqueue *_vq, dma_addr_t addr)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
- if (!vq->use_dma_api)
+ if (!vq->use_map_api)
return false;
- return dma_need_sync(vring_dma_dev(vq), addr);
+ if (vdev->map)
+ return vdev->map->need_sync(vq->map, addr);
+ else
+ return dma_need_sync(vring_dma_dev(vq), addr);
}
-EXPORT_SYMBOL_GPL(virtqueue_dma_need_sync);
+EXPORT_SYMBOL_GPL(virtqueue_map_need_sync);
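[Illustrative usage, not part of the patch; "vq", "addr" and "len" stand for a pre-mapped buffer the device has just written into.]

        if (virtqueue_map_need_sync(vq, addr))
                virtqueue_map_sync_single_range_for_cpu(vq, addr, 0, len,
                                                        DMA_FROM_DEVICE);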
/**
- * virtqueue_dma_sync_single_range_for_cpu - dma sync for cpu
+ * virtqueue_map_sync_single_range_for_cpu - map sync for cpu
* @_vq: the struct virtqueue we're talking about.
* @addr: DMA address
* @offset: DMA address offset
* @size: buf size for sync
* @dir: DMA direction
*
- * Before calling this function, use virtqueue_dma_need_sync() to confirm that
+ * Before calling this function, use virtqueue_map_need_sync() to confirm that
* the DMA address really needs to be synchronized
*
*/
-void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq,
+void virtqueue_map_sync_single_range_for_cpu(const struct virtqueue *_vq,
dma_addr_t addr,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
- struct device *dev = vring_dma_dev(vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
- if (!vq->use_dma_api)
+ if (!vq->use_map_api)
return;
- dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+ if (vdev->map)
+ vdev->map->sync_single_for_cpu(vq->map,
+ addr + offset, size, dir);
+ else
+ dma_sync_single_range_for_cpu(vring_dma_dev(vq),
+ addr, offset, size, dir);
}
-EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_cpu);
+EXPORT_SYMBOL_GPL(virtqueue_map_sync_single_range_for_cpu);
/**
- * virtqueue_dma_sync_single_range_for_device - dma sync for device
+ * virtqueue_map_sync_single_range_for_device - map sync for device
* @_vq: the struct virtqueue we're talking about.
* @addr: DMA address
* @offset: DMA address offset
* @size: buf size for sync
* @dir: DMA direction
*
- * Before calling this function, use virtqueue_dma_need_sync() to confirm that
+ * Before calling this function, use virtqueue_map_need_sync() to confirm that
* the DMA address really needs to be synchronized
*/
-void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq,
+void virtqueue_map_sync_single_range_for_device(const struct virtqueue *_vq,
dma_addr_t addr,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
- struct vring_virtqueue *vq = to_vvq(_vq);
- struct device *dev = vring_dma_dev(vq);
+ const struct vring_virtqueue *vq = to_vvq(_vq);
+ struct virtio_device *vdev = _vq->vdev;
- if (!vq->use_dma_api)
+ if (!vq->use_map_api)
return;
- dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+ if (vdev->map)
+ vdev->map->sync_single_for_device(vq->map,
+ addr + offset,
+ size, dir);
+ else
+ dma_sync_single_range_for_device(vring_dma_dev(vq), addr,
+ offset, size, dir);
}
-EXPORT_SYMBOL_GPL(virtqueue_dma_sync_single_range_for_device);
+EXPORT_SYMBOL_GPL(virtqueue_map_sync_single_range_for_device);
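[The mirror-image sketch for handing CPU writes back to the device, again with "vq", "addr" and "len" as hypothetical placeholders.]

        if (virtqueue_map_need_sync(vq, addr))
                virtqueue_map_sync_single_range_for_device(vq, addr, 0, len,
                                                           DMA_TO_DEVICE);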
MODULE_DESCRIPTION("Virtio ring implementation");
MODULE_LICENSE("GPL");