summaryrefslogtreecommitdiff
path: root/drivers/firewire
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/firewire')
-rw-r--r--  drivers/firewire/core-cdev.c |  5
-rw-r--r--  drivers/firewire/core-iso.c  | 48
-rw-r--r--  drivers/firewire/ohci.c      | 24
3 files changed, 38 insertions, 39 deletions
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 2b8a878c8aae..bb4d0f938f5b 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -67,7 +67,6 @@ struct client {
u64 iso_closure;
struct fw_iso_buffer buffer;
unsigned long vm_start;
- bool buffer_is_mapped;
struct list_head phy_receiver_link;
u64 phy_receiver_closure;
@@ -1098,7 +1097,7 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
}
// The DMA mapping operation is available if the buffer is already allocated by
// mmap(2) system call. If not, it is delegated to the system call.
- if (!client->buffer_is_mapped) {
+ if (client->buffer.pages && !client->buffer.dma_addrs) {
ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card,
iso_dma_direction(context));
if (ret < 0) {
@@ -1106,7 +1105,6 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
return ret;
}
- client->buffer_is_mapped = true;
}
client->iso_closure = a->closure;
client->iso_context = context;
@@ -1837,7 +1835,6 @@ static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
iso_dma_direction(client->iso_context));
if (ret < 0)
goto fail;
- client->buffer_is_mapped = true;
}
}
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index f2e35ac7a476..3f36243ec0c1 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -55,25 +55,32 @@ int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction)
{
- dma_addr_t address;
+ dma_addr_t *dma_addrs __free(kfree) = kcalloc(buffer->page_count, sizeof(dma_addrs[0]),
+ GFP_KERNEL);
int i;
- buffer->direction = direction;
+ if (!dma_addrs)
+ return -ENOMEM;
// Retrieve DMA mapping addresses for the pages. They are not contiguous. Maintain the cache
// coherency for the pages by hand.
for (i = 0; i < buffer->page_count; i++) {
// The dma_map_phys() with a physical address per page is available here, instead.
- address = dma_map_page(card->device, buffer->pages[i],
- 0, PAGE_SIZE, direction);
- if (dma_mapping_error(card->device, address))
+ dma_addr_t dma_addr = dma_map_page(card->device, buffer->pages[i], 0, PAGE_SIZE,
+ direction);
+ if (dma_mapping_error(card->device, dma_addr))
break;
- set_page_private(buffer->pages[i], address);
+ dma_addrs[i] = dma_addr;
}
- buffer->page_count_mapped = i;
- if (i < buffer->page_count)
+ if (i < buffer->page_count) {
+ while (i-- > 0)
+ dma_unmap_page(card->device, dma_addrs[i], PAGE_SIZE, direction);
return -ENOMEM;
+ }
+
+ buffer->direction = direction;
+ buffer->dma_addrs = no_free_ptr(dma_addrs);
return 0;
}
@@ -98,13 +105,13 @@ EXPORT_SYMBOL(fw_iso_buffer_init);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
struct fw_card *card)
{
- int i;
- dma_addr_t address;
-
- for (i = 0; i < buffer->page_count_mapped; i++) {
- address = page_private(buffer->pages[i]);
- dma_unmap_page(card->device, address,
- PAGE_SIZE, buffer->direction);
+ if (buffer->dma_addrs) {
+ for (int i = 0; i < buffer->page_count; ++i) {
+ dma_addr_t dma_addr = buffer->dma_addrs[i];
+ dma_unmap_page(card->device, dma_addr, PAGE_SIZE, buffer->direction);
+ }
+ kfree(buffer->dma_addrs);
+ buffer->dma_addrs = NULL;
}
if (buffer->pages) {
@@ -114,20 +121,15 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
}
buffer->page_count = 0;
- buffer->page_count_mapped = 0;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);
/* Convert DMA address to offset into virtually contiguous buffer. */
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
- size_t i;
- dma_addr_t address;
- ssize_t offset;
-
- for (i = 0; i < buffer->page_count; i++) {
- address = page_private(buffer->pages[i]);
- offset = (ssize_t)completed - (ssize_t)address;
+ for (int i = 0; i < buffer->page_count; i++) {
+ dma_addr_t dma_addr = buffer->dma_addrs[i];
+ ssize_t offset = (ssize_t)completed - (ssize_t)dma_addr;
if (offset > 0 && offset <= PAGE_SIZE)
return (i << PAGE_SHIFT) + offset;
}
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index e3e78dc42530..68a336577d36 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -3184,7 +3184,7 @@ static int queue_iso_transmit(struct iso_context *ctx,
struct descriptor *d, *last, *pd;
struct fw_iso_packet *p;
__le32 *header;
- dma_addr_t d_bus, page_bus;
+ dma_addr_t d_bus;
u32 z, header_z, payload_z, irq;
u32 payload_index, payload_end_index, next_page_index;
int page, end_page, i, length, offset;
@@ -3254,11 +3254,11 @@ static int queue_iso_transmit(struct iso_context *ctx,
min(next_page_index, payload_end_index) - payload_index;
pd[i].req_count = cpu_to_le16(length);
- page_bus = page_private(buffer->pages[page]);
- pd[i].data_address = cpu_to_le32(page_bus + offset);
+ dma_addr_t dma_addr = buffer->dma_addrs[page];
+ pd[i].data_address = cpu_to_le32(dma_addr + offset);
dma_sync_single_range_for_device(ctx->context.ohci->card.device,
- page_bus, offset, length,
+ dma_addr, offset, length,
DMA_TO_DEVICE);
payload_index += length;
@@ -3287,7 +3287,7 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
{
struct device *device = ctx->context.ohci->card.device;
struct descriptor *d, *pd;
- dma_addr_t d_bus, page_bus;
+ dma_addr_t d_bus;
u32 z, header_z, rest;
int i, j, length;
int page, offset, packet_count, header_size, payload_per_buffer;
@@ -3337,10 +3337,10 @@ static int queue_iso_packet_per_buffer(struct iso_context *ctx,
pd->res_count = pd->req_count;
pd->transfer_status = 0;
- page_bus = page_private(buffer->pages[page]);
- pd->data_address = cpu_to_le32(page_bus + offset);
+ dma_addr_t dma_addr = buffer->dma_addrs[page];
+ pd->data_address = cpu_to_le32(dma_addr + offset);
- dma_sync_single_range_for_device(device, page_bus,
+ dma_sync_single_range_for_device(device, dma_addr,
offset, length,
DMA_FROM_DEVICE);
@@ -3367,7 +3367,7 @@ static int queue_iso_buffer_fill(struct iso_context *ctx,
unsigned long payload)
{
struct descriptor *d;
- dma_addr_t d_bus, page_bus;
+ dma_addr_t d_bus;
int page, offset, rest, z, i, length;
page = payload >> PAGE_SHIFT;
@@ -3400,11 +3400,11 @@ static int queue_iso_buffer_fill(struct iso_context *ctx,
d->res_count = d->req_count;
d->transfer_status = 0;
- page_bus = page_private(buffer->pages[page]);
- d->data_address = cpu_to_le32(page_bus + offset);
+ dma_addr_t dma_addr = buffer->dma_addrs[page];
+ d->data_address = cpu_to_le32(dma_addr + offset);
dma_sync_single_range_for_device(ctx->context.ohci->card.device,
- page_bus, offset, length,
+ dma_addr, offset, length,
DMA_FROM_DEVICE);
rest -= length;