Diffstat (limited to 'drivers/platform/raspberrypi/vchiq-interface')
 drivers/platform/raspberrypi/vchiq-interface/TESTING         |  125
 drivers/platform/raspberrypi/vchiq-interface/TODO            |    4
 drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c     | 1477
 drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c     |  112
 drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c    | 4013
 drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c |  157
 drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c     | 1355
 drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h   |  112
 8 files changed, 7355 insertions, 0 deletions
diff --git a/drivers/platform/raspberrypi/vchiq-interface/TESTING b/drivers/platform/raspberrypi/vchiq-interface/TESTING
new file mode 100644
index 000000000000..c98f688b07e0
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-interface/TESTING
@@ -0,0 +1,125 @@
+This document contains some hints for testing the VCHIQ driver without
+additional hardware beyond the Raspberry Pi itself.
+
+* Requirements & limitations
+
+Testing the VCHIQ driver requires a Raspberry Pi with one of the following SoCs:
+ - BCM2835 (e.g. Raspberry Pi Zero W)
+ - BCM2836 (e.g. Raspberry Pi 2)
+ - BCM2837 (e.g. Raspberry Pi 3 B+)
+
+The BCM2711 used in the Raspberry Pi 4 is currently not supported in the
+mainline kernel.
+
+There are no specific requirements for the VideoCore firmware to get VCHIQ
+working.
+
+The test scenarios described in this document are based on the tool vchiq_test.
+Its source code is available here: https://github.com/raspberrypi/userland
+
+* Configuration
+
+Here are the most common kernel configurations:
+
+ 1. BCM2835 target SoC (ARM 32 bit)
+
+    Just use bcm2835_defconfig, which already has VCHIQ enabled.
+
+ 2. BCM2836/7 target SoC (ARM 32 bit)
+
+    Use multi_v7_defconfig as a base and then enable all VCHIQ options.
+
+ 3. BCM2837 target SoC (ARM 64 bit)
+
+    Use the defconfig, which has most of the VCHIQ options enabled.
+
+* Scenarios
+
+  * Initial test
+
+    Check that the driver is probed and /dev/vchiq is created (a minimal
+    C sketch for this check follows the ping test below).
+
+  * Functional test
+
+    Command: vchiq_test -f 10
+
+    Expected output:
+      Functional test - iters:10
+      ======== iteration 1 ========
+      Testing bulk transfer for alignment.
+      Testing bulk transfer at PAGE_SIZE.
+      ...
+
+  * Ping test
+
+    Command: vchiq_test -p
+
+    Expected output:
+      Ping test - service:echo, iters:1000, version 3
+      vchi ping (size 0) -> 57.000000us
+      vchi ping (size 0, 0 async, 0 oneway) -> 122.000000us
+      vchi bulk (size 0, 0 async, 0 oneway) -> 546.000000us
+      vchi bulk (size 0, 0 oneway) -> 230.000000us
+      vchi ping (size 0) -> 49.000000us
+      vchi ping (size 0, 0 async, 0 oneway) -> 70.000000us
+      vchi bulk (size 0, 0 async, 0 oneway) -> 296.000000us
+      vchi bulk (size 0, 0 oneway) -> 266.000000us
+      vchi ping (size 0, 1 async, 0 oneway) -> 65.000000us
+      vchi bulk (size 0, 0 oneway) -> 456.000000us
+      vchi ping (size 0, 2 async, 0 oneway) -> 74.000000us
+      vchi bulk (size 0, 0 oneway) -> 640.000000us
+      vchi ping (size 0, 10 async, 0 oneway) -> 125.000000us
+      vchi bulk (size 0, 0 oneway) -> 2309.000000us
+      vchi ping (size 0, 0 async, 1 oneway) -> 70.000000us
+      vchi ping (size 0, 0 async, 2 oneway) -> 76.000000us
+      vchi ping (size 0, 0 async, 10 oneway) -> 105.000000us
+      vchi ping (size 0, 10 async, 10 oneway) -> 165.000000us
+      vchi ping (size 0, 100 async, 0 oneway) -> nanus
+      vchi bulk (size 0, 0 oneway) -> nanus
+      vchi ping (size 0, 0 async, 100 oneway) -> nanus
+      vchi ping (size 0, 100 async, 100 oneway) -> infus
+      vchi ping (size 0, 200 async, 0 oneway) -> infus
+      ...
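+
+  * Scripted initial test
+
+    The initial check for /dev/vchiq can also be scripted. The following is
+    a minimal sketch in C; it only assumes that the character device node
+    exists once the driver has probed, and needs no knowledge of the ioctl
+    interface:
+
+      #include <fcntl.h>
+      #include <stdio.h>
+      #include <unistd.h>
+
+      int main(void)
+      {
+              /* The node is created by the driver on successful probe. */
+              int fd = open("/dev/vchiq", O_RDWR);
+
+              if (fd < 0) {
+                      perror("open /dev/vchiq");
+                      return 1;
+              }
+              close(fd);
+              printf("vchiq: device node present\n");
+              return 0;
+      }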
+ + * Debugfs test + + Command: cat /sys/kernel/debug/vchiq/state + + Example output: + State 0: CONNECTED + tx_pos=0x1e8(@43b0acda), rx_pos=0x170(@05493af8) + Version: 8 (min 3) + Stats: ctrl_tx_count=7, ctrl_rx_count=7, error_count=0 + Slots: 30 available (29 data), 0 recyclable, 0 stalls (0 data) + Platform: 2835 (VC master) + Local: slots 34-64 tx_pos=0x1e8 recycle=0x1f + Slots claimed: + DEBUG: SLOT_HANDLER_COUNT = 20(0x14) + DEBUG: SLOT_HANDLER_LINE = 1937(0x791) + DEBUG: PARSE_LINE = 1864(0x748) + DEBUG: PARSE_HEADER = -249155224(0xf1263168) + DEBUG: PARSE_MSGID = 67362817(0x403e001) + DEBUG: AWAIT_COMPLETION_LINE = 0(0x0) + DEBUG: DEQUEUE_MESSAGE_LINE = 0(0x0) + DEBUG: SERVICE_CALLBACK_LINE = 0(0x0) + DEBUG: MSG_QUEUE_FULL_COUNT = 0(0x0) + DEBUG: COMPLETION_QUEUE_FULL_COUNT = 0(0x0) + Remote: slots 2-32 tx_pos=0x170 recycle=0x1f + Slots claimed: + 2: 10/9 + DEBUG: SLOT_HANDLER_COUNT = 20(0x14) + DEBUG: SLOT_HANDLER_LINE = 1851(0x73b) + DEBUG: PARSE_LINE = 1827(0x723) + DEBUG: PARSE_HEADER = -150330912(0xf70a21e0) + DEBUG: PARSE_MSGID = 67113022(0x400103e) + DEBUG: AWAIT_COMPLETION_LINE = 0(0x0) + DEBUG: DEQUEUE_MESSAGE_LINE = 0(0x0) + DEBUG: SERVICE_CALLBACK_LINE = 0(0x0) + DEBUG: MSG_QUEUE_FULL_COUNT = 0(0x0) + DEBUG: COMPLETION_QUEUE_FULL_COUNT = 0(0x0) + Service 0: LISTENING (ref 1) 'PEEK little-endian (0x4b454550)' remote n/a (msg use 0/3840, slot use 0/15) + Bulk: tx_pending=0 (size 0), rx_pending=0 (size 0) + Ctrl: tx_count=0, tx_bytes=0, rx_count=0, rx_bytes=0 + Bulk: tx_count=0, tx_bytes=0, rx_count=0, rx_bytes=0 + 0 quota stalls, 0 slot stalls, 0 bulk stalls, 0 aborted, 0 errors + instance b511f60b diff --git a/drivers/platform/raspberrypi/vchiq-interface/TODO b/drivers/platform/raspberrypi/vchiq-interface/TODO new file mode 100644 index 000000000000..2357dae413f1 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/TODO @@ -0,0 +1,4 @@ +* Documentation + +A short top-down description of this driver's architecture (function of +kthreads, userspace, limitations) could be very helpful for reviewers. diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c new file mode 100644 index 000000000000..6a7b96d3dae6 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c @@ -0,0 +1,1477 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. + * Copyright (c) 2010-2012 Broadcom. All rights reserved. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched/signal.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/cdev.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/device/bus.h> +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/bug.h> +#include <linux/completion.h> +#include <linux/list.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/compat.h> +#include <linux/dma-mapping.h> +#include <linux/rcupdate.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/uaccess.h> +#include <soc/bcm2835/raspberrypi-firmware.h> + +#include <linux/raspberrypi/vchiq_core.h> +#include <linux/raspberrypi/vchiq_arm.h> +#include <linux/raspberrypi/vchiq_bus.h> +#include <linux/raspberrypi/vchiq_debugfs.h> + +#include "vchiq_ioctl.h" + +#define DEVICE_NAME "vchiq" + +#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32) + +#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2) + +#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1 + +#define BELL0 0x00 + +#define ARM_DS_ACTIVE BIT(2) + +/* Override the default prefix, which would be vchiq_arm (from the filename) */ +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX DEVICE_NAME "." + +#define KEEPALIVE_VER 1 +#define KEEPALIVE_VER_MIN KEEPALIVE_VER + +/* + * The devices implemented in the VCHIQ firmware are not discoverable, + * so we need to maintain a list of them in order to register them with + * the interface. + */ +static struct vchiq_device *bcm2835_audio; + +static const struct vchiq_platform_info bcm2835_info = { + .cache_line_size = 32, +}; + +static const struct vchiq_platform_info bcm2836_info = { + .cache_line_size = 64, +}; + +struct vchiq_arm_state { + /* + * Keepalive-related data + * + * The keepalive mechanism was retro-fitted to VCHIQ to allow active + * services to prevent the system from suspending. + * This feature is not used on Raspberry Pi devices. + */ + struct task_struct *ka_thread; + struct completion ka_evt; + atomic_t ka_use_count; + atomic_t ka_use_ack_count; + atomic_t ka_release_count; + + rwlock_t susp_res_lock; + + struct vchiq_state *state; + + /* + * Global use count for videocore. + * This is equal to the sum of the use counts for all services. When + * this hits zero the videocore suspend procedure will be initiated. + */ + int videocore_use_count; + + /* + * Use count to track requests from videocore peer. + * This use count is not associated with a service, so needs to be + * tracked separately with the state. + */ + int peer_use_count; + + /* + * Flag to indicate that the first vchiq connect has made it through. + * This means that both sides should be fully ready, and we should + * be able to suspend after this point. + */ + int first_connect; +}; + +static int +vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_bulk *bulk_params); + +static irqreturn_t +vchiq_doorbell_irq(int irq, void *dev_id) +{ + struct vchiq_state *state = dev_id; + struct vchiq_drv_mgmt *mgmt; + irqreturn_t ret = IRQ_NONE; + unsigned int status; + + mgmt = dev_get_drvdata(state->dev); + + /* Read (and clear) the doorbell */ + status = readl(mgmt->regs + BELL0); + + if (status & ARM_DS_ACTIVE) { /* Was the doorbell rung? 
*/ + remote_event_pollall(state); + ret = IRQ_HANDLED; + } + + return ret; +} + +/* + * This function is called by the vchiq stack once it has been connected to + * the videocore and clients can start to use the stack. + */ +static void vchiq_call_connected_callbacks(struct vchiq_drv_mgmt *drv_mgmt) +{ + int i; + + if (mutex_lock_killable(&drv_mgmt->connected_mutex)) + return; + + for (i = 0; i < drv_mgmt->num_deferred_callbacks; i++) + drv_mgmt->deferred_callback[i](); + + drv_mgmt->num_deferred_callbacks = 0; + drv_mgmt->connected = true; + mutex_unlock(&drv_mgmt->connected_mutex); +} + +/* + * This function is used to defer initialization until the vchiq stack is + * initialized. If the stack is already initialized, then the callback will + * be made immediately, otherwise it will be deferred until + * vchiq_call_connected_callbacks is called. + */ +void vchiq_add_connected_callback(struct vchiq_device *device, void (*callback)(void)) +{ + struct vchiq_drv_mgmt *drv_mgmt = device->drv_mgmt; + + if (mutex_lock_killable(&drv_mgmt->connected_mutex)) + return; + + if (drv_mgmt->connected) { + /* We're already connected. Call the callback immediately. */ + callback(); + } else { + if (drv_mgmt->num_deferred_callbacks >= VCHIQ_DRV_MAX_CALLBACKS) { + dev_err(&device->dev, + "core: deferred callbacks(%d) exceeded the maximum limit(%d)\n", + drv_mgmt->num_deferred_callbacks, VCHIQ_DRV_MAX_CALLBACKS); + } else { + drv_mgmt->deferred_callback[drv_mgmt->num_deferred_callbacks] = + callback; + drv_mgmt->num_deferred_callbacks++; + } + } + mutex_unlock(&drv_mgmt->connected_mutex); +} +EXPORT_SYMBOL(vchiq_add_connected_callback); + +static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state) +{ + struct device *dev = &pdev->dev; + struct vchiq_drv_mgmt *drv_mgmt = platform_get_drvdata(pdev); + struct rpi_firmware *fw = drv_mgmt->fw; + struct vchiq_slot_zero *vchiq_slot_zero; + void *slot_mem; + dma_addr_t slot_phys; + u32 channelbase; + int slot_mem_size, frag_mem_size; + int err, irq, i; + + /* + * VCHI messages between the CPU and firmware use + * 32-bit bus addresses. 
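+	 * Set a 32-bit DMA mask to match, before the coherent slot and
+	 * fragment memory is allocated below, so that the buffer is
+	 * addressable by the firmware.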
+ */ + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + + if (err < 0) + return err; + + drv_mgmt->fragments_size = 2 * drv_mgmt->info->cache_line_size; + + /* Allocate space for the channels in coherent memory */ + slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE); + frag_mem_size = PAGE_ALIGN(drv_mgmt->fragments_size * MAX_FRAGMENTS); + + slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size, + &slot_phys, GFP_KERNEL); + if (!slot_mem) { + dev_err(dev, "could not allocate DMA memory\n"); + return -ENOMEM; + } + + WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0); + + vchiq_slot_zero = vchiq_init_slots(dev, slot_mem, slot_mem_size); + if (!vchiq_slot_zero) + return -ENOMEM; + + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] = + (int)slot_phys + slot_mem_size; + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] = + MAX_FRAGMENTS; + + drv_mgmt->fragments_base = (char *)slot_mem + slot_mem_size; + + drv_mgmt->free_fragments = drv_mgmt->fragments_base; + for (i = 0; i < (MAX_FRAGMENTS - 1); i++) { + *(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] = + &drv_mgmt->fragments_base[(i + 1) * drv_mgmt->fragments_size]; + } + *(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] = NULL; + sema_init(&drv_mgmt->free_fragments_sema, MAX_FRAGMENTS); + sema_init(&drv_mgmt->free_fragments_mutex, 1); + + err = vchiq_init_state(state, vchiq_slot_zero, dev); + if (err) + return err; + + drv_mgmt->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(drv_mgmt->regs)) + return PTR_ERR(drv_mgmt->regs); + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return irq; + + err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL, + "VCHIQ doorbell", state); + if (err) { + dev_err(dev, "failed to register irq=%d\n", irq); + return err; + } + + /* Send the base address of the slots to VideoCore */ + channelbase = slot_phys; + err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT, + &channelbase, sizeof(channelbase)); + if (err) { + dev_err(dev, "failed to send firmware property: %d\n", err); + return err; + } + + if (channelbase) { + dev_err(dev, "failed to set channelbase (response: %x)\n", + channelbase); + return -ENXIO; + } + + dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %p, phys %pad)\n", + vchiq_slot_zero, &slot_phys); + + mutex_init(&drv_mgmt->connected_mutex); + vchiq_call_connected_callbacks(drv_mgmt); + + return 0; +} + +int +vchiq_platform_init_state(struct vchiq_state *state) +{ + struct vchiq_arm_state *platform_state; + + platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL); + if (!platform_state) + return -ENOMEM; + + rwlock_init(&platform_state->susp_res_lock); + + init_completion(&platform_state->ka_evt); + atomic_set(&platform_state->ka_use_count, 0); + atomic_set(&platform_state->ka_use_ack_count, 0); + atomic_set(&platform_state->ka_release_count, 0); + + platform_state->state = state; + + state->platform_state = (struct opaque_platform_state *)platform_state; + + return 0; +} + +static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state) +{ + return (struct vchiq_arm_state *)state->platform_state; +} + +static void +vchiq_platform_uninit(struct vchiq_drv_mgmt *mgmt) +{ + struct vchiq_arm_state *arm_state; + + kthread_stop(mgmt->state.sync_thread); + kthread_stop(mgmt->state.recycle_thread); + kthread_stop(mgmt->state.slot_handler_thread); + + arm_state = vchiq_platform_get_arm_state(&mgmt->state); + if 
(!IS_ERR_OR_NULL(arm_state->ka_thread))
+		kthread_stop(arm_state->ka_thread);
+}
+
+void vchiq_dump_platform_state(struct seq_file *f)
+{
+	seq_puts(f, "  Platform: 2835 (VC master)\n");
+}
+
+#define VCHIQ_INIT_RETRIES 10
+int vchiq_initialise(struct vchiq_state *state, struct vchiq_instance **instance_out)
+{
+	struct vchiq_instance *instance = NULL;
+	int i, ret;
+
+	/*
+	 * VideoCore may not be ready due to boot up timing.
+	 * It may never be ready if kernel and firmware are mismatched, so
+	 * don't block forever.
+	 */
+	for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
+		if (vchiq_remote_initialised(state))
+			break;
+		usleep_range(500, 600);
+	}
+	if (i == VCHIQ_INIT_RETRIES) {
+		dev_err(state->dev, "core: %s: Videocore not initialized\n", __func__);
+		ret = -ENOTCONN;
+		goto failed;
+	} else if (i > 0) {
+		dev_warn(state->dev, "core: %s: videocore initialized after %d retries\n",
+			 __func__, i);
+	}
+
+	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+	if (!instance) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+
+	instance->connected = 0;
+	instance->state = state;
+	mutex_init(&instance->bulk_waiter_list_mutex);
+	INIT_LIST_HEAD(&instance->bulk_waiter_list);
+
+	*instance_out = instance;
+
+	ret = 0;
+
+failed:
+	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(vchiq_initialise);
+
+void free_bulk_waiter(struct vchiq_instance *instance)
+{
+	struct bulk_waiter_node *waiter, *next;
+
+	list_for_each_entry_safe(waiter, next,
+				 &instance->bulk_waiter_list, list) {
+		list_del(&waiter->list);
+		dev_dbg(instance->state->dev,
+			"arm: bulk_waiter - cleaned up %p for pid %d\n",
+			waiter, waiter->pid);
+		kfree(waiter);
+	}
+}
+
+int vchiq_shutdown(struct vchiq_instance *instance)
+{
+	struct vchiq_state *state = instance->state;
+	int ret = 0;
+
+	mutex_lock(&state->mutex);
+
+	/* Remove all services */
+	vchiq_shutdown_internal(state, instance);
+
+	mutex_unlock(&state->mutex);
+
+	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
+
+	free_bulk_waiter(instance);
+	kfree(instance);
+
+	return ret;
+}
+EXPORT_SYMBOL(vchiq_shutdown);
+
+static int vchiq_is_connected(struct vchiq_instance *instance)
+{
+	return instance->connected;
+}
+
+int vchiq_connect(struct vchiq_instance *instance)
+{
+	struct vchiq_state *state = instance->state;
+	int ret;
+
+	if (mutex_lock_killable(&state->mutex)) {
+		dev_dbg(state->dev,
+			"core: call to mutex_lock failed\n");
+		ret = -EAGAIN;
+		goto failed;
+	}
+	ret = vchiq_connect_internal(state, instance);
+
+	if (!ret)
+		instance->connected = 1;
+
+	mutex_unlock(&state->mutex);
+
+failed:
+	dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(vchiq_connect);
+
+static int
+vchiq_add_service(struct vchiq_instance *instance,
+		  const struct vchiq_service_params_kernel *params,
+		  unsigned int *phandle)
+{
+	struct vchiq_state *state = instance->state;
+	struct vchiq_service *service = NULL;
+	int srvstate, ret;
+
+	*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
+
+	srvstate = vchiq_is_connected(instance)
+		   ? 
VCHIQ_SRVSTATE_LISTENING + : VCHIQ_SRVSTATE_HIDDEN; + + service = vchiq_add_service_internal(state, params, srvstate, instance, NULL); + + if (service) { + *phandle = service->handle; + ret = 0; + } else { + ret = -EINVAL; + } + + dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret); + + return ret; +} + +int +vchiq_open_service(struct vchiq_instance *instance, + const struct vchiq_service_params_kernel *params, + unsigned int *phandle) +{ + struct vchiq_state *state = instance->state; + struct vchiq_service *service = NULL; + int ret = -EINVAL; + + *phandle = VCHIQ_SERVICE_HANDLE_INVALID; + + if (!vchiq_is_connected(instance)) + goto failed; + + service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL); + + if (service) { + *phandle = service->handle; + ret = vchiq_open_service_internal(service, current->pid); + if (ret) { + vchiq_remove_service(instance, service->handle); + *phandle = VCHIQ_SERVICE_HANDLE_INVALID; + } + } + +failed: + dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret); + + return ret; +} +EXPORT_SYMBOL(vchiq_open_service); + +int +vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data, + unsigned int size, void *userdata, enum vchiq_bulk_mode mode) +{ + struct vchiq_bulk bulk_params = {}; + int ret; + + switch (mode) { + case VCHIQ_BULK_MODE_NOCALLBACK: + case VCHIQ_BULK_MODE_CALLBACK: + + bulk_params.offset = (void *)data; + bulk_params.mode = mode; + bulk_params.size = size; + bulk_params.cb_data = userdata; + bulk_params.dir = VCHIQ_BULK_TRANSMIT; + + ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params); + break; + case VCHIQ_BULK_MODE_BLOCKING: + bulk_params.offset = (void *)data; + bulk_params.mode = mode; + bulk_params.size = size; + bulk_params.dir = VCHIQ_BULK_TRANSMIT; + + ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params); + break; + default: + return -EINVAL; + } + + return ret; +} +EXPORT_SYMBOL(vchiq_bulk_transmit); + +int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle, + void *data, unsigned int size, void *userdata, + enum vchiq_bulk_mode mode) +{ + struct vchiq_bulk bulk_params = {}; + int ret; + + switch (mode) { + case VCHIQ_BULK_MODE_NOCALLBACK: + case VCHIQ_BULK_MODE_CALLBACK: + + bulk_params.offset = (void *)data; + bulk_params.mode = mode; + bulk_params.size = size; + bulk_params.cb_data = userdata; + bulk_params.dir = VCHIQ_BULK_RECEIVE; + + ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params); + break; + case VCHIQ_BULK_MODE_BLOCKING: + bulk_params.offset = (void *)data; + bulk_params.mode = mode; + bulk_params.size = size; + bulk_params.dir = VCHIQ_BULK_RECEIVE; + + ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params); + break; + default: + return -EINVAL; + } + + return ret; +} +EXPORT_SYMBOL(vchiq_bulk_receive); + +static int +vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_bulk *bulk_params) +{ + struct vchiq_service *service; + struct bulk_waiter_node *waiter = NULL, *iter; + int ret; + + service = find_service_by_handle(instance, handle); + if (!service) + return -EINVAL; + + vchiq_service_put(service); + + mutex_lock(&instance->bulk_waiter_list_mutex); + list_for_each_entry(iter, &instance->bulk_waiter_list, list) { + if (iter->pid == current->pid) { + list_del(&iter->list); + waiter = iter; + break; + } + } + mutex_unlock(&instance->bulk_waiter_list_mutex); + + if (waiter) { + struct vchiq_bulk *bulk = 
waiter->bulk_waiter.bulk; + + if (bulk) { + /* This thread has an outstanding bulk transfer. */ + /* FIXME: why compare a dma address to a pointer? */ + if ((bulk->dma_addr != (dma_addr_t)(uintptr_t)bulk_params->dma_addr) || + (bulk->size != bulk_params->size)) { + /* + * This is not a retry of the previous one. + * Cancel the signal when the transfer completes. + */ + spin_lock(&service->state->bulk_waiter_spinlock); + bulk->waiter = NULL; + spin_unlock(&service->state->bulk_waiter_spinlock); + } + } + } else { + waiter = kzalloc(sizeof(*waiter), GFP_KERNEL); + if (!waiter) + return -ENOMEM; + } + + bulk_params->waiter = &waiter->bulk_waiter; + + ret = vchiq_bulk_xfer_blocking(instance, handle, bulk_params); + if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) { + struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk; + + if (bulk) { + /* Cancel the signal when the transfer completes. */ + spin_lock(&service->state->bulk_waiter_spinlock); + bulk->waiter = NULL; + spin_unlock(&service->state->bulk_waiter_spinlock); + } + kfree(waiter); + } else { + waiter->pid = current->pid; + mutex_lock(&instance->bulk_waiter_list_mutex); + list_add(&waiter->list, &instance->bulk_waiter_list); + mutex_unlock(&instance->bulk_waiter_list_mutex); + dev_dbg(instance->state->dev, "arm: saved bulk_waiter %p for pid %d\n", + waiter, current->pid); + } + + return ret; +} + +static int +add_completion(struct vchiq_instance *instance, enum vchiq_reason reason, + struct vchiq_header *header, struct user_service *user_service, + void *cb_data, void __user *cb_userdata) +{ + struct vchiq_completion_data_kernel *completion; + struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev); + int insert; + + DEBUG_INITIALISE(mgmt->state.local); + + insert = instance->completion_insert; + while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) { + /* Out of space - wait for the client */ + DEBUG_TRACE(SERVICE_CALLBACK_LINE); + dev_dbg(instance->state->dev, "core: completion queue full\n"); + DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT); + if (wait_for_completion_interruptible(&instance->remove_event)) { + dev_dbg(instance->state->dev, "arm: service_callback interrupted\n"); + return -EAGAIN; + } else if (instance->closing) { + dev_dbg(instance->state->dev, "arm: service_callback closing\n"); + return 0; + } + DEBUG_TRACE(SERVICE_CALLBACK_LINE); + } + + completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)]; + + completion->header = header; + completion->reason = reason; + /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */ + completion->service_userdata = user_service->service; + completion->cb_data = cb_data; + completion->cb_userdata = cb_userdata; + + if (reason == VCHIQ_SERVICE_CLOSED) { + /* + * Take an extra reference, to be held until + * this CLOSED notification is delivered. + */ + vchiq_service_get(user_service->service); + if (instance->use_close_delivered) + user_service->close_pending = 1; + } + + /* + * A write barrier is needed here to ensure that the entire completion + * record is written out before the insert point. 
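+	 * The consumer must pair this with a read barrier after reading the
+	 * insert point and before dereferencing the record.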
+	 */
+	wmb();
+
+	if (reason == VCHIQ_MESSAGE_AVAILABLE)
+		user_service->message_available_pos = insert;
+
+	insert++;
+	instance->completion_insert = insert;
+
+	complete(&instance->insert_event);
+
+	return 0;
+}
+
+static int
+service_single_message(struct vchiq_instance *instance,
+		       enum vchiq_reason reason, struct vchiq_service *service,
+		       void *cb_data, void __user *cb_userdata)
+{
+	struct user_service *user_service;
+
+	user_service = (struct user_service *)service->base.userdata;
+
+	dev_dbg(service->state->dev, "arm: msg queue full\n");
+	/*
+	 * If there is no MESSAGE_AVAILABLE in the completion
+	 * queue, add one
+	 */
+	if ((user_service->message_available_pos -
+	     instance->completion_remove) < 0) {
+		int ret;
+
+		dev_dbg(instance->state->dev,
+			"arm: Inserting extra MESSAGE_AVAILABLE\n");
+		ret = add_completion(instance, reason, NULL, user_service,
+				     cb_data, cb_userdata);
+		if (ret)
+			return ret;
+	}
+
+	if (wait_for_completion_interruptible(&user_service->remove_event)) {
+		dev_dbg(instance->state->dev, "arm: interrupted\n");
+		return -EAGAIN;
+	} else if (instance->closing) {
+		dev_dbg(instance->state->dev, "arm: closing\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int
+service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
+		 struct vchiq_header *header, unsigned int handle,
+		 void *cb_data, void __user *cb_userdata)
+{
+	/*
+	 * How do we ensure the callback goes to the right client?
+	 * The service_user data points to a user_service record
+	 * containing the original callback and the user state structure, which
+	 * contains a circular buffer for completion records.
+	 */
+	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev);
+	struct user_service *user_service;
+	struct vchiq_service *service;
+	bool skip_completion = false;
+
+	DEBUG_INITIALISE(mgmt->state.local);
+
+	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+
+	rcu_read_lock();
+	service = handle_to_service(instance, handle);
+	if (WARN_ON(!service)) {
+		rcu_read_unlock();
+		return 0;
+	}
+
+	user_service = (struct user_service *)service->base.userdata;
+
+	if (instance->closing) {
+		rcu_read_unlock();
+		return 0;
+	}
+
+	/*
+	 * Since we hop between different synchronization mechanisms here,
+	 * taking an extra reference results in a simpler implementation.
+	 */
+	vchiq_service_get(service);
+	rcu_read_unlock();
+
+	dev_dbg(service->state->dev,
+		"arm: service %p(%d,%p), reason %d, header %p, instance %p, cb_data %p, cb_userdata %p\n",
+		user_service, service->localport, user_service->userdata,
+		reason, header, instance, cb_data, cb_userdata);
+
+	if (header && user_service->is_vchi) {
+		spin_lock(&service->state->msg_queue_spinlock);
+		while (user_service->msg_insert ==
+			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
+			int ret;
+
+			spin_unlock(&service->state->msg_queue_spinlock);
+			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
+
+			ret = service_single_message(instance, reason, service,
+						     cb_data, cb_userdata);
+			if (ret) {
+				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+				vchiq_service_put(service);
+				return ret;
+			}
+			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+			spin_lock(&service->state->msg_queue_spinlock);
+		}
+
+		user_service->msg_queue[user_service->msg_insert &
+			(MSG_QUEUE_SIZE - 1)] = header;
+		user_service->msg_insert++;
+
+		/*
+		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
+		 * there is a MESSAGE_AVAILABLE in the completion queue then
+		 * bypass the completion queue.
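+		 * The waiting thread then takes the message straight from
+		 * the per-service msg_queue, so a second wake-up through the
+		 * completion queue would be redundant.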
+ */ + if (((user_service->message_available_pos - + instance->completion_remove) >= 0) || + user_service->dequeue_pending) { + user_service->dequeue_pending = 0; + skip_completion = true; + } + + spin_unlock(&service->state->msg_queue_spinlock); + complete(&user_service->insert_event); + + header = NULL; + } + DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); + + if (skip_completion) + return 0; + + return add_completion(instance, reason, header, user_service, + cb_data, cb_userdata); +} + +void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f) +{ + int i; + + if (!vchiq_remote_initialised(state)) + return; + + /* + * There is no list of instances, so instead scan all services, + * marking those that have been dumped. + */ + + rcu_read_lock(); + for (i = 0; i < state->unused_service; i++) { + struct vchiq_service *service; + struct vchiq_instance *instance; + + service = rcu_dereference(state->services[i]); + if (!service || service->base.callback != service_callback) + continue; + + instance = service->instance; + if (instance) + instance->mark = 0; + } + rcu_read_unlock(); + + for (i = 0; i < state->unused_service; i++) { + struct vchiq_service *service; + struct vchiq_instance *instance; + + rcu_read_lock(); + service = rcu_dereference(state->services[i]); + if (!service || service->base.callback != service_callback) { + rcu_read_unlock(); + continue; + } + + instance = service->instance; + if (!instance || instance->mark) { + rcu_read_unlock(); + continue; + } + rcu_read_unlock(); + + seq_printf(f, "Instance %pK: pid %d,%s completions %d/%d\n", + instance, instance->pid, + instance->connected ? " connected, " : + "", + instance->completion_insert - + instance->completion_remove, + MAX_COMPLETIONS); + instance->mark = 1; + } +} + +void vchiq_dump_platform_service_state(struct seq_file *f, + struct vchiq_service *service) +{ + struct user_service *user_service = + (struct user_service *)service->base.userdata; + + seq_printf(f, " instance %pK", service->instance); + + if ((service->base.callback == service_callback) && user_service->is_vchi) { + seq_printf(f, ", %d/%d messages", + user_service->msg_insert - user_service->msg_remove, + MSG_QUEUE_SIZE); + + if (user_service->dequeue_pending) + seq_puts(f, " (dequeue pending)"); + } + + seq_puts(f, "\n"); +} + +/* + * Autosuspend related functionality + */ + +static int +vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance, + enum vchiq_reason reason, + struct vchiq_header *header, + unsigned int service_user, + void *cb_data, void __user *cb_userdata) +{ + dev_err(instance->state->dev, "suspend: %s: callback reason %d\n", + __func__, reason); + return 0; +} + +static int +vchiq_keepalive_thread_func(void *v) +{ + struct vchiq_state *state = (struct vchiq_state *)v; + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + struct vchiq_instance *instance; + unsigned int ka_handle; + int ret; + + struct vchiq_service_params_kernel params = { + .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'), + .callback = vchiq_keepalive_vchiq_callback, + .version = KEEPALIVE_VER, + .version_min = KEEPALIVE_VER_MIN + }; + + ret = vchiq_initialise(state, &instance); + if (ret) { + dev_err(state->dev, "suspend: %s: vchiq_initialise failed %d\n", __func__, ret); + goto exit; + } + + ret = vchiq_connect(instance); + if (ret) { + dev_err(state->dev, "suspend: %s: vchiq_connect failed %d\n", __func__, ret); + goto shutdown; + } + + ret = vchiq_add_service(instance, ¶ms, &ka_handle); + if 
(ret) { + dev_err(state->dev, "suspend: %s: vchiq_open_service failed %d\n", + __func__, ret); + goto shutdown; + } + + while (!kthread_should_stop()) { + long rc = 0, uc = 0; + + if (wait_for_completion_interruptible(&arm_state->ka_evt)) { + dev_dbg(state->dev, "suspend: %s: interrupted\n", __func__); + flush_signals(current); + continue; + } + + /* + * read and clear counters. Do release_count then use_count to + * prevent getting more releases than uses + */ + rc = atomic_xchg(&arm_state->ka_release_count, 0); + uc = atomic_xchg(&arm_state->ka_use_count, 0); + + /* + * Call use/release service the requisite number of times. + * Process use before release so use counts don't go negative + */ + while (uc--) { + atomic_inc(&arm_state->ka_use_ack_count); + ret = vchiq_use_service(instance, ka_handle); + if (ret) { + dev_err(state->dev, "suspend: %s: vchiq_use_service error %d\n", + __func__, ret); + } + } + while (rc--) { + ret = vchiq_release_service(instance, ka_handle); + if (ret) { + dev_err(state->dev, "suspend: %s: vchiq_release_service error %d\n", + __func__, ret); + } + } + } + +shutdown: + vchiq_shutdown(instance); +exit: + return 0; +} + +int +vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service, + enum USE_TYPE_E use_type) +{ + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + int ret = 0; + char entity[64]; + int *entity_uc; + int local_uc; + + if (!arm_state) { + ret = -EINVAL; + goto out; + } + + if (use_type == USE_TYPE_VCHIQ) { + snprintf(entity, sizeof(entity), "VCHIQ: "); + entity_uc = &arm_state->peer_use_count; + } else if (service) { + snprintf(entity, sizeof(entity), "%p4cc:%03d", + &service->base.fourcc, + service->client_id); + entity_uc = &service->service_use_count; + } else { + dev_err(state->dev, "suspend: %s: null service ptr\n", __func__); + ret = -EINVAL; + goto out; + } + + write_lock_bh(&arm_state->susp_res_lock); + local_uc = ++arm_state->videocore_use_count; + ++(*entity_uc); + + dev_dbg(state->dev, "suspend: %s count %d, state count %d\n", + entity, *entity_uc, local_uc); + + write_unlock_bh(&arm_state->susp_res_lock); + + if (!ret) { + int ret = 0; + long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0); + + while (ack_cnt && !ret) { + /* Send the use notify to videocore */ + ret = vchiq_send_remote_use_active(state); + if (!ret) + ack_cnt--; + else + atomic_add(ack_cnt, &arm_state->ka_use_ack_count); + } + } + +out: + dev_dbg(state->dev, "suspend: exit %d\n", ret); + return ret; +} + +int +vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service) +{ + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + int ret = 0; + char entity[64]; + int *entity_uc; + + if (!arm_state) { + ret = -EINVAL; + goto out; + } + + if (service) { + snprintf(entity, sizeof(entity), "%p4cc:%03d", + &service->base.fourcc, + service->client_id); + entity_uc = &service->service_use_count; + } else { + snprintf(entity, sizeof(entity), "PEER: "); + entity_uc = &arm_state->peer_use_count; + } + + write_lock_bh(&arm_state->susp_res_lock); + if (!arm_state->videocore_use_count || !(*entity_uc)) { + WARN_ON(!arm_state->videocore_use_count); + WARN_ON(!(*entity_uc)); + ret = -EINVAL; + goto unlock; + } + --arm_state->videocore_use_count; + --(*entity_uc); + + dev_dbg(state->dev, "suspend: %s count %d, state count %d\n", + entity, *entity_uc, arm_state->videocore_use_count); + +unlock: + write_unlock_bh(&arm_state->susp_res_lock); + +out: + dev_dbg(state->dev, "suspend: exit %d\n", ret); + 
return ret; +} + +void +vchiq_on_remote_use(struct vchiq_state *state) +{ + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + + atomic_inc(&arm_state->ka_use_count); + complete(&arm_state->ka_evt); +} + +void +vchiq_on_remote_release(struct vchiq_state *state) +{ + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + + atomic_inc(&arm_state->ka_release_count); + complete(&arm_state->ka_evt); +} + +int +vchiq_use_service_internal(struct vchiq_service *service) +{ + return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE); +} + +int +vchiq_release_service_internal(struct vchiq_service *service) +{ + return vchiq_release_internal(service->state, service); +} + +struct vchiq_debugfs_node * +vchiq_instance_get_debugfs_node(struct vchiq_instance *instance) +{ + return &instance->debugfs_node; +} + +int +vchiq_instance_get_use_count(struct vchiq_instance *instance) +{ + struct vchiq_service *service; + int use_count = 0, i; + + i = 0; + rcu_read_lock(); + while ((service = __next_service_by_instance(instance->state, + instance, &i))) + use_count += service->service_use_count; + rcu_read_unlock(); + return use_count; +} + +int +vchiq_instance_get_pid(struct vchiq_instance *instance) +{ + return instance->pid; +} + +int +vchiq_instance_get_trace(struct vchiq_instance *instance) +{ + return instance->trace; +} + +void +vchiq_instance_set_trace(struct vchiq_instance *instance, int trace) +{ + struct vchiq_service *service; + int i; + + i = 0; + rcu_read_lock(); + while ((service = __next_service_by_instance(instance->state, + instance, &i))) + service->trace = trace; + rcu_read_unlock(); + instance->trace = (trace != 0); +} + +int +vchiq_use_service(struct vchiq_instance *instance, unsigned int handle) +{ + int ret = -EINVAL; + struct vchiq_service *service = find_service_by_handle(instance, handle); + + if (service) { + ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE); + vchiq_service_put(service); + } + return ret; +} +EXPORT_SYMBOL(vchiq_use_service); + +int +vchiq_release_service(struct vchiq_instance *instance, unsigned int handle) +{ + int ret = -EINVAL; + struct vchiq_service *service = find_service_by_handle(instance, handle); + + if (service) { + ret = vchiq_release_internal(service->state, service); + vchiq_service_put(service); + } + return ret; +} +EXPORT_SYMBOL(vchiq_release_service); + +struct service_data_struct { + int fourcc; + int clientid; + int use_count; +}; + +void +vchiq_dump_service_use_state(struct vchiq_state *state) +{ + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + struct service_data_struct *service_data; + int i, found = 0; + /* + * If there's more than 64 services, only dump ones with + * non-zero counts + */ + int only_nonzero = 0; + static const char *nz = "<-- preventing suspend"; + + int peer_count; + int vc_use_count; + int active_services; + + if (!arm_state) + return; + + service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data), + GFP_KERNEL); + if (!service_data) + return; + + read_lock_bh(&arm_state->susp_res_lock); + peer_count = arm_state->peer_use_count; + vc_use_count = arm_state->videocore_use_count; + active_services = state->unused_service; + if (active_services > MAX_SERVICES) + only_nonzero = 1; + + rcu_read_lock(); + for (i = 0; i < active_services; i++) { + struct vchiq_service *service_ptr = + rcu_dereference(state->services[i]); + + if (!service_ptr) + continue; + + if (only_nonzero && !service_ptr->service_use_count) + continue; + + 
if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE) + continue; + + service_data[found].fourcc = service_ptr->base.fourcc; + service_data[found].clientid = service_ptr->client_id; + service_data[found].use_count = service_ptr->service_use_count; + found++; + if (found >= MAX_SERVICES) + break; + } + rcu_read_unlock(); + + read_unlock_bh(&arm_state->susp_res_lock); + + if (only_nonzero) + dev_warn(state->dev, + "suspend: Too many active services (%d). Only dumping up to first %d services with non-zero use-count\n", + active_services, found); + + for (i = 0; i < found; i++) { + dev_warn(state->dev, + "suspend: %p4cc:%d service count %d %s\n", + &service_data[i].fourcc, + service_data[i].clientid, service_data[i].use_count, + service_data[i].use_count ? nz : ""); + } + dev_warn(state->dev, "suspend: VCHIQ use count %d\n", peer_count); + dev_warn(state->dev, "suspend: Overall vchiq instance use count %d\n", vc_use_count); + + kfree(service_data); +} + +int +vchiq_check_service(struct vchiq_service *service) +{ + struct vchiq_arm_state *arm_state; + int ret = -EINVAL; + + if (!service || !service->state) + goto out; + + arm_state = vchiq_platform_get_arm_state(service->state); + + read_lock_bh(&arm_state->susp_res_lock); + if (service->service_use_count) + ret = 0; + read_unlock_bh(&arm_state->susp_res_lock); + + if (ret) { + dev_err(service->state->dev, + "suspend: %s: %p4cc:%d service count %d, state count %d\n", + __func__, &service->base.fourcc, service->client_id, + service->service_use_count, arm_state->videocore_use_count); + vchiq_dump_service_use_state(service->state); + } +out: + return ret; +} + +void vchiq_platform_conn_state_changed(struct vchiq_state *state, + enum vchiq_connstate oldstate, + enum vchiq_connstate newstate) +{ + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + char threadname[16]; + + dev_dbg(state->dev, "suspend: %d: %s->%s\n", + state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate)); + if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED) + return; + + write_lock_bh(&arm_state->susp_res_lock); + if (arm_state->first_connect) { + write_unlock_bh(&arm_state->susp_res_lock); + return; + } + + arm_state->first_connect = 1; + write_unlock_bh(&arm_state->susp_res_lock); + snprintf(threadname, sizeof(threadname), "vchiq-keep/%d", + state->id); + arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func, + (void *)state, + threadname); + if (IS_ERR(arm_state->ka_thread)) { + dev_err(state->dev, "suspend: Couldn't create thread %s\n", + threadname); + } else { + wake_up_process(arm_state->ka_thread); + } +} + +static const struct of_device_id vchiq_of_match[] = { + { .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_info }, + { .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_info }, + {}, +}; +MODULE_DEVICE_TABLE(of, vchiq_of_match); + +static int vchiq_probe(struct platform_device *pdev) +{ + const struct vchiq_platform_info *info; + struct vchiq_drv_mgmt *mgmt; + int ret; + + info = of_device_get_match_data(&pdev->dev); + if (!info) + return -EINVAL; + + struct device_node *fw_node __free(device_node) = + of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware"); + if (!fw_node) { + dev_err(&pdev->dev, "Missing firmware node\n"); + return -ENOENT; + } + + mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL); + if (!mgmt) + return -ENOMEM; + + mgmt->fw = devm_rpi_firmware_get(&pdev->dev, fw_node); + if (!mgmt->fw) + return -EPROBE_DEFER; + + mgmt->info = info; + platform_set_drvdata(pdev, mgmt); + + 
ret = vchiq_platform_init(pdev, &mgmt->state); + if (ret) { + dev_err(&pdev->dev, "arm: Could not initialize vchiq platform\n"); + return ret; + } + + dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n", + VCHIQ_VERSION, VCHIQ_VERSION_MIN); + + /* + * Simply exit on error since the function handles cleanup in + * cases of failure. + */ + ret = vchiq_register_chrdev(&pdev->dev); + if (ret) { + dev_err(&pdev->dev, "arm: Failed to initialize vchiq cdev\n"); + vchiq_platform_uninit(mgmt); + return ret; + } + + vchiq_debugfs_init(&mgmt->state); + + bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio"); + + return 0; +} + +static void vchiq_remove(struct platform_device *pdev) +{ + struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(&pdev->dev); + + vchiq_device_unregister(bcm2835_audio); + vchiq_debugfs_deinit(); + vchiq_deregister_chrdev(); + vchiq_platform_uninit(mgmt); +} + +static struct platform_driver vchiq_driver = { + .driver = { + .name = "bcm2835_vchiq", + .of_match_table = vchiq_of_match, + }, + .probe = vchiq_probe, + .remove = vchiq_remove, +}; + +static int __init vchiq_driver_init(void) +{ + int ret; + + ret = bus_register(&vchiq_bus_type); + if (ret) { + pr_err("Failed to register %s\n", vchiq_bus_type.name); + return ret; + } + + ret = platform_driver_register(&vchiq_driver); + if (ret) { + pr_err("Failed to register vchiq driver\n"); + bus_unregister(&vchiq_bus_type); + } + + return ret; +} +module_init(vchiq_driver_init); + +static void __exit vchiq_driver_exit(void) +{ + bus_unregister(&vchiq_bus_type); + platform_driver_unregister(&vchiq_driver); +} +module_exit(vchiq_driver_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Videocore VCHIQ driver"); +MODULE_AUTHOR("Broadcom Corporation"); diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c new file mode 100644 index 000000000000..f50e637d505c --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * vchiq_device.c - VCHIQ generic device and bus-type + * + * Copyright (c) 2023 Ideas On Board Oy + */ + +#include <linux/device/bus.h> +#include <linux/dma-mapping.h> +#include <linux/of_device.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include <linux/raspberrypi/vchiq_arm.h> +#include <linux/raspberrypi/vchiq_bus.h> + +static int vchiq_bus_type_match(struct device *dev, const struct device_driver *drv) +{ + if (dev->bus == &vchiq_bus_type && + strcmp(dev_name(dev), drv->name) == 0) + return true; + + return false; +} + +static int vchiq_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) +{ + const struct vchiq_device *device = container_of_const(dev, struct vchiq_device, dev); + + return add_uevent_var(env, "MODALIAS=vchiq:%s", dev_name(&device->dev)); +} + +static int vchiq_bus_probe(struct device *dev) +{ + struct vchiq_device *device = to_vchiq_device(dev); + struct vchiq_driver *driver = to_vchiq_driver(dev->driver); + + return driver->probe(device); +} + +static void vchiq_bus_remove(struct device *dev) +{ + struct vchiq_device *device = to_vchiq_device(dev); + struct vchiq_driver *driver = to_vchiq_driver(dev->driver); + + if (driver->remove) + driver->remove(device); +} + +const struct bus_type vchiq_bus_type = { + .name = "vchiq-bus", + .match = vchiq_bus_type_match, + .uevent = vchiq_bus_uevent, + .probe = vchiq_bus_probe, + .remove = vchiq_bus_remove, +}; + +static void 
vchiq_device_release(struct device *dev) +{ + struct vchiq_device *device = to_vchiq_device(dev); + + kfree(device); +} + +struct vchiq_device * +vchiq_device_register(struct device *parent, const char *name) +{ + struct vchiq_device *device; + int ret; + + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) + return NULL; + + device->dev.init_name = name; + device->dev.parent = parent; + device->dev.bus = &vchiq_bus_type; + device->dev.dma_mask = &device->dev.coherent_dma_mask; + device->dev.release = vchiq_device_release; + + device->drv_mgmt = dev_get_drvdata(parent); + + of_dma_configure(&device->dev, parent->of_node, true); + + ret = device_register(&device->dev); + if (ret) { + dev_err(parent, "Cannot register %s: %d\n", name, ret); + put_device(&device->dev); + return NULL; + } + + return device; +} + +void vchiq_device_unregister(struct vchiq_device *vchiq_dev) +{ + device_unregister(&vchiq_dev->dev); +} + +int vchiq_driver_register(struct vchiq_driver *vchiq_drv) +{ + vchiq_drv->driver.bus = &vchiq_bus_type; + + return driver_register(&vchiq_drv->driver); +} +EXPORT_SYMBOL_GPL(vchiq_driver_register); + +void vchiq_driver_unregister(struct vchiq_driver *vchiq_drv) +{ + driver_unregister(&vchiq_drv->driver); +} +EXPORT_SYMBOL_GPL(vchiq_driver_unregister); diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c new file mode 100644 index 000000000000..83de27cfd469 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c @@ -0,0 +1,4013 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */ + +#include <linux/types.h> +#include <linux/completion.h> +#include <linux/mutex.h> +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/highmem.h> +#include <linux/kthread.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/kref.h> +#include <linux/rcupdate.h> +#include <linux/sched/signal.h> + +#include <linux/raspberrypi/vchiq_arm.h> +#include <linux/raspberrypi/vchiq_core.h> + +#define VCHIQ_SLOT_HANDLER_STACK 8192 + +#define VCHIQ_MSG_PADDING 0 /* - */ +#define VCHIQ_MSG_CONNECT 1 /* - */ +#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */ +#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */ +#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */ +#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */ +#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */ +#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */ +#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */ +#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */ +#define VCHIQ_MSG_PAUSE 10 /* - */ +#define VCHIQ_MSG_RESUME 11 /* - */ +#define VCHIQ_MSG_REMOTE_USE 12 /* - */ +#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */ +#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */ + +#define TYPE_SHIFT 24 + +#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1) +#define VCHIQ_PORT_FREE 0x1000 +#define VCHIQ_PORT_IS_VALID(port) ((port) < VCHIQ_PORT_FREE) +#define VCHIQ_MAKE_MSG(type, srcport, dstport) \ + (((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0)) +#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)(msgid) >> TYPE_SHIFT) +#define VCHIQ_MSG_SRCPORT(msgid) \ + ((unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff)) +#define VCHIQ_MSG_DSTPORT(msgid) \ + ((unsigned short)(msgid) & 0xfff) + +#define MAKE_CONNECT (VCHIQ_MSG_CONNECT << TYPE_SHIFT) +#define 
MAKE_OPEN(srcport) \ + ((VCHIQ_MSG_OPEN << TYPE_SHIFT) | ((srcport) << 12)) +#define MAKE_OPENACK(srcport, dstport) \ + ((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0)) +#define MAKE_CLOSE(srcport, dstport) \ + ((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0)) +#define MAKE_DATA(srcport, dstport) \ + ((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0)) +#define MAKE_PAUSE (VCHIQ_MSG_PAUSE << TYPE_SHIFT) +#define MAKE_RESUME (VCHIQ_MSG_RESUME << TYPE_SHIFT) +#define MAKE_REMOTE_USE (VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT) +#define MAKE_REMOTE_USE_ACTIVE (VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT) + +#define PAGELIST_WRITE 0 +#define PAGELIST_READ 1 +#define PAGELIST_READ_WITH_FRAGMENTS 2 + +#define BELL2 0x08 + +/* Ensure the fields are wide enough */ +static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX)) == 0); +static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0); +static_assert((unsigned int)VCHIQ_PORT_MAX < (unsigned int)VCHIQ_PORT_FREE); + +#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0) +#define VCHIQ_MSGID_CLAIMED 0x40000000 + +#define VCHIQ_FOURCC_INVALID 0x00000000 +#define VCHIQ_FOURCC_IS_LEGAL(fourcc) ((fourcc) != VCHIQ_FOURCC_INVALID) + +#define VCHIQ_BULK_ACTUAL_ABORTED -1 + +#if VCHIQ_ENABLE_STATS +#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++) +#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++) +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \ + (service->stats. stat += addend) +#else +#define VCHIQ_STATS_INC(state, stat) ((void)0) +#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0) +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0) +#endif + +#define HANDLE_STATE_SHIFT 12 + +#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index)) +#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index)) +#define SLOT_INDEX_FROM_DATA(state, data) \ + (((unsigned int)((char *)data - (char *)state->slot_data)) / \ + VCHIQ_SLOT_SIZE) +#define SLOT_INDEX_FROM_INFO(state, info) \ + ((unsigned int)(info - state->slot_info)) +#define SLOT_QUEUE_INDEX_FROM_POS(pos) \ + ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE)) +#define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \ + (SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK) + +#define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1)) + +#define NO_CLOSE_RECVD 0 +#define CLOSE_RECVD 1 + +#define NO_RETRY_POLL 0 +#define RETRY_POLL 1 + +struct vchiq_open_payload { + int fourcc; + int client_id; + short version; + short version_min; +}; + +struct vchiq_openack_payload { + short version; +}; + +enum { + QMFLAGS_IS_BLOCKING = BIT(0), + QMFLAGS_NO_MUTEX_LOCK = BIT(1), + QMFLAGS_NO_MUTEX_UNLOCK = BIT(2) +}; + +enum { + VCHIQ_POLL_TERMINATE, + VCHIQ_POLL_REMOVE, + VCHIQ_POLL_TXNOTIFY, + VCHIQ_POLL_RXNOTIFY, + VCHIQ_POLL_COUNT +}; + +/* we require this for consistency between endpoints */ +static_assert(sizeof(struct vchiq_header) == 8); +static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN); + +static inline void check_sizes(void) +{ + BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE); + BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS); + BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE); + BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header)); + BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS); + BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS); + BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES); +} + +static unsigned int handle_seq; + +static 
const char *const srvstate_names[] = { + "FREE", + "HIDDEN", + "LISTENING", + "OPENING", + "OPEN", + "OPENSYNC", + "CLOSESENT", + "CLOSERECVD", + "CLOSEWAIT", + "CLOSED" +}; + +static const char *const reason_names[] = { + "SERVICE_OPENED", + "SERVICE_CLOSED", + "MESSAGE_AVAILABLE", + "BULK_TRANSMIT_DONE", + "BULK_RECEIVE_DONE", + "BULK_TRANSMIT_ABORTED", + "BULK_RECEIVE_ABORTED" +}; + +static const char *const conn_state_names[] = { + "DISCONNECTED", + "CONNECTING", + "CONNECTED", + "PAUSING", + "PAUSE_SENT", + "PAUSED", + "RESUMING", + "PAUSE_TIMEOUT", + "RESUME_TIMEOUT" +}; + +static void +release_message_sync(struct vchiq_state *state, struct vchiq_header *header); + +static const char *msg_type_str(unsigned int msg_type) +{ + switch (msg_type) { + case VCHIQ_MSG_PADDING: return "PADDING"; + case VCHIQ_MSG_CONNECT: return "CONNECT"; + case VCHIQ_MSG_OPEN: return "OPEN"; + case VCHIQ_MSG_OPENACK: return "OPENACK"; + case VCHIQ_MSG_CLOSE: return "CLOSE"; + case VCHIQ_MSG_DATA: return "DATA"; + case VCHIQ_MSG_BULK_RX: return "BULK_RX"; + case VCHIQ_MSG_BULK_TX: return "BULK_TX"; + case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE"; + case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE"; + case VCHIQ_MSG_PAUSE: return "PAUSE"; + case VCHIQ_MSG_RESUME: return "RESUME"; + case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE"; + case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE"; + case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE"; + } + return "???"; +} + +static inline void +set_service_state(struct vchiq_service *service, int newstate) +{ + dev_dbg(service->state->dev, "core: %d: srv:%d %s->%s\n", + service->state->id, service->localport, + srvstate_names[service->srvstate], + srvstate_names[newstate]); + service->srvstate = newstate; +} + +struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle) +{ + int idx = handle & (VCHIQ_MAX_SERVICES - 1); + + return rcu_dereference(instance->state->services[idx]); +} + +struct vchiq_service * +find_service_by_handle(struct vchiq_instance *instance, unsigned int handle) +{ + struct vchiq_service *service; + + rcu_read_lock(); + service = handle_to_service(instance, handle); + if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && + service->handle == handle && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + rcu_read_unlock(); + dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle); + return NULL; +} + +struct vchiq_service * +find_service_by_port(struct vchiq_state *state, unsigned int localport) +{ + if (localport <= VCHIQ_PORT_MAX) { + struct vchiq_service *service; + + rcu_read_lock(); + service = rcu_dereference(state->services[localport]); + if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + rcu_read_unlock(); + } + dev_dbg(state->dev, "core: Invalid port %u\n", localport); + return NULL; +} + +struct vchiq_service * +find_service_for_instance(struct vchiq_instance *instance, unsigned int handle) +{ + struct vchiq_service *service; + + rcu_read_lock(); + service = handle_to_service(instance, handle); + if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && + service->handle == handle && + service->instance == instance && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + 
} + rcu_read_unlock(); + dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle); + return NULL; +} + +struct vchiq_service * +find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle) +{ + struct vchiq_service *service; + + rcu_read_lock(); + service = handle_to_service(instance, handle); + if (service && + (service->srvstate == VCHIQ_SRVSTATE_FREE || + service->srvstate == VCHIQ_SRVSTATE_CLOSED) && + service->handle == handle && + service->instance == instance && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + rcu_read_unlock(); + dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle); + return service; +} + +struct vchiq_service * +__next_service_by_instance(struct vchiq_state *state, + struct vchiq_instance *instance, + int *pidx) +{ + struct vchiq_service *service = NULL; + int idx = *pidx; + + while (idx < state->unused_service) { + struct vchiq_service *srv; + + srv = rcu_dereference(state->services[idx]); + idx++; + if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE && + srv->instance == instance) { + service = srv; + break; + } + } + + *pidx = idx; + return service; +} + +struct vchiq_service * +next_service_by_instance(struct vchiq_state *state, + struct vchiq_instance *instance, + int *pidx) +{ + struct vchiq_service *service; + + rcu_read_lock(); + while (1) { + service = __next_service_by_instance(state, instance, pidx); + if (!service) + break; + if (kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + break; + } + } + rcu_read_unlock(); + return service; +} + +void +vchiq_service_get(struct vchiq_service *service) +{ + if (!service) { + WARN(1, "%s service is NULL\n", __func__); + return; + } + kref_get(&service->ref_count); +} + +static void service_release(struct kref *kref) +{ + struct vchiq_service *service = + container_of(kref, struct vchiq_service, ref_count); + struct vchiq_state *state = service->state; + + WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE); + rcu_assign_pointer(state->services[service->localport], NULL); + if (service->userdata_term) + service->userdata_term(service->base.userdata); + kfree_rcu(service, rcu); +} + +void +vchiq_service_put(struct vchiq_service *service) +{ + if (!service) { + WARN(1, "%s: service is NULL\n", __func__); + return; + } + kref_put(&service->ref_count, service_release); +} + +int +vchiq_get_client_id(struct vchiq_instance *instance, unsigned int handle) +{ + struct vchiq_service *service; + int id; + + rcu_read_lock(); + service = handle_to_service(instance, handle); + id = service ? service->client_id : 0; + rcu_read_unlock(); + return id; +} + +void * +vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int handle) +{ + void *userdata; + struct vchiq_service *service; + + rcu_read_lock(); + service = handle_to_service(instance, handle); + userdata = service ? service->base.userdata : NULL; + rcu_read_unlock(); + return userdata; +} +EXPORT_SYMBOL(vchiq_get_service_userdata); + +static void +mark_service_closing_internal(struct vchiq_service *service, int sh_thread) +{ + struct vchiq_state *state = service->state; + struct vchiq_service_quota *quota; + + service->closing = 1; + + /* Synchronise with other threads. 
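+ * Taking and immediately dropping each mutex just drains it, so any
+ * thread that held the lock before 'closing' was set has left its
+ * critical section by the time we continue.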
*/ + mutex_lock(&state->recycle_mutex); + mutex_unlock(&state->recycle_mutex); + if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) { + /* + * If we're pausing then the slot_mutex is held until resume + * by the slot handler. Therefore don't try to acquire this + * mutex if we're the slot handler and in the pause sent state. + * We don't need to in this case anyway. + */ + mutex_lock(&state->slot_mutex); + mutex_unlock(&state->slot_mutex); + } + + /* Unblock any sending thread. */ + quota = &state->service_quotas[service->localport]; + complete(&quota->quota_event); +} + +static void +mark_service_closing(struct vchiq_service *service) +{ + mark_service_closing_internal(service, 0); +} + +static inline int +make_service_callback(struct vchiq_service *service, enum vchiq_reason reason, + struct vchiq_header *header, struct vchiq_bulk *bulk) +{ + void *cb_data = NULL; + void __user *cb_userdata = NULL; + int status; + + /* + * If a bulk transfer is in progress, pass bulk->cb_*data to the + * callback function. + */ + if (bulk) { + cb_data = bulk->cb_data; + cb_userdata = bulk->cb_userdata; + } + + dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %p, %p %p)\n", + service->state->id, service->localport, reason_names[reason], + header, cb_data, cb_userdata); + status = service->base.callback(service->instance, reason, header, service->handle, + cb_data, cb_userdata); + if (status && (status != -EAGAIN)) { + dev_warn(service->state->dev, + "core: %d: ignoring ERROR from callback to service %x\n", + service->state->id, service->handle); + status = 0; + } + + if (reason != VCHIQ_MESSAGE_AVAILABLE) + vchiq_release_message(service->instance, service->handle, header); + + return status; +} + +inline void +vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate) +{ + enum vchiq_connstate oldstate = state->conn_state; + + dev_dbg(state->dev, "core: %d: %s->%s\n", + state->id, conn_state_names[oldstate], conn_state_names[newstate]); + state->conn_state = newstate; + vchiq_platform_conn_state_changed(state, oldstate, newstate); +} + +/* This initialises a single remote_event, and the associated wait_queue. */ +static inline void +remote_event_create(wait_queue_head_t *wq, struct remote_event *event) +{ + event->armed = 0; + /* + * Don't clear the 'fired' flag because it may already have been set + * by the other side. + */ + init_waitqueue_head(wq); +} + +/* + * All the event waiting routines in VCHIQ used a custom semaphore + * implementation that filtered most signals. This achieved a behaviour similar + * to the "killable" family of functions. While cleaning up this code all the + * routines were switched to the "interruptible" family of functions, as the + * former was deemed unjustified and the use of "killable" set all VCHIQ's + * threads in D state. + * + * Returns: 0 on success, a negative error code on failure + */ +static inline int +remote_event_wait(wait_queue_head_t *wq, struct remote_event *event) +{ + int ret = 0; + + if (!event->fired) { + event->armed = 1; + dsb(sy); + ret = wait_event_interruptible(*wq, event->fired); + if (ret) { + event->armed = 0; + return ret; + } + event->armed = 0; + /* Ensure that the peer sees that we are not waiting (armed == 0).
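+ * The write barrier below publishes the cleared flag before any
+ * subsequent writes to the shared state.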
*/ + wmb(); + } + + event->fired = 0; + return ret; +} + +static void +remote_event_signal(struct vchiq_state *state, struct remote_event *event) +{ + struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(state->dev); + + /* + * Ensure that all writes to shared data structures have completed + * before signalling the peer. + */ + wmb(); + + event->fired = 1; + + dsb(sy); /* data barrier operation */ + + if (event->armed) + writel(0, mgmt->regs + BELL2); /* trigger vc interrupt */ +} + +/* + * Acknowledge that the event has been signalled, and wake any waiters. Usually + * called as a result of the doorbell being rung. + */ +static inline void +remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event) +{ + event->fired = 1; + event->armed = 0; + wake_up_all(wq); +} + +/* Check if a single event has been signalled, waking the waiters if it has. */ +static inline void +remote_event_poll(wait_queue_head_t *wq, struct remote_event *event) +{ + if (event->fired && event->armed) + remote_event_signal_local(wq, event); +} + +/* + * VCHIQ used a small, fixed number of remote events. It is simplest to + * enumerate them here for polling. + */ +void +remote_event_pollall(struct vchiq_state *state) +{ + remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger); + remote_event_poll(&state->sync_release_event, &state->local->sync_release); + remote_event_poll(&state->trigger_event, &state->local->trigger); + remote_event_poll(&state->recycle_event, &state->local->recycle); +} + +/* + * Round up message sizes so that any space at the end of a slot is always big + * enough for a header. This relies on header size being a power of two, which + * has been verified earlier by a static assertion. + */ + +static inline size_t +calc_stride(size_t size) +{ + /* Allow room for the header */ + size += sizeof(struct vchiq_header); + + /* Round up */ + return (size + sizeof(struct vchiq_header) - 1) & + ~(sizeof(struct vchiq_header) - 1); +} + +/* Called by the slot handler thread */ +static struct vchiq_service * +get_listening_service(struct vchiq_state *state, int fourcc) +{ + int i; + + WARN_ON(fourcc == VCHIQ_FOURCC_INVALID); + + rcu_read_lock(); + for (i = 0; i < state->unused_service; i++) { + struct vchiq_service *service; + + service = rcu_dereference(state->services[i]); + if (service && + service->public_fourcc == fourcc && + (service->srvstate == VCHIQ_SRVSTATE_LISTENING || + (service->srvstate == VCHIQ_SRVSTATE_OPEN && + service->remoteport == VCHIQ_PORT_FREE)) && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + } + rcu_read_unlock(); + return NULL; +} + +/* Called by the slot handler thread */ +static struct vchiq_service * +get_connected_service(struct vchiq_state *state, unsigned int port) +{ + int i; + + rcu_read_lock(); + for (i = 0; i < state->unused_service; i++) { + struct vchiq_service *service = + rcu_dereference(state->services[i]); + + if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN && + service->remoteport == port && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + } + rcu_read_unlock(); + return NULL; +} + +inline void +request_poll(struct vchiq_state *state, struct vchiq_service *service, + int poll_type) +{ + u32 value; + int index; + + if (!service) + goto skip_service; + + do { + value = atomic_read(&service->poll_flags); + } while (atomic_cmpxchg(&service->poll_flags, value, + value 
| BIT(poll_type)) != value); + + index = BITSET_WORD(service->localport); + do { + value = atomic_read(&state->poll_services[index]); + } while (atomic_cmpxchg(&state->poll_services[index], + value, value | BIT(service->localport & 0x1f)) != value); + +skip_service: + state->poll_needed = 1; + /* Ensure the slot handler thread sees the poll_needed flag. */ + wmb(); + + /* ... and ensure the slot handler runs. */ + remote_event_signal_local(&state->trigger_event, &state->local->trigger); +} + +/* + * Called from queue_message, by the slot handler and application threads, + * with slot_mutex held + */ +static struct vchiq_header * +reserve_space(struct vchiq_state *state, size_t space, int is_blocking) +{ + struct vchiq_shared_state *local = state->local; + int tx_pos = state->local_tx_pos; + int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK); + + if (space > slot_space) { + struct vchiq_header *header; + /* Fill the remaining space with padding */ + WARN_ON(!state->tx_data); + header = (struct vchiq_header *) + (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK)); + header->msgid = VCHIQ_MSGID_PADDING; + header->size = slot_space - sizeof(struct vchiq_header); + + tx_pos += slot_space; + } + + /* If necessary, get the next slot. */ + if ((tx_pos & VCHIQ_SLOT_MASK) == 0) { + int slot_index; + + /* If there is no free slot... */ + + if (!try_wait_for_completion(&state->slot_available_event)) { + /* ...wait for one. */ + + VCHIQ_STATS_INC(state, slot_stalls); + + /* But first, flush through the last slot. */ + state->local_tx_pos = tx_pos; + local->tx_pos = tx_pos; + remote_event_signal(state, &state->remote->trigger); + + if (!is_blocking || + (wait_for_completion_interruptible(&state->slot_available_event))) + return NULL; /* No space available */ + } + + if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) { + complete(&state->slot_available_event); + dev_warn(state->dev, "%s: invalid tx_pos: %d\n", + __func__, tx_pos); + return NULL; + } + + slot_index = local->slot_queue[SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)]; + state->tx_data = + (char *)SLOT_DATA_FROM_INDEX(state, slot_index); + } + + state->local_tx_pos = tx_pos + space; + + return (struct vchiq_header *)(state->tx_data + + (tx_pos & VCHIQ_SLOT_MASK)); +} + +static void +process_free_data_message(struct vchiq_state *state, u32 *service_found, + struct vchiq_header *header) +{ + int msgid = header->msgid; + int port = VCHIQ_MSG_SRCPORT(msgid); + struct vchiq_service_quota *quota = &state->service_quotas[port]; + int count; + + spin_lock(&state->quota_spinlock); + count = quota->message_use_count; + if (count > 0) + quota->message_use_count = count - 1; + spin_unlock(&state->quota_spinlock); + + if (count == quota->message_quota) { + /* + * Signal the service that it + * has dropped below its quota + */ + complete(&quota->quota_event); + } else if (count == 0) { + dev_err(state->dev, + "core: service %d message_use_count=%d (header %p, msgid %x, header->msgid %x, header->size %x)\n", + port, quota->message_use_count, header, msgid, + header->msgid, header->size); + WARN(1, "invalid message use count\n"); + } + if (!BITSET_IS_SET(service_found, port)) { + /* Set the found bit for this service */ + BITSET_SET(service_found, port); + + spin_lock(&state->quota_spinlock); + count = quota->slot_use_count; + if (count > 0) + quota->slot_use_count = count - 1; + spin_unlock(&state->quota_spinlock); + + if (count > 0) { + /* + * Signal the service in case + * it has dropped below its quota + */ + complete(&quota->quota_event); + 
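+ /* The matching wait is the quota-stall loop in queue_message(). */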
dev_dbg(state->dev, "core: %d: pfq:%d %x@%p - slot_use->%d\n", + state->id, port, header->size, header, count - 1); + } else { + dev_err(state->dev, + "core: service %d slot_use_count=%d (header %p, msgid %x, header->msgid %x, header->size %x)\n", + port, count, header, msgid, header->msgid, header->size); + WARN(1, "bad slot use count\n"); + } + } +} + +/* Called by the recycle thread. */ +static void +process_free_queue(struct vchiq_state *state, u32 *service_found, + size_t length) +{ + struct vchiq_shared_state *local = state->local; + int slot_queue_available; + + /* + * Find slots which have been freed by the other side, and return them + * to the available queue. + */ + slot_queue_available = state->slot_queue_available; + + /* + * Use a memory barrier to ensure that any state that may have been + * modified by another thread is not masked by stale prefetched + * values. + */ + mb(); + + while (slot_queue_available != local->slot_queue_recycle) { + unsigned int pos; + int slot_index = local->slot_queue[slot_queue_available & + VCHIQ_SLOT_QUEUE_MASK]; + char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index); + int data_found = 0; + + slot_queue_available++; + /* + * Beware of the address dependency - data is calculated + * using an index written by the other side. + */ + rmb(); + + dev_dbg(state->dev, "core: %d: pfq %d=%p %x %x\n", + state->id, slot_index, data, local->slot_queue_recycle, + slot_queue_available); + + /* Initialise the bitmask for services which have used this slot */ + memset(service_found, 0, length); + + pos = 0; + + while (pos < VCHIQ_SLOT_SIZE) { + struct vchiq_header *header = + (struct vchiq_header *)(data + pos); + int msgid = header->msgid; + + if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) { + process_free_data_message(state, service_found, + header); + data_found = 1; + } + + pos += calc_stride(header->size); + if (pos > VCHIQ_SLOT_SIZE) { + dev_err(state->dev, + "core: pfq - pos %x: header %p, msgid %x, header->msgid %x, header->size %x\n", + pos, header, msgid, header->msgid, header->size); + WARN(1, "invalid slot position\n"); + } + } + + if (data_found) { + int count; + + spin_lock(&state->quota_spinlock); + count = state->data_use_count; + if (count > 0) + state->data_use_count = count - 1; + spin_unlock(&state->quota_spinlock); + if (count == state->data_quota) + complete(&state->data_quota_event); + } + + /* + * Don't allow the slot to be reused until we are no + * longer interested in it. 
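+ * The barrier below orders the reads made while scanning the slot
+ * before the store that publishes it as available again.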
+ */ + mb(); + + state->slot_queue_available = slot_queue_available; + complete(&state->slot_available_event); + } +} + +static ssize_t +memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize) +{ + memcpy(dest + offset, context + offset, maxsize); + return maxsize; +} + +static ssize_t +copy_message_data(ssize_t (*copy_callback)(void *context, void *dest, size_t offset, + size_t maxsize), + void *context, + void *dest, + size_t size) +{ + size_t pos = 0; + + while (pos < size) { + ssize_t callback_result; + size_t max_bytes = size - pos; + + callback_result = copy_callback(context, dest + pos, pos, + max_bytes); + + if (callback_result < 0) + return callback_result; + + if (!callback_result) + return -EIO; + + if (callback_result > max_bytes) + return -EIO; + + pos += callback_result; + } + + return size; +} + +/* Called by the slot handler and application threads */ +static int +queue_message(struct vchiq_state *state, struct vchiq_service *service, + int msgid, + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, size_t size, int flags) +{ + struct vchiq_shared_state *local; + struct vchiq_service_quota *quota = NULL; + struct vchiq_header *header; + int type = VCHIQ_MSG_TYPE(msgid); + int svc_fourcc; + + size_t stride; + + local = state->local; + + stride = calc_stride(size); + + WARN_ON(stride > VCHIQ_SLOT_SIZE); + + if (!(flags & QMFLAGS_NO_MUTEX_LOCK) && + mutex_lock_killable(&state->slot_mutex)) + return -EINTR; + + if (type == VCHIQ_MSG_DATA) { + int tx_end_index; + + if (!service) { + WARN(1, "%s: service is NULL\n", __func__); + mutex_unlock(&state->slot_mutex); + return -EINVAL; + } + + WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK | + QMFLAGS_NO_MUTEX_UNLOCK)); + + if (service->closing) { + /* The service has been closed */ + mutex_unlock(&state->slot_mutex); + return -EHOSTDOWN; + } + + quota = &state->service_quotas[service->localport]; + + spin_lock(&state->quota_spinlock); + + /* + * Ensure this service doesn't use more than its quota of + * messages or slots + */ + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1); + + /* + * Ensure data messages don't use more than their quota of + * slots + */ + while ((tx_end_index != state->previous_data_index) && + (state->data_use_count == state->data_quota)) { + VCHIQ_STATS_INC(state, data_stalls); + spin_unlock(&state->quota_spinlock); + mutex_unlock(&state->slot_mutex); + + if (wait_for_completion_killable(&state->data_quota_event)) + return -EINTR; + + mutex_lock(&state->slot_mutex); + spin_lock(&state->quota_spinlock); + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1); + if ((tx_end_index == state->previous_data_index) || + (state->data_use_count < state->data_quota)) { + /* Pass the signal on to other waiters */ + complete(&state->data_quota_event); + break; + } + } + + while ((quota->message_use_count == quota->message_quota) || + ((tx_end_index != quota->previous_tx_index) && + (quota->slot_use_count == quota->slot_quota))) { + spin_unlock(&state->quota_spinlock); + dev_dbg(state->dev, + "core: %d: qm:%d %s,%zx - quota stall (msg %d, slot %d)\n", + state->id, service->localport, msg_type_str(type), size, + quota->message_use_count, quota->slot_use_count); + VCHIQ_SERVICE_STATS_INC(service, quota_stalls); + mutex_unlock(&state->slot_mutex); + if (wait_for_completion_killable(&quota->quota_event)) + return -EINTR; + if (service->closing) + return -EHOSTDOWN; + if (mutex_lock_killable(&state->slot_mutex)) + return 
-EINTR; + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) { + /* The service has been closed */ + mutex_unlock(&state->slot_mutex); + return -EHOSTDOWN; + } + spin_lock(&state->quota_spinlock); + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1); + } + + spin_unlock(&state->quota_spinlock); + } + + header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING); + + if (!header) { + if (service) + VCHIQ_SERVICE_STATS_INC(service, slot_stalls); + /* + * In the event of a failure, return the mutex to the + * state it was in + */ + if (!(flags & QMFLAGS_NO_MUTEX_LOCK)) + mutex_unlock(&state->slot_mutex); + return -EAGAIN; + } + + if (type == VCHIQ_MSG_DATA) { + ssize_t callback_result; + int tx_end_index; + int slot_use_count; + + dev_dbg(state->dev, "core: %d: qm %s@%p,%zx (%d->%d)\n", + state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size, + VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid)); + + WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK | + QMFLAGS_NO_MUTEX_UNLOCK)); + + callback_result = + copy_message_data(copy_callback, context, + header->data, size); + + if (callback_result < 0) { + mutex_unlock(&state->slot_mutex); + VCHIQ_SERVICE_STATS_INC(service, error_count); + return -EINVAL; + } + + vchiq_log_dump_mem(state->dev, "Sent", 0, + header->data, + min_t(size_t, 16, callback_result)); + + spin_lock(&state->quota_spinlock); + quota->message_use_count++; + + tx_end_index = + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1); + + /* + * If this transmission can't fit in the last slot used by any + * service, the data_use_count must be increased. + */ + if (tx_end_index != state->previous_data_index) { + state->previous_data_index = tx_end_index; + state->data_use_count++; + } + + /* + * If this isn't the same slot last used by this service, + * the service's slot_use_count must be increased. + */ + if (tx_end_index != quota->previous_tx_index) { + quota->previous_tx_index = tx_end_index; + slot_use_count = ++quota->slot_use_count; + } else { + slot_use_count = 0; + } + + spin_unlock(&state->quota_spinlock); + + if (slot_use_count) + dev_dbg(state->dev, "core: %d: qm:%d %s,%zx - slot_use->%d (hdr %p)\n", + state->id, service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)), + size, slot_use_count, header); + + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count); + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size); + } else { + dev_dbg(state->dev, "core: %d: qm %s@%p,%zx (%d->%d)\n", + state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size, + VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid)); + if (size != 0) { + /* + * It is assumed for now that this code path + * only happens from calls inside this file. + * + * External callers are through the vchiq_queue_message + * path which always sets the type to be VCHIQ_MSG_DATA + * + * At first glance this appears to be correct but + * more review is needed. + */ + copy_message_data(copy_callback, context, + header->data, size); + } + VCHIQ_STATS_INC(state, ctrl_tx_count); + } + + header->msgid = msgid; + header->size = size; + + svc_fourcc = service ? service->base.fourcc + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?'); + + dev_dbg(state->dev, "core_msg: Sent Msg %s(%u) to %p4cc s:%u d:%d len:%zu\n", + msg_type_str(VCHIQ_MSG_TYPE(msgid)), + VCHIQ_MSG_TYPE(msgid), &svc_fourcc, + VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid), size); + + /* Make sure the new header is visible to the peer. */ + wmb(); + + /* Make the new tx_pos visible to the peer. 
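+ * The barrier above has already flushed the header contents, so the
+ * peer cannot observe the new tx_pos before the message it describes.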
*/ + local->tx_pos = state->local_tx_pos; + wmb(); + + if (service && (type == VCHIQ_MSG_CLOSE)) + set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT); + + if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK)) + mutex_unlock(&state->slot_mutex); + + remote_event_signal(state, &state->remote->trigger); + + return 0; +} + +/* Called by the slot handler and application threads */ +static int +queue_message_sync(struct vchiq_state *state, struct vchiq_service *service, + int msgid, + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, int size) +{ + struct vchiq_shared_state *local; + struct vchiq_header *header; + ssize_t callback_result; + int svc_fourcc; + int ret; + + local = state->local; + + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME && + mutex_lock_killable(&state->sync_mutex)) + return -EAGAIN; + + ret = remote_event_wait(&state->sync_release_event, &local->sync_release); + if (ret) + return ret; + + /* Ensure that reads don't overtake the remote_event_wait. */ + rmb(); + + header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state, + local->slot_sync); + + { + int oldmsgid = header->msgid; + + if (oldmsgid != VCHIQ_MSGID_PADDING) + dev_err(state->dev, "core: %d: qms - msgid %x, not PADDING\n", + state->id, oldmsgid); + } + + dev_dbg(state->dev, "sync: %d: qms %s@%p,%x (%d->%d)\n", + state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size, + VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid)); + + callback_result = copy_message_data(copy_callback, context, + header->data, size); + + if (callback_result < 0) { + mutex_unlock(&state->slot_mutex); + VCHIQ_SERVICE_STATS_INC(service, error_count); + return -EINVAL; + } + + if (service) { + vchiq_log_dump_mem(state->dev, "Sent", 0, + header->data, + min_t(size_t, 16, callback_result)); + + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count); + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size); + } else { + VCHIQ_STATS_INC(state, ctrl_tx_count); + } + + header->size = size; + header->msgid = msgid; + + svc_fourcc = service ? service->base.fourcc + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?'); + + dev_dbg(state->dev, + "sync: Sent Sync Msg %s(%u) to %p4cc s:%u d:%d len:%d\n", + msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid), + &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid), + VCHIQ_MSG_DSTPORT(msgid), size); + + remote_event_signal(state, &state->remote->sync_trigger); + + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE) + mutex_unlock(&state->sync_mutex); + + return 0; +} + +static inline void +claim_slot(struct vchiq_slot_info *slot) +{ + slot->use_count++; +} + +static void +release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info, + struct vchiq_header *header, struct vchiq_service *service) +{ + mutex_lock(&state->recycle_mutex); + + if (header) { + int msgid = header->msgid; + + if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || (service && service->closing)) { + mutex_unlock(&state->recycle_mutex); + return; + } + + /* Rewrite the message header to prevent a double release */ + header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED; + } + + slot_info->release_count++; + + if (slot_info->release_count == slot_info->use_count) { + int slot_queue_recycle; + /* Add to the freed queue */ + + /* + * A read barrier is necessary here to prevent speculative + * fetches of remote->slot_queue_recycle from overtaking the + * mutex. 
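+ * recycle_mutex serialises writers of the recycle queue, so a value
+ * speculated before the lock was taken may already be stale.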
+ */ + rmb(); + + slot_queue_recycle = state->remote->slot_queue_recycle; + state->remote->slot_queue[slot_queue_recycle & + VCHIQ_SLOT_QUEUE_MASK] = + SLOT_INDEX_FROM_INFO(state, slot_info); + state->remote->slot_queue_recycle = slot_queue_recycle + 1; + dev_dbg(state->dev, "core: %d: %d - recycle->%x\n", + state->id, SLOT_INDEX_FROM_INFO(state, slot_info), + state->remote->slot_queue_recycle); + + /* + * A write barrier is necessary, but remote_event_signal + * contains one. + */ + remote_event_signal(state, &state->remote->recycle); + } + + mutex_unlock(&state->recycle_mutex); +} + +static inline enum vchiq_reason +get_bulk_reason(struct vchiq_bulk *bulk) +{ + if (bulk->dir == VCHIQ_BULK_TRANSMIT) { + if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED) + return VCHIQ_BULK_TRANSMIT_ABORTED; + + return VCHIQ_BULK_TRANSMIT_DONE; + } + + if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED) + return VCHIQ_BULK_RECEIVE_ABORTED; + + return VCHIQ_BULK_RECEIVE_DONE; +} + +static int service_notify_bulk(struct vchiq_service *service, + struct vchiq_bulk *bulk) +{ + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) { + if (bulk->dir == VCHIQ_BULK_TRANSMIT) { + VCHIQ_SERVICE_STATS_INC(service, bulk_tx_count); + VCHIQ_SERVICE_STATS_ADD(service, bulk_tx_bytes, + bulk->actual); + } else { + VCHIQ_SERVICE_STATS_INC(service, bulk_rx_count); + VCHIQ_SERVICE_STATS_ADD(service, bulk_rx_bytes, + bulk->actual); + } + } else { + VCHIQ_SERVICE_STATS_INC(service, bulk_aborted_count); + } + + if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) { + struct bulk_waiter *waiter; + + spin_lock(&service->state->bulk_waiter_spinlock); + waiter = bulk->waiter; + if (waiter) { + waiter->actual = bulk->actual; + complete(&waiter->event); + } + spin_unlock(&service->state->bulk_waiter_spinlock); + } else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) { + enum vchiq_reason reason = get_bulk_reason(bulk); + + return make_service_callback(service, reason, NULL, bulk); + } + + return 0; +} + +/* Called by the slot handler - don't hold the bulk mutex */ +static int +notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue, + int retry_poll) +{ + int status = 0; + + dev_dbg(service->state->dev, + "core: %d: nb:%d %cx - p=%x rn=%x r=%x\n", + service->state->id, service->localport, + (queue == &service->bulk_tx) ? 't' : 'r', + queue->process, queue->remote_notify, queue->remove); + + queue->remote_notify = queue->process; + + while (queue->remove != queue->remote_notify) { + struct vchiq_bulk *bulk = + &queue->bulks[BULK_INDEX(queue->remove)]; + + /* + * Only generate callbacks for non-dummy bulk + * requests, and non-terminated services + */ + if (bulk->dma_addr && service->instance) { + status = service_notify_bulk(service, bulk); + if (status == -EAGAIN) + break; + } + + queue->remove++; + complete(&service->bulk_remove_event); + } + if (!retry_poll) + status = 0; + + if (status == -EAGAIN) + request_poll(service->state, service, (queue == &service->bulk_tx) ? 
+ VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY); + + return status; +} + +static void +poll_services_of_group(struct vchiq_state *state, int group) +{ + u32 flags = atomic_xchg(&state->poll_services[group], 0); + int i; + + for (i = 0; flags; i++) { + struct vchiq_service *service; + u32 service_flags; + + if ((flags & BIT(i)) == 0) + continue; + + service = find_service_by_port(state, (group << 5) + i); + flags &= ~BIT(i); + + if (!service) + continue; + + service_flags = atomic_xchg(&service->poll_flags, 0); + if (service_flags & BIT(VCHIQ_POLL_REMOVE)) { + dev_dbg(state->dev, "core: %d: ps - remove %d<->%d\n", + state->id, service->localport, service->remoteport); + + /* + * Make it look like a client, because + * it must be removed and not left in + * the LISTENING state. + */ + service->public_fourcc = VCHIQ_FOURCC_INVALID; + + if (vchiq_close_service_internal(service, NO_CLOSE_RECVD)) + request_poll(state, service, VCHIQ_POLL_REMOVE); + } else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) { + dev_dbg(state->dev, "core: %d: ps - terminate %d<->%d\n", + state->id, service->localport, service->remoteport); + if (vchiq_close_service_internal(service, NO_CLOSE_RECVD)) + request_poll(state, service, VCHIQ_POLL_TERMINATE); + } + if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY)) + notify_bulks(service, &service->bulk_tx, RETRY_POLL); + if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY)) + notify_bulks(service, &service->bulk_rx, RETRY_POLL); + vchiq_service_put(service); + } +} + +/* Called by the slot handler thread */ +static void +poll_services(struct vchiq_state *state) +{ + int group; + + for (group = 0; group < BITSET_SIZE(state->unused_service); group++) + poll_services_of_group(state, group); +} + +static void +cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo) +{ + if (pagelistinfo->scatterlist_mapped) { + dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist, + pagelistinfo->num_pages, pagelistinfo->dma_dir); + } + + if (pagelistinfo->pages_need_release) + unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages); + + dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size, + pagelistinfo->pagelist, pagelistinfo->dma_addr); +} + +static inline bool +is_adjacent_block(u32 *addrs, dma_addr_t addr, unsigned int k) +{ + u32 tmp; + + if (!k) + return false; + + tmp = (addrs[k - 1] & PAGE_MASK) + + (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT); + + return tmp == (addr & PAGE_MASK); +} + +/* There is a potential problem with partial cache lines (pages?) + * at the ends of the block when reading. If the CPU accessed anything in + * the same line (page?) then it may have pulled old data into the cache, + * obscuring the new data underneath. We can solve this by transferring the + * partial cache lines separately, and allowing the ARM to copy into the + * cached area. + */ +static struct vchiq_pagelist_info * +create_pagelist(struct vchiq_instance *instance, struct vchiq_bulk *bulk) +{ + struct vchiq_drv_mgmt *drv_mgmt; + struct pagelist *pagelist; + struct vchiq_pagelist_info *pagelistinfo; + struct page **pages; + u32 *addrs; + unsigned int num_pages, offset, i, k; + int actual_pages; + size_t pagelist_size; + struct scatterlist *scatterlist, *sg; + int dma_buffers; + unsigned int cache_line_size; + dma_addr_t dma_addr; + size_t count = bulk->size; + unsigned short type = (bulk->dir == VCHIQ_BULK_RECEIVE) + ? 
PAGELIST_READ : PAGELIST_WRITE; + + if (count >= INT_MAX - PAGE_SIZE) + return NULL; + + drv_mgmt = dev_get_drvdata(instance->state->dev); + + if (bulk->offset) + offset = (uintptr_t)bulk->offset & (PAGE_SIZE - 1); + else + offset = (uintptr_t)bulk->uoffset & (PAGE_SIZE - 1); + num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE); + + if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) - + sizeof(struct vchiq_pagelist_info)) / + (sizeof(u32) + sizeof(pages[0]) + + sizeof(struct scatterlist))) + return NULL; + + pagelist_size = sizeof(struct pagelist) + + (num_pages * sizeof(u32)) + + (num_pages * sizeof(pages[0]) + + (num_pages * sizeof(struct scatterlist))) + + sizeof(struct vchiq_pagelist_info); + + /* Allocate enough storage to hold the page pointers and the page + * list + */ + pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr, + GFP_KERNEL); + + dev_dbg(instance->state->dev, "arm: %p\n", pagelist); + + if (!pagelist) + return NULL; + + addrs = pagelist->addrs; + pages = (struct page **)(addrs + num_pages); + scatterlist = (struct scatterlist *)(pages + num_pages); + pagelistinfo = (struct vchiq_pagelist_info *) + (scatterlist + num_pages); + + pagelist->length = count; + pagelist->type = type; + pagelist->offset = offset; + + /* Populate the fields of the pagelistinfo structure */ + pagelistinfo->pagelist = pagelist; + pagelistinfo->pagelist_buffer_size = pagelist_size; + pagelistinfo->dma_addr = dma_addr; + pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE; + pagelistinfo->num_pages = num_pages; + pagelistinfo->pages_need_release = 0; + pagelistinfo->pages = pages; + pagelistinfo->scatterlist = scatterlist; + pagelistinfo->scatterlist_mapped = 0; + + if (bulk->offset) { + unsigned long length = count; + unsigned int off = offset; + + for (actual_pages = 0; actual_pages < num_pages; + actual_pages++) { + struct page *pg = + vmalloc_to_page(((unsigned int *)bulk->offset + + (actual_pages * PAGE_SIZE))); + size_t bytes = PAGE_SIZE - off; + + if (!pg) { + cleanup_pagelistinfo(instance, pagelistinfo); + return NULL; + } + + if (bytes > length) + bytes = length; + pages[actual_pages] = pg; + length -= bytes; + off = 0; + } + /* do not try and release vmalloc pages */ + } else { + actual_pages = + pin_user_pages_fast((unsigned long)bulk->uoffset & PAGE_MASK, num_pages, + type == PAGELIST_READ, pages); + + if (actual_pages != num_pages) { + dev_dbg(instance->state->dev, "arm: Only %d/%d pages locked\n", + actual_pages, num_pages); + + /* This is probably due to the process being killed */ + if (actual_pages > 0) + unpin_user_pages(pages, actual_pages); + cleanup_pagelistinfo(instance, pagelistinfo); + return NULL; + } + /* release user pages */ + pagelistinfo->pages_need_release = 1; + } + + /* + * Initialize the scatterlist so that the magic cookie + * is filled if debugging is enabled + */ + sg_init_table(scatterlist, num_pages); + /* Now set the pages for each scatterlist */ + for (i = 0; i < num_pages; i++) { + unsigned int len = PAGE_SIZE - offset; + + if (len > count) + len = count; + sg_set_page(scatterlist + i, pages[i], len, offset); + offset = 0; + count -= len; + } + + dma_buffers = dma_map_sg(instance->state->dev, + scatterlist, + num_pages, + pagelistinfo->dma_dir); + + if (dma_buffers == 0) { + cleanup_pagelistinfo(instance, pagelistinfo); + return NULL; + } + + pagelistinfo->scatterlist_mapped = 1; + + /* Combine adjacent blocks for performance */ + k = 0; + for_each_sg(scatterlist, sg, dma_buffers, i) { + 
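+ /* Each addrs[] entry packs a page-aligned base address with (page count - 1) in the low bits. */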
unsigned int len = sg_dma_len(sg); + dma_addr_t addr = sg_dma_address(sg); + + /* Note: addrs is the address + page_count - 1 + * The firmware expects blocks after the first to be page- + * aligned and a multiple of the page size + */ + WARN_ON(len == 0); + WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK)); + WARN_ON(i && (addr & ~PAGE_MASK)); + if (is_adjacent_block(addrs, addr, k)) + addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT); + else + addrs[k++] = (addr & PAGE_MASK) | + (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1); + } + + /* Partial cache lines (fragments) require special measures */ + cache_line_size = drv_mgmt->info->cache_line_size; + if ((type == PAGELIST_READ) && + ((pagelist->offset & (cache_line_size - 1)) || + ((pagelist->offset + pagelist->length) & (cache_line_size - 1)))) { + char *fragments; + + if (down_interruptible(&drv_mgmt->free_fragments_sema)) { + cleanup_pagelistinfo(instance, pagelistinfo); + return NULL; + } + + WARN_ON(!drv_mgmt->free_fragments); + + down(&drv_mgmt->free_fragments_mutex); + fragments = drv_mgmt->free_fragments; + WARN_ON(!fragments); + drv_mgmt->free_fragments = *(char **)drv_mgmt->free_fragments; + up(&drv_mgmt->free_fragments_mutex); + pagelist->type = PAGELIST_READ_WITH_FRAGMENTS + + (fragments - drv_mgmt->fragments_base) / drv_mgmt->fragments_size; + } + + return pagelistinfo; +} + +static void +free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo, + int actual) +{ + struct vchiq_drv_mgmt *drv_mgmt; + struct pagelist *pagelist = pagelistinfo->pagelist; + struct page **pages = pagelistinfo->pages; + unsigned int num_pages = pagelistinfo->num_pages; + unsigned int cache_line_size; + + dev_dbg(instance->state->dev, "arm: %p, %d\n", pagelistinfo->pagelist, actual); + + drv_mgmt = dev_get_drvdata(instance->state->dev); + + /* + * NOTE: dma_unmap_sg must be called before the + * cpu can touch any of the data/pages. + */ + dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist, + pagelistinfo->num_pages, pagelistinfo->dma_dir); + pagelistinfo->scatterlist_mapped = 0; + + /* Deal with any partial cache lines (fragments) */ + cache_line_size = drv_mgmt->info->cache_line_size; + if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && drv_mgmt->fragments_base) { + char *fragments = drv_mgmt->fragments_base + + (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) * + drv_mgmt->fragments_size; + int head_bytes, tail_bytes; + + head_bytes = (cache_line_size - pagelist->offset) & + (cache_line_size - 1); + tail_bytes = (pagelist->offset + actual) & + (cache_line_size - 1); + + if ((actual >= 0) && (head_bytes != 0)) { + if (head_bytes > actual) + head_bytes = actual; + + memcpy_to_page(pages[0], pagelist->offset, + fragments, head_bytes); + } + if ((actual >= 0) && (head_bytes < actual) && + (tail_bytes != 0)) + memcpy_to_page(pages[num_pages - 1], + (pagelist->offset + actual) & + (PAGE_SIZE - 1) & ~(cache_line_size - 1), + fragments + cache_line_size, + tail_bytes); + + down(&drv_mgmt->free_fragments_mutex); + *(char **)fragments = drv_mgmt->free_fragments; + drv_mgmt->free_fragments = fragments; + up(&drv_mgmt->free_fragments_mutex); + up(&drv_mgmt->free_fragments_sema); + } + + /* Need to mark all the pages dirty. 
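+ * On a read the remote side has DMAed into these pages behind the
+ * CPU's back, so tell the mm layer before they are unpinned.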
*/ + if (pagelist->type != PAGELIST_WRITE && + pagelistinfo->pages_need_release) { + unsigned int i; + + for (i = 0; i < num_pages; i++) + set_page_dirty(pages[i]); + } + + cleanup_pagelistinfo(instance, pagelistinfo); +} + +static int +vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk) +{ + struct vchiq_pagelist_info *pagelistinfo; + + pagelistinfo = create_pagelist(instance, bulk); + + if (!pagelistinfo) + return -ENOMEM; + + bulk->dma_addr = pagelistinfo->dma_addr; + + /* + * Store the pagelistinfo address in remote_data, + * which isn't used by the slave. + */ + bulk->remote_data = pagelistinfo; + + return 0; +} + +static void +vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk) +{ + if (bulk && bulk->remote_data && bulk->actual) + free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data, + bulk->actual); +} + +/* Called with the bulk_mutex held */ +static void +abort_outstanding_bulks(struct vchiq_service *service, + struct vchiq_bulk_queue *queue) +{ + int is_tx = (queue == &service->bulk_tx); + + dev_dbg(service->state->dev, + "core: %d: aob:%d %cx - li=%x ri=%x p=%x\n", + service->state->id, service->localport, + is_tx ? 't' : 'r', queue->local_insert, + queue->remote_insert, queue->process); + + WARN_ON((int)(queue->local_insert - queue->process) < 0); + WARN_ON((int)(queue->remote_insert - queue->process) < 0); + + while ((queue->process != queue->local_insert) || + (queue->process != queue->remote_insert)) { + struct vchiq_bulk *bulk = &queue->bulks[BULK_INDEX(queue->process)]; + + if (queue->process == queue->remote_insert) { + /* fabricate a matching dummy bulk */ + bulk->remote_data = NULL; + bulk->remote_size = 0; + queue->remote_insert++; + } + + if (queue->process != queue->local_insert) { + vchiq_complete_bulk(service->instance, bulk); + + dev_dbg(service->state->dev, + "core_msg: %s %p4cc d:%d ABORTED - tx len:%d, rx len:%d\n", + is_tx ? "Send Bulk to" : "Recv Bulk from", + &service->base.fourcc, + service->remoteport, bulk->size, bulk->remote_size); + } else { + /* fabricate a matching dummy bulk */ + bulk->dma_addr = 0; + bulk->size = 0; + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED; + bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT : + VCHIQ_BULK_RECEIVE; + queue->local_insert++; + } + + queue->process++; + } +} + +static int +parse_open(struct vchiq_state *state, struct vchiq_header *header) +{ + const struct vchiq_open_payload *payload; + struct vchiq_openack_payload ack_payload; + struct vchiq_service *service = NULL; + int msgid, size; + int openack_id; + unsigned int localport, remoteport, fourcc; + short version, version_min; + + msgid = header->msgid; + size = header->size; + localport = VCHIQ_MSG_DSTPORT(msgid); + remoteport = VCHIQ_MSG_SRCPORT(msgid); + if (size < sizeof(struct vchiq_open_payload)) + goto fail_open; + + payload = (struct vchiq_open_payload *)header->data; + fourcc = payload->fourcc; + dev_dbg(state->dev, "core: %d: prs OPEN@%p (%d->'%p4cc')\n", + state->id, header, localport, &fourcc); + + service = get_listening_service(state, fourcc); + if (!service) + goto fail_open; + + /* A matching service exists */ + version = payload->version; + version_min = payload->version_min; + + if ((service->version < version_min) || (version < service->version_min)) { + /* Version mismatch */ + dev_err(state->dev, "%d: service %d (%p4cc) version mismatch - local (%d, min %d) vs. 
remote (%d, min %d)", + state->id, service->localport, &fourcc, + service->version, service->version_min, version, version_min); + vchiq_service_put(service); + service = NULL; + goto fail_open; + } + service->peer_version = version; + + if (service->srvstate != VCHIQ_SRVSTATE_LISTENING) + goto done; + + ack_payload.version = service->version; + openack_id = MAKE_OPENACK(service->localport, remoteport); + + if (state->version_common < VCHIQ_VERSION_SYNCHRONOUS_MODE) + service->sync = 0; + + /* Acknowledge the OPEN */ + if (service->sync) { + if (queue_message_sync(state, NULL, openack_id, + memcpy_copy_callback, + &ack_payload, + sizeof(ack_payload)) == -EAGAIN) + goto bail_not_ready; + + /* The service is now open */ + set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC); + } else { + if (queue_message(state, NULL, openack_id, + memcpy_copy_callback, &ack_payload, + sizeof(ack_payload), 0) == -EINTR) + goto bail_not_ready; + + /* The service is now open */ + set_service_state(service, VCHIQ_SRVSTATE_OPEN); + } + +done: + /* Success - the message has been dealt with */ + vchiq_service_put(service); + return 1; + +fail_open: + /* No available service, or an invalid request - send a CLOSE */ + if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)), + NULL, NULL, 0, 0) == -EINTR) + goto bail_not_ready; + + return 1; + +bail_not_ready: + if (service) + vchiq_service_put(service); + + return 0; +} + +/** + * parse_message() - parses a single message from the rx slot + * @state: vchiq state struct + * @header: message header + * + * Context: Process context + * + * Return: + * * >= 0 - size of the parsed message payload (without header) + * * -EINVAL - fatal error occurred, bail out is required + */ +static int +parse_message(struct vchiq_state *state, struct vchiq_header *header) +{ + struct vchiq_service *service = NULL; + unsigned int localport, remoteport; + int msgid, size, type, ret = -EINVAL; + int svc_fourcc; + + DEBUG_INITIALISE(state->local); + + DEBUG_VALUE(PARSE_HEADER, (int)(long)header); + msgid = header->msgid; + DEBUG_VALUE(PARSE_MSGID, msgid); + size = header->size; + type = VCHIQ_MSG_TYPE(msgid); + localport = VCHIQ_MSG_DSTPORT(msgid); + remoteport = VCHIQ_MSG_SRCPORT(msgid); + + if (type != VCHIQ_MSG_DATA) + VCHIQ_STATS_INC(state, ctrl_rx_count); + + switch (type) { + case VCHIQ_MSG_OPENACK: + case VCHIQ_MSG_CLOSE: + case VCHIQ_MSG_DATA: + case VCHIQ_MSG_BULK_RX: + case VCHIQ_MSG_BULK_TX: + case VCHIQ_MSG_BULK_RX_DONE: + case VCHIQ_MSG_BULK_TX_DONE: + service = find_service_by_port(state, localport); + if ((!service || + ((service->remoteport != remoteport) && + (service->remoteport != VCHIQ_PORT_FREE))) && + (localport == 0) && + (type == VCHIQ_MSG_CLOSE)) { + /* + * This could be a CLOSE from a client which + * hadn't yet received the OPENACK - look for + * the connected service + */ + if (service) + vchiq_service_put(service); + service = get_connected_service(state, remoteport); + if (service) + dev_warn(state->dev, + "core: %d: prs %s@%p (%d->%d) - found connected service %d\n", + state->id, msg_type_str(type), header, + remoteport, localport, service->localport); + } + + if (!service) { + dev_err(state->dev, + "core: %d: prs %s@%p (%d->%d) - invalid/closed service %d\n", + state->id, msg_type_str(type), header, remoteport, + localport, localport); + goto skip_message; + } + break; + default: + break; + } + + svc_fourcc = service ? 
service->base.fourcc + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?'); + + dev_dbg(state->dev, "core_msg: Rcvd Msg %s(%u) from %p4cc s:%d d:%d len:%d\n", + msg_type_str(type), type, &svc_fourcc, remoteport, localport, size); + if (size > 0) + vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size)); + + if (((unsigned long)header & VCHIQ_SLOT_MASK) + + calc_stride(size) > VCHIQ_SLOT_SIZE) { + dev_err(state->dev, "core: header %p (msgid %x) - size %x too big for slot\n", + header, (unsigned int)msgid, (unsigned int)size); + WARN(1, "oversized for slot\n"); + } + + switch (type) { + case VCHIQ_MSG_OPEN: + WARN_ON(VCHIQ_MSG_DSTPORT(msgid)); + if (!parse_open(state, header)) + goto bail_not_ready; + break; + case VCHIQ_MSG_OPENACK: + if (size >= sizeof(struct vchiq_openack_payload)) { + const struct vchiq_openack_payload *payload = + (struct vchiq_openack_payload *) + header->data; + service->peer_version = payload->version; + } + dev_dbg(state->dev, + "core: %d: prs OPENACK@%p,%x (%d->%d) v:%d\n", + state->id, header, size, remoteport, localport, + service->peer_version); + if (service->srvstate == VCHIQ_SRVSTATE_OPENING) { + service->remoteport = remoteport; + set_service_state(service, VCHIQ_SRVSTATE_OPEN); + complete(&service->remove_event); + } else { + dev_err(state->dev, "core: OPENACK received in state %s\n", + srvstate_names[service->srvstate]); + } + break; + case VCHIQ_MSG_CLOSE: + WARN_ON(size); /* There should be no data */ + + dev_dbg(state->dev, "core: %d: prs CLOSE@%p (%d->%d)\n", + state->id, header, remoteport, localport); + + mark_service_closing_internal(service, 1); + + if (vchiq_close_service_internal(service, CLOSE_RECVD) == -EAGAIN) + goto bail_not_ready; + + dev_dbg(state->dev, "core: Close Service %p4cc s:%u d:%d\n", + &service->base.fourcc, service->localport, service->remoteport); + break; + case VCHIQ_MSG_DATA: + dev_dbg(state->dev, "core: %d: prs DATA@%p,%x (%d->%d)\n", + state->id, header, size, remoteport, localport); + + if ((service->remoteport == remoteport) && + (service->srvstate == VCHIQ_SRVSTATE_OPEN)) { + header->msgid = msgid | VCHIQ_MSGID_CLAIMED; + claim_slot(state->rx_info); + DEBUG_TRACE(PARSE_LINE); + if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header, + NULL) == -EAGAIN) { + DEBUG_TRACE(PARSE_LINE); + goto bail_not_ready; + } + VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count); + VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes, size); + } else { + VCHIQ_STATS_INC(state, error_count); + } + break; + case VCHIQ_MSG_CONNECT: + dev_dbg(state->dev, "core: %d: prs CONNECT@%p\n", + state->id, header); + state->version_common = ((struct vchiq_slot_zero *) + state->slot_data)->version; + complete(&state->connect); + break; + case VCHIQ_MSG_BULK_RX: + case VCHIQ_MSG_BULK_TX: + /* + * We should never receive a bulk request from the + * other side since we're not setup to perform as the + * master. + */ + WARN_ON(1); + break; + case VCHIQ_MSG_BULK_RX_DONE: + case VCHIQ_MSG_BULK_TX_DONE: + if ((service->remoteport == remoteport) && + (service->srvstate != VCHIQ_SRVSTATE_FREE)) { + struct vchiq_bulk_queue *queue; + struct vchiq_bulk *bulk; + + queue = (type == VCHIQ_MSG_BULK_RX_DONE) ? 
+ &service->bulk_rx : &service->bulk_tx; + + DEBUG_TRACE(PARSE_LINE); + if (mutex_lock_killable(&service->bulk_mutex)) { + DEBUG_TRACE(PARSE_LINE); + goto bail_not_ready; + } + if ((int)(queue->remote_insert - + queue->local_insert) >= 0) { + dev_err(state->dev, + "core: %d: prs %s@%p (%d->%d) unexpected (ri=%d,li=%d)\n", + state->id, msg_type_str(type), header, remoteport, + localport, queue->remote_insert, queue->local_insert); + mutex_unlock(&service->bulk_mutex); + break; + } + if (queue->process != queue->remote_insert) { + dev_err(state->dev, "%s: p %x != ri %x\n", + __func__, queue->process, + queue->remote_insert); + mutex_unlock(&service->bulk_mutex); + goto bail_not_ready; + } + + bulk = &queue->bulks[BULK_INDEX(queue->remote_insert)]; + bulk->actual = *(int *)header->data; + queue->remote_insert++; + + dev_dbg(state->dev, "core: %d: prs %s@%p (%d->%d) %x@%pad\n", + state->id, msg_type_str(type), header, remoteport, + localport, bulk->actual, &bulk->dma_addr); + + dev_dbg(state->dev, "core: %d: prs:%d %cx li=%x ri=%x p=%x\n", + state->id, localport, + (type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't', + queue->local_insert, queue->remote_insert, queue->process); + + DEBUG_TRACE(PARSE_LINE); + WARN_ON(queue->process == queue->local_insert); + vchiq_complete_bulk(service->instance, bulk); + queue->process++; + mutex_unlock(&service->bulk_mutex); + DEBUG_TRACE(PARSE_LINE); + notify_bulks(service, queue, RETRY_POLL); + DEBUG_TRACE(PARSE_LINE); + } + break; + case VCHIQ_MSG_PADDING: + dev_dbg(state->dev, "core: %d: prs PADDING@%p,%x\n", + state->id, header, size); + break; + case VCHIQ_MSG_PAUSE: + /* If initiated, signal the application thread */ + dev_dbg(state->dev, "core: %d: prs PAUSE@%p,%x\n", + state->id, header, size); + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) { + dev_err(state->dev, "core: %d: PAUSE received in state PAUSED\n", + state->id); + break; + } + if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) { + /* Send a PAUSE in response */ + if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0, + QMFLAGS_NO_MUTEX_UNLOCK) == -EINTR) + goto bail_not_ready; + } + /* At this point slot_mutex is held */ + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED); + break; + case VCHIQ_MSG_RESUME: + dev_dbg(state->dev, "core: %d: prs RESUME@%p,%x\n", + state->id, header, size); + /* Release the slot mutex */ + mutex_unlock(&state->slot_mutex); + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED); + break; + + case VCHIQ_MSG_REMOTE_USE: + vchiq_on_remote_use(state); + break; + case VCHIQ_MSG_REMOTE_RELEASE: + vchiq_on_remote_release(state); + break; + case VCHIQ_MSG_REMOTE_USE_ACTIVE: + break; + + default: + dev_err(state->dev, "core: %d: prs invalid msgid %x@%p,%x\n", + state->id, msgid, header, size); + WARN(1, "invalid message\n"); + break; + } + +skip_message: + ret = size; + +bail_not_ready: + if (service) + vchiq_service_put(service); + + return ret; +} + +/* Called by the slot handler thread */ +static void +parse_rx_slots(struct vchiq_state *state) +{ + struct vchiq_shared_state *remote = state->remote; + int tx_pos; + + DEBUG_INITIALISE(state->local); + + tx_pos = remote->tx_pos; + + while (state->rx_pos != tx_pos) { + struct vchiq_header *header; + int size; + + DEBUG_TRACE(PARSE_LINE); + if (!state->rx_data) { + int rx_index; + + WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK); + rx_index = remote->slot_queue[ + SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)]; + state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state, + rx_index); + state->rx_info = 
SLOT_INFO_FROM_INDEX(state, rx_index); + + /* + * Initialise use_count to one, and increment + * release_count at the end of the slot to avoid + * releasing the slot prematurely. + */ + state->rx_info->use_count = 1; + state->rx_info->release_count = 0; + } + + header = (struct vchiq_header *)(state->rx_data + + (state->rx_pos & VCHIQ_SLOT_MASK)); + size = parse_message(state, header); + if (size < 0) + return; + + state->rx_pos += calc_stride(size); + + DEBUG_TRACE(PARSE_LINE); + /* + * Perform some housekeeping when the end of the slot is + * reached. + */ + if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) { + /* Remove the extra reference count. */ + release_slot(state, state->rx_info, NULL, NULL); + state->rx_data = NULL; + } + } +} + +/** + * handle_poll() - handle service polling and other rare conditions + * @state: vchiq state struct + * + * Context: Process context + * + * Return: + * * 0 - poll handled successful + * * -EAGAIN - retry later + */ +static int +handle_poll(struct vchiq_state *state) +{ + switch (state->conn_state) { + case VCHIQ_CONNSTATE_CONNECTED: + /* Poll the services as requested */ + poll_services(state); + break; + + case VCHIQ_CONNSTATE_PAUSING: + if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0, + QMFLAGS_NO_MUTEX_UNLOCK) != -EINTR) { + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT); + } else { + /* Retry later */ + return -EAGAIN; + } + break; + + case VCHIQ_CONNSTATE_RESUMING: + if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0, + QMFLAGS_NO_MUTEX_LOCK) != -EINTR) { + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED); + } else { + /* + * This should really be impossible, + * since the PAUSE should have flushed + * through outstanding messages. + */ + dev_err(state->dev, "core: Failed to send RESUME message\n"); + } + break; + default: + break; + } + + return 0; +} + +/* Called by the slot handler thread */ +static int +slot_handler_func(void *v) +{ + struct vchiq_state *state = v; + struct vchiq_shared_state *local = state->local; + int ret; + + DEBUG_INITIALISE(local); + + while (!kthread_should_stop()) { + DEBUG_COUNT(SLOT_HANDLER_COUNT); + DEBUG_TRACE(SLOT_HANDLER_LINE); + ret = remote_event_wait(&state->trigger_event, &local->trigger); + if (ret) + return ret; + + /* Ensure that reads don't overtake the remote_event_wait. 
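+ * It pairs with the peer's write barrier issued before ringing the
+ * doorbell, making the new tx_pos and slot contents visible here.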
*/ + rmb(); + + DEBUG_TRACE(SLOT_HANDLER_LINE); + if (state->poll_needed) { + state->poll_needed = 0; + + /* + * Handle service polling and other rare conditions here + * out of the mainline code + */ + if (handle_poll(state) == -EAGAIN) + state->poll_needed = 1; + } + + DEBUG_TRACE(SLOT_HANDLER_LINE); + parse_rx_slots(state); + } + return 0; +} + +/* Called by the recycle thread */ +static int +recycle_func(void *v) +{ + struct vchiq_state *state = v; + struct vchiq_shared_state *local = state->local; + u32 *found; + size_t length; + int ret; + + length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES); + + found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found), + GFP_KERNEL); + if (!found) + return -ENOMEM; + + while (!kthread_should_stop()) { + ret = remote_event_wait(&state->recycle_event, &local->recycle); + if (ret) + return ret; + + process_free_queue(state, found, length); + } + return 0; +} + +/* Called by the sync thread */ +static int +sync_func(void *v) +{ + struct vchiq_state *state = v; + struct vchiq_shared_state *local = state->local; + struct vchiq_header *header = + (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state, + state->remote->slot_sync); + int svc_fourcc; + int ret; + + while (!kthread_should_stop()) { + struct vchiq_service *service; + int msgid, size; + int type; + unsigned int localport, remoteport; + + ret = remote_event_wait(&state->sync_trigger_event, &local->sync_trigger); + if (ret) + return ret; + + /* Ensure that reads don't overtake the remote_event_wait. */ + rmb(); + + msgid = header->msgid; + size = header->size; + type = VCHIQ_MSG_TYPE(msgid); + localport = VCHIQ_MSG_DSTPORT(msgid); + remoteport = VCHIQ_MSG_SRCPORT(msgid); + + service = find_service_by_port(state, localport); + + if (!service) { + dev_err(state->dev, + "sync: %d: sf %s@%p (%d->%d) - invalid/closed service %d\n", + state->id, msg_type_str(type), header, remoteport, + localport, localport); + release_message_sync(state, header); + continue; + } + + svc_fourcc = service->base.fourcc; + + dev_dbg(state->dev, "sync: Rcvd Msg %s from %p4cc s:%d d:%d len:%d\n", + msg_type_str(type), &svc_fourcc, remoteport, localport, size); + if (size > 0) + vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size)); + + switch (type) { + case VCHIQ_MSG_OPENACK: + if (size >= sizeof(struct vchiq_openack_payload)) { + const struct vchiq_openack_payload *payload = + (struct vchiq_openack_payload *) + header->data; + service->peer_version = payload->version; + } + dev_err(state->dev, "sync: %d: sf OPENACK@%p,%x (%d->%d) v:%d\n", + state->id, header, size, remoteport, localport, + service->peer_version); + if (service->srvstate == VCHIQ_SRVSTATE_OPENING) { + service->remoteport = remoteport; + set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC); + service->sync = 1; + complete(&service->remove_event); + } + release_message_sync(state, header); + break; + + case VCHIQ_MSG_DATA: + dev_dbg(state->dev, "sync: %d: sf DATA@%p,%x (%d->%d)\n", + state->id, header, size, remoteport, localport); + + if ((service->remoteport == remoteport) && + (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) { + if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header, + NULL) == -EAGAIN) + dev_err(state->dev, + "sync: error: synchronous callback to service %d returns -EAGAIN\n", + localport); + } + break; + + default: + dev_err(state->dev, "sync: error: %d: sf unexpected msgid %x@%p,%x\n", + state->id, msgid, header, size); + release_message_sync(state, header); + break; + } + + 
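+ /* Drop the reference taken by find_service_by_port() above. */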
vchiq_service_put(service);
+	}
+
+	return 0;
+}
+
+inline const char *
+get_conn_state_name(enum vchiq_connstate conn_state)
+{
+	return conn_state_names[conn_state];
+}
+
+struct vchiq_slot_zero *
+vchiq_init_slots(struct device *dev, void *mem_base, int mem_size)
+{
+	int mem_align =
+		(int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
+	struct vchiq_slot_zero *slot_zero =
+		(struct vchiq_slot_zero *)(mem_base + mem_align);
+	int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE;
+	int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
+
+	check_sizes();
+
+	/* Ensure there is enough memory to run an absolutely minimal system */
+	num_slots -= first_data_slot;
+
+	if (num_slots < 4) {
+		dev_err(dev, "core: %s: Insufficient memory %x bytes\n",
+			__func__, mem_size);
+		return NULL;
+	}
+
+	memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
+
+	slot_zero->magic = VCHIQ_MAGIC;
+	slot_zero->version = VCHIQ_VERSION;
+	slot_zero->version_min = VCHIQ_VERSION_MIN;
+	slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
+	slot_zero->slot_size = VCHIQ_SLOT_SIZE;
+	slot_zero->max_slots = VCHIQ_MAX_SLOTS;
+	slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
+
+	slot_zero->master.slot_sync = first_data_slot;
+	slot_zero->master.slot_first = first_data_slot + 1;
+	slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1;
+	slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2);
+	slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1;
+	slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
+
+	return slot_zero;
+}
+
+int
+vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev)
+{
+	struct vchiq_shared_state *local;
+	struct vchiq_shared_state *remote;
+	char threadname[16];
+	int i, ret;
+
+	local = &slot_zero->slave;
+	remote = &slot_zero->master;
+
+	if (local->initialised) {
+		if (remote->initialised)
+			dev_err(dev, "local state has already been initialised\n");
+		else
+			dev_err(dev, "master/slave mismatch two slaves\n");
+
+		return -EINVAL;
+	}
+
+	memset(state, 0, sizeof(struct vchiq_state));
+
+	state->dev = dev;
+
+	/*
+	 * initialize shared state pointers
+	 */
+
+	state->local = local;
+	state->remote = remote;
+	state->slot_data = (struct vchiq_slot *)slot_zero;
+
+	/*
+	 * initialize events and mutexes
+	 */
+
+	init_completion(&state->connect);
+	mutex_init(&state->mutex);
+	mutex_init(&state->slot_mutex);
+	mutex_init(&state->recycle_mutex);
+	mutex_init(&state->sync_mutex);
+
+	spin_lock_init(&state->msg_queue_spinlock);
+	spin_lock_init(&state->bulk_waiter_spinlock);
+	spin_lock_init(&state->quota_spinlock);
+
+	init_completion(&state->slot_available_event);
+	init_completion(&state->data_quota_event);
+
+	state->slot_queue_available = 0;
+
+	for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
+		struct vchiq_service_quota *quota = &state->service_quotas[i];
+
+		init_completion(&quota->quota_event);
+	}
+
+	for (i = local->slot_first; i <= local->slot_last; i++) {
+		local->slot_queue[state->slot_queue_available] = i;
+		state->slot_queue_available++;
+		complete(&state->slot_available_event);
+	}
+
+	state->default_slot_quota = state->slot_queue_available / 2;
+	state->default_message_quota =
+		min_t(unsigned short, state->default_slot_quota * 256, ~0);
+
+	state->previous_data_index = -1;
+	state->data_use_count = 0;
+	state->data_quota = state->slot_queue_available - 1;
+
+	remote_event_create(&state->trigger_event, &local->trigger);
+	local->tx_pos = 0;
+	remote_event_create(&state->recycle_event,
&local->recycle); + local->slot_queue_recycle = state->slot_queue_available; + remote_event_create(&state->sync_trigger_event, &local->sync_trigger); + remote_event_create(&state->sync_release_event, &local->sync_release); + + /* At start-of-day, the slot is empty and available */ + ((struct vchiq_header *) + SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid = + VCHIQ_MSGID_PADDING; + remote_event_signal_local(&state->sync_release_event, &local->sync_release); + + local->debug[DEBUG_ENTRIES] = DEBUG_MAX; + + ret = vchiq_platform_init_state(state); + if (ret) + return ret; + + /* + * bring up slot handler thread + */ + snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id); + state->slot_handler_thread = kthread_create(&slot_handler_func, (void *)state, threadname); + + if (IS_ERR(state->slot_handler_thread)) { + dev_err(state->dev, "couldn't create thread %s\n", threadname); + return PTR_ERR(state->slot_handler_thread); + } + set_user_nice(state->slot_handler_thread, -19); + + snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id); + state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname); + if (IS_ERR(state->recycle_thread)) { + dev_err(state->dev, "couldn't create thread %s\n", threadname); + ret = PTR_ERR(state->recycle_thread); + goto fail_free_handler_thread; + } + set_user_nice(state->recycle_thread, -19); + + snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id); + state->sync_thread = kthread_create(&sync_func, (void *)state, threadname); + if (IS_ERR(state->sync_thread)) { + dev_err(state->dev, "couldn't create thread %s\n", threadname); + ret = PTR_ERR(state->sync_thread); + goto fail_free_recycle_thread; + } + set_user_nice(state->sync_thread, -20); + + wake_up_process(state->slot_handler_thread); + wake_up_process(state->recycle_thread); + wake_up_process(state->sync_thread); + + /* Indicate readiness to the other side */ + local->initialised = 1; + + return 0; + +fail_free_recycle_thread: + kthread_stop(state->recycle_thread); +fail_free_handler_thread: + kthread_stop(state->slot_handler_thread); + + return ret; +} + +void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_header *header) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + int pos; + + if (!service) + return; + + while (service->msg_queue_write == service->msg_queue_read + + VCHIQ_MAX_SLOTS) { + if (wait_for_completion_interruptible(&service->msg_queue_pop)) + flush_signals(current); + } + + pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1); + service->msg_queue_write++; + service->msg_queue[pos] = header; + + complete(&service->msg_queue_push); +} +EXPORT_SYMBOL(vchiq_msg_queue_push); + +struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + struct vchiq_header *header; + int pos; + + if (!service) + return NULL; + + if (service->msg_queue_write == service->msg_queue_read) + return NULL; + + while (service->msg_queue_write == service->msg_queue_read) { + if (wait_for_completion_interruptible(&service->msg_queue_push)) + flush_signals(current); + } + + pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1); + service->msg_queue_read++; + header = service->msg_queue[pos]; + + complete(&service->msg_queue_pop); + + return header; +} +EXPORT_SYMBOL(vchiq_msg_hold); + +static int vchiq_validate_params(struct vchiq_state *state, + const struct 
vchiq_service_params_kernel *params) +{ + if (!params->callback || !params->fourcc) { + dev_err(state->dev, "Can't add service, invalid params\n"); + return -EINVAL; + } + + return 0; +} + +/* Called from application thread when a client or server service is created. */ +struct vchiq_service * +vchiq_add_service_internal(struct vchiq_state *state, + const struct vchiq_service_params_kernel *params, + int srvstate, struct vchiq_instance *instance, + void (*userdata_term)(void *userdata)) +{ + struct vchiq_service *service; + struct vchiq_service __rcu **pservice = NULL; + struct vchiq_service_quota *quota; + int ret; + int i; + + ret = vchiq_validate_params(state, params); + if (ret) + return NULL; + + service = kzalloc(sizeof(*service), GFP_KERNEL); + if (!service) + return service; + + service->base.fourcc = params->fourcc; + service->base.callback = params->callback; + service->base.userdata = params->userdata; + service->handle = VCHIQ_SERVICE_HANDLE_INVALID; + kref_init(&service->ref_count); + service->srvstate = VCHIQ_SRVSTATE_FREE; + service->userdata_term = userdata_term; + service->localport = VCHIQ_PORT_FREE; + service->remoteport = VCHIQ_PORT_FREE; + + service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ? + VCHIQ_FOURCC_INVALID : params->fourcc; + service->auto_close = 1; + atomic_set(&service->poll_flags, 0); + service->version = params->version; + service->version_min = params->version_min; + service->state = state; + service->instance = instance; + init_completion(&service->remove_event); + init_completion(&service->bulk_remove_event); + init_completion(&service->msg_queue_pop); + init_completion(&service->msg_queue_push); + mutex_init(&service->bulk_mutex); + + /* + * Although it is perfectly possible to use a spinlock + * to protect the creation of services, it is overkill as it + * disables interrupts while the array is searched. + * The only danger is of another thread trying to create a + * service - service deletion is safe. + * Therefore it is preferable to use state->mutex which, + * although slower to claim, doesn't block interrupts while + * it is held. + */ + + mutex_lock(&state->mutex); + + /* Prepare to use a previously unused service */ + if (state->unused_service < VCHIQ_MAX_SERVICES) + pservice = &state->services[state->unused_service]; + + if (srvstate == VCHIQ_SRVSTATE_OPENING) { + for (i = 0; i < state->unused_service; i++) { + if (!rcu_access_pointer(state->services[i])) { + pservice = &state->services[i]; + break; + } + } + } else { + rcu_read_lock(); + for (i = (state->unused_service - 1); i >= 0; i--) { + struct vchiq_service *srv; + + srv = rcu_dereference(state->services[i]); + if (!srv) { + pservice = &state->services[i]; + } else if ((srv->public_fourcc == params->fourcc) && + ((srv->instance != instance) || + (srv->base.callback != params->callback))) { + /* + * There is another server using this + * fourcc which doesn't match. 
+				 */
+				pservice = NULL;
+				break;
+			}
+		}
+		rcu_read_unlock();
+	}
+
+	if (pservice) {
+		service->localport = (pservice - state->services);
+		if (!handle_seq)
+			handle_seq = VCHIQ_MAX_STATES *
+				 VCHIQ_MAX_SERVICES;
+		service->handle = handle_seq |
+			(state->id * VCHIQ_MAX_SERVICES) |
+			service->localport;
+		handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
+		rcu_assign_pointer(*pservice, service);
+		if (pservice == &state->services[state->unused_service])
+			state->unused_service++;
+	}
+
+	mutex_unlock(&state->mutex);
+
+	if (!pservice) {
+		kfree(service);
+		return NULL;
+	}
+
+	quota = &state->service_quotas[service->localport];
+	quota->slot_quota = state->default_slot_quota;
+	quota->message_quota = state->default_message_quota;
+	if (quota->slot_use_count == 0)
+		quota->previous_tx_index =
+			SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
+			- 1;
+
+	/* Bring this service online */
+	set_service_state(service, srvstate);
+
+	dev_dbg(state->dev, "core_msg: %s Service %p4cc SrcPort:%d\n",
+		(srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
+		&params->fourcc, service->localport);
+
+	/* Don't unlock the service - leave it with a ref_count of 1. */
+
+	return service;
+}
+
+int
+vchiq_open_service_internal(struct vchiq_service *service, int client_id)
+{
+	struct vchiq_open_payload payload = {
+		service->base.fourcc,
+		client_id,
+		service->version,
+		service->version_min
+	};
+	int status = 0;
+
+	service->client_id = client_id;
+	vchiq_use_service_internal(service);
+	status = queue_message(service->state,
+			       NULL, MAKE_OPEN(service->localport),
+			       memcpy_copy_callback,
+			       &payload,
+			       sizeof(payload),
+			       QMFLAGS_IS_BLOCKING);
+
+	if (status)
+		return status;
+
+	/* Wait for the ACK/NAK */
+	if (wait_for_completion_interruptible(&service->remove_event)) {
+		status = -EAGAIN;
+		vchiq_release_service_internal(service);
+	} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
+		   (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
+		if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
+			dev_err(service->state->dev,
+				"core: %d: osi - srvstate = %s (ref %u)\n",
+				service->state->id, srvstate_names[service->srvstate],
+				kref_read(&service->ref_count));
+		status = -EINVAL;
+		VCHIQ_SERVICE_STATS_INC(service, error_count);
+		vchiq_release_service_internal(service);
+	}
+
+	return status;
+}
+
+static void
+release_service_messages(struct vchiq_service *service)
+{
+	struct vchiq_state *state = service->state;
+	int slot_last = state->remote->slot_last;
+	int i;
+
+	/* Release any claimed messages aimed at this service */
+
+	if (service->sync) {
+		struct vchiq_header *header =
+			(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
+						state->remote->slot_sync);
+		if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
+			release_message_sync(state, header);
+
+		return;
+	}
+
+	for (i = state->remote->slot_first; i <= slot_last; i++) {
+		struct vchiq_slot_info *slot_info =
+			SLOT_INFO_FROM_INDEX(state, i);
+		unsigned int pos, end;
+		char *data;
+
+		if (slot_info->release_count == slot_info->use_count)
+			continue;
+
+		data = (char *)SLOT_DATA_FROM_INDEX(state, i);
+		end = VCHIQ_SLOT_SIZE;
+		if (data == state->rx_data)
+			/*
+			 * This buffer is still being read from - stop
+			 * at the current read position
+			 */
+			end = state->rx_pos & VCHIQ_SLOT_MASK;
+
+		pos = 0;
+
+		while (pos < end) {
+			struct vchiq_header *header =
+				(struct vchiq_header *)(data + pos);
+			int msgid = header->msgid;
+			int port = VCHIQ_MSG_DSTPORT(msgid);
+
+			if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
+				dev_dbg(state->dev, "core: fsi - hdr %p\n", header);
+				release_slot(state, slot_info, header, NULL);
+			}
+			pos += calc_stride(header->size);
+			if (pos > VCHIQ_SLOT_SIZE) {
+				dev_err(state->dev,
+					"core: fsi - pos %x: header %p, msgid %x, header->msgid %x, header->size %x\n",
+					pos, header, msgid, header->msgid, header->size);
+				WARN(1, "invalid slot position\n");
+			}
+		}
+	}
+}
+
+static int
+do_abort_bulks(struct vchiq_service *service)
+{
+	int status;
+
+	/* Abort any outstanding bulk transfers */
+	if (mutex_lock_killable(&service->bulk_mutex))
+		return 0;
+	abort_outstanding_bulks(service, &service->bulk_tx);
+	abort_outstanding_bulks(service, &service->bulk_rx);
+	mutex_unlock(&service->bulk_mutex);
+
+	status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
+	if (status)
+		return 0;
+
+	status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
+	return !status;
+}
+
+static int
+close_service_complete(struct vchiq_service *service, int failstate)
+{
+	int status;
+	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
+	int newstate;
+
+	switch (service->srvstate) {
+	case VCHIQ_SRVSTATE_OPEN:
+	case VCHIQ_SRVSTATE_CLOSESENT:
+	case VCHIQ_SRVSTATE_CLOSERECVD:
+		if (is_server) {
+			if (service->auto_close) {
+				service->client_id = 0;
+				service->remoteport = VCHIQ_PORT_FREE;
+				newstate = VCHIQ_SRVSTATE_LISTENING;
+			} else {
+				newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
+			}
+		} else {
+			newstate = VCHIQ_SRVSTATE_CLOSED;
+		}
+		set_service_state(service, newstate);
+		break;
+	case VCHIQ_SRVSTATE_LISTENING:
+		break;
+	default:
+		dev_err(service->state->dev, "core: (%x) called in state %s\n",
+			service->handle, srvstate_names[service->srvstate]);
+		WARN(1, "%s in unexpected state\n", __func__);
+		return -EINVAL;
+	}
+
+	status = make_service_callback(service, VCHIQ_SERVICE_CLOSED, NULL, NULL);
+
+	if (status != -EAGAIN) {
+		int uc = service->service_use_count;
+		int i;
+		/* Complete the close process */
+		for (i = 0; i < uc; i++)
+			/*
+			 * cater for cases where close is forced and the
+			 * client may not close all its handles
+			 */
+			vchiq_release_service_internal(service);
+
+		service->client_id = 0;
+		service->remoteport = VCHIQ_PORT_FREE;
+
+		if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
+			vchiq_free_service_internal(service);
+		} else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
+			if (is_server)
+				service->closing = 0;
+
+			complete(&service->remove_event);
+		}
+	} else {
+		set_service_state(service, failstate);
+	}
+
+	return status;
+}
+
+/*
+ * Prepares a bulk transfer to be queued. The function is interruptible and is
+ * intended to be called from user threads. It may return -EAGAIN to indicate
+ * that a signal has been received and the call should be retried after being
+ * returned to user context.
+ */
+static int
+vchiq_bulk_xfer_queue_msg_killable(struct vchiq_service *service,
+				   struct vchiq_bulk *bulk_params)
+{
+	struct vchiq_bulk_queue *queue;
+	struct bulk_waiter *bulk_waiter = NULL;
+	struct vchiq_bulk *bulk;
+	struct vchiq_state *state = service->state;
+	const char dir_char = (bulk_params->dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
+	const int dir_msgtype = (bulk_params->dir == VCHIQ_BULK_TRANSMIT) ?
+		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
+	int status = -EINVAL;
+	int payload[2];
+
+	if (bulk_params->mode == VCHIQ_BULK_MODE_BLOCKING) {
+		bulk_waiter = bulk_params->waiter;
+		init_completion(&bulk_waiter->event);
+		bulk_waiter->actual = 0;
+		bulk_waiter->bulk = NULL;
+	}
+
+	queue = (bulk_params->dir == VCHIQ_BULK_TRANSMIT) ?
+		&service->bulk_tx : &service->bulk_rx;
+
+	if (mutex_lock_killable(&service->bulk_mutex))
+		return -EINTR;
+
+	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
+		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
+		do {
+			mutex_unlock(&service->bulk_mutex);
+			if (wait_for_completion_killable(&service->bulk_remove_event))
+				return -EINTR;
+			if (mutex_lock_killable(&service->bulk_mutex))
+				return -EINTR;
+		} while (queue->local_insert == queue->remove +
+				VCHIQ_NUM_SERVICE_BULKS);
+	}
+
+	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
+
+	/* Initialize the 'bulk' slot with bulk parameters passed in. */
+	bulk->mode = bulk_params->mode;
+	bulk->dir = bulk_params->dir;
+	bulk->waiter = bulk_params->waiter;
+	bulk->cb_data = bulk_params->cb_data;
+	bulk->cb_userdata = bulk_params->cb_userdata;
+	bulk->size = bulk_params->size;
+	bulk->offset = bulk_params->offset;
+	bulk->uoffset = bulk_params->uoffset;
+	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
+
+	if (vchiq_prepare_bulk_data(service->instance, bulk))
+		goto unlock_error_exit;
+
+	/*
+	 * Ensure that the bulk data record is visible to the peer
+	 * before proceeding.
+	 */
+	wmb();
+
+	dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %p\n",
+		state->id, service->localport, service->remoteport,
+		dir_char, bulk->size, &bulk->dma_addr, bulk->cb_data);
+
+	/*
+	 * The slot mutex must be held when the service is being closed, so
+	 * claim it here to ensure that isn't happening
+	 */
+	if (mutex_lock_killable(&state->slot_mutex)) {
+		status = -EINTR;
+		goto cancel_bulk_error_exit;
+	}
+
+	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
+		goto unlock_both_error_exit;
+
+	payload[0] = lower_32_bits(bulk->dma_addr);
+	payload[1] = bulk->size;
+	status = queue_message(state,
+			       NULL,
+			       VCHIQ_MAKE_MSG(dir_msgtype,
+					      service->localport,
+					      service->remoteport),
+			       memcpy_copy_callback,
+			       &payload,
+			       sizeof(payload),
+			       QMFLAGS_IS_BLOCKING |
+			       QMFLAGS_NO_MUTEX_LOCK |
+			       QMFLAGS_NO_MUTEX_UNLOCK);
+	if (status)
+		goto unlock_both_error_exit;
+
+	queue->local_insert++;
+
+	mutex_unlock(&state->slot_mutex);
+	mutex_unlock(&service->bulk_mutex);
+
+	dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n",
+		state->id, service->localport, dir_char, queue->local_insert,
+		queue->remote_insert, queue->process);
+
+	if (bulk_waiter) {
+		bulk_waiter->bulk = bulk;
+		if (wait_for_completion_killable(&bulk_waiter->event))
+			status = -EINTR;
+		else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
+			status = -EINVAL;
+	}
+
+	return status;
+
+unlock_both_error_exit:
+	mutex_unlock(&state->slot_mutex);
+cancel_bulk_error_exit:
+	vchiq_complete_bulk(service->instance, bulk);
+unlock_error_exit:
+	mutex_unlock(&service->bulk_mutex);
+
+	return status;
+}
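+
+/*
+ * For orientation, a kernel-side caller reaches the path above through the
+ * vchiq_bulk_xfer_blocking() wrapper later in this file. A minimal sketch
+ * (illustrative only, not part of the driver; "instance", "handle" and the
+ * buffer variables are assumed to exist at the call site):
+ *
+ *	struct bulk_waiter waiter;
+ *	struct vchiq_bulk params = {
+ *		.mode   = VCHIQ_BULK_MODE_BLOCKING,
+ *		.dir    = VCHIQ_BULK_TRANSMIT,
+ *		.offset = kbuf,		// kernel buffer; user callers set .uoffset
+ *		.size   = kbuf_size,
+ *		.waiter = &waiter,	// completed by the bulk notification path
+ *	};
+ *	int ret = vchiq_bulk_xfer_blocking(instance, handle, &params);
+ *
+ * A blocking transfer returns 0 once the peer has consumed the data, -EINTR
+ * if interrupted by a fatal signal, or -EINVAL if the transfer was aborted.
+ */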
+
+/* Called by the slot handler */
+int
+vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
+{
+	struct vchiq_state *state = service->state;
+	int status = 0;
+	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
+	int close_id = MAKE_CLOSE(service->localport,
+				  VCHIQ_MSG_DSTPORT(service->remoteport));
+
+	dev_dbg(state->dev, "core: %d: csi:%d,%d (%s)\n",
+		service->state->id, service->localport, close_recvd,
+		srvstate_names[service->srvstate]);
+
+	switch (service->srvstate) {
+	case VCHIQ_SRVSTATE_CLOSED:
+	case VCHIQ_SRVSTATE_HIDDEN:
+	case VCHIQ_SRVSTATE_LISTENING:
+	case VCHIQ_SRVSTATE_CLOSEWAIT:
+		if (close_recvd) {
+			dev_err(state->dev, "core: (1) called in state %s\n",
+				srvstate_names[service->srvstate]);
+			break;
+		} else if (!is_server) {
+			vchiq_free_service_internal(service);
+			break;
+		}
+
+		if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
+			status = -EINVAL;
+		} else {
+			service->client_id = 0;
+			service->remoteport = VCHIQ_PORT_FREE;
+			if (service->srvstate == VCHIQ_SRVSTATE_CLOSEWAIT)
+				set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
+		}
+		complete(&service->remove_event);
+		break;
+	case VCHIQ_SRVSTATE_OPENING:
+		if (close_recvd) {
+			/* The open was rejected - tell the user */
+			set_service_state(service, VCHIQ_SRVSTATE_CLOSEWAIT);
+			complete(&service->remove_event);
+		} else {
+			/* Shutdown mid-open - let the other side know */
+			status = queue_message(state, service, close_id, NULL, NULL, 0, 0);
+		}
+		break;
+
+	case VCHIQ_SRVSTATE_OPENSYNC:
+		mutex_lock(&state->sync_mutex);
+		fallthrough;
+	case VCHIQ_SRVSTATE_OPEN:
+		if (close_recvd) {
+			if (!do_abort_bulks(service))
+				status = -EAGAIN;
+		}
+
+		release_service_messages(service);
+
+		if (!status)
+			status = queue_message(state, service, close_id, NULL,
+					       NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
+
+		if (status) {
+			if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
+				mutex_unlock(&state->sync_mutex);
+			break;
+		}
+
+		if (!close_recvd) {
+			/* Change the state while the mutex is still held */
+			set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
+			mutex_unlock(&state->slot_mutex);
+			if (service->sync)
+				mutex_unlock(&state->sync_mutex);
+			break;
+		}
+
+		/* Change the state while the mutex is still held */
+		set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
+		mutex_unlock(&state->slot_mutex);
+		if (service->sync)
+			mutex_unlock(&state->sync_mutex);
+
+		status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
+		break;
+
+	case VCHIQ_SRVSTATE_CLOSESENT:
+		if (!close_recvd)
+			/* This happens when a process is killed mid-close */
+			break;
+
+		if (!do_abort_bulks(service)) {
+			status = -EAGAIN;
+			break;
+		}
+
+		if (!status)
+			status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
+		break;
+
+	case VCHIQ_SRVSTATE_CLOSERECVD:
+		if (!close_recvd && is_server)
+			/* Force into LISTENING mode */
+			set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
+		status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
+		break;
+
+	default:
+		dev_err(state->dev, "core: (%d) called in state %s\n",
+			close_recvd, srvstate_names[service->srvstate]);
+		break;
+	}
+
+	return status;
+}
+
+/* Called from the application process upon process death */
+void
+vchiq_terminate_service_internal(struct vchiq_service *service)
+{
+	struct vchiq_state *state = service->state;
+
+	dev_dbg(state->dev, "core: %d: tsi - (%d<->%d)\n",
+		state->id, service->localport, service->remoteport);
+
+	mark_service_closing(service);
+
+	/* Mark the service for removal by the slot handler */
+	request_poll(state, service, VCHIQ_POLL_REMOVE);
+}
+
+/* Called from the slot handler */
+void
+vchiq_free_service_internal(struct vchiq_service *service)
+{
+	struct vchiq_state *state = service->state;
+
+	dev_dbg(state->dev, "core: %d: fsi - (%d)\n", state->id, service->localport);
+
+	switch (service->srvstate) {
+	case VCHIQ_SRVSTATE_OPENING:
+	case VCHIQ_SRVSTATE_CLOSED:
+	case VCHIQ_SRVSTATE_HIDDEN:
+	case VCHIQ_SRVSTATE_LISTENING:
+	case VCHIQ_SRVSTATE_CLOSEWAIT:
+		break;
+	default:
+		dev_err(state->dev, "core: %d: fsi - (%d) in state %s\n",
+			state->id, service->localport, srvstate_names[service->srvstate]);
+		return;
+	}
+
+	set_service_state(service, VCHIQ_SRVSTATE_FREE);
+
+	complete(&service->remove_event);
+
+	/* Release the initial lock */
+
vchiq_service_put(service); +} + +int +vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance) +{ + struct vchiq_service *service; + int status = 0; + int i; + + /* Find all services registered to this client and enable them. */ + i = 0; + while ((service = next_service_by_instance(state, instance, &i)) != NULL) { + if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN) + set_service_state(service, VCHIQ_SRVSTATE_LISTENING); + vchiq_service_put(service); + } + + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) { + status = queue_message(state, NULL, MAKE_CONNECT, NULL, NULL, 0, + QMFLAGS_IS_BLOCKING); + if (status) + return status; + + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING); + } + + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) { + if (wait_for_completion_interruptible(&state->connect)) + return -EAGAIN; + + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED); + complete(&state->connect); + } + + return status; +} + +void +vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance) +{ + struct vchiq_service *service; + int i; + + /* Find all services registered to this client and remove them. */ + i = 0; + while ((service = next_service_by_instance(state, instance, &i)) != NULL) { + (void)vchiq_remove_service(instance, service->handle); + vchiq_service_put(service); + } +} + +int +vchiq_close_service(struct vchiq_instance *instance, unsigned int handle) +{ + /* Unregister the service */ + struct vchiq_service *service = find_service_by_handle(instance, handle); + int status = 0; + + if (!service) + return -EINVAL; + + dev_dbg(service->state->dev, "core: %d: close_service:%d\n", + service->state->id, service->localport); + + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) || + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) || + (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) { + vchiq_service_put(service); + return -EINVAL; + } + + mark_service_closing(service); + + if (current == service->state->slot_handler_thread) { + status = vchiq_close_service_internal(service, NO_CLOSE_RECVD); + WARN_ON(status == -EAGAIN); + } else { + /* Mark the service for termination by the slot handler */ + request_poll(service->state, service, VCHIQ_POLL_TERMINATE); + } + + while (1) { + if (wait_for_completion_interruptible(&service->remove_event)) { + status = -EAGAIN; + break; + } + + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) || + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) || + (service->srvstate == VCHIQ_SRVSTATE_OPEN)) + break; + + dev_warn(service->state->dev, + "core: %d: close_service:%d - waiting in state %s\n", + service->state->id, service->localport, + srvstate_names[service->srvstate]); + } + + if (!status && + (service->srvstate != VCHIQ_SRVSTATE_FREE) && + (service->srvstate != VCHIQ_SRVSTATE_LISTENING)) + status = -EINVAL; + + vchiq_service_put(service); + + return status; +} +EXPORT_SYMBOL(vchiq_close_service); + +int +vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle) +{ + /* Unregister the service */ + struct vchiq_service *service = find_service_by_handle(instance, handle); + int status = 0; + + if (!service) + return -EINVAL; + + dev_dbg(service->state->dev, "core: %d: remove_service:%d\n", + service->state->id, service->localport); + + if (service->srvstate == VCHIQ_SRVSTATE_FREE) { + vchiq_service_put(service); + return -EINVAL; + } + + mark_service_closing(service); + + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) || + (current == service->state->slot_handler_thread)) { + /* + 
* Make it look like a client, because it must be removed and + * not left in the LISTENING state. + */ + service->public_fourcc = VCHIQ_FOURCC_INVALID; + + status = vchiq_close_service_internal(service, NO_CLOSE_RECVD); + WARN_ON(status == -EAGAIN); + } else { + /* Mark the service for removal by the slot handler */ + request_poll(service->state, service, VCHIQ_POLL_REMOVE); + } + while (1) { + if (wait_for_completion_interruptible(&service->remove_event)) { + status = -EAGAIN; + break; + } + + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) || + (service->srvstate == VCHIQ_SRVSTATE_OPEN)) + break; + + dev_warn(service->state->dev, + "core: %d: remove_service:%d - waiting in state %s\n", + service->state->id, service->localport, + srvstate_names[service->srvstate]); + } + + if (!status && (service->srvstate != VCHIQ_SRVSTATE_FREE)) + status = -EINVAL; + + vchiq_service_put(service); + + return status; +} + +int +vchiq_bulk_xfer_blocking(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_bulk *bulk_params) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + int status = -EINVAL; + + if (!service) + return -EINVAL; + + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) + goto error_exit; + + if (!bulk_params->offset && !bulk_params->uoffset) + goto error_exit; + + if (vchiq_check_service(service)) + goto error_exit; + + status = vchiq_bulk_xfer_queue_msg_killable(service, bulk_params); + +error_exit: + vchiq_service_put(service); + + return status; +} + +int +vchiq_bulk_xfer_callback(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_bulk *bulk_params) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + int status = -EINVAL; + + if (!service) + return -EINVAL; + + if (bulk_params->mode != VCHIQ_BULK_MODE_CALLBACK && + bulk_params->mode != VCHIQ_BULK_MODE_NOCALLBACK) + goto error_exit; + + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) + goto error_exit; + + if (!bulk_params->offset && !bulk_params->uoffset) + goto error_exit; + + if (vchiq_check_service(service)) + goto error_exit; + + status = vchiq_bulk_xfer_queue_msg_killable(service, bulk_params); + +error_exit: + vchiq_service_put(service); + + return status; +} + +/* + * This function is called by VCHIQ ioctl interface and is interruptible. + * It may receive -EAGAIN to indicate that a signal has been received + * and the call should be retried after being returned to user context. 
+ */ +int +vchiq_bulk_xfer_waiting(struct vchiq_instance *instance, + unsigned int handle, struct bulk_waiter *waiter) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + struct bulk_waiter *bulk_waiter; + int status = -EINVAL; + + if (!service) + return -EINVAL; + + if (!waiter) + goto error_exit; + + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) + goto error_exit; + + if (vchiq_check_service(service)) + goto error_exit; + + bulk_waiter = waiter; + + vchiq_service_put(service); + + status = 0; + + if (wait_for_completion_killable(&bulk_waiter->event)) + return -EINTR; + else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED) + return -EINVAL; + + return status; + +error_exit: + vchiq_service_put(service); + + return status; +} + +int +vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle, + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, + size_t size) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + int status = -EINVAL; + int data_id; + + if (!service) + goto error_exit; + + if (vchiq_check_service(service)) + goto error_exit; + + if (!size) { + VCHIQ_SERVICE_STATS_INC(service, error_count); + goto error_exit; + } + + if (size > VCHIQ_MAX_MSG_SIZE) { + VCHIQ_SERVICE_STATS_INC(service, error_count); + goto error_exit; + } + + data_id = MAKE_DATA(service->localport, service->remoteport); + + switch (service->srvstate) { + case VCHIQ_SRVSTATE_OPEN: + status = queue_message(service->state, service, data_id, + copy_callback, context, size, + QMFLAGS_IS_BLOCKING); + break; + case VCHIQ_SRVSTATE_OPENSYNC: + status = queue_message_sync(service->state, service, data_id, + copy_callback, context, size); + break; + default: + status = -EINVAL; + break; + } + +error_exit: + if (service) + vchiq_service_put(service); + + return status; +} + +int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, void *data, + unsigned int size) +{ + return vchiq_queue_message(instance, handle, memcpy_copy_callback, + data, size); +} +EXPORT_SYMBOL(vchiq_queue_kernel_message); + +void +vchiq_release_message(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_header *header) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + struct vchiq_shared_state *remote; + struct vchiq_state *state; + int slot_index; + + if (!service) + return; + + state = service->state; + remote = state->remote; + + slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header); + + if ((slot_index >= remote->slot_first) && + (slot_index <= remote->slot_last)) { + int msgid = header->msgid; + + if (msgid & VCHIQ_MSGID_CLAIMED) { + struct vchiq_slot_info *slot_info = + SLOT_INFO_FROM_INDEX(state, slot_index); + + release_slot(state, slot_info, header, service); + } + } else if (slot_index == remote->slot_sync) { + release_message_sync(state, header); + } + + vchiq_service_put(service); +} +EXPORT_SYMBOL(vchiq_release_message); + +static void +release_message_sync(struct vchiq_state *state, struct vchiq_header *header) +{ + header->msgid = VCHIQ_MSGID_PADDING; + remote_event_signal(state, &state->remote->sync_release); +} + +int +vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle, short *peer_version) +{ + int status = -EINVAL; + struct vchiq_service *service = find_service_by_handle(instance, handle); + + if (!service) + goto exit; + + if (vchiq_check_service(service)) + goto exit; + + if (!peer_version) + goto 
exit;
+
+	*peer_version = service->peer_version;
+	status = 0;
+
+exit:
+	if (service)
+		vchiq_service_put(service);
+	return status;
+}
+EXPORT_SYMBOL(vchiq_get_peer_version);
+
+void vchiq_get_config(struct vchiq_config *config)
+{
+	config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
+	config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
+	config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
+	config->max_services = VCHIQ_MAX_SERVICES;
+	config->version = VCHIQ_VERSION;
+	config->version_min = VCHIQ_VERSION_MIN;
+}
+
+int
+vchiq_set_service_option(struct vchiq_instance *instance, unsigned int handle,
+			 enum vchiq_service_option option, int value)
+{
+	struct vchiq_service *service = find_service_by_handle(instance, handle);
+	struct vchiq_service_quota *quota;
+	int ret = -EINVAL;
+
+	if (!service)
+		return -EINVAL;
+
+	switch (option) {
+	case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
+		service->auto_close = value;
+		ret = 0;
+		break;
+
+	case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
+		quota = &service->state->service_quotas[service->localport];
+		if (value == 0)
+			value = service->state->default_slot_quota;
+		if ((value >= quota->slot_use_count) &&
+		    (value < (unsigned short)~0)) {
+			quota->slot_quota = value;
+			if ((value >= quota->slot_use_count) &&
+			    (quota->message_quota >= quota->message_use_count))
+				/*
+				 * Signal the service that it may have
+				 * dropped below its quota
+				 */
+				complete(&quota->quota_event);
+			ret = 0;
+		}
+		break;
+
+	case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
+		quota = &service->state->service_quotas[service->localport];
+		if (value == 0)
+			value = service->state->default_message_quota;
+		if ((value >= quota->message_use_count) &&
+		    (value < (unsigned short)~0)) {
+			quota->message_quota = value;
+			if ((value >= quota->message_use_count) &&
+			    (quota->slot_quota >= quota->slot_use_count))
+				/*
+				 * Signal the service that it may have
+				 * dropped below its quota
+				 */
+				complete(&quota->quota_event);
+			ret = 0;
+		}
+		break;
+
+	case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
+		if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
+		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
+			service->sync = value;
+			ret = 0;
+		}
+		break;
+
+	case VCHIQ_SERVICE_OPTION_TRACE:
+		service->trace = value;
+		ret = 0;
+		break;
+
+	default:
+		break;
+	}
+	vchiq_service_put(service);
+
+	return ret;
+}
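+
+/*
+ * A minimal usage sketch (illustrative only, not part of the driver;
+ * "instance" and "handle" are assumed to identify an existing service):
+ *
+ *	// Allow the service to claim up to four slots; passing 0 would
+ *	// restore the default quota instead.
+ *	vchiq_set_service_option(instance, handle,
+ *				 VCHIQ_SERVICE_OPTION_SLOT_QUOTA, 4);
+ *
+ *	// Enable message tracing for this service.
+ *	vchiq_set_service_option(instance, handle,
+ *				 VCHIQ_SERVICE_OPTION_TRACE, 1);
+ *
+ * Each call returns 0 on success, or -EINVAL if the handle or value is
+ * not acceptable.
+ */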
+
+static void
+vchiq_dump_shared_state(struct seq_file *f, struct vchiq_state *state,
+			struct vchiq_shared_state *shared, const char *label)
+{
+	static const char *const debug_names[] = {
+		"<entries>",
+		"SLOT_HANDLER_COUNT",
+		"SLOT_HANDLER_LINE",
+		"PARSE_LINE",
+		"PARSE_HEADER",
+		"PARSE_MSGID",
+		"AWAIT_COMPLETION_LINE",
+		"DEQUEUE_MESSAGE_LINE",
+		"SERVICE_CALLBACK_LINE",
+		"MSG_QUEUE_FULL_COUNT",
+		"COMPLETION_QUEUE_FULL_COUNT"
+	};
+	int i;
+
+	seq_printf(f, "  %s: slots %d-%d tx_pos=0x%x recycle=0x%x\n",
+		   label, shared->slot_first, shared->slot_last,
+		   shared->tx_pos, shared->slot_queue_recycle);
+
+	seq_puts(f, "    Slots claimed:\n");
+
+	for (i = shared->slot_first; i <= shared->slot_last; i++) {
+		struct vchiq_slot_info slot_info =
+			*SLOT_INFO_FROM_INDEX(state, i);
+		if (slot_info.use_count != slot_info.release_count) {
+			seq_printf(f, "      %d: %d/%d\n", i, slot_info.use_count,
+				   slot_info.release_count);
+		}
+	}
+
+	for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
+		seq_printf(f, "    DEBUG: %s = %d(0x%x)\n",
+			   debug_names[i], shared->debug[i], shared->debug[i]);
+	}
+}
+
+static void
+vchiq_dump_service_state(struct seq_file *f, struct vchiq_service *service)
+{
+	unsigned int ref_count;
+
+	/* Don't include the lock just taken */
+	ref_count = kref_read(&service->ref_count) - 1;
+	seq_printf(f, "Service %u: %s (ref %u)", service->localport,
+		   srvstate_names[service->srvstate], ref_count);
+
+	if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
+		char remoteport[30];
+		struct vchiq_service_quota *quota =
+			&service->state->service_quotas[service->localport];
+		int fourcc = service->base.fourcc;
+		int tx_pending, rx_pending, tx_size = 0, rx_size = 0;
+
+		if (service->remoteport != VCHIQ_PORT_FREE) {
+			int len2 = scnprintf(remoteport, sizeof(remoteport),
+					     "%u", service->remoteport);
+
+			if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
+				scnprintf(remoteport + len2, sizeof(remoteport) - len2,
+					  " (client 0x%x)", service->client_id);
+		} else {
+			strscpy(remoteport, "n/a", sizeof(remoteport));
+		}
+
+		seq_printf(f, " '%p4cc' remote %s (msg use %d/%d, slot use %d/%d)\n",
+			   &fourcc, remoteport,
+			   quota->message_use_count, quota->message_quota,
+			   quota->slot_use_count, quota->slot_quota);
+
+		tx_pending = service->bulk_tx.local_insert -
+			service->bulk_tx.remote_insert;
+		if (tx_pending) {
+			unsigned int i = BULK_INDEX(service->bulk_tx.remove);
+
+			tx_size = service->bulk_tx.bulks[i].size;
+		}
+
+		rx_pending = service->bulk_rx.local_insert -
+			service->bulk_rx.remote_insert;
+		if (rx_pending) {
+			unsigned int i = BULK_INDEX(service->bulk_rx.remove);
+
+			rx_size = service->bulk_rx.bulks[i].size;
+		}
+
+		seq_printf(f, "  Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)\n",
+			   tx_pending, tx_size, rx_pending, rx_size);
+
+		if (VCHIQ_ENABLE_STATS) {
+			seq_printf(f, "  Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
+				   service->stats.ctrl_tx_count,
+				   service->stats.ctrl_tx_bytes,
+				   service->stats.ctrl_rx_count,
+				   service->stats.ctrl_rx_bytes);
+
+			seq_printf(f, "  Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n",
+				   service->stats.bulk_tx_count,
+				   service->stats.bulk_tx_bytes,
+				   service->stats.bulk_rx_count,
+				   service->stats.bulk_rx_bytes);
+
+			seq_printf(f, "  %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors\n",
+				   service->stats.quota_stalls,
+				   service->stats.slot_stalls,
+				   service->stats.bulk_stalls,
+				   service->stats.bulk_aborted_count,
+				   service->stats.error_count);
+		}
+	}
+
+	vchiq_dump_platform_service_state(f, service);
+}
+
+void vchiq_dump_state(struct seq_file *f, struct vchiq_state *state)
+{
+	int i;
+
+	seq_printf(f, "State %d: %s\n", state->id,
+		   conn_state_names[state->conn_state]);
+
+	seq_printf(f, "  tx_pos=0x%x(@%pK), rx_pos=0x%x(@%pK)\n",
+		   state->local->tx_pos,
+		   state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
+		   state->rx_pos,
+		   state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
+
+	seq_printf(f, "  Version: %d (min %d)\n", VCHIQ_VERSION,
+		   VCHIQ_VERSION_MIN);
+
+	if (VCHIQ_ENABLE_STATS) {
+		seq_printf(f, "  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d\n",
+			   state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
+			   state->stats.error_count);
+	}
+
+	seq_printf(f, "  Slots: %d available (%d data), %d recyclable, %d stalls (%d data)\n",
+		   ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
+		   state->local_tx_pos) / VCHIQ_SLOT_SIZE,
+		   state->data_quota - state->data_use_count,
+		   state->local->slot_queue_recycle - state->slot_queue_available,
+		   state->stats.slot_stalls, state->stats.data_stalls);
+
+	vchiq_dump_platform_state(f);
+
+	vchiq_dump_shared_state(f, state, state->local, "Local");
+
+	vchiq_dump_shared_state(f, state, state->remote, "Remote");
+
+	vchiq_dump_platform_instances(state, f);
+
+	for (i = 0; i <
state->unused_service; i++) { + struct vchiq_service *service = find_service_by_port(state, i); + + if (service) { + vchiq_dump_service_state(f, service); + vchiq_service_put(service); + } + } +} + +int vchiq_send_remote_use(struct vchiq_state *state) +{ + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) + return -ENOTCONN; + + return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0); +} + +int vchiq_send_remote_use_active(struct vchiq_state *state) +{ + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) + return -ENOTCONN; + + return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE, + NULL, NULL, 0, 0); +} + +void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr, + const void *void_mem, size_t num_bytes) +{ + const u8 *mem = void_mem; + size_t offset; + char line_buf[100]; + char *s; + + while (num_bytes > 0) { + s = line_buf; + + for (offset = 0; offset < 16; offset++) { + if (offset < num_bytes) + s += scnprintf(s, 4, "%02x ", mem[offset]); + else + s += scnprintf(s, 4, " "); + } + + for (offset = 0; offset < 16; offset++) { + if (offset < num_bytes) { + u8 ch = mem[offset]; + + if ((ch < ' ') || (ch > '~')) + ch = '.'; + *s++ = (char)ch; + } + } + *s++ = '\0'; + + dev_dbg(dev, "core: %s: %08x: %s\n", label, addr, line_buf); + + addr += 16; + mem += 16; + if (num_bytes > 16) + num_bytes -= 16; + else + num_bytes = 0; + } +} diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c new file mode 100644 index 000000000000..c82326a9b6d9 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. + * Copyright (c) 2010-2012 Broadcom. All rights reserved. + */ + +#include <linux/debugfs.h> +#include <linux/raspberrypi/vchiq_core.h> +#include <linux/raspberrypi/vchiq_arm.h> +#include <linux/raspberrypi/vchiq_debugfs.h> + +#ifdef CONFIG_DEBUG_FS + +#define DEBUGFS_WRITE_BUF_SIZE 256 + +/* Global 'vchiq' debugfs and clients entry used by all instances */ +static struct dentry *vchiq_dbg_dir; +static struct dentry *vchiq_dbg_clients; + +static int debugfs_usecount_show(struct seq_file *f, void *offset) +{ + struct vchiq_instance *instance = f->private; + int use_count; + + use_count = vchiq_instance_get_use_count(instance); + seq_printf(f, "%d\n", use_count); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_usecount); + +static int debugfs_trace_show(struct seq_file *f, void *offset) +{ + struct vchiq_instance *instance = f->private; + int trace; + + trace = vchiq_instance_get_trace(instance); + seq_printf(f, "%s\n", trace ? 
"Y" : "N"); + + return 0; +} + +static int vchiq_dump_show(struct seq_file *f, void *offset) +{ + struct vchiq_state *state = f->private; + + vchiq_dump_state(f, state); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(vchiq_dump); + +static int debugfs_trace_open(struct inode *inode, struct file *file) +{ + return single_open(file, debugfs_trace_show, inode->i_private); +} + +static ssize_t debugfs_trace_write(struct file *file, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct seq_file *f = (struct seq_file *)file->private_data; + struct vchiq_instance *instance = f->private; + char firstchar; + + if (copy_from_user(&firstchar, buffer, 1)) + return -EFAULT; + + switch (firstchar) { + case 'Y': + case 'y': + case '1': + vchiq_instance_set_trace(instance, 1); + break; + case 'N': + case 'n': + case '0': + vchiq_instance_set_trace(instance, 0); + break; + default: + break; + } + + *ppos += count; + + return count; +} + +static const struct file_operations debugfs_trace_fops = { + .owner = THIS_MODULE, + .open = debugfs_trace_open, + .write = debugfs_trace_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* add an instance (process) to the debugfs entries */ +void vchiq_debugfs_add_instance(struct vchiq_instance *instance) +{ + char pidstr[16]; + struct dentry *top; + + snprintf(pidstr, sizeof(pidstr), "%d", + vchiq_instance_get_pid(instance)); + + top = debugfs_create_dir(pidstr, vchiq_dbg_clients); + + debugfs_create_file("use_count", 0444, top, instance, + &debugfs_usecount_fops); + debugfs_create_file("trace", 0644, top, instance, &debugfs_trace_fops); + + vchiq_instance_get_debugfs_node(instance)->dentry = top; +} + +void vchiq_debugfs_remove_instance(struct vchiq_instance *instance) +{ + struct vchiq_debugfs_node *node = + vchiq_instance_get_debugfs_node(instance); + + debugfs_remove_recursive(node->dentry); +} + +void vchiq_debugfs_init(struct vchiq_state *state) +{ + vchiq_dbg_dir = debugfs_create_dir("vchiq", NULL); + vchiq_dbg_clients = debugfs_create_dir("clients", vchiq_dbg_dir); + + debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, state, + &vchiq_dump_fops); +} + +/* remove all the debugfs entries */ +void vchiq_debugfs_deinit(void) +{ + debugfs_remove_recursive(vchiq_dbg_dir); +} + +#else /* CONFIG_DEBUG_FS */ + +void vchiq_debugfs_init(struct vchiq_state *state) +{ +} + +void vchiq_debugfs_deinit(void) +{ +} + +void vchiq_debugfs_add_instance(struct vchiq_instance *instance) +{ +} + +void vchiq_debugfs_remove_instance(struct vchiq_instance *instance) +{ +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c new file mode 100644 index 000000000000..0f3dde2657d6 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c @@ -0,0 +1,1355 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. + * Copyright (c) 2010-2012 Broadcom. All rights reserved. 
+ */ + +#include <linux/cdev.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/compat.h> +#include <linux/miscdevice.h> + +#include <linux/raspberrypi/vchiq_core.h> +#include <linux/raspberrypi/vchiq_arm.h> +#include <linux/raspberrypi/vchiq_debugfs.h> + +#include "vchiq_ioctl.h" + +static const char *const ioctl_names[] = { + "CONNECT", + "SHUTDOWN", + "CREATE_SERVICE", + "REMOVE_SERVICE", + "QUEUE_MESSAGE", + "QUEUE_BULK_TRANSMIT", + "QUEUE_BULK_RECEIVE", + "AWAIT_COMPLETION", + "DEQUEUE_MESSAGE", + "GET_CLIENT_ID", + "GET_CONFIG", + "CLOSE_SERVICE", + "USE_SERVICE", + "RELEASE_SERVICE", + "SET_SERVICE_OPTION", + "DUMP_PHYS_MEM", + "LIB_VERSION", + "CLOSE_DELIVERED" +}; + +static_assert(ARRAY_SIZE(ioctl_names) == (VCHIQ_IOC_MAX + 1)); + +static void +user_service_free(void *userdata) +{ + kfree(userdata); +} + +static void close_delivered(struct user_service *user_service) +{ + dev_dbg(user_service->service->state->dev, + "arm: (handle=%x)\n", user_service->service->handle); + + if (user_service->close_pending) { + /* Allow the underlying service to be culled */ + vchiq_service_put(user_service->service); + + /* Wake the user-thread blocked in close_ or remove_service */ + complete(&user_service->close_event); + + user_service->close_pending = 0; + } +} + +struct vchiq_io_copy_callback_context { + struct vchiq_element *element; + size_t element_offset; + unsigned long elements_to_go; +}; + +static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest, + size_t offset, size_t maxsize) +{ + struct vchiq_io_copy_callback_context *cc = context; + size_t total_bytes_copied = 0; + size_t bytes_this_round; + + while (total_bytes_copied < maxsize) { + if (!cc->elements_to_go) + return total_bytes_copied; + + if (!cc->element->size) { + cc->elements_to_go--; + cc->element++; + cc->element_offset = 0; + continue; + } + + bytes_this_round = min(cc->element->size - cc->element_offset, + maxsize - total_bytes_copied); + + if (copy_from_user(dest + total_bytes_copied, + cc->element->data + cc->element_offset, + bytes_this_round)) + return -EFAULT; + + cc->element_offset += bytes_this_round; + total_bytes_copied += bytes_this_round; + + if (cc->element_offset == cc->element->size) { + cc->elements_to_go--; + cc->element++; + cc->element_offset = 0; + } + } + + return maxsize; +} + +static int +vchiq_ioc_queue_message(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_element *elements, unsigned long count) +{ + struct vchiq_io_copy_callback_context context; + int status = 0; + unsigned long i; + size_t total_size = 0; + + context.element = elements; + context.element_offset = 0; + context.elements_to_go = count; + + for (i = 0; i < count; i++) { + if (!elements[i].data && elements[i].size != 0) + return -EFAULT; + + total_size += elements[i].size; + } + + status = vchiq_queue_message(instance, handle, vchiq_ioc_copy_element_data, + &context, total_size); + + if (status == -EINVAL) + return -EIO; + else if (status == -EAGAIN) + return -EINTR; + return 0; +} + +static int vchiq_ioc_create_service(struct vchiq_instance *instance, + struct vchiq_create_service *args) +{ + struct user_service *user_service = NULL; + struct vchiq_service *service; + int status = 0; + struct vchiq_service_params_kernel params; + int srvstate; + + if (args->is_open && !instance->connected) + return -ENOTCONN; + + user_service = kmalloc(sizeof(*user_service), GFP_KERNEL); + if (!user_service) + return -ENOMEM; + + if (args->is_open) { + srvstate = 
VCHIQ_SRVSTATE_OPENING;
+	} else {
+		srvstate = instance->connected ?
+			VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN;
+	}
+
+	params = (struct vchiq_service_params_kernel) {
+		.fourcc = args->params.fourcc,
+		.callback = service_callback,
+		.userdata = user_service,
+		.version = args->params.version,
+		.version_min = args->params.version_min,
+	};
+	service = vchiq_add_service_internal(instance->state, &params,
+					     srvstate, instance,
+					     user_service_free);
+	if (!service) {
+		kfree(user_service);
+		return -EEXIST;
+	}
+
+	user_service->service = service;
+	user_service->userdata = args->params.userdata;
+	user_service->instance = instance;
+	user_service->is_vchi = (args->is_vchi != 0);
+	user_service->dequeue_pending = 0;
+	user_service->close_pending = 0;
+	user_service->message_available_pos = instance->completion_remove - 1;
+	user_service->msg_insert = 0;
+	user_service->msg_remove = 0;
+	init_completion(&user_service->insert_event);
+	init_completion(&user_service->remove_event);
+	init_completion(&user_service->close_event);
+
+	if (args->is_open) {
+		status = vchiq_open_service_internal(service, instance->pid);
+		if (status) {
+			vchiq_remove_service(instance, service->handle);
+			return (status == -EAGAIN) ?
+				-EINTR : -EIO;
+		}
+	}
+	args->handle = service->handle;
+
+	return 0;
+}
+
+static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
+				     struct vchiq_dequeue_message *args)
+{
+	struct user_service *user_service;
+	struct vchiq_service *service;
+	struct vchiq_header *header;
+	int ret;
+
+	DEBUG_INITIALISE(instance->state->local);
+	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+	service = find_service_for_instance(instance, args->handle);
+	if (!service)
+		return -EINVAL;
+
+	user_service = (struct user_service *)service->base.userdata;
+	if (user_service->is_vchi == 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	spin_lock(&service->state->msg_queue_spinlock);
+	if (user_service->msg_remove == user_service->msg_insert) {
+		if (!args->blocking) {
+			spin_unlock(&service->state->msg_queue_spinlock);
+			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+			ret = -EWOULDBLOCK;
+			goto out;
+		}
+		user_service->dequeue_pending = 1;
+		ret = 0;
+		do {
+			spin_unlock(&service->state->msg_queue_spinlock);
+			DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+			if (wait_for_completion_interruptible(&user_service->insert_event)) {
+				dev_dbg(service->state->dev, "arm: DEQUEUE_MESSAGE interrupted\n");
+				ret = -EINTR;
+				break;
+			}
+			spin_lock(&service->state->msg_queue_spinlock);
+		} while (user_service->msg_remove == user_service->msg_insert);
+
+		if (ret)
+			goto out;
+	}
+
+	if (WARN_ON_ONCE((int)(user_service->msg_insert -
+			       user_service->msg_remove) < 0)) {
+		spin_unlock(&service->state->msg_queue_spinlock);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	header = user_service->msg_queue[user_service->msg_remove &
+		(MSG_QUEUE_SIZE - 1)];
+	user_service->msg_remove++;
+	spin_unlock(&service->state->msg_queue_spinlock);
+
+	complete(&user_service->remove_event);
+	if (!header) {
+		ret = -ENOTCONN;
+	} else if (header->size <= args->bufsize) {
+		/* Copy to user space if msgbuf is not NULL */
+		if (!args->buf || (copy_to_user(args->buf, header->data, header->size) == 0)) {
+			ret = header->size;
+			vchiq_release_message(instance, service->handle, header);
+		} else {
+			ret = -EFAULT;
+		}
+	} else {
+		dev_err(service->state->dev,
+			"arm: header %p: bufsize %x < size %x\n",
+			header, args->bufsize, header->size);
+		WARN(1, "invalid size\n");
+		ret = -EMSGSIZE;
+	}
+	DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
+out:
+	vchiq_service_put(service);
+	return
ret; +} + +static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance, + struct vchiq_queue_bulk_transfer *args, + enum vchiq_bulk_dir dir, + enum vchiq_bulk_mode __user *mode) +{ + struct vchiq_service *service; + struct bulk_waiter_node *waiter = NULL, *iter; + struct vchiq_bulk bulk_params = {}; + int status = 0; + int ret; + + service = find_service_for_instance(instance, args->handle); + if (!service) + return -EINVAL; + + if (args->mode == VCHIQ_BULK_MODE_BLOCKING) { + waiter = kzalloc(sizeof(*waiter), GFP_KERNEL); + if (!waiter) { + ret = -ENOMEM; + goto out; + } + + bulk_params.uoffset = args->data; + bulk_params.mode = args->mode; + bulk_params.size = args->size; + bulk_params.dir = dir; + bulk_params.waiter = &waiter->bulk_waiter; + + status = vchiq_bulk_xfer_blocking(instance, args->handle, + &bulk_params); + } else if (args->mode == VCHIQ_BULK_MODE_WAITING) { + mutex_lock(&instance->bulk_waiter_list_mutex); + list_for_each_entry(iter, &instance->bulk_waiter_list, + list) { + if (iter->pid == current->pid) { + list_del(&iter->list); + waiter = iter; + break; + } + } + mutex_unlock(&instance->bulk_waiter_list_mutex); + if (!waiter) { + dev_err(service->state->dev, + "arm: no bulk_waiter found for pid %d\n", current->pid); + ret = -ESRCH; + goto out; + } + dev_dbg(service->state->dev, "arm: found bulk_waiter %p for pid %d\n", + waiter, current->pid); + + status = vchiq_bulk_xfer_waiting(instance, args->handle, + &waiter->bulk_waiter); + } else { + bulk_params.uoffset = args->data; + bulk_params.mode = args->mode; + bulk_params.size = args->size; + bulk_params.dir = dir; + bulk_params.cb_userdata = args->userdata; + + status = vchiq_bulk_xfer_callback(instance, args->handle, + &bulk_params); + } + + if (!waiter) { + ret = 0; + goto out; + } + + if ((status != -EAGAIN) || fatal_signal_pending(current) || + !waiter->bulk_waiter.bulk) { + if (waiter->bulk_waiter.bulk) { + /* Cancel the signal when the transfer completes. 
*/ + spin_lock(&service->state->bulk_waiter_spinlock); + waiter->bulk_waiter.bulk->waiter = NULL; + spin_unlock(&service->state->bulk_waiter_spinlock); + } + kfree(waiter); + ret = 0; + } else { + const enum vchiq_bulk_mode mode_waiting = + VCHIQ_BULK_MODE_WAITING; + waiter->pid = current->pid; + mutex_lock(&instance->bulk_waiter_list_mutex); + list_add(&waiter->list, &instance->bulk_waiter_list); + mutex_unlock(&instance->bulk_waiter_list_mutex); + dev_dbg(service->state->dev, "arm: saved bulk_waiter %p for pid %d\n", + waiter, current->pid); + + ret = put_user(mode_waiting, mode); + } +out: + vchiq_service_put(service); + if (ret) + return ret; + else if (status == -EINVAL) + return -EIO; + else if (status == -EAGAIN) + return -EINTR; + return 0; +} + +/* read a user pointer value from an array pointers in user space */ +static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index) +{ + int ret; + + if (in_compat_syscall()) { + compat_uptr_t ptr32; + compat_uptr_t __user *uptr = ubuf; + + ret = get_user(ptr32, uptr + index); + if (ret) + return ret; + + *buf = compat_ptr(ptr32); + } else { + uintptr_t ptr, __user *uptr = ubuf; + + ret = get_user(ptr, uptr + index); + + if (ret) + return ret; + + *buf = (void __user *)ptr; + } + + return 0; +} + +struct vchiq_completion_data32 { + enum vchiq_reason reason; + compat_uptr_t header; + compat_uptr_t service_userdata; + compat_uptr_t cb_data; +}; + +static int vchiq_put_completion(struct vchiq_completion_data __user *buf, + struct vchiq_completion_data *completion, + int index) +{ + struct vchiq_completion_data32 __user *buf32 = (void __user *)buf; + + if (in_compat_syscall()) { + struct vchiq_completion_data32 tmp = { + .reason = completion->reason, + .header = ptr_to_compat(completion->header), + .service_userdata = ptr_to_compat(completion->service_userdata), + .cb_data = ptr_to_compat(completion->cb_userdata), + }; + if (copy_to_user(&buf32[index], &tmp, sizeof(tmp))) + return -EFAULT; + } else { + if (copy_to_user(&buf[index], completion, sizeof(*completion))) + return -EFAULT; + } + + return 0; +} + +static int vchiq_ioc_await_completion(struct vchiq_instance *instance, + struct vchiq_await_completion *args, + int __user *msgbufcountp) +{ + int msgbufcount; + int remove; + int ret; + + DEBUG_INITIALISE(instance->state->local); + + DEBUG_TRACE(AWAIT_COMPLETION_LINE); + if (!instance->connected) + return -ENOTCONN; + + mutex_lock(&instance->completion_mutex); + + DEBUG_TRACE(AWAIT_COMPLETION_LINE); + while ((instance->completion_remove == instance->completion_insert) && !instance->closing) { + int rc; + + DEBUG_TRACE(AWAIT_COMPLETION_LINE); + mutex_unlock(&instance->completion_mutex); + rc = wait_for_completion_interruptible(&instance->insert_event); + mutex_lock(&instance->completion_mutex); + if (rc) { + DEBUG_TRACE(AWAIT_COMPLETION_LINE); + dev_dbg(instance->state->dev, "arm: AWAIT_COMPLETION interrupted\n"); + ret = -EINTR; + goto out; + } + } + DEBUG_TRACE(AWAIT_COMPLETION_LINE); + + msgbufcount = args->msgbufcount; + remove = instance->completion_remove; + + for (ret = 0; ret < args->count; ret++) { + struct vchiq_completion_data_kernel *completion; + struct vchiq_completion_data user_completion; + struct vchiq_service *service; + struct user_service *user_service; + struct vchiq_header *header; + + if (remove == instance->completion_insert) + break; + + completion = &instance->completions[remove & (MAX_COMPLETIONS - 1)]; + + /* + * A read memory barrier is needed to stop + * prefetch of a stale completion 
+		 */
+		rmb();
+
+		service = completion->service_userdata;
+		user_service = service->base.userdata;
+
+		memset(&user_completion, 0, sizeof(user_completion));
+		user_completion = (struct vchiq_completion_data) {
+			.reason = completion->reason,
+			.service_userdata = user_service->userdata,
+		};
+
+		header = completion->header;
+		if (header) {
+			void __user *msgbuf;
+			int msglen;
+
+			msglen = header->size + sizeof(struct vchiq_header);
+			/* This must be a VCHIQ-style service */
+			if (args->msgbufsize < msglen) {
+				dev_err(service->state->dev,
+					"arm: header %p: msgbufsize %x < msglen %x\n",
+					header, args->msgbufsize, msglen);
+				WARN(1, "invalid message size\n");
+				if (ret == 0)
+					ret = -EMSGSIZE;
+				break;
+			}
+			if (msgbufcount <= 0)
+				/* Stall here for lack of a buffer for the message. */
+				break;
+			/* Get the pointer from user space */
+			msgbufcount--;
+			if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
+					       msgbufcount)) {
+				if (ret == 0)
+					ret = -EFAULT;
+				break;
+			}
+
+			/* Copy the message to user space */
+			if (copy_to_user(msgbuf, header, msglen)) {
+				if (ret == 0)
+					ret = -EFAULT;
+				break;
+			}
+
+			/* Now it has been copied, the message can be released. */
+			vchiq_release_message(instance, service->handle, header);
+
+			/* The completion must point to the msgbuf. */
+			user_completion.header = msgbuf;
+		}
+
+		if ((completion->reason == VCHIQ_SERVICE_CLOSED) &&
+		    !instance->use_close_delivered)
+			vchiq_service_put(service);
+
+		user_completion.cb_userdata = completion->cb_userdata;
+
+		if (vchiq_put_completion(args->buf, &user_completion, ret)) {
+			if (ret == 0)
+				ret = -EFAULT;
+			break;
+		}
+
+		/*
+		 * Ensure that the above copy has completed
+		 * before advancing the remove pointer.
+		 */
+		mb();
+		remove++;
+		instance->completion_remove = remove;
+	}
+
+	if (msgbufcount != args->msgbufcount) {
+		if (put_user(msgbufcount, msgbufcountp))
+			ret = -EFAULT;
+	}
+out:
+	if (ret)
+		complete(&instance->remove_event);
+	mutex_unlock(&instance->completion_mutex);
+	DEBUG_TRACE(AWAIT_COMPLETION_LINE);
+
+	return ret;
+}
+
+static long
+vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct vchiq_instance *instance = file->private_data;
+	int status = 0;
+	struct vchiq_service *service = NULL;
+	long ret = 0;
+	int i, rc;
+
+	dev_dbg(instance->state->dev, "arm: instance %p, cmd %s, arg %lx\n", instance,
+		((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
+		ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
+
+	switch (cmd) {
+	case VCHIQ_IOC_SHUTDOWN:
+		if (!instance->connected)
+			break;
+
+		/* Remove all services */
+		i = 0;
+		while ((service = next_service_by_instance(instance->state,
+							   instance, &i))) {
+			status = vchiq_remove_service(instance, service->handle);
+			vchiq_service_put(service);
+			if (status)
+				break;
+		}
+		service = NULL;
+
+		if (!status) {
+			/* Wake the completion thread and ask it to exit */
+			instance->closing = 1;
+			complete(&instance->insert_event);
+		}
+
+		break;
+
+	case VCHIQ_IOC_CONNECT:
+		if (instance->connected) {
+			ret = -EINVAL;
+			break;
+		}
+		rc = mutex_lock_killable(&instance->state->mutex);
+		if (rc) {
+			dev_err(instance->state->dev,
+				"arm: vchiq: connect: could not lock mutex for state %d: %d\n",
+				instance->state->id, rc);
+			ret = -EINTR;
+			break;
+		}
+		status = vchiq_connect_internal(instance->state, instance);
+		mutex_unlock(&instance->state->mutex);
+
+		if (!status)
+			instance->connected = 1;
+		else
+			dev_err(instance->state->dev,
+				"arm: vchiq: could not connect: %d\n", status);
+		break;
+
+	case VCHIQ_IOC_CREATE_SERVICE: {
+		struct vchiq_create_service __user *argp;
+		struct vchiq_create_service args;
+
+		argp = (void __user *)arg;
+		if (copy_from_user(&args, argp, sizeof(args))) {
+			ret = -EFAULT;
+			break;
+		}
+
+		ret = vchiq_ioc_create_service(instance, &args);
+		if (ret < 0)
+			break;
+
+		if (put_user(args.handle, &argp->handle)) {
+			vchiq_remove_service(instance, args.handle);
+			ret = -EFAULT;
+		}
+	} break;
+
+	case VCHIQ_IOC_CLOSE_SERVICE:
+	case VCHIQ_IOC_REMOVE_SERVICE: {
+		unsigned int handle = (unsigned int)arg;
+		struct user_service *user_service;
+
+		service = find_service_for_instance(instance, handle);
+		if (!service) {
+			ret = -EINVAL;
+			break;
+		}
+
+		user_service = service->base.userdata;
+
+		/*
+		 * close_pending is false on first entry, and when the
+		 * wait in vchiq_close_service has been interrupted.
+		 */
+		if (!user_service->close_pending) {
+			status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
+				  vchiq_close_service(instance, service->handle) :
+				  vchiq_remove_service(instance, service->handle);
+			if (status)
+				break;
+		}
+
+		/*
+		 * close_pending is true once the underlying service
+		 * has been closed until the client library calls the
+		 * CLOSE_DELIVERED ioctl, signalling close_event.
+		 */
+		if (user_service->close_pending &&
+		    wait_for_completion_interruptible(&user_service->close_event))
+			status = -EAGAIN;
+		break;
+	}
+
+	case VCHIQ_IOC_USE_SERVICE:
+	case VCHIQ_IOC_RELEASE_SERVICE: {
+		unsigned int handle = (unsigned int)arg;
+
+		service = find_service_for_instance(instance, handle);
+		if (service) {
+			ret = (cmd == VCHIQ_IOC_USE_SERVICE) ?
+				vchiq_use_service_internal(service) :
+				vchiq_release_service_internal(service);
+			if (ret) {
+				dev_err(instance->state->dev,
+					"suspend: cmd %s returned error %ld for service %p4cc:%03d\n",
+					(cmd == VCHIQ_IOC_USE_SERVICE) ?
+ "VCHIQ_IOC_USE_SERVICE" : + "VCHIQ_IOC_RELEASE_SERVICE", + ret, &service->base.fourcc, + service->client_id); + } + } else { + ret = -EINVAL; + } + } break; + + case VCHIQ_IOC_QUEUE_MESSAGE: { + struct vchiq_queue_message args; + + if (copy_from_user(&args, (const void __user *)arg, + sizeof(args))) { + ret = -EFAULT; + break; + } + + service = find_service_for_instance(instance, args.handle); + + if (service && (args.count <= MAX_ELEMENTS)) { + /* Copy elements into kernel space */ + struct vchiq_element elements[MAX_ELEMENTS]; + + if (copy_from_user(elements, args.elements, + args.count * sizeof(struct vchiq_element)) == 0) + ret = vchiq_ioc_queue_message(instance, args.handle, elements, + args.count); + else + ret = -EFAULT; + } else { + ret = -EINVAL; + } + } break; + + case VCHIQ_IOC_QUEUE_BULK_TRANSMIT: + case VCHIQ_IOC_QUEUE_BULK_RECEIVE: { + struct vchiq_queue_bulk_transfer args; + struct vchiq_queue_bulk_transfer __user *argp; + + enum vchiq_bulk_dir dir = + (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ? + VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE; + + argp = (void __user *)arg; + if (copy_from_user(&args, argp, sizeof(args))) { + ret = -EFAULT; + break; + } + + ret = vchiq_irq_queue_bulk_tx_rx(instance, &args, + dir, &argp->mode); + } break; + + case VCHIQ_IOC_AWAIT_COMPLETION: { + struct vchiq_await_completion args; + struct vchiq_await_completion __user *argp; + + argp = (void __user *)arg; + if (copy_from_user(&args, argp, sizeof(args))) { + ret = -EFAULT; + break; + } + + ret = vchiq_ioc_await_completion(instance, &args, + &argp->msgbufcount); + } break; + + case VCHIQ_IOC_DEQUEUE_MESSAGE: { + struct vchiq_dequeue_message args; + + if (copy_from_user(&args, (const void __user *)arg, + sizeof(args))) { + ret = -EFAULT; + break; + } + + ret = vchiq_ioc_dequeue_message(instance, &args); + } break; + + case VCHIQ_IOC_GET_CLIENT_ID: { + unsigned int handle = (unsigned int)arg; + + ret = vchiq_get_client_id(instance, handle); + } break; + + case VCHIQ_IOC_GET_CONFIG: { + struct vchiq_get_config args; + struct vchiq_config config; + + if (copy_from_user(&args, (const void __user *)arg, + sizeof(args))) { + ret = -EFAULT; + break; + } + if (args.config_size > sizeof(config)) { + ret = -EINVAL; + break; + } + + vchiq_get_config(&config); + if (copy_to_user(args.pconfig, &config, args.config_size)) { + ret = -EFAULT; + break; + } + } break; + + case VCHIQ_IOC_SET_SERVICE_OPTION: { + struct vchiq_set_service_option args; + + if (copy_from_user(&args, (const void __user *)arg, + sizeof(args))) { + ret = -EFAULT; + break; + } + + service = find_service_for_instance(instance, args.handle); + if (!service) { + ret = -EINVAL; + break; + } + + ret = vchiq_set_service_option(instance, args.handle, args.option, + args.value); + } break; + + case VCHIQ_IOC_LIB_VERSION: { + unsigned int lib_version = (unsigned int)arg; + + if (lib_version < VCHIQ_VERSION_MIN) + ret = -EINVAL; + else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED) + instance->use_close_delivered = 1; + } break; + + case VCHIQ_IOC_CLOSE_DELIVERED: { + unsigned int handle = (unsigned int)arg; + + service = find_closed_service_for_instance(instance, handle); + if (service) { + struct user_service *user_service = + (struct user_service *)service->base.userdata; + close_delivered(user_service); + } else { + ret = -EINVAL; + } + } break; + + default: + ret = -ENOTTY; + break; + } + + if (service) + vchiq_service_put(service); + + if (ret == 0) { + if (status == -EINVAL) + ret = -EIO; + else if (status == -EAGAIN) + ret = -EINTR; + } + + if 
+	if (!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK)) {
+		dev_dbg(instance->state->dev,
+			"arm: ioctl instance %p, cmd %s -> status %d, %ld\n",
+			instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+			ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
+	} else {
+		dev_dbg(instance->state->dev,
+			"arm: ioctl instance %p, cmd %s -> status %d, %ld\n",
+			instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+			ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
+	}
+
+	return ret;
+}
+
+#if defined(CONFIG_COMPAT)
+
+struct vchiq_service_params32 {
+	int fourcc;
+	compat_uptr_t callback;
+	compat_uptr_t userdata;
+	short version; /* Increment for non-trivial changes */
+	short version_min; /* Update for incompatible changes */
+};
+
+struct vchiq_create_service32 {
+	struct vchiq_service_params32 params;
+	int is_open;
+	int is_vchi;
+	unsigned int handle; /* OUT */
+};
+
+#define VCHIQ_IOC_CREATE_SERVICE32 \
+	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32)
+
+static long
+vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd,
+				  struct vchiq_create_service32 __user *ptrargs32)
+{
+	struct vchiq_create_service args;
+	struct vchiq_create_service32 args32;
+	struct vchiq_instance *instance = file->private_data;
+	long ret;
+
+	if (copy_from_user(&args32, ptrargs32, sizeof(args32)))
+		return -EFAULT;
+
+	args = (struct vchiq_create_service) {
+		.params = {
+			.fourcc = args32.params.fourcc,
+			.callback = compat_ptr(args32.params.callback),
+			.userdata = compat_ptr(args32.params.userdata),
+			.version = args32.params.version,
+			.version_min = args32.params.version_min,
+		},
+		.is_open = args32.is_open,
+		.is_vchi = args32.is_vchi,
+		.handle = args32.handle,
+	};
+
+	ret = vchiq_ioc_create_service(instance, &args);
+	if (ret < 0)
+		return ret;
+
+	if (put_user(args.handle, &ptrargs32->handle)) {
+		vchiq_remove_service(instance, args.handle);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+struct vchiq_element32 {
+	compat_uptr_t data;
+	unsigned int size;
+};
+
+struct vchiq_queue_message32 {
+	unsigned int handle;
+	unsigned int count;
+	compat_uptr_t elements;
+};
+
+#define VCHIQ_IOC_QUEUE_MESSAGE32 \
+	_IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message32)
+
+static long
+vchiq_compat_ioctl_queue_message(struct file *file,
+				 unsigned int cmd,
+				 struct vchiq_queue_message32 __user *arg)
+{
+	struct vchiq_queue_message args;
+	struct vchiq_queue_message32 args32;
+	struct vchiq_service *service;
+	struct vchiq_instance *instance = file->private_data;
+	int ret;
+
+	if (copy_from_user(&args32, arg, sizeof(args32)))
+		return -EFAULT;
+
+	args = (struct vchiq_queue_message) {
+		.handle = args32.handle,
+		.count = args32.count,
+		.elements = compat_ptr(args32.elements),
+	};
+
+	if (args32.count > MAX_ELEMENTS)
+		return -EINVAL;
+
+	service = find_service_for_instance(instance, args.handle);
+	if (!service)
+		return -EINVAL;
+
+	if (args32.elements && args32.count) {
+		struct vchiq_element32 element32[MAX_ELEMENTS];
+		struct vchiq_element elements[MAX_ELEMENTS];
+		unsigned int count;
+
+		if (copy_from_user(&element32, args.elements,
+				   sizeof(element32))) {
+			vchiq_service_put(service);
+			return -EFAULT;
+		}
+
+		for (count = 0; count < args32.count; count++) {
+			elements[count].data =
+				compat_ptr(element32[count].data);
+			elements[count].size = element32[count].size;
+		}
+		ret = vchiq_ioc_queue_message(instance, args.handle, elements,
+					      args.count);
+	} else {
+		ret = -EINVAL;
+	}
+	vchiq_service_put(service);
+
+	return ret;
+}
+
+struct vchiq_queue_bulk_transfer32 {
+	unsigned int handle;
+	compat_uptr_t data;
+	unsigned int size;
+	compat_uptr_t userdata;
+	enum vchiq_bulk_mode mode;
+};
+
+#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \
+	_IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32)
+#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \
+	_IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32)
+
+static long
+vchiq_compat_ioctl_queue_bulk(struct file *file,
+			      unsigned int cmd,
+			      struct vchiq_queue_bulk_transfer32 __user *argp)
+{
+	struct vchiq_queue_bulk_transfer32 args32;
+	struct vchiq_queue_bulk_transfer args;
+	enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ?
+				  VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
+
+	if (copy_from_user(&args32, argp, sizeof(args32)))
+		return -EFAULT;
+
+	args = (struct vchiq_queue_bulk_transfer) {
+		.handle = args32.handle,
+		.data = compat_ptr(args32.data),
+		.size = args32.size,
+		.userdata = compat_ptr(args32.userdata),
+		.mode = args32.mode,
+	};
+
+	return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args,
+					  dir, &argp->mode);
+}
+
+struct vchiq_await_completion32 {
+	unsigned int count;
+	compat_uptr_t buf;
+	unsigned int msgbufsize;
+	unsigned int msgbufcount; /* IN/OUT */
+	compat_uptr_t msgbufs;
+};
+
+#define VCHIQ_IOC_AWAIT_COMPLETION32 \
+	_IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32)
+
+static long
+vchiq_compat_ioctl_await_completion(struct file *file,
+				    unsigned int cmd,
+				    struct vchiq_await_completion32 __user *argp)
+{
+	struct vchiq_await_completion args;
+	struct vchiq_await_completion32 args32;
+
+	if (copy_from_user(&args32, argp, sizeof(args32)))
+		return -EFAULT;
+
+	args = (struct vchiq_await_completion) {
+		.count = args32.count,
+		.buf = compat_ptr(args32.buf),
+		.msgbufsize = args32.msgbufsize,
+		.msgbufcount = args32.msgbufcount,
+		.msgbufs = compat_ptr(args32.msgbufs),
+	};
+
+	return vchiq_ioc_await_completion(file->private_data, &args,
+					  &argp->msgbufcount);
+}
+
+struct vchiq_dequeue_message32 {
+	unsigned int handle;
+	int blocking;
+	unsigned int bufsize;
+	compat_uptr_t buf;
+};
+
+#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \
+	_IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32)
+
+static long
+vchiq_compat_ioctl_dequeue_message(struct file *file,
+				   unsigned int cmd,
+				   struct vchiq_dequeue_message32 __user *arg)
+{
+	struct vchiq_dequeue_message32 args32;
+	struct vchiq_dequeue_message args;
+
+	if (copy_from_user(&args32, arg, sizeof(args32)))
+		return -EFAULT;
+
+	args = (struct vchiq_dequeue_message) {
+		.handle = args32.handle,
+		.blocking = args32.blocking,
+		.bufsize = args32.bufsize,
+		.buf = compat_ptr(args32.buf),
+	};
+
+	return vchiq_ioc_dequeue_message(file->private_data, &args);
+}
+
+struct vchiq_get_config32 {
+	unsigned int config_size;
+	compat_uptr_t pconfig;
+};
+
+#define VCHIQ_IOC_GET_CONFIG32 \
+	_IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32)
+
+static long
+vchiq_compat_ioctl_get_config(struct file *file,
+			      unsigned int cmd,
+			      struct vchiq_get_config32 __user *arg)
+{
+	struct vchiq_get_config32 args32;
+	struct vchiq_config config;
+	void __user *ptr;
+
+	if (copy_from_user(&args32, arg, sizeof(args32)))
+		return -EFAULT;
+	if (args32.config_size > sizeof(config))
+		return -EINVAL;
+
+	vchiq_get_config(&config);
+	ptr = compat_ptr(args32.pconfig);
+	if (copy_to_user(ptr, &config, args32.config_size))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long
+vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = compat_ptr(arg);
+
+	switch (cmd) {
+	case VCHIQ_IOC_CREATE_SERVICE32:
+		return vchiq_compat_ioctl_create_service(file, cmd, argp);
+	case VCHIQ_IOC_QUEUE_MESSAGE32:
+		return vchiq_compat_ioctl_queue_message(file, cmd, argp);
+	case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32:
+	case VCHIQ_IOC_QUEUE_BULK_RECEIVE32:
+		return vchiq_compat_ioctl_queue_bulk(file, cmd, argp);
+	case VCHIQ_IOC_AWAIT_COMPLETION32:
+		return vchiq_compat_ioctl_await_completion(file, cmd, argp);
+	case VCHIQ_IOC_DEQUEUE_MESSAGE32:
+		return vchiq_compat_ioctl_dequeue_message(file, cmd, argp);
+	case VCHIQ_IOC_GET_CONFIG32:
+		return vchiq_compat_ioctl_get_config(file, cmd, argp);
+	default:
+		return vchiq_ioctl(file, cmd, (unsigned long)argp);
+	}
+}
+
+#endif
+
+static int vchiq_open(struct inode *inode, struct file *file)
+{
+	struct miscdevice *vchiq_miscdev = file->private_data;
+	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(vchiq_miscdev->parent);
+	struct vchiq_state *state = &mgmt->state;
+	struct vchiq_instance *instance;
+
+	dev_dbg(state->dev, "arm: vchiq open\n");
+
+	if (!vchiq_remote_initialised(state)) {
+		dev_dbg(state->dev, "arm: vchiq has no connection to VideoCore\n");
+		return -ENOTCONN;
+	}
+
+	instance = kzalloc(sizeof(*instance), GFP_KERNEL);
+	if (!instance)
+		return -ENOMEM;
+
+	instance->state = state;
+	instance->pid = current->tgid;
+
+	vchiq_debugfs_add_instance(instance);
+
+	init_completion(&instance->insert_event);
+	init_completion(&instance->remove_event);
+	mutex_init(&instance->completion_mutex);
+	mutex_init(&instance->bulk_waiter_list_mutex);
+	INIT_LIST_HEAD(&instance->bulk_waiter_list);
+
+	file->private_data = instance;
+
+	return 0;
+}
+
+static int vchiq_release(struct inode *inode, struct file *file)
+{
+	struct vchiq_instance *instance = file->private_data;
+	struct vchiq_state *state = instance->state;
+	struct vchiq_service *service;
+	int ret = 0;
+	int i;
+
+	dev_dbg(state->dev, "arm: instance=%p\n", instance);
+
+	if (!vchiq_remote_initialised(state)) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	/* Ensure videocore is awake to allow termination. */
+	vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ);
+
+	mutex_lock(&instance->completion_mutex);
+
+	/* Wake the completion thread and ask it to exit */
+	instance->closing = 1;
+	complete(&instance->insert_event);
+
+	mutex_unlock(&instance->completion_mutex);
+
+	/* Wake the slot handler if the completion queue is full. */
+	complete(&instance->remove_event);
+
+	/* Mark all services for termination... */
+	i = 0;
+	while ((service = next_service_by_instance(state, instance, &i))) {
+		struct user_service *user_service = service->base.userdata;
+
+		/* Wake the slot handler if the msg queue is full. */
+		complete(&user_service->remove_event);
+
+		vchiq_terminate_service_internal(service);
+		vchiq_service_put(service);
+	}
+
+	/* ...and wait for them to die */
+	i = 0;
+	while ((service = next_service_by_instance(state, instance, &i))) {
+		struct user_service *user_service = service->base.userdata;
+
+		wait_for_completion(&service->remove_event);
+
+		if (WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE)) {
+			vchiq_service_put(service);
+			break;
+		}
+
+		spin_lock(&service->state->msg_queue_spinlock);
+
+		while (user_service->msg_remove != user_service->msg_insert) {
+			struct vchiq_header *header;
+			int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1);
+
+			header = user_service->msg_queue[m];
+			user_service->msg_remove++;
+			spin_unlock(&service->state->msg_queue_spinlock);
+
+			if (header)
+				vchiq_release_message(instance, service->handle, header);
+			spin_lock(&service->state->msg_queue_spinlock);
+		}
+
+		spin_unlock(&service->state->msg_queue_spinlock);
+
+		vchiq_service_put(service);
+	}
+
+	/* Release any closed services */
+	while (instance->completion_remove != instance->completion_insert) {
+		struct vchiq_completion_data_kernel *completion;
+		struct vchiq_service *service;
+
+		completion = &instance->completions[instance->completion_remove
+						    & (MAX_COMPLETIONS - 1)];
+		service = completion->service_userdata;
+		if (completion->reason == VCHIQ_SERVICE_CLOSED) {
+			struct user_service *user_service =
+				service->base.userdata;
+
+			/* Wake any blocked user-thread */
+			if (instance->use_close_delivered)
+				complete(&user_service->close_event);
+			vchiq_service_put(service);
+		}
+		instance->completion_remove++;
+	}
+
+	/* Release the PEER service count. */
+	vchiq_release_internal(instance->state, NULL);
+
+	free_bulk_waiter(instance);
+
+	vchiq_debugfs_remove_instance(instance);
+
+	kfree(instance);
+	file->private_data = NULL;
+
+out:
+	return ret;
+}
+
+static const struct file_operations
+vchiq_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = vchiq_ioctl,
+#if defined(CONFIG_COMPAT)
+	.compat_ioctl = vchiq_compat_ioctl,
+#endif
+	.open = vchiq_open,
+	.release = vchiq_release,
+};
+
+static struct miscdevice vchiq_miscdev = {
+	.fops = &vchiq_fops,
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "vchiq",
+};
+
+/**
+ * vchiq_register_chrdev - Register the char driver for vchiq
+ *                         and create the necessary class and
+ *                         device files in userspace.
+ * @parent: The parent of the char device.
+ *
+ * Returns 0 on success else returns the error code.
+ */
+int vchiq_register_chrdev(struct device *parent)
+{
+	vchiq_miscdev.parent = parent;
+
+	return misc_register(&vchiq_miscdev);
+}
+
+/**
+ * vchiq_deregister_chrdev - Deregister and cleanup the vchiq char
+ *                           driver and device files
+ */
+void vchiq_deregister_chrdev(void)
+{
+	misc_deregister(&vchiq_miscdev);
+}
diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h b/drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h
new file mode 100644
index 000000000000..d0c759f6d8ea
--- /dev/null
+++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
+
+#ifndef VCHIQ_IOCTLS_H
+#define VCHIQ_IOCTLS_H
+
+#include <linux/ioctl.h>
+#include <linux/raspberrypi/vchiq.h>
+
+#define VCHIQ_IOC_MAGIC 0xc4
+#define VCHIQ_INVALID_HANDLE (~0)
+
+struct vchiq_service_params {
+	int fourcc;
+	int __user (*callback)(enum vchiq_reason reason,
+			       struct vchiq_header *header,
+			       unsigned int handle,
+			       void *bulk_userdata);
+	void __user *userdata;
+	short version; /* Increment for non-trivial changes */
+	short version_min; /* Update for incompatible changes */
+};
+
+struct vchiq_create_service {
+	struct vchiq_service_params params;
+	int is_open;
+	int is_vchi;
+	unsigned int handle; /* OUT */
+};
+
+struct vchiq_queue_message {
+	unsigned int handle;
+	unsigned int count;
+	const struct vchiq_element __user *elements;
+};
+
+struct vchiq_queue_bulk_transfer {
+	unsigned int handle;
+	void __user *data;
+	unsigned int size;
+	void __user *userdata;
+	enum vchiq_bulk_mode mode;
+};
+
+struct vchiq_completion_data {
+	enum vchiq_reason reason;
+	struct vchiq_header __user *header;
+	void __user *service_userdata;
+	void __user *cb_userdata;
+};
+
+struct vchiq_await_completion {
+	unsigned int count;
+	struct vchiq_completion_data __user *buf;
+	unsigned int msgbufsize;
+	unsigned int msgbufcount; /* IN/OUT */
+	void * __user *msgbufs;
+};
+
+struct vchiq_dequeue_message {
+	unsigned int handle;
+	int blocking;
+	unsigned int bufsize;
+	void __user *buf;
+};
+
+struct vchiq_get_config {
+	unsigned int config_size;
+	struct vchiq_config __user *pconfig;
+};
+
+struct vchiq_set_service_option {
+	unsigned int handle;
+	enum vchiq_service_option option;
+	int value;
+};
+
+struct vchiq_dump_mem {
+	void __user *virt_addr;
+	size_t num_bytes;
+};
+
+#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0)
+#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1)
+#define VCHIQ_IOC_CREATE_SERVICE \
+	_IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service)
+#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3)
+#define VCHIQ_IOC_QUEUE_MESSAGE \
+	_IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message)
+#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \
+	_IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer)
+#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \
+	_IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer)
+#define VCHIQ_IOC_AWAIT_COMPLETION \
+	_IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion)
+#define VCHIQ_IOC_DEQUEUE_MESSAGE \
+	_IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message)
+#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9)
+#define VCHIQ_IOC_GET_CONFIG \
+	_IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config)
+#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11)
+#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12)
+#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13)
+#define VCHIQ_IOC_SET_SERVICE_OPTION \
+	_IOW(VCHIQ_IOC_MAGIC, 14, struct vchiq_set_service_option)
+#define VCHIQ_IOC_DUMP_PHYS_MEM \
+	_IOW(VCHIQ_IOC_MAGIC, 15, struct vchiq_dump_mem)
+#define VCHIQ_IOC_LIB_VERSION _IO(VCHIQ_IOC_MAGIC, 16)
+#define VCHIQ_IOC_CLOSE_DELIVERED _IO(VCHIQ_IOC_MAGIC, 17)
+#define VCHIQ_IOC_MAX 17
+
+#endif
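
For illustration, a minimal user-space sketch of driving this interface
directly (a hypothetical fragment, not part of the patch: it assumes a
user-space copy of the vchiq_ioctl.h definitions above with the __user
annotations stripped; real clients normally go through the userland
vchiq library instead):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "vchiq_ioctl.h"	/* hypothetical user-space copy */

int main(void)
{
	struct vchiq_config config;
	struct vchiq_get_config args = {
		/* a config_size larger than the kernel's struct is rejected with EINVAL */
		.config_size = sizeof(config),
		.pconfig = &config,
	};
	int fd = open("/dev/vchiq", O_RDWR);

	if (fd < 0)
		return 1;

	/* One CONNECT per instance; a second attempt fails with EINVAL. */
	if (ioctl(fd, VCHIQ_IOC_CONNECT, 0) < 0) {
		close(fd);
		return 1;
	}

	if (ioctl(fd, VCHIQ_IOC_GET_CONFIG, &args) == 0)
		printf("read %u bytes of vchiq config\n", args.config_size);

	close(fd);
	return 0;
}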

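The mode rewrite in vchiq_irq_queue_bulk_tx_rx() implies a small retry
protocol for blocking bulk transfers: when a VCHIQ_BULK_MODE_BLOCKING
transfer is interrupted by a signal, the kernel parks the waiter, writes
VCHIQ_BULK_MODE_WAITING back into the caller's mode field and returns
-EINTR, and the caller is expected to retry from the same thread (the
parked waiter is looked up by pid). A sketch of that loop (hypothetical,
not part of the patch; "fd", "handle", "buf" and "len" are placeholders,
and it assumes the same user-space definitions as the fragment above
plus <errno.h>):

static int queue_bulk_blocking(int fd, unsigned int handle,
			       void *buf, unsigned int len)
{
	struct vchiq_queue_bulk_transfer args = {
		.handle = handle,
		.data = buf,
		.size = len,
		.mode = VCHIQ_BULK_MODE_BLOCKING,
	};
	int ret;

	do {
		ret = ioctl(fd, VCHIQ_IOC_QUEUE_BULK_TRANSMIT, &args);
		/*
		 * On EINTR the kernel may have downgraded args.mode to
		 * VCHIQ_BULK_MODE_WAITING; retrying then picks up the
		 * parked transfer instead of queueing a new one.
		 */
	} while (ret < 0 && errno == EINTR &&
		 args.mode == VCHIQ_BULK_MODE_WAITING);

	return ret;
}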