Diffstat (limited to 'drivers/platform')
32 files changed, 11316 insertions, 27 deletions
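A pattern worth noting before the ChromeOS EC hunks below: both the lightbar change (has_manual_suspend) and the sensorhub change treat -EINVAL from an optional EC command as "this firmware does not implement the command" rather than as a hard error, which is safe because the command's only parameter is a bool and can never be out of range. Here is a minimal, self-contained sketch of that capability-probe idiom; ec_send_bool_cmd() and OPTIONAL_CMD are hypothetical stand-ins, not real cros_ec API:

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Hypothetical EC command helper, standing in for the real protocol. */
    extern int ec_send_bool_cmd(struct device *dev, int cmd, bool enable);
    #define OPTIONAL_CMD 0x42	/* illustrative command number */

    static bool has_optional_cmd;

    static int probe_optional_cmd(struct device *dev)
    {
            int ret = ec_send_bool_cmd(dev, OPTIONAL_CMD, true);

            /*
             * -EINVAL can only mean "command not implemented" here: the
             * lone parameter is a bool, so it can never be an invalid
             * value in an implemented command.
             */
            has_optional_cmd = (ret != -EINVAL);

            /* An unsupported optional command is not a probe failure. */
            return has_optional_cmd ? ret : 0;
    }

The probe records the capability once; suspend/resume paths (and remove) then skip the command entirely when the firmware lacks it, as the lightbar change does.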
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig index 960fd6a82450..324c69c63f76 100644 --- a/drivers/platform/Kconfig +++ b/drivers/platform/Kconfig @@ -18,3 +18,5 @@ source "drivers/platform/surface/Kconfig" source "drivers/platform/x86/Kconfig" source "drivers/platform/arm64/Kconfig" + +source "drivers/platform/raspberrypi/Kconfig" diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index 19ac54648586..b0935c602ada 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -13,3 +13,4 @@ obj-$(CONFIG_CHROME_PLATFORMS) += chrome/ obj-$(CONFIG_CZNIC_PLATFORMS) += cznic/ obj-$(CONFIG_SURFACE_PLATFORMS) += surface/ obj-$(CONFIG_ARM64_PLATFORM_DEVICES) += arm64/ +obj-$(CONFIG_BCM2835_VCHIQ) += raspberrypi/ diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c index 4e74e702c5a2..3766cef81fe8 100644 --- a/drivers/platform/chrome/cros_ec_ishtp.c +++ b/drivers/platform/chrome/cros_ec_ishtp.c @@ -667,6 +667,7 @@ static void cros_ec_ishtp_remove(struct ishtp_cl_device *cl_device) cancel_work_sync(&client_data->work_ishtp_reset); cancel_work_sync(&client_data->work_ec_evt); + cros_ec_unregister(client_data->ec_dev); cros_ish_deinit(cros_ish_cl); ishtp_put_device(cl_device); } diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c index 87634f6921b7..8352e9732791 100644 --- a/drivers/platform/chrome/cros_ec_lightbar.c +++ b/drivers/platform/chrome/cros_ec_lightbar.c @@ -30,6 +30,13 @@ static unsigned long lb_interval_jiffies = 50 * HZ / 1000; */ static bool userspace_control; +/* + * Whether or not the lightbar supports the manual suspend commands. + * The Pixel 2013 (Link) does not while all other devices with a + * lightbar do. + */ +static bool has_manual_suspend; + static ssize_t interval_msec_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -550,7 +557,7 @@ static int cros_ec_lightbar_probe(struct platform_device *pd) return -ENODEV; /* Take control of the lightbar from the EC. */ - lb_manual_suspend_ctrl(ec_dev, 1); + has_manual_suspend = (lb_manual_suspend_ctrl(ec_dev, 1) != -EINVAL); ret = sysfs_create_group(&ec_dev->class_dev.kobj, &cros_ec_lightbar_attr_group); @@ -569,14 +576,15 @@ static void cros_ec_lightbar_remove(struct platform_device *pd) &cros_ec_lightbar_attr_group); /* Let the EC take over the lightbar again. */ - lb_manual_suspend_ctrl(ec_dev, 0); + if (has_manual_suspend) + lb_manual_suspend_ctrl(ec_dev, 0); } static int __maybe_unused cros_ec_lightbar_resume(struct device *dev) { struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent); - if (userspace_control) + if (userspace_control || !has_manual_suspend) return 0; return lb_send_empty_cmd(ec_dev, LIGHTBAR_CMD_RESUME); @@ -586,7 +594,7 @@ static int __maybe_unused cros_ec_lightbar_suspend(struct device *dev) { struct cros_ec_dev *ec_dev = dev_get_drvdata(dev->parent); - if (userspace_control) + if (userspace_control || !has_manual_suspend) return 0; return lb_send_empty_cmd(ec_dev, LIGHTBAR_CMD_SUSPEND); diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c index 1205219515d6..a10579144c34 100644 --- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c +++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c @@ -129,6 +129,17 @@ int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub, /* We expect to receive a payload of 4 bytes, ignore. 
*/ if (ret > 0) ret = 0; + /* + * Some platforms (such as Smaug) don't support the FIFO_INT_ENABLE + * command and the interrupt is always enabled. In that case, it + * returns -EINVAL. + * + * N.B.: there is no danger of -EINVAL meaning any other invalid + * parameter since fifo_int_enable.enable is a bool and can never + * be in an invalid range. + */ + else if (ret == -EINVAL) + ret = 0; return ret; }
diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c index 313d2bcd577b..c90174360004 100644 --- a/drivers/platform/chrome/cros_usbpd_notify.c +++ b/drivers/platform/chrome/cros_usbpd_notify.c @@ -6,6 +6,7 @@ */ #include <linux/acpi.h> +#include <linux/fwnode.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_data/cros_ec_proto.h> @@ -15,6 +16,7 @@ #define DRV_NAME "cros-usbpd-notify" #define DRV_NAME_PLAT_ACPI "cros-usbpd-notify-acpi" #define ACPI_DRV_NAME "GOOG0003" +#define CREC_DRV_NAME "GOOG0004" static BLOCKING_NOTIFIER_HEAD(cros_usbpd_notifier_list); @@ -98,8 +100,9 @@ static int cros_usbpd_notify_probe_acpi(struct platform_device *pdev) { struct cros_usbpd_notify_data *pdnotify; struct device *dev = &pdev->dev; - struct acpi_device *adev; + struct acpi_device *adev, *parent_adev; struct cros_ec_device *ec_dev; + struct fwnode_handle *parent_fwnode; acpi_status status; adev = ACPI_COMPANION(dev); @@ -114,8 +117,18 @@ static int cros_usbpd_notify_probe_acpi(struct platform_device *pdev) /* * We continue even for older devices which don't have the * correct device hierarchy, namely, GOOG0003 is a child - * of GOOG0004. + * of GOOG0004. If GOOG0003 is a child of GOOG0004 and we + * can't get a pointer to the Chrome EC device, defer the + * probe function. */ + parent_fwnode = fwnode_get_parent(dev->fwnode); + if (parent_fwnode) { + parent_adev = to_acpi_device_node(parent_fwnode); + if (parent_adev && + acpi_dev_hid_match(parent_adev, CREC_DRV_NAME)) { + return -EPROBE_DEFER; + } + } dev_warn(dev, "Couldn't get Chrome EC device pointer.\n"); }
diff --git a/drivers/platform/raspberrypi/Kconfig b/drivers/platform/raspberrypi/Kconfig new file mode 100644 index 000000000000..2c928440a47c --- /dev/null +++ b/drivers/platform/raspberrypi/Kconfig @@ -0,0 +1,52 @@ +# SPDX-License-Identifier: GPL-2.0 + +menuconfig BCM_VIDEOCORE + tristate "Broadcom VideoCore support" + depends on OF + depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE) + default y + help + Support for Broadcom VideoCore services including + the BCM2835 family of products which is used + by the Raspberry Pi. + +if BCM_VIDEOCORE + +config BCM2835_VCHIQ + tristate "BCM2835 VCHIQ" + depends on HAS_DMA + imply VCHIQ_CDEV + help + Broadcom BCM2835 and similar SoCs have a VPU called VideoCore. + This config enables the VCHIQ driver, which implements a + messaging interface between the kernel and the firmware running + on VideoCore. Other drivers use this interface to communicate with + the VPU. More specifically, the VCHIQ driver is used by + audio/video and camera drivers as well as for implementing the + MMAL API, which is in turn used by several multimedia services on + the BCM2835 family of SoCs. + + Defaults to Y when the Broadcom Videocore services are included + in the build, N otherwise. + +if BCM2835_VCHIQ + +config VCHIQ_CDEV + bool "VCHIQ Character Driver" + help + Enable the creation of the VCHIQ character driver.
The cdev exposes + ioctls used by userspace libraries and testing tools to interact + with VideoCore, via the VCHIQ core driver (check BCM2835_VCHIQ + for more info). + + This can be set to 'N' if the VideoCore communication is not + needed by userspace but only by other kernel modules + (like bcm2835-audio). + + If not sure, set this to 'Y'. + +endif + +source "drivers/platform/raspberrypi/vchiq-mmal/Kconfig" + +endif
diff --git a/drivers/platform/raspberrypi/Makefile b/drivers/platform/raspberrypi/Makefile new file mode 100644 index 000000000000..2a7c9511e5d8 --- /dev/null +++ b/drivers/platform/raspberrypi/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_BCM2835_VCHIQ) += vchiq.o + +vchiq-objs := \ + vchiq-interface/vchiq_core.o \ + vchiq-interface/vchiq_arm.o \ + vchiq-interface/vchiq_bus.o \ + vchiq-interface/vchiq_debugfs.o \ + +ifdef CONFIG_VCHIQ_CDEV +vchiq-objs += vchiq-interface/vchiq_dev.o +endif + +obj-$(CONFIG_BCM2835_VCHIQ_MMAL) += vchiq-mmal/
diff --git a/drivers/platform/raspberrypi/vchiq-interface/TESTING b/drivers/platform/raspberrypi/vchiq-interface/TESTING new file mode 100644 index 000000000000..c98f688b07e0 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/TESTING @@ -0,0 +1,125 @@ +This document contains some hints to test the function of the VCHIQ driver +without any additional hardware attached to the Raspberry Pi. + +* Requirements & limitations + +Testing the VCHIQ driver requires a Raspberry Pi with one of the following SoCs: + - BCM2835 ( e.g. Raspberry Pi Zero W ) + - BCM2836 ( e.g. Raspberry Pi 2 ) + - BCM2837 ( e.g. Raspberry Pi 3 B+ ) + +The BCM2711 used in the Raspberry Pi 4 is currently not supported in the +mainline kernel. + +There are no specific requirements on the VideoCore firmware to get VCHIQ +working. + +The test scenarios described in this document are based on the tool vchiq_test. +Its source code is available here: https://github.com/raspberrypi/userland + +* Configuration + +Here are the most common kernel configurations: + + 1. BCM2835 target SoC (ARM 32 bit) + + Just use bcm2835_defconfig which already has VCHIQ enabled. + + 2. BCM2836/7 target SoC (ARM 32 bit) + + Use the multi_v7_defconfig as a base and then enable all VCHIQ options. + + 3. BCM2837 target SoC (ARM 64 bit) + + Use the defconfig which has most of the VCHIQ options enabled. + +* Scenarios + + * Initial test + + Check that the driver is probed and /dev/vchiq is created. + + * Functional test + + Command: vchiq_test -f 10 + + Expected output: + Functional test - iters:10 + ======== iteration 1 ======== + Testing bulk transfer for alignment. + Testing bulk transfer at PAGE_SIZE. + ...
+ + * Ping test + + Command: vchiq_test -p + + Expected output: + Ping test - service:echo, iters:1000, version 3 + vchi ping (size 0) -> 57.000000us + vchi ping (size 0, 0 async, 0 oneway) -> 122.000000us + vchi bulk (size 0, 0 async, 0 oneway) -> 546.000000us + vchi bulk (size 0, 0 oneway) -> 230.000000us + vchi ping (size 0) -> 49.000000us + vchi ping (size 0, 0 async, 0 oneway) -> 70.000000us + vchi bulk (size 0, 0 async, 0 oneway) -> 296.000000us + vchi bulk (size 0, 0 oneway) -> 266.000000us + vchi ping (size 0, 1 async, 0 oneway) -> 65.000000us + vchi bulk (size 0, 0 oneway) -> 456.000000us + vchi ping (size 0, 2 async, 0 oneway) -> 74.000000us + vchi bulk (size 0, 0 oneway) -> 640.000000us + vchi ping (size 0, 10 async, 0 oneway) -> 125.000000us + vchi bulk (size 0, 0 oneway) -> 2309.000000us + vchi ping (size 0, 0 async, 1 oneway) -> 70.000000us + vchi ping (size 0, 0 async, 2 oneway) -> 76.000000us + vchi ping (size 0, 0 async, 10 oneway) -> 105.000000us + vchi ping (size 0, 10 async, 10 oneway) -> 165.000000us + vchi ping (size 0, 100 async, 0 oneway) -> nanus + vchi bulk (size 0, 0 oneway) -> nanus + vchi ping (size 0, 0 async, 100 oneway) -> nanus + vchi ping (size 0, 100 async, 100 oneway) -> infus + vchi ping (size 0, 200 async, 0 oneway) -> infus + ... + + * Debugfs test + + Command: cat /sys/kernel/debug/vchiq/state + + Example output: + State 0: CONNECTED + tx_pos=0x1e8(@43b0acda), rx_pos=0x170(@05493af8) + Version: 8 (min 3) + Stats: ctrl_tx_count=7, ctrl_rx_count=7, error_count=0 + Slots: 30 available (29 data), 0 recyclable, 0 stalls (0 data) + Platform: 2835 (VC master) + Local: slots 34-64 tx_pos=0x1e8 recycle=0x1f + Slots claimed: + DEBUG: SLOT_HANDLER_COUNT = 20(0x14) + DEBUG: SLOT_HANDLER_LINE = 1937(0x791) + DEBUG: PARSE_LINE = 1864(0x748) + DEBUG: PARSE_HEADER = -249155224(0xf1263168) + DEBUG: PARSE_MSGID = 67362817(0x403e001) + DEBUG: AWAIT_COMPLETION_LINE = 0(0x0) + DEBUG: DEQUEUE_MESSAGE_LINE = 0(0x0) + DEBUG: SERVICE_CALLBACK_LINE = 0(0x0) + DEBUG: MSG_QUEUE_FULL_COUNT = 0(0x0) + DEBUG: COMPLETION_QUEUE_FULL_COUNT = 0(0x0) + Remote: slots 2-32 tx_pos=0x170 recycle=0x1f + Slots claimed: + 2: 10/9 + DEBUG: SLOT_HANDLER_COUNT = 20(0x14) + DEBUG: SLOT_HANDLER_LINE = 1851(0x73b) + DEBUG: PARSE_LINE = 1827(0x723) + DEBUG: PARSE_HEADER = -150330912(0xf70a21e0) + DEBUG: PARSE_MSGID = 67113022(0x400103e) + DEBUG: AWAIT_COMPLETION_LINE = 0(0x0) + DEBUG: DEQUEUE_MESSAGE_LINE = 0(0x0) + DEBUG: SERVICE_CALLBACK_LINE = 0(0x0) + DEBUG: MSG_QUEUE_FULL_COUNT = 0(0x0) + DEBUG: COMPLETION_QUEUE_FULL_COUNT = 0(0x0) + Service 0: LISTENING (ref 1) 'PEEK little-endian (0x4b454550)' remote n/a (msg use 0/3840, slot use 0/15) + Bulk: tx_pending=0 (size 0), rx_pending=0 (size 0) + Ctrl: tx_count=0, tx_bytes=0, rx_count=0, rx_bytes=0 + Bulk: tx_count=0, tx_bytes=0, rx_count=0, rx_bytes=0 + 0 quota stalls, 0 slot stalls, 0 bulk stalls, 0 aborted, 0 errors + instance b511f60b diff --git a/drivers/platform/raspberrypi/vchiq-interface/TODO b/drivers/platform/raspberrypi/vchiq-interface/TODO new file mode 100644 index 000000000000..2357dae413f1 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/TODO @@ -0,0 +1,4 @@ +* Documentation + +A short top-down description of this driver's architecture (function of +kthreads, userspace, limitations) could be very helpful for reviewers. 
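Before the vchiq_arm.c diff itself, a short orientation example may help reviewers (in the spirit of the TODO above). The file exports a small kernel-side API: vchiq_initialise(), vchiq_connect(), vchiq_open_service(), the bulk transfer helpers and vchiq_shutdown(); the keepalive thread in the diff is its in-file user. The following is only a sketch of how an in-kernel client might drive that API, mirroring the keepalive pattern; the "DEMO" fourcc, the version numbers and demo_callback() are illustrative, not a real service:

    #include <linux/raspberrypi/vchiq_core.h>
    #include <linux/raspberrypi/vchiq_arm.h>

    static int demo_callback(struct vchiq_instance *instance,
                             enum vchiq_reason reason,
                             struct vchiq_header *header,
                             unsigned int handle,
                             void *cb_data, void __user *cb_userdata)
    {
            /* Invoked for SERVICE_OPENED/CLOSED, MESSAGE_AVAILABLE and
             * bulk-done notifications on this service. */
            return 0;
    }

    static int demo_open_service(struct vchiq_state *state)
    {
            const struct vchiq_service_params_kernel params = {
                    .fourcc      = VCHIQ_MAKE_FOURCC('D', 'E', 'M', 'O'),
                    .callback    = demo_callback,
                    .version     = 1,
                    .version_min = 1,
            };
            struct vchiq_instance *instance;
            unsigned int handle;
            int ret;

            /* Waits (bounded retries) for the VideoCore side to come up. */
            ret = vchiq_initialise(state, &instance);
            if (ret)
                    return ret;

            /* CONNECT handshake; services can only be opened once connected. */
            ret = vchiq_connect(instance);
            if (ret)
                    goto shutdown;

            ret = vchiq_open_service(instance, &params, &handle);
            if (ret)
                    goto shutdown;

            /* ... use vchiq_bulk_transmit()/vchiq_bulk_receive() with handle ... */
            return 0;

    shutdown:
            vchiq_shutdown(instance);
            return ret;
    }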
diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c new file mode 100644 index 000000000000..6a7b96d3dae6 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_arm.c @@ -0,0 +1,1477 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. + * Copyright (c) 2010-2012 Broadcom. All rights reserved. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/sched/signal.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/cdev.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/device/bus.h> +#include <linux/mm.h> +#include <linux/pagemap.h> +#include <linux/bug.h> +#include <linux/completion.h> +#include <linux/list.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/compat.h> +#include <linux/dma-mapping.h> +#include <linux/rcupdate.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/uaccess.h> +#include <soc/bcm2835/raspberrypi-firmware.h> + +#include <linux/raspberrypi/vchiq_core.h> +#include <linux/raspberrypi/vchiq_arm.h> +#include <linux/raspberrypi/vchiq_bus.h> +#include <linux/raspberrypi/vchiq_debugfs.h> + +#include "vchiq_ioctl.h" + +#define DEVICE_NAME "vchiq" + +#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32) + +#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2) + +#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0 +#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1 + +#define BELL0 0x00 + +#define ARM_DS_ACTIVE BIT(2) + +/* Override the default prefix, which would be vchiq_arm (from the filename) */ +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX DEVICE_NAME "." + +#define KEEPALIVE_VER 1 +#define KEEPALIVE_VER_MIN KEEPALIVE_VER + +/* + * The devices implemented in the VCHIQ firmware are not discoverable, + * so we need to maintain a list of them in order to register them with + * the interface. + */ +static struct vchiq_device *bcm2835_audio; + +static const struct vchiq_platform_info bcm2835_info = { + .cache_line_size = 32, +}; + +static const struct vchiq_platform_info bcm2836_info = { + .cache_line_size = 64, +}; + +struct vchiq_arm_state { + /* + * Keepalive-related data + * + * The keepalive mechanism was retro-fitted to VCHIQ to allow active + * services to prevent the system from suspending. + * This feature is not used on Raspberry Pi devices. + */ + struct task_struct *ka_thread; + struct completion ka_evt; + atomic_t ka_use_count; + atomic_t ka_use_ack_count; + atomic_t ka_release_count; + + rwlock_t susp_res_lock; + + struct vchiq_state *state; + + /* + * Global use count for videocore. + * This is equal to the sum of the use counts for all services. When + * this hits zero the videocore suspend procedure will be initiated. + */ + int videocore_use_count; + + /* + * Use count to track requests from videocore peer. + * This use count is not associated with a service, so needs to be + * tracked separately with the state. + */ + int peer_use_count; + + /* + * Flag to indicate that the first vchiq connect has made it through. + * This means that both sides should be fully ready, and we should + * be able to suspend after this point. 
+ */ + int first_connect; +}; + +static int +vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_bulk *bulk_params); + +static irqreturn_t +vchiq_doorbell_irq(int irq, void *dev_id) +{ + struct vchiq_state *state = dev_id; + struct vchiq_drv_mgmt *mgmt; + irqreturn_t ret = IRQ_NONE; + unsigned int status; + + mgmt = dev_get_drvdata(state->dev); + + /* Read (and clear) the doorbell */ + status = readl(mgmt->regs + BELL0); + + if (status & ARM_DS_ACTIVE) { /* Was the doorbell rung? */ + remote_event_pollall(state); + ret = IRQ_HANDLED; + } + + return ret; +} + +/* + * This function is called by the vchiq stack once it has been connected to + * the videocore and clients can start to use the stack. + */ +static void vchiq_call_connected_callbacks(struct vchiq_drv_mgmt *drv_mgmt) +{ + int i; + + if (mutex_lock_killable(&drv_mgmt->connected_mutex)) + return; + + for (i = 0; i < drv_mgmt->num_deferred_callbacks; i++) + drv_mgmt->deferred_callback[i](); + + drv_mgmt->num_deferred_callbacks = 0; + drv_mgmt->connected = true; + mutex_unlock(&drv_mgmt->connected_mutex); +} + +/* + * This function is used to defer initialization until the vchiq stack is + * initialized. If the stack is already initialized, then the callback will + * be made immediately, otherwise it will be deferred until + * vchiq_call_connected_callbacks is called. + */ +void vchiq_add_connected_callback(struct vchiq_device *device, void (*callback)(void)) +{ + struct vchiq_drv_mgmt *drv_mgmt = device->drv_mgmt; + + if (mutex_lock_killable(&drv_mgmt->connected_mutex)) + return; + + if (drv_mgmt->connected) { + /* We're already connected. Call the callback immediately. */ + callback(); + } else { + if (drv_mgmt->num_deferred_callbacks >= VCHIQ_DRV_MAX_CALLBACKS) { + dev_err(&device->dev, + "core: deferred callbacks(%d) exceeded the maximum limit(%d)\n", + drv_mgmt->num_deferred_callbacks, VCHIQ_DRV_MAX_CALLBACKS); + } else { + drv_mgmt->deferred_callback[drv_mgmt->num_deferred_callbacks] = + callback; + drv_mgmt->num_deferred_callbacks++; + } + } + mutex_unlock(&drv_mgmt->connected_mutex); +} +EXPORT_SYMBOL(vchiq_add_connected_callback); + +static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state) +{ + struct device *dev = &pdev->dev; + struct vchiq_drv_mgmt *drv_mgmt = platform_get_drvdata(pdev); + struct rpi_firmware *fw = drv_mgmt->fw; + struct vchiq_slot_zero *vchiq_slot_zero; + void *slot_mem; + dma_addr_t slot_phys; + u32 channelbase; + int slot_mem_size, frag_mem_size; + int err, irq, i; + + /* + * VCHI messages between the CPU and firmware use + * 32-bit bus addresses. 
+ */ + err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + + if (err < 0) + return err; + + drv_mgmt->fragments_size = 2 * drv_mgmt->info->cache_line_size; + + /* Allocate space for the channels in coherent memory */ + slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE); + frag_mem_size = PAGE_ALIGN(drv_mgmt->fragments_size * MAX_FRAGMENTS); + + slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size, + &slot_phys, GFP_KERNEL); + if (!slot_mem) { + dev_err(dev, "could not allocate DMA memory\n"); + return -ENOMEM; + } + + WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0); + + vchiq_slot_zero = vchiq_init_slots(dev, slot_mem, slot_mem_size); + if (!vchiq_slot_zero) + return -ENOMEM; + + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] = + (int)slot_phys + slot_mem_size; + vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] = + MAX_FRAGMENTS; + + drv_mgmt->fragments_base = (char *)slot_mem + slot_mem_size; + + drv_mgmt->free_fragments = drv_mgmt->fragments_base; + for (i = 0; i < (MAX_FRAGMENTS - 1); i++) { + *(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] = + &drv_mgmt->fragments_base[(i + 1) * drv_mgmt->fragments_size]; + } + *(char **)&drv_mgmt->fragments_base[i * drv_mgmt->fragments_size] = NULL; + sema_init(&drv_mgmt->free_fragments_sema, MAX_FRAGMENTS); + sema_init(&drv_mgmt->free_fragments_mutex, 1); + + err = vchiq_init_state(state, vchiq_slot_zero, dev); + if (err) + return err; + + drv_mgmt->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(drv_mgmt->regs)) + return PTR_ERR(drv_mgmt->regs); + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) + return irq; + + err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL, + "VCHIQ doorbell", state); + if (err) { + dev_err(dev, "failed to register irq=%d\n", irq); + return err; + } + + /* Send the base address of the slots to VideoCore */ + channelbase = slot_phys; + err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT, + &channelbase, sizeof(channelbase)); + if (err) { + dev_err(dev, "failed to send firmware property: %d\n", err); + return err; + } + + if (channelbase) { + dev_err(dev, "failed to set channelbase (response: %x)\n", + channelbase); + return -ENXIO; + } + + dev_dbg(&pdev->dev, "arm: vchiq_init - done (slots %p, phys %pad)\n", + vchiq_slot_zero, &slot_phys); + + mutex_init(&drv_mgmt->connected_mutex); + vchiq_call_connected_callbacks(drv_mgmt); + + return 0; +} + +int +vchiq_platform_init_state(struct vchiq_state *state) +{ + struct vchiq_arm_state *platform_state; + + platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL); + if (!platform_state) + return -ENOMEM; + + rwlock_init(&platform_state->susp_res_lock); + + init_completion(&platform_state->ka_evt); + atomic_set(&platform_state->ka_use_count, 0); + atomic_set(&platform_state->ka_use_ack_count, 0); + atomic_set(&platform_state->ka_release_count, 0); + + platform_state->state = state; + + state->platform_state = (struct opaque_platform_state *)platform_state; + + return 0; +} + +static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state) +{ + return (struct vchiq_arm_state *)state->platform_state; +} + +static void +vchiq_platform_uninit(struct vchiq_drv_mgmt *mgmt) +{ + struct vchiq_arm_state *arm_state; + + kthread_stop(mgmt->state.sync_thread); + kthread_stop(mgmt->state.recycle_thread); + kthread_stop(mgmt->state.slot_handler_thread); + + arm_state = vchiq_platform_get_arm_state(&mgmt->state); + if 
(!IS_ERR_OR_NULL(arm_state->ka_thread)) + kthread_stop(arm_state->ka_thread); +} + +void vchiq_dump_platform_state(struct seq_file *f) +{ + seq_puts(f, " Platform: 2835 (VC master)\n"); +} + +#define VCHIQ_INIT_RETRIES 10 +int vchiq_initialise(struct vchiq_state *state, struct vchiq_instance **instance_out) +{ + struct vchiq_instance *instance = NULL; + int i, ret; + + /* + * VideoCore may not be ready due to boot up timing. + * It may never be ready if kernel and firmware are mismatched,so don't + * block forever. + */ + for (i = 0; i < VCHIQ_INIT_RETRIES; i++) { + if (vchiq_remote_initialised(state)) + break; + usleep_range(500, 600); + } + if (i == VCHIQ_INIT_RETRIES) { + dev_err(state->dev, "core: %s: Videocore not initialized\n", __func__); + ret = -ENOTCONN; + goto failed; + } else if (i > 0) { + dev_warn(state->dev, "core: %s: videocore initialized after %d retries\n", + __func__, i); + } + + instance = kzalloc(sizeof(*instance), GFP_KERNEL); + if (!instance) { + ret = -ENOMEM; + goto failed; + } + + instance->connected = 0; + instance->state = state; + mutex_init(&instance->bulk_waiter_list_mutex); + INIT_LIST_HEAD(&instance->bulk_waiter_list); + + *instance_out = instance; + + ret = 0; + +failed: + dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret); + + return ret; +} +EXPORT_SYMBOL(vchiq_initialise); + +void free_bulk_waiter(struct vchiq_instance *instance) +{ + struct bulk_waiter_node *waiter, *next; + + list_for_each_entry_safe(waiter, next, + &instance->bulk_waiter_list, list) { + list_del(&waiter->list); + dev_dbg(instance->state->dev, + "arm: bulk_waiter - cleaned up %p for pid %d\n", + waiter, waiter->pid); + kfree(waiter); + } +} + +int vchiq_shutdown(struct vchiq_instance *instance) +{ + struct vchiq_state *state = instance->state; + int ret = 0; + + mutex_lock(&state->mutex); + + /* Remove all services */ + vchiq_shutdown_internal(state, instance); + + mutex_unlock(&state->mutex); + + dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret); + + free_bulk_waiter(instance); + kfree(instance); + + return ret; +} +EXPORT_SYMBOL(vchiq_shutdown); + +static int vchiq_is_connected(struct vchiq_instance *instance) +{ + return instance->connected; +} + +int vchiq_connect(struct vchiq_instance *instance) +{ + struct vchiq_state *state = instance->state; + int ret; + + if (mutex_lock_killable(&state->mutex)) { + dev_dbg(state->dev, + "core: call to mutex_lock failed\n"); + ret = -EAGAIN; + goto failed; + } + ret = vchiq_connect_internal(state, instance); + + if (!ret) + instance->connected = 1; + + mutex_unlock(&state->mutex); + +failed: + dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret); + + return ret; +} +EXPORT_SYMBOL(vchiq_connect); + +static int +vchiq_add_service(struct vchiq_instance *instance, + const struct vchiq_service_params_kernel *params, + unsigned int *phandle) +{ + struct vchiq_state *state = instance->state; + struct vchiq_service *service = NULL; + int srvstate, ret; + + *phandle = VCHIQ_SERVICE_HANDLE_INVALID; + + srvstate = vchiq_is_connected(instance) + ? 
VCHIQ_SRVSTATE_LISTENING + : VCHIQ_SRVSTATE_HIDDEN; + + service = vchiq_add_service_internal(state, params, srvstate, instance, NULL); + + if (service) { + *phandle = service->handle; + ret = 0; + } else { + ret = -EINVAL; + } + + dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret); + + return ret; +} + +int +vchiq_open_service(struct vchiq_instance *instance, + const struct vchiq_service_params_kernel *params, + unsigned int *phandle) +{ + struct vchiq_state *state = instance->state; + struct vchiq_service *service = NULL; + int ret = -EINVAL; + + *phandle = VCHIQ_SERVICE_HANDLE_INVALID; + + if (!vchiq_is_connected(instance)) + goto failed; + + service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL); + + if (service) { + *phandle = service->handle; + ret = vchiq_open_service_internal(service, current->pid); + if (ret) { + vchiq_remove_service(instance, service->handle); + *phandle = VCHIQ_SERVICE_HANDLE_INVALID; + } + } + +failed: + dev_dbg(state->dev, "core: (%p): returning %d\n", instance, ret); + + return ret; +} +EXPORT_SYMBOL(vchiq_open_service); + +int +vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data, + unsigned int size, void *userdata, enum vchiq_bulk_mode mode) +{ + struct vchiq_bulk bulk_params = {}; + int ret; + + switch (mode) { + case VCHIQ_BULK_MODE_NOCALLBACK: + case VCHIQ_BULK_MODE_CALLBACK: + + bulk_params.offset = (void *)data; + bulk_params.mode = mode; + bulk_params.size = size; + bulk_params.cb_data = userdata; + bulk_params.dir = VCHIQ_BULK_TRANSMIT; + + ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params); + break; + case VCHIQ_BULK_MODE_BLOCKING: + bulk_params.offset = (void *)data; + bulk_params.mode = mode; + bulk_params.size = size; + bulk_params.dir = VCHIQ_BULK_TRANSMIT; + + ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params); + break; + default: + return -EINVAL; + } + + return ret; +} +EXPORT_SYMBOL(vchiq_bulk_transmit); + +int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle, + void *data, unsigned int size, void *userdata, + enum vchiq_bulk_mode mode) +{ + struct vchiq_bulk bulk_params = {}; + int ret; + + switch (mode) { + case VCHIQ_BULK_MODE_NOCALLBACK: + case VCHIQ_BULK_MODE_CALLBACK: + + bulk_params.offset = (void *)data; + bulk_params.mode = mode; + bulk_params.size = size; + bulk_params.cb_data = userdata; + bulk_params.dir = VCHIQ_BULK_RECEIVE; + + ret = vchiq_bulk_xfer_callback(instance, handle, &bulk_params); + break; + case VCHIQ_BULK_MODE_BLOCKING: + bulk_params.offset = (void *)data; + bulk_params.mode = mode; + bulk_params.size = size; + bulk_params.dir = VCHIQ_BULK_RECEIVE; + + ret = vchiq_blocking_bulk_transfer(instance, handle, &bulk_params); + break; + default: + return -EINVAL; + } + + return ret; +} +EXPORT_SYMBOL(vchiq_bulk_receive); + +static int +vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_bulk *bulk_params) +{ + struct vchiq_service *service; + struct bulk_waiter_node *waiter = NULL, *iter; + int ret; + + service = find_service_by_handle(instance, handle); + if (!service) + return -EINVAL; + + vchiq_service_put(service); + + mutex_lock(&instance->bulk_waiter_list_mutex); + list_for_each_entry(iter, &instance->bulk_waiter_list, list) { + if (iter->pid == current->pid) { + list_del(&iter->list); + waiter = iter; + break; + } + } + mutex_unlock(&instance->bulk_waiter_list_mutex); + + if (waiter) { + struct vchiq_bulk *bulk = 
waiter->bulk_waiter.bulk; + + if (bulk) { + /* This thread has an outstanding bulk transfer. */ + /* FIXME: why compare a dma address to a pointer? */ + if ((bulk->dma_addr != (dma_addr_t)(uintptr_t)bulk_params->dma_addr) || + (bulk->size != bulk_params->size)) { + /* + * This is not a retry of the previous one. + * Cancel the signal when the transfer completes. + */ + spin_lock(&service->state->bulk_waiter_spinlock); + bulk->waiter = NULL; + spin_unlock(&service->state->bulk_waiter_spinlock); + } + } + } else { + waiter = kzalloc(sizeof(*waiter), GFP_KERNEL); + if (!waiter) + return -ENOMEM; + } + + bulk_params->waiter = &waiter->bulk_waiter; + + ret = vchiq_bulk_xfer_blocking(instance, handle, bulk_params); + if ((ret != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) { + struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk; + + if (bulk) { + /* Cancel the signal when the transfer completes. */ + spin_lock(&service->state->bulk_waiter_spinlock); + bulk->waiter = NULL; + spin_unlock(&service->state->bulk_waiter_spinlock); + } + kfree(waiter); + } else { + waiter->pid = current->pid; + mutex_lock(&instance->bulk_waiter_list_mutex); + list_add(&waiter->list, &instance->bulk_waiter_list); + mutex_unlock(&instance->bulk_waiter_list_mutex); + dev_dbg(instance->state->dev, "arm: saved bulk_waiter %p for pid %d\n", + waiter, current->pid); + } + + return ret; +} + +static int +add_completion(struct vchiq_instance *instance, enum vchiq_reason reason, + struct vchiq_header *header, struct user_service *user_service, + void *cb_data, void __user *cb_userdata) +{ + struct vchiq_completion_data_kernel *completion; + struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev); + int insert; + + DEBUG_INITIALISE(mgmt->state.local); + + insert = instance->completion_insert; + while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) { + /* Out of space - wait for the client */ + DEBUG_TRACE(SERVICE_CALLBACK_LINE); + dev_dbg(instance->state->dev, "core: completion queue full\n"); + DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT); + if (wait_for_completion_interruptible(&instance->remove_event)) { + dev_dbg(instance->state->dev, "arm: service_callback interrupted\n"); + return -EAGAIN; + } else if (instance->closing) { + dev_dbg(instance->state->dev, "arm: service_callback closing\n"); + return 0; + } + DEBUG_TRACE(SERVICE_CALLBACK_LINE); + } + + completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)]; + + completion->header = header; + completion->reason = reason; + /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */ + completion->service_userdata = user_service->service; + completion->cb_data = cb_data; + completion->cb_userdata = cb_userdata; + + if (reason == VCHIQ_SERVICE_CLOSED) { + /* + * Take an extra reference, to be held until + * this CLOSED notification is delivered. + */ + vchiq_service_get(user_service->service); + if (instance->use_close_delivered) + user_service->close_pending = 1; + } + + /* + * A write barrier is needed here to ensure that the entire completion + * record is written out before the insert point. 
+ */ + wmb(); + + if (reason == VCHIQ_MESSAGE_AVAILABLE) + user_service->message_available_pos = insert; + + insert++; + instance->completion_insert = insert; + + complete(&instance->insert_event); + + return 0; +} + +static int +service_single_message(struct vchiq_instance *instance, + enum vchiq_reason reason, struct vchiq_service *service, + void *cb_data, void __user *cb_userdata) +{ + struct user_service *user_service; + + user_service = (struct user_service *)service->base.userdata; + + dev_dbg(service->state->dev, "arm: msg queue full\n"); + /* + * If there is no MESSAGE_AVAILABLE in the completion + * queue, add one + */ + if ((user_service->message_available_pos - + instance->completion_remove) < 0) { + int ret; + + dev_dbg(instance->state->dev, + "arm: Inserting extra MESSAGE_AVAILABLE\n"); + ret = add_completion(instance, reason, NULL, user_service, + cb_data, cb_userdata); + if (ret) + return ret; + } + + if (wait_for_completion_interruptible(&user_service->remove_event)) { + dev_dbg(instance->state->dev, "arm: interrupted\n"); + return -EAGAIN; + } else if (instance->closing) { + dev_dbg(instance->state->dev, "arm: closing\n"); + return -EINVAL; + } + + return 0; +} + +int +service_callback(struct vchiq_instance *instance, enum vchiq_reason reason, + struct vchiq_header *header, unsigned int handle, + void *cb_data, void __user *cb_userdata) +{ + /* + * How do we ensure the callback goes to the right client? + * The service_user data points to a user_service record + * containing the original callback and the user state structure, which + * contains a circular buffer for completion records. + */ + struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(instance->state->dev); + struct user_service *user_service; + struct vchiq_service *service; + bool skip_completion = false; + + DEBUG_INITIALISE(mgmt->state.local); + + DEBUG_TRACE(SERVICE_CALLBACK_LINE); + + rcu_read_lock(); + service = handle_to_service(instance, handle); + if (WARN_ON(!service)) { + rcu_read_unlock(); + return 0; + } + + user_service = (struct user_service *)service->base.userdata; + + if (instance->closing) { + rcu_read_unlock(); + return 0; + } + + /* + * As hopping around different synchronization mechanism, + * taking an extra reference results in simpler implementation. + */ + vchiq_service_get(service); + rcu_read_unlock(); + + dev_dbg(service->state->dev, + "arm: service %p(%d,%p), reason %d, header %p, instance %p, cb_data %p, cb_userdata %p\n", + user_service, service->localport, user_service->userdata, + reason, header, instance, cb_data, cb_userdata); + + if (header && user_service->is_vchi) { + spin_lock(&service->state->msg_queue_spinlock); + while (user_service->msg_insert == + (user_service->msg_remove + MSG_QUEUE_SIZE)) { + int ret; + + spin_unlock(&service->state->msg_queue_spinlock); + DEBUG_TRACE(SERVICE_CALLBACK_LINE); + DEBUG_COUNT(MSG_QUEUE_FULL_COUNT); + + ret = service_single_message(instance, reason, service, + cb_data, cb_userdata); + if (ret) { + DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); + return ret; + } + DEBUG_TRACE(SERVICE_CALLBACK_LINE); + spin_lock(&service->state->msg_queue_spinlock); + } + + user_service->msg_queue[user_service->msg_insert & + (MSG_QUEUE_SIZE - 1)] = header; + user_service->msg_insert++; + + /* + * If there is a thread waiting in DEQUEUE_MESSAGE, or if + * there is a MESSAGE_AVAILABLE in the completion queue then + * bypass the completion queue. 
+ */ + if (((user_service->message_available_pos - + instance->completion_remove) >= 0) || + user_service->dequeue_pending) { + user_service->dequeue_pending = 0; + skip_completion = true; + } + + spin_unlock(&service->state->msg_queue_spinlock); + complete(&user_service->insert_event); + + header = NULL; + } + DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); + + if (skip_completion) + return 0; + + return add_completion(instance, reason, header, user_service, + cb_data, cb_userdata); +} + +void vchiq_dump_platform_instances(struct vchiq_state *state, struct seq_file *f) +{ + int i; + + if (!vchiq_remote_initialised(state)) + return; + + /* + * There is no list of instances, so instead scan all services, + * marking those that have been dumped. + */ + + rcu_read_lock(); + for (i = 0; i < state->unused_service; i++) { + struct vchiq_service *service; + struct vchiq_instance *instance; + + service = rcu_dereference(state->services[i]); + if (!service || service->base.callback != service_callback) + continue; + + instance = service->instance; + if (instance) + instance->mark = 0; + } + rcu_read_unlock(); + + for (i = 0; i < state->unused_service; i++) { + struct vchiq_service *service; + struct vchiq_instance *instance; + + rcu_read_lock(); + service = rcu_dereference(state->services[i]); + if (!service || service->base.callback != service_callback) { + rcu_read_unlock(); + continue; + } + + instance = service->instance; + if (!instance || instance->mark) { + rcu_read_unlock(); + continue; + } + rcu_read_unlock(); + + seq_printf(f, "Instance %pK: pid %d,%s completions %d/%d\n", + instance, instance->pid, + instance->connected ? " connected, " : + "", + instance->completion_insert - + instance->completion_remove, + MAX_COMPLETIONS); + instance->mark = 1; + } +} + +void vchiq_dump_platform_service_state(struct seq_file *f, + struct vchiq_service *service) +{ + struct user_service *user_service = + (struct user_service *)service->base.userdata; + + seq_printf(f, " instance %pK", service->instance); + + if ((service->base.callback == service_callback) && user_service->is_vchi) { + seq_printf(f, ", %d/%d messages", + user_service->msg_insert - user_service->msg_remove, + MSG_QUEUE_SIZE); + + if (user_service->dequeue_pending) + seq_puts(f, " (dequeue pending)"); + } + + seq_puts(f, "\n"); +} + +/* + * Autosuspend related functionality + */ + +static int +vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance, + enum vchiq_reason reason, + struct vchiq_header *header, + unsigned int service_user, + void *cb_data, void __user *cb_userdata) +{ + dev_err(instance->state->dev, "suspend: %s: callback reason %d\n", + __func__, reason); + return 0; +} + +static int +vchiq_keepalive_thread_func(void *v) +{ + struct vchiq_state *state = (struct vchiq_state *)v; + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + struct vchiq_instance *instance; + unsigned int ka_handle; + int ret; + + struct vchiq_service_params_kernel params = { + .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'), + .callback = vchiq_keepalive_vchiq_callback, + .version = KEEPALIVE_VER, + .version_min = KEEPALIVE_VER_MIN + }; + + ret = vchiq_initialise(state, &instance); + if (ret) { + dev_err(state->dev, "suspend: %s: vchiq_initialise failed %d\n", __func__, ret); + goto exit; + } + + ret = vchiq_connect(instance); + if (ret) { + dev_err(state->dev, "suspend: %s: vchiq_connect failed %d\n", __func__, ret); + goto shutdown; + } + + ret = vchiq_add_service(instance, ¶ms, &ka_handle); + if 
(ret) { + dev_err(state->dev, "suspend: %s: vchiq_open_service failed %d\n", + __func__, ret); + goto shutdown; + } + + while (!kthread_should_stop()) { + long rc = 0, uc = 0; + + if (wait_for_completion_interruptible(&arm_state->ka_evt)) { + dev_dbg(state->dev, "suspend: %s: interrupted\n", __func__); + flush_signals(current); + continue; + } + + /* + * read and clear counters. Do release_count then use_count to + * prevent getting more releases than uses + */ + rc = atomic_xchg(&arm_state->ka_release_count, 0); + uc = atomic_xchg(&arm_state->ka_use_count, 0); + + /* + * Call use/release service the requisite number of times. + * Process use before release so use counts don't go negative + */ + while (uc--) { + atomic_inc(&arm_state->ka_use_ack_count); + ret = vchiq_use_service(instance, ka_handle); + if (ret) { + dev_err(state->dev, "suspend: %s: vchiq_use_service error %d\n", + __func__, ret); + } + } + while (rc--) { + ret = vchiq_release_service(instance, ka_handle); + if (ret) { + dev_err(state->dev, "suspend: %s: vchiq_release_service error %d\n", + __func__, ret); + } + } + } + +shutdown: + vchiq_shutdown(instance); +exit: + return 0; +} + +int +vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service, + enum USE_TYPE_E use_type) +{ + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + int ret = 0; + char entity[64]; + int *entity_uc; + int local_uc; + + if (!arm_state) { + ret = -EINVAL; + goto out; + } + + if (use_type == USE_TYPE_VCHIQ) { + snprintf(entity, sizeof(entity), "VCHIQ: "); + entity_uc = &arm_state->peer_use_count; + } else if (service) { + snprintf(entity, sizeof(entity), "%p4cc:%03d", + &service->base.fourcc, + service->client_id); + entity_uc = &service->service_use_count; + } else { + dev_err(state->dev, "suspend: %s: null service ptr\n", __func__); + ret = -EINVAL; + goto out; + } + + write_lock_bh(&arm_state->susp_res_lock); + local_uc = ++arm_state->videocore_use_count; + ++(*entity_uc); + + dev_dbg(state->dev, "suspend: %s count %d, state count %d\n", + entity, *entity_uc, local_uc); + + write_unlock_bh(&arm_state->susp_res_lock); + + if (!ret) { + int ret = 0; + long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0); + + while (ack_cnt && !ret) { + /* Send the use notify to videocore */ + ret = vchiq_send_remote_use_active(state); + if (!ret) + ack_cnt--; + else + atomic_add(ack_cnt, &arm_state->ka_use_ack_count); + } + } + +out: + dev_dbg(state->dev, "suspend: exit %d\n", ret); + return ret; +} + +int +vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service) +{ + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + int ret = 0; + char entity[64]; + int *entity_uc; + + if (!arm_state) { + ret = -EINVAL; + goto out; + } + + if (service) { + snprintf(entity, sizeof(entity), "%p4cc:%03d", + &service->base.fourcc, + service->client_id); + entity_uc = &service->service_use_count; + } else { + snprintf(entity, sizeof(entity), "PEER: "); + entity_uc = &arm_state->peer_use_count; + } + + write_lock_bh(&arm_state->susp_res_lock); + if (!arm_state->videocore_use_count || !(*entity_uc)) { + WARN_ON(!arm_state->videocore_use_count); + WARN_ON(!(*entity_uc)); + ret = -EINVAL; + goto unlock; + } + --arm_state->videocore_use_count; + --(*entity_uc); + + dev_dbg(state->dev, "suspend: %s count %d, state count %d\n", + entity, *entity_uc, arm_state->videocore_use_count); + +unlock: + write_unlock_bh(&arm_state->susp_res_lock); + +out: + dev_dbg(state->dev, "suspend: exit %d\n", ret); + 
return ret; +} + +void +vchiq_on_remote_use(struct vchiq_state *state) +{ + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + + atomic_inc(&arm_state->ka_use_count); + complete(&arm_state->ka_evt); +} + +void +vchiq_on_remote_release(struct vchiq_state *state) +{ + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + + atomic_inc(&arm_state->ka_release_count); + complete(&arm_state->ka_evt); +} + +int +vchiq_use_service_internal(struct vchiq_service *service) +{ + return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE); +} + +int +vchiq_release_service_internal(struct vchiq_service *service) +{ + return vchiq_release_internal(service->state, service); +} + +struct vchiq_debugfs_node * +vchiq_instance_get_debugfs_node(struct vchiq_instance *instance) +{ + return &instance->debugfs_node; +} + +int +vchiq_instance_get_use_count(struct vchiq_instance *instance) +{ + struct vchiq_service *service; + int use_count = 0, i; + + i = 0; + rcu_read_lock(); + while ((service = __next_service_by_instance(instance->state, + instance, &i))) + use_count += service->service_use_count; + rcu_read_unlock(); + return use_count; +} + +int +vchiq_instance_get_pid(struct vchiq_instance *instance) +{ + return instance->pid; +} + +int +vchiq_instance_get_trace(struct vchiq_instance *instance) +{ + return instance->trace; +} + +void +vchiq_instance_set_trace(struct vchiq_instance *instance, int trace) +{ + struct vchiq_service *service; + int i; + + i = 0; + rcu_read_lock(); + while ((service = __next_service_by_instance(instance->state, + instance, &i))) + service->trace = trace; + rcu_read_unlock(); + instance->trace = (trace != 0); +} + +int +vchiq_use_service(struct vchiq_instance *instance, unsigned int handle) +{ + int ret = -EINVAL; + struct vchiq_service *service = find_service_by_handle(instance, handle); + + if (service) { + ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE); + vchiq_service_put(service); + } + return ret; +} +EXPORT_SYMBOL(vchiq_use_service); + +int +vchiq_release_service(struct vchiq_instance *instance, unsigned int handle) +{ + int ret = -EINVAL; + struct vchiq_service *service = find_service_by_handle(instance, handle); + + if (service) { + ret = vchiq_release_internal(service->state, service); + vchiq_service_put(service); + } + return ret; +} +EXPORT_SYMBOL(vchiq_release_service); + +struct service_data_struct { + int fourcc; + int clientid; + int use_count; +}; + +void +vchiq_dump_service_use_state(struct vchiq_state *state) +{ + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + struct service_data_struct *service_data; + int i, found = 0; + /* + * If there's more than 64 services, only dump ones with + * non-zero counts + */ + int only_nonzero = 0; + static const char *nz = "<-- preventing suspend"; + + int peer_count; + int vc_use_count; + int active_services; + + if (!arm_state) + return; + + service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data), + GFP_KERNEL); + if (!service_data) + return; + + read_lock_bh(&arm_state->susp_res_lock); + peer_count = arm_state->peer_use_count; + vc_use_count = arm_state->videocore_use_count; + active_services = state->unused_service; + if (active_services > MAX_SERVICES) + only_nonzero = 1; + + rcu_read_lock(); + for (i = 0; i < active_services; i++) { + struct vchiq_service *service_ptr = + rcu_dereference(state->services[i]); + + if (!service_ptr) + continue; + + if (only_nonzero && !service_ptr->service_use_count) + continue; + + 
if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE) + continue; + + service_data[found].fourcc = service_ptr->base.fourcc; + service_data[found].clientid = service_ptr->client_id; + service_data[found].use_count = service_ptr->service_use_count; + found++; + if (found >= MAX_SERVICES) + break; + } + rcu_read_unlock(); + + read_unlock_bh(&arm_state->susp_res_lock); + + if (only_nonzero) + dev_warn(state->dev, + "suspend: Too many active services (%d). Only dumping up to first %d services with non-zero use-count\n", + active_services, found); + + for (i = 0; i < found; i++) { + dev_warn(state->dev, + "suspend: %p4cc:%d service count %d %s\n", + &service_data[i].fourcc, + service_data[i].clientid, service_data[i].use_count, + service_data[i].use_count ? nz : ""); + } + dev_warn(state->dev, "suspend: VCHIQ use count %d\n", peer_count); + dev_warn(state->dev, "suspend: Overall vchiq instance use count %d\n", vc_use_count); + + kfree(service_data); +} + +int +vchiq_check_service(struct vchiq_service *service) +{ + struct vchiq_arm_state *arm_state; + int ret = -EINVAL; + + if (!service || !service->state) + goto out; + + arm_state = vchiq_platform_get_arm_state(service->state); + + read_lock_bh(&arm_state->susp_res_lock); + if (service->service_use_count) + ret = 0; + read_unlock_bh(&arm_state->susp_res_lock); + + if (ret) { + dev_err(service->state->dev, + "suspend: %s: %p4cc:%d service count %d, state count %d\n", + __func__, &service->base.fourcc, service->client_id, + service->service_use_count, arm_state->videocore_use_count); + vchiq_dump_service_use_state(service->state); + } +out: + return ret; +} + +void vchiq_platform_conn_state_changed(struct vchiq_state *state, + enum vchiq_connstate oldstate, + enum vchiq_connstate newstate) +{ + struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + char threadname[16]; + + dev_dbg(state->dev, "suspend: %d: %s->%s\n", + state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate)); + if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED) + return; + + write_lock_bh(&arm_state->susp_res_lock); + if (arm_state->first_connect) { + write_unlock_bh(&arm_state->susp_res_lock); + return; + } + + arm_state->first_connect = 1; + write_unlock_bh(&arm_state->susp_res_lock); + snprintf(threadname, sizeof(threadname), "vchiq-keep/%d", + state->id); + arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func, + (void *)state, + threadname); + if (IS_ERR(arm_state->ka_thread)) { + dev_err(state->dev, "suspend: Couldn't create thread %s\n", + threadname); + } else { + wake_up_process(arm_state->ka_thread); + } +} + +static const struct of_device_id vchiq_of_match[] = { + { .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_info }, + { .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_info }, + {}, +}; +MODULE_DEVICE_TABLE(of, vchiq_of_match); + +static int vchiq_probe(struct platform_device *pdev) +{ + const struct vchiq_platform_info *info; + struct vchiq_drv_mgmt *mgmt; + int ret; + + info = of_device_get_match_data(&pdev->dev); + if (!info) + return -EINVAL; + + struct device_node *fw_node __free(device_node) = + of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware"); + if (!fw_node) { + dev_err(&pdev->dev, "Missing firmware node\n"); + return -ENOENT; + } + + mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL); + if (!mgmt) + return -ENOMEM; + + mgmt->fw = devm_rpi_firmware_get(&pdev->dev, fw_node); + if (!mgmt->fw) + return -EPROBE_DEFER; + + mgmt->info = info; + platform_set_drvdata(pdev, mgmt); + + 
ret = vchiq_platform_init(pdev, &mgmt->state); + if (ret) { + dev_err(&pdev->dev, "arm: Could not initialize vchiq platform\n"); + return ret; + } + + dev_dbg(&pdev->dev, "arm: platform initialised - version %d (min %d)\n", + VCHIQ_VERSION, VCHIQ_VERSION_MIN); + + /* + * Simply exit on error since the function handles cleanup in + * cases of failure. + */ + ret = vchiq_register_chrdev(&pdev->dev); + if (ret) { + dev_err(&pdev->dev, "arm: Failed to initialize vchiq cdev\n"); + vchiq_platform_uninit(mgmt); + return ret; + } + + vchiq_debugfs_init(&mgmt->state); + + bcm2835_audio = vchiq_device_register(&pdev->dev, "bcm2835-audio"); + + return 0; +} + +static void vchiq_remove(struct platform_device *pdev) +{ + struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(&pdev->dev); + + vchiq_device_unregister(bcm2835_audio); + vchiq_debugfs_deinit(); + vchiq_deregister_chrdev(); + vchiq_platform_uninit(mgmt); +} + +static struct platform_driver vchiq_driver = { + .driver = { + .name = "bcm2835_vchiq", + .of_match_table = vchiq_of_match, + }, + .probe = vchiq_probe, + .remove = vchiq_remove, +}; + +static int __init vchiq_driver_init(void) +{ + int ret; + + ret = bus_register(&vchiq_bus_type); + if (ret) { + pr_err("Failed to register %s\n", vchiq_bus_type.name); + return ret; + } + + ret = platform_driver_register(&vchiq_driver); + if (ret) { + pr_err("Failed to register vchiq driver\n"); + bus_unregister(&vchiq_bus_type); + } + + return ret; +} +module_init(vchiq_driver_init); + +static void __exit vchiq_driver_exit(void) +{ + bus_unregister(&vchiq_bus_type); + platform_driver_unregister(&vchiq_driver); +} +module_exit(vchiq_driver_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Videocore VCHIQ driver"); +MODULE_AUTHOR("Broadcom Corporation"); diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c new file mode 100644 index 000000000000..f50e637d505c --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_bus.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * vchiq_device.c - VCHIQ generic device and bus-type + * + * Copyright (c) 2023 Ideas On Board Oy + */ + +#include <linux/device/bus.h> +#include <linux/dma-mapping.h> +#include <linux/of_device.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include <linux/raspberrypi/vchiq_arm.h> +#include <linux/raspberrypi/vchiq_bus.h> + +static int vchiq_bus_type_match(struct device *dev, const struct device_driver *drv) +{ + if (dev->bus == &vchiq_bus_type && + strcmp(dev_name(dev), drv->name) == 0) + return true; + + return false; +} + +static int vchiq_bus_uevent(const struct device *dev, struct kobj_uevent_env *env) +{ + const struct vchiq_device *device = container_of_const(dev, struct vchiq_device, dev); + + return add_uevent_var(env, "MODALIAS=vchiq:%s", dev_name(&device->dev)); +} + +static int vchiq_bus_probe(struct device *dev) +{ + struct vchiq_device *device = to_vchiq_device(dev); + struct vchiq_driver *driver = to_vchiq_driver(dev->driver); + + return driver->probe(device); +} + +static void vchiq_bus_remove(struct device *dev) +{ + struct vchiq_device *device = to_vchiq_device(dev); + struct vchiq_driver *driver = to_vchiq_driver(dev->driver); + + if (driver->remove) + driver->remove(device); +} + +const struct bus_type vchiq_bus_type = { + .name = "vchiq-bus", + .match = vchiq_bus_type_match, + .uevent = vchiq_bus_uevent, + .probe = vchiq_bus_probe, + .remove = vchiq_bus_remove, +}; + +static void 
vchiq_device_release(struct device *dev) +{ + struct vchiq_device *device = to_vchiq_device(dev); + + kfree(device); +} + +struct vchiq_device * +vchiq_device_register(struct device *parent, const char *name) +{ + struct vchiq_device *device; + int ret; + + device = kzalloc(sizeof(*device), GFP_KERNEL); + if (!device) + return NULL; + + device->dev.init_name = name; + device->dev.parent = parent; + device->dev.bus = &vchiq_bus_type; + device->dev.dma_mask = &device->dev.coherent_dma_mask; + device->dev.release = vchiq_device_release; + + device->drv_mgmt = dev_get_drvdata(parent); + + of_dma_configure(&device->dev, parent->of_node, true); + + ret = device_register(&device->dev); + if (ret) { + dev_err(parent, "Cannot register %s: %d\n", name, ret); + put_device(&device->dev); + return NULL; + } + + return device; +} + +void vchiq_device_unregister(struct vchiq_device *vchiq_dev) +{ + device_unregister(&vchiq_dev->dev); +} + +int vchiq_driver_register(struct vchiq_driver *vchiq_drv) +{ + vchiq_drv->driver.bus = &vchiq_bus_type; + + return driver_register(&vchiq_drv->driver); +} +EXPORT_SYMBOL_GPL(vchiq_driver_register); + +void vchiq_driver_unregister(struct vchiq_driver *vchiq_drv) +{ + driver_unregister(&vchiq_drv->driver); +} +EXPORT_SYMBOL_GPL(vchiq_driver_unregister); diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c new file mode 100644 index 000000000000..83de27cfd469 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_core.c @@ -0,0 +1,4013 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */ + +#include <linux/types.h> +#include <linux/completion.h> +#include <linux/mutex.h> +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/highmem.h> +#include <linux/kthread.h> +#include <linux/wait.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/kref.h> +#include <linux/rcupdate.h> +#include <linux/sched/signal.h> + +#include <linux/raspberrypi/vchiq_arm.h> +#include <linux/raspberrypi/vchiq_core.h> + +#define VCHIQ_SLOT_HANDLER_STACK 8192 + +#define VCHIQ_MSG_PADDING 0 /* - */ +#define VCHIQ_MSG_CONNECT 1 /* - */ +#define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */ +#define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */ +#define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */ +#define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */ +#define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */ +#define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */ +#define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */ +#define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */ +#define VCHIQ_MSG_PAUSE 10 /* - */ +#define VCHIQ_MSG_RESUME 11 /* - */ +#define VCHIQ_MSG_REMOTE_USE 12 /* - */ +#define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */ +#define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */ + +#define TYPE_SHIFT 24 + +#define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1) +#define VCHIQ_PORT_FREE 0x1000 +#define VCHIQ_PORT_IS_VALID(port) ((port) < VCHIQ_PORT_FREE) +#define VCHIQ_MAKE_MSG(type, srcport, dstport) \ + (((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0)) +#define VCHIQ_MSG_TYPE(msgid) ((unsigned int)(msgid) >> TYPE_SHIFT) +#define VCHIQ_MSG_SRCPORT(msgid) \ + ((unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff)) +#define VCHIQ_MSG_DSTPORT(msgid) \ + ((unsigned short)(msgid) & 0xfff) + +#define MAKE_CONNECT (VCHIQ_MSG_CONNECT << TYPE_SHIFT) +#define 
MAKE_OPEN(srcport) \ + ((VCHIQ_MSG_OPEN << TYPE_SHIFT) | ((srcport) << 12)) +#define MAKE_OPENACK(srcport, dstport) \ + ((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0)) +#define MAKE_CLOSE(srcport, dstport) \ + ((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0)) +#define MAKE_DATA(srcport, dstport) \ + ((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0)) +#define MAKE_PAUSE (VCHIQ_MSG_PAUSE << TYPE_SHIFT) +#define MAKE_RESUME (VCHIQ_MSG_RESUME << TYPE_SHIFT) +#define MAKE_REMOTE_USE (VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT) +#define MAKE_REMOTE_USE_ACTIVE (VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT) + +#define PAGELIST_WRITE 0 +#define PAGELIST_READ 1 +#define PAGELIST_READ_WITH_FRAGMENTS 2 + +#define BELL2 0x08 + +/* Ensure the fields are wide enough */ +static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX)) == 0); +static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0); +static_assert((unsigned int)VCHIQ_PORT_MAX < (unsigned int)VCHIQ_PORT_FREE); + +#define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0) +#define VCHIQ_MSGID_CLAIMED 0x40000000 + +#define VCHIQ_FOURCC_INVALID 0x00000000 +#define VCHIQ_FOURCC_IS_LEGAL(fourcc) ((fourcc) != VCHIQ_FOURCC_INVALID) + +#define VCHIQ_BULK_ACTUAL_ABORTED -1 + +#if VCHIQ_ENABLE_STATS +#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++) +#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++) +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \ + (service->stats. stat += addend) +#else +#define VCHIQ_STATS_INC(state, stat) ((void)0) +#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0) +#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0) +#endif + +#define HANDLE_STATE_SHIFT 12 + +#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index)) +#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index)) +#define SLOT_INDEX_FROM_DATA(state, data) \ + (((unsigned int)((char *)data - (char *)state->slot_data)) / \ + VCHIQ_SLOT_SIZE) +#define SLOT_INDEX_FROM_INFO(state, info) \ + ((unsigned int)(info - state->slot_info)) +#define SLOT_QUEUE_INDEX_FROM_POS(pos) \ + ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE)) +#define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \ + (SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK) + +#define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1)) + +#define NO_CLOSE_RECVD 0 +#define CLOSE_RECVD 1 + +#define NO_RETRY_POLL 0 +#define RETRY_POLL 1 + +struct vchiq_open_payload { + int fourcc; + int client_id; + short version; + short version_min; +}; + +struct vchiq_openack_payload { + short version; +}; + +enum { + QMFLAGS_IS_BLOCKING = BIT(0), + QMFLAGS_NO_MUTEX_LOCK = BIT(1), + QMFLAGS_NO_MUTEX_UNLOCK = BIT(2) +}; + +enum { + VCHIQ_POLL_TERMINATE, + VCHIQ_POLL_REMOVE, + VCHIQ_POLL_TXNOTIFY, + VCHIQ_POLL_RXNOTIFY, + VCHIQ_POLL_COUNT +}; + +/* we require this for consistency between endpoints */ +static_assert(sizeof(struct vchiq_header) == 8); +static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN); + +static inline void check_sizes(void) +{ + BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE); + BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS); + BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE); + BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header)); + BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS); + BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS); + BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES); +} + +static unsigned int handle_seq; + +static 
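A worked example of the msgid packing macros above, with the values computed directly from the shifts rather than from any external protocol document:

/*
 * msgid = VCHIQ_MAKE_MSG(VCHIQ_MSG_OPENACK, 3, 5)
 *       = (3 << 24) | (3 << 12) | (5 << 0) = 0x03003005
 *
 * VCHIQ_MSG_TYPE(0x03003005)    == 3 (OPENACK)
 * VCHIQ_MSG_SRCPORT(0x03003005) == 3
 * VCHIQ_MSG_DSTPORT(0x03003005) == 5
 *
 * The 12-bit port fields are also why VCHIQ_PORT_MAX must stay below
 * VCHIQ_PORT_FREE (0x1000), which the static_asserts above verify.
 */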
const char *const srvstate_names[] = { + "FREE", + "HIDDEN", + "LISTENING", + "OPENING", + "OPEN", + "OPENSYNC", + "CLOSESENT", + "CLOSERECVD", + "CLOSEWAIT", + "CLOSED" +}; + +static const char *const reason_names[] = { + "SERVICE_OPENED", + "SERVICE_CLOSED", + "MESSAGE_AVAILABLE", + "BULK_TRANSMIT_DONE", + "BULK_RECEIVE_DONE", + "BULK_TRANSMIT_ABORTED", + "BULK_RECEIVE_ABORTED" +}; + +static const char *const conn_state_names[] = { + "DISCONNECTED", + "CONNECTING", + "CONNECTED", + "PAUSING", + "PAUSE_SENT", + "PAUSED", + "RESUMING", + "PAUSE_TIMEOUT", + "RESUME_TIMEOUT" +}; + +static void +release_message_sync(struct vchiq_state *state, struct vchiq_header *header); + +static const char *msg_type_str(unsigned int msg_type) +{ + switch (msg_type) { + case VCHIQ_MSG_PADDING: return "PADDING"; + case VCHIQ_MSG_CONNECT: return "CONNECT"; + case VCHIQ_MSG_OPEN: return "OPEN"; + case VCHIQ_MSG_OPENACK: return "OPENACK"; + case VCHIQ_MSG_CLOSE: return "CLOSE"; + case VCHIQ_MSG_DATA: return "DATA"; + case VCHIQ_MSG_BULK_RX: return "BULK_RX"; + case VCHIQ_MSG_BULK_TX: return "BULK_TX"; + case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE"; + case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE"; + case VCHIQ_MSG_PAUSE: return "PAUSE"; + case VCHIQ_MSG_RESUME: return "RESUME"; + case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE"; + case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE"; + case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE"; + } + return "???"; +} + +static inline void +set_service_state(struct vchiq_service *service, int newstate) +{ + dev_dbg(service->state->dev, "core: %d: srv:%d %s->%s\n", + service->state->id, service->localport, + srvstate_names[service->srvstate], + srvstate_names[newstate]); + service->srvstate = newstate; +} + +struct vchiq_service *handle_to_service(struct vchiq_instance *instance, unsigned int handle) +{ + int idx = handle & (VCHIQ_MAX_SERVICES - 1); + + return rcu_dereference(instance->state->services[idx]); +} + +struct vchiq_service * +find_service_by_handle(struct vchiq_instance *instance, unsigned int handle) +{ + struct vchiq_service *service; + + rcu_read_lock(); + service = handle_to_service(instance, handle); + if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && + service->handle == handle && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + rcu_read_unlock(); + dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle); + return NULL; +} + +struct vchiq_service * +find_service_by_port(struct vchiq_state *state, unsigned int localport) +{ + if (localport <= VCHIQ_PORT_MAX) { + struct vchiq_service *service; + + rcu_read_lock(); + service = rcu_dereference(state->services[localport]); + if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + rcu_read_unlock(); + } + dev_dbg(state->dev, "core: Invalid port %u\n", localport); + return NULL; +} + +struct vchiq_service * +find_service_for_instance(struct vchiq_instance *instance, unsigned int handle) +{ + struct vchiq_service *service; + + rcu_read_lock(); + service = handle_to_service(instance, handle); + if (service && service->srvstate != VCHIQ_SRVSTATE_FREE && + service->handle == handle && + service->instance == instance && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + 
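Each successful lookup in the helpers above returns with a reference taken under RCU via kref_get_unless_zero(), so the caller must pair it with vchiq_service_put() (defined below). A minimal sketch of the expected calling pattern:

	struct vchiq_service *service;

	service = find_service_by_port(state, localport);
	if (!service)
		return -EINVAL;

	/* ... safe to use: the kref keeps the service alive ... */

	vchiq_service_put(service);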
} + rcu_read_unlock(); + dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle); + return NULL; +} + +struct vchiq_service * +find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle) +{ + struct vchiq_service *service; + + rcu_read_lock(); + service = handle_to_service(instance, handle); + if (service && + (service->srvstate == VCHIQ_SRVSTATE_FREE || + service->srvstate == VCHIQ_SRVSTATE_CLOSED) && + service->handle == handle && + service->instance == instance && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + rcu_read_unlock(); + dev_dbg(instance->state->dev, "core: Invalid service handle 0x%x\n", handle); + return NULL; +} + +struct vchiq_service * +__next_service_by_instance(struct vchiq_state *state, + struct vchiq_instance *instance, + int *pidx) +{ + struct vchiq_service *service = NULL; + int idx = *pidx; + + while (idx < state->unused_service) { + struct vchiq_service *srv; + + srv = rcu_dereference(state->services[idx]); + idx++; + if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE && + srv->instance == instance) { + service = srv; + break; + } + } + + *pidx = idx; + return service; +} + +struct vchiq_service * +next_service_by_instance(struct vchiq_state *state, + struct vchiq_instance *instance, + int *pidx) +{ + struct vchiq_service *service; + + rcu_read_lock(); + while (1) { + service = __next_service_by_instance(state, instance, pidx); + if (!service) + break; + if (kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + break; + } + } + rcu_read_unlock(); + return service; +} + +void +vchiq_service_get(struct vchiq_service *service) +{ + if (!service) { + WARN(1, "%s: service is NULL\n", __func__); + return; + } + kref_get(&service->ref_count); +} + +static void service_release(struct kref *kref) +{ + struct vchiq_service *service = + container_of(kref, struct vchiq_service, ref_count); + struct vchiq_state *state = service->state; + + WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE); + rcu_assign_pointer(state->services[service->localport], NULL); + if (service->userdata_term) + service->userdata_term(service->base.userdata); + kfree_rcu(service, rcu); +} + +void +vchiq_service_put(struct vchiq_service *service) +{ + if (!service) { + WARN(1, "%s: service is NULL\n", __func__); + return; + } + kref_put(&service->ref_count, service_release); +} + +int +vchiq_get_client_id(struct vchiq_instance *instance, unsigned int handle) +{ + struct vchiq_service *service; + int id; + + rcu_read_lock(); + service = handle_to_service(instance, handle); + id = service ? service->client_id : 0; + rcu_read_unlock(); + return id; +} + +void * +vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int handle) +{ + void *userdata; + struct vchiq_service *service; + + rcu_read_lock(); + service = handle_to_service(instance, handle); + userdata = service ? service->base.userdata : NULL; + rcu_read_unlock(); + return userdata; +} +EXPORT_SYMBOL(vchiq_get_service_userdata); + +static void +mark_service_closing_internal(struct vchiq_service *service, int sh_thread) +{ + struct vchiq_state *state = service->state; + struct vchiq_service_quota *quota; + + service->closing = 1; + + /* Synchronise with other threads. 
*/ + mutex_lock(&state->recycle_mutex); + mutex_unlock(&state->recycle_mutex); + if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) { + /* + * If we're pausing then the slot_mutex is held until resume + * by the slot handler. Therefore don't try to acquire this + * mutex if we're the slot handler and in the pause sent state. + * We don't need to in this case anyway. + */ + mutex_lock(&state->slot_mutex); + mutex_unlock(&state->slot_mutex); + } + + /* Unblock any sending thread. */ + quota = &state->service_quotas[service->localport]; + complete(&quota->quota_event); +} + +static void +mark_service_closing(struct vchiq_service *service) +{ + mark_service_closing_internal(service, 0); +} + +static inline int +make_service_callback(struct vchiq_service *service, enum vchiq_reason reason, + struct vchiq_header *header, struct vchiq_bulk *bulk) +{ + void *cb_data = NULL; + void __user *cb_userdata = NULL; + int status; + + /* + * If a bulk transfer is in progress, pass bulk->cb_*data to the + * callback function. + */ + if (bulk) { + cb_data = bulk->cb_data; + cb_userdata = bulk->cb_userdata; + } + + dev_dbg(service->state->dev, "core: %d: callback:%d (%s, %p, %p, %p)\n", + service->state->id, service->localport, reason_names[reason], + header, cb_data, cb_userdata); + status = service->base.callback(service->instance, reason, header, service->handle, + cb_data, cb_userdata); + if (status && (status != -EAGAIN)) { + dev_warn(service->state->dev, + "core: %d: ignoring ERROR from callback to service %x\n", + service->state->id, service->handle); + status = 0; + } + + if (reason != VCHIQ_MESSAGE_AVAILABLE) + vchiq_release_message(service->instance, service->handle, header); + + return status; +} + +inline void +vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate) +{ + enum vchiq_connstate oldstate = state->conn_state; + + dev_dbg(state->dev, "core: %d: %s->%s\n", + state->id, conn_state_names[oldstate], conn_state_names[newstate]); + state->conn_state = newstate; + vchiq_platform_conn_state_changed(state, oldstate, newstate); +} + +/* This initialises a single remote_event, and the associated wait_queue. */ +static inline void +remote_event_create(wait_queue_head_t *wq, struct remote_event *event) +{ + event->armed = 0; + /* + * Don't clear the 'fired' flag because it may already have been set + * by the other side. + */ + init_waitqueue_head(wq); +} + +/* + * All the event waiting routines in VCHIQ used a custom semaphore + * implementation that filtered most signals. This achieved a behaviour similar + * to the "killable" family of functions. While cleaning up this code, all the + * routines were switched to the "interruptible" family of functions, as the + * former was deemed unjustified and the use of "killable" left all VCHIQ's + * threads in D state. + * + * Returns: 0 on success, a negative error code on failure + */ +static inline int +remote_event_wait(wait_queue_head_t *wq, struct remote_event *event) +{ + int ret = 0; + + if (!event->fired) { + event->armed = 1; + dsb(sy); + ret = wait_event_interruptible(*wq, event->fired); + if (ret) { + event->armed = 0; + return ret; + } + event->armed = 0; + /* Ensure that the peer sees that we are not waiting (armed == 0). 
*/ + wmb(); + } + + event->fired = 0; + return ret; +} + +static void +remote_event_signal(struct vchiq_state *state, struct remote_event *event) +{ + struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(state->dev); + + /* + * Ensure that all writes to shared data structures have completed + * before signalling the peer. + */ + wmb(); + + event->fired = 1; + + dsb(sy); /* data barrier operation */ + + if (event->armed) + writel(0, mgmt->regs + BELL2); /* trigger vc interrupt */ +} + +/* + * Acknowledge that the event has been signalled, and wake any waiters. Usually + * called as a result of the doorbell being rung. + */ +static inline void +remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event) +{ + event->fired = 1; + event->armed = 0; + wake_up_all(wq); +} + +/* Check if a single event has been signalled, waking the waiters if it has. */ +static inline void +remote_event_poll(wait_queue_head_t *wq, struct remote_event *event) +{ + if (event->fired && event->armed) + remote_event_signal_local(wq, event); +} + +/* + * VCHIQ used a small, fixed number of remote events. It is simplest to + * enumerate them here for polling. + */ +void +remote_event_pollall(struct vchiq_state *state) +{ + remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger); + remote_event_poll(&state->sync_release_event, &state->local->sync_release); + remote_event_poll(&state->trigger_event, &state->local->trigger); + remote_event_poll(&state->recycle_event, &state->local->recycle); +} + +/* + * Round up message sizes so that any space at the end of a slot is always big + * enough for a header. This relies on header size being a power of two, which + * has been verified earlier by a static assertion. + */ + +static inline size_t +calc_stride(size_t size) +{ + /* Allow room for the header */ + size += sizeof(struct vchiq_header); + + /* Round up */ + return (size + sizeof(struct vchiq_header) - 1) & + ~(sizeof(struct vchiq_header) - 1); +} + +/* Called by the slot handler thread */ +static struct vchiq_service * +get_listening_service(struct vchiq_state *state, int fourcc) +{ + int i; + + WARN_ON(fourcc == VCHIQ_FOURCC_INVALID); + + rcu_read_lock(); + for (i = 0; i < state->unused_service; i++) { + struct vchiq_service *service; + + service = rcu_dereference(state->services[i]); + if (service && + service->public_fourcc == fourcc && + (service->srvstate == VCHIQ_SRVSTATE_LISTENING || + (service->srvstate == VCHIQ_SRVSTATE_OPEN && + service->remoteport == VCHIQ_PORT_FREE)) && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + } + rcu_read_unlock(); + return NULL; +} + +/* Called by the slot handler thread */ +static struct vchiq_service * +get_connected_service(struct vchiq_state *state, unsigned int port) +{ + int i; + + rcu_read_lock(); + for (i = 0; i < state->unused_service; i++) { + struct vchiq_service *service = + rcu_dereference(state->services[i]); + + if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN && + service->remoteport == port && + kref_get_unless_zero(&service->ref_count)) { + service = rcu_pointer_handoff(service); + rcu_read_unlock(); + return service; + } + } + rcu_read_unlock(); + return NULL; +} + +inline void +request_poll(struct vchiq_state *state, struct vchiq_service *service, + int poll_type) +{ + u32 value; + int index; + + if (!service) + goto skip_service; + + do { + value = atomic_read(&service->poll_flags); + } while (atomic_cmpxchg(&service->poll_flags, value, + value 
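The fired/armed handshake implemented by remote_event_wait() and remote_event_signal() above can be summarised as follows (a sketch inferred from the code, with BELL2 as the doorbell register):

/*
 *   waiter (this side)                 signaller (peer)
 *   ------------------                 ----------------
 *   if (!event->fired) {               wmb();   make payload visible
 *       event->armed = 1;              event->fired = 1;
 *       dsb(sy);                       dsb(sy);
 *       wait for event->fired;         if (event->armed)
 *       event->armed = 0;                  ring doorbell (BELL2);
 *   }
 *   event->fired = 0;
 *
 * A signal that arrives before the waiter arms the event is not lost:
 * the waiter tests 'fired' first and skips the sleep entirely.
 */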
| BIT(poll_type)) != value); + + index = BITSET_WORD(service->localport); + do { + value = atomic_read(&state->poll_services[index]); + } while (atomic_cmpxchg(&state->poll_services[index], + value, value | BIT(service->localport & 0x1f)) != value); + +skip_service: + state->poll_needed = 1; + /* Ensure the slot handler thread sees the poll_needed flag. */ + wmb(); + + /* ... and ensure the slot handler runs. */ + remote_event_signal_local(&state->trigger_event, &state->local->trigger); +} + +/* + * Called from queue_message, by the slot handler and application threads, + * with slot_mutex held + */ +static struct vchiq_header * +reserve_space(struct vchiq_state *state, size_t space, int is_blocking) +{ + struct vchiq_shared_state *local = state->local; + int tx_pos = state->local_tx_pos; + int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK); + + if (space > slot_space) { + struct vchiq_header *header; + /* Fill the remaining space with padding */ + WARN_ON(!state->tx_data); + header = (struct vchiq_header *) + (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK)); + header->msgid = VCHIQ_MSGID_PADDING; + header->size = slot_space - sizeof(struct vchiq_header); + + tx_pos += slot_space; + } + + /* If necessary, get the next slot. */ + if ((tx_pos & VCHIQ_SLOT_MASK) == 0) { + int slot_index; + + /* If there is no free slot... */ + + if (!try_wait_for_completion(&state->slot_available_event)) { + /* ...wait for one. */ + + VCHIQ_STATS_INC(state, slot_stalls); + + /* But first, flush through the last slot. */ + state->local_tx_pos = tx_pos; + local->tx_pos = tx_pos; + remote_event_signal(state, &state->remote->trigger); + + if (!is_blocking || + (wait_for_completion_interruptible(&state->slot_available_event))) + return NULL; /* No space available */ + } + + if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) { + complete(&state->slot_available_event); + dev_warn(state->dev, "%s: invalid tx_pos: %d\n", + __func__, tx_pos); + return NULL; + } + + slot_index = local->slot_queue[SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)]; + state->tx_data = + (char *)SLOT_DATA_FROM_INDEX(state, slot_index); + } + + state->local_tx_pos = tx_pos + space; + + return (struct vchiq_header *)(state->tx_data + + (tx_pos & VCHIQ_SLOT_MASK)); +} + +static void +process_free_data_message(struct vchiq_state *state, u32 *service_found, + struct vchiq_header *header) +{ + int msgid = header->msgid; + int port = VCHIQ_MSG_SRCPORT(msgid); + struct vchiq_service_quota *quota = &state->service_quotas[port]; + int count; + + spin_lock(&state->quota_spinlock); + count = quota->message_use_count; + if (count > 0) + quota->message_use_count = count - 1; + spin_unlock(&state->quota_spinlock); + + if (count == quota->message_quota) { + /* + * Signal the service that it + * has dropped below its quota + */ + complete(&quota->quota_event); + } else if (count == 0) { + dev_err(state->dev, + "core: service %d message_use_count=%d (header %p, msgid %x, header->msgid %x, header->size %x)\n", + port, quota->message_use_count, header, msgid, + header->msgid, header->size); + WARN(1, "invalid message use count\n"); + } + if (!BITSET_IS_SET(service_found, port)) { + /* Set the found bit for this service */ + BITSET_SET(service_found, port); + + spin_lock(&state->quota_spinlock); + count = quota->slot_use_count; + if (count > 0) + quota->slot_use_count = count - 1; + spin_unlock(&state->quota_spinlock); + + if (count > 0) { + /* + * Signal the service in case + * it has dropped below its quota + */ + complete(&quota->quota_event); + 
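For a concrete feel of calc_stride() above (header size 8, as asserted earlier), on which reserve_space() and the free-queue scan both rely:

/*
 * calc_stride(13) = (13 + 8 + 7) & ~7 = 24
 * calc_stride(16) = (16 + 8 + 7) & ~7 = 24
 * calc_stride(0)  = (0 + 8 + 7) & ~7  = 8    (header-only message)
 *
 * Every stride is a multiple of sizeof(struct vchiq_header), so the
 * space left at the end of a slot can always hold the PADDING header
 * that reserve_space() writes there.
 */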
dev_dbg(state->dev, "core: %d: pfq:%d %x@%p - slot_use->%d\n", + state->id, port, header->size, header, count - 1); + } else { + dev_err(state->dev, + "core: service %d slot_use_count=%d (header %p, msgid %x, header->msgid %x, header->size %x)\n", + port, count, header, msgid, header->msgid, header->size); + WARN(1, "bad slot use count\n"); + } + } +} + +/* Called by the recycle thread. */ +static void +process_free_queue(struct vchiq_state *state, u32 *service_found, + size_t length) +{ + struct vchiq_shared_state *local = state->local; + int slot_queue_available; + + /* + * Find slots which have been freed by the other side, and return them + * to the available queue. + */ + slot_queue_available = state->slot_queue_available; + + /* + * Use a memory barrier to ensure that any state that may have been + * modified by another thread is not masked by stale prefetched + * values. + */ + mb(); + + while (slot_queue_available != local->slot_queue_recycle) { + unsigned int pos; + int slot_index = local->slot_queue[slot_queue_available & + VCHIQ_SLOT_QUEUE_MASK]; + char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index); + int data_found = 0; + + slot_queue_available++; + /* + * Beware of the address dependency - data is calculated + * using an index written by the other side. + */ + rmb(); + + dev_dbg(state->dev, "core: %d: pfq %d=%p %x %x\n", + state->id, slot_index, data, local->slot_queue_recycle, + slot_queue_available); + + /* Initialise the bitmask for services which have used this slot */ + memset(service_found, 0, length); + + pos = 0; + + while (pos < VCHIQ_SLOT_SIZE) { + struct vchiq_header *header = + (struct vchiq_header *)(data + pos); + int msgid = header->msgid; + + if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) { + process_free_data_message(state, service_found, + header); + data_found = 1; + } + + pos += calc_stride(header->size); + if (pos > VCHIQ_SLOT_SIZE) { + dev_err(state->dev, + "core: pfq - pos %x: header %p, msgid %x, header->msgid %x, header->size %x\n", + pos, header, msgid, header->msgid, header->size); + WARN(1, "invalid slot position\n"); + } + } + + if (data_found) { + int count; + + spin_lock(&state->quota_spinlock); + count = state->data_use_count; + if (count > 0) + state->data_use_count = count - 1; + spin_unlock(&state->quota_spinlock); + if (count == state->data_quota) + complete(&state->data_quota_event); + } + + /* + * Don't allow the slot to be reused until we are no + * longer interested in it. 
*/ + mb(); + + state->slot_queue_available = slot_queue_available; + complete(&state->slot_available_event); + } +} + +static ssize_t +memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize) +{ + memcpy(dest + offset, context + offset, maxsize); + return maxsize; +} + +static ssize_t +copy_message_data(ssize_t (*copy_callback)(void *context, void *dest, size_t offset, + size_t maxsize), + void *context, + void *dest, + size_t size) +{ + size_t pos = 0; + + while (pos < size) { + ssize_t callback_result; + size_t max_bytes = size - pos; + + callback_result = copy_callback(context, dest + pos, pos, + max_bytes); + + if (callback_result < 0) + return callback_result; + + if (!callback_result) + return -EIO; + + if (callback_result > max_bytes) + return -EIO; + + pos += callback_result; + } + + return size; +} + +/* Called by the slot handler and application threads */ +static int +queue_message(struct vchiq_state *state, struct vchiq_service *service, + int msgid, + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, size_t size, int flags) +{ + struct vchiq_shared_state *local; + struct vchiq_service_quota *quota = NULL; + struct vchiq_header *header; + int type = VCHIQ_MSG_TYPE(msgid); + int svc_fourcc; + + size_t stride; + + local = state->local; + + stride = calc_stride(size); + + WARN_ON(stride > VCHIQ_SLOT_SIZE); + + if (!(flags & QMFLAGS_NO_MUTEX_LOCK) && + mutex_lock_killable(&state->slot_mutex)) + return -EINTR; + + if (type == VCHIQ_MSG_DATA) { + int tx_end_index; + + if (!service) { + WARN(1, "%s: service is NULL\n", __func__); + mutex_unlock(&state->slot_mutex); + return -EINVAL; + } + + WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK | + QMFLAGS_NO_MUTEX_UNLOCK)); + + if (service->closing) { + /* The service has been closed */ + mutex_unlock(&state->slot_mutex); + return -EHOSTDOWN; + } + + quota = &state->service_quotas[service->localport]; + + spin_lock(&state->quota_spinlock); + + /* + * Ensure this service doesn't use more than its quota of + * messages or slots + */ + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1); + + /* + * Ensure data messages don't use more than their quota of + * slots + */ + while ((tx_end_index != state->previous_data_index) && + (state->data_use_count == state->data_quota)) { + VCHIQ_STATS_INC(state, data_stalls); + spin_unlock(&state->quota_spinlock); + mutex_unlock(&state->slot_mutex); + + if (wait_for_completion_killable(&state->data_quota_event)) + return -EINTR; + + mutex_lock(&state->slot_mutex); + spin_lock(&state->quota_spinlock); + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1); + if ((tx_end_index == state->previous_data_index) || + (state->data_use_count < state->data_quota)) { + /* Pass the signal on to other waiters */ + complete(&state->data_quota_event); + break; + } + } + + while ((quota->message_use_count == quota->message_quota) || + ((tx_end_index != quota->previous_tx_index) && + (quota->slot_use_count == quota->slot_quota))) { + spin_unlock(&state->quota_spinlock); + dev_dbg(state->dev, + "core: %d: qm:%d %s,%zx - quota stall (msg %d, slot %d)\n", + state->id, service->localport, msg_type_str(type), size, + quota->message_use_count, quota->slot_use_count); + VCHIQ_SERVICE_STATS_INC(service, quota_stalls); + mutex_unlock(&state->slot_mutex); + if (wait_for_completion_killable(&quota->quota_event)) + return -EINTR; + if (service->closing) + return -EHOSTDOWN; + if (mutex_lock_killable(&state->slot_mutex)) + return 
-EINTR; + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) { + /* The service has been closed */ + mutex_unlock(&state->slot_mutex); + return -EHOSTDOWN; + } + spin_lock(&state->quota_spinlock); + tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1); + } + + spin_unlock(&state->quota_spinlock); + } + + header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING); + + if (!header) { + if (service) + VCHIQ_SERVICE_STATS_INC(service, slot_stalls); + /* + * In the event of a failure, return the mutex to the + * state it was in + */ + if (!(flags & QMFLAGS_NO_MUTEX_LOCK)) + mutex_unlock(&state->slot_mutex); + return -EAGAIN; + } + + if (type == VCHIQ_MSG_DATA) { + ssize_t callback_result; + int tx_end_index; + int slot_use_count; + + dev_dbg(state->dev, "core: %d: qm %s@%p,%zx (%d->%d)\n", + state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size, + VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid)); + + WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK | + QMFLAGS_NO_MUTEX_UNLOCK)); + + callback_result = + copy_message_data(copy_callback, context, + header->data, size); + + if (callback_result < 0) { + mutex_unlock(&state->slot_mutex); + VCHIQ_SERVICE_STATS_INC(service, error_count); + return -EINVAL; + } + + vchiq_log_dump_mem(state->dev, "Sent", 0, + header->data, + min_t(size_t, 16, callback_result)); + + spin_lock(&state->quota_spinlock); + quota->message_use_count++; + + tx_end_index = + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1); + + /* + * If this transmission can't fit in the last slot used by any + * service, the data_use_count must be increased. + */ + if (tx_end_index != state->previous_data_index) { + state->previous_data_index = tx_end_index; + state->data_use_count++; + } + + /* + * If this isn't the same slot last used by this service, + * the service's slot_use_count must be increased. + */ + if (tx_end_index != quota->previous_tx_index) { + quota->previous_tx_index = tx_end_index; + slot_use_count = ++quota->slot_use_count; + } else { + slot_use_count = 0; + } + + spin_unlock(&state->quota_spinlock); + + if (slot_use_count) + dev_dbg(state->dev, "core: %d: qm:%d %s,%zx - slot_use->%d (hdr %p)\n", + state->id, service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)), + size, slot_use_count, header); + + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count); + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size); + } else { + dev_dbg(state->dev, "core: %d: qm %s@%p,%zx (%d->%d)\n", + state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size, + VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid)); + if (size != 0) { + /* + * It is assumed for now that this code path + * only happens from calls inside this file. + * + * External callers are through the vchiq_queue_message + * path which always sets the type to be VCHIQ_MSG_DATA + * + * At first glance this appears to be correct but + * more review is needed. + */ + copy_message_data(copy_callback, context, + header->data, size); + } + VCHIQ_STATS_INC(state, ctrl_tx_count); + } + + header->msgid = msgid; + header->size = size; + + svc_fourcc = service ? service->base.fourcc + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?'); + + dev_dbg(state->dev, "core_msg: Sent Msg %s(%u) to %p4cc s:%u d:%d len:%zu\n", + msg_type_str(VCHIQ_MSG_TYPE(msgid)), + VCHIQ_MSG_TYPE(msgid), &svc_fourcc, + VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid), size); + + /* Make sure the new header is visible to the peer. */ + wmb(); + + /* Make the new tx_pos visible to the peer. 
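A sketch of how an in-file caller would drive this path with memcpy_copy_callback() (the payload variable is invented for illustration):

	u32 payload = 42;
	int ret;

	ret = queue_message(state, service,
			    VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
					   service->localport,
					   service->remoteport),
			    memcpy_copy_callback, &payload, sizeof(payload),
			    QMFLAGS_IS_BLOCKING);

	/*
	 * Copy-callback contract: return the number of bytes copied; 0 or
	 * an over-long result becomes -EIO in copy_message_data(), and a
	 * negative result aborts the message with -EINVAL.
	 */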
*/ + local->tx_pos = state->local_tx_pos; + wmb(); + + if (service && (type == VCHIQ_MSG_CLOSE)) + set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT); + + if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK)) + mutex_unlock(&state->slot_mutex); + + remote_event_signal(state, &state->remote->trigger); + + return 0; +} + +/* Called by the slot handler and application threads */ +static int +queue_message_sync(struct vchiq_state *state, struct vchiq_service *service, + int msgid, + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, int size) +{ + struct vchiq_shared_state *local; + struct vchiq_header *header; + ssize_t callback_result; + int svc_fourcc; + int ret; + + local = state->local; + + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME && + mutex_lock_killable(&state->sync_mutex)) + return -EAGAIN; + + ret = remote_event_wait(&state->sync_release_event, &local->sync_release); + if (ret) { + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME) + mutex_unlock(&state->sync_mutex); + return ret; + } + + /* Ensure that reads don't overtake the remote_event_wait. */ + rmb(); + + header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state, + local->slot_sync); + + { + int oldmsgid = header->msgid; + + if (oldmsgid != VCHIQ_MSGID_PADDING) + dev_err(state->dev, "core: %d: qms - msgid %x, not PADDING\n", + state->id, oldmsgid); + } + + dev_dbg(state->dev, "sync: %d: qms %s@%p,%x (%d->%d)\n", + state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size, + VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid)); + + callback_result = copy_message_data(copy_callback, context, + header->data, size); + + if (callback_result < 0) { + mutex_unlock(&state->sync_mutex); + VCHIQ_SERVICE_STATS_INC(service, error_count); + return -EINVAL; + } + + if (service) { + vchiq_log_dump_mem(state->dev, "Sent", 0, + header->data, + min_t(size_t, 16, callback_result)); + + VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count); + VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size); + } else { + VCHIQ_STATS_INC(state, ctrl_tx_count); + } + + header->size = size; + header->msgid = msgid; + + svc_fourcc = service ? service->base.fourcc + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?'); + + dev_dbg(state->dev, + "sync: Sent Sync Msg %s(%u) to %p4cc s:%u d:%d len:%d\n", + msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid), + &svc_fourcc, VCHIQ_MSG_SRCPORT(msgid), + VCHIQ_MSG_DSTPORT(msgid), size); + + remote_event_signal(state, &state->remote->sync_trigger); + + if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE) + mutex_unlock(&state->sync_mutex); + + return 0; +} + +static inline void +claim_slot(struct vchiq_slot_info *slot) +{ + slot->use_count++; +} + +static void +release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info, + struct vchiq_header *header, struct vchiq_service *service) +{ + mutex_lock(&state->recycle_mutex); + + if (header) { + int msgid = header->msgid; + + if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || (service && service->closing)) { + mutex_unlock(&state->recycle_mutex); + return; + } + + /* Rewrite the message header to prevent a double release */ + header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED; + } + + slot_info->release_count++; + + if (slot_info->release_count == slot_info->use_count) { + int slot_queue_recycle; + /* Add to the freed queue */ + + /* + * A read barrier is necessary here to prevent speculative + * fetches of remote->slot_queue_recycle from overtaking the + * mutex. 
+ */ + rmb(); + + slot_queue_recycle = state->remote->slot_queue_recycle; + state->remote->slot_queue[slot_queue_recycle & + VCHIQ_SLOT_QUEUE_MASK] = + SLOT_INDEX_FROM_INFO(state, slot_info); + state->remote->slot_queue_recycle = slot_queue_recycle + 1; + dev_dbg(state->dev, "core: %d: %d - recycle->%x\n", + state->id, SLOT_INDEX_FROM_INFO(state, slot_info), + state->remote->slot_queue_recycle); + + /* + * A write barrier is necessary, but remote_event_signal + * contains one. + */ + remote_event_signal(state, &state->remote->recycle); + } + + mutex_unlock(&state->recycle_mutex); +} + +static inline enum vchiq_reason +get_bulk_reason(struct vchiq_bulk *bulk) +{ + if (bulk->dir == VCHIQ_BULK_TRANSMIT) { + if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED) + return VCHIQ_BULK_TRANSMIT_ABORTED; + + return VCHIQ_BULK_TRANSMIT_DONE; + } + + if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED) + return VCHIQ_BULK_RECEIVE_ABORTED; + + return VCHIQ_BULK_RECEIVE_DONE; +} + +static int service_notify_bulk(struct vchiq_service *service, + struct vchiq_bulk *bulk) +{ + if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) { + if (bulk->dir == VCHIQ_BULK_TRANSMIT) { + VCHIQ_SERVICE_STATS_INC(service, bulk_tx_count); + VCHIQ_SERVICE_STATS_ADD(service, bulk_tx_bytes, + bulk->actual); + } else { + VCHIQ_SERVICE_STATS_INC(service, bulk_rx_count); + VCHIQ_SERVICE_STATS_ADD(service, bulk_rx_bytes, + bulk->actual); + } + } else { + VCHIQ_SERVICE_STATS_INC(service, bulk_aborted_count); + } + + if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) { + struct bulk_waiter *waiter; + + spin_lock(&service->state->bulk_waiter_spinlock); + waiter = bulk->waiter; + if (waiter) { + waiter->actual = bulk->actual; + complete(&waiter->event); + } + spin_unlock(&service->state->bulk_waiter_spinlock); + } else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) { + enum vchiq_reason reason = get_bulk_reason(bulk); + + return make_service_callback(service, reason, NULL, bulk); + } + + return 0; +} + +/* Called by the slot handler - don't hold the bulk mutex */ +static int +notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue, + int retry_poll) +{ + int status = 0; + + dev_dbg(service->state->dev, + "core: %d: nb:%d %cx - p=%x rn=%x r=%x\n", + service->state->id, service->localport, + (queue == &service->bulk_tx) ? 't' : 'r', + queue->process, queue->remote_notify, queue->remove); + + queue->remote_notify = queue->process; + + while (queue->remove != queue->remote_notify) { + struct vchiq_bulk *bulk = + &queue->bulks[BULK_INDEX(queue->remove)]; + + /* + * Only generate callbacks for non-dummy bulk + * requests, and non-terminated services + */ + if (bulk->dma_addr && service->instance) { + status = service_notify_bulk(service, bulk); + if (status == -EAGAIN) + break; + } + + queue->remove++; + complete(&service->bulk_remove_event); + } + if (!retry_poll) + status = 0; + + if (status == -EAGAIN) + request_poll(service->state, service, (queue == &service->bulk_tx) ? 
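For VCHIQ_BULK_MODE_BLOCKING the submitter owns the struct bulk_waiter that this completion path fills in; roughly (a sketch only, the submission call itself lives elsewhere in the interface):

	struct bulk_waiter waiter;

	init_completion(&waiter.event);
	/* ... queue the bulk transfer with bulk->waiter = &waiter ... */
	if (wait_for_completion_interruptible(&waiter.event))
		return -EINTR;

	if (waiter.actual == VCHIQ_BULK_ACTUAL_ABORTED)
		return -EIO;	/* aborted by close or teardown */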
+ VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY); + + return status; +} + +static void +poll_services_of_group(struct vchiq_state *state, int group) +{ + u32 flags = atomic_xchg(&state->poll_services[group], 0); + int i; + + for (i = 0; flags; i++) { + struct vchiq_service *service; + u32 service_flags; + + if ((flags & BIT(i)) == 0) + continue; + + service = find_service_by_port(state, (group << 5) + i); + flags &= ~BIT(i); + + if (!service) + continue; + + service_flags = atomic_xchg(&service->poll_flags, 0); + if (service_flags & BIT(VCHIQ_POLL_REMOVE)) { + dev_dbg(state->dev, "core: %d: ps - remove %d<->%d\n", + state->id, service->localport, service->remoteport); + + /* + * Make it look like a client, because + * it must be removed and not left in + * the LISTENING state. + */ + service->public_fourcc = VCHIQ_FOURCC_INVALID; + + if (vchiq_close_service_internal(service, NO_CLOSE_RECVD)) + request_poll(state, service, VCHIQ_POLL_REMOVE); + } else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) { + dev_dbg(state->dev, "core: %d: ps - terminate %d<->%d\n", + state->id, service->localport, service->remoteport); + if (vchiq_close_service_internal(service, NO_CLOSE_RECVD)) + request_poll(state, service, VCHIQ_POLL_TERMINATE); + } + if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY)) + notify_bulks(service, &service->bulk_tx, RETRY_POLL); + if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY)) + notify_bulks(service, &service->bulk_rx, RETRY_POLL); + vchiq_service_put(service); + } +} + +/* Called by the slot handler thread */ +static void +poll_services(struct vchiq_state *state) +{ + int group; + + for (group = 0; group < BITSET_SIZE(state->unused_service); group++) + poll_services_of_group(state, group); +} + +static void +cleanup_pagelistinfo(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo) +{ + if (pagelistinfo->scatterlist_mapped) { + dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist, + pagelistinfo->num_pages, pagelistinfo->dma_dir); + } + + if (pagelistinfo->pages_need_release) + unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages); + + dma_free_coherent(instance->state->dev, pagelistinfo->pagelist_buffer_size, + pagelistinfo->pagelist, pagelistinfo->dma_addr); +} + +static inline bool +is_adjacent_block(u32 *addrs, dma_addr_t addr, unsigned int k) +{ + u32 tmp; + + if (!k) + return false; + + tmp = (addrs[k - 1] & PAGE_MASK) + + (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT); + + return tmp == (addr & PAGE_MASK); +} + +/* There is a potential problem with partial cache lines (pages?) + * at the ends of the block when reading. If the CPU accessed anything in + * the same line (page?) then it may have pulled old data into the cache, + * obscuring the new data underneath. We can solve this by transferring the + * partial cache lines separately, and allowing the ARM to copy into the + * cached area. + */ +static struct vchiq_pagelist_info * +create_pagelist(struct vchiq_instance *instance, struct vchiq_bulk *bulk) +{ + struct vchiq_drv_mgmt *drv_mgmt; + struct pagelist *pagelist; + struct vchiq_pagelist_info *pagelistinfo; + struct page **pages; + u32 *addrs; + unsigned int num_pages, offset, i, k; + int actual_pages; + size_t pagelist_size; + struct scatterlist *scatterlist, *sg; + int dma_buffers; + unsigned int cache_line_size; + dma_addr_t dma_addr; + size_t count = bulk->size; + unsigned short type = (bulk->dir == VCHIQ_BULK_RECEIVE) + ? 
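The group arithmetic above mirrors the bitset filled in by request_poll(): one bit per local port, 32 ports per word. A worked example:

/*
 * localport 37: request_poll() sets bit 37 % 32 = 5 in
 * poll_services[BITSET_WORD(37)] = poll_services[1]; here,
 * poll_services_of_group(state, 1) finds bit 5 set and rebuilds the
 * port number as (1 << 5) + 5 = 37 before looking the service up.
 */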
PAGELIST_READ : PAGELIST_WRITE; + + if (count >= INT_MAX - PAGE_SIZE) + return NULL; + + drv_mgmt = dev_get_drvdata(instance->state->dev); + + if (bulk->offset) + offset = (uintptr_t)bulk->offset & (PAGE_SIZE - 1); + else + offset = (uintptr_t)bulk->uoffset & (PAGE_SIZE - 1); + num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE); + + if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) - + sizeof(struct vchiq_pagelist_info)) / + (sizeof(u32) + sizeof(pages[0]) + + sizeof(struct scatterlist))) + return NULL; + + pagelist_size = sizeof(struct pagelist) + + (num_pages * sizeof(u32)) + + (num_pages * sizeof(pages[0]) + + (num_pages * sizeof(struct scatterlist))) + + sizeof(struct vchiq_pagelist_info); + + /* Allocate enough storage to hold the page pointers and the page + * list + */ + pagelist = dma_alloc_coherent(instance->state->dev, pagelist_size, &dma_addr, + GFP_KERNEL); + + dev_dbg(instance->state->dev, "arm: %p\n", pagelist); + + if (!pagelist) + return NULL; + + addrs = pagelist->addrs; + pages = (struct page **)(addrs + num_pages); + scatterlist = (struct scatterlist *)(pages + num_pages); + pagelistinfo = (struct vchiq_pagelist_info *) + (scatterlist + num_pages); + + pagelist->length = count; + pagelist->type = type; + pagelist->offset = offset; + + /* Populate the fields of the pagelistinfo structure */ + pagelistinfo->pagelist = pagelist; + pagelistinfo->pagelist_buffer_size = pagelist_size; + pagelistinfo->dma_addr = dma_addr; + pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE; + pagelistinfo->num_pages = num_pages; + pagelistinfo->pages_need_release = 0; + pagelistinfo->pages = pages; + pagelistinfo->scatterlist = scatterlist; + pagelistinfo->scatterlist_mapped = 0; + + if (bulk->offset) { + unsigned long length = count; + unsigned int off = offset; + + for (actual_pages = 0; actual_pages < num_pages; + actual_pages++) { + struct page *pg = + vmalloc_to_page(((char *)bulk->offset + + (actual_pages * PAGE_SIZE))); + size_t bytes = PAGE_SIZE - off; + + if (!pg) { + cleanup_pagelistinfo(instance, pagelistinfo); + return NULL; + } + + if (bytes > length) + bytes = length; + pages[actual_pages] = pg; + length -= bytes; + off = 0; + } + /* do not try and release vmalloc pages */ + } else { + actual_pages = + pin_user_pages_fast((unsigned long)bulk->uoffset & PAGE_MASK, num_pages, + type == PAGELIST_READ, pages); + + if (actual_pages != num_pages) { + dev_dbg(instance->state->dev, "arm: Only %d/%d pages locked\n", + actual_pages, num_pages); + + /* This is probably due to the process being killed */ + if (actual_pages > 0) + unpin_user_pages(pages, actual_pages); + cleanup_pagelistinfo(instance, pagelistinfo); + return NULL; + } + /* release user pages */ + pagelistinfo->pages_need_release = 1; + } + + /* + * Initialize the scatterlist so that the magic cookie + * is filled if debugging is enabled + */ + sg_init_table(scatterlist, num_pages); + /* Now set the pages for each scatterlist */ + for (i = 0; i < num_pages; i++) { + unsigned int len = PAGE_SIZE - offset; + + if (len > count) + len = count; + sg_set_page(scatterlist + i, pages[i], len, offset); + offset = 0; + count -= len; + } + + dma_buffers = dma_map_sg(instance->state->dev, + scatterlist, + num_pages, + pagelistinfo->dma_dir); + + if (dma_buffers == 0) { + cleanup_pagelistinfo(instance, pagelistinfo); + return NULL; + } + + pagelistinfo->scatterlist_mapped = 1; + + /* Combine adjacent blocks for performance */ + k = 0; + for_each_sg(scatterlist, sg, dma_buffers, i) { + 
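The addrs[] entries built by this loop pack the page-aligned bus address with (page count - 1) in the low bits; a worked example, assuming 4 KiB pages:

	/*
	 * A contiguous 3-page block at bus address 0x10000 is stored as
	 * 0x10000 | (3 - 1) = 0x10002. is_adjacent_block() then computes
	 * 0x10000 + 3 * 0x1000 = 0x13000 as the end of the run, so a
	 * following block at 0x13000 is merged by bumping the page count
	 * of addrs[k - 1] instead of starting a new entry.
	 */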
unsigned int len = sg_dma_len(sg); + dma_addr_t addr = sg_dma_address(sg); + + /* Note: addrs is the address + page_count - 1 + * The firmware expects blocks after the first to be page- + * aligned and a multiple of the page size + */ + WARN_ON(len == 0); + WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK)); + WARN_ON(i && (addr & ~PAGE_MASK)); + if (is_adjacent_block(addrs, addr, k)) + addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT); + else + addrs[k++] = (addr & PAGE_MASK) | + (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1); + } + + /* Partial cache lines (fragments) require special measures */ + cache_line_size = drv_mgmt->info->cache_line_size; + if ((type == PAGELIST_READ) && + ((pagelist->offset & (cache_line_size - 1)) || + ((pagelist->offset + pagelist->length) & (cache_line_size - 1)))) { + char *fragments; + + if (down_interruptible(&drv_mgmt->free_fragments_sema)) { + cleanup_pagelistinfo(instance, pagelistinfo); + return NULL; + } + + WARN_ON(!drv_mgmt->free_fragments); + + down(&drv_mgmt->free_fragments_mutex); + fragments = drv_mgmt->free_fragments; + WARN_ON(!fragments); + drv_mgmt->free_fragments = *(char **)drv_mgmt->free_fragments; + up(&drv_mgmt->free_fragments_mutex); + pagelist->type = PAGELIST_READ_WITH_FRAGMENTS + + (fragments - drv_mgmt->fragments_base) / drv_mgmt->fragments_size; + } + + return pagelistinfo; +} + +static void +free_pagelist(struct vchiq_instance *instance, struct vchiq_pagelist_info *pagelistinfo, + int actual) +{ + struct vchiq_drv_mgmt *drv_mgmt; + struct pagelist *pagelist = pagelistinfo->pagelist; + struct page **pages = pagelistinfo->pages; + unsigned int num_pages = pagelistinfo->num_pages; + unsigned int cache_line_size; + + dev_dbg(instance->state->dev, "arm: %p, %d\n", pagelistinfo->pagelist, actual); + + drv_mgmt = dev_get_drvdata(instance->state->dev); + + /* + * NOTE: dma_unmap_sg must be called before the + * cpu can touch any of the data/pages. + */ + dma_unmap_sg(instance->state->dev, pagelistinfo->scatterlist, + pagelistinfo->num_pages, pagelistinfo->dma_dir); + pagelistinfo->scatterlist_mapped = 0; + + /* Deal with any partial cache lines (fragments) */ + cache_line_size = drv_mgmt->info->cache_line_size; + if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS && drv_mgmt->fragments_base) { + char *fragments = drv_mgmt->fragments_base + + (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) * + drv_mgmt->fragments_size; + int head_bytes, tail_bytes; + + head_bytes = (cache_line_size - pagelist->offset) & + (cache_line_size - 1); + tail_bytes = (pagelist->offset + actual) & + (cache_line_size - 1); + + if ((actual >= 0) && (head_bytes != 0)) { + if (head_bytes > actual) + head_bytes = actual; + + memcpy_to_page(pages[0], pagelist->offset, + fragments, head_bytes); + } + if ((actual >= 0) && (head_bytes < actual) && + (tail_bytes != 0)) + memcpy_to_page(pages[num_pages - 1], + (pagelist->offset + actual) & + (PAGE_SIZE - 1) & ~(cache_line_size - 1), + fragments + cache_line_size, + tail_bytes); + + down(&drv_mgmt->free_fragments_mutex); + *(char **)fragments = drv_mgmt->free_fragments; + drv_mgmt->free_fragments = fragments; + up(&drv_mgmt->free_fragments_mutex); + up(&drv_mgmt->free_fragments_sema); + } + + /* Need to mark all the pages dirty. 
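The head/tail arithmetic in the fragment path above, worked through for a hypothetical 64-byte cache line:

/*
 * offset = 10, actual = 100:
 *   head_bytes = (64 - 10) & 63 = 54   copied into page 0
 *   tail_bytes = (10 + 100) & 63 = 46  copied into the last page
 *
 * Only the cache-line-unaligned head and tail of a read are patched
 * from the fragment bounce buffer; the aligned middle of the buffer
 * was written directly by DMA.
 */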
*/ + if (pagelist->type != PAGELIST_WRITE && + pagelistinfo->pages_need_release) { + unsigned int i; + + for (i = 0; i < num_pages; i++) + set_page_dirty(pages[i]); + } + + cleanup_pagelistinfo(instance, pagelistinfo); +} + +static int +vchiq_prepare_bulk_data(struct vchiq_instance *instance, struct vchiq_bulk *bulk) +{ + struct vchiq_pagelist_info *pagelistinfo; + + pagelistinfo = create_pagelist(instance, bulk); + + if (!pagelistinfo) + return -ENOMEM; + + bulk->dma_addr = pagelistinfo->dma_addr; + + /* + * Store the pagelistinfo address in remote_data, + * which isn't used by the slave. + */ + bulk->remote_data = pagelistinfo; + + return 0; +} + +static void +vchiq_complete_bulk(struct vchiq_instance *instance, struct vchiq_bulk *bulk) +{ + if (bulk && bulk->remote_data && bulk->actual) + free_pagelist(instance, (struct vchiq_pagelist_info *)bulk->remote_data, + bulk->actual); +} + +/* Called with the bulk_mutex held */ +static void +abort_outstanding_bulks(struct vchiq_service *service, + struct vchiq_bulk_queue *queue) +{ + int is_tx = (queue == &service->bulk_tx); + + dev_dbg(service->state->dev, + "core: %d: aob:%d %cx - li=%x ri=%x p=%x\n", + service->state->id, service->localport, + is_tx ? 't' : 'r', queue->local_insert, + queue->remote_insert, queue->process); + + WARN_ON((int)(queue->local_insert - queue->process) < 0); + WARN_ON((int)(queue->remote_insert - queue->process) < 0); + + while ((queue->process != queue->local_insert) || + (queue->process != queue->remote_insert)) { + struct vchiq_bulk *bulk = &queue->bulks[BULK_INDEX(queue->process)]; + + if (queue->process == queue->remote_insert) { + /* fabricate a matching dummy bulk */ + bulk->remote_data = NULL; + bulk->remote_size = 0; + queue->remote_insert++; + } + + if (queue->process != queue->local_insert) { + vchiq_complete_bulk(service->instance, bulk); + + dev_dbg(service->state->dev, + "core_msg: %s %p4cc d:%d ABORTED - tx len:%d, rx len:%d\n", + is_tx ? "Send Bulk to" : "Recv Bulk from", + &service->base.fourcc, + service->remoteport, bulk->size, bulk->remote_size); + } else { + /* fabricate a matching dummy bulk */ + bulk->dma_addr = 0; + bulk->size = 0; + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED; + bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT : + VCHIQ_BULK_RECEIVE; + queue->local_insert++; + } + + queue->process++; + } +} + +static int +parse_open(struct vchiq_state *state, struct vchiq_header *header) +{ + const struct vchiq_open_payload *payload; + struct vchiq_openack_payload ack_payload; + struct vchiq_service *service = NULL; + int msgid, size; + int openack_id; + unsigned int localport, remoteport, fourcc; + short version, version_min; + + msgid = header->msgid; + size = header->size; + localport = VCHIQ_MSG_DSTPORT(msgid); + remoteport = VCHIQ_MSG_SRCPORT(msgid); + if (size < sizeof(struct vchiq_open_payload)) + goto fail_open; + + payload = (struct vchiq_open_payload *)header->data; + fourcc = payload->fourcc; + dev_dbg(state->dev, "core: %d: prs OPEN@%p (%d->'%p4cc')\n", + state->id, header, localport, &fourcc); + + service = get_listening_service(state, fourcc); + if (!service) + goto fail_open; + + /* A matching service exists */ + version = payload->version; + version_min = payload->version_min; + + if ((service->version < version_min) || (version < service->version_min)) { + /* Version mismatch */ + dev_err(state->dev, "%d: service %d (%p4cc) version mismatch - local (%d, min %d) vs. 
remote (%d, min %d)", + state->id, service->localport, &fourcc, + service->version, service->version_min, version, version_min); + vchiq_service_put(service); + service = NULL; + goto fail_open; + } + service->peer_version = version; + + if (service->srvstate != VCHIQ_SRVSTATE_LISTENING) + goto done; + + ack_payload.version = service->version; + openack_id = MAKE_OPENACK(service->localport, remoteport); + + if (state->version_common < VCHIQ_VERSION_SYNCHRONOUS_MODE) + service->sync = 0; + + /* Acknowledge the OPEN */ + if (service->sync) { + if (queue_message_sync(state, NULL, openack_id, + memcpy_copy_callback, + &ack_payload, + sizeof(ack_payload)) == -EAGAIN) + goto bail_not_ready; + + /* The service is now open */ + set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC); + } else { + if (queue_message(state, NULL, openack_id, + memcpy_copy_callback, &ack_payload, + sizeof(ack_payload), 0) == -EINTR) + goto bail_not_ready; + + /* The service is now open */ + set_service_state(service, VCHIQ_SRVSTATE_OPEN); + } + +done: + /* Success - the message has been dealt with */ + vchiq_service_put(service); + return 1; + +fail_open: + /* No available service, or an invalid request - send a CLOSE */ + if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)), + NULL, NULL, 0, 0) == -EINTR) + goto bail_not_ready; + + return 1; + +bail_not_ready: + if (service) + vchiq_service_put(service); + + return 0; +} + +/** + * parse_message() - parses a single message from the rx slot + * @state: vchiq state struct + * @header: message header + * + * Context: Process context + * + * Return: + * * >= 0 - size of the parsed message payload (without header) + * * -EINVAL - fatal error occurred, bail out is required + */ +static int +parse_message(struct vchiq_state *state, struct vchiq_header *header) +{ + struct vchiq_service *service = NULL; + unsigned int localport, remoteport; + int msgid, size, type, ret = -EINVAL; + int svc_fourcc; + + DEBUG_INITIALISE(state->local); + + DEBUG_VALUE(PARSE_HEADER, (int)(long)header); + msgid = header->msgid; + DEBUG_VALUE(PARSE_MSGID, msgid); + size = header->size; + type = VCHIQ_MSG_TYPE(msgid); + localport = VCHIQ_MSG_DSTPORT(msgid); + remoteport = VCHIQ_MSG_SRCPORT(msgid); + + if (type != VCHIQ_MSG_DATA) + VCHIQ_STATS_INC(state, ctrl_rx_count); + + switch (type) { + case VCHIQ_MSG_OPENACK: + case VCHIQ_MSG_CLOSE: + case VCHIQ_MSG_DATA: + case VCHIQ_MSG_BULK_RX: + case VCHIQ_MSG_BULK_TX: + case VCHIQ_MSG_BULK_RX_DONE: + case VCHIQ_MSG_BULK_TX_DONE: + service = find_service_by_port(state, localport); + if ((!service || + ((service->remoteport != remoteport) && + (service->remoteport != VCHIQ_PORT_FREE))) && + (localport == 0) && + (type == VCHIQ_MSG_CLOSE)) { + /* + * This could be a CLOSE from a client which + * hadn't yet received the OPENACK - look for + * the connected service + */ + if (service) + vchiq_service_put(service); + service = get_connected_service(state, remoteport); + if (service) + dev_warn(state->dev, + "core: %d: prs %s@%p (%d->%d) - found connected service %d\n", + state->id, msg_type_str(type), header, + remoteport, localport, service->localport); + } + + if (!service) { + dev_err(state->dev, + "core: %d: prs %s@%p (%d->%d) - invalid/closed service %d\n", + state->id, msg_type_str(type), header, remoteport, + localport, localport); + goto skip_message; + } + break; + default: + break; + } + + svc_fourcc = service ? 
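The version negotiation in parse_open() above accepts any overlap between the two [version_min, version] ranges; a worked example:

/*
 * local service: version 8, version_min 3
 * OPEN payload:  version 5, version_min 4
 *
 * 8 >= 4 and 5 >= 3, so the ranges overlap: the OPEN is accepted and
 * service->peer_version records the remote's cap, 5. A payload of
 * (version 2, version_min 1) would fail the second test (2 < 3) and
 * take the fail_open path instead.
 */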
service->base.fourcc + : VCHIQ_MAKE_FOURCC('?', '?', '?', '?'); + + dev_dbg(state->dev, "core_msg: Rcvd Msg %s(%u) from %p4cc s:%d d:%d len:%d\n", + msg_type_str(type), type, &svc_fourcc, remoteport, localport, size); + if (size > 0) + vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size)); + + if (((unsigned long)header & VCHIQ_SLOT_MASK) + + calc_stride(size) > VCHIQ_SLOT_SIZE) { + dev_err(state->dev, "core: header %p (msgid %x) - size %x too big for slot\n", + header, (unsigned int)msgid, (unsigned int)size); + WARN(1, "oversized for slot\n"); + } + + switch (type) { + case VCHIQ_MSG_OPEN: + WARN_ON(VCHIQ_MSG_DSTPORT(msgid)); + if (!parse_open(state, header)) + goto bail_not_ready; + break; + case VCHIQ_MSG_OPENACK: + if (size >= sizeof(struct vchiq_openack_payload)) { + const struct vchiq_openack_payload *payload = + (struct vchiq_openack_payload *) + header->data; + service->peer_version = payload->version; + } + dev_dbg(state->dev, + "core: %d: prs OPENACK@%p,%x (%d->%d) v:%d\n", + state->id, header, size, remoteport, localport, + service->peer_version); + if (service->srvstate == VCHIQ_SRVSTATE_OPENING) { + service->remoteport = remoteport; + set_service_state(service, VCHIQ_SRVSTATE_OPEN); + complete(&service->remove_event); + } else { + dev_err(state->dev, "core: OPENACK received in state %s\n", + srvstate_names[service->srvstate]); + } + break; + case VCHIQ_MSG_CLOSE: + WARN_ON(size); /* There should be no data */ + + dev_dbg(state->dev, "core: %d: prs CLOSE@%p (%d->%d)\n", + state->id, header, remoteport, localport); + + mark_service_closing_internal(service, 1); + + if (vchiq_close_service_internal(service, CLOSE_RECVD) == -EAGAIN) + goto bail_not_ready; + + dev_dbg(state->dev, "core: Close Service %p4cc s:%u d:%d\n", + &service->base.fourcc, service->localport, service->remoteport); + break; + case VCHIQ_MSG_DATA: + dev_dbg(state->dev, "core: %d: prs DATA@%p,%x (%d->%d)\n", + state->id, header, size, remoteport, localport); + + if ((service->remoteport == remoteport) && + (service->srvstate == VCHIQ_SRVSTATE_OPEN)) { + header->msgid = msgid | VCHIQ_MSGID_CLAIMED; + claim_slot(state->rx_info); + DEBUG_TRACE(PARSE_LINE); + if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header, + NULL) == -EAGAIN) { + DEBUG_TRACE(PARSE_LINE); + goto bail_not_ready; + } + VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count); + VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes, size); + } else { + VCHIQ_STATS_INC(state, error_count); + } + break; + case VCHIQ_MSG_CONNECT: + dev_dbg(state->dev, "core: %d: prs CONNECT@%p\n", + state->id, header); + state->version_common = ((struct vchiq_slot_zero *) + state->slot_data)->version; + complete(&state->connect); + break; + case VCHIQ_MSG_BULK_RX: + case VCHIQ_MSG_BULK_TX: + /* + * We should never receive a bulk request from the + * other side since we're not setup to perform as the + * master. + */ + WARN_ON(1); + break; + case VCHIQ_MSG_BULK_RX_DONE: + case VCHIQ_MSG_BULK_TX_DONE: + if ((service->remoteport == remoteport) && + (service->srvstate != VCHIQ_SRVSTATE_FREE)) { + struct vchiq_bulk_queue *queue; + struct vchiq_bulk *bulk; + + queue = (type == VCHIQ_MSG_BULK_RX_DONE) ? 
+ &service->bulk_rx : &service->bulk_tx; + + DEBUG_TRACE(PARSE_LINE); + if (mutex_lock_killable(&service->bulk_mutex)) { + DEBUG_TRACE(PARSE_LINE); + goto bail_not_ready; + } + if ((int)(queue->remote_insert - + queue->local_insert) >= 0) { + dev_err(state->dev, + "core: %d: prs %s@%p (%d->%d) unexpected (ri=%d,li=%d)\n", + state->id, msg_type_str(type), header, remoteport, + localport, queue->remote_insert, queue->local_insert); + mutex_unlock(&service->bulk_mutex); + break; + } + if (queue->process != queue->remote_insert) { + dev_err(state->dev, "%s: p %x != ri %x\n", + __func__, queue->process, + queue->remote_insert); + mutex_unlock(&service->bulk_mutex); + goto bail_not_ready; + } + + bulk = &queue->bulks[BULK_INDEX(queue->remote_insert)]; + bulk->actual = *(int *)header->data; + queue->remote_insert++; + + dev_dbg(state->dev, "core: %d: prs %s@%p (%d->%d) %x@%pad\n", + state->id, msg_type_str(type), header, remoteport, + localport, bulk->actual, &bulk->dma_addr); + + dev_dbg(state->dev, "core: %d: prs:%d %cx li=%x ri=%x p=%x\n", + state->id, localport, + (type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't', + queue->local_insert, queue->remote_insert, queue->process); + + DEBUG_TRACE(PARSE_LINE); + WARN_ON(queue->process == queue->local_insert); + vchiq_complete_bulk(service->instance, bulk); + queue->process++; + mutex_unlock(&service->bulk_mutex); + DEBUG_TRACE(PARSE_LINE); + notify_bulks(service, queue, RETRY_POLL); + DEBUG_TRACE(PARSE_LINE); + } + break; + case VCHIQ_MSG_PADDING: + dev_dbg(state->dev, "core: %d: prs PADDING@%p,%x\n", + state->id, header, size); + break; + case VCHIQ_MSG_PAUSE: + /* If initiated, signal the application thread */ + dev_dbg(state->dev, "core: %d: prs PAUSE@%p,%x\n", + state->id, header, size); + if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) { + dev_err(state->dev, "core: %d: PAUSE received in state PAUSED\n", + state->id); + break; + } + if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) { + /* Send a PAUSE in response */ + if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0, + QMFLAGS_NO_MUTEX_UNLOCK) == -EINTR) + goto bail_not_ready; + } + /* At this point slot_mutex is held */ + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED); + break; + case VCHIQ_MSG_RESUME: + dev_dbg(state->dev, "core: %d: prs RESUME@%p,%x\n", + state->id, header, size); + /* Release the slot mutex */ + mutex_unlock(&state->slot_mutex); + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED); + break; + + case VCHIQ_MSG_REMOTE_USE: + vchiq_on_remote_use(state); + break; + case VCHIQ_MSG_REMOTE_RELEASE: + vchiq_on_remote_release(state); + break; + case VCHIQ_MSG_REMOTE_USE_ACTIVE: + break; + + default: + dev_err(state->dev, "core: %d: prs invalid msgid %x@%p,%x\n", + state->id, msgid, header, size); + WARN(1, "invalid message\n"); + break; + } + +skip_message: + ret = size; + +bail_not_ready: + if (service) + vchiq_service_put(service); + + return ret; +} + +/* Called by the slot handler thread */ +static void +parse_rx_slots(struct vchiq_state *state) +{ + struct vchiq_shared_state *remote = state->remote; + int tx_pos; + + DEBUG_INITIALISE(state->local); + + tx_pos = remote->tx_pos; + + while (state->rx_pos != tx_pos) { + struct vchiq_header *header; + int size; + + DEBUG_TRACE(PARSE_LINE); + if (!state->rx_data) { + int rx_index; + + WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK); + rx_index = remote->slot_queue[ + SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)]; + state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state, + rx_index); + state->rx_info = 
SLOT_INFO_FROM_INDEX(state, rx_index); + + /* + * Initialise use_count to one, and increment + * release_count at the end of the slot to avoid + * releasing the slot prematurely. + */ + state->rx_info->use_count = 1; + state->rx_info->release_count = 0; + } + + header = (struct vchiq_header *)(state->rx_data + + (state->rx_pos & VCHIQ_SLOT_MASK)); + size = parse_message(state, header); + if (size < 0) + return; + + state->rx_pos += calc_stride(size); + + DEBUG_TRACE(PARSE_LINE); + /* + * Perform some housekeeping when the end of the slot is + * reached. + */ + if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) { + /* Remove the extra reference count. */ + release_slot(state, state->rx_info, NULL, NULL); + state->rx_data = NULL; + } + } +} + +/** + * handle_poll() - handle service polling and other rare conditions + * @state: vchiq state struct + * + * Context: Process context + * + * Return: + * * 0 - poll handled successfully + * * -EAGAIN - retry later + */ +static int +handle_poll(struct vchiq_state *state) +{ + switch (state->conn_state) { + case VCHIQ_CONNSTATE_CONNECTED: + /* Poll the services as requested */ + poll_services(state); + break; + + case VCHIQ_CONNSTATE_PAUSING: + if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0, + QMFLAGS_NO_MUTEX_UNLOCK) != -EINTR) { + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT); + } else { + /* Retry later */ + return -EAGAIN; + } + break; + + case VCHIQ_CONNSTATE_RESUMING: + if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0, + QMFLAGS_NO_MUTEX_LOCK) != -EINTR) { + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED); + } else { + /* + * This should really be impossible, + * since the PAUSE should have flushed + * through outstanding messages. + */ + dev_err(state->dev, "core: Failed to send RESUME message\n"); + } + break; + default: + break; + } + + return 0; +} + +/* Called by the slot handler thread */ +static int +slot_handler_func(void *v) +{ + struct vchiq_state *state = v; + struct vchiq_shared_state *local = state->local; + int ret; + + DEBUG_INITIALISE(local); + + while (!kthread_should_stop()) { + DEBUG_COUNT(SLOT_HANDLER_COUNT); + DEBUG_TRACE(SLOT_HANDLER_LINE); + ret = remote_event_wait(&state->trigger_event, &local->trigger); + if (ret) + return ret; + + /* Ensure that reads don't overtake the remote_event_wait. 
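+ * Without the rmb() that follows, reads of the slot contents could + * be reordered ahead of the event check and observe stale data.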
*/ + rmb(); + + DEBUG_TRACE(SLOT_HANDLER_LINE); + if (state->poll_needed) { + state->poll_needed = 0; + + /* + * Handle service polling and other rare conditions here + * out of the mainline code + */ + if (handle_poll(state) == -EAGAIN) + state->poll_needed = 1; + } + + DEBUG_TRACE(SLOT_HANDLER_LINE); + parse_rx_slots(state); + } + return 0; +} + +/* Called by the recycle thread */ +static int +recycle_func(void *v) +{ + struct vchiq_state *state = v; + struct vchiq_shared_state *local = state->local; + u32 *found; + size_t length; + int ret; + + length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES); + + found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found), + GFP_KERNEL); + if (!found) + return -ENOMEM; + + while (!kthread_should_stop()) { + ret = remote_event_wait(&state->recycle_event, &local->recycle); + if (ret) + return ret; + + process_free_queue(state, found, length); + } + return 0; +} + +/* Called by the sync thread */ +static int +sync_func(void *v) +{ + struct vchiq_state *state = v; + struct vchiq_shared_state *local = state->local; + struct vchiq_header *header = + (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state, + state->remote->slot_sync); + int svc_fourcc; + int ret; + + while (!kthread_should_stop()) { + struct vchiq_service *service; + int msgid, size; + int type; + unsigned int localport, remoteport; + + ret = remote_event_wait(&state->sync_trigger_event, &local->sync_trigger); + if (ret) + return ret; + + /* Ensure that reads don't overtake the remote_event_wait. */ + rmb(); + + msgid = header->msgid; + size = header->size; + type = VCHIQ_MSG_TYPE(msgid); + localport = VCHIQ_MSG_DSTPORT(msgid); + remoteport = VCHIQ_MSG_SRCPORT(msgid); + + service = find_service_by_port(state, localport); + + if (!service) { + dev_err(state->dev, + "sync: %d: sf %s@%p (%d->%d) - invalid/closed service %d\n", + state->id, msg_type_str(type), header, remoteport, + localport, localport); + release_message_sync(state, header); + continue; + } + + svc_fourcc = service->base.fourcc; + + dev_dbg(state->dev, "sync: Rcvd Msg %s from %p4cc s:%d d:%d len:%d\n", + msg_type_str(type), &svc_fourcc, remoteport, localport, size); + if (size > 0) + vchiq_log_dump_mem(state->dev, "Rcvd", 0, header->data, min(16, size)); + + switch (type) { + case VCHIQ_MSG_OPENACK: + if (size >= sizeof(struct vchiq_openack_payload)) { + const struct vchiq_openack_payload *payload = + (struct vchiq_openack_payload *) + header->data; + service->peer_version = payload->version; + } + dev_err(state->dev, "sync: %d: sf OPENACK@%p,%x (%d->%d) v:%d\n", + state->id, header, size, remoteport, localport, + service->peer_version); + if (service->srvstate == VCHIQ_SRVSTATE_OPENING) { + service->remoteport = remoteport; + set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC); + service->sync = 1; + complete(&service->remove_event); + } + release_message_sync(state, header); + break; + + case VCHIQ_MSG_DATA: + dev_dbg(state->dev, "sync: %d: sf DATA@%p,%x (%d->%d)\n", + state->id, header, size, remoteport, localport); + + if ((service->remoteport == remoteport) && + (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) { + if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header, + NULL) == -EAGAIN) + dev_err(state->dev, + "sync: error: synchronous callback to service %d returns -EAGAIN\n", + localport); + } + break; + + default: + dev_err(state->dev, "sync: error: %d: sf unexpected msgid %x@%p,%x\n", + state->id, msgid, header, size); + release_message_sync(state, header); + break; + } + + 
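/* Drop the reference taken by find_service_by_port() above. */ + 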
vchiq_service_put(service); + } + + return 0; +} + +inline const char * +get_conn_state_name(enum vchiq_connstate conn_state) +{ + return conn_state_names[conn_state]; +} + +struct vchiq_slot_zero * +vchiq_init_slots(struct device *dev, void *mem_base, int mem_size) +{ + int mem_align = + (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK); + struct vchiq_slot_zero *slot_zero = + (struct vchiq_slot_zero *)(mem_base + mem_align); + int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE; + int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS; + + check_sizes(); + + /* Ensure there is enough memory to run an absolutely minimum system */ + num_slots -= first_data_slot; + + if (num_slots < 4) { + dev_err(dev, "core: %s: Insufficient memory %x bytes\n", + __func__, mem_size); + return NULL; + } + + memset(slot_zero, 0, sizeof(struct vchiq_slot_zero)); + + slot_zero->magic = VCHIQ_MAGIC; + slot_zero->version = VCHIQ_VERSION; + slot_zero->version_min = VCHIQ_VERSION_MIN; + slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero); + slot_zero->slot_size = VCHIQ_SLOT_SIZE; + slot_zero->max_slots = VCHIQ_MAX_SLOTS; + slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE; + + slot_zero->master.slot_sync = first_data_slot; + slot_zero->master.slot_first = first_data_slot + 1; + slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1; + slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2); + slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1; + slot_zero->slave.slot_last = first_data_slot + num_slots - 1; + + return slot_zero; +} + +int +vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev) +{ + struct vchiq_shared_state *local; + struct vchiq_shared_state *remote; + char threadname[16]; + int i, ret; + + local = &slot_zero->slave; + remote = &slot_zero->master; + + if (local->initialised) { + if (remote->initialised) + dev_err(dev, "local state has already been initialised\n"); + else + dev_err(dev, "master/slave mismatch two slaves\n"); + + return -EINVAL; + } + + memset(state, 0, sizeof(struct vchiq_state)); + + state->dev = dev; + + /* + * initialize shared state pointers + */ + + state->local = local; + state->remote = remote; + state->slot_data = (struct vchiq_slot *)slot_zero; + + /* + * initialize events and mutexes + */ + + init_completion(&state->connect); + mutex_init(&state->mutex); + mutex_init(&state->slot_mutex); + mutex_init(&state->recycle_mutex); + mutex_init(&state->sync_mutex); + + spin_lock_init(&state->msg_queue_spinlock); + spin_lock_init(&state->bulk_waiter_spinlock); + spin_lock_init(&state->quota_spinlock); + + init_completion(&state->slot_available_event); + init_completion(&state->data_quota_event); + + state->slot_queue_available = 0; + + for (i = 0; i < VCHIQ_MAX_SERVICES; i++) { + struct vchiq_service_quota *quota = &state->service_quotas[i]; + + init_completion(&quota->quota_event); + } + + for (i = local->slot_first; i <= local->slot_last; i++) { + local->slot_queue[state->slot_queue_available] = i; + state->slot_queue_available++; + complete(&state->slot_available_event); + } + + state->default_slot_quota = state->slot_queue_available / 2; + state->default_message_quota = + min_t(unsigned short, state->default_slot_quota * 256, ~0); + + state->previous_data_index = -1; + state->data_use_count = 0; + state->data_quota = state->slot_queue_available - 1; + + remote_event_create(&state->trigger_event, &local->trigger); + local->tx_pos = 0; + remote_event_create(&state->recycle_event, 
&local->recycle); + local->slot_queue_recycle = state->slot_queue_available; + remote_event_create(&state->sync_trigger_event, &local->sync_trigger); + remote_event_create(&state->sync_release_event, &local->sync_release); + + /* At start-of-day, the slot is empty and available */ + ((struct vchiq_header *) + SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid = + VCHIQ_MSGID_PADDING; + remote_event_signal_local(&state->sync_release_event, &local->sync_release); + + local->debug[DEBUG_ENTRIES] = DEBUG_MAX; + + ret = vchiq_platform_init_state(state); + if (ret) + return ret; + + /* + * bring up slot handler thread + */ + snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id); + state->slot_handler_thread = kthread_create(&slot_handler_func, (void *)state, threadname); + + if (IS_ERR(state->slot_handler_thread)) { + dev_err(state->dev, "couldn't create thread %s\n", threadname); + return PTR_ERR(state->slot_handler_thread); + } + set_user_nice(state->slot_handler_thread, -19); + + snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id); + state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname); + if (IS_ERR(state->recycle_thread)) { + dev_err(state->dev, "couldn't create thread %s\n", threadname); + ret = PTR_ERR(state->recycle_thread); + goto fail_free_handler_thread; + } + set_user_nice(state->recycle_thread, -19); + + snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id); + state->sync_thread = kthread_create(&sync_func, (void *)state, threadname); + if (IS_ERR(state->sync_thread)) { + dev_err(state->dev, "couldn't create thread %s\n", threadname); + ret = PTR_ERR(state->sync_thread); + goto fail_free_recycle_thread; + } + set_user_nice(state->sync_thread, -20); + + wake_up_process(state->slot_handler_thread); + wake_up_process(state->recycle_thread); + wake_up_process(state->sync_thread); + + /* Indicate readiness to the other side */ + local->initialised = 1; + + return 0; + +fail_free_recycle_thread: + kthread_stop(state->recycle_thread); +fail_free_handler_thread: + kthread_stop(state->slot_handler_thread); + + return ret; +} + +void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_header *header) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + int pos; + + if (!service) + return; + + while (service->msg_queue_write == service->msg_queue_read + + VCHIQ_MAX_SLOTS) { + if (wait_for_completion_interruptible(&service->msg_queue_pop)) + flush_signals(current); + } + + pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1); + service->msg_queue_write++; + service->msg_queue[pos] = header; + + complete(&service->msg_queue_push); +} +EXPORT_SYMBOL(vchiq_msg_queue_push); + +struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + struct vchiq_header *header; + int pos; + + if (!service) + return NULL; + + if (service->msg_queue_write == service->msg_queue_read) + return NULL; + + while (service->msg_queue_write == service->msg_queue_read) { + if (wait_for_completion_interruptible(&service->msg_queue_push)) + flush_signals(current); + } + + pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1); + service->msg_queue_read++; + header = service->msg_queue[pos]; + + complete(&service->msg_queue_pop); + + return header; +} +EXPORT_SYMBOL(vchiq_msg_hold); + +static int vchiq_validate_params(struct vchiq_state *state, + const struct 
vchiq_service_params_kernel *params) +{ + if (!params->callback || !params->fourcc) { + dev_err(state->dev, "Can't add service, invalid params\n"); + return -EINVAL; + } + + return 0; +} + +/* Called from application thread when a client or server service is created. */ +struct vchiq_service * +vchiq_add_service_internal(struct vchiq_state *state, + const struct vchiq_service_params_kernel *params, + int srvstate, struct vchiq_instance *instance, + void (*userdata_term)(void *userdata)) +{ + struct vchiq_service *service; + struct vchiq_service __rcu **pservice = NULL; + struct vchiq_service_quota *quota; + int ret; + int i; + + ret = vchiq_validate_params(state, params); + if (ret) + return NULL; + + service = kzalloc(sizeof(*service), GFP_KERNEL); + if (!service) + return service; + + service->base.fourcc = params->fourcc; + service->base.callback = params->callback; + service->base.userdata = params->userdata; + service->handle = VCHIQ_SERVICE_HANDLE_INVALID; + kref_init(&service->ref_count); + service->srvstate = VCHIQ_SRVSTATE_FREE; + service->userdata_term = userdata_term; + service->localport = VCHIQ_PORT_FREE; + service->remoteport = VCHIQ_PORT_FREE; + + service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ? + VCHIQ_FOURCC_INVALID : params->fourcc; + service->auto_close = 1; + atomic_set(&service->poll_flags, 0); + service->version = params->version; + service->version_min = params->version_min; + service->state = state; + service->instance = instance; + init_completion(&service->remove_event); + init_completion(&service->bulk_remove_event); + init_completion(&service->msg_queue_pop); + init_completion(&service->msg_queue_push); + mutex_init(&service->bulk_mutex); + + /* + * Although it is perfectly possible to use a spinlock + * to protect the creation of services, it is overkill as it + * disables interrupts while the array is searched. + * The only danger is of another thread trying to create a + * service - service deletion is safe. + * Therefore it is preferable to use state->mutex which, + * although slower to claim, doesn't block interrupts while + * it is held. + */ + + mutex_lock(&state->mutex); + + /* Prepare to use a previously unused service */ + if (state->unused_service < VCHIQ_MAX_SERVICES) + pservice = &state->services[state->unused_service]; + + if (srvstate == VCHIQ_SRVSTATE_OPENING) { + for (i = 0; i < state->unused_service; i++) { + if (!rcu_access_pointer(state->services[i])) { + pservice = &state->services[i]; + break; + } + } + } else { + rcu_read_lock(); + for (i = (state->unused_service - 1); i >= 0; i--) { + struct vchiq_service *srv; + + srv = rcu_dereference(state->services[i]); + if (!srv) { + pservice = &state->services[i]; + } else if ((srv->public_fourcc == params->fourcc) && + ((srv->instance != instance) || + (srv->base.callback != params->callback))) { + /* + * There is another server using this + * fourcc which doesn't match. 
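+ * A duplicate fourcc is only tolerated when both the instance + * and the callback match, i.e. the same server registering again.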
+ */ + pservice = NULL; + break; + } + } + rcu_read_unlock(); + } + + if (pservice) { + service->localport = (pservice - state->services); + if (!handle_seq) + handle_seq = VCHIQ_MAX_STATES * + VCHIQ_MAX_SERVICES; + service->handle = handle_seq | + (state->id * VCHIQ_MAX_SERVICES) | + service->localport; + handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES; + rcu_assign_pointer(*pservice, service); + if (pservice == &state->services[state->unused_service]) + state->unused_service++; + } + + mutex_unlock(&state->mutex); + + if (!pservice) { + kfree(service); + return NULL; + } + + quota = &state->service_quotas[service->localport]; + quota->slot_quota = state->default_slot_quota; + quota->message_quota = state->default_message_quota; + if (quota->slot_use_count == 0) + quota->previous_tx_index = + SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos) + - 1; + + /* Bring this service online */ + set_service_state(service, srvstate); + + dev_dbg(state->dev, "core_msg: %s Service %p4cc SrcPort:%d\n", + (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add", + &params->fourcc, service->localport); + + /* Don't unlock the service - leave it with a ref_count of 1. */ + + return service; +} + +int +vchiq_open_service_internal(struct vchiq_service *service, int client_id) +{ + struct vchiq_open_payload payload = { + service->base.fourcc, + client_id, + service->version, + service->version_min + }; + int status = 0; + + service->client_id = client_id; + vchiq_use_service_internal(service); + status = queue_message(service->state, + NULL, MAKE_OPEN(service->localport), + memcpy_copy_callback, + &payload, + sizeof(payload), + QMFLAGS_IS_BLOCKING); + + if (status) + return status; + + /* Wait for the ACK/NAK */ + if (wait_for_completion_interruptible(&service->remove_event)) { + status = -EAGAIN; + vchiq_release_service_internal(service); + } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) && + (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) { + if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) + dev_err(service->state->dev, + "core: %d: osi - srvstate = %s (ref %u)\n", + service->state->id, srvstate_names[service->srvstate], + kref_read(&service->ref_count)); + status = -EINVAL; + VCHIQ_SERVICE_STATS_INC(service, error_count); + vchiq_release_service_internal(service); + } + + return status; +} + +static void +release_service_messages(struct vchiq_service *service) +{ + struct vchiq_state *state = service->state; + int slot_last = state->remote->slot_last; + int i; + + /* Release any claimed messages aimed at this service */ + + if (service->sync) { + struct vchiq_header *header = + (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state, + state->remote->slot_sync); + if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport) + release_message_sync(state, header); + + return; + } + + for (i = state->remote->slot_first; i <= slot_last; i++) { + struct vchiq_slot_info *slot_info = + SLOT_INFO_FROM_INDEX(state, i); + unsigned int pos, end; + char *data; + + if (slot_info->release_count == slot_info->use_count) + continue; + + data = (char *)SLOT_DATA_FROM_INDEX(state, i); + end = VCHIQ_SLOT_SIZE; + if (data == state->rx_data) + /* + * This buffer is still being read from - stop + * at the current read position + */ + end = state->rx_pos & VCHIQ_SLOT_MASK; + + pos = 0; + + while (pos < end) { + struct vchiq_header *header = + (struct vchiq_header *)(data + pos); + int msgid = header->msgid; + int port = VCHIQ_MSG_DSTPORT(msgid); + + if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) { + 
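/* + * This claimed message was addressed to the dying service; + * release the claim so that the slot can be recycled. + */ + 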
dev_dbg(state->dev, "core: fsi - hdr %p\n", header); + release_slot(state, slot_info, header, NULL); + } + pos += calc_stride(header->size); + if (pos > VCHIQ_SLOT_SIZE) { + dev_err(state->dev, + "core: fsi - pos %x: header %p, msgid %x, header->msgid %x, header->size %x\n", + pos, header, msgid, header->msgid, header->size); + WARN(1, "invalid slot position\n"); + } + } + } +} + +static int +do_abort_bulks(struct vchiq_service *service) +{ + int status; + + /* Abort any outstanding bulk transfers */ + if (mutex_lock_killable(&service->bulk_mutex)) + return 0; + abort_outstanding_bulks(service, &service->bulk_tx); + abort_outstanding_bulks(service, &service->bulk_rx); + mutex_unlock(&service->bulk_mutex); + + status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL); + if (status) + return 0; + + status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL); + return !status; +} + +static int +close_service_complete(struct vchiq_service *service, int failstate) +{ + int status; + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID); + int newstate; + + switch (service->srvstate) { + case VCHIQ_SRVSTATE_OPEN: + case VCHIQ_SRVSTATE_CLOSESENT: + case VCHIQ_SRVSTATE_CLOSERECVD: + if (is_server) { + if (service->auto_close) { + service->client_id = 0; + service->remoteport = VCHIQ_PORT_FREE; + newstate = VCHIQ_SRVSTATE_LISTENING; + } else { + newstate = VCHIQ_SRVSTATE_CLOSEWAIT; + } + } else { + newstate = VCHIQ_SRVSTATE_CLOSED; + } + set_service_state(service, newstate); + break; + case VCHIQ_SRVSTATE_LISTENING: + break; + default: + dev_err(service->state->dev, "core: (%x) called in state %s\n", + service->handle, srvstate_names[service->srvstate]); + WARN(1, "%s in unexpected state\n", __func__); + return -EINVAL; + } + + status = make_service_callback(service, VCHIQ_SERVICE_CLOSED, NULL, NULL); + + if (status != -EAGAIN) { + int uc = service->service_use_count; + int i; + /* Complete the close process */ + for (i = 0; i < uc; i++) + /* + * cater for cases where close is forced and the + * client may not close all it's handles + */ + vchiq_release_service_internal(service); + + service->client_id = 0; + service->remoteport = VCHIQ_PORT_FREE; + + if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) { + vchiq_free_service_internal(service); + } else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) { + if (is_server) + service->closing = 0; + + complete(&service->remove_event); + } + } else { + set_service_state(service, failstate); + } + + return status; +} + +/* + * Prepares a bulk transfer to be queued. The function is interruptible and is + * intended to be called from user threads. It may return -EAGAIN to indicate + * that a signal has been received and the call should be retried after being + * returned to user context. + */ +static int +vchiq_bulk_xfer_queue_msg_killable(struct vchiq_service *service, + struct vchiq_bulk *bulk_params) +{ + struct vchiq_bulk_queue *queue; + struct bulk_waiter *bulk_waiter = NULL; + struct vchiq_bulk *bulk; + struct vchiq_state *state = service->state; + const char dir_char = (bulk_params->dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r'; + const int dir_msgtype = (bulk_params->dir == VCHIQ_BULK_TRANSMIT) ? + VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX; + int status = -EINVAL; + int payload[2]; + + if (bulk_params->mode == VCHIQ_BULK_MODE_BLOCKING) { + bulk_waiter = bulk_params->waiter; + init_completion(&bulk_waiter->event); + bulk_waiter->actual = 0; + bulk_waiter->bulk = NULL; + } + + queue = (bulk_params->dir == VCHIQ_BULK_TRANSMIT) ? 
+ &service->bulk_tx : &service->bulk_rx; + + if (mutex_lock_killable(&service->bulk_mutex)) + return -EINTR; + + if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) { + VCHIQ_SERVICE_STATS_INC(service, bulk_stalls); + do { + mutex_unlock(&service->bulk_mutex); + if (wait_for_completion_killable(&service->bulk_remove_event)) + return -EINTR; + if (mutex_lock_killable(&service->bulk_mutex)) + return -EINTR; + } while (queue->local_insert == queue->remove + + VCHIQ_NUM_SERVICE_BULKS); + } + + bulk = &queue->bulks[BULK_INDEX(queue->local_insert)]; + + /* Initialize the 'bulk' slot with bulk parameters passed in. */ + bulk->mode = bulk_params->mode; + bulk->dir = bulk_params->dir; + bulk->waiter = bulk_params->waiter; + bulk->cb_data = bulk_params->cb_data; + bulk->cb_userdata = bulk_params->cb_userdata; + bulk->size = bulk_params->size; + bulk->offset = bulk_params->offset; + bulk->uoffset = bulk_params->uoffset; + bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED; + + if (vchiq_prepare_bulk_data(service->instance, bulk)) + goto unlock_error_exit; + + /* + * Ensure that the bulk data record is visible to the peer + * before proceeding. + */ + wmb(); + + dev_dbg(state->dev, "core: %d: bt (%d->%d) %cx %x@%pad %p\n", + state->id, service->localport, service->remoteport, + dir_char, bulk->size, &bulk->dma_addr, bulk->cb_data); + + /* + * The slot mutex must be held when the service is being closed, so + * claim it here to ensure that isn't happening + */ + if (mutex_lock_killable(&state->slot_mutex)) { + status = -EINTR; + goto cancel_bulk_error_exit; + } + + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) + goto unlock_both_error_exit; + + payload[0] = lower_32_bits(bulk->dma_addr); + payload[1] = bulk->size; + status = queue_message(state, + NULL, + VCHIQ_MAKE_MSG(dir_msgtype, + service->localport, + service->remoteport), + memcpy_copy_callback, + &payload, + sizeof(payload), + QMFLAGS_IS_BLOCKING | + QMFLAGS_NO_MUTEX_LOCK | + QMFLAGS_NO_MUTEX_UNLOCK); + if (status) + goto unlock_both_error_exit; + + queue->local_insert++; + + mutex_unlock(&state->slot_mutex); + mutex_unlock(&service->bulk_mutex); + + dev_dbg(state->dev, "core: %d: bt:%d %cx li=%x ri=%x p=%x\n", + state->id, service->localport, dir_char, queue->local_insert, + queue->remote_insert, queue->process); + + if (bulk_waiter) { + bulk_waiter->bulk = bulk; + if (wait_for_completion_killable(&bulk_waiter->event)) + status = -EINTR; + else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED) + status = -EINVAL; + } + + return status; + +unlock_both_error_exit: + mutex_unlock(&state->slot_mutex); +cancel_bulk_error_exit: + vchiq_complete_bulk(service->instance, bulk); +unlock_error_exit: + mutex_unlock(&service->bulk_mutex); + + return status; +} + +/* Called by the slot handler */ +int +vchiq_close_service_internal(struct vchiq_service *service, int close_recvd) +{ + struct vchiq_state *state = service->state; + int status = 0; + int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID); + int close_id = MAKE_CLOSE(service->localport, + VCHIQ_MSG_DSTPORT(service->remoteport)); + + dev_dbg(state->dev, "core: %d: csi:%d,%d (%s)\n", + service->state->id, service->localport, close_recvd, + srvstate_names[service->srvstate]); + + switch (service->srvstate) { + case VCHIQ_SRVSTATE_CLOSED: + case VCHIQ_SRVSTATE_HIDDEN: + case VCHIQ_SRVSTATE_LISTENING: + case VCHIQ_SRVSTATE_CLOSEWAIT: + if (close_recvd) { + dev_err(state->dev, "core: (1) called in state %s\n", + srvstate_names[service->srvstate]); + break; + } else if (!is_server) { 
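+ /* + * A client in one of these quiescent states has no peer-visible + * state left to tear down, so it can be freed immediately. + */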
+ vchiq_free_service_internal(service); + break; + } + + if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) { + status = -EINVAL; + } else { + service->client_id = 0; + service->remoteport = VCHIQ_PORT_FREE; + if (service->srvstate == VCHIQ_SRVSTATE_CLOSEWAIT) + set_service_state(service, VCHIQ_SRVSTATE_LISTENING); + } + complete(&service->remove_event); + break; + case VCHIQ_SRVSTATE_OPENING: + if (close_recvd) { + /* The open was rejected - tell the user */ + set_service_state(service, VCHIQ_SRVSTATE_CLOSEWAIT); + complete(&service->remove_event); + } else { + /* Shutdown mid-open - let the other side know */ + status = queue_message(state, service, close_id, NULL, NULL, 0, 0); + } + break; + + case VCHIQ_SRVSTATE_OPENSYNC: + mutex_lock(&state->sync_mutex); + fallthrough; + case VCHIQ_SRVSTATE_OPEN: + if (close_recvd) { + if (!do_abort_bulks(service)) + status = -EAGAIN; + } + + release_service_messages(service); + + if (!status) + status = queue_message(state, service, close_id, NULL, + NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK); + + if (status) { + if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) + mutex_unlock(&state->sync_mutex); + break; + } + + if (!close_recvd) { + /* Change the state while the mutex is still held */ + set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT); + mutex_unlock(&state->slot_mutex); + if (service->sync) + mutex_unlock(&state->sync_mutex); + break; + } + + /* Change the state while the mutex is still held */ + set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD); + mutex_unlock(&state->slot_mutex); + if (service->sync) + mutex_unlock(&state->sync_mutex); + + status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD); + break; + + case VCHIQ_SRVSTATE_CLOSESENT: + if (!close_recvd) + /* This happens when a process is killed mid-close */ + break; + + if (!do_abort_bulks(service)) { + status = -EAGAIN; + break; + } + + if (!status) + status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD); + break; + + case VCHIQ_SRVSTATE_CLOSERECVD: + if (!close_recvd && is_server) + /* Force into LISTENING mode */ + set_service_state(service, VCHIQ_SRVSTATE_LISTENING); + status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD); + break; + + default: + dev_err(state->dev, "core: (%d) called in state %s\n", + close_recvd, srvstate_names[service->srvstate]); + break; + } + + return status; +} + +/* Called from the application process upon process death */ +void +vchiq_terminate_service_internal(struct vchiq_service *service) +{ + struct vchiq_state *state = service->state; + + dev_dbg(state->dev, "core: %d: tsi - (%d<->%d)\n", + state->id, service->localport, service->remoteport); + + mark_service_closing(service); + + /* Mark the service for removal by the slot handler */ + request_poll(state, service, VCHIQ_POLL_REMOVE); +} + +/* Called from the slot handler */ +void +vchiq_free_service_internal(struct vchiq_service *service) +{ + struct vchiq_state *state = service->state; + + dev_dbg(state->dev, "core: %d: fsi - (%d)\n", state->id, service->localport); + + switch (service->srvstate) { + case VCHIQ_SRVSTATE_OPENING: + case VCHIQ_SRVSTATE_CLOSED: + case VCHIQ_SRVSTATE_HIDDEN: + case VCHIQ_SRVSTATE_LISTENING: + case VCHIQ_SRVSTATE_CLOSEWAIT: + break; + default: + dev_err(state->dev, "core: %d: fsi - (%d) in state %s\n", + state->id, service->localport, srvstate_names[service->srvstate]); + return; + } + + set_service_state(service, VCHIQ_SRVSTATE_FREE); + + complete(&service->remove_event); + + /* Release the initial lock */ + 
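/* (the reference taken by kref_init() in vchiq_add_service_internal(), + * which deliberately left the service with a ref_count of 1) */ + 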
vchiq_service_put(service); +} + +int +vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance) +{ + struct vchiq_service *service; + int status = 0; + int i; + + /* Find all services registered to this client and enable them. */ + i = 0; + while ((service = next_service_by_instance(state, instance, &i)) != NULL) { + if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN) + set_service_state(service, VCHIQ_SRVSTATE_LISTENING); + vchiq_service_put(service); + } + + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) { + status = queue_message(state, NULL, MAKE_CONNECT, NULL, NULL, 0, + QMFLAGS_IS_BLOCKING); + if (status) + return status; + + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING); + } + + if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) { + if (wait_for_completion_interruptible(&state->connect)) + return -EAGAIN; + + vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED); + complete(&state->connect); + } + + return status; +} + +void +vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance) +{ + struct vchiq_service *service; + int i; + + /* Find all services registered to this client and remove them. */ + i = 0; + while ((service = next_service_by_instance(state, instance, &i)) != NULL) { + (void)vchiq_remove_service(instance, service->handle); + vchiq_service_put(service); + } +} + +int +vchiq_close_service(struct vchiq_instance *instance, unsigned int handle) +{ + /* Unregister the service */ + struct vchiq_service *service = find_service_by_handle(instance, handle); + int status = 0; + + if (!service) + return -EINVAL; + + dev_dbg(service->state->dev, "core: %d: close_service:%d\n", + service->state->id, service->localport); + + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) || + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) || + (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) { + vchiq_service_put(service); + return -EINVAL; + } + + mark_service_closing(service); + + if (current == service->state->slot_handler_thread) { + status = vchiq_close_service_internal(service, NO_CLOSE_RECVD); + WARN_ON(status == -EAGAIN); + } else { + /* Mark the service for termination by the slot handler */ + request_poll(service->state, service, VCHIQ_POLL_TERMINATE); + } + + while (1) { + if (wait_for_completion_interruptible(&service->remove_event)) { + status = -EAGAIN; + break; + } + + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) || + (service->srvstate == VCHIQ_SRVSTATE_LISTENING) || + (service->srvstate == VCHIQ_SRVSTATE_OPEN)) + break; + + dev_warn(service->state->dev, + "core: %d: close_service:%d - waiting in state %s\n", + service->state->id, service->localport, + srvstate_names[service->srvstate]); + } + + if (!status && + (service->srvstate != VCHIQ_SRVSTATE_FREE) && + (service->srvstate != VCHIQ_SRVSTATE_LISTENING)) + status = -EINVAL; + + vchiq_service_put(service); + + return status; +} +EXPORT_SYMBOL(vchiq_close_service); + +int +vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle) +{ + /* Unregister the service */ + struct vchiq_service *service = find_service_by_handle(instance, handle); + int status = 0; + + if (!service) + return -EINVAL; + + dev_dbg(service->state->dev, "core: %d: remove_service:%d\n", + service->state->id, service->localport); + + if (service->srvstate == VCHIQ_SRVSTATE_FREE) { + vchiq_service_put(service); + return -EINVAL; + } + + mark_service_closing(service); + + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) || + (current == service->state->slot_handler_thread)) { + /* + 
* Make it look like a client, because it must be removed and + * not left in the LISTENING state. + */ + service->public_fourcc = VCHIQ_FOURCC_INVALID; + + status = vchiq_close_service_internal(service, NO_CLOSE_RECVD); + WARN_ON(status == -EAGAIN); + } else { + /* Mark the service for removal by the slot handler */ + request_poll(service->state, service, VCHIQ_POLL_REMOVE); + } + while (1) { + if (wait_for_completion_interruptible(&service->remove_event)) { + status = -EAGAIN; + break; + } + + if ((service->srvstate == VCHIQ_SRVSTATE_FREE) || + (service->srvstate == VCHIQ_SRVSTATE_OPEN)) + break; + + dev_warn(service->state->dev, + "core: %d: remove_service:%d - waiting in state %s\n", + service->state->id, service->localport, + srvstate_names[service->srvstate]); + } + + if (!status && (service->srvstate != VCHIQ_SRVSTATE_FREE)) + status = -EINVAL; + + vchiq_service_put(service); + + return status; +} + +int +vchiq_bulk_xfer_blocking(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_bulk *bulk_params) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + int status = -EINVAL; + + if (!service) + return -EINVAL; + + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) + goto error_exit; + + if (!bulk_params->offset && !bulk_params->uoffset) + goto error_exit; + + if (vchiq_check_service(service)) + goto error_exit; + + status = vchiq_bulk_xfer_queue_msg_killable(service, bulk_params); + +error_exit: + vchiq_service_put(service); + + return status; +} + +int +vchiq_bulk_xfer_callback(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_bulk *bulk_params) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + int status = -EINVAL; + + if (!service) + return -EINVAL; + + if (bulk_params->mode != VCHIQ_BULK_MODE_CALLBACK && + bulk_params->mode != VCHIQ_BULK_MODE_NOCALLBACK) + goto error_exit; + + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) + goto error_exit; + + if (!bulk_params->offset && !bulk_params->uoffset) + goto error_exit; + + if (vchiq_check_service(service)) + goto error_exit; + + status = vchiq_bulk_xfer_queue_msg_killable(service, bulk_params); + +error_exit: + vchiq_service_put(service); + + return status; +} + +/* + * This function is called by VCHIQ ioctl interface and is interruptible. + * It may receive -EAGAIN to indicate that a signal has been received + * and the call should be retried after being returned to user context. 
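+ * + * Illustrative caller-side sketch (names are those of the ioctl + * layer's VCHIQ_BULK_MODE_WAITING path, which stores the bulk_waiter + * of an interrupted blocking transfer and retries with it here): + * + * ret = vchiq_bulk_xfer_waiting(instance, args->handle, + * &waiter->bulk_waiter); + * if (ret == -EINTR) + * return ret; (a signal arrived before the completion) + * + * No new transfer is queued; this call only waits for the completion + * of a previously queued bulk.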
+ */ +int +vchiq_bulk_xfer_waiting(struct vchiq_instance *instance, + unsigned int handle, struct bulk_waiter *waiter) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + struct bulk_waiter *bulk_waiter; + int status = -EINVAL; + + if (!service) + return -EINVAL; + + if (!waiter) + goto error_exit; + + if (service->srvstate != VCHIQ_SRVSTATE_OPEN) + goto error_exit; + + if (vchiq_check_service(service)) + goto error_exit; + + bulk_waiter = waiter; + + vchiq_service_put(service); + + status = 0; + + if (wait_for_completion_killable(&bulk_waiter->event)) + return -EINTR; + else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED) + return -EINVAL; + + return status; + +error_exit: + vchiq_service_put(service); + + return status; +} + +int +vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle, + ssize_t (*copy_callback)(void *context, void *dest, + size_t offset, size_t maxsize), + void *context, + size_t size) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + int status = -EINVAL; + int data_id; + + if (!service) + goto error_exit; + + if (vchiq_check_service(service)) + goto error_exit; + + if (!size) { + VCHIQ_SERVICE_STATS_INC(service, error_count); + goto error_exit; + } + + if (size > VCHIQ_MAX_MSG_SIZE) { + VCHIQ_SERVICE_STATS_INC(service, error_count); + goto error_exit; + } + + data_id = MAKE_DATA(service->localport, service->remoteport); + + switch (service->srvstate) { + case VCHIQ_SRVSTATE_OPEN: + status = queue_message(service->state, service, data_id, + copy_callback, context, size, + QMFLAGS_IS_BLOCKING); + break; + case VCHIQ_SRVSTATE_OPENSYNC: + status = queue_message_sync(service->state, service, data_id, + copy_callback, context, size); + break; + default: + status = -EINVAL; + break; + } + +error_exit: + if (service) + vchiq_service_put(service); + + return status; +} + +int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, void *data, + unsigned int size) +{ + return vchiq_queue_message(instance, handle, memcpy_copy_callback, + data, size); +} +EXPORT_SYMBOL(vchiq_queue_kernel_message); + +void +vchiq_release_message(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_header *header) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + struct vchiq_shared_state *remote; + struct vchiq_state *state; + int slot_index; + + if (!service) + return; + + state = service->state; + remote = state->remote; + + slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header); + + if ((slot_index >= remote->slot_first) && + (slot_index <= remote->slot_last)) { + int msgid = header->msgid; + + if (msgid & VCHIQ_MSGID_CLAIMED) { + struct vchiq_slot_info *slot_info = + SLOT_INFO_FROM_INDEX(state, slot_index); + + release_slot(state, slot_info, header, service); + } + } else if (slot_index == remote->slot_sync) { + release_message_sync(state, header); + } + + vchiq_service_put(service); +} +EXPORT_SYMBOL(vchiq_release_message); + +static void +release_message_sync(struct vchiq_state *state, struct vchiq_header *header) +{ + header->msgid = VCHIQ_MSGID_PADDING; + remote_event_signal(state, &state->remote->sync_release); +} + +int +vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle, short *peer_version) +{ + int status = -EINVAL; + struct vchiq_service *service = find_service_by_handle(instance, handle); + + if (!service) + goto exit; + + if (vchiq_check_service(service)) + goto exit; + + if (!peer_version) + goto 
exit; + + *peer_version = service->peer_version; + status = 0; + +exit: + if (service) + vchiq_service_put(service); + return status; +} +EXPORT_SYMBOL(vchiq_get_peer_version); + +void vchiq_get_config(struct vchiq_config *config) +{ + config->max_msg_size = VCHIQ_MAX_MSG_SIZE; + config->bulk_threshold = VCHIQ_MAX_MSG_SIZE; + config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS; + config->max_services = VCHIQ_MAX_SERVICES; + config->version = VCHIQ_VERSION; + config->version_min = VCHIQ_VERSION_MIN; +} + +int +vchiq_set_service_option(struct vchiq_instance *instance, unsigned int handle, + enum vchiq_service_option option, int value) +{ + struct vchiq_service *service = find_service_by_handle(instance, handle); + struct vchiq_service_quota *quota; + int ret = -EINVAL; + + if (!service) + return -EINVAL; + + switch (option) { + case VCHIQ_SERVICE_OPTION_AUTOCLOSE: + service->auto_close = value; + ret = 0; + break; + + case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: + quota = &service->state->service_quotas[service->localport]; + if (value == 0) + value = service->state->default_slot_quota; + if ((value >= quota->slot_use_count) && + (value < (unsigned short)~0)) { + quota->slot_quota = value; + if ((value >= quota->slot_use_count) && + (quota->message_quota >= quota->message_use_count)) + /* + * Signal the service that it may have + * dropped below its quota + */ + complete(&quota->quota_event); + ret = 0; + } + break; + + case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: + quota = &service->state->service_quotas[service->localport]; + if (value == 0) + value = service->state->default_message_quota; + if ((value >= quota->message_use_count) && + (value < (unsigned short)~0)) { + quota->message_quota = value; + if ((value >= quota->message_use_count) && + (quota->slot_quota >= quota->slot_use_count)) + /* + * Signal the service that it may have + * dropped below its quota + */ + complete(&quota->quota_event); + ret = 0; + } + break; + + case VCHIQ_SERVICE_OPTION_SYNCHRONOUS: + if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) || + (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) { + service->sync = value; + ret = 0; + } + break; + + case VCHIQ_SERVICE_OPTION_TRACE: + service->trace = value; + ret = 0; + break; + + default: + break; + } + vchiq_service_put(service); + + return ret; +} + +static void +vchiq_dump_shared_state(struct seq_file *f, struct vchiq_state *state, + struct vchiq_shared_state *shared, const char *label) +{ + static const char *const debug_names[] = { + "<entries>", + "SLOT_HANDLER_COUNT", + "SLOT_HANDLER_LINE", + "PARSE_LINE", + "PARSE_HEADER", + "PARSE_MSGID", + "AWAIT_COMPLETION_LINE", + "DEQUEUE_MESSAGE_LINE", + "SERVICE_CALLBACK_LINE", + "MSG_QUEUE_FULL_COUNT", + "COMPLETION_QUEUE_FULL_COUNT" + }; + int i; + + seq_printf(f, " %s: slots %d-%d tx_pos=0x%x recycle=0x%x\n", + label, shared->slot_first, shared->slot_last, + shared->tx_pos, shared->slot_queue_recycle); + + seq_puts(f, " Slots claimed:\n"); + + for (i = shared->slot_first; i <= shared->slot_last; i++) { + struct vchiq_slot_info slot_info = + *SLOT_INFO_FROM_INDEX(state, i); + if (slot_info.use_count != slot_info.release_count) { + seq_printf(f, " %d: %d/%d\n", i, slot_info.use_count, + slot_info.release_count); + } + } + + for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) { + seq_printf(f, " DEBUG: %s = %d(0x%x)\n", + debug_names[i], shared->debug[i], shared->debug[i]); + } +} + +static void +vchiq_dump_service_state(struct seq_file *f, struct vchiq_service *service) +{ + unsigned int ref_count; + + /* Don't include the lock just 
taken*/ + ref_count = kref_read(&service->ref_count) - 1; + seq_printf(f, "Service %u: %s (ref %u)", service->localport, + srvstate_names[service->srvstate], ref_count); + + if (service->srvstate != VCHIQ_SRVSTATE_FREE) { + char remoteport[30]; + struct vchiq_service_quota *quota = + &service->state->service_quotas[service->localport]; + int fourcc = service->base.fourcc; + int tx_pending, rx_pending, tx_size = 0, rx_size = 0; + + if (service->remoteport != VCHIQ_PORT_FREE) { + int len2 = scnprintf(remoteport, sizeof(remoteport), + "%u", service->remoteport); + + if (service->public_fourcc != VCHIQ_FOURCC_INVALID) + scnprintf(remoteport + len2, sizeof(remoteport) - len2, + " (client 0x%x)", service->client_id); + } else { + strscpy(remoteport, "n/a", sizeof(remoteport)); + } + + seq_printf(f, " '%p4cc' remote %s (msg use %d/%d, slot use %d/%d)\n", + &fourcc, remoteport, + quota->message_use_count, quota->message_quota, + quota->slot_use_count, quota->slot_quota); + + tx_pending = service->bulk_tx.local_insert - + service->bulk_tx.remote_insert; + if (tx_pending) { + unsigned int i = BULK_INDEX(service->bulk_tx.remove); + + tx_size = service->bulk_tx.bulks[i].size; + } + + rx_pending = service->bulk_rx.local_insert - + service->bulk_rx.remote_insert; + if (rx_pending) { + unsigned int i = BULK_INDEX(service->bulk_rx.remove); + + rx_size = service->bulk_rx.bulks[i].size; + } + + seq_printf(f, " Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)\n", + tx_pending, tx_size, rx_pending, rx_size); + + if (VCHIQ_ENABLE_STATS) { + seq_printf(f, " Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n", + service->stats.ctrl_tx_count, + service->stats.ctrl_tx_bytes, + service->stats.ctrl_rx_count, + service->stats.ctrl_rx_bytes); + + seq_printf(f, " Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu\n", + service->stats.bulk_tx_count, + service->stats.bulk_tx_bytes, + service->stats.bulk_rx_count, + service->stats.bulk_rx_bytes); + + seq_printf(f, " %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors\n", + service->stats.quota_stalls, + service->stats.slot_stalls, + service->stats.bulk_stalls, + service->stats.bulk_aborted_count, + service->stats.error_count); + } + } + + vchiq_dump_platform_service_state(f, service); +} + +void vchiq_dump_state(struct seq_file *f, struct vchiq_state *state) +{ + int i; + + seq_printf(f, "State %d: %s\n", state->id, + conn_state_names[state->conn_state]); + + seq_printf(f, " tx_pos=0x%x(@%pK), rx_pos=0x%x(@%pK)\n", + state->local->tx_pos, + state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK), + state->rx_pos, + state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK)); + + seq_printf(f, " Version: %d (min %d)\n", VCHIQ_VERSION, + VCHIQ_VERSION_MIN); + + if (VCHIQ_ENABLE_STATS) { + seq_printf(f, " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d\n", + state->stats.ctrl_tx_count, state->stats.ctrl_rx_count, + state->stats.error_count); + } + + seq_printf(f, " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)\n", + ((state->slot_queue_available * VCHIQ_SLOT_SIZE) - + state->local_tx_pos) / VCHIQ_SLOT_SIZE, + state->data_quota - state->data_use_count, + state->local->slot_queue_recycle - state->slot_queue_available, + state->stats.slot_stalls, state->stats.data_stalls); + + vchiq_dump_platform_state(f); + + vchiq_dump_shared_state(f, state, state->local, "Local"); + + vchiq_dump_shared_state(f, state, state->remote, "Remote"); + + vchiq_dump_platform_instances(state, f); + + for (i = 0; i < 
state->unused_service; i++) { + struct vchiq_service *service = find_service_by_port(state, i); + + if (service) { + vchiq_dump_service_state(f, service); + vchiq_service_put(service); + } + } +} + +int vchiq_send_remote_use(struct vchiq_state *state) +{ + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) + return -ENOTCONN; + + return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0); +} + +int vchiq_send_remote_use_active(struct vchiq_state *state) +{ + if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) + return -ENOTCONN; + + return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE, + NULL, NULL, 0, 0); +} + +void vchiq_log_dump_mem(struct device *dev, const char *label, u32 addr, + const void *void_mem, size_t num_bytes) +{ + const u8 *mem = void_mem; + size_t offset; + char line_buf[100]; + char *s; + + while (num_bytes > 0) { + s = line_buf; + + for (offset = 0; offset < 16; offset++) { + if (offset < num_bytes) + s += scnprintf(s, 4, "%02x ", mem[offset]); + else + s += scnprintf(s, 4, " "); + } + + for (offset = 0; offset < 16; offset++) { + if (offset < num_bytes) { + u8 ch = mem[offset]; + + if ((ch < ' ') || (ch > '~')) + ch = '.'; + *s++ = (char)ch; + } + } + *s++ = '\0'; + + dev_dbg(dev, "core: %s: %08x: %s\n", label, addr, line_buf); + + addr += 16; + mem += 16; + if (num_bytes > 16) + num_bytes -= 16; + else + num_bytes = 0; + } +} diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c new file mode 100644 index 000000000000..c82326a9b6d9 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_debugfs.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. + * Copyright (c) 2010-2012 Broadcom. All rights reserved. + */ + +#include <linux/debugfs.h> +#include <linux/raspberrypi/vchiq_core.h> +#include <linux/raspberrypi/vchiq_arm.h> +#include <linux/raspberrypi/vchiq_debugfs.h> + +#ifdef CONFIG_DEBUG_FS + +#define DEBUGFS_WRITE_BUF_SIZE 256 + +/* Global 'vchiq' debugfs and clients entry used by all instances */ +static struct dentry *vchiq_dbg_dir; +static struct dentry *vchiq_dbg_clients; + +static int debugfs_usecount_show(struct seq_file *f, void *offset) +{ + struct vchiq_instance *instance = f->private; + int use_count; + + use_count = vchiq_instance_get_use_count(instance); + seq_printf(f, "%d\n", use_count); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(debugfs_usecount); + +static int debugfs_trace_show(struct seq_file *f, void *offset) +{ + struct vchiq_instance *instance = f->private; + int trace; + + trace = vchiq_instance_get_trace(instance); + seq_printf(f, "%s\n", trace ? 
"Y" : "N"); + + return 0; +} + +static int vchiq_dump_show(struct seq_file *f, void *offset) +{ + struct vchiq_state *state = f->private; + + vchiq_dump_state(f, state); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(vchiq_dump); + +static int debugfs_trace_open(struct inode *inode, struct file *file) +{ + return single_open(file, debugfs_trace_show, inode->i_private); +} + +static ssize_t debugfs_trace_write(struct file *file, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct seq_file *f = (struct seq_file *)file->private_data; + struct vchiq_instance *instance = f->private; + char firstchar; + + if (copy_from_user(&firstchar, buffer, 1)) + return -EFAULT; + + switch (firstchar) { + case 'Y': + case 'y': + case '1': + vchiq_instance_set_trace(instance, 1); + break; + case 'N': + case 'n': + case '0': + vchiq_instance_set_trace(instance, 0); + break; + default: + break; + } + + *ppos += count; + + return count; +} + +static const struct file_operations debugfs_trace_fops = { + .owner = THIS_MODULE, + .open = debugfs_trace_open, + .write = debugfs_trace_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* add an instance (process) to the debugfs entries */ +void vchiq_debugfs_add_instance(struct vchiq_instance *instance) +{ + char pidstr[16]; + struct dentry *top; + + snprintf(pidstr, sizeof(pidstr), "%d", + vchiq_instance_get_pid(instance)); + + top = debugfs_create_dir(pidstr, vchiq_dbg_clients); + + debugfs_create_file("use_count", 0444, top, instance, + &debugfs_usecount_fops); + debugfs_create_file("trace", 0644, top, instance, &debugfs_trace_fops); + + vchiq_instance_get_debugfs_node(instance)->dentry = top; +} + +void vchiq_debugfs_remove_instance(struct vchiq_instance *instance) +{ + struct vchiq_debugfs_node *node = + vchiq_instance_get_debugfs_node(instance); + + debugfs_remove_recursive(node->dentry); +} + +void vchiq_debugfs_init(struct vchiq_state *state) +{ + vchiq_dbg_dir = debugfs_create_dir("vchiq", NULL); + vchiq_dbg_clients = debugfs_create_dir("clients", vchiq_dbg_dir); + + debugfs_create_file("state", S_IFREG | 0444, vchiq_dbg_dir, state, + &vchiq_dump_fops); +} + +/* remove all the debugfs entries */ +void vchiq_debugfs_deinit(void) +{ + debugfs_remove_recursive(vchiq_dbg_dir); +} + +#else /* CONFIG_DEBUG_FS */ + +void vchiq_debugfs_init(struct vchiq_state *state) +{ +} + +void vchiq_debugfs_deinit(void) +{ +} + +void vchiq_debugfs_add_instance(struct vchiq_instance *instance) +{ +} + +void vchiq_debugfs_remove_instance(struct vchiq_instance *instance) +{ +} + +#endif /* CONFIG_DEBUG_FS */ diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c b/drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c new file mode 100644 index 000000000000..0f3dde2657d6 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_dev.c @@ -0,0 +1,1355 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved. + * Copyright (c) 2010-2012 Broadcom. All rights reserved. 
+ */ + +#include <linux/cdev.h> +#include <linux/fs.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/compat.h> +#include <linux/miscdevice.h> + +#include <linux/raspberrypi/vchiq_core.h> +#include <linux/raspberrypi/vchiq_arm.h> +#include <linux/raspberrypi/vchiq_debugfs.h> + +#include "vchiq_ioctl.h" + +static const char *const ioctl_names[] = { + "CONNECT", + "SHUTDOWN", + "CREATE_SERVICE", + "REMOVE_SERVICE", + "QUEUE_MESSAGE", + "QUEUE_BULK_TRANSMIT", + "QUEUE_BULK_RECEIVE", + "AWAIT_COMPLETION", + "DEQUEUE_MESSAGE", + "GET_CLIENT_ID", + "GET_CONFIG", + "CLOSE_SERVICE", + "USE_SERVICE", + "RELEASE_SERVICE", + "SET_SERVICE_OPTION", + "DUMP_PHYS_MEM", + "LIB_VERSION", + "CLOSE_DELIVERED" +}; + +static_assert(ARRAY_SIZE(ioctl_names) == (VCHIQ_IOC_MAX + 1)); + +static void +user_service_free(void *userdata) +{ + kfree(userdata); +} + +static void close_delivered(struct user_service *user_service) +{ + dev_dbg(user_service->service->state->dev, + "arm: (handle=%x)\n", user_service->service->handle); + + if (user_service->close_pending) { + /* Allow the underlying service to be culled */ + vchiq_service_put(user_service->service); + + /* Wake the user-thread blocked in close_ or remove_service */ + complete(&user_service->close_event); + + user_service->close_pending = 0; + } +} + +struct vchiq_io_copy_callback_context { + struct vchiq_element *element; + size_t element_offset; + unsigned long elements_to_go; +}; + +static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest, + size_t offset, size_t maxsize) +{ + struct vchiq_io_copy_callback_context *cc = context; + size_t total_bytes_copied = 0; + size_t bytes_this_round; + + while (total_bytes_copied < maxsize) { + if (!cc->elements_to_go) + return total_bytes_copied; + + if (!cc->element->size) { + cc->elements_to_go--; + cc->element++; + cc->element_offset = 0; + continue; + } + + bytes_this_round = min(cc->element->size - cc->element_offset, + maxsize - total_bytes_copied); + + if (copy_from_user(dest + total_bytes_copied, + cc->element->data + cc->element_offset, + bytes_this_round)) + return -EFAULT; + + cc->element_offset += bytes_this_round; + total_bytes_copied += bytes_this_round; + + if (cc->element_offset == cc->element->size) { + cc->elements_to_go--; + cc->element++; + cc->element_offset = 0; + } + } + + return maxsize; +} + +static int +vchiq_ioc_queue_message(struct vchiq_instance *instance, unsigned int handle, + struct vchiq_element *elements, unsigned long count) +{ + struct vchiq_io_copy_callback_context context; + int status = 0; + unsigned long i; + size_t total_size = 0; + + context.element = elements; + context.element_offset = 0; + context.elements_to_go = count; + + for (i = 0; i < count; i++) { + if (!elements[i].data && elements[i].size != 0) + return -EFAULT; + + total_size += elements[i].size; + } + + status = vchiq_queue_message(instance, handle, vchiq_ioc_copy_element_data, + &context, total_size); + + if (status == -EINVAL) + return -EIO; + else if (status == -EAGAIN) + return -EINTR; + return 0; +} + +static int vchiq_ioc_create_service(struct vchiq_instance *instance, + struct vchiq_create_service *args) +{ + struct user_service *user_service = NULL; + struct vchiq_service *service; + int status = 0; + struct vchiq_service_params_kernel params; + int srvstate; + + if (args->is_open && !instance->connected) + return -ENOTCONN; + + user_service = kmalloc(sizeof(*user_service), GFP_KERNEL); + if (!user_service) + return -ENOMEM; + + if (args->is_open) { + srvstate = 
VCHIQ_SRVSTATE_OPENING; + } else { + srvstate = instance->connected ? + VCHIQ_SRVSTATE_LISTENING : VCHIQ_SRVSTATE_HIDDEN; + } + + params = (struct vchiq_service_params_kernel) { + .fourcc = args->params.fourcc, + .callback = service_callback, + .userdata = user_service, + .version = args->params.version, + .version_min = args->params.version_min, + }; + service = vchiq_add_service_internal(instance->state, ¶ms, + srvstate, instance, + user_service_free); + if (!service) { + kfree(user_service); + return -EEXIST; + } + + user_service->service = service; + user_service->userdata = args->params.userdata; + user_service->instance = instance; + user_service->is_vchi = (args->is_vchi != 0); + user_service->dequeue_pending = 0; + user_service->close_pending = 0; + user_service->message_available_pos = instance->completion_remove - 1; + user_service->msg_insert = 0; + user_service->msg_remove = 0; + init_completion(&user_service->insert_event); + init_completion(&user_service->remove_event); + init_completion(&user_service->close_event); + + if (args->is_open) { + status = vchiq_open_service_internal(service, instance->pid); + if (status) { + vchiq_remove_service(instance, service->handle); + return (status == -EAGAIN) ? + -EINTR : -EIO; + } + } + args->handle = service->handle; + + return 0; +} + +static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance, + struct vchiq_dequeue_message *args) +{ + struct user_service *user_service; + struct vchiq_service *service; + struct vchiq_header *header; + int ret; + + DEBUG_INITIALISE(instance->state->local); + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); + service = find_service_for_instance(instance, args->handle); + if (!service) + return -EINVAL; + + user_service = (struct user_service *)service->base.userdata; + if (user_service->is_vchi == 0) { + ret = -EINVAL; + goto out; + } + + spin_lock(&service->state->msg_queue_spinlock); + if (user_service->msg_remove == user_service->msg_insert) { + if (!args->blocking) { + spin_unlock(&service->state->msg_queue_spinlock); + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); + ret = -EWOULDBLOCK; + goto out; + } + user_service->dequeue_pending = 1; + ret = 0; + do { + spin_unlock(&service->state->msg_queue_spinlock); + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); + if (wait_for_completion_interruptible(&user_service->insert_event)) { + dev_dbg(service->state->dev, "arm: DEQUEUE_MESSAGE interrupted\n"); + ret = -EINTR; + break; + } + spin_lock(&service->state->msg_queue_spinlock); + } while (user_service->msg_remove == user_service->msg_insert); + + if (ret) + goto out; + } + + if (WARN_ON_ONCE((int)(user_service->msg_insert - + user_service->msg_remove) < 0)) { + spin_unlock(&service->state->msg_queue_spinlock); + ret = -EINVAL; + goto out; + } + + header = user_service->msg_queue[user_service->msg_remove & + (MSG_QUEUE_SIZE - 1)]; + user_service->msg_remove++; + spin_unlock(&service->state->msg_queue_spinlock); + + complete(&user_service->remove_event); + if (!header) { + ret = -ENOTCONN; + } else if (header->size <= args->bufsize) { + /* Copy to user space if msgbuf is not NULL */ + if (!args->buf || (copy_to_user(args->buf, header->data, header->size) == 0)) { + ret = header->size; + vchiq_release_message(instance, service->handle, header); + } else { + ret = -EFAULT; + } + } else { + dev_err(service->state->dev, + "arm: header %p: bufsize %x < size %x\n", + header, args->bufsize, header->size); + WARN(1, "invalid size\n"); + ret = -EMSGSIZE; + } + DEBUG_TRACE(DEQUEUE_MESSAGE_LINE); +out: + vchiq_service_put(service); + return 
ret; +} + +static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance, + struct vchiq_queue_bulk_transfer *args, + enum vchiq_bulk_dir dir, + enum vchiq_bulk_mode __user *mode) +{ + struct vchiq_service *service; + struct bulk_waiter_node *waiter = NULL, *iter; + struct vchiq_bulk bulk_params = {}; + int status = 0; + int ret; + + service = find_service_for_instance(instance, args->handle); + if (!service) + return -EINVAL; + + if (args->mode == VCHIQ_BULK_MODE_BLOCKING) { + waiter = kzalloc(sizeof(*waiter), GFP_KERNEL); + if (!waiter) { + ret = -ENOMEM; + goto out; + } + + bulk_params.uoffset = args->data; + bulk_params.mode = args->mode; + bulk_params.size = args->size; + bulk_params.dir = dir; + bulk_params.waiter = &waiter->bulk_waiter; + + status = vchiq_bulk_xfer_blocking(instance, args->handle, + &bulk_params); + } else if (args->mode == VCHIQ_BULK_MODE_WAITING) { + mutex_lock(&instance->bulk_waiter_list_mutex); + list_for_each_entry(iter, &instance->bulk_waiter_list, + list) { + if (iter->pid == current->pid) { + list_del(&iter->list); + waiter = iter; + break; + } + } + mutex_unlock(&instance->bulk_waiter_list_mutex); + if (!waiter) { + dev_err(service->state->dev, + "arm: no bulk_waiter found for pid %d\n", current->pid); + ret = -ESRCH; + goto out; + } + dev_dbg(service->state->dev, "arm: found bulk_waiter %p for pid %d\n", + waiter, current->pid); + + status = vchiq_bulk_xfer_waiting(instance, args->handle, + &waiter->bulk_waiter); + } else { + bulk_params.uoffset = args->data; + bulk_params.mode = args->mode; + bulk_params.size = args->size; + bulk_params.dir = dir; + bulk_params.cb_userdata = args->userdata; + + status = vchiq_bulk_xfer_callback(instance, args->handle, + &bulk_params); + } + + if (!waiter) { + ret = 0; + goto out; + } + + if ((status != -EAGAIN) || fatal_signal_pending(current) || + !waiter->bulk_waiter.bulk) { + if (waiter->bulk_waiter.bulk) { + /* Cancel the signal when the transfer completes. 
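+ * Clearing bulk->waiter under the bulk_waiter_spinlock also stops + * the completion path from signalling a waiter that is about to be + * freed.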
*/ + spin_lock(&service->state->bulk_waiter_spinlock); + waiter->bulk_waiter.bulk->waiter = NULL; + spin_unlock(&service->state->bulk_waiter_spinlock); + } + kfree(waiter); + ret = 0; + } else { + const enum vchiq_bulk_mode mode_waiting = + VCHIQ_BULK_MODE_WAITING; + waiter->pid = current->pid; + mutex_lock(&instance->bulk_waiter_list_mutex); + list_add(&waiter->list, &instance->bulk_waiter_list); + mutex_unlock(&instance->bulk_waiter_list_mutex); + dev_dbg(service->state->dev, "arm: saved bulk_waiter %p for pid %d\n", + waiter, current->pid); + + ret = put_user(mode_waiting, mode); + } +out: + vchiq_service_put(service); + if (ret) + return ret; + else if (status == -EINVAL) + return -EIO; + else if (status == -EAGAIN) + return -EINTR; + return 0; +} + +/* read a user pointer value from an array of pointers in user space */ +static inline int vchiq_get_user_ptr(void __user **buf, void __user *ubuf, int index) +{ + int ret; + + if (in_compat_syscall()) { + compat_uptr_t ptr32; + compat_uptr_t __user *uptr = ubuf; + + ret = get_user(ptr32, uptr + index); + if (ret) + return ret; + + *buf = compat_ptr(ptr32); + } else { + uintptr_t ptr, __user *uptr = ubuf; + + ret = get_user(ptr, uptr + index); + + if (ret) + return ret; + + *buf = (void __user *)ptr; + } + + return 0; +} + +struct vchiq_completion_data32 { + enum vchiq_reason reason; + compat_uptr_t header; + compat_uptr_t service_userdata; + compat_uptr_t cb_data; +}; + +static int vchiq_put_completion(struct vchiq_completion_data __user *buf, + struct vchiq_completion_data *completion, + int index) +{ + struct vchiq_completion_data32 __user *buf32 = (void __user *)buf; + + if (in_compat_syscall()) { + struct vchiq_completion_data32 tmp = { + .reason = completion->reason, + .header = ptr_to_compat(completion->header), + .service_userdata = ptr_to_compat(completion->service_userdata), + .cb_data = ptr_to_compat(completion->cb_userdata), + }; + if (copy_to_user(&buf32[index], &tmp, sizeof(tmp))) + return -EFAULT; + } else { + if (copy_to_user(&buf[index], completion, sizeof(*completion))) + return -EFAULT; + } + + return 0; +} + +static int vchiq_ioc_await_completion(struct vchiq_instance *instance, + struct vchiq_await_completion *args, + int __user *msgbufcountp) +{ + int msgbufcount; + int remove; + int ret; + + DEBUG_INITIALISE(instance->state->local); + + DEBUG_TRACE(AWAIT_COMPLETION_LINE); + if (!instance->connected) + return -ENOTCONN; + + mutex_lock(&instance->completion_mutex); + + DEBUG_TRACE(AWAIT_COMPLETION_LINE); + while ((instance->completion_remove == instance->completion_insert) && !instance->closing) { + int rc; + + DEBUG_TRACE(AWAIT_COMPLETION_LINE); + mutex_unlock(&instance->completion_mutex); + rc = wait_for_completion_interruptible(&instance->insert_event); + mutex_lock(&instance->completion_mutex); + if (rc) { + DEBUG_TRACE(AWAIT_COMPLETION_LINE); + dev_dbg(instance->state->dev, "arm: AWAIT_COMPLETION interrupted\n"); + ret = -EINTR; + goto out; + } + } + DEBUG_TRACE(AWAIT_COMPLETION_LINE); + + msgbufcount = args->msgbufcount; + remove = instance->completion_remove; + + for (ret = 0; ret < args->count; ret++) { + struct vchiq_completion_data_kernel *completion; + struct vchiq_completion_data user_completion; + struct vchiq_service *service; + struct user_service *user_service; + struct vchiq_header *header; + + if (remove == instance->completion_insert) + break; + + completion = &instance->completions[remove & (MAX_COMPLETIONS - 1)]; + + /* + * A read memory barrier is needed to stop + * prefetch of a stale completion
record + */ + rmb(); + + service = completion->service_userdata; + user_service = service->base.userdata; + + memset(&user_completion, 0, sizeof(user_completion)); + user_completion = (struct vchiq_completion_data) { + .reason = completion->reason, + .service_userdata = user_service->userdata, + }; + + header = completion->header; + if (header) { + void __user *msgbuf; + int msglen; + + msglen = header->size + sizeof(struct vchiq_header); + /* This must be a VCHIQ-style service */ + if (args->msgbufsize < msglen) { + dev_err(service->state->dev, + "arm: header %p: msgbufsize %x < msglen %x\n", + header, args->msgbufsize, msglen); + WARN(1, "invalid message size\n"); + if (ret == 0) + ret = -EMSGSIZE; + break; + } + if (msgbufcount <= 0) + /* Stall here for lack of a buffer for the message. */ + break; + /* Get the pointer from user space */ + msgbufcount--; + if (vchiq_get_user_ptr(&msgbuf, args->msgbufs, + msgbufcount)) { + if (ret == 0) + ret = -EFAULT; + break; + } + + /* Copy the message to user space */ + if (copy_to_user(msgbuf, header, msglen)) { + if (ret == 0) + ret = -EFAULT; + break; + } + + /* Now it has been copied, the message can be released. */ + vchiq_release_message(instance, service->handle, header); + + /* The completion must point to the msgbuf. */ + user_completion.header = msgbuf; + } + + if ((completion->reason == VCHIQ_SERVICE_CLOSED) && + !instance->use_close_delivered) + vchiq_service_put(service); + + user_completion.cb_userdata = completion->cb_userdata; + + if (vchiq_put_completion(args->buf, &user_completion, ret)) { + if (ret == 0) + ret = -EFAULT; + break; + } + + /* + * Ensure that the above copy has completed + * before advancing the remove pointer. + */ + mb(); + remove++; + instance->completion_remove = remove; + } + + if (msgbufcount != args->msgbufcount) { + if (put_user(msgbufcount, msgbufcountp)) + ret = -EFAULT; + } +out: + if (ret) + complete(&instance->remove_event); + mutex_unlock(&instance->completion_mutex); + DEBUG_TRACE(AWAIT_COMPLETION_LINE); + + return ret; +} + +static long +vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct vchiq_instance *instance = file->private_data; + int status = 0; + struct vchiq_service *service = NULL; + long ret = 0; + int i, rc; + + dev_dbg(instance->state->dev, "arm: instance %p, cmd %s, arg %lx\n", instance, + ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ? 
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg); + + switch (cmd) { + case VCHIQ_IOC_SHUTDOWN: + if (!instance->connected) + break; + + /* Remove all services */ + i = 0; + while ((service = next_service_by_instance(instance->state, + instance, &i))) { + status = vchiq_remove_service(instance, service->handle); + vchiq_service_put(service); + if (status) + break; + } + service = NULL; + + if (!status) { + /* Wake the completion thread and ask it to exit */ + instance->closing = 1; + complete(&instance->insert_event); + } + + break; + + case VCHIQ_IOC_CONNECT: + if (instance->connected) { + ret = -EINVAL; + break; + } + rc = mutex_lock_killable(&instance->state->mutex); + if (rc) { + dev_err(instance->state->dev, + "arm: vchiq: connect: could not lock mutex for state %d: %d\n", + instance->state->id, rc); + ret = -EINTR; + break; + } + status = vchiq_connect_internal(instance->state, instance); + mutex_unlock(&instance->state->mutex); + + if (!status) + instance->connected = 1; + else + dev_err(instance->state->dev, + "arm: vchiq: could not connect: %d\n", status); + break; + + case VCHIQ_IOC_CREATE_SERVICE: { + struct vchiq_create_service __user *argp; + struct vchiq_create_service args; + + argp = (void __user *)arg; + if (copy_from_user(&args, argp, sizeof(args))) { + ret = -EFAULT; + break; + } + + ret = vchiq_ioc_create_service(instance, &args); + if (ret < 0) + break; + + if (put_user(args.handle, &argp->handle)) { + vchiq_remove_service(instance, args.handle); + ret = -EFAULT; + } + } break; + + case VCHIQ_IOC_CLOSE_SERVICE: + case VCHIQ_IOC_REMOVE_SERVICE: { + unsigned int handle = (unsigned int)arg; + struct user_service *user_service; + + service = find_service_for_instance(instance, handle); + if (!service) { + ret = -EINVAL; + break; + } + + user_service = service->base.userdata; + + /* + * close_pending is false on first entry, and when the + * wait in vchiq_close_service has been interrupted. + */ + if (!user_service->close_pending) { + status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ? + vchiq_close_service(instance, service->handle) : + vchiq_remove_service(instance, service->handle); + if (status) + break; + } + + /* + * close_pending is true once the underlying service + * has been closed until the client library calls the + * CLOSE_DELIVERED ioctl, signalling close_event. + */ + if (user_service->close_pending && + wait_for_completion_interruptible(&user_service->close_event)) + status = -EAGAIN; + break; + } + + case VCHIQ_IOC_USE_SERVICE: + case VCHIQ_IOC_RELEASE_SERVICE: { + unsigned int handle = (unsigned int)arg; + + service = find_service_for_instance(instance, handle); + if (service) { + ret = (cmd == VCHIQ_IOC_USE_SERVICE) ? + vchiq_use_service_internal(service) : + vchiq_release_service_internal(service); + if (ret) { + dev_err(instance->state->dev, + "suspend: cmd %s returned error %ld for service %p4cc:%03d\n", + (cmd == VCHIQ_IOC_USE_SERVICE) ? 
+ "VCHIQ_IOC_USE_SERVICE" : + "VCHIQ_IOC_RELEASE_SERVICE", + ret, &service->base.fourcc, + service->client_id); + } + } else { + ret = -EINVAL; + } + } break; + + case VCHIQ_IOC_QUEUE_MESSAGE: { + struct vchiq_queue_message args; + + if (copy_from_user(&args, (const void __user *)arg, + sizeof(args))) { + ret = -EFAULT; + break; + } + + service = find_service_for_instance(instance, args.handle); + + if (service && (args.count <= MAX_ELEMENTS)) { + /* Copy elements into kernel space */ + struct vchiq_element elements[MAX_ELEMENTS]; + + if (copy_from_user(elements, args.elements, + args.count * sizeof(struct vchiq_element)) == 0) + ret = vchiq_ioc_queue_message(instance, args.handle, elements, + args.count); + else + ret = -EFAULT; + } else { + ret = -EINVAL; + } + } break; + + case VCHIQ_IOC_QUEUE_BULK_TRANSMIT: + case VCHIQ_IOC_QUEUE_BULK_RECEIVE: { + struct vchiq_queue_bulk_transfer args; + struct vchiq_queue_bulk_transfer __user *argp; + + enum vchiq_bulk_dir dir = + (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ? + VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE; + + argp = (void __user *)arg; + if (copy_from_user(&args, argp, sizeof(args))) { + ret = -EFAULT; + break; + } + + ret = vchiq_irq_queue_bulk_tx_rx(instance, &args, + dir, &argp->mode); + } break; + + case VCHIQ_IOC_AWAIT_COMPLETION: { + struct vchiq_await_completion args; + struct vchiq_await_completion __user *argp; + + argp = (void __user *)arg; + if (copy_from_user(&args, argp, sizeof(args))) { + ret = -EFAULT; + break; + } + + ret = vchiq_ioc_await_completion(instance, &args, + &argp->msgbufcount); + } break; + + case VCHIQ_IOC_DEQUEUE_MESSAGE: { + struct vchiq_dequeue_message args; + + if (copy_from_user(&args, (const void __user *)arg, + sizeof(args))) { + ret = -EFAULT; + break; + } + + ret = vchiq_ioc_dequeue_message(instance, &args); + } break; + + case VCHIQ_IOC_GET_CLIENT_ID: { + unsigned int handle = (unsigned int)arg; + + ret = vchiq_get_client_id(instance, handle); + } break; + + case VCHIQ_IOC_GET_CONFIG: { + struct vchiq_get_config args; + struct vchiq_config config; + + if (copy_from_user(&args, (const void __user *)arg, + sizeof(args))) { + ret = -EFAULT; + break; + } + if (args.config_size > sizeof(config)) { + ret = -EINVAL; + break; + } + + vchiq_get_config(&config); + if (copy_to_user(args.pconfig, &config, args.config_size)) { + ret = -EFAULT; + break; + } + } break; + + case VCHIQ_IOC_SET_SERVICE_OPTION: { + struct vchiq_set_service_option args; + + if (copy_from_user(&args, (const void __user *)arg, + sizeof(args))) { + ret = -EFAULT; + break; + } + + service = find_service_for_instance(instance, args.handle); + if (!service) { + ret = -EINVAL; + break; + } + + ret = vchiq_set_service_option(instance, args.handle, args.option, + args.value); + } break; + + case VCHIQ_IOC_LIB_VERSION: { + unsigned int lib_version = (unsigned int)arg; + + if (lib_version < VCHIQ_VERSION_MIN) + ret = -EINVAL; + else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED) + instance->use_close_delivered = 1; + } break; + + case VCHIQ_IOC_CLOSE_DELIVERED: { + unsigned int handle = (unsigned int)arg; + + service = find_closed_service_for_instance(instance, handle); + if (service) { + struct user_service *user_service = + (struct user_service *)service->base.userdata; + close_delivered(user_service); + } else { + ret = -EINVAL; + } + } break; + + default: + ret = -ENOTTY; + break; + } + + if (service) + vchiq_service_put(service); + + if (ret == 0) { + if (status == -EINVAL) + ret = -EIO; + else if (status == -EAGAIN) + ret = -EINTR; + } + + if 
(!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK)) { + dev_dbg(instance->state->dev, + "arm: ioctl instance %p, cmd %s -> status %d, %ld\n", + instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ? + ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret); + } else { + dev_dbg(instance->state->dev, + "arm: ioctl instance %p, cmd %s -> status %d, %ld\n", + instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ? + ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret); + } + + return ret; +} + +#if defined(CONFIG_COMPAT) + +struct vchiq_service_params32 { + int fourcc; + compat_uptr_t callback; + compat_uptr_t userdata; + short version; /* Increment for non-trivial changes */ + short version_min; /* Update for incompatible changes */ +}; + +struct vchiq_create_service32 { + struct vchiq_service_params32 params; + int is_open; + int is_vchi; + unsigned int handle; /* OUT */ +}; + +#define VCHIQ_IOC_CREATE_SERVICE32 \ + _IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service32) + +static long +vchiq_compat_ioctl_create_service(struct file *file, unsigned int cmd, + struct vchiq_create_service32 __user *ptrargs32) +{ + struct vchiq_create_service args; + struct vchiq_create_service32 args32; + struct vchiq_instance *instance = file->private_data; + long ret; + + if (copy_from_user(&args32, ptrargs32, sizeof(args32))) + return -EFAULT; + + args = (struct vchiq_create_service) { + .params = { + .fourcc = args32.params.fourcc, + .callback = compat_ptr(args32.params.callback), + .userdata = compat_ptr(args32.params.userdata), + .version = args32.params.version, + .version_min = args32.params.version_min, + }, + .is_open = args32.is_open, + .is_vchi = args32.is_vchi, + .handle = args32.handle, + }; + + ret = vchiq_ioc_create_service(instance, &args); + if (ret < 0) + return ret; + + if (put_user(args.handle, &ptrargs32->handle)) { + vchiq_remove_service(instance, args.handle); + return -EFAULT; + } + + return 0; +} + +struct vchiq_element32 { + compat_uptr_t data; + unsigned int size; +}; + +struct vchiq_queue_message32 { + unsigned int handle; + unsigned int count; + compat_uptr_t elements; +}; + +#define VCHIQ_IOC_QUEUE_MESSAGE32 \ + _IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message32) + +static long +vchiq_compat_ioctl_queue_message(struct file *file, + unsigned int cmd, + struct vchiq_queue_message32 __user *arg) +{ + struct vchiq_queue_message args; + struct vchiq_queue_message32 args32; + struct vchiq_service *service; + struct vchiq_instance *instance = file->private_data; + int ret; + + if (copy_from_user(&args32, arg, sizeof(args32))) + return -EFAULT; + + args = (struct vchiq_queue_message) { + .handle = args32.handle, + .count = args32.count, + .elements = compat_ptr(args32.elements), + }; + + if (args32.count > MAX_ELEMENTS) + return -EINVAL; + + service = find_service_for_instance(instance, args.handle); + if (!service) + return -EINVAL; + + if (args32.elements && args32.count) { + struct vchiq_element32 element32[MAX_ELEMENTS]; + struct vchiq_element elements[MAX_ELEMENTS]; + unsigned int count; + + if (copy_from_user(&element32, args.elements, + sizeof(element32))) { + vchiq_service_put(service); + return -EFAULT; + } + + for (count = 0; count < args32.count; count++) { + elements[count].data = + compat_ptr(element32[count].data); + elements[count].size = element32[count].size; + } + ret = vchiq_ioc_queue_message(instance, args.handle, elements, + args.count); + } else { + ret = -EINVAL; + } + vchiq_service_put(service); + + return ret; +} + +struct vchiq_queue_bulk_transfer32 { + unsigned
int handle; + compat_uptr_t data; + unsigned int size; + compat_uptr_t userdata; + enum vchiq_bulk_mode mode; +}; + +#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT32 \ + _IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer32) +#define VCHIQ_IOC_QUEUE_BULK_RECEIVE32 \ + _IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer32) + +static long +vchiq_compat_ioctl_queue_bulk(struct file *file, + unsigned int cmd, + struct vchiq_queue_bulk_transfer32 __user *argp) +{ + struct vchiq_queue_bulk_transfer32 args32; + struct vchiq_queue_bulk_transfer args; + enum vchiq_bulk_dir dir = (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT32) ? + VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE; + + if (copy_from_user(&args32, argp, sizeof(args32))) + return -EFAULT; + + args = (struct vchiq_queue_bulk_transfer) { + .handle = args32.handle, + .data = compat_ptr(args32.data), + .size = args32.size, + .userdata = compat_ptr(args32.userdata), + .mode = args32.mode, + }; + + return vchiq_irq_queue_bulk_tx_rx(file->private_data, &args, + dir, &argp->mode); +} + +struct vchiq_await_completion32 { + unsigned int count; + compat_uptr_t buf; + unsigned int msgbufsize; + unsigned int msgbufcount; /* IN/OUT */ + compat_uptr_t msgbufs; +}; + +#define VCHIQ_IOC_AWAIT_COMPLETION32 \ + _IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion32) + +static long +vchiq_compat_ioctl_await_completion(struct file *file, + unsigned int cmd, + struct vchiq_await_completion32 __user *argp) +{ + struct vchiq_await_completion args; + struct vchiq_await_completion32 args32; + + if (copy_from_user(&args32, argp, sizeof(args32))) + return -EFAULT; + + args = (struct vchiq_await_completion) { + .count = args32.count, + .buf = compat_ptr(args32.buf), + .msgbufsize = args32.msgbufsize, + .msgbufcount = args32.msgbufcount, + .msgbufs = compat_ptr(args32.msgbufs), + }; + + return vchiq_ioc_await_completion(file->private_data, &args, + &argp->msgbufcount); +} + +struct vchiq_dequeue_message32 { + unsigned int handle; + int blocking; + unsigned int bufsize; + compat_uptr_t buf; +}; + +#define VCHIQ_IOC_DEQUEUE_MESSAGE32 \ + _IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message32) + +static long +vchiq_compat_ioctl_dequeue_message(struct file *file, + unsigned int cmd, + struct vchiq_dequeue_message32 __user *arg) +{ + struct vchiq_dequeue_message32 args32; + struct vchiq_dequeue_message args; + + if (copy_from_user(&args32, arg, sizeof(args32))) + return -EFAULT; + + args = (struct vchiq_dequeue_message) { + .handle = args32.handle, + .blocking = args32.blocking, + .bufsize = args32.bufsize, + .buf = compat_ptr(args32.buf), + }; + + return vchiq_ioc_dequeue_message(file->private_data, &args); +} + +struct vchiq_get_config32 { + unsigned int config_size; + compat_uptr_t pconfig; +}; + +#define VCHIQ_IOC_GET_CONFIG32 \ + _IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config32) + +static long +vchiq_compat_ioctl_get_config(struct file *file, + unsigned int cmd, + struct vchiq_get_config32 __user *arg) +{ + struct vchiq_get_config32 args32; + struct vchiq_config config; + void __user *ptr; + + if (copy_from_user(&args32, arg, sizeof(args32))) + return -EFAULT; + if (args32.config_size > sizeof(config)) + return -EINVAL; + + vchiq_get_config(&config); + ptr = compat_ptr(args32.pconfig); + if (copy_to_user(ptr, &config, args32.config_size)) + return -EFAULT; + + return 0; +} + +static long +vchiq_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + void __user *argp = compat_ptr(arg); + + switch (cmd) { + case VCHIQ_IOC_CREATE_SERVICE32: + return 
vchiq_compat_ioctl_create_service(file, cmd, argp); + case VCHIQ_IOC_QUEUE_MESSAGE32: + return vchiq_compat_ioctl_queue_message(file, cmd, argp); + case VCHIQ_IOC_QUEUE_BULK_TRANSMIT32: + case VCHIQ_IOC_QUEUE_BULK_RECEIVE32: + return vchiq_compat_ioctl_queue_bulk(file, cmd, argp); + case VCHIQ_IOC_AWAIT_COMPLETION32: + return vchiq_compat_ioctl_await_completion(file, cmd, argp); + case VCHIQ_IOC_DEQUEUE_MESSAGE32: + return vchiq_compat_ioctl_dequeue_message(file, cmd, argp); + case VCHIQ_IOC_GET_CONFIG32: + return vchiq_compat_ioctl_get_config(file, cmd, argp); + default: + return vchiq_ioctl(file, cmd, (unsigned long)argp); + } +} + +#endif + +static int vchiq_open(struct inode *inode, struct file *file) +{ + struct miscdevice *vchiq_miscdev = file->private_data; + struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(vchiq_miscdev->parent); + struct vchiq_state *state = &mgmt->state; + struct vchiq_instance *instance; + + dev_dbg(state->dev, "arm: vchiq open\n"); + + if (!vchiq_remote_initialised(state)) { + dev_dbg(state->dev, "arm: vchiq has no connection to VideoCore\n"); + return -ENOTCONN; + } + + instance = kzalloc(sizeof(*instance), GFP_KERNEL); + if (!instance) + return -ENOMEM; + + instance->state = state; + instance->pid = current->tgid; + + vchiq_debugfs_add_instance(instance); + + init_completion(&instance->insert_event); + init_completion(&instance->remove_event); + mutex_init(&instance->completion_mutex); + mutex_init(&instance->bulk_waiter_list_mutex); + INIT_LIST_HEAD(&instance->bulk_waiter_list); + + file->private_data = instance; + + return 0; +} + +static int vchiq_release(struct inode *inode, struct file *file) +{ + struct vchiq_instance *instance = file->private_data; + struct vchiq_state *state = instance->state; + struct vchiq_service *service; + int ret = 0; + int i; + + dev_dbg(state->dev, "arm: instance=%p\n", instance); + + if (!vchiq_remote_initialised(state)) { + ret = -EPERM; + goto out; + } + + /* Ensure videocore is awake to allow termination. */ + vchiq_use_internal(instance->state, NULL, USE_TYPE_VCHIQ); + + mutex_lock(&instance->completion_mutex); + + /* Wake the completion thread and ask it to exit */ + instance->closing = 1; + complete(&instance->insert_event); + + mutex_unlock(&instance->completion_mutex); + + /* Wake the slot handler if the completion queue is full. */ + complete(&instance->remove_event); + + /* Mark all services for termination... */ + i = 0; + while ((service = next_service_by_instance(state, instance, &i))) { + struct user_service *user_service = service->base.userdata; + + /* Wake the slot handler if the msg queue is full. 
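+ * It may be blocked waiting for the client to dequeue a message, + * which will never happen once the instance is closing, so + * completing remove_event lets it make progress.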
*/ + complete(&user_service->remove_event); + + vchiq_terminate_service_internal(service); + vchiq_service_put(service); + } + + /* ...and wait for them to die */ + i = 0; + while ((service = next_service_by_instance(state, instance, &i))) { + struct user_service *user_service = service->base.userdata; + + wait_for_completion(&service->remove_event); + + if (WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE)) { + vchiq_service_put(service); + break; + } + + spin_lock(&service->state->msg_queue_spinlock); + + while (user_service->msg_remove != user_service->msg_insert) { + struct vchiq_header *header; + int m = user_service->msg_remove & (MSG_QUEUE_SIZE - 1); + + header = user_service->msg_queue[m]; + user_service->msg_remove++; + spin_unlock(&service->state->msg_queue_spinlock); + + if (header) + vchiq_release_message(instance, service->handle, header); + spin_lock(&service->state->msg_queue_spinlock); + } + + spin_unlock(&service->state->msg_queue_spinlock); + + vchiq_service_put(service); + } + + /* Release any closed services */ + while (instance->completion_remove != instance->completion_insert) { + struct vchiq_completion_data_kernel *completion; + struct vchiq_service *service; + + completion = &instance->completions[instance->completion_remove + & (MAX_COMPLETIONS - 1)]; + service = completion->service_userdata; + if (completion->reason == VCHIQ_SERVICE_CLOSED) { + struct user_service *user_service = + service->base.userdata; + + /* Wake any blocked user-thread */ + if (instance->use_close_delivered) + complete(&user_service->close_event); + vchiq_service_put(service); + } + instance->completion_remove++; + } + + /* Release the PEER service count. */ + vchiq_release_internal(instance->state, NULL); + + free_bulk_waiter(instance); + + vchiq_debugfs_remove_instance(instance); + + kfree(instance); + file->private_data = NULL; + +out: + return ret; +} + +static const struct file_operations +vchiq_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = vchiq_ioctl, +#if defined(CONFIG_COMPAT) + .compat_ioctl = vchiq_compat_ioctl, +#endif + .open = vchiq_open, + .release = vchiq_release, +}; + +static struct miscdevice vchiq_miscdev = { + .fops = &vchiq_fops, + .minor = MISC_DYNAMIC_MINOR, + .name = "vchiq", + +}; + +/** + * vchiq_register_chrdev - Register the char driver for vchiq + * and create the necessary class and + * device files in userspace. + * @parent: The parent of the char device. + * + * Returns 0 on success else returns the error code. + */ +int vchiq_register_chrdev(struct device *parent) +{ + vchiq_miscdev.parent = parent; + + return misc_register(&vchiq_miscdev); +} + +/** + * vchiq_deregister_chrdev - Deregister and cleanup the vchiq char + * driver and device files + */ +void vchiq_deregister_chrdev(void) +{ + misc_deregister(&vchiq_miscdev); +} diff --git a/drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h b/drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h new file mode 100644 index 000000000000..d0c759f6d8ea --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-interface/vchiq_ioctl.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright (c) 2010-2012 Broadcom. All rights reserved. 
*/ + +#ifndef VCHIQ_IOCTLS_H +#define VCHIQ_IOCTLS_H + +#include <linux/ioctl.h> +#include <linux/raspberrypi/vchiq.h> + +#define VCHIQ_IOC_MAGIC 0xc4 +#define VCHIQ_INVALID_HANDLE (~0) + +struct vchiq_service_params { + int fourcc; + int __user (*callback)(enum vchiq_reason reason, + struct vchiq_header *header, + unsigned int handle, + void *bulk_userdata); + void __user *userdata; + short version; /* Increment for non-trivial changes */ + short version_min; /* Update for incompatible changes */ +}; + +struct vchiq_create_service { + struct vchiq_service_params params; + int is_open; + int is_vchi; + unsigned int handle; /* OUT */ +}; + +struct vchiq_queue_message { + unsigned int handle; + unsigned int count; + const struct vchiq_element __user *elements; +}; + +struct vchiq_queue_bulk_transfer { + unsigned int handle; + void __user *data; + unsigned int size; + void __user *userdata; + enum vchiq_bulk_mode mode; +}; + +struct vchiq_completion_data { + enum vchiq_reason reason; + struct vchiq_header __user *header; + void __user *service_userdata; + void __user *cb_userdata; +}; + +struct vchiq_await_completion { + unsigned int count; + struct vchiq_completion_data __user *buf; + unsigned int msgbufsize; + unsigned int msgbufcount; /* IN/OUT */ + void * __user *msgbufs; +}; + +struct vchiq_dequeue_message { + unsigned int handle; + int blocking; + unsigned int bufsize; + void __user *buf; +}; + +struct vchiq_get_config { + unsigned int config_size; + struct vchiq_config __user *pconfig; +}; + +struct vchiq_set_service_option { + unsigned int handle; + enum vchiq_service_option option; + int value; +}; + +struct vchiq_dump_mem { + void __user *virt_addr; + size_t num_bytes; +}; + +#define VCHIQ_IOC_CONNECT _IO(VCHIQ_IOC_MAGIC, 0) +#define VCHIQ_IOC_SHUTDOWN _IO(VCHIQ_IOC_MAGIC, 1) +#define VCHIQ_IOC_CREATE_SERVICE \ + _IOWR(VCHIQ_IOC_MAGIC, 2, struct vchiq_create_service) +#define VCHIQ_IOC_REMOVE_SERVICE _IO(VCHIQ_IOC_MAGIC, 3) +#define VCHIQ_IOC_QUEUE_MESSAGE \ + _IOW(VCHIQ_IOC_MAGIC, 4, struct vchiq_queue_message) +#define VCHIQ_IOC_QUEUE_BULK_TRANSMIT \ + _IOWR(VCHIQ_IOC_MAGIC, 5, struct vchiq_queue_bulk_transfer) +#define VCHIQ_IOC_QUEUE_BULK_RECEIVE \ + _IOWR(VCHIQ_IOC_MAGIC, 6, struct vchiq_queue_bulk_transfer) +#define VCHIQ_IOC_AWAIT_COMPLETION \ + _IOWR(VCHIQ_IOC_MAGIC, 7, struct vchiq_await_completion) +#define VCHIQ_IOC_DEQUEUE_MESSAGE \ + _IOWR(VCHIQ_IOC_MAGIC, 8, struct vchiq_dequeue_message) +#define VCHIQ_IOC_GET_CLIENT_ID _IO(VCHIQ_IOC_MAGIC, 9) +#define VCHIQ_IOC_GET_CONFIG \ + _IOWR(VCHIQ_IOC_MAGIC, 10, struct vchiq_get_config) +#define VCHIQ_IOC_CLOSE_SERVICE _IO(VCHIQ_IOC_MAGIC, 11) +#define VCHIQ_IOC_USE_SERVICE _IO(VCHIQ_IOC_MAGIC, 12) +#define VCHIQ_IOC_RELEASE_SERVICE _IO(VCHIQ_IOC_MAGIC, 13) +#define VCHIQ_IOC_SET_SERVICE_OPTION \ + _IOW(VCHIQ_IOC_MAGIC, 14, struct vchiq_set_service_option) +#define VCHIQ_IOC_DUMP_PHYS_MEM \ + _IOW(VCHIQ_IOC_MAGIC, 15, struct vchiq_dump_mem) +#define VCHIQ_IOC_LIB_VERSION _IO(VCHIQ_IOC_MAGIC, 16) +#define VCHIQ_IOC_CLOSE_DELIVERED _IO(VCHIQ_IOC_MAGIC, 17) +#define VCHIQ_IOC_MAX 17 + +#endif diff --git a/drivers/platform/raspberrypi/vchiq-mmal/Kconfig b/drivers/platform/raspberrypi/vchiq-mmal/Kconfig new file mode 100644 index 000000000000..c99525a0bb45 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-mmal/Kconfig @@ -0,0 +1,7 @@ +config BCM2835_VCHIQ_MMAL + tristate "BCM2835 MMAL VCHIQ service" + depends on BCM2835_VCHIQ + help + Enables the MMAL API over VCHIQ interface as used for the + majority of the multimedia 
services on VideoCore. + Defaults to Y when the Broadcom BCM2835 camera host is selected. diff --git a/drivers/platform/raspberrypi/vchiq-mmal/Makefile b/drivers/platform/raspberrypi/vchiq-mmal/Makefile new file mode 100644 index 000000000000..6937f6534c26 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-mmal/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +bcm2835-mmal-vchiq-objs := mmal-vchiq.o + +obj-$(CONFIG_BCM2835_VCHIQ_MMAL) += bcm2835-mmal-vchiq.o diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-common.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-common.h new file mode 100644 index 000000000000..b33129403a30 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-common.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Broadcom BCM2835 V4L2 driver + * + * Copyright © 2013 Raspberry Pi (Trading) Ltd. + * + * Authors: Vincent Sanders @ Collabora + * Dave Stevenson @ Broadcom + * (now dave.stevenson@raspberrypi.org) + * Simon Mellor @ Broadcom + * Luke Diamand @ Broadcom + * + * MMAL structures + * + */ +#ifndef MMAL_COMMON_H +#define MMAL_COMMON_H + +#define MMAL_FOURCC(a, b, c, d) ((a) | (b << 8) | (c << 16) | (d << 24)) +#define MMAL_MAGIC MMAL_FOURCC('m', 'm', 'a', 'l') + +/** Special value signalling that time is not known */ +#define MMAL_TIME_UNKNOWN BIT_ULL(63) + +struct mmal_msg_context; + +/* mapping between v4l and mmal video modes */ +struct mmal_fmt { + u32 fourcc; /* v4l2 format id */ + int flags; /* v4l2 flags field */ + u32 mmal; + int depth; + u32 mmal_component; /* MMAL component index to be used to encode */ + u32 ybbp; /* depth of first Y plane for planar formats */ + bool remove_padding; /* Does the GPU have to remove padding, + * or can we hide padding via bytesperline. + */ +}; + +/* buffer for one video frame */ +struct mmal_buffer { + /* v4l buffer data -- must be first */ + struct vb2_v4l2_buffer vb; + + /* list of buffers available */ + struct list_head list; + + void *buffer; /* buffer pointer */ + unsigned long buffer_size; /* size of allocated buffer */ + + struct mmal_msg_context *msg_context; + + unsigned long length; + u32 mmal_flags; + s64 dts; + s64 pts; +}; + +/* colour effect parameters */ +struct mmal_colourfx { + s32 enable; + u32 u; + u32 v; +}; +#endif diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-encodings.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-encodings.h new file mode 100644 index 000000000000..e15ae7b24f73 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-encodings.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Broadcom BCM2835 V4L2 driver + * + * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ * + * Authors: Vincent Sanders @ Collabora + * Dave Stevenson @ Broadcom + * (now dave.stevenson@raspberrypi.org) + * Simon Mellor @ Broadcom + * Luke Diamand @ Broadcom + */ +#ifndef MMAL_ENCODINGS_H +#define MMAL_ENCODINGS_H + +#define MMAL_ENCODING_H264 MMAL_FOURCC('H', '2', '6', '4') +#define MMAL_ENCODING_H263 MMAL_FOURCC('H', '2', '6', '3') +#define MMAL_ENCODING_MP4V MMAL_FOURCC('M', 'P', '4', 'V') +#define MMAL_ENCODING_MP2V MMAL_FOURCC('M', 'P', '2', 'V') +#define MMAL_ENCODING_MP1V MMAL_FOURCC('M', 'P', '1', 'V') +#define MMAL_ENCODING_WMV3 MMAL_FOURCC('W', 'M', 'V', '3') +#define MMAL_ENCODING_WMV2 MMAL_FOURCC('W', 'M', 'V', '2') +#define MMAL_ENCODING_WMV1 MMAL_FOURCC('W', 'M', 'V', '1') +#define MMAL_ENCODING_WVC1 MMAL_FOURCC('W', 'V', 'C', '1') +#define MMAL_ENCODING_VP8 MMAL_FOURCC('V', 'P', '8', ' ') +#define MMAL_ENCODING_VP7 MMAL_FOURCC('V', 'P', '7', ' ') +#define MMAL_ENCODING_VP6 MMAL_FOURCC('V', 'P', '6', ' ') +#define MMAL_ENCODING_THEORA MMAL_FOURCC('T', 'H', 'E', 'O') +#define MMAL_ENCODING_SPARK MMAL_FOURCC('S', 'P', 'R', 'K') +#define MMAL_ENCODING_MJPEG MMAL_FOURCC('M', 'J', 'P', 'G') + +#define MMAL_ENCODING_JPEG MMAL_FOURCC('J', 'P', 'E', 'G') +#define MMAL_ENCODING_GIF MMAL_FOURCC('G', 'I', 'F', ' ') +#define MMAL_ENCODING_PNG MMAL_FOURCC('P', 'N', 'G', ' ') +#define MMAL_ENCODING_PPM MMAL_FOURCC('P', 'P', 'M', ' ') +#define MMAL_ENCODING_TGA MMAL_FOURCC('T', 'G', 'A', ' ') +#define MMAL_ENCODING_BMP MMAL_FOURCC('B', 'M', 'P', ' ') + +#define MMAL_ENCODING_I420 MMAL_FOURCC('I', '4', '2', '0') +#define MMAL_ENCODING_I420_SLICE MMAL_FOURCC('S', '4', '2', '0') +#define MMAL_ENCODING_YV12 MMAL_FOURCC('Y', 'V', '1', '2') +#define MMAL_ENCODING_I422 MMAL_FOURCC('I', '4', '2', '2') +#define MMAL_ENCODING_I422_SLICE MMAL_FOURCC('S', '4', '2', '2') +#define MMAL_ENCODING_YUYV MMAL_FOURCC('Y', 'U', 'Y', 'V') +#define MMAL_ENCODING_YVYU MMAL_FOURCC('Y', 'V', 'Y', 'U') +#define MMAL_ENCODING_UYVY MMAL_FOURCC('U', 'Y', 'V', 'Y') +#define MMAL_ENCODING_VYUY MMAL_FOURCC('V', 'Y', 'U', 'Y') +#define MMAL_ENCODING_NV12 MMAL_FOURCC('N', 'V', '1', '2') +#define MMAL_ENCODING_NV21 MMAL_FOURCC('N', 'V', '2', '1') +#define MMAL_ENCODING_ARGB MMAL_FOURCC('A', 'R', 'G', 'B') +#define MMAL_ENCODING_RGBA MMAL_FOURCC('R', 'G', 'B', 'A') +#define MMAL_ENCODING_ABGR MMAL_FOURCC('A', 'B', 'G', 'R') +#define MMAL_ENCODING_BGRA MMAL_FOURCC('B', 'G', 'R', 'A') +#define MMAL_ENCODING_RGB16 MMAL_FOURCC('R', 'G', 'B', '2') +#define MMAL_ENCODING_RGB24 MMAL_FOURCC('R', 'G', 'B', '3') +#define MMAL_ENCODING_RGB32 MMAL_FOURCC('R', 'G', 'B', '4') +#define MMAL_ENCODING_BGR16 MMAL_FOURCC('B', 'G', 'R', '2') +#define MMAL_ENCODING_BGR24 MMAL_FOURCC('B', 'G', 'R', '3') +#define MMAL_ENCODING_BGR32 MMAL_FOURCC('B', 'G', 'R', '4') + +/** SAND Video (YUVUV128) format, native format understood by VideoCore. + * This format is *not* opaque - if requested you will receive full frames + * of YUV_UV video. + */ +#define MMAL_ENCODING_YUVUV128 MMAL_FOURCC('S', 'A', 'N', 'D') + +/** VideoCore opaque image format, image handles are returned to + * the host but not the actual image data. 
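+ * The handles are typically only meaningful to other VideoCore + * components, allowing frames to be passed around without copying + * the pixel data to the host.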
+ */ +#define MMAL_ENCODING_OPAQUE MMAL_FOURCC('O', 'P', 'Q', 'V') + +/** An EGL image handle + */ +#define MMAL_ENCODING_EGL_IMAGE MMAL_FOURCC('E', 'G', 'L', 'I') + +/* @} */ + +/** \name Pre-defined audio encodings */ +/* @{ */ +#define MMAL_ENCODING_PCM_UNSIGNED_BE MMAL_FOURCC('P', 'C', 'M', 'U') +#define MMAL_ENCODING_PCM_UNSIGNED_LE MMAL_FOURCC('p', 'c', 'm', 'u') +#define MMAL_ENCODING_PCM_SIGNED_BE MMAL_FOURCC('P', 'C', 'M', 'S') +#define MMAL_ENCODING_PCM_SIGNED_LE MMAL_FOURCC('p', 'c', 'm', 's') +#define MMAL_ENCODING_PCM_FLOAT_BE MMAL_FOURCC('P', 'C', 'M', 'F') +#define MMAL_ENCODING_PCM_FLOAT_LE MMAL_FOURCC('p', 'c', 'm', 'f') + +/* Pre-defined H264 encoding variants */ + +/** ISO 14496-10 Annex B byte stream format */ +#define MMAL_ENCODING_VARIANT_H264_DEFAULT 0 +/** ISO 14496-15 AVC stream format */ +#define MMAL_ENCODING_VARIANT_H264_AVC1 MMAL_FOURCC('A', 'V', 'C', '1') +/** Implicitly delineated NAL units without emulation prevention */ +#define MMAL_ENCODING_VARIANT_H264_RAW MMAL_FOURCC('R', 'A', 'W', ' ') + +/** \defgroup MmalColorSpace List of pre-defined video color spaces + * This defines a list of common color spaces. This list isn't exhaustive and + * is only provided as a convenience to avoid clients having to use FourCC + * codes directly. However components are allowed to define and use their own + * FourCC codes. + */ +/* @{ */ + +/** Unknown color space */ +#define MMAL_COLOR_SPACE_UNKNOWN 0 +/** ITU-R BT.601-5 [SDTV] */ +#define MMAL_COLOR_SPACE_ITUR_BT601 MMAL_FOURCC('Y', '6', '0', '1') +/** ITU-R BT.709-3 [HDTV] */ +#define MMAL_COLOR_SPACE_ITUR_BT709 MMAL_FOURCC('Y', '7', '0', '9') +/** JPEG JFIF */ +#define MMAL_COLOR_SPACE_JPEG_JFIF MMAL_FOURCC('Y', 'J', 'F', 'I') +/** Title 47 Code of Federal Regulations (2003) 73.682 (a) (20) */ +#define MMAL_COLOR_SPACE_FCC MMAL_FOURCC('Y', 'F', 'C', 'C') +/** Society of Motion Picture and Television Engineers 240M (1999) */ +#define MMAL_COLOR_SPACE_SMPTE240M MMAL_FOURCC('Y', '2', '4', '0') +/** ITU-R BT.470-2 System M */ +#define MMAL_COLOR_SPACE_BT470_2_M MMAL_FOURCC('Y', '_', '_', 'M') +/** ITU-R BT.470-2 System BG */ +#define MMAL_COLOR_SPACE_BT470_2_BG MMAL_FOURCC('Y', '_', 'B', 'G') +/** JPEG JFIF, but with 16..255 luma */ +#define MMAL_COLOR_SPACE_JFIF_Y16_255 MMAL_FOURCC('Y', 'Y', '1', '6') +/* @} MmalColorSpace List */ + +#endif /* MMAL_ENCODINGS_H */ diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-common.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-common.h new file mode 100644 index 000000000000..492d4c5dca08 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-common.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Broadcom BCM2835 V4L2 driver + * + * Copyright © 2013 Raspberry Pi (Trading) Ltd.
+ * + * Authors: Vincent Sanders @ Collabora + * Dave Stevenson @ Broadcom + * (now dave.stevenson@raspberrypi.org) + * Simon Mellor @ Broadcom + * Luke Diamand @ Broadcom + */ + +#ifndef MMAL_MSG_COMMON_H +#define MMAL_MSG_COMMON_H + +#include <linux/types.h> + +enum mmal_msg_status { + MMAL_MSG_STATUS_SUCCESS = 0, /**< Success */ + MMAL_MSG_STATUS_ENOMEM, /**< Out of memory */ + MMAL_MSG_STATUS_ENOSPC, /**< Out of resources other than memory */ + MMAL_MSG_STATUS_EINVAL, /**< Argument is invalid */ + MMAL_MSG_STATUS_ENOSYS, /**< Function not implemented */ + MMAL_MSG_STATUS_ENOENT, /**< No such file or directory */ + MMAL_MSG_STATUS_ENXIO, /**< No such device or address */ + MMAL_MSG_STATUS_EIO, /**< I/O error */ + MMAL_MSG_STATUS_ESPIPE, /**< Illegal seek */ + MMAL_MSG_STATUS_ECORRUPT, /**< Data is corrupt */ + MMAL_MSG_STATUS_ENOTREADY, /**< Component is not ready */ + MMAL_MSG_STATUS_ECONFIG, /**< Component is not configured */ + MMAL_MSG_STATUS_EISCONN, /**< Port is already connected */ + MMAL_MSG_STATUS_ENOTCONN, /**< Port is disconnected */ + MMAL_MSG_STATUS_EAGAIN, /**< Resource temporarily unavailable. */ + MMAL_MSG_STATUS_EFAULT, /**< Bad address */ +}; + +struct mmal_rect { + s32 x; /**< x coordinate (from left) */ + s32 y; /**< y coordinate (from top) */ + s32 width; /**< width */ + s32 height; /**< height */ +}; + +#endif /* MMAL_MSG_COMMON_H */ diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-format.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-format.h new file mode 100644 index 000000000000..5569876d8c7d --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-format.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Broadcom BCM2835 V4L2 driver + * + * Copyright © 2013 Raspberry Pi (Trading) Ltd. + * + * Authors: Vincent Sanders @ Collabora + * Dave Stevenson @ Broadcom + * (now dave.stevenson@raspberrypi.org) + * Simon Mellor @ Broadcom + * Luke Diamand @ Broadcom + */ + +#ifndef MMAL_MSG_FORMAT_H +#define MMAL_MSG_FORMAT_H + +#include <linux/math.h> + +#include "mmal-msg-common.h" + +/* MMAL_ES_FORMAT_T */ + +struct mmal_audio_format { + u32 channels; /* Number of audio channels */ + u32 sample_rate; /* Sample rate */ + + u32 bits_per_sample; /* Bits per sample */ + u32 block_align; /* Size of a block of data */ +}; + +struct mmal_video_format { + u32 width; /* Width of frame in pixels */ + u32 height; /* Height of frame in rows of pixels */ + struct mmal_rect crop; /* Visible region of the frame */ + struct s32_fract frame_rate; /* Frame rate */ + struct s32_fract par; /* Pixel aspect ratio */ + + /* + * FourCC specifying the color space of the video stream. See the + * MmalColorSpace "pre-defined color spaces" for some examples. + */ + u32 color_space; +}; + +struct mmal_subpicture_format { + u32 x_offset; + u32 y_offset; +}; + +union mmal_es_specific_format { + struct mmal_audio_format audio; + struct mmal_video_format video; + struct mmal_subpicture_format subpicture; +}; + +/* Definition of an elementary stream format (MMAL_ES_FORMAT_T) */ +struct mmal_es_format_local { + u32 type; /* enum mmal_es_type */ + + u32 encoding; /* FourCC specifying encoding of the elementary + * stream. + */ + u32 encoding_variant; /* FourCC specifying the specific + * encoding variant of the elementary + * stream.
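+ * For H264, for example, this selects between the Annex B + * byte stream and AVC1 sample formats (see the + * MMAL_ENCODING_VARIANT_H264_* definitions in + * mmal-encodings.h).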
+ */ + + union mmal_es_specific_format *es; /* Type specific + * information for the + * elementary stream + */ + + u32 bitrate; /* Bitrate in bits per second */ + u32 flags; /* Flags describing properties of the elementary + * stream. + */ + + u32 extradata_size; /* Size of the codec specific data */ + u8 *extradata; /* Codec specific data */ +}; + +/* Remote definition of an elementary stream format (MMAL_ES_FORMAT_T) */ +struct mmal_es_format { + u32 type; /* enum mmal_es_type */ + + u32 encoding; /* FourCC specifying encoding of the elementary + * stream. + */ + u32 encoding_variant; /* FourCC specifying the specific + * encoding variant of the elementary + * stream. + */ + + u32 es; /* Type specific + * information for the + * elementary stream + */ + + u32 bitrate; /* Bitrate in bits per second */ + u32 flags; /* Flags describing properties of the elementary + * stream. + */ + + u32 extradata_size; /* Size of the codec specific data */ + u32 extradata; /* Codec specific data */ +}; + +#endif /* MMAL_MSG_FORMAT_H */ diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-port.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-port.h new file mode 100644 index 000000000000..6ee4c1ed7f19 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg-port.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Broadcom BCM2835 V4L2 driver + * + * Copyright © 2013 Raspberry Pi (Trading) Ltd. + * + * Authors: Vincent Sanders @ Collabora + * Dave Stevenson @ Broadcom + * (now dave.stevenson@raspberrypi.org) + * Simon Mellor @ Broadcom + * Luke Diamand @ Broadcom + */ + +/* MMAL_PORT_TYPE_T */ +enum mmal_port_type { + MMAL_PORT_TYPE_UNKNOWN = 0, /* Unknown port type */ + MMAL_PORT_TYPE_CONTROL, /* Control port */ + MMAL_PORT_TYPE_INPUT, /* Input port */ + MMAL_PORT_TYPE_OUTPUT, /* Output port */ + MMAL_PORT_TYPE_CLOCK, /* Clock port */ +}; + +/* The port is pass-through and doesn't need buffer headers allocated */ +#define MMAL_PORT_CAPABILITY_PASSTHROUGH 0x01 +/* + * The port wants to allocate the buffer payloads. + * This signals a preference that payload allocation should be done + * on this port for efficiency reasons. + */ +#define MMAL_PORT_CAPABILITY_ALLOCATION 0x02 +/* + * The port supports format change events. + * This applies to input ports and is used to let the client know + * whether the port supports being reconfigured via a format + * change event (i.e. without having to disable the port). + */ +#define MMAL_PORT_CAPABILITY_SUPPORTS_EVENT_FORMAT_CHANGE 0x04 + +/* + * mmal port structure (MMAL_PORT_T) + * + * most elements are informational only, the pointer values for + * interrogation messages are generally provided as additional + * structures within the message. When used to set values only the + * buffer_num, buffer_size and userdata parameters are writable. + */ +struct mmal_port { + u32 priv; /* Private member used by the framework */ + u32 name; /* Port name. Used for debugging purposes (RO) */ + + u32 type; /* Type of the port (RO) enum mmal_port_type */ + u16 index; /* Index of the port in its type list (RO) */ + u16 index_all; /* Index of the port in the list of all ports (RO) */ + + u32 is_enabled; /* Indicates whether the port is enabled or not (RO) */ + u32 format; /* Format of the elementary stream */ + + u32 buffer_num_min; /* Minimum number of buffers the port + * requires (RO). This is set by the + * component. + */ + + u32 buffer_size_min; /* Minimum size of buffers the port + * requires (RO). This is set by the + * component.
+ */ + + u32 buffer_alignment_min;/* Minimum alignment requirement for + * the buffers (RO). A value of + * zero means no special alignment + * requirements. This is set by the + * component. + */ + + u32 buffer_num_recommended; /* Number of buffers the port + * recommends for optimal + * performance (RO). A value of + * zero means no special + * recommendation. This is set + * by the component. + */ + + u32 buffer_size_recommended; /* Size of buffers the port + * recommends for optimal + * performance (RO). A value of + * zero means no special + * recommendation. This is set + * by the component. + */ + + u32 buffer_num; /* Actual number of buffers the port will use. + * This is set by the client. + */ + + u32 buffer_size; /* Actual maximum size of the buffers that + * will be sent to the port. This is set by + * the client. + */ + + u32 component; /* Component this port belongs to (Read Only) */ + + u32 userdata; /* Field reserved for use by the client */ + + u32 capabilities; /* Flags describing the capabilities of a + * port (RO). Bitwise combination of \ref + * portcapabilities "Port capabilities" + * values. + */ +}; diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg.h new file mode 100644 index 000000000000..1889494425eb --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-msg.h @@ -0,0 +1,406 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Broadcom BCM2835 V4L2 driver + * + * Copyright © 2013 Raspberry Pi (Trading) Ltd. + * + * Authors: Vincent Sanders @ Collabora + * Dave Stevenson @ Broadcom + * (now dave.stevenson@raspberrypi.org) + * Simon Mellor @ Broadcom + * Luke Diamand @ Broadcom + */ + +/* + * all the data structures which serialise the MMAL protocol. note + * these are directly mapped onto the received message data. + * + * BEWARE: They seem to *assume* pointers are u32 and that there is no + * structure padding! + * + * NOTE: this implementation uses kernel types to ensure sizes. 
Rather + * than assigning values to enums to force their size the + * implementation uses fixed size types and not the enums (though the + * comments have the actual enum type). + */ +#ifndef MMAL_MSG_H +#define MMAL_MSG_H + +#define VC_MMAL_VER 15 +#define VC_MMAL_MIN_VER 10 + +/* max total message size is 512 bytes */ +#define MMAL_MSG_MAX_SIZE 512 +/* with six 32bit header elements max payload is therefore 488 bytes */ +#define MMAL_MSG_MAX_PAYLOAD 488 + +#include "mmal-msg-common.h" +#include "mmal-msg-format.h" +#include "mmal-msg-port.h" +#include "mmal-vchiq.h" + +enum mmal_msg_type { + MMAL_MSG_TYPE_QUIT = 1, + MMAL_MSG_TYPE_SERVICE_CLOSED, + MMAL_MSG_TYPE_GET_VERSION, + MMAL_MSG_TYPE_COMPONENT_CREATE, + MMAL_MSG_TYPE_COMPONENT_DESTROY, /* 5 */ + MMAL_MSG_TYPE_COMPONENT_ENABLE, + MMAL_MSG_TYPE_COMPONENT_DISABLE, + MMAL_MSG_TYPE_PORT_INFO_GET, + MMAL_MSG_TYPE_PORT_INFO_SET, + MMAL_MSG_TYPE_PORT_ACTION, /* 10 */ + MMAL_MSG_TYPE_BUFFER_FROM_HOST, + MMAL_MSG_TYPE_BUFFER_TO_HOST, + MMAL_MSG_TYPE_GET_STATS, + MMAL_MSG_TYPE_PORT_PARAMETER_SET, + MMAL_MSG_TYPE_PORT_PARAMETER_GET, /* 15 */ + MMAL_MSG_TYPE_EVENT_TO_HOST, + MMAL_MSG_TYPE_GET_CORE_STATS_FOR_PORT, + MMAL_MSG_TYPE_OPAQUE_ALLOCATOR, + MMAL_MSG_TYPE_CONSUME_MEM, + MMAL_MSG_TYPE_LMK, /* 20 */ + MMAL_MSG_TYPE_OPAQUE_ALLOCATOR_DESC, + MMAL_MSG_TYPE_DRM_GET_LHS32, + MMAL_MSG_TYPE_DRM_GET_TIME, + MMAL_MSG_TYPE_BUFFER_FROM_HOST_ZEROLEN, + MMAL_MSG_TYPE_PORT_FLUSH, /* 25 */ + MMAL_MSG_TYPE_HOST_LOG, + MMAL_MSG_TYPE_MSG_LAST +}; + +/* port action request messages differ depending on the action type */ +enum mmal_msg_port_action_type { + MMAL_MSG_PORT_ACTION_TYPE_UNKNOWN = 0, /* Unknown action */ + MMAL_MSG_PORT_ACTION_TYPE_ENABLE, /* Enable a port */ + MMAL_MSG_PORT_ACTION_TYPE_DISABLE, /* Disable a port */ + MMAL_MSG_PORT_ACTION_TYPE_FLUSH, /* Flush a port */ + MMAL_MSG_PORT_ACTION_TYPE_CONNECT, /* Connect ports */ + MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT, /* Disconnect ports */ + MMAL_MSG_PORT_ACTION_TYPE_SET_REQUIREMENTS, /* Set buffer requirements*/ +}; + +struct mmal_msg_header { + u32 magic; + u32 type; /* enum mmal_msg_type */ + + /* Opaque handle to the control service */ + u32 control_service; + + u32 context; /* a u32 per message context */ + u32 status; /* The status of the vchiq operation */ + u32 padding; +}; + +/* Send from VC to host to report version */ +struct mmal_msg_version { + u32 flags; + u32 major; + u32 minor; + u32 minimum; +}; + +/* request to VC to create component */ +struct mmal_msg_component_create { + u32 client_component; /* component context */ + char name[128]; + u32 pid; /* For debug */ +}; + +/* reply from VC to component creation request */ +struct mmal_msg_component_create_reply { + u32 status; /* enum mmal_msg_status - how does this differ from + * the one in the header?
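+ * (presumably the header field reports the status of the + * vchiq transport operation, while this one carries the + * MMAL-level result of the component creation)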
+ */ + u32 component_handle; /* VideoCore handle for component */ + u32 input_num; /* Number of input ports */ + u32 output_num; /* Number of output ports */ + u32 clock_num; /* Number of clock ports */ +}; + +/* request to VC to destroy a component */ +struct mmal_msg_component_destroy { + u32 component_handle; +}; + +struct mmal_msg_component_destroy_reply { + u32 status; /* The component destruction status */ +}; + +/* request and reply to VC to enable a component */ +struct mmal_msg_component_enable { + u32 component_handle; +}; + +struct mmal_msg_component_enable_reply { + u32 status; /* The component enable status */ +}; + +/* request and reply to VC to disable a component */ +struct mmal_msg_component_disable { + u32 component_handle; +}; + +struct mmal_msg_component_disable_reply { + u32 status; /* The component disable status */ +}; + +/* request to VC to get port information */ +struct mmal_msg_port_info_get { + u32 component_handle; /* component handle port is associated with */ + u32 port_type; /* enum mmal_msg_port_type */ + u32 index; /* port index to query */ +}; + +/* reply from VC to get port info request */ +struct mmal_msg_port_info_get_reply { + u32 status; /* enum mmal_msg_status */ + u32 component_handle; /* component handle port is associated with */ + u32 port_type; /* enum mmal_msg_port_type */ + u32 port_index; /* port indexed in query */ + s32 found; /* unused */ + u32 port_handle; /* Handle to use for this port */ + struct mmal_port port; + struct mmal_es_format format; /* elementary stream format */ + union mmal_es_specific_format es; /* es type specific data */ + u8 extradata[MMAL_FORMAT_EXTRADATA_MAX_SIZE]; /* es extra data */ +}; + +/* request to VC to set port information */ +struct mmal_msg_port_info_set { + u32 component_handle; + u32 port_type; /* enum mmal_msg_port_type */ + u32 port_index; /* port indexed in query */ + struct mmal_port port; + struct mmal_es_format format; + union mmal_es_specific_format es; + u8 extradata[MMAL_FORMAT_EXTRADATA_MAX_SIZE]; +}; + +/* reply from VC to port info set request */ +struct mmal_msg_port_info_set_reply { + u32 status; + u32 component_handle; /* component handle port is associated with */ + u32 port_type; /* enum mmal_msg_port_type */ + u32 index; /* port indexed in query */ + s32 found; /* unused */ + u32 port_handle; /* Handle to use for this port */ + struct mmal_port port; + struct mmal_es_format format; + union mmal_es_specific_format es; + u8 extradata[MMAL_FORMAT_EXTRADATA_MAX_SIZE]; +}; + +/* port action requests that take a mmal_port as a parameter */ +struct mmal_msg_port_action_port { + u32 component_handle; + u32 port_handle; + u32 action; /* enum mmal_msg_port_action_type */ + struct mmal_port port; +}; + +/* port action requests that take handles as a parameter */ +struct mmal_msg_port_action_handle { + u32 component_handle; + u32 port_handle; + u32 action; /* enum mmal_msg_port_action_type */ + u32 connect_component_handle; + u32 connect_port_handle; +}; + +struct mmal_msg_port_action_reply { + u32 status; /* The port action operation status */ +}; + +/* MMAL buffer transfer */ + +/* Size of space reserved in a buffer message for short messages. 
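+ * Payloads up to this size can be carried inline in the + * control message (see payload_in_message and short_data in + * struct mmal_msg_buffer_from_host) instead of requiring a + * separate bulk transfer.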
+
+/* MMAL buffer transfer */
+
+/* Size of space reserved in a buffer message for short messages. */
+#define MMAL_VC_SHORT_DATA 128
+
+/* Signals that the current payload is the end of the stream of data */
+#define MMAL_BUFFER_HEADER_FLAG_EOS	BIT(0)
+/* Signals that the start of the current payload starts a frame */
+#define MMAL_BUFFER_HEADER_FLAG_FRAME_START	BIT(1)
+/* Signals that the end of the current payload ends a frame */
+#define MMAL_BUFFER_HEADER_FLAG_FRAME_END	BIT(2)
+/* Signals that the current payload contains only complete frames (one or more) */
+#define MMAL_BUFFER_HEADER_FLAG_FRAME \
+	(MMAL_BUFFER_HEADER_FLAG_FRAME_START | \
+	 MMAL_BUFFER_HEADER_FLAG_FRAME_END)
+/* Signals that the current payload is a keyframe (i.e. self decodable) */
+#define MMAL_BUFFER_HEADER_FLAG_KEYFRAME	BIT(3)
+/*
+ * Signals a discontinuity in the stream of data (e.g. after a seek).
+ * Can be used for instance by a decoder to reset its state
+ */
+#define MMAL_BUFFER_HEADER_FLAG_DISCONTINUITY	BIT(4)
+/*
+ * Signals a buffer containing some kind of config data for the component
+ * (e.g. codec config data)
+ */
+#define MMAL_BUFFER_HEADER_FLAG_CONFIG	BIT(5)
+/* Signals an encrypted payload */
+#define MMAL_BUFFER_HEADER_FLAG_ENCRYPTED	BIT(6)
+/* Signals a buffer containing side information */
+#define MMAL_BUFFER_HEADER_FLAG_CODECSIDEINFO	BIT(7)
+/*
+ * Signals a buffer which is the snapshot/postview image from a stills
+ * capture
+ */
+#define MMAL_BUFFER_HEADER_FLAGS_SNAPSHOT	BIT(8)
+/* Signals a buffer which contains data known to be corrupted */
+#define MMAL_BUFFER_HEADER_FLAG_CORRUPTED	BIT(9)
+/* Signals that a buffer failed to be transmitted */
+#define MMAL_BUFFER_HEADER_FLAG_TRANSMISSION_FAILED	BIT(10)
+
+struct mmal_driver_buffer {
+	u32 magic;
+	u32 component_handle;
+	u32 port_handle;
+	u32 client_context;
+};
+
+/* buffer header */
+struct mmal_buffer_header {
+	u32 next;	/* next header */
+	u32 priv;	/* framework private data */
+	u32 cmd;
+	u32 data;
+	u32 alloc_size;
+	u32 length;
+	u32 offset;
+	u32 flags;
+	s64 pts;
+	s64 dts;
+	u32 type;
+	u32 user_data;
+};
+
+struct mmal_buffer_header_type_specific {
+	union {
+		struct {
+			u32 planes;
+			u32 offset[4];
+			u32 pitch[4];
+			u32 flags;
+		} video;
+	} u;
+};
+
+struct mmal_msg_buffer_from_host {
+	/*
+	 * The front 32 bytes of the buffer header are copied
+	 * back to us in the reply to allow for context. This
+	 * area is used to store two mmal_driver_buffer structures to
+	 * allow for multiple concurrent service users.
+	 */
+	/* control data */
+	struct mmal_driver_buffer drvbuf;
+
+	/* referenced control data for passthrough buffer management */
+	struct mmal_driver_buffer drvbuf_ref;
+	struct mmal_buffer_header buffer_header;	/* buffer header itself */
+	struct mmal_buffer_header_type_specific buffer_header_type_specific;
+	s32 is_zero_copy;
+	s32 has_reference;
+
+	/* allows short data to be transferred in the control message */
+	u32 payload_in_message;
+	u8 short_data[MMAL_VC_SHORT_DATA];
+};
+
+/* port parameter setting */
+
+#define MMAL_WORKER_PORT_PARAMETER_SPACE 96
+
+struct mmal_msg_port_parameter_set {
+	u32 component_handle;	/* component */
+	u32 port_handle;	/* port */
+	u32 id;			/* Parameter ID */
+	u32 size;		/* Parameter size */
+	u32 value[MMAL_WORKER_PORT_PARAMETER_SPACE];
+};
+
+struct mmal_msg_port_parameter_set_reply {
+	u32 status;	/* enum mmal_msg_status todo: how does this
+			 * differ from the one in the header?
+ */ +}; + +/* port parameter getting */ + +struct mmal_msg_port_parameter_get { + u32 component_handle; /* component */ + u32 port_handle; /* port */ + u32 id; /* Parameter ID */ + u32 size; /* Parameter size */ +}; + +struct mmal_msg_port_parameter_get_reply { + u32 status; /* Status of mmal_port_parameter_get call */ + u32 id; /* Parameter ID */ + u32 size; /* Parameter size */ + u32 value[MMAL_WORKER_PORT_PARAMETER_SPACE]; +}; + +/* event messages */ +#define MMAL_WORKER_EVENT_SPACE 256 + +struct mmal_msg_event_to_host { + u32 client_component; /* component context */ + + u32 port_type; + u32 port_num; + + u32 cmd; + u32 length; + u8 data[MMAL_WORKER_EVENT_SPACE]; + u32 delayed_buffer; +}; + +/* all mmal messages are serialised through this structure */ +struct mmal_msg { + /* header */ + struct mmal_msg_header h; + /* payload */ + union { + struct mmal_msg_version version; + + struct mmal_msg_component_create component_create; + struct mmal_msg_component_create_reply component_create_reply; + + struct mmal_msg_component_destroy component_destroy; + struct mmal_msg_component_destroy_reply component_destroy_reply; + + struct mmal_msg_component_enable component_enable; + struct mmal_msg_component_enable_reply component_enable_reply; + + struct mmal_msg_component_disable component_disable; + struct mmal_msg_component_disable_reply component_disable_reply; + + struct mmal_msg_port_info_get port_info_get; + struct mmal_msg_port_info_get_reply port_info_get_reply; + + struct mmal_msg_port_info_set port_info_set; + struct mmal_msg_port_info_set_reply port_info_set_reply; + + struct mmal_msg_port_action_port port_action_port; + struct mmal_msg_port_action_handle port_action_handle; + struct mmal_msg_port_action_reply port_action_reply; + + struct mmal_msg_buffer_from_host buffer_from_host; + + struct mmal_msg_port_parameter_set port_parameter_set; + struct mmal_msg_port_parameter_set_reply + port_parameter_set_reply; + struct mmal_msg_port_parameter_get + port_parameter_get; + struct mmal_msg_port_parameter_get_reply + port_parameter_get_reply; + + struct mmal_msg_event_to_host event_to_host; + + u8 payload[MMAL_MSG_MAX_PAYLOAD]; + } u; +}; +#endif diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-parameters.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-parameters.h new file mode 100644 index 000000000000..a0cdd28101f2 --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-parameters.h @@ -0,0 +1,752 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Broadcom BCM2835 V4L2 driver + * + * Copyright © 2013 Raspberry Pi (Trading) Ltd. + * + * Authors: Vincent Sanders @ Collabora + * Dave Stevenson @ Broadcom + * (now dave.stevenson@raspberrypi.org) + * Simon Mellor @ Broadcom + * Luke Diamand @ Broadcom + */ + +/* common parameters */ + +/** @name Parameter groups + * Parameters are divided into groups, and then allocated sequentially within + * a group using an enum. + * @{ + */ + +#ifndef MMAL_PARAMETERS_H +#define MMAL_PARAMETERS_H + +#include <linux/math.h> + +/** Common parameter ID group, used with many types of component. */ +#define MMAL_PARAMETER_GROUP_COMMON (0 << 16) +/** Camera-specific parameter ID group. */ +#define MMAL_PARAMETER_GROUP_CAMERA (1 << 16) +/** Video-specific parameter ID group. */ +#define MMAL_PARAMETER_GROUP_VIDEO (2 << 16) +/** Audio-specific parameter ID group. */ +#define MMAL_PARAMETER_GROUP_AUDIO (3 << 16) +/** Clock-specific parameter ID group. */ +#define MMAL_PARAMETER_GROUP_CLOCK (4 << 16) +/** Miracast-specific parameter ID group. 
*/ +#define MMAL_PARAMETER_GROUP_MIRACAST (5 << 16) + +/* Common parameters */ +enum mmal_parameter_common_type { + /**< Never a valid parameter ID */ + MMAL_PARAMETER_UNUSED = MMAL_PARAMETER_GROUP_COMMON, + + /**< MMAL_PARAMETER_ENCODING_T */ + MMAL_PARAMETER_SUPPORTED_ENCODINGS, + /**< MMAL_PARAMETER_URI_T */ + MMAL_PARAMETER_URI, + /** MMAL_PARAMETER_CHANGE_EVENT_REQUEST_T */ + MMAL_PARAMETER_CHANGE_EVENT_REQUEST, + /** MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_ZERO_COPY, + /**< MMAL_PARAMETER_BUFFER_REQUIREMENTS_T */ + MMAL_PARAMETER_BUFFER_REQUIREMENTS, + /**< MMAL_PARAMETER_STATISTICS_T */ + MMAL_PARAMETER_STATISTICS, + /**< MMAL_PARAMETER_CORE_STATISTICS_T */ + MMAL_PARAMETER_CORE_STATISTICS, + /**< MMAL_PARAMETER_MEM_USAGE_T */ + MMAL_PARAMETER_MEM_USAGE, + /**< MMAL_PARAMETER_UINT32_T */ + MMAL_PARAMETER_BUFFER_FLAG_FILTER, + /**< MMAL_PARAMETER_SEEK_T */ + MMAL_PARAMETER_SEEK, + /**< MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_POWERMON_ENABLE, + /**< MMAL_PARAMETER_LOGGING_T */ + MMAL_PARAMETER_LOGGING, + /**< MMAL_PARAMETER_UINT64_T */ + MMAL_PARAMETER_SYSTEM_TIME, + /**< MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_NO_IMAGE_PADDING, +}; + +/* camera parameters */ + +enum mmal_parameter_camera_type { + /* 0 */ + /** @ref MMAL_PARAMETER_THUMBNAIL_CONFIG_T */ + MMAL_PARAMETER_THUMBNAIL_CONFIGURATION = + MMAL_PARAMETER_GROUP_CAMERA, + /**< Unused? */ + MMAL_PARAMETER_CAPTURE_QUALITY, + /**< @ref MMAL_PARAMETER_INT32_T */ + MMAL_PARAMETER_ROTATION, + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_EXIF_DISABLE, + /**< @ref MMAL_PARAMETER_EXIF_T */ + MMAL_PARAMETER_EXIF, + /**< @ref MMAL_PARAM_AWBMODE_T */ + MMAL_PARAMETER_AWB_MODE, + /**< @ref MMAL_PARAMETER_IMAGEFX_T */ + MMAL_PARAMETER_IMAGE_EFFECT, + /**< @ref MMAL_PARAMETER_COLOURFX_T */ + MMAL_PARAMETER_COLOUR_EFFECT, + /**< @ref MMAL_PARAMETER_FLICKERAVOID_T */ + MMAL_PARAMETER_FLICKER_AVOID, + /**< @ref MMAL_PARAMETER_FLASH_T */ + MMAL_PARAMETER_FLASH, + /**< @ref MMAL_PARAMETER_REDEYE_T */ + MMAL_PARAMETER_REDEYE, + /**< @ref MMAL_PARAMETER_FOCUS_T */ + MMAL_PARAMETER_FOCUS, + /**< Unused? 
*/ + MMAL_PARAMETER_FOCAL_LENGTHS, + /**< @ref MMAL_PARAMETER_INT32_T */ + MMAL_PARAMETER_EXPOSURE_COMP, + /**< @ref MMAL_PARAMETER_SCALEFACTOR_T */ + MMAL_PARAMETER_ZOOM, + /**< @ref MMAL_PARAMETER_MIRROR_T */ + MMAL_PARAMETER_MIRROR, + + /* 0x10 */ + /**< @ref MMAL_PARAMETER_UINT32_T */ + MMAL_PARAMETER_CAMERA_NUM, + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_CAPTURE, + /**< @ref MMAL_PARAMETER_EXPOSUREMODE_T */ + MMAL_PARAMETER_EXPOSURE_MODE, + /**< @ref MMAL_PARAMETER_EXPOSUREMETERINGMODE_T */ + MMAL_PARAMETER_EXP_METERING_MODE, + /**< @ref MMAL_PARAMETER_FOCUS_STATUS_T */ + MMAL_PARAMETER_FOCUS_STATUS, + /**< @ref MMAL_PARAMETER_CAMERA_CONFIG_T */ + MMAL_PARAMETER_CAMERA_CONFIG, + /**< @ref MMAL_PARAMETER_CAPTURE_STATUS_T */ + MMAL_PARAMETER_CAPTURE_STATUS, + /**< @ref MMAL_PARAMETER_FACE_TRACK_T */ + MMAL_PARAMETER_FACE_TRACK, + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_DRAW_BOX_FACES_AND_FOCUS, + /**< @ref MMAL_PARAMETER_UINT32_T */ + MMAL_PARAMETER_JPEG_Q_FACTOR, + /**< @ref MMAL_PARAMETER_FRAME_RATE_T */ + MMAL_PARAMETER_FRAME_RATE, + /**< @ref MMAL_PARAMETER_CAMERA_STC_MODE_T */ + MMAL_PARAMETER_USE_STC, + /**< @ref MMAL_PARAMETER_CAMERA_INFO_T */ + MMAL_PARAMETER_CAMERA_INFO, + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_VIDEO_STABILISATION, + /**< @ref MMAL_PARAMETER_FACE_TRACK_RESULTS_T */ + MMAL_PARAMETER_FACE_TRACK_RESULTS, + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_ENABLE_RAW_CAPTURE, + + /* 0x20 */ + /**< @ref MMAL_PARAMETER_URI_T */ + MMAL_PARAMETER_DPF_FILE, + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_ENABLE_DPF_FILE, + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_DPF_FAIL_IS_FATAL, + /**< @ref MMAL_PARAMETER_CAPTUREMODE_T */ + MMAL_PARAMETER_CAPTURE_MODE, + /**< @ref MMAL_PARAMETER_FOCUS_REGIONS_T */ + MMAL_PARAMETER_FOCUS_REGIONS, + /**< @ref MMAL_PARAMETER_INPUT_CROP_T */ + MMAL_PARAMETER_INPUT_CROP, + /**< @ref MMAL_PARAMETER_SENSOR_INFORMATION_T */ + MMAL_PARAMETER_SENSOR_INFORMATION, + /**< @ref MMAL_PARAMETER_FLASH_SELECT_T */ + MMAL_PARAMETER_FLASH_SELECT, + /**< @ref MMAL_PARAMETER_FIELD_OF_VIEW_T */ + MMAL_PARAMETER_FIELD_OF_VIEW, + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_HIGH_DYNAMIC_RANGE, + /**< @ref MMAL_PARAMETER_DRC_T */ + MMAL_PARAMETER_DYNAMIC_RANGE_COMPRESSION, + /**< @ref MMAL_PARAMETER_ALGORITHM_CONTROL_T */ + MMAL_PARAMETER_ALGORITHM_CONTROL, + /**< @ref MMAL_PARAMETER_RATIONAL_T */ + MMAL_PARAMETER_SHARPNESS, + /**< @ref MMAL_PARAMETER_RATIONAL_T */ + MMAL_PARAMETER_CONTRAST, + /**< @ref MMAL_PARAMETER_RATIONAL_T */ + MMAL_PARAMETER_BRIGHTNESS, + /**< @ref MMAL_PARAMETER_RATIONAL_T */ + MMAL_PARAMETER_SATURATION, + + /* 0x30 */ + /**< @ref MMAL_PARAMETER_UINT32_T */ + MMAL_PARAMETER_ISO, + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_ANTISHAKE, + /** @ref MMAL_PARAMETER_IMAGEFX_PARAMETERS_T */ + MMAL_PARAMETER_IMAGE_EFFECT_PARAMETERS, + /** @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_CAMERA_BURST_CAPTURE, + /** @ref MMAL_PARAMETER_UINT32_T */ + MMAL_PARAMETER_CAMERA_MIN_ISO, + /** @ref MMAL_PARAMETER_CAMERA_USE_CASE_T */ + MMAL_PARAMETER_CAMERA_USE_CASE, + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_CAPTURE_STATS_PASS, + /** @ref MMAL_PARAMETER_UINT32_T */ + MMAL_PARAMETER_CAMERA_CUSTOM_SENSOR_CONFIG, + /** @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_ENABLE_REGISTER_FILE, + /** @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_REGISTER_FAIL_IS_FATAL, + /** @ref MMAL_PARAMETER_CONFIGFILE_T */ + MMAL_PARAMETER_CONFIGFILE_REGISTERS, 
+ /** @ref MMAL_PARAMETER_CONFIGFILE_CHUNK_T */ + MMAL_PARAMETER_CONFIGFILE_CHUNK_REGISTERS, + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_JPEG_ATTACH_LOG, + /**< @ref MMAL_PARAMETER_ZEROSHUTTERLAG_T */ + MMAL_PARAMETER_ZERO_SHUTTER_LAG, + /**< @ref MMAL_PARAMETER_FPS_RANGE_T */ + MMAL_PARAMETER_FPS_RANGE, + /**< @ref MMAL_PARAMETER_INT32_T */ + MMAL_PARAMETER_CAPTURE_EXPOSURE_COMP, + + /* 0x40 */ + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_SW_SHARPEN_DISABLE, + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_FLASH_REQUIRED, + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_SW_SATURATION_DISABLE, + /**< Takes a @ref MMAL_PARAMETER_UINT32_T */ + MMAL_PARAMETER_SHUTTER_SPEED, + /**< Takes a @ref MMAL_PARAMETER_AWB_GAINS_T */ + MMAL_PARAMETER_CUSTOM_AWB_GAINS, +}; + +enum mmal_parameter_camera_config_timestamp_mode { + MMAL_PARAM_TIMESTAMP_MODE_ZERO = 0, /* Always timestamp frames as 0 */ + MMAL_PARAM_TIMESTAMP_MODE_RAW_STC, /* Use the raw STC value + * for the frame timestamp + */ + MMAL_PARAM_TIMESTAMP_MODE_RESET_STC, /* Use the STC timestamp + * but subtract the + * timestamp of the first + * frame sent to give a + * zero based timestamp. + */ +}; + +struct mmal_parameter_fps_range { + /**< Low end of the permitted framerate range */ + struct s32_fract fps_low; + /**< High end of the permitted framerate range */ + struct s32_fract fps_high; +}; + +/* camera configuration parameter */ +struct mmal_parameter_camera_config { + /* Parameters for setting up the image pools */ + u32 max_stills_w; /* Max size of stills capture */ + u32 max_stills_h; + u32 stills_yuv422; /* Allow YUV422 stills capture */ + u32 one_shot_stills; /* Continuous or one shot stills captures. */ + + u32 max_preview_video_w; /* Max size of the preview or video + * capture frames + */ + u32 max_preview_video_h; + u32 num_preview_video_frames; + + /** Sets the height of the circular buffer for stills capture. */ + u32 stills_capture_circular_buffer_height; + + /** Allows preview/encode to resume as fast as possible after the stills + * input frame has been received, and then processes the still frame in + * the background whilst preview/encode has resumed. + * Actual mode is controlled by MMAL_PARAMETER_CAPTURE_MODE. + */ + u32 fast_preview_resume; + + /** Selects algorithm for timestamping frames if + * there is no clock component connected. 
+ * enum mmal_parameter_camera_config_timestamp_mode + */ + s32 use_stc_timestamp; +}; + +enum mmal_parameter_exposuremode { + MMAL_PARAM_EXPOSUREMODE_OFF, + MMAL_PARAM_EXPOSUREMODE_AUTO, + MMAL_PARAM_EXPOSUREMODE_NIGHT, + MMAL_PARAM_EXPOSUREMODE_NIGHTPREVIEW, + MMAL_PARAM_EXPOSUREMODE_BACKLIGHT, + MMAL_PARAM_EXPOSUREMODE_SPOTLIGHT, + MMAL_PARAM_EXPOSUREMODE_SPORTS, + MMAL_PARAM_EXPOSUREMODE_SNOW, + MMAL_PARAM_EXPOSUREMODE_BEACH, + MMAL_PARAM_EXPOSUREMODE_VERYLONG, + MMAL_PARAM_EXPOSUREMODE_FIXEDFPS, + MMAL_PARAM_EXPOSUREMODE_ANTISHAKE, + MMAL_PARAM_EXPOSUREMODE_FIREWORKS, +}; + +enum mmal_parameter_exposuremeteringmode { + MMAL_PARAM_EXPOSUREMETERINGMODE_AVERAGE, + MMAL_PARAM_EXPOSUREMETERINGMODE_SPOT, + MMAL_PARAM_EXPOSUREMETERINGMODE_BACKLIT, + MMAL_PARAM_EXPOSUREMETERINGMODE_MATRIX, +}; + +enum mmal_parameter_awbmode { + MMAL_PARAM_AWBMODE_OFF, + MMAL_PARAM_AWBMODE_AUTO, + MMAL_PARAM_AWBMODE_SUNLIGHT, + MMAL_PARAM_AWBMODE_CLOUDY, + MMAL_PARAM_AWBMODE_SHADE, + MMAL_PARAM_AWBMODE_TUNGSTEN, + MMAL_PARAM_AWBMODE_FLUORESCENT, + MMAL_PARAM_AWBMODE_INCANDESCENT, + MMAL_PARAM_AWBMODE_FLASH, + MMAL_PARAM_AWBMODE_HORIZON, +}; + +enum mmal_parameter_imagefx { + MMAL_PARAM_IMAGEFX_NONE, + MMAL_PARAM_IMAGEFX_NEGATIVE, + MMAL_PARAM_IMAGEFX_SOLARIZE, + MMAL_PARAM_IMAGEFX_POSTERIZE, + MMAL_PARAM_IMAGEFX_WHITEBOARD, + MMAL_PARAM_IMAGEFX_BLACKBOARD, + MMAL_PARAM_IMAGEFX_SKETCH, + MMAL_PARAM_IMAGEFX_DENOISE, + MMAL_PARAM_IMAGEFX_EMBOSS, + MMAL_PARAM_IMAGEFX_OILPAINT, + MMAL_PARAM_IMAGEFX_HATCH, + MMAL_PARAM_IMAGEFX_GPEN, + MMAL_PARAM_IMAGEFX_PASTEL, + MMAL_PARAM_IMAGEFX_WATERCOLOUR, + MMAL_PARAM_IMAGEFX_FILM, + MMAL_PARAM_IMAGEFX_BLUR, + MMAL_PARAM_IMAGEFX_SATURATION, + MMAL_PARAM_IMAGEFX_COLOURSWAP, + MMAL_PARAM_IMAGEFX_WASHEDOUT, + MMAL_PARAM_IMAGEFX_POSTERISE, + MMAL_PARAM_IMAGEFX_COLOURPOINT, + MMAL_PARAM_IMAGEFX_COLOURBALANCE, + MMAL_PARAM_IMAGEFX_CARTOON, +}; + +enum MMAL_PARAM_FLICKERAVOID { + MMAL_PARAM_FLICKERAVOID_OFF, + MMAL_PARAM_FLICKERAVOID_AUTO, + MMAL_PARAM_FLICKERAVOID_50HZ, + MMAL_PARAM_FLICKERAVOID_60HZ, + MMAL_PARAM_FLICKERAVOID_MAX = 0x7FFFFFFF +}; + +struct mmal_parameter_awbgains { + struct s32_fract r_gain; /**< Red gain */ + struct s32_fract b_gain; /**< Blue gain */ +}; + +/** Manner of video rate control */ +enum mmal_parameter_rate_control_mode { + MMAL_VIDEO_RATECONTROL_DEFAULT, + MMAL_VIDEO_RATECONTROL_VARIABLE, + MMAL_VIDEO_RATECONTROL_CONSTANT, + MMAL_VIDEO_RATECONTROL_VARIABLE_SKIP_FRAMES, + MMAL_VIDEO_RATECONTROL_CONSTANT_SKIP_FRAMES +}; + +enum mmal_video_profile { + MMAL_VIDEO_PROFILE_H263_BASELINE, + MMAL_VIDEO_PROFILE_H263_H320CODING, + MMAL_VIDEO_PROFILE_H263_BACKWARDCOMPATIBLE, + MMAL_VIDEO_PROFILE_H263_ISWV2, + MMAL_VIDEO_PROFILE_H263_ISWV3, + MMAL_VIDEO_PROFILE_H263_HIGHCOMPRESSION, + MMAL_VIDEO_PROFILE_H263_INTERNET, + MMAL_VIDEO_PROFILE_H263_INTERLACE, + MMAL_VIDEO_PROFILE_H263_HIGHLATENCY, + MMAL_VIDEO_PROFILE_MP4V_SIMPLE, + MMAL_VIDEO_PROFILE_MP4V_SIMPLESCALABLE, + MMAL_VIDEO_PROFILE_MP4V_CORE, + MMAL_VIDEO_PROFILE_MP4V_MAIN, + MMAL_VIDEO_PROFILE_MP4V_NBIT, + MMAL_VIDEO_PROFILE_MP4V_SCALABLETEXTURE, + MMAL_VIDEO_PROFILE_MP4V_SIMPLEFACE, + MMAL_VIDEO_PROFILE_MP4V_SIMPLEFBA, + MMAL_VIDEO_PROFILE_MP4V_BASICANIMATED, + MMAL_VIDEO_PROFILE_MP4V_HYBRID, + MMAL_VIDEO_PROFILE_MP4V_ADVANCEDREALTIME, + MMAL_VIDEO_PROFILE_MP4V_CORESCALABLE, + MMAL_VIDEO_PROFILE_MP4V_ADVANCEDCODING, + MMAL_VIDEO_PROFILE_MP4V_ADVANCEDCORE, + MMAL_VIDEO_PROFILE_MP4V_ADVANCEDSCALABLE, + MMAL_VIDEO_PROFILE_MP4V_ADVANCEDSIMPLE, + MMAL_VIDEO_PROFILE_H264_BASELINE, + 
MMAL_VIDEO_PROFILE_H264_MAIN, + MMAL_VIDEO_PROFILE_H264_EXTENDED, + MMAL_VIDEO_PROFILE_H264_HIGH, + MMAL_VIDEO_PROFILE_H264_HIGH10, + MMAL_VIDEO_PROFILE_H264_HIGH422, + MMAL_VIDEO_PROFILE_H264_HIGH444, + MMAL_VIDEO_PROFILE_H264_CONSTRAINED_BASELINE, + MMAL_VIDEO_PROFILE_DUMMY = 0x7FFFFFFF +}; + +enum mmal_video_level { + MMAL_VIDEO_LEVEL_H263_10, + MMAL_VIDEO_LEVEL_H263_20, + MMAL_VIDEO_LEVEL_H263_30, + MMAL_VIDEO_LEVEL_H263_40, + MMAL_VIDEO_LEVEL_H263_45, + MMAL_VIDEO_LEVEL_H263_50, + MMAL_VIDEO_LEVEL_H263_60, + MMAL_VIDEO_LEVEL_H263_70, + MMAL_VIDEO_LEVEL_MP4V_0, + MMAL_VIDEO_LEVEL_MP4V_0b, + MMAL_VIDEO_LEVEL_MP4V_1, + MMAL_VIDEO_LEVEL_MP4V_2, + MMAL_VIDEO_LEVEL_MP4V_3, + MMAL_VIDEO_LEVEL_MP4V_4, + MMAL_VIDEO_LEVEL_MP4V_4a, + MMAL_VIDEO_LEVEL_MP4V_5, + MMAL_VIDEO_LEVEL_MP4V_6, + MMAL_VIDEO_LEVEL_H264_1, + MMAL_VIDEO_LEVEL_H264_1b, + MMAL_VIDEO_LEVEL_H264_11, + MMAL_VIDEO_LEVEL_H264_12, + MMAL_VIDEO_LEVEL_H264_13, + MMAL_VIDEO_LEVEL_H264_2, + MMAL_VIDEO_LEVEL_H264_21, + MMAL_VIDEO_LEVEL_H264_22, + MMAL_VIDEO_LEVEL_H264_3, + MMAL_VIDEO_LEVEL_H264_31, + MMAL_VIDEO_LEVEL_H264_32, + MMAL_VIDEO_LEVEL_H264_4, + MMAL_VIDEO_LEVEL_H264_41, + MMAL_VIDEO_LEVEL_H264_42, + MMAL_VIDEO_LEVEL_H264_5, + MMAL_VIDEO_LEVEL_H264_51, + MMAL_VIDEO_LEVEL_DUMMY = 0x7FFFFFFF +}; + +struct mmal_parameter_video_profile { + enum mmal_video_profile profile; + enum mmal_video_level level; +}; + +/* video parameters */ + +enum mmal_parameter_video_type { + /** @ref MMAL_DISPLAYREGION_T */ + MMAL_PARAMETER_DISPLAYREGION = MMAL_PARAMETER_GROUP_VIDEO, + + /** @ref MMAL_PARAMETER_VIDEO_PROFILE_T */ + MMAL_PARAMETER_SUPPORTED_PROFILES, + + /** @ref MMAL_PARAMETER_VIDEO_PROFILE_T */ + MMAL_PARAMETER_PROFILE, + + /** @ref MMAL_PARAMETER_UINT32_T */ + MMAL_PARAMETER_INTRAPERIOD, + + /** @ref MMAL_PARAMETER_VIDEO_RATECONTROL_T */ + MMAL_PARAMETER_RATECONTROL, + + /** @ref MMAL_PARAMETER_VIDEO_NALUNITFORMAT_T */ + MMAL_PARAMETER_NALUNITFORMAT, + + /** @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_MINIMISE_FRAGMENTATION, + + /** @ref MMAL_PARAMETER_UINT32_T. + * Setting the value to zero resets to the default (one slice per + * frame). + */ + MMAL_PARAMETER_MB_ROWS_PER_SLICE, + + /** @ref MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION_T */ + MMAL_PARAMETER_VIDEO_LEVEL_EXTENSION, + + /** @ref MMAL_PARAMETER_VIDEO_EEDE_ENABLE_T */ + MMAL_PARAMETER_VIDEO_EEDE_ENABLE, + + /** @ref MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE_T */ + MMAL_PARAMETER_VIDEO_EEDE_LOSSRATE, + + /** @ref MMAL_PARAMETER_BOOLEAN_T. Request an I-frame. */ + MMAL_PARAMETER_VIDEO_REQUEST_I_FRAME, + /** @ref MMAL_PARAMETER_VIDEO_INTRA_REFRESH_T */ + MMAL_PARAMETER_VIDEO_INTRA_REFRESH, + + /** @ref MMAL_PARAMETER_BOOLEAN_T. */ + MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT, + + /** @ref MMAL_PARAMETER_UINT32_T. Run-time bit rate control */ + MMAL_PARAMETER_VIDEO_BIT_RATE, + + /** @ref MMAL_PARAMETER_FRAME_RATE_T */ + MMAL_PARAMETER_VIDEO_FRAME_RATE, + + /** @ref MMAL_PARAMETER_UINT32_T. */ + MMAL_PARAMETER_VIDEO_ENCODE_MIN_QUANT, + + /** @ref MMAL_PARAMETER_UINT32_T. */ + MMAL_PARAMETER_VIDEO_ENCODE_MAX_QUANT, + + /** @ref MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL_T. */ + MMAL_PARAMETER_VIDEO_ENCODE_RC_MODEL, + + MMAL_PARAMETER_EXTRA_BUFFERS, /**< @ref MMAL_PARAMETER_UINT32_T. */ + /** @ref MMAL_PARAMETER_UINT32_T. + * Changing this parameter from the default can reduce frame rate + * because image buffers need to be re-pitched. + */ + MMAL_PARAMETER_VIDEO_ALIGN_HORIZ, + + /** @ref MMAL_PARAMETER_UINT32_T. 
+ * Changing this parameter from the default can reduce frame rate + * because image buffers need to be re-pitched. + */ + MMAL_PARAMETER_VIDEO_ALIGN_VERT, + + /** @ref MMAL_PARAMETER_BOOLEAN_T. */ + MMAL_PARAMETER_VIDEO_DROPPABLE_PFRAMES, + + /** @ref MMAL_PARAMETER_UINT32_T. */ + MMAL_PARAMETER_VIDEO_ENCODE_INITIAL_QUANT, + + /**< @ref MMAL_PARAMETER_UINT32_T. */ + MMAL_PARAMETER_VIDEO_ENCODE_QP_P, + + /**< @ref MMAL_PARAMETER_UINT32_T. */ + MMAL_PARAMETER_VIDEO_ENCODE_RC_SLICE_DQUANT, + + /** @ref MMAL_PARAMETER_UINT32_T */ + MMAL_PARAMETER_VIDEO_ENCODE_FRAME_LIMIT_BITS, + + /** @ref MMAL_PARAMETER_UINT32_T. */ + MMAL_PARAMETER_VIDEO_ENCODE_PEAK_RATE, + + /* H264 specific parameters */ + + /** @ref MMAL_PARAMETER_BOOLEAN_T. */ + MMAL_PARAMETER_VIDEO_ENCODE_H264_DISABLE_CABAC, + + /** @ref MMAL_PARAMETER_BOOLEAN_T. */ + MMAL_PARAMETER_VIDEO_ENCODE_H264_LOW_LATENCY, + + /** @ref MMAL_PARAMETER_BOOLEAN_T. */ + MMAL_PARAMETER_VIDEO_ENCODE_H264_AU_DELIMITERS, + + /** @ref MMAL_PARAMETER_UINT32_T. */ + MMAL_PARAMETER_VIDEO_ENCODE_H264_DEBLOCK_IDC, + + /** @ref MMAL_PARAMETER_VIDEO_ENCODER_H264_MB_INTRA_MODES_T. */ + MMAL_PARAMETER_VIDEO_ENCODE_H264_MB_INTRA_MODE, + + /** @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_VIDEO_ENCODE_HEADER_ON_OPEN, + + /** @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_VIDEO_ENCODE_PRECODE_FOR_QP, + + /** @ref MMAL_PARAMETER_VIDEO_DRM_INIT_INFO_T. */ + MMAL_PARAMETER_VIDEO_DRM_INIT_INFO, + + /** @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_VIDEO_TIMESTAMP_FIFO, + + /** @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_VIDEO_DECODE_ERROR_CONCEALMENT, + + /** @ref MMAL_PARAMETER_VIDEO_DRM_PROTECT_BUFFER_T. */ + MMAL_PARAMETER_VIDEO_DRM_PROTECT_BUFFER, + + /** @ref MMAL_PARAMETER_BYTES_T */ + MMAL_PARAMETER_VIDEO_DECODE_CONFIG_VD3, + + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_VIDEO_ENCODE_H264_VCL_HRD_PARAMETERS, + + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_VIDEO_ENCODE_H264_LOW_DELAY_HRD_FLAG, + + /**< @ref MMAL_PARAMETER_BOOLEAN_T */ + MMAL_PARAMETER_VIDEO_ENCODE_INLINE_HEADER +}; + +/** Valid mirror modes */ +enum mmal_parameter_mirror { + MMAL_PARAM_MIRROR_NONE, + MMAL_PARAM_MIRROR_VERTICAL, + MMAL_PARAM_MIRROR_HORIZONTAL, + MMAL_PARAM_MIRROR_BOTH, +}; + +enum mmal_parameter_displaytransform { + MMAL_DISPLAY_ROT0 = 0, + MMAL_DISPLAY_MIRROR_ROT0 = 1, + MMAL_DISPLAY_MIRROR_ROT180 = 2, + MMAL_DISPLAY_ROT180 = 3, + MMAL_DISPLAY_MIRROR_ROT90 = 4, + MMAL_DISPLAY_ROT270 = 5, + MMAL_DISPLAY_ROT90 = 6, + MMAL_DISPLAY_MIRROR_ROT270 = 7, +}; + +enum mmal_parameter_displaymode { + MMAL_DISPLAY_MODE_FILL = 0, + MMAL_DISPLAY_MODE_LETTERBOX = 1, +}; + +enum mmal_parameter_displayset { + MMAL_DISPLAY_SET_NONE = 0, + MMAL_DISPLAY_SET_NUM = 1, + MMAL_DISPLAY_SET_FULLSCREEN = 2, + MMAL_DISPLAY_SET_TRANSFORM = 4, + MMAL_DISPLAY_SET_DEST_RECT = 8, + MMAL_DISPLAY_SET_SRC_RECT = 0x10, + MMAL_DISPLAY_SET_MODE = 0x20, + MMAL_DISPLAY_SET_PIXEL = 0x40, + MMAL_DISPLAY_SET_NOASPECT = 0x80, + MMAL_DISPLAY_SET_LAYER = 0x100, + MMAL_DISPLAY_SET_COPYPROTECT = 0x200, + MMAL_DISPLAY_SET_ALPHA = 0x400, +}; + +/* rectangle, used lots so it gets its own struct */ +struct vchiq_mmal_rect { + s32 x; + s32 y; + s32 width; + s32 height; +}; + +struct mmal_parameter_displayregion { + /** Bitfield that indicates which fields are set and should be + * used. All other fields will maintain their current value. + * \ref MMAL_DISPLAYSET_T defines the bits that can be + * combined. 
+	 */
+	u32 set;
+
+	/** Describes the display output device, with 0 typically
+	 * being a directly connected LCD display. The actual values
+	 * will depend on the hardware. Code using hard-wired numbers
+	 * (e.g. 2) is certain to fail.
+	 */
+	u32 display_num;
+
+	/** Indicates that we are using the full device screen area,
+	 * rather than a window of the display. If zero, then
+	 * dest_rect is used to specify a region of the display to
+	 * use.
+	 */
+	s32 fullscreen;
+
+	/** Indicates any rotation or flipping used to map frames onto
+	 * the natural display orientation.
+	 */
+	u32 transform;	/* enum mmal_parameter_displaytransform */
+
+	/** Where to display the frame within the screen, if
+	 * fullscreen is zero.
+	 */
+	struct vchiq_mmal_rect dest_rect;
+
+	/** Indicates which area of the frame to display. If all
+	 * values are zero, the whole frame will be used.
+	 */
+	struct vchiq_mmal_rect src_rect;
+
+	/** If set to non-zero, indicates that any display scaling
+	 * should disregard the aspect ratio of the frame region being
+	 * displayed.
+	 */
+	s32 noaspect;
+
+	/** Indicates how the image should be scaled to fit the
+	 * display. \code MMAL_DISPLAY_MODE_FILL \endcode indicates
+	 * that the image should fill the screen by potentially
+	 * cropping the frames. Setting \code mode \endcode to \code
+	 * MMAL_DISPLAY_MODE_LETTERBOX \endcode indicates that all the
+	 * source region should be displayed and black bars added if
+	 * necessary.
+	 */
+	u32 mode;	/* enum mmal_parameter_displaymode */
+
+	/** If non-zero, defines the width of a source pixel relative
+	 * to \code pixel_y \endcode. If zero, then pixels default to
+	 * being square.
+	 */
+	u32 pixel_x;
+
+	/** If non-zero, defines the height of a source pixel relative
+	 * to \code pixel_x \endcode. If zero, then pixels default to
+	 * being square.
+	 */
+	u32 pixel_y;
+
+	/** Sets the relative depth of the images, with greater values
+	 * being in front of smaller values.
+	 */
+	u32 layer;
+
+	/** Set to non-zero to ensure copy protection is used on
+	 * output.
+	 */
+	s32 copyprotect_required;
+
+	/** Level of opacity of the layer, where zero is fully
+	 * transparent and 255 is fully opaque.
+ */ + u32 alpha; +}; + +#define MMAL_MAX_IMAGEFX_PARAMETERS 5 + +struct mmal_parameter_imagefx_parameters { + enum mmal_parameter_imagefx effect; + u32 num_effect_params; + u32 effect_parameter[MMAL_MAX_IMAGEFX_PARAMETERS]; +}; + +#define MMAL_PARAMETER_CAMERA_INFO_MAX_CAMERAS 4 +#define MMAL_PARAMETER_CAMERA_INFO_MAX_FLASHES 2 +#define MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN 16 + +struct mmal_parameter_camera_info_camera { + u32 port_id; + u32 max_width; + u32 max_height; + u32 lens_present; + u8 camera_name[MMAL_PARAMETER_CAMERA_INFO_MAX_STR_LEN]; +}; + +enum mmal_parameter_camera_info_flash_type { + /* Make values explicit to ensure they match values in config ini */ + MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_XENON = 0, + MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_LED = 1, + MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_OTHER = 2, + MMAL_PARAMETER_CAMERA_INFO_FLASH_TYPE_MAX = 0x7FFFFFFF +}; + +struct mmal_parameter_camera_info_flash { + enum mmal_parameter_camera_info_flash_type flash_type; +}; + +struct mmal_parameter_camera_info { + u32 num_cameras; + u32 num_flashes; + struct mmal_parameter_camera_info_camera + cameras[MMAL_PARAMETER_CAMERA_INFO_MAX_CAMERAS]; + struct mmal_parameter_camera_info_flash + flashes[MMAL_PARAMETER_CAMERA_INFO_MAX_FLASHES]; +}; + +#endif diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c b/drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c new file mode 100644 index 000000000000..cd073ed3ea2d --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.c @@ -0,0 +1,1949 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Broadcom BCM2835 V4L2 driver + * + * Copyright © 2013 Raspberry Pi (Trading) Ltd. + * + * Authors: Vincent Sanders @ Collabora + * Dave Stevenson @ Broadcom + * (now dave.stevenson@raspberrypi.org) + * Simon Mellor @ Broadcom + * Luke Diamand @ Broadcom + * + * V4L2 driver MMAL vchiq interface code + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/completion.h> +#include <linux/raspberrypi/vchiq.h> +#include <linux/vmalloc.h> +#include <media/videobuf2-vmalloc.h> + +#include <linux/raspberrypi/vchiq_arm.h> + +#include "mmal-common.h" +#include "mmal-vchiq.h" +#include "mmal-msg.h" + +/* + * maximum number of components supported. + * This matches the maximum permitted by default on the VPU + */ +#define VCHIQ_MMAL_MAX_COMPONENTS 64 + +/* + * Timeout for synchronous msg responses in seconds. + * Helpful to increase this if stopping in the VPU debugger. 
+ */ +#define SYNC_MSG_TIMEOUT 3 + +/*#define FULL_MSG_DUMP 1*/ + +#ifdef DEBUG +static const char *const msg_type_names[] = { + "UNKNOWN", + "QUIT", + "SERVICE_CLOSED", + "GET_VERSION", + "COMPONENT_CREATE", + "COMPONENT_DESTROY", + "COMPONENT_ENABLE", + "COMPONENT_DISABLE", + "PORT_INFO_GET", + "PORT_INFO_SET", + "PORT_ACTION", + "BUFFER_FROM_HOST", + "BUFFER_TO_HOST", + "GET_STATS", + "PORT_PARAMETER_SET", + "PORT_PARAMETER_GET", + "EVENT_TO_HOST", + "GET_CORE_STATS_FOR_PORT", + "OPAQUE_ALLOCATOR", + "CONSUME_MEM", + "LMK", + "OPAQUE_ALLOCATOR_DESC", + "DRM_GET_LHS32", + "DRM_GET_TIME", + "BUFFER_FROM_HOST_ZEROLEN", + "PORT_FLUSH", + "HOST_LOG", +}; +#endif + +static const char *const port_action_type_names[] = { + "UNKNOWN", + "ENABLE", + "DISABLE", + "FLUSH", + "CONNECT", + "DISCONNECT", + "SET_REQUIREMENTS", +}; + +#if defined(DEBUG) +#if defined(FULL_MSG_DUMP) +#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \ + do { \ + pr_debug(TITLE" type:%s(%d) length:%d\n", \ + msg_type_names[(MSG)->h.type], \ + (MSG)->h.type, (MSG_LEN)); \ + print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \ + 16, 4, (MSG), \ + sizeof(struct mmal_msg_header), 1); \ + print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \ + 16, 4, \ + ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\ + (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \ + } while (0) +#else +#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) \ + { \ + pr_debug(TITLE" type:%s(%d) length:%d\n", \ + msg_type_names[(MSG)->h.type], \ + (MSG)->h.type, (MSG_LEN)); \ + } +#endif +#else +#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE) +#endif + +struct vchiq_mmal_instance; + +/* normal message context */ +struct mmal_msg_context { + struct vchiq_mmal_instance *instance; + + /* Index in the context_map idr so that we can find the + * mmal_msg_context again when servicing the VCHI reply. 
+/* normal message context */
+struct mmal_msg_context {
+	struct vchiq_mmal_instance *instance;
+
+	/* Index in the context_map idr so that we can find the
+	 * mmal_msg_context again when servicing the VCHIQ reply.
+	 */
+	int handle;
+
+	union {
+		struct {
+			/* work struct for buffer_cb callback */
+			struct work_struct work;
+			/* work struct for deferred callback */
+			struct work_struct buffer_to_host_work;
+			/* mmal instance */
+			struct vchiq_mmal_instance *instance;
+			/* mmal port */
+			struct vchiq_mmal_port *port;
+			/* actual buffer used to store bulk reply */
+			struct mmal_buffer *buffer;
+			/* amount of buffer used */
+			unsigned long buffer_used;
+			/* MMAL buffer flags */
+			u32 mmal_flags;
+			/* Presentation and Decode timestamps */
+			s64 pts;
+			s64 dts;
+
+			int status;	/* context status */
+		} bulk;		/* bulk data */
+
+		struct {
+			/* message handle to release */
+			struct vchiq_header *msg_handle;
+			/* pointer to received message */
+			struct mmal_msg *msg;
+			/* received message length */
+			u32 msg_len;
+			/* completion upon reply */
+			struct completion cmplt;
+		} sync;		/* synchronous response */
+	} u;
+};
+
+struct vchiq_mmal_instance {
+	unsigned int service_handle;
+
+	/* ensure serialised access to service */
+	struct mutex vchiq_mutex;
+
+	struct idr context_map;
+	/* protect accesses to context_map */
+	struct mutex context_map_lock;
+
+	struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
+
+	/* ordered workqueue to process all bulk operations */
+	struct workqueue_struct *bulk_wq;
+
+	/* handle for a vchiq instance */
+	struct vchiq_instance *vchiq_instance;
+};
+
+static struct mmal_msg_context *
+get_msg_context(struct vchiq_mmal_instance *instance)
+{
+	struct mmal_msg_context *msg_context;
+	int handle;
+
+	/* todo: should this be allocated from a pool to avoid kzalloc? */
+	msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
+
+	if (!msg_context)
+		return ERR_PTR(-ENOMEM);
+
+	/* Create an ID that will be passed along with our message so
+	 * that when we service the VCHIQ reply, we can look up what
+	 * message is being replied to.
+	 */
+	mutex_lock(&instance->context_map_lock);
+	handle = idr_alloc(&instance->context_map, msg_context,
+			   0, 0, GFP_KERNEL);
+	mutex_unlock(&instance->context_map_lock);
+
+	if (handle < 0) {
+		kfree(msg_context);
+		return ERR_PTR(handle);
+	}
+
+	msg_context->instance = instance;
+	msg_context->handle = handle;
+
+	return msg_context;
+}
+
+static struct mmal_msg_context *
+lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
+{
+	return idr_find(&instance->context_map, handle);
+}
+
+static void
+release_msg_context(struct mmal_msg_context *msg_context)
+{
+	struct vchiq_mmal_instance *instance = msg_context->instance;
+
+	mutex_lock(&instance->context_map_lock);
+	idr_remove(&instance->context_map, msg_context->handle);
+	mutex_unlock(&instance->context_map_lock);
+	kfree(msg_context);
+}
+
+/* deals with receipt of event to host message */
+static void event_to_host_cb(struct vchiq_mmal_instance *instance,
+			     struct mmal_msg *msg, u32 msg_len)
+{
+	pr_debug("unhandled event\n");
+	pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
+		 msg->u.event_to_host.client_component,
+		 msg->u.event_to_host.port_type,
+		 msg->u.event_to_host.port_num,
+		 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
+}
+
+/* workqueue scheduled callback
+ *
+ * This runs from a workqueue because we must not make any other
+ * synchronous vchiq calls from within the message delivery thread.
+ */
+static void buffer_work_cb(struct work_struct *work)
+{
+	struct mmal_msg_context *msg_context =
+		container_of(work, struct mmal_msg_context, u.bulk.work);
+	struct mmal_buffer *buffer = msg_context->u.bulk.buffer;
+
+	if (!buffer) {
+		pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
+		       __func__, msg_context);
+		return;
+	}
+
+	buffer->length = msg_context->u.bulk.buffer_used;
+	buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
+	buffer->dts = msg_context->u.bulk.dts;
+	buffer->pts = msg_context->u.bulk.pts;
+
+	atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
+
+	msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
+					    msg_context->u.bulk.port,
+					    msg_context->u.bulk.status,
+					    msg_context->u.bulk.buffer);
+}
+
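The same deferral pattern appears throughout this file: the VCHIQ service callback runs in the message delivery context, so anything that might sleep or issue another synchronous call is bounced to a work item. A minimal sketch of the pattern (hypothetical names; standard kernel workqueue API):

	#include <linux/workqueue.h>

	struct reply_ctx {
		struct work_struct work;
		/* ...per-message state filled in by the callback... */
	};

	static void reply_work_fn(struct work_struct *work)
	{
		struct reply_ctx *ctx =
			container_of(work, struct reply_ctx, work);

		/* Safe to sleep or make further synchronous calls here. */
	}

	/* In the service callback: record state, then defer.
	 * INIT_WORK(&ctx->work, reply_work_fn) was done at setup time.
	 */
	static void on_reply(struct reply_ctx *ctx)
	{
		schedule_work(&ctx->work);
	}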
+/* workqueue scheduled callback to handle receiving buffers
+ *
+ * VCHIQ will allow up to 4 bulk receives to be scheduled before blocking.
+ * If we block in the service_callback context then we can't process the
+ * VCHIQ_BULK_RECEIVE_DONE callback that would otherwise allow the blocked
+ * vchiq_bulk_receive() call to complete.
+ */
+static void buffer_to_host_work_cb(struct work_struct *work)
+{
+	struct mmal_msg_context *msg_context =
+		container_of(work, struct mmal_msg_context,
+			     u.bulk.buffer_to_host_work);
+	struct vchiq_mmal_instance *instance = msg_context->instance;
+	unsigned long len = msg_context->u.bulk.buffer_used;
+	int ret;
+
+	if (!len)
+		/* Dummy receive to ensure the buffers remain in order */
+		len = 8;
+	/* queue the bulk submission */
+	vchiq_use_service(instance->vchiq_instance, instance->service_handle);
+	ret = vchiq_bulk_receive(instance->vchiq_instance, instance->service_handle,
+				 msg_context->u.bulk.buffer->buffer,
+				 /* Actual receive needs to be a multiple
+				  * of 4 bytes
+				  */
+				 (len + 3) & ~3,
+				 msg_context,
+				 VCHIQ_BULK_MODE_CALLBACK);
+
+	vchiq_release_service(instance->vchiq_instance, instance->service_handle);
+
+	if (ret != 0)
+		pr_err("%s: ctx: %p, vchiq_bulk_receive failed %d\n",
+		       __func__, msg_context, ret);
+}
+
+/* enqueue a bulk receive for a given message context */
+static int bulk_receive(struct vchiq_mmal_instance *instance,
+			struct mmal_msg *msg,
+			struct mmal_msg_context *msg_context)
+{
+	unsigned long rd_len;
+
+	rd_len = msg->u.buffer_from_host.buffer_header.length;
+
+	if (!msg_context->u.bulk.buffer) {
+		pr_err("bulk.buffer not configured - error in buffer_from_host\n");
+
+		/* todo: this is a serious error, we should never have
+		 * committed a buffer_to_host operation to the mmal
+		 * port without the buffer to back it up (underflow
+		 * handling) and there is no obvious way to deal with
+		 * this - how is the mmal service going to react when
+		 * we fail to do the xfer and reschedule a buffer when
+		 * it arrives? perhaps a starved flag to indicate a
+		 * waiting bulk receive?
+		 */
+
+		return -EINVAL;
+	}
+
+	/* ensure we do not overrun the available buffer */
+	if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
+		rd_len = msg_context->u.bulk.buffer->buffer_size;
+		pr_warn("short read as not enough receive buffer space\n");
+		/* todo: is this the correct response, what happens to
+		 * the rest of the message data?
+ */ + } + + /* store length */ + msg_context->u.bulk.buffer_used = rd_len; + msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts; + msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts; + + queue_work(msg_context->instance->bulk_wq, + &msg_context->u.bulk.buffer_to_host_work); + + return 0; +} + +/* data in message, memcpy from packet into output buffer */ +static int inline_receive(struct vchiq_mmal_instance *instance, + struct mmal_msg *msg, + struct mmal_msg_context *msg_context) +{ + memcpy(msg_context->u.bulk.buffer->buffer, + msg->u.buffer_from_host.short_data, + msg->u.buffer_from_host.payload_in_message); + + msg_context->u.bulk.buffer_used = + msg->u.buffer_from_host.payload_in_message; + + return 0; +} + +/* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */ +static int +buffer_from_host(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, struct mmal_buffer *buf) +{ + struct mmal_msg_context *msg_context; + struct mmal_msg m; + int ret; + + if (!port->enabled) + return -EINVAL; + + pr_debug("instance:%u buffer:%p\n", instance->service_handle, buf); + + /* get context */ + if (!buf->msg_context) { + pr_err("%s: msg_context not allocated, buf %p\n", __func__, + buf); + return -EINVAL; + } + msg_context = buf->msg_context; + + /* store bulk message context for when data arrives */ + msg_context->u.bulk.instance = instance; + msg_context->u.bulk.port = port; + msg_context->u.bulk.buffer = buf; + msg_context->u.bulk.buffer_used = 0; + + /* initialise work structure ready to schedule callback */ + INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb); + INIT_WORK(&msg_context->u.bulk.buffer_to_host_work, + buffer_to_host_work_cb); + + atomic_inc(&port->buffers_with_vpu); + + /* prep the buffer from host message */ + memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */ + + m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST; + m.h.magic = MMAL_MAGIC; + m.h.context = msg_context->handle; + m.h.status = 0; + + /* drvbuf is our private data passed back */ + m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC; + m.u.buffer_from_host.drvbuf.component_handle = port->component->handle; + m.u.buffer_from_host.drvbuf.port_handle = port->handle; + m.u.buffer_from_host.drvbuf.client_context = msg_context->handle; + + /* buffer header */ + m.u.buffer_from_host.buffer_header.cmd = 0; + m.u.buffer_from_host.buffer_header.data = + (u32)(unsigned long)buf->buffer; + m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size; + m.u.buffer_from_host.buffer_header.length = 0; /* nothing used yet */ + m.u.buffer_from_host.buffer_header.offset = 0; /* no offset */ + m.u.buffer_from_host.buffer_header.flags = 0; /* no flags */ + m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN; + m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN; + + /* clear buffer type specific data */ + memset(&m.u.buffer_from_host.buffer_header_type_specific, 0, + sizeof(m.u.buffer_from_host.buffer_header_type_specific)); + + /* no payload in message */ + m.u.buffer_from_host.payload_in_message = 0; + + vchiq_use_service(instance->vchiq_instance, instance->service_handle); + + ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, &m, + sizeof(struct mmal_msg_header) + + sizeof(m.u.buffer_from_host)); + if (ret) + atomic_dec(&port->buffers_with_vpu); + + vchiq_release_service(instance->vchiq_instance, instance->service_handle); + + return ret; +} + +/* deals with receipt of buffer to host message */ +static void 
buffer_to_host_cb(struct vchiq_mmal_instance *instance, + struct mmal_msg *msg, u32 msg_len) +{ + struct mmal_msg_context *msg_context; + u32 handle; + + pr_debug("%s: instance:%p msg:%p msg_len:%d\n", + __func__, instance, msg, msg_len); + + if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) { + handle = msg->u.buffer_from_host.drvbuf.client_context; + msg_context = lookup_msg_context(instance, handle); + + if (!msg_context) { + pr_err("drvbuf.client_context(%u) is invalid\n", + handle); + return; + } + } else { + pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n"); + return; + } + + msg_context->u.bulk.mmal_flags = + msg->u.buffer_from_host.buffer_header.flags; + + if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) { + /* message reception had an error */ + pr_warn("error %d in reply\n", msg->h.status); + + msg_context->u.bulk.status = msg->h.status; + + } else if (msg->u.buffer_from_host.buffer_header.length == 0) { + /* empty buffer */ + if (msg->u.buffer_from_host.buffer_header.flags & + MMAL_BUFFER_HEADER_FLAG_EOS) { + msg_context->u.bulk.status = + bulk_receive(instance, msg, msg_context); + if (msg_context->u.bulk.status == 0) + return; /* successful bulk submission, bulk + * completion will trigger callback + */ + } else { + /* do callback with empty buffer - not EOS though */ + msg_context->u.bulk.status = 0; + msg_context->u.bulk.buffer_used = 0; + } + } else if (msg->u.buffer_from_host.payload_in_message == 0) { + /* data is not in message, queue a bulk receive */ + msg_context->u.bulk.status = + bulk_receive(instance, msg, msg_context); + if (msg_context->u.bulk.status == 0) + return; /* successful bulk submission, bulk + * completion will trigger callback + */ + + /* failed to submit buffer, this will end badly */ + pr_err("error %d on bulk submission\n", + msg_context->u.bulk.status); + + } else if (msg->u.buffer_from_host.payload_in_message <= + MMAL_VC_SHORT_DATA) { + /* data payload within message */ + msg_context->u.bulk.status = inline_receive(instance, msg, + msg_context); + } else { + pr_err("message with invalid short payload\n"); + + /* signal error */ + msg_context->u.bulk.status = -EINVAL; + msg_context->u.bulk.buffer_used = + msg->u.buffer_from_host.payload_in_message; + } + + /* schedule the port callback */ + schedule_work(&msg_context->u.bulk.work); +} + +static void bulk_receive_cb(struct vchiq_mmal_instance *instance, + struct mmal_msg_context *msg_context) +{ + msg_context->u.bulk.status = 0; + + /* schedule the port callback */ + schedule_work(&msg_context->u.bulk.work); +} + +static void bulk_abort_cb(struct vchiq_mmal_instance *instance, + struct mmal_msg_context *msg_context) +{ + pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context); + + msg_context->u.bulk.status = -EINTR; + + schedule_work(&msg_context->u.bulk.work); +} + +/* incoming event service callback */ +static int mmal_service_callback(struct vchiq_instance *vchiq_instance, + enum vchiq_reason reason, struct vchiq_header *header, + unsigned int handle, void *cb_data, + void __user *cb_userdata) +{ + struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(vchiq_instance, handle); + u32 msg_len; + struct mmal_msg *msg; + struct mmal_msg_context *msg_context; + + if (!instance) { + pr_err("Message callback passed NULL instance\n"); + return 0; + } + + switch (reason) { + case VCHIQ_MESSAGE_AVAILABLE: + msg = (void *)header->data; + msg_len = header->size; + + DBG_DUMP_MSG(msg, msg_len, "<<< reply message"); + + /* handling is different for buffer messages */ + switch 
(msg->h.type) { + case MMAL_MSG_TYPE_BUFFER_FROM_HOST: + vchiq_release_message(vchiq_instance, handle, header); + break; + + case MMAL_MSG_TYPE_EVENT_TO_HOST: + event_to_host_cb(instance, msg, msg_len); + vchiq_release_message(vchiq_instance, handle, header); + + break; + + case MMAL_MSG_TYPE_BUFFER_TO_HOST: + buffer_to_host_cb(instance, msg, msg_len); + vchiq_release_message(vchiq_instance, handle, header); + break; + + default: + /* messages dependent on header context to complete */ + if (!msg->h.context) { + pr_err("received message context was null!\n"); + vchiq_release_message(vchiq_instance, handle, header); + break; + } + + msg_context = lookup_msg_context(instance, + msg->h.context); + if (!msg_context) { + pr_err("received invalid message context %u!\n", + msg->h.context); + vchiq_release_message(vchiq_instance, handle, header); + break; + } + + /* fill in context values */ + msg_context->u.sync.msg_handle = header; + msg_context->u.sync.msg = msg; + msg_context->u.sync.msg_len = msg_len; + + /* todo: should this check (completion_done() + * == 1) for no one waiting? or do we need a + * flag to tell us the completion has been + * interrupted so we can free the message and + * its context. This probably also solves the + * message arriving after interruption todo + * below + */ + + /* complete message so caller knows it happened */ + complete(&msg_context->u.sync.cmplt); + break; + } + + break; + + case VCHIQ_BULK_RECEIVE_DONE: + bulk_receive_cb(instance, cb_data); + break; + + case VCHIQ_BULK_RECEIVE_ABORTED: + bulk_abort_cb(instance, cb_data); + break; + + case VCHIQ_SERVICE_CLOSED: + /* TODO: consider if this requires action if received when + * driver is not explicitly closing the service + */ + break; + + default: + pr_err("Received unhandled message reason %d\n", reason); + break; + } + + return 0; +} + +static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance, + struct mmal_msg *msg, + unsigned int payload_len, + struct mmal_msg **msg_out, + struct vchiq_header **msg_handle) +{ + struct mmal_msg_context *msg_context; + int ret; + unsigned long time_left; + + /* payload size must not cause message to exceed max size */ + if (payload_len > + (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) { + pr_err("payload length %d exceeds max:%d\n", payload_len, + (int)(MMAL_MSG_MAX_SIZE - + sizeof(struct mmal_msg_header))); + return -EINVAL; + } + + msg_context = get_msg_context(instance); + if (IS_ERR(msg_context)) + return PTR_ERR(msg_context); + + init_completion(&msg_context->u.sync.cmplt); + + msg->h.magic = MMAL_MAGIC; + msg->h.context = msg_context->handle; + msg->h.status = 0; + + DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len), + ">>> sync message"); + + vchiq_use_service(instance->vchiq_instance, instance->service_handle); + + ret = vchiq_queue_kernel_message(instance->vchiq_instance, instance->service_handle, msg, + sizeof(struct mmal_msg_header) + + payload_len); + + vchiq_release_service(instance->vchiq_instance, instance->service_handle); + + if (ret) { + pr_err("error %d queuing message\n", ret); + release_msg_context(msg_context); + return ret; + } + + time_left = wait_for_completion_timeout(&msg_context->u.sync.cmplt, + SYNC_MSG_TIMEOUT * HZ); + if (time_left == 0) { + pr_err("timed out waiting for sync completion\n"); + ret = -ETIME; + /* todo: what happens if the message arrives after aborting */ + release_msg_context(msg_context); + return ret; + } + + *msg_out = msg_context->u.sync.msg; + *msg_handle = 
msg_context->u.sync.msg_handle;
+	release_msg_context(msg_context);
+
+	return 0;
+}
+
+static void dump_port_info(struct vchiq_mmal_port *port)
+{
+	pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
+
+	pr_debug("buffer minimum num:%d size:%d align:%d\n",
+		 port->minimum_buffer.num,
+		 port->minimum_buffer.size, port->minimum_buffer.alignment);
+
+	pr_debug("buffer recommended num:%d size:%d align:%d\n",
+		 port->recommended_buffer.num,
+		 port->recommended_buffer.size,
+		 port->recommended_buffer.alignment);
+
+	pr_debug("buffer current values num:%d size:%d align:%d\n",
+		 port->current_buffer.num,
+		 port->current_buffer.size, port->current_buffer.alignment);
+
+	pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
+		 port->format.type,
+		 port->format.encoding, port->format.encoding_variant);
+
+	pr_debug("  bitrate:%d flags:0x%x\n",
+		 port->format.bitrate, port->format.flags);
+
+	if (port->format.type == MMAL_ES_TYPE_VIDEO) {
+		pr_debug
+		    ("es video format: width:%d height:%d colourspace:0x%x\n",
+		     port->es.video.width, port->es.video.height,
+		     port->es.video.color_space);
+
+		pr_debug("  : crop xywh %d,%d,%d,%d\n",
+			 port->es.video.crop.x,
+			 port->es.video.crop.y,
+			 port->es.video.crop.width, port->es.video.crop.height);
+		pr_debug("  : framerate %d/%d aspect %d/%d\n",
+			 port->es.video.frame_rate.numerator,
+			 port->es.video.frame_rate.denominator,
+			 port->es.video.par.numerator, port->es.video.par.denominator);
+	}
+}
+
+static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
+{
+	/* todo: do readonly fields need setting at all? */
+	p->type = port->type;
+	p->index = port->index;
+	p->index_all = 0;
+	p->is_enabled = port->enabled;
+	p->buffer_num_min = port->minimum_buffer.num;
+	p->buffer_size_min = port->minimum_buffer.size;
+	p->buffer_alignment_min = port->minimum_buffer.alignment;
+	p->buffer_num_recommended = port->recommended_buffer.num;
+	p->buffer_size_recommended = port->recommended_buffer.size;
+
+	/* only three writable fields in a port */
+	p->buffer_num = port->current_buffer.num;
+	p->buffer_size = port->current_buffer.size;
+	p->userdata = (u32)(unsigned long)port;
+}
+
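Since only buffer_num, buffer_size and userdata are writable, a port is changed by updating the host-side shadow struct vchiq_mmal_port, pushing it with port_info_set(), and then re-reading it with port_info_get() so that any VPU-side adjustment is picked up. A typical caller would look like this (a sketch with hypothetical, already-initialised instance and port):

	int ret;

	port->current_buffer.num = port->recommended_buffer.num;
	port->current_buffer.size = port->recommended_buffer.size;

	ret = port_info_set(instance, port);
	if (ret)
		return ret;

	/* Re-read so VPU-side adjustments are reflected locally. */
	ret = port_info_get(instance, port);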
+static int port_info_set(struct vchiq_mmal_instance *instance,
+			 struct vchiq_mmal_port *port)
+{
+	int ret;
+	struct mmal_msg m;
+	struct mmal_msg *rmsg;
+	struct vchiq_header *rmsg_handle;
+
+	pr_debug("setting port info port %p\n", port);
+	if (!port)
+		return -EINVAL;
+	dump_port_info(port);
+
+	m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
+
+	m.u.port_info_set.component_handle = port->component->handle;
+	m.u.port_info_set.port_type = port->type;
+	m.u.port_info_set.port_index = port->index;
+
+	port_to_mmal_msg(port, &m.u.port_info_set.port);
+
+	/* elementary stream format setup */
+	m.u.port_info_set.format.type = port->format.type;
+	m.u.port_info_set.format.encoding = port->format.encoding;
+	m.u.port_info_set.format.encoding_variant =
+	    port->format.encoding_variant;
+	m.u.port_info_set.format.bitrate = port->format.bitrate;
+	m.u.port_info_set.format.flags = port->format.flags;
+
+	memcpy(&m.u.port_info_set.es, &port->es,
+	       sizeof(union mmal_es_specific_format));
+
+	m.u.port_info_set.format.extradata_size = port->format.extradata_size;
+	memcpy(&m.u.port_info_set.extradata, port->format.extradata,
+	       port->format.extradata_size);
+
+	ret = send_synchronous_mmal_msg(instance, &m,
+					sizeof(m.u.port_info_set),
+					&rmsg, &rmsg_handle);
+	if (ret)
+		return ret;
+
+	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
+		/* got an unexpected message type in reply */
+		ret = -EINVAL;
+		goto release_msg;
+	}
+
+	/* return operation status */
+	ret = -rmsg->u.port_info_set_reply.status;
+
+	pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
+		 port->component->handle, port->handle);
+
+release_msg:
+	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
+
+	return ret;
+}
+
+/* use port info get message to retrieve port information */
+static int port_info_get(struct vchiq_mmal_instance *instance,
+			 struct vchiq_mmal_port *port)
+{
+	int ret;
+	struct mmal_msg m;
+	struct mmal_msg *rmsg;
+	struct vchiq_header *rmsg_handle;
+
+	/* port info time */
+	m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
+	m.u.port_info_get.component_handle = port->component->handle;
+	m.u.port_info_get.port_type = port->type;
+	m.u.port_info_get.index = port->index;
+
+	ret = send_synchronous_mmal_msg(instance, &m,
+					sizeof(m.u.port_info_get),
+					&rmsg, &rmsg_handle);
+	if (ret)
+		return ret;
+
+	if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
+		/* got an unexpected message type in reply */
+		ret = -EINVAL;
+		goto release_msg;
+	}
+
+	/* return operation status */
+	ret = -rmsg->u.port_info_get_reply.status;
+	if (ret != MMAL_MSG_STATUS_SUCCESS)
+		goto release_msg;
+
+	if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
+		port->enabled = false;
+	else
+		port->enabled = true;
+
+	/* copy the values out of the message */
+	port->handle = rmsg->u.port_info_get_reply.port_handle;
+
+	/* port type and index cached to use on port info set because
+	 * it does not use a port handle
+	 */
+	port->type = rmsg->u.port_info_get_reply.port_type;
+	port->index = rmsg->u.port_info_get_reply.port_index;
+
+	port->minimum_buffer.num =
+	    rmsg->u.port_info_get_reply.port.buffer_num_min;
+	port->minimum_buffer.size =
+	    rmsg->u.port_info_get_reply.port.buffer_size_min;
+	port->minimum_buffer.alignment =
+	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;
+
+	port->recommended_buffer.alignment =
+	    rmsg->u.port_info_get_reply.port.buffer_alignment_min;
+	port->recommended_buffer.num =
+	    rmsg->u.port_info_get_reply.port.buffer_num_recommended;
+
+	port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
+	port->current_buffer.size =
+	    rmsg->u.port_info_get_reply.port.buffer_size;
+
+	/* stream format */
+	port->format.type = rmsg->u.port_info_get_reply.format.type;
+	port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
+	port->format.encoding_variant =
+	    rmsg->u.port_info_get_reply.format.encoding_variant;
+	port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
+	port->format.flags = rmsg->u.port_info_get_reply.format.flags;
+
+	/* elementary stream format */
+	memcpy(&port->es,
+	       &rmsg->u.port_info_get_reply.es,
+	       sizeof(union mmal_es_specific_format));
+	port->format.es = &port->es;
+
+	port->format.extradata_size =
+	    rmsg->u.port_info_get_reply.format.extradata_size;
+	memcpy(port->format.extradata,
+	       rmsg->u.port_info_get_reply.extradata,
+	       port->format.extradata_size);
+
+	pr_debug("received port info\n");
+	dump_port_info(port);
+
+release_msg:
+
+	pr_debug("%s:result:%d component:0x%x port:%d\n",
+		 __func__, ret, port->component->handle, port->handle);
+
+	vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle);
+
+	return ret;
+}
+
+/* create component on vc */
+static int create_component(struct vchiq_mmal_instance *instance,
+			    struct vchiq_mmal_component *component,
+			    const char *name)
+{
+	int ret;
+	struct mmal_msg m;
+	struct mmal_msg *rmsg;
+	struct
vchiq_header *rmsg_handle; + + /* build component create message */ + m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE; + m.u.component_create.client_component = component->client_component; + strscpy_pad(m.u.component_create.name, name, + sizeof(m.u.component_create.name)); + m.u.component_create.pid = 0; + + ret = send_synchronous_mmal_msg(instance, &m, + sizeof(m.u.component_create), + &rmsg, &rmsg_handle); + if (ret) + return ret; + + if (rmsg->h.type != m.h.type) { + /* got an unexpected message type in reply */ + ret = -EINVAL; + goto release_msg; + } + + ret = -rmsg->u.component_create_reply.status; + if (ret != MMAL_MSG_STATUS_SUCCESS) + goto release_msg; + + /* a valid component response received */ + component->handle = rmsg->u.component_create_reply.component_handle; + component->inputs = rmsg->u.component_create_reply.input_num; + component->outputs = rmsg->u.component_create_reply.output_num; + component->clocks = rmsg->u.component_create_reply.clock_num; + + pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n", + component->handle, + component->inputs, component->outputs, component->clocks); + +release_msg: + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); + + return ret; +} + +/* destroys a component on vc */ +static int destroy_component(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_component *component) +{ + int ret; + struct mmal_msg m; + struct mmal_msg *rmsg; + struct vchiq_header *rmsg_handle; + + m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY; + m.u.component_destroy.component_handle = component->handle; + + ret = send_synchronous_mmal_msg(instance, &m, + sizeof(m.u.component_destroy), + &rmsg, &rmsg_handle); + if (ret) + return ret; + + if (rmsg->h.type != m.h.type) { + /* got an unexpected message type in reply */ + ret = -EINVAL; + goto release_msg; + } + + ret = -rmsg->u.component_destroy_reply.status; + +release_msg: + + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); + + return ret; +} + +/* enable a component on vc */ +static int enable_component(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_component *component) +{ + int ret; + struct mmal_msg m; + struct mmal_msg *rmsg; + struct vchiq_header *rmsg_handle; + + m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE; + m.u.component_enable.component_handle = component->handle; + + ret = send_synchronous_mmal_msg(instance, &m, + sizeof(m.u.component_enable), + &rmsg, &rmsg_handle); + if (ret) + return ret; + + if (rmsg->h.type != m.h.type) { + /* got an unexpected message type in reply */ + ret = -EINVAL; + goto release_msg; + } + + ret = -rmsg->u.component_enable_reply.status; + +release_msg: + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); + + return ret; +} + +/* disable a component on vc */ +static int disable_component(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_component *component) +{ + int ret; + struct mmal_msg m; + struct mmal_msg *rmsg; + struct vchiq_header *rmsg_handle; + + m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE; + m.u.component_disable.component_handle = component->handle; + + ret = send_synchronous_mmal_msg(instance, &m, + sizeof(m.u.component_disable), + &rmsg, &rmsg_handle); + if (ret) + return ret; + + if (rmsg->h.type != m.h.type) { + /* got an unexpected message type in reply */ + ret = -EINVAL; + goto release_msg; + } + + ret = -rmsg->u.component_disable_reply.status; + +release_msg: + + vchiq_release_message(instance->vchiq_instance, 
instance->service_handle, rmsg_handle); + + return ret; +} + +/* get version of mmal implementation */ +static int get_version(struct vchiq_mmal_instance *instance, + u32 *major_out, u32 *minor_out) +{ + int ret; + struct mmal_msg m; + struct mmal_msg *rmsg; + struct vchiq_header *rmsg_handle; + + m.h.type = MMAL_MSG_TYPE_GET_VERSION; + + ret = send_synchronous_mmal_msg(instance, &m, + sizeof(m.u.version), + &rmsg, &rmsg_handle); + if (ret) + return ret; + + if (rmsg->h.type != m.h.type) { + /* got an unexpected message type in reply */ + ret = -EINVAL; + goto release_msg; + } + + *major_out = rmsg->u.version.major; + *minor_out = rmsg->u.version.minor; + +release_msg: + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); + + return ret; +} + +/* do a port action with a port as a parameter */ +static int port_action_port(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, + enum mmal_msg_port_action_type action_type) +{ + int ret; + struct mmal_msg m; + struct mmal_msg *rmsg; + struct vchiq_header *rmsg_handle; + + m.h.type = MMAL_MSG_TYPE_PORT_ACTION; + m.u.port_action_port.component_handle = port->component->handle; + m.u.port_action_port.port_handle = port->handle; + m.u.port_action_port.action = action_type; + + port_to_mmal_msg(port, &m.u.port_action_port.port); + + ret = send_synchronous_mmal_msg(instance, &m, + sizeof(m.u.port_action_port), + &rmsg, &rmsg_handle); + if (ret) + return ret; + + if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) { + /* got an unexpected message type in reply */ + ret = -EINVAL; + goto release_msg; + } + + ret = -rmsg->u.port_action_reply.status; + + pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n", + __func__, + ret, port->component->handle, port->handle, + port_action_type_names[action_type], action_type); + +release_msg: + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); + + return ret; +} + +/* do a port action with handles as parameters */ +static int port_action_handle(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, + enum mmal_msg_port_action_type action_type, + u32 connect_component_handle, + u32 connect_port_handle) +{ + int ret; + struct mmal_msg m; + struct mmal_msg *rmsg; + struct vchiq_header *rmsg_handle; + + m.h.type = MMAL_MSG_TYPE_PORT_ACTION; + + m.u.port_action_handle.component_handle = port->component->handle; + m.u.port_action_handle.port_handle = port->handle; + m.u.port_action_handle.action = action_type; + + m.u.port_action_handle.connect_component_handle = + connect_component_handle; + m.u.port_action_handle.connect_port_handle = connect_port_handle; + + ret = send_synchronous_mmal_msg(instance, &m, + sizeof(m.u.port_action_handle), + &rmsg, &rmsg_handle); + if (ret) + return ret; + + if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) { + /* got an unexpected message type in reply */ + ret = -EINVAL; + goto release_msg; + } + + ret = -rmsg->u.port_action_reply.status; + + pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n", + __func__, + ret, port->component->handle, port->handle, + port_action_type_names[action_type], + action_type, connect_component_handle, connect_port_handle); + +release_msg: + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); + + return ret; +} + +static int port_parameter_set(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, + u32 parameter_id, void *value, u32 value_size) +{ + int 
ret; + struct mmal_msg m; + struct mmal_msg *rmsg; + struct vchiq_header *rmsg_handle; + + m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET; + + m.u.port_parameter_set.component_handle = port->component->handle; + m.u.port_parameter_set.port_handle = port->handle; + m.u.port_parameter_set.id = parameter_id; + m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size; + memcpy(&m.u.port_parameter_set.value, value, value_size); + + ret = send_synchronous_mmal_msg(instance, &m, + (4 * sizeof(u32)) + value_size, + &rmsg, &rmsg_handle); + if (ret) + return ret; + + if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) { + /* got an unexpected message type in reply */ + ret = -EINVAL; + goto release_msg; + } + + ret = -rmsg->u.port_parameter_set_reply.status; + + pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", + __func__, + ret, port->component->handle, port->handle, parameter_id); + +release_msg: + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); + + return ret; +} + +static int port_parameter_get(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, + u32 parameter_id, void *value, u32 *value_size) +{ + int ret; + struct mmal_msg m; + struct mmal_msg *rmsg; + struct vchiq_header *rmsg_handle; + + m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET; + + m.u.port_parameter_get.component_handle = port->component->handle; + m.u.port_parameter_get.port_handle = port->handle; + m.u.port_parameter_get.id = parameter_id; + m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size; + + ret = send_synchronous_mmal_msg(instance, &m, + sizeof(struct + mmal_msg_port_parameter_get), + &rmsg, &rmsg_handle); + if (ret) + return ret; + + if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) { + /* got an unexpected message type in reply */ + pr_err("Incorrect reply type %d\n", rmsg->h.type); + ret = -EINVAL; + goto release_msg; + } + + ret = rmsg->u.port_parameter_get_reply.status; + + /* port_parameter_get_reply.size includes the header, + * whilst *value_size doesn't. + */ + rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32)); + + if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) { + /* Copy only as much as we have space for + * but report true size of parameter + */ + memcpy(value, &rmsg->u.port_parameter_get_reply.value, + *value_size); + } else { + memcpy(value, &rmsg->u.port_parameter_get_reply.value, + rmsg->u.port_parameter_get_reply.size); + } + /* Always report the size of the returned parameter to the caller */ + *value_size = rmsg->u.port_parameter_get_reply.size; + + pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__, + ret, port->component->handle, port->handle, parameter_id); + +release_msg: + vchiq_release_message(instance->vchiq_instance, instance->service_handle, rmsg_handle); + + return ret; +} + +/* disables a port and drains buffers from it */ +static int port_disable(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port) +{ + int ret; + struct list_head *q, *buf_head; + unsigned long flags = 0; + + if (!port->enabled) + return 0; + + port->enabled = false; + + ret = port_action_port(instance, port, + MMAL_MSG_PORT_ACTION_TYPE_DISABLE); + if (ret == 0) { + /* + * Drain all queued buffers on port. This should only + * apply to buffers that have been queued before the port + * has been enabled. 
If the port has been enabled and buffers + * passed, then the buffers should have been removed from this + * list, and we should get the relevant callbacks via VCHIQ + * to release the buffers. + */ + spin_lock_irqsave(&port->slock, flags); + + list_for_each_safe(buf_head, q, &port->buffers) { + struct mmal_buffer *mmalbuf; + + mmalbuf = list_entry(buf_head, struct mmal_buffer, + list); + list_del(buf_head); + if (port->buffer_cb) { + mmalbuf->length = 0; + mmalbuf->mmal_flags = 0; + mmalbuf->dts = MMAL_TIME_UNKNOWN; + mmalbuf->pts = MMAL_TIME_UNKNOWN; + port->buffer_cb(instance, + port, 0, mmalbuf); + } + } + + spin_unlock_irqrestore(&port->slock, flags); + + ret = port_info_get(instance, port); + } + + return ret; +} + +/* enable a port */ +static int port_enable(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port) +{ + unsigned int hdr_count; + struct list_head *q, *buf_head; + int ret; + + if (port->enabled) + return 0; + + ret = port_action_port(instance, port, + MMAL_MSG_PORT_ACTION_TYPE_ENABLE); + if (ret) + goto done; + + port->enabled = true; + + if (port->buffer_cb) { + /* send buffer headers to videocore */ + hdr_count = 1; + list_for_each_safe(buf_head, q, &port->buffers) { + struct mmal_buffer *mmalbuf; + + mmalbuf = list_entry(buf_head, struct mmal_buffer, + list); + ret = buffer_from_host(instance, port, mmalbuf); + if (ret) + goto done; + + list_del(buf_head); + hdr_count++; + if (hdr_count > port->current_buffer.num) + break; + } + } + + ret = port_info_get(instance, port); + +done: + return ret; +} + +/* ------------------------------------------------------------------ + * Exported API + *------------------------------------------------------------------ + */ + +int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port) +{ + int ret; + + if (mutex_lock_interruptible(&instance->vchiq_mutex)) + return -EINTR; + + ret = port_info_set(instance, port); + if (ret) + goto release_unlock; + + /* read what has actually been set */ + ret = port_info_get(instance, port); + +release_unlock: + mutex_unlock(&instance->vchiq_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format); + +int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, + u32 parameter, void *value, u32 value_size) +{ + int ret; + + if (mutex_lock_interruptible(&instance->vchiq_mutex)) + return -EINTR; + + ret = port_parameter_set(instance, port, parameter, value, value_size); + + mutex_unlock(&instance->vchiq_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set); + +int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, + u32 parameter, void *value, u32 *value_size) +{ + int ret; + + if (mutex_lock_interruptible(&instance->vchiq_mutex)) + return -EINTR; + + ret = port_parameter_get(instance, port, parameter, value, value_size); + + mutex_unlock(&instance->vchiq_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get); + +/* enable a port + * + * enables a port and queues buffers for satisfying callbacks if we + * provide a callback handler + */ +int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, + vchiq_mmal_buffer_cb buffer_cb) +{ + int ret; + + if (mutex_lock_interruptible(&instance->vchiq_mutex)) + return -EINTR; + + /* already enabled - noop */ + if (port->enabled) { + ret = 0; + goto unlock; + } + + port->buffer_cb = buffer_cb; + + ret = 
port_enable(instance, port); + +unlock: + mutex_unlock(&instance->vchiq_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable); + +int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port) +{ + int ret; + + if (mutex_lock_interruptible(&instance->vchiq_mutex)) + return -EINTR; + + if (!port->enabled) { + mutex_unlock(&instance->vchiq_mutex); + return 0; + } + + ret = port_disable(instance, port); + + mutex_unlock(&instance->vchiq_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable); + +/* ports will be connected in a tunneled manner so data buffers + * are not handled by client. + */ +int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *src, + struct vchiq_mmal_port *dst) +{ + int ret; + + if (mutex_lock_interruptible(&instance->vchiq_mutex)) + return -EINTR; + + /* disconnect ports if connected */ + if (src->connected) { + ret = port_disable(instance, src); + if (ret) { + pr_err("failed disabling src port(%d)\n", ret); + goto release_unlock; + } + + /* do not need to disable the destination port as they + * are connected and it is done automatically + */ + + ret = port_action_handle(instance, src, + MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT, + src->connected->component->handle, + src->connected->handle); + if (ret < 0) { + pr_err("failed disconnecting src port\n"); + goto release_unlock; + } + src->connected->enabled = false; + src->connected = NULL; + } + + if (!dst) { + /* do not make new connection */ + ret = 0; + pr_debug("not making new connection\n"); + goto release_unlock; + } + + /* copy src port format to dst */ + dst->format.encoding = src->format.encoding; + dst->es.video.width = src->es.video.width; + dst->es.video.height = src->es.video.height; + dst->es.video.crop.x = src->es.video.crop.x; + dst->es.video.crop.y = src->es.video.crop.y; + dst->es.video.crop.width = src->es.video.crop.width; + dst->es.video.crop.height = src->es.video.crop.height; + dst->es.video.frame_rate.numerator = src->es.video.frame_rate.numerator; + dst->es.video.frame_rate.denominator = src->es.video.frame_rate.denominator; + + /* set new format */ + ret = port_info_set(instance, dst); + if (ret) { + pr_debug("setting port info failed\n"); + goto release_unlock; + } + + /* read what has actually been set */ + ret = port_info_get(instance, dst); + if (ret) { + pr_debug("read back port info failed\n"); + goto release_unlock; + } + + /* connect two ports together */ + ret = port_action_handle(instance, src, + MMAL_MSG_PORT_ACTION_TYPE_CONNECT, + dst->component->handle, dst->handle); + if (ret < 0) { + pr_debug("connecting port %d:%d to %d:%d failed\n", + src->component->handle, src->handle, + dst->component->handle, dst->handle); + goto release_unlock; + } + src->connected = dst; + +release_unlock: + + mutex_unlock(&instance->vchiq_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel); + +int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, + struct mmal_buffer *buffer) +{ + unsigned long flags = 0; + int ret; + + ret = buffer_from_host(instance, port, buffer); + if (ret == -EINVAL) { + /* Port is disabled. Queue for when it is enabled. 
*/ + spin_lock_irqsave(&port->slock, flags); + list_add_tail(&buffer->list, &port->buffers); + spin_unlock_irqrestore(&port->slock, flags); + } + + return 0; +} +EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer); + +int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance, + struct mmal_buffer *buf) +{ + struct mmal_msg_context *msg_context = get_msg_context(instance); + + if (IS_ERR(msg_context)) + return (PTR_ERR(msg_context)); + + buf->msg_context = msg_context; + return 0; +} +EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init); + +int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf) +{ + struct mmal_msg_context *msg_context = buf->msg_context; + + if (msg_context) + release_msg_context(msg_context); + buf->msg_context = NULL; + + return 0; +} +EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup); + +/* Initialise a mmal component and its ports + * + */ +int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance, + const char *name, + struct vchiq_mmal_component **component_out) +{ + int ret; + int idx; /* port index */ + struct vchiq_mmal_component *component = NULL; + + if (mutex_lock_interruptible(&instance->vchiq_mutex)) + return -EINTR; + + for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) { + if (!instance->component[idx].in_use) { + component = &instance->component[idx]; + component->in_use = true; + break; + } + } + + if (!component) { + ret = -EINVAL; /* todo is this correct error? */ + goto unlock; + } + + /* We need a handle to reference back to our component structure. + * Use the array index in instance->component rather than rolling + * another IDR. + */ + component->client_component = idx; + + ret = create_component(instance, component, name); + if (ret < 0) { + pr_err("%s: failed to create component %d (Not enough GPU mem?)\n", + __func__, ret); + goto unlock; + } + + /* ports info needs gathering */ + component->control.type = MMAL_PORT_TYPE_CONTROL; + component->control.index = 0; + component->control.component = component; + spin_lock_init(&component->control.slock); + INIT_LIST_HEAD(&component->control.buffers); + ret = port_info_get(instance, &component->control); + if (ret < 0) + goto release_component; + + for (idx = 0; idx < component->inputs; idx++) { + component->input[idx].type = MMAL_PORT_TYPE_INPUT; + component->input[idx].index = idx; + component->input[idx].component = component; + spin_lock_init(&component->input[idx].slock); + INIT_LIST_HEAD(&component->input[idx].buffers); + ret = port_info_get(instance, &component->input[idx]); + if (ret < 0) + goto release_component; + } + + for (idx = 0; idx < component->outputs; idx++) { + component->output[idx].type = MMAL_PORT_TYPE_OUTPUT; + component->output[idx].index = idx; + component->output[idx].component = component; + spin_lock_init(&component->output[idx].slock); + INIT_LIST_HEAD(&component->output[idx].buffers); + ret = port_info_get(instance, &component->output[idx]); + if (ret < 0) + goto release_component; + } + + for (idx = 0; idx < component->clocks; idx++) { + component->clock[idx].type = MMAL_PORT_TYPE_CLOCK; + component->clock[idx].index = idx; + component->clock[idx].component = component; + spin_lock_init(&component->clock[idx].slock); + INIT_LIST_HEAD(&component->clock[idx].buffers); + ret = port_info_get(instance, &component->clock[idx]); + if (ret < 0) + goto release_component; + } + + *component_out = component; + + mutex_unlock(&instance->vchiq_mutex); + + return 0; + +release_component: + destroy_component(instance, component); +unlock: + if (component) + component->in_use = false; + 
mutex_unlock(&instance->vchiq_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
+
+/*
+ * cause a mmal component to be destroyed
+ */
+int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
+				  struct vchiq_mmal_component *component)
+{
+	int ret;
+
+	if (mutex_lock_interruptible(&instance->vchiq_mutex))
+		return -EINTR;
+
+	if (component->enabled)
+		ret = disable_component(instance, component);
+
+	ret = destroy_component(instance, component);
+
+	component->in_use = false;
+
+	mutex_unlock(&instance->vchiq_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
+
+/*
+ * cause a mmal component to be enabled
+ */
+int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
+				struct vchiq_mmal_component *component)
+{
+	int ret;
+
+	if (mutex_lock_interruptible(&instance->vchiq_mutex))
+		return -EINTR;
+
+	if (component->enabled) {
+		mutex_unlock(&instance->vchiq_mutex);
+		return 0;
+	}
+
+	ret = enable_component(instance, component);
+	if (ret == 0)
+		component->enabled = true;
+
+	mutex_unlock(&instance->vchiq_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
+
+/*
+ * cause a mmal component to be disabled
+ */
+int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
+				 struct vchiq_mmal_component *component)
+{
+	int ret;
+
+	if (mutex_lock_interruptible(&instance->vchiq_mutex))
+		return -EINTR;
+
+	if (!component->enabled) {
+		mutex_unlock(&instance->vchiq_mutex);
+		return 0;
+	}
+
+	ret = disable_component(instance, component);
+	if (ret == 0)
+		component->enabled = false;
+
+	mutex_unlock(&instance->vchiq_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
+
+int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
+		       u32 *major_out, u32 *minor_out)
+{
+	int ret;
+
+	if (mutex_lock_interruptible(&instance->vchiq_mutex))
+		return -EINTR;
+
+	ret = get_version(instance, major_out, minor_out);
+
+	mutex_unlock(&instance->vchiq_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_version);
+
+int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
+{
+	int status = 0;
+
+	if (!instance)
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&instance->vchiq_mutex))
+		return -EINTR;
+
+	vchiq_use_service(instance->vchiq_instance, instance->service_handle);
+
+	status = vchiq_close_service(instance->vchiq_instance, instance->service_handle);
+	if (status != 0)
+		pr_err("mmal-vchiq: VCHIQ close failed\n");
+
+	mutex_unlock(&instance->vchiq_mutex);
+
+	vchiq_shutdown(instance->vchiq_instance);
+	destroy_workqueue(instance->bulk_wq);
+
+	idr_destroy(&instance->context_map);
+
+	kfree(instance);
+
+	return status;
+}
+EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
+
+int vchiq_mmal_init(struct device *dev, struct vchiq_mmal_instance **out_instance)
+{
+	int status;
+	int err = -ENODEV;
+	struct vchiq_mmal_instance *instance;
+	struct vchiq_instance *vchiq_instance;
+	struct vchiq_service_params_kernel params = {
+		.version = VC_MMAL_VER,
+		.version_min = VC_MMAL_MIN_VER,
+		.fourcc = VCHIQ_MAKE_FOURCC('m', 'm', 'a', 'l'),
+		.callback = mmal_service_callback,
+		.userdata = NULL,
+	};
+	struct vchiq_drv_mgmt *mgmt = dev_get_drvdata(dev->parent);
+
+	/* compile time checks to ensure structure size as they are
+	 * directly (de)serialised from memory.
+ */ + + /* ensure the header structure has packed to the correct size */ + BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24); + + /* ensure message structure does not exceed maximum length */ + BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE); + + /* mmal port struct is correct size */ + BUILD_BUG_ON(sizeof(struct mmal_port) != 64); + + /* create a vchi instance */ + status = vchiq_initialise(&mgmt->state, &vchiq_instance); + if (status) { + pr_err("Failed to initialise VCHI instance (status=%d)\n", + status); + return -EIO; + } + + status = vchiq_connect(vchiq_instance); + if (status) { + pr_err("Failed to connect VCHI instance (status=%d)\n", status); + err = -EIO; + goto err_shutdown_vchiq; + } + + instance = kzalloc(sizeof(*instance), GFP_KERNEL); + + if (!instance) { + err = -ENOMEM; + goto err_shutdown_vchiq; + } + + mutex_init(&instance->vchiq_mutex); + + instance->vchiq_instance = vchiq_instance; + + mutex_init(&instance->context_map_lock); + idr_init_base(&instance->context_map, 1); + + params.userdata = instance; + + instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq", + WQ_MEM_RECLAIM); + if (!instance->bulk_wq) + goto err_free; + + status = vchiq_open_service(vchiq_instance, ¶ms, + &instance->service_handle); + if (status) { + pr_err("Failed to open VCHI service connection (status=%d)\n", + status); + goto err_close_services; + } + + vchiq_release_service(instance->vchiq_instance, instance->service_handle); + + *out_instance = instance; + + return 0; + +err_close_services: + vchiq_close_service(instance->vchiq_instance, instance->service_handle); + destroy_workqueue(instance->bulk_wq); +err_free: + kfree(instance); +err_shutdown_vchiq: + vchiq_shutdown(vchiq_instance); + return err; +} +EXPORT_SYMBOL_GPL(vchiq_mmal_init); + +MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface"); +MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.h b/drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.h new file mode 100644 index 000000000000..8c3959f6f97f --- /dev/null +++ b/drivers/platform/raspberrypi/vchiq-mmal/mmal-vchiq.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Broadcom BCM2835 V4L2 driver + * + * Copyright © 2013 Raspberry Pi (Trading) Ltd. + * + * Authors: Vincent Sanders @ Collabora + * Dave Stevenson @ Broadcom + * (now dave.stevenson@raspberrypi.org) + * Simon Mellor @ Broadcom + * Luke Diamand @ Broadcom + * + * MMAL interface to VCHIQ message passing + */ + +#ifndef MMAL_VCHIQ_H +#define MMAL_VCHIQ_H + +#include "mmal-common.h" +#include "mmal-msg-format.h" + +#define MAX_PORT_COUNT 4 + +/* Maximum size of the format extradata. 
*/ +#define MMAL_FORMAT_EXTRADATA_MAX_SIZE 128 + +struct vchiq_mmal_instance; +struct device; + +enum vchiq_mmal_es_type { + MMAL_ES_TYPE_UNKNOWN, /**< Unknown elementary stream type */ + MMAL_ES_TYPE_CONTROL, /**< Elementary stream of control commands */ + MMAL_ES_TYPE_AUDIO, /**< Audio elementary stream */ + MMAL_ES_TYPE_VIDEO, /**< Video elementary stream */ + MMAL_ES_TYPE_SUBPICTURE /**< Sub-picture elementary stream */ +}; + +struct vchiq_mmal_port_buffer { + unsigned int num; /* number of buffers */ + u32 size; /* size of buffers */ + u32 alignment; /* alignment of buffers */ +}; + +struct vchiq_mmal_port; + +typedef void (*vchiq_mmal_buffer_cb)(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, + int status, struct mmal_buffer *buffer); + +struct vchiq_mmal_port { + bool enabled; + u32 handle; + u32 type; /* port type, cached to use on port info set */ + u32 index; /* port index, cached to use on port info set */ + + /* component port belongs to, allows simple deref */ + struct vchiq_mmal_component *component; + + struct vchiq_mmal_port *connected; /* port connected to */ + + /* buffer info */ + struct vchiq_mmal_port_buffer minimum_buffer; + struct vchiq_mmal_port_buffer recommended_buffer; + struct vchiq_mmal_port_buffer current_buffer; + + /* stream format */ + struct mmal_es_format_local format; + /* elementary stream format */ + union mmal_es_specific_format es; + + /* data buffers to fill */ + struct list_head buffers; + /* lock to serialise adding and removing buffers from list */ + spinlock_t slock; + + /* Count of buffers the VPU has yet to return */ + atomic_t buffers_with_vpu; + /* callback on buffer completion */ + vchiq_mmal_buffer_cb buffer_cb; + /* callback context */ + void *cb_ctx; +}; + +struct vchiq_mmal_component { + bool in_use; + bool enabled; + u32 handle; /* VideoCore handle for component */ + u32 inputs; /* Number of input ports */ + u32 outputs; /* Number of output ports */ + u32 clocks; /* Number of clock ports */ + struct vchiq_mmal_port control; /* control port */ + struct vchiq_mmal_port input[MAX_PORT_COUNT]; /* input ports */ + struct vchiq_mmal_port output[MAX_PORT_COUNT]; /* output ports */ + struct vchiq_mmal_port clock[MAX_PORT_COUNT]; /* clock ports */ + u32 client_component; /* Used to ref back to client struct */ +}; + +int vchiq_mmal_init(struct device *dev, struct vchiq_mmal_instance **out_instance); +int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance); + +/* Initialise a mmal component and its ports + * + */ +int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance, + const char *name, struct vchiq_mmal_component **component_out); + +int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_component *component); + +int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_component *component); + +int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_component *component); + +/* enable a mmal port + * + * enables a port and, if a buffer callback provided, enqueues buffer + * headers as appropriate for the port. 
+ */ +int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, + vchiq_mmal_buffer_cb buffer_cb); + +/* disable a port + * + * disable a port will dequeue any pending buffers + */ +int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port); + +int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, + u32 parameter, + void *value, + u32 value_size); + +int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, + u32 parameter, + void *value, + u32 *value_size); + +int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port); + +int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *src, + struct vchiq_mmal_port *dst); + +int vchiq_mmal_version(struct vchiq_mmal_instance *instance, + u32 *major_out, + u32 *minor_out); + +int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance, + struct vchiq_mmal_port *port, + struct mmal_buffer *buf); + +int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance, + struct mmal_buffer *buf); +int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf); +#endif /* MMAL_VCHIQ_H */ diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c index a594d5fcfcfd..78ac3a8fbb73 100644 --- a/drivers/platform/surface/surface_aggregator_registry.c +++ b/drivers/platform/surface/surface_aggregator_registry.c @@ -491,24 +491,13 @@ static const struct of_device_id ssam_platform_hub_of_match[] __maybe_unused = { static int ssam_platform_hub_probe(struct platform_device *pdev) { const struct software_node **nodes; - const struct of_device_id *match; - struct device_node *fdt_root; struct ssam_controller *ctrl; struct fwnode_handle *root; int status; nodes = (const struct software_node **)acpi_device_get_match_data(&pdev->dev); if (!nodes) { - fdt_root = of_find_node_by_path("/"); - if (!fdt_root) - return -ENODEV; - - match = of_match_node(ssam_platform_hub_of_match, fdt_root); - of_node_put(fdt_root); - if (!match) - return -ENODEV; - - nodes = (const struct software_node **)match->data; + nodes = (const struct software_node **)of_machine_get_match_data(ssam_platform_hub_of_match); if (!nodes) return -ENODEV; } diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig index 19a2246f2770..2900407d6095 100644 --- a/drivers/platform/x86/intel/Kconfig +++ b/drivers/platform/x86/intel/Kconfig @@ -41,6 +41,19 @@ config INTEL_VBTN To compile this driver as a module, choose M here: the module will be called intel_vbtn. +config INTEL_EHL_PSE_IO + tristate "Intel Elkhart Lake PSE I/O driver" + depends on PCI + select AUXILIARY_BUS + help + Select this option to enable Intel Elkhart Lake PSE GPIO and Timed + I/O support. This driver enumerates the PCI parent device and + creates auxiliary child devices for these capabilities. The actual + functionalities are provided by their respective auxiliary drivers. + + To compile this driver as a module, choose M here: the module will + be called intel_ehl_pse_io. 
+ config INTEL_INT0002_VGPIO tristate "Intel ACPI INT0002 Virtual GPIO driver" depends on GPIOLIB && ACPI && PM_SLEEP diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile index 78acb414e154..138b13756158 100644 --- a/drivers/platform/x86/intel/Makefile +++ b/drivers/platform/x86/intel/Makefile @@ -21,6 +21,7 @@ intel-target-$(CONFIG_INTEL_HID_EVENT) += hid.o intel-target-$(CONFIG_INTEL_VBTN) += vbtn.o # Intel miscellaneous drivers +intel-target-$(CONFIG_INTEL_EHL_PSE_IO) += ehl_pse_io.o intel-target-$(CONFIG_INTEL_INT0002_VGPIO) += int0002_vgpio.o intel-target-$(CONFIG_INTEL_ISHTP_ECLITE) += ishtp_eclite.o intel-target-$(CONFIG_INTEL_OAKTRAIL) += oaktrail.o diff --git a/drivers/platform/x86/intel/chtwc_int33fe.c b/drivers/platform/x86/intel/chtwc_int33fe.c index 29e8b5432f4c..d183aa53c318 100644 --- a/drivers/platform/x86/intel/chtwc_int33fe.c +++ b/drivers/platform/x86/intel/chtwc_int33fe.c @@ -77,7 +77,7 @@ static const struct software_node max17047_node = { * software node. */ static struct software_node_ref_args fusb302_mux_refs[] = { - { .node = NULL }, + SOFTWARE_NODE_REFERENCE(NULL), }; static const struct property_entry fusb302_properties[] = { @@ -190,11 +190,6 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data) { software_node_unregister_node_group(node_group); - if (fusb302_mux_refs[0].node) { - fwnode_handle_put(software_node_fwnode(fusb302_mux_refs[0].node)); - fusb302_mux_refs[0].node = NULL; - } - if (data->dp) { data->dp->secondary = NULL; fwnode_handle_put(data->dp); @@ -202,7 +197,15 @@ static void cht_int33fe_remove_nodes(struct cht_int33fe_data *data) } } -static int cht_int33fe_add_nodes(struct cht_int33fe_data *data) +static void cht_int33fe_put_swnode(void *data) +{ + struct fwnode_handle *fwnode = data; + + fwnode_handle_put(fwnode); + fusb302_mux_refs[0] = SOFTWARE_NODE_REFERENCE(NULL); +} + +static int cht_int33fe_add_nodes(struct device *dev, struct cht_int33fe_data *data) { const struct software_node *mux_ref_node; int ret; @@ -212,17 +215,25 @@ static int cht_int33fe_add_nodes(struct cht_int33fe_data *data) * until the mux driver has created software node for the mux device. * It means we depend on the mux driver. This function will return * -EPROBE_DEFER until the mux device is registered. + * + * FIXME: the relevant software node exists in intel-xhci-usb-role-switch + * and - if exported - could be used to set up a static reference. */ mux_ref_node = software_node_find_by_name(NULL, "intel-xhci-usb-sw"); if (!mux_ref_node) return -EPROBE_DEFER; + ret = devm_add_action_or_reset(dev, cht_int33fe_put_swnode, + software_node_fwnode(mux_ref_node)); + if (ret) + return ret; + /* * Update node used in "usb-role-switch" property. Note that we * rely on software_node_register_node_group() to use the original * instance of properties instead of copying them. 
*/ - fusb302_mux_refs[0].node = mux_ref_node; + fusb302_mux_refs[0] = SOFTWARE_NODE_REFERENCE(mux_ref_node); ret = software_node_register_node_group(node_group); if (ret) @@ -345,7 +356,7 @@ static int cht_int33fe_typec_probe(struct platform_device *pdev) return fusb302_irq; } - ret = cht_int33fe_add_nodes(data); + ret = cht_int33fe_add_nodes(dev, data); if (ret) return ret; diff --git a/drivers/platform/x86/intel/ehl_pse_io.c b/drivers/platform/x86/intel/ehl_pse_io.c new file mode 100644 index 000000000000..861e14808b35 --- /dev/null +++ b/drivers/platform/x86/intel/ehl_pse_io.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel Elkhart Lake Programmable Service Engine (PSE) I/O + * + * Copyright (c) 2025 Intel Corporation. + * + * Author: Raag Jadav <raag.jadav@intel.com> + */ + +#include <linux/auxiliary_bus.h> +#include <linux/device/devres.h> +#include <linux/errno.h> +#include <linux/gfp_types.h> +#include <linux/ioport.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/sizes.h> +#include <linux/types.h> + +#include <linux/ehl_pse_io_aux.h> + +#define EHL_PSE_IO_DEV_SIZE SZ_4K + +static int ehl_pse_io_dev_create(struct pci_dev *pci, const char *name, int idx) +{ + struct device *dev = &pci->dev; + struct auxiliary_device *adev; + struct ehl_pse_io_data *data; + resource_size_t start, offset; + u32 id; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + id = (pci_domain_nr(pci->bus) << 16) | pci_dev_id(pci); + start = pci_resource_start(pci, 0); + offset = EHL_PSE_IO_DEV_SIZE * idx; + + data->mem = DEFINE_RES_MEM(start + offset, EHL_PSE_IO_DEV_SIZE); + data->irq = pci_irq_vector(pci, idx); + + adev = __devm_auxiliary_device_create(dev, EHL_PSE_IO_NAME, name, data, id); + + return adev ? 0 : -ENODEV; +} + +static int ehl_pse_io_probe(struct pci_dev *pci, const struct pci_device_id *id) +{ + int ret; + + ret = pcim_enable_device(pci); + if (ret) + return ret; + + pci_set_master(pci); + + ret = pci_alloc_irq_vectors(pci, 2, 2, PCI_IRQ_MSI); + if (ret < 0) + return ret; + + ret = ehl_pse_io_dev_create(pci, EHL_PSE_GPIO_NAME, 0); + if (ret) + return ret; + + return ehl_pse_io_dev_create(pci, EHL_PSE_TIO_NAME, 1); +} + +static const struct pci_device_id ehl_pse_io_ids[] = { + { PCI_VDEVICE(INTEL, 0x4b88) }, + { PCI_VDEVICE(INTEL, 0x4b89) }, + { } +}; +MODULE_DEVICE_TABLE(pci, ehl_pse_io_ids); + +static struct pci_driver ehl_pse_io_driver = { + .name = EHL_PSE_IO_NAME, + .id_table = ehl_pse_io_ids, + .probe = ehl_pse_io_probe, +}; +module_pci_driver(ehl_pse_io_driver); + +MODULE_AUTHOR("Raag Jadav <raag.jadav@intel.com>"); +MODULE_DESCRIPTION("Intel Elkhart Lake PSE I/O driver"); +MODULE_LICENSE("GPL"); |

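The vchiq-mmal half of this series exports a small kernel API (see mmal-vchiq.h above). The sketch below is not part of the patch; it shows how a client such as the bcm2835 camera driver might drive that API end to end. The component name "ril.camera", the MMAL_ENCODING_I420 constant (from the camera driver's mmal-encodings.h), and all example_* identifiers are illustrative assumptions, and error handling is abbreviated.

/*
 * Hypothetical vchiq-mmal client sketch, not part of the patch.
 * "ril.camera" and MMAL_ENCODING_I420 are illustrative assumptions.
 */
#include <linux/device.h>

#include "mmal-vchiq.h"

static void example_buffer_cb(struct vchiq_mmal_instance *instance,
			      struct vchiq_mmal_port *port,
			      int status, struct mmal_buffer *buf)
{
	/* Buffer handed back by the VPU; consume the payload, then
	 * resubmit it so the port stays supplied.
	 */
	if (!status)
		vchiq_mmal_submit_buffer(instance, port, buf);
}

static int example_start(struct device *dev)
{
	struct vchiq_mmal_instance *instance;
	struct vchiq_mmal_component *camera;
	u32 major, minor;
	int ret;

	ret = vchiq_mmal_init(dev, &instance);
	if (ret)
		return ret;

	if (!vchiq_mmal_version(instance, &major, &minor))
		dev_info(dev, "MMAL %u.%u\n", major, minor);

	ret = vchiq_mmal_component_init(instance, "ril.camera", &camera);
	if (ret)
		goto err_finalise;

	/* Ports were already populated via port_info_get() during
	 * component init; tweak the format, push it to the VPU and
	 * read back what was actually accepted.
	 */
	camera->output[0].format.encoding = MMAL_ENCODING_I420;
	ret = vchiq_mmal_port_set_format(instance, &camera->output[0]);
	if (ret)
		goto err_component;

	ret = vchiq_mmal_component_enable(instance, camera);
	if (ret)
		goto err_component;

	/* Buffers queued while the port was disabled are flushed to
	 * the VPU here.
	 */
	ret = vchiq_mmal_port_enable(instance, &camera->output[0],
				     example_buffer_cb);
	if (ret)
		goto err_disable;

	return 0;

err_disable:
	vchiq_mmal_component_disable(instance, camera);
err_component:
	vchiq_mmal_component_finalise(instance, camera);
err_finalise:
	vchiq_mmal_finalise(instance);
	return ret;
}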
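Buffer submission is decoupled from port state: vchiq_mmal_submit_buffer() parks buffers on a still-disabled port's list, and port_enable() pushes the queued headers to the VPU later. A minimal sketch of pre-queueing, assuming the bufs[] array was allocated and its payloads mapped elsewhere; example_queue_buffers() itself is hypothetical.

/*
 * Hypothetical helper, not part of the patch: pre-queue buffers on a
 * (possibly still disabled) port.
 */
static int example_queue_buffers(struct vchiq_mmal_instance *instance,
				 struct vchiq_mmal_port *port,
				 struct mmal_buffer *bufs,
				 unsigned int num)
{
	unsigned int i;
	int ret;

	for (i = 0; i < num; i++) {
		/* Attach the message context used to track the buffer
		 * across the VCHIQ transport.
		 */
		ret = mmal_vchi_buffer_init(instance, &bufs[i]);
		if (ret)
			return ret;

		/* While the port is disabled this only parks the
		 * buffer on port->buffers; port_enable() flushes the
		 * queued headers to the VPU.
		 */
		ret = vchiq_mmal_submit_buffer(instance, port, &bufs[i]);
		if (ret)
			return ret;
	}

	return 0;
}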
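The parameter helpers treat the value as an opaque blob and do the id/size header bookkeeping internally (the (2 * sizeof(u32)) adjustments in port_parameter_set()/_get() above), so callers pass only the payload. Hypothetical u32 wrappers, not part of the patch:

static int example_param_set_u32(struct vchiq_mmal_instance *instance,
				 struct vchiq_mmal_port *port,
				 u32 id, u32 value)
{
	return vchiq_mmal_port_parameter_set(instance, port, id,
					     &value, sizeof(value));
}

static int example_param_get_u32(struct vchiq_mmal_instance *instance,
				 struct vchiq_mmal_port *port,
				 u32 id, u32 *value)
{
	u32 size = sizeof(*value);

	/* On return, size holds the parameter's true size even when it
	 * was larger than the buffer supplied.
	 */
	return vchiq_mmal_port_parameter_get(instance, port, id,
					     value, &size);
}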
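Tunnelled connections keep data on the VPU side: vchiq_mmal_port_connect_tunnel() copies the source format to the destination, sets it, and issues MMAL_MSG_PORT_ACTION_TYPE_CONNECT, while a NULL destination tears an existing connection down. A hypothetical pairing of two components (names and roles are illustrative, not from the patch):

/*
 * Hypothetical tunnel setup, not part of the patch: route a source
 * component's output straight into a sink's input so buffers never
 * pass through the kernel client.
 */
static int example_tunnel(struct vchiq_mmal_instance *instance,
			  struct vchiq_mmal_component *camera,
			  struct vchiq_mmal_component *renderer)
{
	return vchiq_mmal_port_connect_tunnel(instance,
					      &camera->output[0],
					      &renderer->input[0]);
}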
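On the Intel side, ehl_pse_io.c only enumerates the PCI parent and creates the two auxiliary cells; the GPIO and Timed I/O function drivers bind to them separately. The sketch below is a hypothetical consumer, not part of the patch: it assumes EHL_PSE_IO_NAME and EHL_PSE_GPIO_NAME are string literals in linux/ehl_pse_io_aux.h, and that __devm_auxiliary_device_create() stores the ehl_pse_io_data pointer as the child's platform data.

/*
 * Hypothetical auxiliary driver binding to the GPIO cell created by
 * ehl_pse_io_dev_create().
 */
#include <linux/auxiliary_bus.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>

#include <linux/ehl_pse_io_aux.h>

static int example_pse_gpio_probe(struct auxiliary_device *adev,
				  const struct auxiliary_device_id *id)
{
	struct ehl_pse_io_data *data = dev_get_platdata(&adev->dev);
	void __iomem *base;

	/* data->mem is the 4K slice of BAR 0 carved out for this cell,
	 * data->irq the MSI vector reserved for it.
	 */
	base = devm_ioremap_resource(&adev->dev, &data->mem);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* register a gpio_chip (or TIO device) against base/data->irq */
	return 0;
}

static const struct auxiliary_device_id example_pse_gpio_ids[] = {
	/* auxiliary bus match strings are "<modname>.<devname>" */
	{ .name = EHL_PSE_IO_NAME "." EHL_PSE_GPIO_NAME },
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, example_pse_gpio_ids);

static struct auxiliary_driver example_pse_gpio_driver = {
	.id_table = example_pse_gpio_ids,
	.probe = example_pse_gpio_probe,
};
module_auxiliary_driver(example_pse_gpio_driver);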