From bf73fc0fa9cf78e37d6ee99e8d12bfa2083594d6 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Wed, 3 Jul 2019 15:37:02 +0100 Subject: drm/i915: Show support for accurate sw PMU busyness tracking Expose whether or not we support the PMU software tracking in our scheduler capabilities, so userspace can query at runtime. v2: Use I915_SCHEDULER_CAP_ENGINE_BUSY_STATS for a less ambiguous capability name. Signed-off-by: Chris Wilson Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20190703143702.11339-1-chris@chris-wilson.co.uk --- include/uapi/drm/i915_drm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/uapi') diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 328d05e77d9f..469dc512cca3 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -521,6 +521,7 @@ typedef struct drm_i915_irq_wait { #define I915_SCHEDULER_CAP_PRIORITY (1ul << 1) #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) #define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3) +#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4) #define I915_PARAM_HUC_STATUS 42 -- cgit v1.2.3 From 22be8233b34f4f468934c5fefcbe6151766fb8f2 Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Thu, 11 Jul 2019 04:53:25 -0400 Subject: media: videodev2.h: change V4L2_PIX_FMT_BGRA444 define: fourcc was already in use The V4L2_PIX_FMT_BGRA444 define clashed with the pre-existing V4L2_PIX_FMT_SGRBG12 which strangely enough used the same fourcc, even though that fourcc made no sense for a Bayer format. In any case, you can't have duplicates, so change the fourcc of V4L2_PIX_FMT_BGRA444. Signed-off-by: Hans Verkuil Cc: # for v5.2 and up Fixes: 6c84f9b1d2900 ("media: v4l: Add definitions for missing 16-bit RGB4444 formats") Reviewed-by: Laurent Pinchart Reviewed-by: Kieran Bingham Signed-off-by: Mauro Carvalho Chehab --- include/uapi/linux/videodev2.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 9d9705ceda76..2427bc4d8eba 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -518,7 +518,13 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16 rrrrgggg bbbbxxxx */ #define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16 aaaabbbb ggggrrrr */ #define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16 xxxxbbbb ggggrrrr */ -#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('B', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */ + +/* + * Originally this had 'BA12' as fourcc, but this clashed with the older + * V4L2_PIX_FMT_SGRBG12 which inexplicably used that same fourcc. + * So use 'GA12' instead for V4L2_PIX_FMT_BGRA444. + */ +#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('G', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */ #define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16 bbbbgggg rrrrxxxx */ #define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */ #define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16 ARGB-1-5-5-5 */ -- cgit v1.2.3 From 5edaac063bbf1267260ad2a5b9bb803399343e58 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Thu, 27 Jun 2019 11:58:32 +0200 Subject: nl80211: fix NL80211_HE_MAX_CAPABILITY_LEN NL80211_HE_MAX_CAPABILITY_LEN has changed between D2.0 and D4.0. It is now MAC (6) + PHY (11) + MCS (12) + PPE (25) = 54. 
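Spelled out as C arithmetic (illustrative only -- nl80211.h defines just the 54-byte total; the per-field macros below are made-up names, not kernel defines):

#define HE_MAC_CAP_LEN 6 /* MAC capabilities */
#define HE_PHY_CAP_LEN 11 /* PHY capabilities */
#define HE_MCS_NSS_LEN 12 /* supported MCS/NSS set */
#define HE_PPE_THRES_LEN 25 /* PPE thresholds */

/* 6 + 11 + 12 + 25 = 54 */
#define NL80211_HE_MAX_CAPABILITY_LEN \
	(HE_MAC_CAP_LEN + HE_PHY_CAP_LEN + HE_MCS_NSS_LEN + HE_PPE_THRES_LEN)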
Signed-off-by: John Crispin Link: https://lore.kernel.org/r/20190627095832.19445-1-john@phrozen.org Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 75758ec26c8b..beb9a9d0c00a 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2863,7 +2863,7 @@ enum nl80211_attrs { #define NL80211_HT_CAPABILITY_LEN 26 #define NL80211_VHT_CAPABILITY_LEN 12 #define NL80211_HE_MIN_CAPABILITY_LEN 16 -#define NL80211_HE_MAX_CAPABILITY_LEN 51 +#define NL80211_HE_MAX_CAPABILITY_LEN 54 #define NL80211_MAX_NR_CIPHER_SUITES 5 #define NL80211_MAX_NR_AKM_SUITES 2 -- cgit v1.2.3 From 5d4b45a1dd7b00feab57624035dcdbc1bab2e0f8 Mon Sep 17 00:00:00 2001 From: Markus Koch Date: Sun, 21 Jul 2019 20:20:28 +0300 Subject: Input: add support for the FlySky FS-iA6B RC receiver This patch adds support for the FlySky FS-iA6B RC receiver (serial IBUS). It allows the usage of the FlySky FS-i6 and other AFHDS compliant remote controls as a joystick input device. To use it, a patch to inputattach which adds the FS-iA6B as a 115200 baud serial device is required. I will upstream it after this patch is merged. More information about the hardware can be found here: https://notsyncing.net/?p=blog&b=2018.linux-fsia6b Signed-off-by: Markus Koch Signed-off-by: Dmitry Torokhov --- MAINTAINERS | 6 ++ drivers/input/joystick/Kconfig | 10 ++ drivers/input/joystick/Makefile | 5 +- drivers/input/joystick/fsia6b.c | 231 ++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/serio.h | 1 + 5 files changed, 251 insertions(+), 2 deletions(-) create mode 100644 drivers/input/joystick/fsia6b.c (limited to 'include/uapi') diff --git a/MAINTAINERS b/MAINTAINERS index 677ef41cb012..cdd25b0a1218 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12394,6 +12394,12 @@ S: Maintained F: Documentation/input/devices/pxrc.rst F: drivers/input/joystick/pxrc.c +FLYSKY FSIA6B RC RECEIVER +M: Markus Koch +L: linux-input@vger.kernel.org +S: Maintained +F: drivers/input/joystick/fsia6b.c + PHONET PROTOCOL M: Remi Denis-Courmont S: Supported diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 72b932901d00..312b854b5506 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -362,4 +362,14 @@ config JOYSTICK_PXRC To compile this driver as a module, choose M here: the module will be called pxrc. +config JOYSTICK_FSIA6B + tristate "FlySky FS-iA6B RC Receiver" + select SERIO + help + Say Y here if you use a FlySky FS-i6 RC remote control along with the + FS-iA6B RC receiver as a joystick input device. + + To compile this driver as a module, choose M here: the + module will be called fsia6b. 
+ endif diff --git a/drivers/input/joystick/Makefile b/drivers/input/joystick/Makefile index dd0492ebbed7..8656023f6ef5 100644 --- a/drivers/input/joystick/Makefile +++ b/drivers/input/joystick/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_JOYSTICK_AS5011) += as5011.o obj-$(CONFIG_JOYSTICK_ANALOG) += analog.o obj-$(CONFIG_JOYSTICK_COBRA) += cobra.o obj-$(CONFIG_JOYSTICK_DB9) += db9.o +obj-$(CONFIG_JOYSTICK_FSIA6B) += fsia6b.o obj-$(CONFIG_JOYSTICK_GAMECON) += gamecon.o obj-$(CONFIG_JOYSTICK_GF2K) += gf2k.o obj-$(CONFIG_JOYSTICK_GRIP) += grip.o @@ -23,7 +24,7 @@ obj-$(CONFIG_JOYSTICK_JOYDUMP) += joydump.o obj-$(CONFIG_JOYSTICK_MAGELLAN) += magellan.o obj-$(CONFIG_JOYSTICK_MAPLE) += maplecontrol.o obj-$(CONFIG_JOYSTICK_PSXPAD_SPI) += psxpad-spi.o -obj-$(CONFIG_JOYSTICK_PXRC) += pxrc.o +obj-$(CONFIG_JOYSTICK_PXRC) += pxrc.o obj-$(CONFIG_JOYSTICK_SIDEWINDER) += sidewinder.o obj-$(CONFIG_JOYSTICK_SPACEBALL) += spaceball.o obj-$(CONFIG_JOYSTICK_SPACEORB) += spaceorb.o @@ -32,7 +33,7 @@ obj-$(CONFIG_JOYSTICK_TMDC) += tmdc.o obj-$(CONFIG_JOYSTICK_TURBOGRAFX) += turbografx.o obj-$(CONFIG_JOYSTICK_TWIDJOY) += twidjoy.o obj-$(CONFIG_JOYSTICK_WARRIOR) += warrior.o +obj-$(CONFIG_JOYSTICK_WALKERA0701) += walkera0701.o obj-$(CONFIG_JOYSTICK_XPAD) += xpad.o obj-$(CONFIG_JOYSTICK_ZHENHUA) += zhenhua.o -obj-$(CONFIG_JOYSTICK_WALKERA0701) += walkera0701.o diff --git a/drivers/input/joystick/fsia6b.c b/drivers/input/joystick/fsia6b.c new file mode 100644 index 000000000000..e78c4c768990 --- /dev/null +++ b/drivers/input/joystick/fsia6b.c @@ -0,0 +1,231 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * FS-iA6B iBus RC receiver driver + * + * This driver provides all 14 channels of the FlySky FS-iA6B RC receiver + * as analog values. + * + * Additionally, the channels can be converted to discrete switch values. + * By default, it is configured for the official FS-i6 remote control. + * If you use a different hardware configuration, you can configure it + * using the `switch_config` parameter.
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_DESC "FS-iA6B iBus RC receiver" + +MODULE_AUTHOR("Markus Koch "); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); + +#define IBUS_SERVO_COUNT 14 + +static char *switch_config = "00000022320000"; +module_param(switch_config, charp, 0444); +MODULE_PARM_DESC(switch_config, + "Amount of switch positions per channel (14 characters, 0-3)"); + +static int fsia6b_axes[IBUS_SERVO_COUNT] = { + ABS_X, ABS_Y, + ABS_Z, ABS_RX, + ABS_RY, ABS_RZ, + ABS_HAT0X, ABS_HAT0Y, + ABS_HAT1X, ABS_HAT1Y, + ABS_HAT2X, ABS_HAT2Y, + ABS_HAT3X, ABS_HAT3Y +}; + +enum ibus_state { SYNC, COLLECT, PROCESS }; + +struct ibus_packet { + enum ibus_state state; + + int offset; + u16 ibuf; + u16 channel[IBUS_SERVO_COUNT]; +}; + +struct fsia6b { + struct input_dev *dev; + struct ibus_packet packet; + + char phys[32]; +}; + +static irqreturn_t fsia6b_serio_irq(struct serio *serio, + unsigned char data, unsigned int flags) +{ + struct fsia6b *fsia6b = serio_get_drvdata(serio); + int i; + int sw_state; + int sw_id = BTN_0; + + fsia6b->packet.ibuf = (data << 8) | ((fsia6b->packet.ibuf >> 8) & 0xFF); + + switch (fsia6b->packet.state) { + case SYNC: + if (fsia6b->packet.ibuf == 0x4020) + fsia6b->packet.state = COLLECT; + break; + + case COLLECT: + fsia6b->packet.state = PROCESS; + break; + + case PROCESS: + fsia6b->packet.channel[fsia6b->packet.offset] = + fsia6b->packet.ibuf; + fsia6b->packet.offset++; + + if (fsia6b->packet.offset == IBUS_SERVO_COUNT) { + fsia6b->packet.offset = 0; + fsia6b->packet.state = SYNC; + for (i = 0; i < IBUS_SERVO_COUNT; ++i) { + input_report_abs(fsia6b->dev, fsia6b_axes[i], + fsia6b->packet.channel[i]); + + sw_state = 0; + if (fsia6b->packet.channel[i] > 1900) + sw_state = 1; + else if (fsia6b->packet.channel[i] < 1100) + sw_state = 2; + + switch (switch_config[i]) { + case '3': + input_report_key(fsia6b->dev, + sw_id++, + sw_state == 0); + /* fall-through */ + case '2': + input_report_key(fsia6b->dev, + sw_id++, + sw_state == 1); + /* fall-through */ + case '1': + input_report_key(fsia6b->dev, + sw_id++, + sw_state == 2); + } + } + input_sync(fsia6b->dev); + } else { + fsia6b->packet.state = COLLECT; + } + break; + } + + return IRQ_HANDLED; +} + +static int fsia6b_serio_connect(struct serio *serio, struct serio_driver *drv) +{ + struct fsia6b *fsia6b; + struct input_dev *input_dev; + int err; + int i, j; + int sw_id = 0; + + fsia6b = kzalloc(sizeof(*fsia6b), GFP_KERNEL); + if (!fsia6b) + return -ENOMEM; + + fsia6b->packet.ibuf = 0; + fsia6b->packet.offset = 0; + fsia6b->packet.state = SYNC; + + serio_set_drvdata(serio, fsia6b); + + input_dev = input_allocate_device(); + if (!input_dev) { + err = -ENOMEM; + goto fail1; + } + fsia6b->dev = input_dev; + + snprintf(fsia6b->phys, sizeof(fsia6b->phys), "%s/input0", serio->phys); + + input_dev->name = DRIVER_DESC; + input_dev->phys = fsia6b->phys; + input_dev->id.bustype = BUS_RS232; + input_dev->id.vendor = SERIO_FSIA6B; + input_dev->id.product = serio->id.id; + input_dev->id.version = 0x0100; + input_dev->dev.parent = &serio->dev; + + for (i = 0; i < IBUS_SERVO_COUNT; i++) + input_set_abs_params(input_dev, fsia6b_axes[i], + 1000, 2000, 2, 2); + + /* Register switch configuration */ + for (i = 0; i < IBUS_SERVO_COUNT; i++) { + if (switch_config[i] < '0' || switch_config[i] > '3') { + dev_err(&fsia6b->dev->dev, + "Invalid switch configuration supplied for fsia6b.\n"); + err = -EINVAL; + goto fail2; + } + + for (j = '1'; j <= switch_config[i]; j++) { + 
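+				/* one button per discrete switch position ('2' registers 2 keys, '3' registers 3) */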
input_set_capability(input_dev, EV_KEY, BTN_0 + sw_id); + sw_id++; + } + } + + err = serio_open(serio, drv); + if (err) + goto fail2; + + err = input_register_device(fsia6b->dev); + if (err) + goto fail3; + + return 0; + +fail3: serio_close(serio); +fail2: input_free_device(input_dev); +fail1: serio_set_drvdata(serio, NULL); + kfree(fsia6b); + return err; +} + +static void fsia6b_serio_disconnect(struct serio *serio) +{ + struct fsia6b *fsia6b = serio_get_drvdata(serio); + + serio_close(serio); + serio_set_drvdata(serio, NULL); + input_unregister_device(fsia6b->dev); + kfree(fsia6b); +} + +static const struct serio_device_id fsia6b_serio_ids[] = { + { + .type = SERIO_RS232, + .proto = SERIO_FSIA6B, + .id = SERIO_ANY, + .extra = SERIO_ANY, + }, + { 0 } +}; + +MODULE_DEVICE_TABLE(serio, fsia6b_serio_ids); + +static struct serio_driver fsia6b_serio_drv = { + .driver = { + .name = "fsia6b" + }, + .description = DRIVER_DESC, + .id_table = fsia6b_serio_ids, + .interrupt = fsia6b_serio_irq, + .connect = fsia6b_serio_connect, + .disconnect = fsia6b_serio_disconnect +}; + +module_serio_driver(fsia6b_serio_drv); diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h index a0cac1d8670d..50e991952c97 100644 --- a/include/uapi/linux/serio.h +++ b/include/uapi/linux/serio.h @@ -82,5 +82,6 @@ #define SERIO_EGALAX 0x3f #define SERIO_PULSE8_CEC 0x40 #define SERIO_RAINSHADOW_CEC 0x41 +#define SERIO_FSIA6B 0x42 #endif /* _UAPI_SERIO_H */ -- cgit v1.2.3 From f240652b6032b48ad7fa35c5e701cc4c8d697c0b Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Fri, 5 Jul 2019 10:53:21 -0700 Subject: x86/mpx: Remove MPX APIs MPX is being removed from the kernel due to a lack of support in the toolchain going forward (gcc). The first step is to remove the userspace-visible ABIs so that applications will stop using it. The most visible ones are the enable/disable prctl()s. Remove them first. This is the most minimal and least invasive change needed to ensure that apps stop using MPX with new kernels. Signed-off-by: Dave Hansen Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/20190705175321.DB42F0AD@viggo.jf.intel.com --- include/uapi/linux/prctl.h | 2 +- kernel/sys.c | 16 ++-------------- 2 files changed, 3 insertions(+), 15 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 094bb03b9cc2..961e0a4a0f73 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -181,7 +181,7 @@ struct prctl_mm_map { #define PR_GET_THP_DISABLE 42 /* - * Tell the kernel to start/stop helping userspace manage bounds tables.
+ * No longer implemented, but left here to ensure the numbers stay reserved: */ #define PR_MPX_ENABLE_MANAGEMENT 43 #define PR_MPX_DISABLE_MANAGEMENT 44 diff --git a/kernel/sys.c b/kernel/sys.c index 2969304c29fe..384b000b7865 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -103,12 +103,6 @@ #ifndef SET_TSC_CTL # define SET_TSC_CTL(a) (-EINVAL) #endif -#ifndef MPX_ENABLE_MANAGEMENT -# define MPX_ENABLE_MANAGEMENT() (-EINVAL) -#endif -#ifndef MPX_DISABLE_MANAGEMENT -# define MPX_DISABLE_MANAGEMENT() (-EINVAL) -#endif #ifndef GET_FP_MODE # define GET_FP_MODE(a) (-EINVAL) #endif @@ -2456,15 +2450,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, up_write(&me->mm->mmap_sem); break; case PR_MPX_ENABLE_MANAGEMENT: - if (arg2 || arg3 || arg4 || arg5) - return -EINVAL; - error = MPX_ENABLE_MANAGEMENT(); - break; case PR_MPX_DISABLE_MANAGEMENT: - if (arg2 || arg3 || arg4 || arg5) - return -EINVAL; - error = MPX_DISABLE_MANAGEMENT(); - break; + /* No longer implemented: */ + return -EINVAL; case PR_SET_FP_MODE: error = SET_FP_MODE(me, arg2); break; -- cgit v1.2.3 From 62ec3d13601bd626ca7a0edef6d45dbb753d94e8 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 21 Jul 2019 23:23:08 +0900 Subject: ASoC: SOF: use __u32 instead of uint32_t in uapi headers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When CONFIG_UAPI_HEADER_TEST=y, exported headers are compile-tested to make sure they can be included from user-space. Currently, header.h and fw.h are excluded from the test coverage. To make them join the compile-test, we need to fix the build errors attached below. For a case like this, we decided to use __u{8,16,32,64} variable types in this discussion: https://lkml.org/lkml/2019/6/5/18 Build log: CC usr/include/sound/sof/header.h.s CC usr/include/sound/sof/fw.h.s In file included from :32:0: ./usr/include/sound/sof/header.h:19:2: error: unknown type name ‘uint32_t’ uint32_t magic; /**< 'S', 'O', 'F', '\0' */ ^~~~~~~~ ./usr/include/sound/sof/header.h:20:2: error: unknown type name ‘uint32_t’ uint32_t type; /**< component specific type */ ^~~~~~~~ ./usr/include/sound/sof/header.h:21:2: error: unknown type name ‘uint32_t’ uint32_t size; /**< size in bytes of data excl. 
this struct */ ^~~~~~~~ ./usr/include/sound/sof/header.h:22:2: error: unknown type name ‘uint32_t’ uint32_t abi; /**< SOF ABI version */ ^~~~~~~~ ./usr/include/sound/sof/header.h:23:2: error: unknown type name ‘uint32_t’ uint32_t reserved[4]; /**< reserved for future use */ ^~~~~~~~ ./usr/include/sound/sof/header.h:24:2: error: unknown type name ‘uint32_t’ uint32_t data[0]; /**< Component data - opaque to core */ ^~~~~~~~ In file included from :32:0: ./usr/include/sound/sof/fw.h:49:2: error: unknown type name ‘uint32_t’ uint32_t size; /* bytes minus this header */ ^~~~~~~~ ./usr/include/sound/sof/fw.h:50:2: error: unknown type name ‘uint32_t’ uint32_t offset; /* offset from base */ ^~~~~~~~ ./usr/include/sound/sof/fw.h:64:2: error: unknown type name ‘uint32_t’ uint32_t size; /* bytes minus this header */ ^~~~~~~~ ./usr/include/sound/sof/fw.h:65:2: error: unknown type name ‘uint32_t’ uint32_t num_blocks; /* number of blocks */ ^~~~~~~~ ./usr/include/sound/sof/fw.h:73:2: error: unknown type name ‘uint32_t’ uint32_t file_size; /* size of file minus this header */ ^~~~~~~~ ./usr/include/sound/sof/fw.h:74:2: error: unknown type name ‘uint32_t’ uint32_t num_modules; /* number of modules */ ^~~~~~~~ ./usr/include/sound/sof/fw.h:75:2: error: unknown type name ‘uint32_t’ uint32_t abi; /* version of header format */ ^~~~~~~~ Signed-off-by: Masahiro Yamada Link: https://lore.kernel.org/r/20190721142308.30306-1-yamada.masahiro@socionext.com Signed-off-by: Mark Brown --- include/uapi/sound/sof/fw.h | 16 +++++++++------- include/uapi/sound/sof/header.h | 14 ++++++++------ 2 files changed, 17 insertions(+), 13 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/sound/sof/fw.h b/include/uapi/sound/sof/fw.h index 1afca973eb09..e9f697467a86 100644 --- a/include/uapi/sound/sof/fw.h +++ b/include/uapi/sound/sof/fw.h @@ -13,6 +13,8 @@ #ifndef __INCLUDE_UAPI_SOF_FW_H__ #define __INCLUDE_UAPI_SOF_FW_H__ +#include <linux/types.h> + #define SND_SOF_FW_SIG_SIZE 4 #define SND_SOF_FW_ABI 1 #define SND_SOF_FW_SIG "Reef" @@ -46,8 +48,8 @@ enum snd_sof_fw_blk_type { struct snd_sof_blk_hdr { enum snd_sof_fw_blk_type type; - uint32_t size; /* bytes minus this header */ - uint32_t offset; /* offset from base */ + __u32 size; /* bytes minus this header */ + __u32 offset; /* offset from base */ } __packed; /* @@ -61,8 +63,8 @@ enum snd_sof_fw_mod_type { struct snd_sof_mod_hdr { enum snd_sof_fw_mod_type type; - uint32_t size; /* bytes minus this header */ - uint32_t num_blocks; /* number of blocks */ + __u32 size; /* bytes minus this header */ + __u32 num_blocks; /* number of blocks */ } __packed; /* @@ -70,9 +72,9 @@ struct snd_sof_mod_hdr { */ struct snd_sof_fw_header { unsigned char sig[SND_SOF_FW_SIG_SIZE]; /* "Reef" */ - uint32_t file_size; /* size of file minus this header */ - uint32_t num_modules; /* number of modules */ - uint32_t abi; /* version of header format */ + __u32 file_size; /* size of file minus this header */ + __u32 num_modules; /* number of modules */ + __u32 abi; /* version of header format */ } __packed; #endif diff --git a/include/uapi/sound/sof/header.h b/include/uapi/sound/sof/header.h index 7868990b0d6f..5f4518e7a972 100644 --- a/include/uapi/sound/sof/header.h +++ b/include/uapi/sound/sof/header.h @@ -9,6 +9,8 @@ #ifndef __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__ #define __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__ +#include <linux/types.h> + /* * Header for all non IPC ABI data. * @@ -16,12 +18,12 @@ * Used by any bespoke component data structures or binary blobs.
*/ struct sof_abi_hdr { - uint32_t magic; /**< 'S', 'O', 'F', '\0' */ - uint32_t type; /**< component specific type */ - uint32_t size; /**< size in bytes of data excl. this struct */ - uint32_t abi; /**< SOF ABI version */ - uint32_t reserved[4]; /**< reserved for future use */ - uint32_t data[0]; /**< Component data - opaque to core */ + __u32 magic; /**< 'S', 'O', 'F', '\0' */ + __u32 type; /**< component specific type */ + __u32 size; /**< size in bytes of data excl. this struct */ + __u32 abi; /**< SOF ABI version */ + __u32 reserved[4]; /**< reserved for future use */ + __u32 data[0]; /**< Component data - opaque to core */ } __packed; #endif -- cgit v1.2.3 From ae24fb49d01103c80d6ff3b78714259c1c62c958 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Mon, 22 Jul 2019 15:40:07 +0100 Subject: iommu/virtio: Update to most recent specification Following specification review a few things were changed in v8 of the virtio-iommu series [1], but have been omitted when merging the base driver. Add them now: * Remove the EXEC flag. * Add feature bit for the MMIO flag. * Change domain_bits to domain_range. * Add NOMEM status flag. [1] https://lore.kernel.org/linux-iommu/20190530170929.19366-1-jean-philippe.brucker@arm.com/ Fixes: edcd69ab9a32 ("iommu: Add virtio-iommu driver") Reported-by: Eric Auger Signed-off-by: Jean-Philippe Brucker Signed-off-by: Michael S. Tsirkin Reviewed-by: Eric Auger Tested-by: Eric Auger Acked-by: Joerg Roedel --- drivers/iommu/virtio-iommu.c | 40 ++++++++++++++++++++++++++++----------- include/uapi/linux/virtio_iommu.h | 32 +++++++++++++++++-------------- 2 files changed, 47 insertions(+), 25 deletions(-) (limited to 'include/uapi') diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 433f4d2ee956..80a740df0737 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -2,7 +2,7 @@ /* * Virtio driver for the paravirtualized IOMMU * - * Copyright (C) 2018 Arm Limited + * Copyright (C) 2019 Arm Limited */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -47,7 +47,10 @@ struct viommu_dev { /* Device configuration */ struct iommu_domain_geometry geometry; u64 pgsize_bitmap; - u8 domain_bits; + u32 first_domain; + u32 last_domain; + /* Supported MAP flags */ + u32 map_flags; u32 probe_size; }; @@ -62,6 +65,7 @@ struct viommu_domain { struct viommu_dev *viommu; struct mutex mutex; /* protects viommu pointer */ unsigned int id; + u32 map_flags; spinlock_t mappings_lock; struct rb_root_cached mappings; @@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len) return -ENOENT; case VIRTIO_IOMMU_S_FAULT: return -EFAULT; + case VIRTIO_IOMMU_S_NOMEM: + return -ENOMEM; case VIRTIO_IOMMU_S_IOERR: case VIRTIO_IOMMU_S_DEVERR: default: @@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu, { int ret; struct viommu_domain *vdomain = to_viommu_domain(domain); - unsigned int max_domain = viommu->domain_bits > 31 ? 
~0 : - (1U << viommu->domain_bits) - 1; vdomain->viommu = viommu; + vdomain->map_flags = viommu->map_flags; domain->pgsize_bitmap = viommu->pgsize_bitmap; domain->geometry = viommu->geometry; - ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL); + ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, + viommu->last_domain, GFP_KERNEL); if (ret >= 0) vdomain->id = (unsigned int)ret; @@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { int ret; - int flags; + u32 flags; struct virtio_iommu_req_map map; struct viommu_domain *vdomain = to_viommu_domain(domain); @@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova, (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) | (prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0); + if (flags & ~vdomain->map_flags) + return -EINVAL; + ret = viommu_add_mapping(vdomain, iova, paddr, size, flags); if (ret) return ret; @@ -1027,7 +1036,8 @@ static int viommu_probe(struct virtio_device *vdev) goto err_free_vqs; } - viommu->domain_bits = 32; + viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE; + viommu->last_domain = ~0U; /* Optional features */ virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, @@ -1038,9 +1048,13 @@ static int viommu_probe(struct virtio_device *vdev) struct virtio_iommu_config, input_range.end, &input_end); - virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS, - struct virtio_iommu_config, domain_bits, - &viommu->domain_bits); + virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, + struct virtio_iommu_config, domain_range.start, + &viommu->first_domain); + + virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, + struct virtio_iommu_config, domain_range.end, + &viommu->last_domain); virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE, struct virtio_iommu_config, probe_size, @@ -1052,6 +1066,9 @@ static int viommu_probe(struct virtio_device *vdev) .force_aperture = true, }; + if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO)) + viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO; + viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap; virtio_device_ready(vdev); @@ -1130,9 +1147,10 @@ static void viommu_config_changed(struct virtio_device *vdev) static unsigned int features[] = { VIRTIO_IOMMU_F_MAP_UNMAP, - VIRTIO_IOMMU_F_DOMAIN_BITS, VIRTIO_IOMMU_F_INPUT_RANGE, + VIRTIO_IOMMU_F_DOMAIN_RANGE, VIRTIO_IOMMU_F_PROBE, + VIRTIO_IOMMU_F_MMIO, }; static struct virtio_device_id id_table[] = { diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h index ba1b460c9944..237e36a280cb 100644 --- a/include/uapi/linux/virtio_iommu.h +++ b/include/uapi/linux/virtio_iommu.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /* - * Virtio-iommu definition v0.9 + * Virtio-iommu definition v0.12 * - * Copyright (C) 2018 Arm Ltd. + * Copyright (C) 2019 Arm Ltd. 
*/ #ifndef _UAPI_LINUX_VIRTIO_IOMMU_H #define _UAPI_LINUX_VIRTIO_IOMMU_H @@ -11,26 +11,31 @@ /* Feature bits */ #define VIRTIO_IOMMU_F_INPUT_RANGE 0 -#define VIRTIO_IOMMU_F_DOMAIN_BITS 1 +#define VIRTIO_IOMMU_F_DOMAIN_RANGE 1 #define VIRTIO_IOMMU_F_MAP_UNMAP 2 #define VIRTIO_IOMMU_F_BYPASS 3 #define VIRTIO_IOMMU_F_PROBE 4 +#define VIRTIO_IOMMU_F_MMIO 5 -struct virtio_iommu_range { - __u64 start; - __u64 end; +struct virtio_iommu_range_64 { + __le64 start; + __le64 end; +}; + +struct virtio_iommu_range_32 { + __le32 start; + __le32 end; }; struct virtio_iommu_config { /* Supported page sizes */ - __u64 page_size_mask; + __le64 page_size_mask; /* Supported IOVA range */ - struct virtio_iommu_range input_range; + struct virtio_iommu_range_64 input_range; /* Max domain ID size */ - __u8 domain_bits; - __u8 padding[3]; + struct virtio_iommu_range_32 domain_range; /* Probe buffer size */ - __u32 probe_size; + __le32 probe_size; }; /* Request types */ @@ -49,6 +54,7 @@ struct virtio_iommu_config { #define VIRTIO_IOMMU_S_RANGE 0x05 #define VIRTIO_IOMMU_S_NOENT 0x06 #define VIRTIO_IOMMU_S_FAULT 0x07 +#define VIRTIO_IOMMU_S_NOMEM 0x08 struct virtio_iommu_req_head { __u8 type; @@ -78,12 +84,10 @@ struct virtio_iommu_req_detach { #define VIRTIO_IOMMU_MAP_F_READ (1 << 0) #define VIRTIO_IOMMU_MAP_F_WRITE (1 << 1) -#define VIRTIO_IOMMU_MAP_F_EXEC (1 << 2) -#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 3) +#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 2) #define VIRTIO_IOMMU_MAP_F_MASK (VIRTIO_IOMMU_MAP_F_READ | \ VIRTIO_IOMMU_MAP_F_WRITE | \ - VIRTIO_IOMMU_MAP_F_EXEC | \ VIRTIO_IOMMU_MAP_F_MMIO) struct virtio_iommu_req_map { -- cgit v1.2.3 From cf949bbe22bee8749078e0b810ee2dc60a983746 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Wed, 12 Jun 2019 16:34:37 +0300 Subject: scsi: ufs: uapi: Fix SPDX license identifier Added 'WITH Linux-syscall-note' exception which is the officially assigned exception identifier for the kernel syscall exception. This exception makes it possible to include GPL headers into non GPL code without confusing license compliance tools. Fixes: a851b2bd3632 (scsi: uapi: ufs: Make utp_upiu_req visible to user space) Signed-off-by: Avri Altman Reviewed-by: Pedro Sousa Signed-off-by: Martin K. Petersen --- include/uapi/scsi/scsi_bsg_ufs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h index 17c7abd0803a..9988db6ad244 100644 --- a/include/uapi/scsi/scsi_bsg_ufs.h +++ b/include/uapi/scsi/scsi_bsg_ufs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * UFS Transport SGIO v4 BSG Message Support * -- cgit v1.2.3 From 6298b78742be6593d372ed1b5bfa5397e1393595 Mon Sep 17 00:00:00 2001 From: Janusz Jankowski Date: Mon, 22 Jul 2019 09:14:02 -0500 Subject: ASoC: SOF: Intel: ssp: BCLK delay parameter Some codecs require BCLK to be on for some time, before sending any data. SOF can enable BCLK and then wait for guaranteed time, before starting DMA on SSP start. 
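A rough sketch of the sequencing this enables (illustrative only: ssp_bclk_enable() and ssp_dma_start() are made-up names standing in for the firmware's clock and DMA controls, not actual SOF APIs):

#include <linux/delay.h>

/* illustrative sketch, not firmware code: honor bclk_delay between
 * enabling BCLK and starting DMA
 */
static void ssp_start(const struct sof_ipc_dai_ssp_params *params)
{
	ssp_bclk_enable();			/* hypothetical: start driving BCLK */
	if (params->bclk_delay)
		msleep(params->bclk_delay);	/* guaranteed on-time, in ms */
	ssp_dma_start();			/* hypothetical: only now send data */
}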
Signed-off-by: Janusz Jankowski Signed-off-by: Pierre-Louis Bossart Link: https://lore.kernel.org/r/20190722141402.7194-22-pierre-louis.bossart@linux.intel.com Signed-off-by: Mark Brown --- include/sound/sof/dai-intel.h | 3 +++ include/uapi/sound/sof/abi.h | 2 +- include/uapi/sound/sof/tokens.h | 1 + sound/soc/sof/topology.c | 3 +++ 4 files changed, 8 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h index 4bb8ee138ba7..a81afd3fbd41 100644 --- a/include/sound/sof/dai-intel.h +++ b/include/sound/sof/dai-intel.h @@ -76,6 +76,9 @@ struct sof_ipc_dai_ssp_params { uint16_t tdm_per_slot_padding_flag; uint32_t clks_control; uint32_t quirks; + uint32_t bclk_delay; /* guaranteed time (ms) for which BCLK + * will be driven, before sending data + */ } __packed; /* HDA Configuration Request - SOF_IPC_DAI_HDA_CONFIG */ diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h index 4a9c24434f42..dff70a42445a 100644 --- a/include/uapi/sound/sof/abi.h +++ b/include/uapi/sound/sof/abi.h @@ -26,7 +26,7 @@ /* SOF ABI version major, minor and patch numbers */ #define SOF_ABI_MAJOR 3 -#define SOF_ABI_MINOR 8 +#define SOF_ABI_MINOR 9 #define SOF_ABI_PATCH 0 /* SOF ABI version number. Format within 32bit word is MMmmmppp */ diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h index dc1b27daaac6..6435240cef13 100644 --- a/include/uapi/sound/sof/tokens.h +++ b/include/uapi/sound/sof/tokens.h @@ -75,6 +75,7 @@ #define SOF_TKN_INTEL_SSP_FRAME_PULSE_WIDTH 503 #define SOF_TKN_INTEL_SSP_QUIRKS 504 #define SOF_TKN_INTEL_SSP_TDM_PADDING_PER_SLOT 505 +#define SOF_TKN_INTEL_SSP_BCLK_DELAY 506 /* DMIC */ #define SOF_TKN_INTEL_DMIC_DRIVER_VERSION 600 diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c index 432ae343f960..12b7d900b9c2 100644 --- a/sound/soc/sof/topology.c +++ b/sound/soc/sof/topology.c @@ -748,6 +748,9 @@ static const struct sof_topology_token ssp_tokens[] = { get_token_u16, offsetof(struct sof_ipc_dai_ssp_params, tdm_per_slot_padding_flag), 0}, + {SOF_TKN_INTEL_SSP_BCLK_DELAY, SND_SOC_TPLG_TUPLE_TYPE_WORD, + get_token_u32, + offsetof(struct sof_ipc_dai_ssp_params, bclk_delay), 0}, }; -- cgit v1.2.3 From 2f5947dfcaecb99f2dd559156eecbeb7b95e4c02 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 24 Jul 2019 09:24:49 +0200 Subject: Documentation: move Documentation/virtual to Documentation/virt Renaming docs seems to be en vogue at the moment, so fix one of the grossly misnamed directories. We usually never use "virtual" as a shortcut for virtualization in the kernel, but always virt, as seen in the virt/ top-level directory. Fix up the documentation to match that.
Fixes: ed16648eb5b8 ("Move kvm, uml, and lguest subdirectories under a common "virtual" directory, I.E:") Signed-off-by: Christoph Hellwig Signed-off-by: Paolo Bonzini --- Documentation/admin-guide/kernel-parameters.txt | 2 +- Documentation/virt/index.rst | 18 + Documentation/virt/kvm/amd-memory-encryption.rst | 250 + Documentation/virt/kvm/api.txt | 5296 ++++++++++++++++++++ Documentation/virt/kvm/arm/hyp-abi.txt | 53 + Documentation/virt/kvm/arm/psci.txt | 61 + Documentation/virt/kvm/cpuid.rst | 107 + Documentation/virt/kvm/devices/README | 1 + Documentation/virt/kvm/devices/arm-vgic-its.txt | 181 + Documentation/virt/kvm/devices/arm-vgic-v3.txt | 251 + Documentation/virt/kvm/devices/arm-vgic.txt | 127 + Documentation/virt/kvm/devices/mpic.txt | 53 + Documentation/virt/kvm/devices/s390_flic.txt | 163 + Documentation/virt/kvm/devices/vcpu.txt | 62 + Documentation/virt/kvm/devices/vfio.txt | 36 + Documentation/virt/kvm/devices/vm.txt | 270 + Documentation/virt/kvm/devices/xics.txt | 66 + Documentation/virt/kvm/devices/xive.txt | 197 + Documentation/virt/kvm/halt-polling.txt | 136 + Documentation/virt/kvm/hypercalls.txt | 154 + Documentation/virt/kvm/index.rst | 11 + Documentation/virt/kvm/locking.txt | 215 + Documentation/virt/kvm/mmu.txt | 449 ++ Documentation/virt/kvm/msr.txt | 284 ++ Documentation/virt/kvm/nested-vmx.txt | 240 + Documentation/virt/kvm/ppc-pv.txt | 212 + Documentation/virt/kvm/review-checklist.txt | 38 + Documentation/virt/kvm/s390-diag.txt | 83 + Documentation/virt/kvm/timekeeping.txt | 612 +++ Documentation/virt/kvm/vcpu-requests.rst | 307 ++ Documentation/virt/paravirt_ops.rst | 35 + Documentation/virt/uml/UserModeLinux-HOWTO.txt | 4589 +++++++++++++++++ Documentation/virtual/index.rst | 18 - .../virtual/kvm/amd-memory-encryption.rst | 250 - Documentation/virtual/kvm/api.txt | 5296 -------------------- Documentation/virtual/kvm/arm/hyp-abi.txt | 53 - Documentation/virtual/kvm/arm/psci.txt | 61 - Documentation/virtual/kvm/cpuid.rst | 107 - Documentation/virtual/kvm/devices/README | 1 - Documentation/virtual/kvm/devices/arm-vgic-its.txt | 181 - Documentation/virtual/kvm/devices/arm-vgic-v3.txt | 251 - Documentation/virtual/kvm/devices/arm-vgic.txt | 127 - Documentation/virtual/kvm/devices/mpic.txt | 53 - Documentation/virtual/kvm/devices/s390_flic.txt | 163 - Documentation/virtual/kvm/devices/vcpu.txt | 62 - Documentation/virtual/kvm/devices/vfio.txt | 36 - Documentation/virtual/kvm/devices/vm.txt | 270 - Documentation/virtual/kvm/devices/xics.txt | 66 - Documentation/virtual/kvm/devices/xive.txt | 197 - Documentation/virtual/kvm/halt-polling.txt | 136 - Documentation/virtual/kvm/hypercalls.txt | 154 - Documentation/virtual/kvm/index.rst | 11 - Documentation/virtual/kvm/locking.txt | 215 - Documentation/virtual/kvm/mmu.txt | 449 -- Documentation/virtual/kvm/msr.txt | 284 -- Documentation/virtual/kvm/nested-vmx.txt | 240 - Documentation/virtual/kvm/ppc-pv.txt | 212 - Documentation/virtual/kvm/review-checklist.txt | 38 - Documentation/virtual/kvm/s390-diag.txt | 83 - Documentation/virtual/kvm/timekeeping.txt | 612 --- Documentation/virtual/kvm/vcpu-requests.rst | 307 -- Documentation/virtual/paravirt_ops.rst | 35 - Documentation/virtual/uml/UserModeLinux-HOWTO.txt | 4589 ----------------- MAINTAINERS | 6 +- arch/powerpc/include/uapi/asm/kvm_para.h | 2 +- arch/x86/kvm/mmu.c | 2 +- include/uapi/linux/kvm.h | 4 +- tools/include/uapi/linux/kvm.h | 4 +- virt/kvm/arm/arm.c | 2 +- virt/kvm/arm/vgic/vgic-mmio-v3.c | 2 +- virt/kvm/arm/vgic/vgic.h | 4 +- 71 files changed, 14571 
insertions(+), 14571 deletions(-) create mode 100644 Documentation/virt/index.rst create mode 100644 Documentation/virt/kvm/amd-memory-encryption.rst create mode 100644 Documentation/virt/kvm/api.txt create mode 100644 Documentation/virt/kvm/arm/hyp-abi.txt create mode 100644 Documentation/virt/kvm/arm/psci.txt create mode 100644 Documentation/virt/kvm/cpuid.rst create mode 100644 Documentation/virt/kvm/devices/README create mode 100644 Documentation/virt/kvm/devices/arm-vgic-its.txt create mode 100644 Documentation/virt/kvm/devices/arm-vgic-v3.txt create mode 100644 Documentation/virt/kvm/devices/arm-vgic.txt create mode 100644 Documentation/virt/kvm/devices/mpic.txt create mode 100644 Documentation/virt/kvm/devices/s390_flic.txt create mode 100644 Documentation/virt/kvm/devices/vcpu.txt create mode 100644 Documentation/virt/kvm/devices/vfio.txt create mode 100644 Documentation/virt/kvm/devices/vm.txt create mode 100644 Documentation/virt/kvm/devices/xics.txt create mode 100644 Documentation/virt/kvm/devices/xive.txt create mode 100644 Documentation/virt/kvm/halt-polling.txt create mode 100644 Documentation/virt/kvm/hypercalls.txt create mode 100644 Documentation/virt/kvm/index.rst create mode 100644 Documentation/virt/kvm/locking.txt create mode 100644 Documentation/virt/kvm/mmu.txt create mode 100644 Documentation/virt/kvm/msr.txt create mode 100644 Documentation/virt/kvm/nested-vmx.txt create mode 100644 Documentation/virt/kvm/ppc-pv.txt create mode 100644 Documentation/virt/kvm/review-checklist.txt create mode 100644 Documentation/virt/kvm/s390-diag.txt create mode 100644 Documentation/virt/kvm/timekeeping.txt create mode 100644 Documentation/virt/kvm/vcpu-requests.rst create mode 100644 Documentation/virt/paravirt_ops.rst create mode 100644 Documentation/virt/uml/UserModeLinux-HOWTO.txt delete mode 100644 Documentation/virtual/index.rst delete mode 100644 Documentation/virtual/kvm/amd-memory-encryption.rst delete mode 100644 Documentation/virtual/kvm/api.txt delete mode 100644 Documentation/virtual/kvm/arm/hyp-abi.txt delete mode 100644 Documentation/virtual/kvm/arm/psci.txt delete mode 100644 Documentation/virtual/kvm/cpuid.rst delete mode 100644 Documentation/virtual/kvm/devices/README delete mode 100644 Documentation/virtual/kvm/devices/arm-vgic-its.txt delete mode 100644 Documentation/virtual/kvm/devices/arm-vgic-v3.txt delete mode 100644 Documentation/virtual/kvm/devices/arm-vgic.txt delete mode 100644 Documentation/virtual/kvm/devices/mpic.txt delete mode 100644 Documentation/virtual/kvm/devices/s390_flic.txt delete mode 100644 Documentation/virtual/kvm/devices/vcpu.txt delete mode 100644 Documentation/virtual/kvm/devices/vfio.txt delete mode 100644 Documentation/virtual/kvm/devices/vm.txt delete mode 100644 Documentation/virtual/kvm/devices/xics.txt delete mode 100644 Documentation/virtual/kvm/devices/xive.txt delete mode 100644 Documentation/virtual/kvm/halt-polling.txt delete mode 100644 Documentation/virtual/kvm/hypercalls.txt delete mode 100644 Documentation/virtual/kvm/index.rst delete mode 100644 Documentation/virtual/kvm/locking.txt delete mode 100644 Documentation/virtual/kvm/mmu.txt delete mode 100644 Documentation/virtual/kvm/msr.txt delete mode 100644 Documentation/virtual/kvm/nested-vmx.txt delete mode 100644 Documentation/virtual/kvm/ppc-pv.txt delete mode 100644 Documentation/virtual/kvm/review-checklist.txt delete mode 100644 Documentation/virtual/kvm/s390-diag.txt delete mode 100644 Documentation/virtual/kvm/timekeeping.txt delete mode 100644 
Documentation/virtual/kvm/vcpu-requests.rst delete mode 100644 Documentation/virtual/paravirt_ops.rst delete mode 100644 Documentation/virtual/uml/UserModeLinux-HOWTO.txt (limited to 'include/uapi') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 099c5a4be95b..8a8880cec34b 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2532,7 +2532,7 @@ mem_encrypt=on: Activate SME mem_encrypt=off: Do not activate SME - Refer to Documentation/virtual/kvm/amd-memory-encryption.rst + Refer to Documentation/virt/kvm/amd-memory-encryption.rst for details on when memory encryption can be activated. mem_sleep_default= [SUSPEND] Default system suspend mode: diff --git a/Documentation/virt/index.rst b/Documentation/virt/index.rst new file mode 100644 index 000000000000..062ffb527043 --- /dev/null +++ b/Documentation/virt/index.rst @@ -0,0 +1,18 @@ +.. SPDX-License-Identifier: GPL-2.0 + +============================ +Linux Virtualization Support +============================ + +.. toctree:: + :maxdepth: 2 + + kvm/index + paravirt_ops + +.. only:: html and subproject + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/virt/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst new file mode 100644 index 000000000000..d18c97b4e140 --- /dev/null +++ b/Documentation/virt/kvm/amd-memory-encryption.rst @@ -0,0 +1,250 @@ +====================================== +Secure Encrypted Virtualization (SEV) +====================================== + +Overview +======== + +Secure Encrypted Virtualization (SEV) is a feature found on AMD processors. + +SEV is an extension to the AMD-V architecture which supports running +virtual machines (VMs) under the control of a hypervisor. When enabled, +the memory contents of a VM will be transparently encrypted with a key +unique to that VM. + +The hypervisor can determine the SEV support through the CPUID +instruction. The CPUID function 0x8000001f reports information related +to SEV:: + + 0x8000001f[eax]: + Bit[1] indicates support for SEV + ... + [ecx]: + Bits[31:0] Number of encrypted guests supported simultaneously + +If support for SEV is present, MSR 0xc001_0010 (MSR_K8_SYSCFG) and MSR 0xc001_0015 +(MSR_K7_HWCR) can be used to determine if it can be enabled:: + + 0xc001_0010: + Bit[23] 1 = memory encryption can be enabled + 0 = memory encryption can not be enabled + + 0xc001_0015: + Bit[0] 1 = memory encryption can be enabled + 0 = memory encryption can not be enabled + +When SEV support is available, it can be enabled in a specific VM by +setting the SEV bit before executing VMRUN.:: + + VMCB[0x90]: + Bit[1] 1 = SEV is enabled + 0 = SEV is disabled + +SEV hardware uses ASIDs to associate a memory encryption key with a VM. +Hence, the ASID for the SEV-enabled guests must be from 1 to a maximum value +defined in the CPUID 0x8000001f[ecx] field. + +SEV Key Management +================== + +The SEV guest key management is handled by a separate processor called the AMD +Secure Processor (AMD-SP). Firmware running inside the AMD-SP provides a secure +key management interface to perform common hypervisor activities such as +encrypting bootstrap code, snapshot, migrating and debugging the guest. 
For more +information, see the SEV Key Management spec [api-spec]_ + +KVM implements the following commands to support common lifecycle events of SEV +guests, such as launching, running, snapshotting, migrating and decommissioning. + +1. KVM_SEV_INIT +--------------- + +The KVM_SEV_INIT command is used by the hypervisor to initialize the SEV platform +context. In a typical workflow, this command should be the first command issued. + +Returns: 0 on success, -negative on error + +2. KVM_SEV_LAUNCH_START +----------------------- + +The KVM_SEV_LAUNCH_START command is used for creating the memory encryption +context. To create the encryption context, user must provide a guest policy, +the owner's public Diffie-Hellman (PDH) key and session information. + +Parameters: struct kvm_sev_launch_start (in/out) + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_launch_start { + __u32 handle; /* if zero then firmware creates a new handle */ + __u32 policy; /* guest's policy */ + + __u64 dh_uaddr; /* userspace address pointing to the guest owner's PDH key */ + __u32 dh_len; + + __u64 session_addr; /* userspace address which points to the guest session information */ + __u32 session_len; + }; + +On success, the 'handle' field contains a new handle and on error, a negative value. + +For more details, see SEV spec Section 6.2. + +3. KVM_SEV_LAUNCH_UPDATE_DATA +----------------------------- + +The KVM_SEV_LAUNCH_UPDATE_DATA is used for encrypting a memory region. It also +calculates a measurement of the memory contents. The measurement is a signature +of the memory contents that can be sent to the guest owner as an attestation +that the memory was encrypted correctly by the firmware. + +Parameters (in): struct kvm_sev_launch_update_data + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_launch_update_data { + __u64 uaddr; /* userspace address to be encrypted (must be 16-byte aligned) */ + __u32 len; /* length of the data to be encrypted (must be 16-byte aligned) */ + }; + +For more details, see SEV spec Section 6.3. + +4. KVM_SEV_LAUNCH_MEASURE +------------------------- + +The KVM_SEV_LAUNCH_MEASURE command is used to retrieve the measurement of the +data encrypted by the KVM_SEV_LAUNCH_UPDATE_DATA command. The guest owner may +wait to provide the guest with confidential information until it can verify the +measurement. Since the guest owner knows the initial contents of the guest at +boot, the measurement can be verified by comparing it to what the guest owner +expects. + +Parameters (in): struct kvm_sev_launch_measure + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_launch_measure { + __u64 uaddr; /* where to copy the measurement */ + __u32 len; /* length of measurement blob */ + }; + +For more details on the measurement verification flow, see SEV spec Section 6.4. + +5. KVM_SEV_LAUNCH_FINISH +------------------------ + +After completion of the launch flow, the KVM_SEV_LAUNCH_FINISH command can be +issued to make the guest ready for the execution. + +Returns: 0 on success, -negative on error + +6. KVM_SEV_GUEST_STATUS +----------------------- + +The KVM_SEV_GUEST_STATUS command is used to retrieve status information about a +SEV-enabled guest.
+ +Parameters (out): struct kvm_sev_guest_status + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_guest_status { + __u32 handle; /* guest handle */ + __u32 policy; /* guest policy */ + __u8 state; /* guest state (see enum below) */ + }; + +SEV guest state: + +:: + + enum { + SEV_STATE_INVALID = 0, + SEV_STATE_LAUNCHING, /* guest is currently being launched */ + SEV_STATE_SECRET, /* guest is being launched and ready to accept the ciphertext data */ + SEV_STATE_RUNNING, /* guest is fully launched and running */ + SEV_STATE_RECEIVING, /* guest is being migrated in from another SEV machine */ + SEV_STATE_SENDING /* guest is getting migrated out to another SEV machine */ + }; + +7. KVM_SEV_DBG_DECRYPT +---------------------- + +The KVM_SEV_DBG_DECRYPT command can be used by the hypervisor to request the +firmware to decrypt the data at the given memory region. + +Parameters (in): struct kvm_sev_dbg + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_dbg { + __u64 src_uaddr; /* userspace address of data to decrypt */ + __u64 dst_uaddr; /* userspace address of destination */ + __u32 len; /* length of memory region to decrypt */ + }; + +The command returns an error if the guest policy does not allow debugging. + +8. KVM_SEV_DBG_ENCRYPT +---------------------- + +The KVM_SEV_DBG_ENCRYPT command can be used by the hypervisor to request the +firmware to encrypt the data at the given memory region. + +Parameters (in): struct kvm_sev_dbg + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_dbg { + __u64 src_uaddr; /* userspace address of data to encrypt */ + __u64 dst_uaddr; /* userspace address of destination */ + __u32 len; /* length of memory region to encrypt */ + }; + +The command returns an error if the guest policy does not allow debugging. + +9. KVM_SEV_LAUNCH_SECRET +------------------------ + +The KVM_SEV_LAUNCH_SECRET command can be used by the hypervisor to inject secret +data after the measurement has been validated by the guest owner. + +Parameters (in): struct kvm_sev_launch_secret + +Returns: 0 on success, -negative on error + +:: + + struct kvm_sev_launch_secret { + __u64 hdr_uaddr; /* userspace address containing the packet header */ + __u32 hdr_len; + + __u64 guest_uaddr; /* the guest memory region where the secret should be injected */ + __u32 guest_len; + + __u64 trans_uaddr; /* the hypervisor memory region which contains the secret */ + __u32 trans_len; + }; + +References +========== + + +See [white-paper]_, [api-spec]_, [amd-apm]_ and [kvm-forum]_ for more info. + +.. [white-paper] http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf +.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM_API_Specification.pdf +.. [amd-apm] http://support.amd.com/TechDocs/24593.pdf (section 15.34) +.. [kvm-forum] http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt new file mode 100644 index 000000000000..2d067767b617 --- /dev/null +++ b/Documentation/virt/kvm/api.txt @@ -0,0 +1,5296 @@ +The Definitive KVM (Kernel-based Virtual Machine) API Documentation +=================================================================== + +1. General description +---------------------- + +The kvm API is a set of ioctls that are issued to control various aspects +of a virtual machine.
The ioctls belong to four classes: + + - System ioctls: These query and set global attributes which affect the + whole kvm subsystem. In addition a system ioctl is used to create + virtual machines. + + - VM ioctls: These query and set attributes that affect an entire virtual + machine, for example memory layout. In addition a VM ioctl is used to + create virtual cpus (vcpus) and devices. + + VM ioctls must be issued from the same process (address space) that was + used to create the VM. + + - vcpu ioctls: These query and set attributes that control the operation + of a single virtual cpu. + + vcpu ioctls should be issued from the same thread that was used to create + the vcpu, except for asynchronous vcpu ioctls that are marked as such in + the documentation. Otherwise, the first ioctl after switching threads + could see a performance impact. + + - device ioctls: These query and set attributes that control the operation + of a single device. + + device ioctls must be issued from the same process (address space) that + was used to create the VM. + +2. File descriptors +------------------- + +The kvm API is centered around file descriptors. An initial +open("/dev/kvm") obtains a handle to the kvm subsystem; this handle +can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this +handle will create a VM file descriptor which can be used to issue VM +ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will +create a virtual cpu or device and return a file descriptor pointing to +the new resource. Finally, ioctls on a vcpu or device fd can be used +to control the vcpu or device. For vcpus, this includes the important +task of actually running guest code. + +In general file descriptors can be migrated among processes by means +of fork() and the SCM_RIGHTS facility of unix domain socket. These +kinds of tricks are explicitly not supported by kvm. While they will +not cause harm to the host, their actual behavior is not guaranteed by +the API. See "General description" for details on the ioctl usage +model that is supported by KVM. + +It is important to note that although VM ioctls may only be issued from +the process that created the VM, a VM's lifecycle is associated with its +file descriptor, not its creator (process). In other words, the VM and +its resources, *including the associated address space*, are not freed +until the last reference to the VM's file descriptor has been released. +For example, if fork() is issued after ioctl(KVM_CREATE_VM), the VM will +not be freed until both the parent (original) process and its child have +put their references to the VM's file descriptor. + +Because a VM's resources are not freed until the last reference to its +file descriptor is released, creating additional references to a VM +via fork(), dup(), etc... without careful consideration is strongly +discouraged and may have unwanted side effects, e.g. memory allocated +by and on behalf of the VM's process may not be freed/unaccounted when +the VM is shut down. + + +3. Extensions +------------- + +As of Linux 2.6.22, the KVM ABI has been stabilized: no backward +incompatible changes are allowed. However, there is an extension +facility that allows backward-compatible extensions to the API to be +queried and used. + +The extension mechanism is not based on the Linux version number. +Instead, kvm defines extension identifiers and a facility to query +whether a particular extension identifier is available. If it is, a +set of ioctls is available for application use. + + +4.
API description +------------------ + +This section describes ioctls that can be used to control kvm guests. +For each ioctl, the following information is provided along with a +description: + + Capability: which KVM extension provides this ioctl. Can be 'basic', + which means that it will be provided by any kernel that supports + API version 12 (see section 4.1), a KVM_CAP_xyz constant, which + means availability needs to be checked with KVM_CHECK_EXTENSION + (see section 4.4), or 'none' which means that while not all kernels + support this ioctl, there's no capability bit to check its + availability: for kernels that don't support the ioctl, + the ioctl returns -ENOTTY. + + Architectures: which instruction set architectures provide this ioctl. + x86 includes both i386 and x86_64. + + Type: system, vm, or vcpu. + + Parameters: what parameters are accepted by the ioctl. + + Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL) + are not detailed, but errors with specific meanings are. + + +4.1 KVM_GET_API_VERSION + +Capability: basic +Architectures: all +Type: system ioctl +Parameters: none +Returns: the constant KVM_API_VERSION (=12) + +This identifies the API version as the stable kvm API. It is not +expected that this number will change. However, Linux 2.6.20 and +2.6.21 report earlier versions; these are not documented and not +supported. Applications should refuse to run if KVM_GET_API_VERSION +returns a value other than 12. If this check passes, all ioctls +described as 'basic' will be available. + + +4.2 KVM_CREATE_VM + +Capability: basic +Architectures: all +Type: system ioctl +Parameters: machine type identifier (KVM_VM_*) +Returns: a VM fd that can be used to control the new virtual machine. + +The new VM has no virtual cpus and no memory. +You probably want to use 0 as machine type. + +In order to create user controlled virtual machines on S390, check +KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as +privileged user (CAP_SYS_ADMIN). + +To use hardware assisted virtualization on MIPS (VZ ASE) rather than +the default trap & emulate implementation (which changes the virtual +memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the +flag KVM_VM_MIPS_VZ. + + +On arm64, the physical address size for a VM (IPA Size limit) is limited +to 40bits by default. The limit can be configured if the host supports the +extension KVM_CAP_ARM_VM_IPA_SIZE. When supported, use +KVM_VM_TYPE_ARM_IPA_SIZE(IPA_Bits) to set the size in the machine type +identifier, where IPA_Bits is the maximum width of any physical +address used by the VM. The IPA_Bits is encoded in bits[7-0] of the +machine type identifier. + +e.g., to configure a guest to use 48bit physical address size: + + vm_fd = ioctl(dev_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(48)); + +The requested size (IPA_Bits) must be: + 0 - Implies default size, 40bits (for backward compatibility) + + or + + N - Implies N bits, where N is a positive integer such that, + 32 <= N <= Host_IPA_Limit + +Host_IPA_Limit is the maximum possible value for IPA_Bits on the host and +is dependent on the CPU capability and the kernel configuration. The limit can +be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION +ioctl() at run-time. + +Please note that configuring the IPA size does not affect the capability +exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects +size of the address translated by the stage2 level (guest physical to +host physical address translations).
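+A minimal userspace sketch of the fd flow in sections 4.1-4.2 (illustrative
+only; error handling omitted):
+
+    kvm_fd = open("/dev/kvm", O_RDWR);
+    if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != 12)	/* see 4.1 */
+        exit(1);
+    vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);		/* machine type 0 */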
+ + +4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST + +Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST +Architectures: x86 +Type: system ioctl +Parameters: struct kvm_msr_list (in/out) +Returns: 0 on success; -1 on error +Errors: + EFAULT: the msr index list cannot be read from or written to + E2BIG: the msr index list is too big to fit in the array specified by + the user. + +struct kvm_msr_list { + __u32 nmsrs; /* number of msrs in entries */ + __u32 indices[0]; +}; + +The user fills in the size of the indices array in nmsrs, and in return +kvm adjusts nmsrs to reflect the actual number of msrs and fills in the +indices array with their numbers. + +KVM_GET_MSR_INDEX_LIST returns the guest msrs that are supported. The list +varies by kvm version and host processor, but does not change otherwise. + +Note: if kvm indicates support for MCE (KVM_CAP_MCE), then the MCE bank MSRs are +not returned in the MSR list, as different vcpus can have a different number +of banks, as set via the KVM_X86_SETUP_MCE ioctl. + +KVM_GET_MSR_FEATURE_INDEX_LIST returns the list of MSRs that can be passed +to the KVM_GET_MSRS system ioctl. This lets userspace probe host capabilities +and processor features that are exposed via MSRs (e.g., VMX capabilities). +This list also varies by kvm version and host processor, but does not change +otherwise. + + +4.4 KVM_CHECK_EXTENSION + +Capability: basic, KVM_CAP_CHECK_EXTENSION_VM for vm ioctl +Architectures: all +Type: system ioctl, vm ioctl +Parameters: extension identifier (KVM_CAP_*) +Returns: 0 if unsupported; 1 (or some other positive integer) if supported + +The API allows the application to query about extensions to the core +kvm API. Userspace passes an extension identifier (an integer) and +receives an integer that describes the extension availability. +Generally 0 means no and 1 means yes, but some extensions may report +additional information in the integer return value. + +Based on their initialization different VMs may have different capabilities. +It is thus encouraged to use the vm ioctl to query for capabilities (available +with KVM_CAP_CHECK_EXTENSION_VM on the vm fd). + +4.5 KVM_GET_VCPU_MMAP_SIZE + +Capability: basic +Architectures: all +Type: system ioctl +Parameters: none +Returns: size of vcpu mmap area, in bytes + +The KVM_RUN ioctl (cf.) communicates with userspace via a shared +memory region. This ioctl returns the size of that region. See the +KVM_RUN documentation for details. + + +4.6 KVM_SET_MEMORY_REGION + +Capability: basic +Architectures: all +Type: vm ioctl +Parameters: struct kvm_memory_region (in) +Returns: 0 on success, -1 on error + +This ioctl is obsolete and has been removed. + + +4.7 KVM_CREATE_VCPU + +Capability: basic +Architectures: all +Type: vm ioctl +Parameters: vcpu id (apic id on x86) +Returns: vcpu fd on success, -1 on error + +This API adds a vcpu to a virtual machine. No more than max_vcpus may be added. +The vcpu id is an integer in the range [0, max_vcpu_id). + +The recommended max_vcpus value can be retrieved using the KVM_CAP_NR_VCPUS of +the KVM_CHECK_EXTENSION ioctl() at run-time. +The maximum possible value for max_vcpus can be retrieved using the +KVM_CAP_MAX_VCPUS of the KVM_CHECK_EXTENSION ioctl() at run-time. + +If the KVM_CAP_NR_VCPUS does not exist, you should assume that max_vcpus is 4 +cpus max. +If the KVM_CAP_MAX_VCPUS does not exist, you should assume that max_vcpus is +same as the value returned from KVM_CAP_NR_VCPUS.
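+A typical probe pattern for the recommended limit (illustrative):
+
+    max_vcpus = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
+    if (max_vcpus <= 0)		/* capability absent: assume 4, as above */
+        max_vcpus = 4;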
+
+The maximum possible value for max_vcpu_id can be retrieved using the
+KVM_CAP_MAX_VCPU_ID of the KVM_CHECK_EXTENSION ioctl() at run-time.
+
+If the KVM_CAP_MAX_VCPU_ID does not exist, you should assume that max_vcpu_id
+is the same as the value returned from KVM_CAP_MAX_VCPUS.
+
+On powerpc using book3s_hv mode, the vcpus are mapped onto virtual
+threads in one or more virtual CPU cores.  (This is because the
+hardware requires all the hardware threads in a CPU core to be in the
+same partition.)  The KVM_CAP_PPC_SMT capability indicates the number
+of vcpus per virtual core (vcore).  The vcore id is obtained by
+dividing the vcpu id by the number of vcpus per vcore.  The vcpus in a
+given vcore will always be in the same physical core as each other
+(though that might be a different physical core from time to time).
+Userspace can control the threading (SMT) mode of the guest by its
+allocation of vcpu ids.  For example, if userspace wants
+single-threaded guest vcpus, it should make all vcpu ids be a multiple
+of the number of vcpus per vcore.
+
+For virtual cpus that have been created with S390 user controlled virtual
+machines, the resulting vcpu fd can be memory mapped at page offset
+KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of the virtual
+cpu's hardware control block.
+
+
+4.8 KVM_GET_DIRTY_LOG (vm ioctl)
+
+Capability: basic
+Architectures: all
+Type: vm ioctl
+Parameters: struct kvm_dirty_log (in/out)
+Returns: 0 on success, -1 on error
+
+/* for KVM_GET_DIRTY_LOG */
+struct kvm_dirty_log {
+        __u32 slot;
+        __u32 padding;
+        union {
+                void __user *dirty_bitmap; /* one bit per page */
+                __u64 padding;
+        };
+};
+
+Given a memory slot, return a bitmap containing any pages dirtied
+since the last call to this ioctl.  Bit 0 is the first page in the
+memory slot.  Ensure the entire structure is cleared to avoid padding
+issues.
+
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot" specify
+the address space for which you want to return the dirty bitmap.
+They must be less than the value that KVM_CHECK_EXTENSION returns for
+the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+
+The bits in the dirty bitmap are cleared before the ioctl returns, unless
+KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled.  For more information,
+see the description of the capability.
+
+4.9 KVM_SET_MEMORY_ALIAS
+
+Capability: basic
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_memory_alias (in)
+Returns: 0 (success), -1 (error)
+
+This ioctl is obsolete and has been removed.
+
+
+4.10 KVM_RUN
+
+Capability: basic
+Architectures: all
+Type: vcpu ioctl
+Parameters: none
+Returns: 0 on success, -1 on error
+Errors:
+  EINTR:     an unmasked signal is pending
+
+This ioctl is used to run a guest virtual cpu.  While there are no
+explicit parameters, there is an implicit parameter block that can be
+obtained by mmap()ing the vcpu fd at offset 0, with the size given by
+KVM_GET_VCPU_MMAP_SIZE.  The parameter block is formatted as a 'struct
+kvm_run' (see below).
+
+
+4.11 KVM_GET_REGS
+
+Capability: basic
+Architectures: all except ARM, arm64
+Type: vcpu ioctl
+Parameters: struct kvm_regs (out)
+Returns: 0 on success, -1 on error
+
+Reads the general purpose registers from the vcpu.
+
+/* x86 */
+struct kvm_regs {
+        /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+        __u64 rax, rbx, rcx, rdx;
+        __u64 rsi, rdi, rsp, rbp;
+        __u64 r8,  r9,  r10, r11;
+        __u64 r12, r13, r14, r15;
+        __u64 rip, rflags;
+};
+
+/* mips */
+struct kvm_regs {
+        /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+        __u64 gpr[32];
+        __u64 hi;
+        __u64 lo;
+        __u64 pc;
+};
+
+
+4.12 KVM_SET_REGS
+
+Capability: basic
+Architectures: all except ARM, arm64
+Type: vcpu ioctl
+Parameters: struct kvm_regs (in)
+Returns: 0 on success, -1 on error
+
+Writes the general purpose registers into the vcpu.
+
+See KVM_GET_REGS for the data structure.
+
+
+4.13 KVM_GET_SREGS
+
+Capability: basic
+Architectures: x86, ppc
+Type: vcpu ioctl
+Parameters: struct kvm_sregs (out)
+Returns: 0 on success, -1 on error
+
+Reads special registers from the vcpu.
+
+/* x86 */
+struct kvm_sregs {
+        struct kvm_segment cs, ds, es, fs, gs, ss;
+        struct kvm_segment tr, ldt;
+        struct kvm_dtable gdt, idt;
+        __u64 cr0, cr2, cr3, cr4, cr8;
+        __u64 efer;
+        __u64 apic_base;
+        __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
+};
+
+/* ppc -- see arch/powerpc/include/uapi/asm/kvm.h */
+
+interrupt_bitmap is a bitmap of pending external interrupts.  At most
+one bit may be set.  This interrupt has been acknowledged by the APIC
+but not yet injected into the cpu core.
+
+
+4.14 KVM_SET_SREGS
+
+Capability: basic
+Architectures: x86, ppc
+Type: vcpu ioctl
+Parameters: struct kvm_sregs (in)
+Returns: 0 on success, -1 on error
+
+Writes special registers into the vcpu.  See KVM_GET_SREGS for the
+data structures.
+
+
+4.15 KVM_TRANSLATE
+
+Capability: basic
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_translation (in/out)
+Returns: 0 on success, -1 on error
+
+Translates a virtual address according to the vcpu's current address
+translation mode.
+
+struct kvm_translation {
+        /* in */
+        __u64 linear_address;
+
+        /* out */
+        __u64 physical_address;
+        __u8  valid;
+        __u8  writeable;
+        __u8  usermode;
+        __u8  pad[5];
+};
+
+
+4.16 KVM_INTERRUPT
+
+Capability: basic
+Architectures: x86, ppc, mips
+Type: vcpu ioctl
+Parameters: struct kvm_interrupt (in)
+Returns: 0 on success, negative on failure.
+
+Queues a hardware interrupt vector to be injected.
+
+/* for KVM_INTERRUPT */
+struct kvm_interrupt {
+        /* in */
+        __u32 irq;
+};
+
+X86:
+
+Returns: 0 on success,
+         -EEXIST if an interrupt is already enqueued
+         -EINVAL if the irq number is invalid
+         -ENXIO  if the PIC is in the kernel
+         -EFAULT if the pointer is invalid
+
+Note 'irq' is an interrupt vector, not an interrupt pin or line.  This
+ioctl is useful if the in-kernel PIC is not used.
+
+PPC:
+
+Queues an external interrupt to be injected.  This ioctl is overloaded
+with 3 different irq values:
+
+a) KVM_INTERRUPT_SET
+
+  This injects an edge type external interrupt into the guest once it's ready
+  to receive interrupts.  When injected, the interrupt is done.
+
+b) KVM_INTERRUPT_UNSET
+
+  This unsets any pending interrupt.
+
+  Only available with KVM_CAP_PPC_UNSET_IRQ.
+
+c) KVM_INTERRUPT_SET_LEVEL
+
+  This injects a level type external interrupt into the guest context.  The
+  interrupt stays pending until a specific ioctl with KVM_INTERRUPT_UNSET
+  is triggered.
+
+  Only available with KVM_CAP_PPC_IRQ_LEVEL.
+
+Note that any value for 'irq' other than the ones stated above is invalid
+and incurs unexpected behavior.
+
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
+
+MIPS:
+
+Queues an external interrupt to be injected into the virtual CPU.  A negative
+interrupt number dequeues the interrupt.
+
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
+
+
+4.17 KVM_DEBUG_GUEST
+
+Capability: basic
+Architectures: none
+Type: vcpu ioctl
+Parameters: none
+Returns: -1 on error
+
+Support for this has been removed.  Use KVM_SET_GUEST_DEBUG instead.
+
+
+4.18 KVM_GET_MSRS
+
+Capability: basic (vcpu), KVM_CAP_GET_MSR_FEATURES (system)
+Architectures: x86
+Type: system ioctl, vcpu ioctl
+Parameters: struct kvm_msrs (in/out)
+Returns: number of msrs successfully returned;
+         -1 on error
+
+When used as a system ioctl:
+Reads the values of MSR-based features that are available for the VM.  This
+is similar to KVM_GET_SUPPORTED_CPUID, but it returns MSR indices and values.
+The list of msr-based features can be obtained using KVM_GET_MSR_FEATURE_INDEX_LIST
+in a system ioctl.
+
+When used as a vcpu ioctl:
+Reads model-specific registers from the vcpu.  Supported msr indices can
+be obtained using KVM_GET_MSR_INDEX_LIST in a system ioctl.
+
+struct kvm_msrs {
+        __u32 nmsrs; /* number of msrs in entries */
+        __u32 pad;
+
+        struct kvm_msr_entry entries[0];
+};
+
+struct kvm_msr_entry {
+        __u32 index;
+        __u32 reserved;
+        __u64 data;
+};
+
+Application code should set the 'nmsrs' member (which indicates the
+size of the entries array) and the 'index' member of each array entry.
+kvm will fill in the 'data' member.
+
+
+4.19 KVM_SET_MSRS
+
+Capability: basic
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_msrs (in)
+Returns: 0 on success, -1 on error
+
+Writes model-specific registers to the vcpu.  See KVM_GET_MSRS for the
+data structures.
+
+Application code should set the 'nmsrs' member (which indicates the
+size of the entries array), and the 'index' and 'data' members of each
+array entry.
+
+
+4.20 KVM_SET_CPUID
+
+Capability: basic
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_cpuid (in)
+Returns: 0 on success, -1 on error
+
+Defines the vcpu responses to the cpuid instruction.  Applications
+should use the KVM_SET_CPUID2 ioctl if available.
+
+
+struct kvm_cpuid_entry {
+        __u32 function;
+        __u32 eax;
+        __u32 ebx;
+        __u32 ecx;
+        __u32 edx;
+        __u32 padding;
+};
+
+/* for KVM_SET_CPUID */
+struct kvm_cpuid {
+        __u32 nent;
+        __u32 padding;
+        struct kvm_cpuid_entry entries[0];
+};
+
+
+4.21 KVM_SET_SIGNAL_MASK
+
+Capability: basic
+Architectures: all
+Type: vcpu ioctl
+Parameters: struct kvm_signal_mask (in)
+Returns: 0 on success, -1 on error
+
+Defines which signals are blocked during execution of KVM_RUN.  This
+signal mask temporarily overrides the thread's signal mask.  Any
+unblocked signal received (except SIGKILL and SIGSTOP, which retain
+their traditional behaviour) will cause KVM_RUN to return with -EINTR.
+
+Note the signal will only be delivered if not blocked by the original
+signal mask.
+
+/* for KVM_SET_SIGNAL_MASK */
+struct kvm_signal_mask {
+        __u32 len;
+        __u8  sigset[0];
+};
+
+
+4.22 KVM_GET_FPU
+
+Capability: basic
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_fpu (out)
+Returns: 0 on success, -1 on error
+
+Reads the floating point state from the vcpu.
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+        __u8  fpr[8][16];
+        __u16 fcw;
+        __u16 fsw;
+        __u8  ftwx;  /* in fxsave format */
+        __u8  pad1;
+        __u16 last_opcode;
+        __u64 last_ip;
+        __u64 last_dp;
+        __u8  xmm[16][16];
+        __u32 mxcsr;
+        __u32 pad2;
+};
+
+
+4.23 KVM_SET_FPU
+
+Capability: basic
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_fpu (in)
+Returns: 0 on success, -1 on error
+
+Writes the floating point state to the vcpu.
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+        __u8  fpr[8][16];
+        __u16 fcw;
+        __u16 fsw;
+        __u8  ftwx;  /* in fxsave format */
+        __u8  pad1;
+        __u16 last_opcode;
+        __u64 last_ip;
+        __u64 last_dp;
+        __u8  xmm[16][16];
+        __u32 mxcsr;
+        __u32 pad2;
+};
+
+
+4.24 KVM_CREATE_IRQCHIP
+
+Capability: KVM_CAP_IRQCHIP, KVM_CAP_S390_IRQCHIP (s390)
+Architectures: x86, ARM, arm64, s390
+Type: vm ioctl
+Parameters: none
+Returns: 0 on success, -1 on error
+
+Creates an interrupt controller model in the kernel.
+On x86, creates a virtual ioapic, a virtual PIC (two PICs, nested), and sets up
+future vcpus to have a local APIC.  IRQ routing for GSIs 0-15 is set to both
+PIC and IOAPIC; GSIs 16-23 only go to the IOAPIC.
+On ARM/arm64, a GICv2 is created.  Any other GIC versions require the usage of
+KVM_CREATE_DEVICE, which also supports creating a GICv2.  Using
+KVM_CREATE_DEVICE is preferred over KVM_CREATE_IRQCHIP for GICv2.
+On s390, a dummy irq routing table is created.
+
+Note that on s390 the KVM_CAP_S390_IRQCHIP vm capability needs to be enabled
+before KVM_CREATE_IRQCHIP can be used.
+
+
+4.25 KVM_IRQ_LINE
+
+Capability: KVM_CAP_IRQCHIP
+Architectures: x86, arm, arm64
+Type: vm ioctl
+Parameters: struct kvm_irq_level
+Returns: 0 on success, -1 on error
+
+Sets the level of a GSI input to the interrupt controller model in the kernel.
+On some architectures it is required that an interrupt controller model has
+been previously created with KVM_CREATE_IRQCHIP.  Note that edge-triggered
+interrupts require the level to be set to 1 and then back to 0.
+
+On real hardware, interrupt pins can be active-low or active-high.  This
+does not matter for the level field of struct kvm_irq_level: 1 always
+means active (asserted), 0 means inactive (deasserted).
+
+x86 allows the operating system to program the interrupt polarity
+(active-low/active-high) for level-triggered interrupts, and KVM used
+to consider the polarity.  However, due to bitrot in the handling of
+active-low interrupts, the above convention is now valid on x86 too.
+This is signaled by KVM_CAP_X86_IOAPIC_POLARITY_IGNORED.  Userspace
+should not present interrupts to the guest as active-low unless this
+capability is present (or unless it is not using the in-kernel irqchip,
+of course).
+
+
+ARM/arm64 can signal an interrupt either at the CPU level, or at the
+in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to
+use PPIs designated for specific cpus.  The irq field is interpreted
+like this:
+
+  bits:  | 31 ... 24 | 23 ... 16 | 15 ... 0 |
+  field: | irq_type  | vcpu_index |  irq_id  |
+
+The irq_type field has the following values:
+- irq_type[0]: out-of-kernel GIC: irq_id 0 is IRQ, irq_id 1 is FIQ
+- irq_type[1]: in-kernel GIC: SPI, irq_id between 32 and 1019 (incl.)
+  (the vcpu_index field is ignored)
+- irq_type[2]: in-kernel GIC: PPI, irq_id between 16 and 31 (incl.)
+
+(The irq_id field thus corresponds nicely to the IRQ ID in the ARM GIC specs)
+
+In both cases, level is used to assert/deassert the line.
+ +struct kvm_irq_level { + union { + __u32 irq; /* GSI */ + __s32 status; /* not used for KVM_IRQ_LEVEL */ + }; + __u32 level; /* 0 or 1 */ +}; + + +4.26 KVM_GET_IRQCHIP + +Capability: KVM_CAP_IRQCHIP +Architectures: x86 +Type: vm ioctl +Parameters: struct kvm_irqchip (in/out) +Returns: 0 on success, -1 on error + +Reads the state of a kernel interrupt controller created with +KVM_CREATE_IRQCHIP into a buffer provided by the caller. + +struct kvm_irqchip { + __u32 chip_id; /* 0 = PIC1, 1 = PIC2, 2 = IOAPIC */ + __u32 pad; + union { + char dummy[512]; /* reserving space */ + struct kvm_pic_state pic; + struct kvm_ioapic_state ioapic; + } chip; +}; + + +4.27 KVM_SET_IRQCHIP + +Capability: KVM_CAP_IRQCHIP +Architectures: x86 +Type: vm ioctl +Parameters: struct kvm_irqchip (in) +Returns: 0 on success, -1 on error + +Sets the state of a kernel interrupt controller created with +KVM_CREATE_IRQCHIP from a buffer provided by the caller. + +struct kvm_irqchip { + __u32 chip_id; /* 0 = PIC1, 1 = PIC2, 2 = IOAPIC */ + __u32 pad; + union { + char dummy[512]; /* reserving space */ + struct kvm_pic_state pic; + struct kvm_ioapic_state ioapic; + } chip; +}; + + +4.28 KVM_XEN_HVM_CONFIG + +Capability: KVM_CAP_XEN_HVM +Architectures: x86 +Type: vm ioctl +Parameters: struct kvm_xen_hvm_config (in) +Returns: 0 on success, -1 on error + +Sets the MSR that the Xen HVM guest uses to initialize its hypercall +page, and provides the starting address and size of the hypercall +blobs in userspace. When the guest writes the MSR, kvm copies one +page of a blob (32- or 64-bit, depending on the vcpu mode) to guest +memory. + +struct kvm_xen_hvm_config { + __u32 flags; + __u32 msr; + __u64 blob_addr_32; + __u64 blob_addr_64; + __u8 blob_size_32; + __u8 blob_size_64; + __u8 pad2[30]; +}; + + +4.29 KVM_GET_CLOCK + +Capability: KVM_CAP_ADJUST_CLOCK +Architectures: x86 +Type: vm ioctl +Parameters: struct kvm_clock_data (out) +Returns: 0 on success, -1 on error + +Gets the current timestamp of kvmclock as seen by the current guest. In +conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios +such as migration. + +When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the +set of bits that KVM can return in struct kvm_clock_data's flag member. + +The only flag defined now is KVM_CLOCK_TSC_STABLE. If set, the returned +value is the exact kvmclock value seen by all VCPUs at the instant +when KVM_GET_CLOCK was called. If clear, the returned value is simply +CLOCK_MONOTONIC plus a constant offset; the offset can be modified +with KVM_SET_CLOCK. KVM will try to make all VCPUs follow this clock, +but the exact value read by each VCPU could differ, because the host +TSC is not stable. + +struct kvm_clock_data { + __u64 clock; /* kvmclock current value */ + __u32 flags; + __u32 pad[9]; +}; + + +4.30 KVM_SET_CLOCK + +Capability: KVM_CAP_ADJUST_CLOCK +Architectures: x86 +Type: vm ioctl +Parameters: struct kvm_clock_data (in) +Returns: 0 on success, -1 on error + +Sets the current timestamp of kvmclock to the value specified in its parameter. +In conjunction with KVM_GET_CLOCK, it is used to ensure monotonicity on scenarios +such as migration. 
+
+struct kvm_clock_data {
+        __u64 clock;  /* kvmclock current value */
+        __u32 flags;
+        __u32 pad[9];
+};
+
+
+4.31 KVM_GET_VCPU_EVENTS
+
+Capability: KVM_CAP_VCPU_EVENTS
+Extended by: KVM_CAP_INTR_SHADOW
+Architectures: x86, arm, arm64
+Type: vcpu ioctl
+Parameters: struct kvm_vcpu_events (out)
+Returns: 0 on success, -1 on error
+
+X86:
+
+Gets currently pending exceptions, interrupts, and NMIs as well as related
+states of the vcpu.
+
+struct kvm_vcpu_events {
+        struct {
+                __u8 injected;
+                __u8 nr;
+                __u8 has_error_code;
+                __u8 pending;
+                __u32 error_code;
+        } exception;
+        struct {
+                __u8 injected;
+                __u8 nr;
+                __u8 soft;
+                __u8 shadow;
+        } interrupt;
+        struct {
+                __u8 injected;
+                __u8 pending;
+                __u8 masked;
+                __u8 pad;
+        } nmi;
+        __u32 sipi_vector;
+        __u32 flags;
+        struct {
+                __u8 smm;
+                __u8 pending;
+                __u8 smm_inside_nmi;
+                __u8 latched_init;
+        } smi;
+        __u8 reserved[27];
+        __u8 exception_has_payload;
+        __u64 exception_payload;
+};
+
+The following bits are defined in the flags field:
+
+- KVM_VCPUEVENT_VALID_SHADOW may be set to signal that
+  interrupt.shadow contains a valid state.
+
+- KVM_VCPUEVENT_VALID_SMM may be set to signal that smi contains a
+  valid state.
+
+- KVM_VCPUEVENT_VALID_PAYLOAD may be set to signal that the
+  exception_has_payload, exception_payload, and exception.pending
+  fields contain a valid state.  This bit will be set whenever
+  KVM_CAP_EXCEPTION_PAYLOAD is enabled.
+
+ARM/ARM64:
+
+If the guest accesses a device that is being emulated by the host kernel in
+such a way that a real device would generate a physical SError, KVM may make
+a virtual SError pending for that VCPU.  This system error interrupt remains
+pending until the guest takes the exception by unmasking PSTATE.A.
+
+Running the VCPU may cause it to take a pending SError, or make an access that
+causes an SError to become pending.  The event's description is only valid while
+the VCPU is not running.
+
+This API provides a way to read and write the pending 'event' state that is not
+visible to the guest.  To save, restore or migrate a VCPU the struct representing
+the state can be read then written using this GET/SET API, along with the other
+guest-visible registers.  It is not possible to 'cancel' an SError that has been
+made pending.
+
+A device being emulated in user-space may also wish to generate an SError.  To do
+this the events structure can be populated by user-space.  The current state
+should be read first, to ensure no existing SError is pending.  If an existing
+SError is pending, the architecture's 'Multiple SError interrupts' rules should
+be followed.  (2.5.3 of DDI0587.a "ARM Reliability, Availability, and
+Serviceability (RAS) Specification").
+
+SError exceptions always have an ESR value.  Some CPUs have the ability to
+specify what the virtual SError's ESR value should be.  These systems will
+advertise KVM_CAP_ARM_INJECT_SERROR_ESR.  In this case exception.has_esr will
+always have a non-zero value when read, and the agent making an SError pending
+should specify the ISS field in the lower 24 bits of exception.serror_esr.  If
+the system supports KVM_CAP_ARM_INJECT_SERROR_ESR, but user-space sets the events
+with exception.has_esr as zero, KVM will choose an ESR.
+
+Specifying exception.has_esr on a system that does not support it will return
+-EINVAL.  Setting anything other than the lower 24 bits of exception.serror_esr
+will return -EINVAL.
+
+struct kvm_vcpu_events {
+        struct {
+                __u8 serror_pending;
+                __u8 serror_has_esr;
+                /* Align it to 8 bytes */
+                __u8 pad[6];
+                __u64 serror_esr;
+        } exception;
+        __u32 reserved[12];
+};
+
+
+4.32 KVM_SET_VCPU_EVENTS
+
+Capability: KVM_CAP_VCPU_EVENTS
+Extended by: KVM_CAP_INTR_SHADOW
+Architectures: x86, arm, arm64
+Type: vcpu ioctl
+Parameters: struct kvm_vcpu_events (in)
+Returns: 0 on success, -1 on error
+
+X86:
+
+Set pending exceptions, interrupts, and NMIs as well as related states of the
+vcpu.
+
+See KVM_GET_VCPU_EVENTS for the data structure.
+
+Fields that may be modified asynchronously by running VCPUs can be excluded
+from the update.  These fields are nmi.pending, sipi_vector, smi.smm,
+smi.pending.  Keep the corresponding bits in the flags field cleared to
+suppress overwriting the current in-kernel state.  The bits are:
+
+KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
+KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
+KVM_VCPUEVENT_VALID_SMM         - transfer the smi sub-struct.
+
+If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
+the flags field to signal that interrupt.shadow contains a valid state and
+shall be written into the VCPU.
+
+KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available.
+
+If KVM_CAP_EXCEPTION_PAYLOAD is enabled, KVM_VCPUEVENT_VALID_PAYLOAD
+can be set in the flags field to signal that the
+exception_has_payload, exception_payload, and exception.pending fields
+contain a valid state and shall be written into the VCPU.
+
+ARM/ARM64:
+
+Set the pending SError exception state for this VCPU.  It is not possible to
+'cancel' an SError that has been made pending.
+
+See KVM_GET_VCPU_EVENTS for the data structure.
+
+
+4.33 KVM_GET_DEBUGREGS
+
+Capability: KVM_CAP_DEBUGREGS
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_debugregs (out)
+Returns: 0 on success, -1 on error
+
+Reads debug registers from the vcpu.
+
+struct kvm_debugregs {
+        __u64 db[4];
+        __u64 dr6;
+        __u64 dr7;
+        __u64 flags;
+        __u64 reserved[9];
+};
+
+
+4.34 KVM_SET_DEBUGREGS
+
+Capability: KVM_CAP_DEBUGREGS
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_debugregs (in)
+Returns: 0 on success, -1 on error
+
+Writes debug registers into the vcpu.
+
+See KVM_GET_DEBUGREGS for the data structure.  The flags field is not used
+yet and must be cleared on entry.
+
+
+4.35 KVM_SET_USER_MEMORY_REGION
+
+Capability: KVM_CAP_USER_MEMORY
+Architectures: all
+Type: vm ioctl
+Parameters: struct kvm_userspace_memory_region (in)
+Returns: 0 on success, -1 on error
+
+struct kvm_userspace_memory_region {
+        __u32 slot;
+        __u32 flags;
+        __u64 guest_phys_addr;
+        __u64 memory_size;    /* bytes */
+        __u64 userspace_addr; /* start of the userspace allocated memory */
+};
+
+/* for kvm_memory_region::flags */
+#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
+#define KVM_MEM_READONLY        (1UL << 1)
+
+This ioctl allows the user to create, modify or delete a guest physical
+memory slot.  Bits 0-15 of "slot" specify the slot id and this value
+should be less than the maximum number of user memory slots supported per
+VM.  The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS.
+Slots may not overlap in guest physical address space.
+
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot"
+specify the address space which is being modified.  They must be
+less than the value that KVM_CHECK_EXTENSION returns for the
+KVM_CAP_MULTI_ADDRESS_SPACE capability.  Slots in separate address spaces
+are unrelated; the restriction on overlapping slots only applies within
+each address space.
+
+Deleting a slot is done by passing zero for memory_size.  When changing
+an existing slot, it may be moved in the guest physical memory space,
+or its flags may be modified, but it may not be resized.
+
+Memory for the region is taken starting at the address denoted by the
+field userspace_addr, which must point at user addressable memory for
+the entire memory slot size.  Any object may back this memory, including
+anonymous memory, ordinary files, and hugetlbfs.
+
+It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
+be identical.  This allows large pages in the guest to be backed by large
+pages in the host.
+
+The flags field supports two flags: KVM_MEM_LOG_DIRTY_PAGES and
+KVM_MEM_READONLY.  The former can be set to instruct KVM to keep track of
+writes to memory within the slot.  See the KVM_GET_DIRTY_LOG ioctl to know how
+to use it.  The latter can be set, if the KVM_CAP_READONLY_MEM capability
+allows it, to make a new slot read-only.  In this case, writes to this memory
+will be posted to userspace as KVM_EXIT_MMIO exits.
+
+When the KVM_CAP_SYNC_MMU capability is available, changes in the backing of
+the memory region are automatically reflected into the guest.  For example, an
+mmap() that affects the region will be made visible immediately.  Another
+example is madvise(MADV_DONTNEED).
+
+It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl.
+KVM_SET_MEMORY_REGION does not allow fine-grained control over memory
+allocation and is deprecated.
+
+
+4.36 KVM_SET_TSS_ADDR
+
+Capability: KVM_CAP_SET_TSS_ADDR
+Architectures: x86
+Type: vm ioctl
+Parameters: unsigned long tss_address (in)
+Returns: 0 on success, -1 on error
+
+This ioctl defines the physical address of a three-page region in the guest
+physical address space.  The region must be within the first 4GB of the
+guest physical address space and must not conflict with any memory slot
+or any mmio address.  The guest may malfunction if it accesses this memory
+region.
+
+This ioctl is required on Intel-based hosts.  This is needed on Intel hardware
+because of a quirk in the virtualization implementation (see the internals
+documentation when it pops into existence).
+
+
+4.37 KVM_ENABLE_CAP
+
+Capability: KVM_CAP_ENABLE_CAP
+Architectures: mips, ppc, s390
+Type: vcpu ioctl
+Parameters: struct kvm_enable_cap (in)
+Returns: 0 on success; -1 on error
+
+Capability: KVM_CAP_ENABLE_CAP_VM
+Architectures: all
+Type: vm ioctl
+Parameters: struct kvm_enable_cap (in)
+Returns: 0 on success; -1 on error
+
+Not all extensions are enabled by default.  Using this ioctl the application
+can enable an extension, making it available to the guest.
+
+On systems that do not support this ioctl, it always fails.  On systems that
+do support it, it only works for extensions that are supported for enablement.
+
+To check if a capability can be enabled, the KVM_CHECK_EXTENSION ioctl should
+be used.
+
+struct kvm_enable_cap {
+        /* in */
+        __u32 cap;
+
+The capability that is supposed to get enabled.
+
+        __u32 flags;
+
+A bitfield indicating future enhancements.  Has to be 0 for now.
+
+        __u64 args[4];
+
+Arguments for enabling a feature.  If a feature needs initial values to
+function properly, this is the place to put them.
+
+        __u8  pad[64];
+};
+
+The vcpu ioctl should be used for vcpu-specific capabilities, the vm ioctl
+for vm-wide capabilities.
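+
+For illustration, the check-then-enable pattern described above might look
+like this (a sketch only: the s390 KVM_CAP_S390_IRQCHIP vm capability from
+section 4.24 is used as the example, and error handling is omitted):
+
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <linux/kvm.h>
+
+    struct kvm_enable_cap cap;
+    memset(&cap, 0, sizeof(cap));       /* flags and args must be zero */
+    cap.cap = KVM_CAP_S390_IRQCHIP;     /* a vm-wide capability */
+
+    /* Only attempt enablement if the extension is advertised... */
+    if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_IRQCHIP) > 0) {
+        /* ...then enable it on the vm fd, since it is vm-wide. */
+        if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) == 0)
+            ; /* KVM_CREATE_IRQCHIP may now be used on s390 */
+    }
+
+vm_fd is assumed to be a fd returned by KVM_CREATE_VM; a vcpu-specific
+capability would use the vcpu fd for KVM_ENABLE_CAP instead.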
+ +4.38 KVM_GET_MP_STATE + +Capability: KVM_CAP_MP_STATE +Architectures: x86, s390, arm, arm64 +Type: vcpu ioctl +Parameters: struct kvm_mp_state (out) +Returns: 0 on success; -1 on error + +struct kvm_mp_state { + __u32 mp_state; +}; + +Returns the vcpu's current "multiprocessing state" (though also valid on +uniprocessor guests). + +Possible values are: + + - KVM_MP_STATE_RUNNABLE: the vcpu is currently running [x86,arm/arm64] + - KVM_MP_STATE_UNINITIALIZED: the vcpu is an application processor (AP) + which has not yet received an INIT signal [x86] + - KVM_MP_STATE_INIT_RECEIVED: the vcpu has received an INIT signal, and is + now ready for a SIPI [x86] + - KVM_MP_STATE_HALTED: the vcpu has executed a HLT instruction and + is waiting for an interrupt [x86] + - KVM_MP_STATE_SIPI_RECEIVED: the vcpu has just received a SIPI (vector + accessible via KVM_GET_VCPU_EVENTS) [x86] + - KVM_MP_STATE_STOPPED: the vcpu is stopped [s390,arm/arm64] + - KVM_MP_STATE_CHECK_STOP: the vcpu is in a special error state [s390] + - KVM_MP_STATE_OPERATING: the vcpu is operating (running or halted) + [s390] + - KVM_MP_STATE_LOAD: the vcpu is in a special load/startup state + [s390] + +On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an +in-kernel irqchip, the multiprocessing state must be maintained by userspace on +these architectures. + +For arm/arm64: + +The only states that are valid are KVM_MP_STATE_STOPPED and +KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not. + +4.39 KVM_SET_MP_STATE + +Capability: KVM_CAP_MP_STATE +Architectures: x86, s390, arm, arm64 +Type: vcpu ioctl +Parameters: struct kvm_mp_state (in) +Returns: 0 on success; -1 on error + +Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for +arguments. + +On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an +in-kernel irqchip, the multiprocessing state must be maintained by userspace on +these architectures. + +For arm/arm64: + +The only states that are valid are KVM_MP_STATE_STOPPED and +KVM_MP_STATE_RUNNABLE which reflect if the vcpu should be paused or not. + +4.40 KVM_SET_IDENTITY_MAP_ADDR + +Capability: KVM_CAP_SET_IDENTITY_MAP_ADDR +Architectures: x86 +Type: vm ioctl +Parameters: unsigned long identity (in) +Returns: 0 on success, -1 on error + +This ioctl defines the physical address of a one-page region in the guest +physical address space. The region must be within the first 4GB of the +guest physical address space and must not conflict with any memory slot +or any mmio address. The guest may malfunction if it accesses this memory +region. + +Setting the address to 0 will result in resetting the address to its default +(0xfffbc000). + +This ioctl is required on Intel-based hosts. This is needed on Intel hardware +because of a quirk in the virtualization implementation (see the internals +documentation when it pops into existence). + +Fails if any VCPU has already been created. + +4.41 KVM_SET_BOOT_CPU_ID + +Capability: KVM_CAP_SET_BOOT_CPU_ID +Architectures: x86 +Type: vm ioctl +Parameters: unsigned long vcpu_id +Returns: 0 on success, -1 on error + +Define which vcpu is the Bootstrap Processor (BSP). Values are the same +as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default +is vcpu 0. 
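+
+For illustration, pause/resume bookkeeping with 4.38/4.39 might look like
+this (a sketch; vcpu_fd is assumed to be a fd returned by KVM_CREATE_VCPU,
+and error handling is omitted):
+
+    #include <sys/ioctl.h>
+    #include <linux/kvm.h>
+
+    struct kvm_mp_state mp_state;
+
+    /* Read the current multiprocessing state... */
+    ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp_state);
+
+    /* ...and, e.g. on arm/arm64, mark a paused vcpu runnable again. */
+    if (mp_state.mp_state == KVM_MP_STATE_STOPPED) {
+        mp_state.mp_state = KVM_MP_STATE_RUNNABLE;
+        ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state);
+    }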
+
+
+4.42 KVM_GET_XSAVE
+
+Capability: KVM_CAP_XSAVE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_xsave (out)
+Returns: 0 on success, -1 on error
+
+struct kvm_xsave {
+        __u32 region[1024];
+};
+
+This ioctl copies the current vcpu's xsave struct to userspace.
+
+
+4.43 KVM_SET_XSAVE
+
+Capability: KVM_CAP_XSAVE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_xsave (in)
+Returns: 0 on success, -1 on error
+
+struct kvm_xsave {
+        __u32 region[1024];
+};
+
+This ioctl copies the userspace-supplied xsave struct to the kernel.
+
+
+4.44 KVM_GET_XCRS
+
+Capability: KVM_CAP_XCRS
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_xcrs (out)
+Returns: 0 on success, -1 on error
+
+struct kvm_xcr {
+        __u32 xcr;
+        __u32 reserved;
+        __u64 value;
+};
+
+struct kvm_xcrs {
+        __u32 nr_xcrs;
+        __u32 flags;
+        struct kvm_xcr xcrs[KVM_MAX_XCRS];
+        __u64 padding[16];
+};
+
+This ioctl copies the current vcpu's xcrs to userspace.
+
+
+4.45 KVM_SET_XCRS
+
+Capability: KVM_CAP_XCRS
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_xcrs (in)
+Returns: 0 on success, -1 on error
+
+struct kvm_xcr {
+        __u32 xcr;
+        __u32 reserved;
+        __u64 value;
+};
+
+struct kvm_xcrs {
+        __u32 nr_xcrs;
+        __u32 flags;
+        struct kvm_xcr xcrs[KVM_MAX_XCRS];
+        __u64 padding[16];
+};
+
+This ioctl sets the vcpu's xcrs to the values specified by userspace.
+
+
+4.46 KVM_GET_SUPPORTED_CPUID
+
+Capability: KVM_CAP_EXT_CPUID
+Architectures: x86
+Type: system ioctl
+Parameters: struct kvm_cpuid2 (in/out)
+Returns: 0 on success, -1 on error
+
+struct kvm_cpuid2 {
+        __u32 nent;
+        __u32 padding;
+        struct kvm_cpuid_entry2 entries[0];
+};
+
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX  BIT(0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC     BIT(1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT   BIT(2)
+
+struct kvm_cpuid_entry2 {
+        __u32 function;
+        __u32 index;
+        __u32 flags;
+        __u32 eax;
+        __u32 ebx;
+        __u32 ecx;
+        __u32 edx;
+        __u32 padding[3];
+};
+
+This ioctl returns x86 cpuid features which are supported by both the
+hardware and kvm in its default configuration.  Userspace can use the
+information returned by this ioctl to construct cpuid information (for
+KVM_SET_CPUID2) that is consistent with hardware, kernel, and
+userspace capabilities, and with user requirements (for example, the
+user may wish to constrain cpuid to emulate older hardware, or for
+feature consistency across a cluster).
+
+Note that certain capabilities, such as KVM_CAP_X86_DISABLE_EXITS, may
+expose cpuid features (e.g. MONITOR) which are not supported by kvm in
+its default configuration.  If userspace enables such capabilities, it
+is responsible for modifying the results of this ioctl appropriately.
+
+Userspace invokes KVM_GET_SUPPORTED_CPUID by passing a kvm_cpuid2 structure
+with the 'nent' field indicating the number of entries in the variable-size
+array 'entries'.  If the number of entries is too low to describe the cpu
+capabilities, an error (E2BIG) is returned.  If the number is too high,
+the 'nent' field is adjusted and an error (ENOMEM) is returned.  If the
+number is just right, the 'nent' field is adjusted to the number of valid
+entries in the 'entries' array, which is then filled.
+
+The entries returned are the host cpuid as returned by the cpuid instruction,
+with unknown or unsupported features masked out.  Some features (for example,
+x2apic), may not be present in the host cpu, but are exposed by kvm if it can
+emulate them efficiently.  The fields in each entry are defined as follows:
+
+  function: the eax value used to obtain the entry
+  index: the ecx value used to obtain the entry (for entries that are
+         affected by ecx)
+  flags: an OR of zero or more of the following:
+        KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
+           if the index field is valid
+        KVM_CPUID_FLAG_STATEFUL_FUNC:
+           if cpuid for this function returns different values for successive
+           invocations; there will be several entries with the same function,
+           all with this flag set
+        KVM_CPUID_FLAG_STATE_READ_NEXT:
+           for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
+           the first entry to be read by a cpu
+  eax, ebx, ecx, edx: the values returned by the cpuid instruction for
+         this function/index combination
+
+The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned
+as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC
+support.  Instead it is reported via
+
+  ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER)
+
+if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
+feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
+
+
+4.47 KVM_PPC_GET_PVINFO
+
+Capability: KVM_CAP_PPC_GET_PVINFO
+Architectures: ppc
+Type: vm ioctl
+Parameters: struct kvm_ppc_pvinfo (out)
+Returns: 0 on success, !0 on error
+
+struct kvm_ppc_pvinfo {
+        __u32 flags;
+        __u32 hcall[4];
+        __u8  pad[108];
+};
+
+This ioctl fetches PV specific information that needs to be passed to the guest
+using the device tree or other means from vm context.
+
+The hcall array defines 4 instructions that make up a hypercall.
+
+If any additional field gets added to this structure later on, a bit for that
+additional piece of information will be set in the flags bitmap.
+
+The flags bitmap is defined as:
+
+   /* the host supports the ePAPR idle hcall */
+   #define KVM_PPC_PVINFO_FLAGS_EV_IDLE   (1<<0)
+
+4.52 KVM_SET_GSI_ROUTING
+
+Capability: KVM_CAP_IRQ_ROUTING
+Architectures: x86 s390 arm arm64
+Type: vm ioctl
+Parameters: struct kvm_irq_routing (in)
+Returns: 0 on success, -1 on error
+
+Sets the GSI routing table entries, overwriting any previously set entries.
+
+On arm/arm64, GSI routing has the following limitation:
+- GSI routing does not apply to KVM_IRQ_LINE but only to KVM_IRQFD.
+
+struct kvm_irq_routing {
+        __u32 nr;
+        __u32 flags;
+        struct kvm_irq_routing_entry entries[0];
+};
+
+No flags are specified so far; the corresponding field must be set to zero.
+
+struct kvm_irq_routing_entry {
+        __u32 gsi;
+        __u32 type;
+        __u32 flags;
+        __u32 pad;
+        union {
+                struct kvm_irq_routing_irqchip irqchip;
+                struct kvm_irq_routing_msi msi;
+                struct kvm_irq_routing_s390_adapter adapter;
+                struct kvm_irq_routing_hv_sint hv_sint;
+                __u32 pad[8];
+        } u;
+};
+
+/* gsi routing entry types */
+#define KVM_IRQ_ROUTING_IRQCHIP 1
+#define KVM_IRQ_ROUTING_MSI 2
+#define KVM_IRQ_ROUTING_S390_ADAPTER 3
+#define KVM_IRQ_ROUTING_HV_SINT 4
+
+flags:
+- KVM_MSI_VALID_DEVID: used along with KVM_IRQ_ROUTING_MSI routing entry
+  type, specifies that the devid field contains a valid value.  The per-VM
+  KVM_CAP_MSI_DEVID capability advertises the requirement to provide
+  the device ID.  If this capability is not available, userspace should
+  never set the KVM_MSI_VALID_DEVID flag as the ioctl might fail.
+- zero otherwise
+
+struct kvm_irq_routing_irqchip {
+        __u32 irqchip;
+        __u32 pin;
+};
+
+struct kvm_irq_routing_msi {
+        __u32 address_lo;
+        __u32 address_hi;
+        __u32 data;
+        union {
+                __u32 pad;
+                __u32 devid;
+        };
+};
+
+If KVM_MSI_VALID_DEVID is set, devid contains a unique device identifier
+for the device that wrote the MSI message.  For PCI, this is usually a
+BDF identifier in the lower 16 bits.
+
+On x86, address_hi is ignored unless the KVM_X2APIC_API_USE_32BIT_IDS
+feature of the KVM_CAP_X2APIC_API capability is enabled.  If it is enabled,
+address_hi bits 31-8 provide bits 31-8 of the destination id.  Bits 7-0 of
+address_hi must be zero.
+
+struct kvm_irq_routing_s390_adapter {
+        __u64 ind_addr;
+        __u64 summary_addr;
+        __u64 ind_offset;
+        __u32 summary_offset;
+        __u32 adapter_id;
+};
+
+struct kvm_irq_routing_hv_sint {
+        __u32 vcpu;
+        __u32 sint;
+};
+
+
+4.55 KVM_SET_TSC_KHZ
+
+Capability: KVM_CAP_TSC_CONTROL
+Architectures: x86
+Type: vcpu ioctl
+Parameters: virtual tsc_khz
+Returns: 0 on success, -1 on error
+
+Specifies the tsc frequency for the virtual machine.  The unit of the
+frequency is KHz.
+
+
+4.56 KVM_GET_TSC_KHZ
+
+Capability: KVM_CAP_GET_TSC_KHZ
+Architectures: x86
+Type: vcpu ioctl
+Parameters: none
+Returns: virtual tsc-khz on success, negative value on error
+
+Returns the tsc frequency of the guest.  The unit of the return value is
+KHz.  If the host has an unstable tsc, this ioctl returns -EIO as an error.
+
+
+4.57 KVM_GET_LAPIC
+
+Capability: KVM_CAP_IRQCHIP
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_lapic_state (out)
+Returns: 0 on success, -1 on error
+
+#define KVM_APIC_REG_SIZE 0x400
+struct kvm_lapic_state {
+        char regs[KVM_APIC_REG_SIZE];
+};
+
+Reads the Local APIC registers and copies them into the input argument.  The
+data format and layout are the same as documented in the architecture manual.
+
+If KVM_X2APIC_API_USE_32BIT_IDS feature of KVM_CAP_X2APIC_API is
+enabled, then the format of APIC_ID register depends on the APIC mode
+(reported by MSR_IA32_APICBASE) of its VCPU.  x2APIC stores APIC ID in
+the APIC_ID register (bytes 32-35).  xAPIC only allows an 8-bit APIC ID
+which is stored in bits 31-24 of the APIC register, or equivalently in
+byte 35 of struct kvm_lapic_state's regs field.  KVM_GET_LAPIC must then
+be called after MSR_IA32_APICBASE has been set with KVM_SET_MSR.
+
+If KVM_X2APIC_API_USE_32BIT_IDS feature is disabled, struct kvm_lapic_state
+always uses xAPIC format.
+
+
+4.58 KVM_SET_LAPIC
+
+Capability: KVM_CAP_IRQCHIP
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_lapic_state (in)
+Returns: 0 on success, -1 on error
+
+#define KVM_APIC_REG_SIZE 0x400
+struct kvm_lapic_state {
+        char regs[KVM_APIC_REG_SIZE];
+};
+
+Copies the input argument into the Local APIC registers.  The data format
+and layout are the same as documented in the architecture manual.
+
+The format of the APIC ID register (bytes 32-35 of struct kvm_lapic_state's
+regs field) depends on the state of the KVM_CAP_X2APIC_API capability.
+See the note in KVM_GET_LAPIC.
+
+
+4.59 KVM_IOEVENTFD
+
+Capability: KVM_CAP_IOEVENTFD
+Architectures: all
+Type: vm ioctl
+Parameters: struct kvm_ioeventfd (in)
+Returns: 0 on success, !0 on error
+
+This ioctl attaches or detaches an ioeventfd to a legal pio/mmio address
+within the guest.  A guest write in the registered address will signal the
+provided event instead of triggering an exit.
+
+struct kvm_ioeventfd {
+        __u64 datamatch;
+        __u64 addr;        /* legal pio/mmio address */
+        __u32 len;         /* 0, 1, 2, 4, or 8 bytes  */
+        __s32 fd;
+        __u32 flags;
+        __u8  pad[36];
+};
+
+For the special case of virtio-ccw devices on s390, the ioevent is matched
+to a subchannel/virtqueue tuple instead.
+
+The following flags are defined:
+
+#define KVM_IOEVENTFD_FLAG_DATAMATCH (1 << kvm_ioeventfd_flag_nr_datamatch)
+#define KVM_IOEVENTFD_FLAG_PIO       (1 << kvm_ioeventfd_flag_nr_pio)
+#define KVM_IOEVENTFD_FLAG_DEASSIGN  (1 << kvm_ioeventfd_flag_nr_deassign)
+#define KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY \
+        (1 << kvm_ioeventfd_flag_nr_virtio_ccw_notify)
+
+If the datamatch flag is set, the event will be signaled only if the value
+written to the registered address is equal to datamatch in struct
+kvm_ioeventfd.
+
+For virtio-ccw devices, addr contains the subchannel id and datamatch the
+virtqueue index.
+
+With KVM_CAP_IOEVENTFD_ANY_LENGTH, a zero length ioeventfd is allowed, and
+the kernel will ignore the length of the guest write and may get a faster
+vmexit.  The speedup may only apply to specific architectures, but the
+ioeventfd will work anyway.
+
+4.60 KVM_DIRTY_TLB
+
+Capability: KVM_CAP_SW_TLB
+Architectures: ppc
+Type: vcpu ioctl
+Parameters: struct kvm_dirty_tlb (in)
+Returns: 0 on success, -1 on error
+
+struct kvm_dirty_tlb {
+        __u64 bitmap;
+        __u32 num_dirty;
+};
+
+This must be called whenever userspace has changed an entry in the shared
+TLB, prior to calling KVM_RUN on the associated vcpu.
+
+The "bitmap" field is the userspace address of an array.  This array
+consists of a number of bits, equal to the total number of TLB entries as
+determined by the last successful call to KVM_CONFIG_TLB, rounded up to the
+nearest multiple of 64.
+
+Each bit corresponds to one TLB entry, ordered the same as in the shared TLB
+array.
+
+The array is little-endian: the bit 0 is the least significant bit of the
+first byte, bit 8 is the least significant bit of the second byte, etc.
+This avoids any complications with differing word sizes.
+
+The "num_dirty" field is a performance hint for KVM to determine whether it
+should skip processing the bitmap and just invalidate everything.  It must
+be set to the number of set bits in the bitmap.
+
+
+4.62 KVM_CREATE_SPAPR_TCE
+
+Capability: KVM_CAP_SPAPR_TCE
+Architectures: powerpc
+Type: vm ioctl
+Parameters: struct kvm_create_spapr_tce (in)
+Returns: file descriptor for manipulating the created TCE table
+
+This creates a virtual TCE (translation control entry) table, which
+is an IOMMU for PAPR-style virtual I/O.  It is used to translate
+logical addresses used in virtual I/O into guest physical addresses,
+and provides a scatter/gather capability for PAPR virtual I/O.
+
+/* for KVM_CAP_SPAPR_TCE */
+struct kvm_create_spapr_tce {
+        __u64 liobn;
+        __u32 window_size;
+};
+
+The liobn field gives the logical IO bus number for which to create a
+TCE table.  The window_size field specifies the size of the DMA window
+which this TCE table will translate - the table will contain one 64
+bit TCE entry for every 4kiB of the DMA window.
+
+When the guest issues an H_PUT_TCE hcall on a liobn for which a TCE
+table has been created using this ioctl(), the kernel will handle it
+in real mode, updating the TCE table.  H_PUT_TCE calls for other
+liobns will cause a vm exit and must be handled by userspace.
+
+The return value is a file descriptor which can be passed to mmap(2)
+to map the created TCE table into userspace.
This lets userspace read +the entries written by kernel-handled H_PUT_TCE calls, and also lets +userspace update the TCE table directly which is useful in some +circumstances. + + +4.63 KVM_ALLOCATE_RMA + +Capability: KVM_CAP_PPC_RMA +Architectures: powerpc +Type: vm ioctl +Parameters: struct kvm_allocate_rma (out) +Returns: file descriptor for mapping the allocated RMA + +This allocates a Real Mode Area (RMA) from the pool allocated at boot +time by the kernel. An RMA is a physically-contiguous, aligned region +of memory used on older POWER processors to provide the memory which +will be accessed by real-mode (MMU off) accesses in a KVM guest. +POWER processors support a set of sizes for the RMA that usually +includes 64MB, 128MB, 256MB and some larger powers of two. + +/* for KVM_ALLOCATE_RMA */ +struct kvm_allocate_rma { + __u64 rma_size; +}; + +The return value is a file descriptor which can be passed to mmap(2) +to map the allocated RMA into userspace. The mapped area can then be +passed to the KVM_SET_USER_MEMORY_REGION ioctl to establish it as the +RMA for a virtual machine. The size of the RMA in bytes (which is +fixed at host kernel boot time) is returned in the rma_size field of +the argument structure. + +The KVM_CAP_PPC_RMA capability is 1 or 2 if the KVM_ALLOCATE_RMA ioctl +is supported; 2 if the processor requires all virtual machines to have +an RMA, or 1 if the processor can use an RMA but doesn't require it, +because it supports the Virtual RMA (VRMA) facility. + + +4.64 KVM_NMI + +Capability: KVM_CAP_USER_NMI +Architectures: x86 +Type: vcpu ioctl +Parameters: none +Returns: 0 on success, -1 on error + +Queues an NMI on the thread's vcpu. Note this is well defined only +when KVM_CREATE_IRQCHIP has not been called, since this is an interface +between the virtual cpu core and virtual local APIC. After KVM_CREATE_IRQCHIP +has been called, this interface is completely emulated within the kernel. + +To use this to emulate the LINT1 input with KVM_CREATE_IRQCHIP, use the +following algorithm: + + - pause the vcpu + - read the local APIC's state (KVM_GET_LAPIC) + - check whether changing LINT1 will queue an NMI (see the LVT entry for LINT1) + - if so, issue KVM_NMI + - resume the vcpu + +Some guests configure the LINT1 NMI input to cause a panic, aiding in +debugging. + + +4.65 KVM_S390_UCAS_MAP + +Capability: KVM_CAP_S390_UCONTROL +Architectures: s390 +Type: vcpu ioctl +Parameters: struct kvm_s390_ucas_mapping (in) +Returns: 0 in case of success + +The parameter is defined like this: + struct kvm_s390_ucas_mapping { + __u64 user_addr; + __u64 vcpu_addr; + __u64 length; + }; + +This ioctl maps the memory at "user_addr" with the length "length" to +the vcpu's address space starting at "vcpu_addr". All parameters need to +be aligned by 1 megabyte. + + +4.66 KVM_S390_UCAS_UNMAP + +Capability: KVM_CAP_S390_UCONTROL +Architectures: s390 +Type: vcpu ioctl +Parameters: struct kvm_s390_ucas_mapping (in) +Returns: 0 in case of success + +The parameter is defined like this: + struct kvm_s390_ucas_mapping { + __u64 user_addr; + __u64 vcpu_addr; + __u64 length; + }; + +This ioctl unmaps the memory in the vcpu's address space starting at +"vcpu_addr" with the length "length". The field "user_addr" is ignored. +All parameters need to be aligned by 1 megabyte. 
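+
+For illustration, a minimal 4.65/4.66 pair might look like this (a sketch
+only: error handling is omitted, and aligned_alloc is just one way to
+satisfy the 1 megabyte alignment requirement stated above):
+
+    #include <stdlib.h>
+    #include <sys/ioctl.h>
+    #include <linux/kvm.h>
+
+    /* All three parameters must be 1MB aligned. */
+    void *buf = aligned_alloc(0x100000, 0x100000);
+
+    struct kvm_s390_ucas_mapping map = {
+        .user_addr = (__u64)buf,
+        .vcpu_addr = 0x100000,   /* where it appears in the vcpu's space */
+        .length    = 0x100000,
+    };
+
+    ioctl(vcpu_fd, KVM_S390_UCAS_MAP, &map);
+
+    /* ... run the vcpu ... */
+
+    /* user_addr is ignored on unmap, so reusing 'map' is harmless. */
+    ioctl(vcpu_fd, KVM_S390_UCAS_UNMAP, &map);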
+
+
+4.67 KVM_S390_VCPU_FAULT
+
+Capability: KVM_CAP_S390_UCONTROL
+Architectures: s390
+Type: vcpu ioctl
+Parameters: vcpu absolute address (in)
+Returns: 0 in case of success
+
+This call creates a page table entry on the virtual cpu's address space
+(for user controlled virtual machines) or the virtual machine's address
+space (for regular virtual machines).  This only works for minor faults,
+thus it's recommended to access the subject memory page via the user page
+table upfront.  This is useful to handle validity intercepts for user
+controlled virtual machines to fault in the virtual cpu's lowcore pages
+prior to calling the KVM_RUN ioctl.
+
+
+4.68 KVM_SET_ONE_REG
+
+Capability: KVM_CAP_ONE_REG
+Architectures: all
+Type: vcpu ioctl
+Parameters: struct kvm_one_reg (in)
+Returns: 0 on success, negative value on failure
+Errors:
+  ENOENT:   no such register
+  EINVAL:   invalid register ID, or no such register
+  EPERM:    (arm64) register access not allowed before vcpu finalization
+(These error codes are indicative only: do not rely on a specific error
+code being returned in a specific situation.)
+
+struct kvm_one_reg {
+        __u64 id;
+        __u64 addr;
+};
+
+Using this ioctl, a single vcpu register can be set to a specific value
+defined by user space with the passed in struct kvm_one_reg, where id
+refers to the register identifier as described below and addr is a pointer
+to a variable with the respective size.  There can be architecture agnostic
+and architecture specific registers.  Each has its own range of operation
+and its own constants and width.  To keep track of the implemented
+registers, find a list below:
+
+  Arch  | Register | Width (bits)
+        |          |
+  PPC   | KVM_REG_PPC_HIOR | 64
+  PPC   | KVM_REG_PPC_IAC1 | 64
+  PPC   | KVM_REG_PPC_IAC2 | 64
+  PPC   | KVM_REG_PPC_IAC3 | 64
+  PPC   | KVM_REG_PPC_IAC4 | 64
+  PPC   | KVM_REG_PPC_DAC1 | 64
+  PPC   | KVM_REG_PPC_DAC2 | 64
+  PPC   | KVM_REG_PPC_DABR | 64
+  PPC   | KVM_REG_PPC_DSCR | 64
+  PPC   | KVM_REG_PPC_PURR | 64
+  PPC   | KVM_REG_PPC_SPURR | 64
+  PPC   | KVM_REG_PPC_DAR | 64
+  PPC   | KVM_REG_PPC_DSISR | 32
+  PPC   | KVM_REG_PPC_AMR | 64
+  PPC   | KVM_REG_PPC_UAMOR | 64
+  PPC   | KVM_REG_PPC_MMCR0 | 64
+  PPC   | KVM_REG_PPC_MMCR1 | 64
+  PPC   | KVM_REG_PPC_MMCRA | 64
+  PPC   | KVM_REG_PPC_MMCR2 | 64
+  PPC   | KVM_REG_PPC_MMCRS | 64
+  PPC   | KVM_REG_PPC_SIAR | 64
+  PPC   | KVM_REG_PPC_SDAR | 64
+  PPC   | KVM_REG_PPC_SIER | 64
+  PPC   | KVM_REG_PPC_PMC1 | 32
+  PPC   | KVM_REG_PPC_PMC2 | 32
+  PPC   | KVM_REG_PPC_PMC3 | 32
+  PPC   | KVM_REG_PPC_PMC4 | 32
+  PPC   | KVM_REG_PPC_PMC5 | 32
+  PPC   | KVM_REG_PPC_PMC6 | 32
+  PPC   | KVM_REG_PPC_PMC7 | 32
+  PPC   | KVM_REG_PPC_PMC8 | 32
+  PPC   | KVM_REG_PPC_FPR0 | 64
+          ...
+  PPC   | KVM_REG_PPC_FPR31 | 64
+  PPC   | KVM_REG_PPC_VR0 | 128
+          ...
+  PPC   | KVM_REG_PPC_VR31 | 128
+  PPC   | KVM_REG_PPC_VSR0 | 128
+          ...
+  PPC   | KVM_REG_PPC_VSR31 | 128
+  PPC   | KVM_REG_PPC_FPSCR | 64
+  PPC   | KVM_REG_PPC_VSCR | 32
+  PPC   | KVM_REG_PPC_VPA_ADDR | 64
+  PPC   | KVM_REG_PPC_VPA_SLB | 128
+  PPC   | KVM_REG_PPC_VPA_DTL | 128
+  PPC   | KVM_REG_PPC_EPCR | 32
+  PPC   | KVM_REG_PPC_EPR | 32
+  PPC   | KVM_REG_PPC_TCR | 32
+  PPC   | KVM_REG_PPC_TSR | 32
+  PPC   | KVM_REG_PPC_OR_TSR | 32
+  PPC   | KVM_REG_PPC_CLEAR_TSR | 32
+  PPC   | KVM_REG_PPC_MAS0 | 32
+  PPC   | KVM_REG_PPC_MAS1 | 32
+  PPC   | KVM_REG_PPC_MAS2 | 64
+  PPC   | KVM_REG_PPC_MAS7_3 | 64
+  PPC   | KVM_REG_PPC_MAS4 | 32
+  PPC   | KVM_REG_PPC_MAS6 | 32
+  PPC   | KVM_REG_PPC_MMUCFG | 32
+  PPC   | KVM_REG_PPC_TLB0CFG | 32
+  PPC   | KVM_REG_PPC_TLB1CFG | 32
+  PPC   | KVM_REG_PPC_TLB2CFG | 32
+  PPC   | KVM_REG_PPC_TLB3CFG | 32
+  PPC   | KVM_REG_PPC_TLB0PS | 32
+  PPC   | KVM_REG_PPC_TLB1PS | 32
+  PPC   | KVM_REG_PPC_TLB2PS | 32
+  PPC   | KVM_REG_PPC_TLB3PS | 32
+  PPC   | KVM_REG_PPC_EPTCFG | 32
+  PPC   | KVM_REG_PPC_ICP_STATE | 64
+  PPC   | KVM_REG_PPC_VP_STATE | 128
+  PPC   | KVM_REG_PPC_TB_OFFSET | 64
+  PPC   | KVM_REG_PPC_SPMC1 | 32
+  PPC   | KVM_REG_PPC_SPMC2 | 32
+  PPC   | KVM_REG_PPC_IAMR | 64
+  PPC   | KVM_REG_PPC_TFHAR | 64
+  PPC   | KVM_REG_PPC_TFIAR | 64
+  PPC   | KVM_REG_PPC_TEXASR | 64
+  PPC   | KVM_REG_PPC_FSCR | 64
+  PPC   | KVM_REG_PPC_PSPB | 32
+  PPC   | KVM_REG_PPC_EBBHR | 64
+  PPC   | KVM_REG_PPC_EBBRR | 64
+  PPC   | KVM_REG_PPC_BESCR | 64
+  PPC   | KVM_REG_PPC_TAR | 64
+  PPC   | KVM_REG_PPC_DPDES | 64
+  PPC   | KVM_REG_PPC_DAWR | 64
+  PPC   | KVM_REG_PPC_DAWRX | 64
+  PPC   | KVM_REG_PPC_CIABR | 64
+  PPC   | KVM_REG_PPC_IC | 64
+  PPC   | KVM_REG_PPC_VTB | 64
+  PPC   | KVM_REG_PPC_CSIGR | 64
+  PPC   | KVM_REG_PPC_TACR | 64
+  PPC   | KVM_REG_PPC_TCSCR | 64
+  PPC   | KVM_REG_PPC_PID | 64
+  PPC   | KVM_REG_PPC_ACOP | 64
+  PPC   | KVM_REG_PPC_VRSAVE | 32
+  PPC   | KVM_REG_PPC_LPCR | 32
+  PPC   | KVM_REG_PPC_LPCR_64 | 64
+  PPC   | KVM_REG_PPC_PPR | 64
+  PPC   | KVM_REG_PPC_ARCH_COMPAT | 32
+  PPC   | KVM_REG_PPC_DABRX | 32
+  PPC   | KVM_REG_PPC_WORT | 64
+  PPC   | KVM_REG_PPC_SPRG9 | 64
+  PPC   | KVM_REG_PPC_DBSR | 32
+  PPC   | KVM_REG_PPC_TIDR | 64
+  PPC   | KVM_REG_PPC_PSSCR | 64
+  PPC   | KVM_REG_PPC_DEC_EXPIRY | 64
+  PPC   | KVM_REG_PPC_PTCR | 64
+  PPC   | KVM_REG_PPC_TM_GPR0 | 64
+          ...
+  PPC   | KVM_REG_PPC_TM_GPR31 | 64
+  PPC   | KVM_REG_PPC_TM_VSR0 | 128
+          ...
+  PPC   | KVM_REG_PPC_TM_VSR63 | 128
+  PPC   | KVM_REG_PPC_TM_CR | 64
+  PPC   | KVM_REG_PPC_TM_LR | 64
+  PPC   | KVM_REG_PPC_TM_CTR | 64
+  PPC   | KVM_REG_PPC_TM_FPSCR | 64
+  PPC   | KVM_REG_PPC_TM_AMR | 64
+  PPC   | KVM_REG_PPC_TM_PPR | 64
+  PPC   | KVM_REG_PPC_TM_VRSAVE | 64
+  PPC   | KVM_REG_PPC_TM_VSCR | 32
+  PPC   | KVM_REG_PPC_TM_DSCR | 64
+  PPC   | KVM_REG_PPC_TM_TAR | 64
+  PPC   | KVM_REG_PPC_TM_XER | 64
+        |          |
+  MIPS  | KVM_REG_MIPS_R0 | 64
+          ...
+  MIPS  | KVM_REG_MIPS_R31 | 64
+  MIPS  | KVM_REG_MIPS_HI | 64
+  MIPS  | KVM_REG_MIPS_LO | 64
+  MIPS  | KVM_REG_MIPS_PC | 64
+  MIPS  | KVM_REG_MIPS_CP0_INDEX | 32
+  MIPS  | KVM_REG_MIPS_CP0_ENTRYLO0 | 64
+  MIPS  | KVM_REG_MIPS_CP0_ENTRYLO1 | 64
+  MIPS  | KVM_REG_MIPS_CP0_CONTEXT | 64
+  MIPS  | KVM_REG_MIPS_CP0_CONTEXTCONFIG | 32
+  MIPS  | KVM_REG_MIPS_CP0_USERLOCAL | 64
+  MIPS  | KVM_REG_MIPS_CP0_XCONTEXTCONFIG | 64
+  MIPS  | KVM_REG_MIPS_CP0_PAGEMASK | 32
+  MIPS  | KVM_REG_MIPS_CP0_PAGEGRAIN | 32
+  MIPS  | KVM_REG_MIPS_CP0_SEGCTL0 | 64
+  MIPS  | KVM_REG_MIPS_CP0_SEGCTL1 | 64
+  MIPS  | KVM_REG_MIPS_CP0_SEGCTL2 | 64
+  MIPS  | KVM_REG_MIPS_CP0_PWBASE | 64
+  MIPS  | KVM_REG_MIPS_CP0_PWFIELD | 64
+  MIPS  | KVM_REG_MIPS_CP0_PWSIZE | 64
+  MIPS  | KVM_REG_MIPS_CP0_WIRED | 32
+  MIPS  | KVM_REG_MIPS_CP0_PWCTL | 32
+  MIPS  | KVM_REG_MIPS_CP0_HWRENA | 32
+  MIPS  | KVM_REG_MIPS_CP0_BADVADDR | 64
+  MIPS  | KVM_REG_MIPS_CP0_BADINSTR | 32
+  MIPS  | KVM_REG_MIPS_CP0_BADINSTRP | 32
+  MIPS  | KVM_REG_MIPS_CP0_COUNT | 32
+  MIPS  | KVM_REG_MIPS_CP0_ENTRYHI | 64
+  MIPS  | KVM_REG_MIPS_CP0_COMPARE | 32
+  MIPS  | KVM_REG_MIPS_CP0_STATUS | 32
+  MIPS  | KVM_REG_MIPS_CP0_INTCTL | 32
+  MIPS  | KVM_REG_MIPS_CP0_CAUSE | 32
+  MIPS  | KVM_REG_MIPS_CP0_EPC | 64
+  MIPS  | KVM_REG_MIPS_CP0_PRID | 32
+  MIPS  | KVM_REG_MIPS_CP0_EBASE | 64
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG1 | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG2 | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG3 | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG4 | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG5 | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG7 | 32
+  MIPS  | KVM_REG_MIPS_CP0_XCONTEXT | 64
+  MIPS  | KVM_REG_MIPS_CP0_ERROREPC | 64
+  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH1 | 64
+  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH2 | 64
+  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH3 | 64
+  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH4 | 64
+  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH5 | 64
+  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH6 | 64
+  MIPS  | KVM_REG_MIPS_CP0_MAAR(0..63) | 64
+  MIPS  | KVM_REG_MIPS_COUNT_CTL | 64
+  MIPS  | KVM_REG_MIPS_COUNT_RESUME | 64
+  MIPS  | KVM_REG_MIPS_COUNT_HZ | 64
+  MIPS  | KVM_REG_MIPS_FPR_32(0..31) | 32
+  MIPS  | KVM_REG_MIPS_FPR_64(0..31) | 64
+  MIPS  | KVM_REG_MIPS_VEC_128(0..31) | 128
+  MIPS  | KVM_REG_MIPS_FCR_IR | 32
+  MIPS  | KVM_REG_MIPS_FCR_CSR | 32
+  MIPS  | KVM_REG_MIPS_MSA_IR | 32
+  MIPS  | KVM_REG_MIPS_MSA_CSR | 32
+
+ARM registers are mapped using the lower 32 bits.  The upper 16 of that
+is the register group type, or coprocessor number:
+
+ARM core registers have the following id bit patterns:
+  0x4020 0000 0010 <index into the kvm_regs struct:16>
+
+ARM 32-bit CP15 registers have the following id bit patterns:
+  0x4020 0000 000F <zero:1> <crn:4> <crm:4> <opc1:4> <opc2:3>
+
+ARM 64-bit CP15 registers have the following id bit patterns:
+  0x4030 0000 000F <zero:1> <zero:4> <crm:4> <opc1:4> <zero:3>
+
+ARM CCSIDR registers are demultiplexed by CSSELR value:
+  0x4020 0000 0011 00 <csselr:8>
+
+ARM 32-bit VFP control registers have the following id bit patterns:
+  0x4020 0000 0012 1 <regno:12>
+
+ARM 64-bit FP registers have the following id bit patterns:
+  0x4030 0000 0012 0 <regno:12>
+
+ARM firmware pseudo-registers have the following bit pattern:
+  0x4030 0000 0014 <regno:16>
+
+
+arm64 registers are mapped using the lower 32 bits.  The upper 16 of
+that is the register group type, or coprocessor number:
+
+arm64 core/FP-SIMD registers have the following id bit patterns.  Note
+that the size of the access is variable, as the kvm_regs structure
+contains elements ranging from 32 to 128 bits.  The index is a 32bit
+value in the kvm_regs structure seen as a 32bit array.
+  0x60x0 0000 0010 <index into the kvm_regs struct:16>
+
+Specifically:
+    Encoding            Register  Bits  kvm_regs member
+----------------------------------------------------------------
+  0x6030 0000 0010 0000 X0          64  regs.regs[0]
+  0x6030 0000 0010 0002 X1          64  regs.regs[1]
+   ...
+  0x6030 0000 0010 003c X30         64  regs.regs[30]
+  0x6030 0000 0010 003e SP          64  regs.sp
+  0x6030 0000 0010 0040 PC          64  regs.pc
+  0x6030 0000 0010 0042 PSTATE      64  regs.pstate
+  0x6030 0000 0010 0044 SP_EL1      64  sp_el1
+  0x6030 0000 0010 0046 ELR_EL1     64  elr_el1
+  0x6030 0000 0010 0048 SPSR_EL1    64  spsr[KVM_SPSR_EL1] (alias SPSR_SVC)
+  0x6030 0000 0010 004a SPSR_ABT    64  spsr[KVM_SPSR_ABT]
+  0x6030 0000 0010 004c SPSR_UND    64  spsr[KVM_SPSR_UND]
+  0x6030 0000 0010 004e SPSR_IRQ    64  spsr[KVM_SPSR_IRQ]
+  0x6030 0000 0010 0050 SPSR_FIQ    64  spsr[KVM_SPSR_FIQ]
+  0x6040 0000 0010 0054 V0         128  fp_regs.vregs[0]    (*)
+  0x6040 0000 0010 0058 V1         128  fp_regs.vregs[1]    (*)
+   ...
+  0x6040 0000 0010 00d0 V31        128  fp_regs.vregs[31]   (*)
+  0x6020 0000 0010 00d4 FPSR        32  fp_regs.fpsr
+  0x6020 0000 0010 00d5 FPCR        32  fp_regs.fpcr
+
+(*) These encodings are not accepted for SVE-enabled vcpus.  See
+    KVM_ARM_VCPU_INIT.
+
+    The equivalent register content can be accessed via bits [127:0] of
+    the corresponding SVE Zn registers instead for vcpus that have SVE
+    enabled (see below).
+
+arm64 CCSIDR registers are demultiplexed by CSSELR value:
+  0x6020 0000 0011 00 <csselr:8>
+
+arm64 system registers have the following id bit patterns:
+  0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
+
+arm64 firmware pseudo-registers have the following bit pattern:
+  0x6030 0000 0014 <regno:16>
+
+arm64 SVE registers have the following bit patterns:
+  0x6080 0000 0015 00 <n:5> <slice:5>   Zn bits[2048*slice + 2047 : 2048*slice]
+  0x6050 0000 0015 04 <n:4> <slice:5>   Pn bits[256*slice + 255 : 256*slice]
+  0x6050 0000 0015 060 <slice:5>        FFR bits[256*slice + 255 : 256*slice]
+  0x6060 0000 0015 ffff                 KVM_REG_ARM64_SVE_VLS pseudo-register
+
+Access to register IDs where 2048 * slice >= 128 * max_vq will fail with
+ENOENT.  max_vq is the vcpu's maximum supported vector length in 128-bit
+quadwords: see (**) below.
+
+These registers are only accessible on vcpus for which SVE is enabled.
+See KVM_ARM_VCPU_INIT for details.
+
+In addition, except for KVM_REG_ARM64_SVE_VLS, these registers are not
+accessible until the vcpu's SVE configuration has been finalized
+using KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE).  See KVM_ARM_VCPU_INIT
+and KVM_ARM_VCPU_FINALIZE for more information about this procedure.
+
+KVM_REG_ARM64_SVE_VLS is a pseudo-register that allows the set of vector
+lengths supported by the vcpu to be discovered and configured by
+userspace.  When transferred to or from user memory via KVM_GET_ONE_REG
+or KVM_SET_ONE_REG, the value of this register is of type
+__u64[KVM_ARM64_SVE_VLS_WORDS], and encodes the set of vector lengths as
+follows:
+
+__u64 vector_lengths[KVM_ARM64_SVE_VLS_WORDS];
+
+if (vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX &&
+    ((vector_lengths[(vq - KVM_ARM64_SVE_VQ_MIN) / 64] >>
+      ((vq - KVM_ARM64_SVE_VQ_MIN) % 64)) & 1))
+        /* Vector length vq * 16 bytes supported */
+else
+        /* Vector length vq * 16 bytes not supported */
+
+(**) The maximum value vq for which the above condition is true is
+max_vq.  This is the maximum vector length available to the guest on
+this vcpu, and determines which register slices are visible through
+this ioctl interface.
+
+(See Documentation/arm64/sve.rst for an explanation of the "vq"
+nomenclature.)
+
+KVM_REG_ARM64_SVE_VLS is only accessible after KVM_ARM_VCPU_INIT.
+KVM_ARM_VCPU_INIT initialises it to the best set of vector lengths that
+the host supports.
+
+Userspace may subsequently modify it if desired until the vcpu's SVE
+configuration is finalized using KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE).
+
+Apart from simply removing all vector lengths from the host set that
+exceed some value, support for arbitrarily chosen sets of vector lengths
+is hardware-dependent and may not be available. Attempting to configure
+an invalid set of vector lengths via KVM_SET_ONE_REG will fail with
+EINVAL.
+
+After the vcpu's SVE configuration is finalized, further attempts to
+write this register will fail with EPERM.
+
+
+MIPS registers are mapped using the lower 32 bits. The upper 16 of that is
+the register group type:
+
+MIPS core registers (see above) have the following id bit patterns:
+  0x7030 0000 0000 <reg:16>
+
+MIPS CP0 registers (see KVM_REG_MIPS_CP0_* above) have the following id bit
+patterns depending on whether they're 32-bit or 64-bit registers:
+  0x7020 0000 0001 00 <reg:5> <sel:3>   (32-bit)
+  0x7030 0000 0001 00 <reg:5> <sel:3>   (64-bit)
+
+Note: KVM_REG_MIPS_CP0_ENTRYLO0 and KVM_REG_MIPS_CP0_ENTRYLO1 are the MIPS64
+versions of the EntryLo registers regardless of the word size of the host
+hardware, host kernel, guest, and whether XPA is present in the guest, i.e.
+with the RI and XI bits (if they exist) in bits 63 and 62 respectively, and
+the PFNX field starting at bit 30.
+
+MIPS MAARs (see KVM_REG_MIPS_CP0_MAAR(*) above) have the following id bit
+patterns:
+  0x7030 0000 0001 01 <reg:8>
+
+MIPS KVM control registers (see above) have the following id bit patterns:
+  0x7030 0000 0002 <reg:16>
+
+MIPS FPU registers (see KVM_REG_MIPS_FPR_{32,64}() above) have the following
+id bit patterns depending on the size of the register being accessed. They are
+always accessed according to the current guest FPU mode (Status.FR and
+Config5.FRE), i.e. as the guest would see them, and they become unpredictable
+if the guest FPU mode is changed. MIPS SIMD Architecture (MSA) vector
+registers (see KVM_REG_MIPS_VEC_128() above) have similar patterns as they
+overlap the FPU registers:
+  0x7020 0000 0003 00 <0:3> <reg:5>   (32-bit FPU registers)
+  0x7030 0000 0003 00 <0:3> <reg:5>   (64-bit FPU registers)
+  0x7040 0000 0003 00 <0:3> <reg:5>   (128-bit MSA vector registers)
+
+MIPS FPU control registers (see KVM_REG_MIPS_FCR_{IR,CSR} above) have the
+following id bit patterns:
+  0x7020 0000 0003 01 <0:3> <reg:5>
+
+MIPS MSA control registers (see KVM_REG_MIPS_MSA_{IR,CSR} above) have the
+following id bit patterns:
+  0x7020 0000 0003 02 <0:3> <reg:5>
+
+
+4.69 KVM_GET_ONE_REG
+
+Capability: KVM_CAP_ONE_REG
+Architectures: all
+Type: vcpu ioctl
+Parameters: struct kvm_one_reg (in and out)
+Returns: 0 on success, negative value on failure
+Errors include:
+  ENOENT:  no such register
+  EINVAL:  invalid register ID, or no such register
+  EPERM:   (arm64) register access not allowed before vcpu finalization
+(These error codes are indicative only: do not rely on a specific error
+code being returned in a specific situation.)
+
+This ioctl allows userspace to read the value of a single register
+implemented in a vcpu. The register to read is indicated by the "id"
+field of the kvm_one_reg struct passed in. On success, the register
+value can be found at the memory location pointed to by "addr".
+
+The list of registers accessible using this interface is identical to the
+list in 4.68.
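+
+As a usage illustration only, a minimal sketch of a read via this ioctl
+might look as follows; vcpu_fd is assumed to come from KVM_CREATE_VCPU,
+and a 64-bit register is assumed:
+
+	#include <sys/ioctl.h>
+	#include <linux/kvm.h>
+
+	/* Read one 64-bit register, identified by id, from a vcpu. */
+	static int get_reg_u64(int vcpu_fd, __u64 id, __u64 *val)
+	{
+		struct kvm_one_reg reg = {
+			.id   = id,
+			.addr = (__u64)(unsigned long)val,
+		};
+
+		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
+	}
+
+KVM_SET_ONE_REG (see 4.68) works the same way, with "addr" pointing at
+the value to be written.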
+
+
+4.70 KVM_KVMCLOCK_CTRL
+
+Capability: KVM_CAP_KVMCLOCK_CTRL
+Architectures: Any that implement pvclocks (currently x86 only)
+Type: vcpu ioctl
+Parameters: None
+Returns: 0 on success, -1 on error
+
+This signals to the host kernel that the specified guest is being paused by
+userspace. The host will set a flag in the pvclock structure that is checked
+from the soft lockup watchdog. The flag is part of the pvclock structure that
+is shared between guest and host, specifically the second bit of the flags
+field of the pvclock_vcpu_time_info structure. It will be set exclusively by
+the host and read/cleared exclusively by the guest. The guest operation of
+checking and clearing the flag must be an atomic operation, so
+load-link/store-conditional or an equivalent must be used. There are two
+cases where the guest will clear the flag: when the soft lockup watchdog
+timer resets itself or when a soft lockup is detected. This ioctl can be
+called any time after pausing the vcpu, but before it is resumed.
+
+
+4.71 KVM_SIGNAL_MSI
+
+Capability: KVM_CAP_SIGNAL_MSI
+Architectures: x86 arm arm64
+Type: vm ioctl
+Parameters: struct kvm_msi (in)
+Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
+
+Directly inject an MSI message. Only valid with an in-kernel irqchip that
+handles MSI messages.
+
+struct kvm_msi {
+	__u32 address_lo;
+	__u32 address_hi;
+	__u32 data;
+	__u32 flags;
+	__u32 devid;
+	__u8  pad[12];
+};
+
+flags: KVM_MSI_VALID_DEVID: devid contains a valid value. The per-VM
+  KVM_CAP_MSI_DEVID capability advertises the requirement to provide
+  the device ID. If this capability is not available, userspace
+  should never set the KVM_MSI_VALID_DEVID flag as the ioctl might fail.
+
+If KVM_MSI_VALID_DEVID is set, devid contains a unique device identifier
+for the device that wrote the MSI message. For PCI, this is usually a
+BDF (bus/device/function) identifier in the lower 16 bits.
+
+On x86, address_hi is ignored unless the KVM_X2APIC_API_USE_32BIT_IDS
+feature of KVM_CAP_X2APIC_API capability is enabled. If it is enabled,
+address_hi bits 31-8 provide bits 31-8 of the destination id. Bits 7-0 of
+address_hi must be zero.
+
+
+4.72 KVM_CREATE_PIT2
+
+Capability: KVM_CAP_PIT2
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_pit_config (in)
+Returns: 0 on success, -1 on error
+
+Creates an in-kernel device model for the i8254 PIT. This call is only valid
+after enabling in-kernel irqchip support via KVM_CREATE_IRQCHIP. The following
+parameters have to be passed:
+
+struct kvm_pit_config {
+	__u32 flags;
+	__u32 pad[15];
+};
+
+Valid flags are:
+
+#define KVM_PIT_SPEAKER_DUMMY 1 /* emulate speaker port stub */
+
+PIT timer interrupts may use a per-VM kernel thread for injection. If it
+exists, this thread will have a name of the following pattern:
+
+kvm-pit/<owner-process-pid>
+
+When running a guest with elevated priorities, the scheduling parameters of
+this thread may have to be adjusted accordingly.
+
+This IOCTL replaces the obsolete KVM_CREATE_PIT.
+
+
+4.73 KVM_GET_PIT2
+
+Capability: KVM_CAP_PIT_STATE2
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_pit_state2 (out)
+Returns: 0 on success, -1 on error
+
+Retrieves the state of the in-kernel PIT model. Only valid after
+KVM_CREATE_PIT2.
+The state is returned in the following structure:
+
+struct kvm_pit_state2 {
+	struct kvm_pit_channel_state channels[3];
+	__u32 flags;
+	__u32 reserved[9];
+};
+
+Valid flags are:
+
+/* disable PIT in HPET legacy mode */
+#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
+
+This IOCTL replaces the obsolete KVM_GET_PIT.
+
+
+4.74 KVM_SET_PIT2
+
+Capability: KVM_CAP_PIT_STATE2
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_pit_state2 (in)
+Returns: 0 on success, -1 on error
+
+Sets the state of the in-kernel PIT model. Only valid after KVM_CREATE_PIT2.
+See KVM_GET_PIT2 for details on struct kvm_pit_state2.
+
+This IOCTL replaces the obsolete KVM_SET_PIT.
+
+
+4.75 KVM_PPC_GET_SMMU_INFO
+
+Capability: KVM_CAP_PPC_GET_SMMU_INFO
+Architectures: powerpc
+Type: vm ioctl
+Parameters: None
+Returns: 0 on success, -1 on error
+
+This populates and returns a structure describing the features of
+the "Server" class MMU emulation supported by KVM.
+This can in turn be used by userspace to generate the appropriate
+device-tree properties for the guest operating system.
+
+The structure contains some global information, followed by an
+array of supported segment page sizes:
+
+      struct kvm_ppc_smmu_info {
+	     __u64 flags;
+	     __u32 slb_size;
+	     __u32 pad;
+	     struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
+      };
+
+The supported flags are:
+
+    - KVM_PPC_PAGE_SIZES_REAL:
+        When that flag is set, guest page sizes must "fit" the backing
+        store page sizes. When not set, any page size in the list can
+        be used regardless of how they are backed by userspace.
+
+    - KVM_PPC_1T_SEGMENTS
+        The emulated MMU supports 1T segments in addition to the
+        standard 256M ones.
+
+    - KVM_PPC_NO_HASH
+        This flag indicates that HPT guests are not supported by KVM,
+        thus all guests must use radix MMU mode.
+
+The "slb_size" field indicates how many SLB entries are supported.
+
+The "sps" array contains 8 entries indicating the supported base
+page sizes for a segment in increasing order. Each entry is defined
+as follows:
+
+   struct kvm_ppc_one_seg_page_size {
+	__u32 page_shift;	/* Base page shift of segment (or 0) */
+	__u32 slb_enc;		/* SLB encoding for BookS */
+	struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
+   };
+
+An entry with a "page_shift" of 0 is unused. Because the array is
+organized in increasing order, a lookup can stop when encountering
+such an entry.
+
+The "slb_enc" field provides the encoding to use in the SLB for the
+page size. The bits are in positions such that the value can directly
+be OR'ed into the "vsid" argument of the slbmte instruction.
+
+The "enc" array is a list which, for each of those segment base page
+sizes, provides the list of supported actual page sizes (which can
+only be larger or equal to the base page size), along with the
+corresponding encoding in the hash PTE. Similarly, the array is
+8 entries sorted by increasing sizes and an entry with a "0" shift
+is an empty entry and a terminator:
+
+   struct kvm_ppc_one_page_size {
+	__u32 page_shift;	/* Page shift (or 0) */
+	__u32 pte_enc;		/* Encoding in the HPTE (>>12) */
+   };
+
+The "pte_enc" field provides a value that can be OR'ed into the hash
+PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
+into the hash PTE second double word).
+
+4.76 KVM_IRQFD
+
+Capability: KVM_CAP_IRQFD
+Architectures: x86 s390 arm arm64
+Type: vm ioctl
+Parameters: struct kvm_irqfd (in)
+Returns: 0 on success, -1 on error
+
+Allows setting an eventfd to directly trigger a guest interrupt.
+kvm_irqfd.fd specifies the file descriptor to use as the eventfd and
+kvm_irqfd.gsi specifies the irqchip pin toggled by this event. When
+an event is triggered on the eventfd, an interrupt is injected into
+the guest using the specified gsi pin. The irqfd is removed using
+the KVM_IRQFD_FLAG_DEASSIGN flag, specifying both kvm_irqfd.fd
+and kvm_irqfd.gsi.
+
+With KVM_CAP_IRQFD_RESAMPLE, KVM_IRQFD supports a de-assert and notify
+mechanism allowing emulation of level-triggered, irqfd-based
+interrupts. When KVM_IRQFD_FLAG_RESAMPLE is set the user must pass an
+additional eventfd in the kvm_irqfd.resamplefd field. When operating
+in resample mode, posting of an interrupt through kvm_irqfd.fd asserts
+the specified gsi in the irqchip. When the irqchip is resampled, such
+as from an EOI, the gsi is de-asserted and the user is notified via
+kvm_irqfd.resamplefd. It is the user's responsibility to re-queue
+the interrupt if the device making use of it still requires service.
+Note that closing the resamplefd is not sufficient to disable the
+irqfd. The KVM_IRQFD_FLAG_RESAMPLE flag is only necessary on assignment
+and need not be specified with KVM_IRQFD_FLAG_DEASSIGN.
+
+On arm/arm64, where gsi routing is supported, the following can happen:
+- in case no routing entry is associated to this gsi, injection fails;
+- in case the gsi is associated to an irqchip routing entry,
+  irqchip.pin + 32 corresponds to the injected SPI ID;
+- in case the gsi is associated to an MSI routing entry, the MSI
+  message and device ID are translated into an LPI (support restricted
+  to GICv3 ITS in-kernel emulation).
+
+4.77 KVM_PPC_ALLOCATE_HTAB
+
+Capability: KVM_CAP_PPC_ALLOC_HTAB
+Architectures: powerpc
+Type: vm ioctl
+Parameters: Pointer to u32 containing hash table order (in/out)
+Returns: 0 on success, -1 on error
+
+This requests the host kernel to allocate an MMU hash table for a
+guest using the PAPR paravirtualization interface. This only does
+anything if the kernel is configured to use the Book 3S HV style of
+virtualization. Otherwise the capability doesn't exist and the ioctl
+returns an ENOTTY error. The rest of this description assumes Book 3S
+HV.
+
+There must be no vcpus running when this ioctl is called; if there
+are, it will do nothing and return an EBUSY error.
+
+The parameter is a pointer to a 32-bit unsigned integer variable
+containing the order (log base 2) of the desired size of the hash
+table, which must be between 18 and 46. On successful return from the
+ioctl, the value will not be changed by the kernel.
+
+If no hash table has been allocated when any vcpu is asked to run
+(with the KVM_RUN ioctl), the host kernel will allocate a
+default-sized hash table (16 MB).
+
+If this ioctl is called when a hash table has already been allocated,
+with a different order from the existing hash table, the existing hash
+table will be freed and a new one allocated. If this is called
+when a hash table has already been allocated of the same order as
+specified, the kernel will clear out the existing hash table (zero
+all HPTEs). In either case, if the guest is using the virtualized
+real-mode area (VRMA) facility, the kernel will re-create the VRMA
+HPTEs on the next KVM_RUN of any vcpu.
+
+4.78 KVM_S390_INTERRUPT
+
+Capability: basic
+Architectures: s390
+Type: vm ioctl, vcpu ioctl
+Parameters: struct kvm_s390_interrupt (in)
+Returns: 0 on success, -1 on error
+
+Allows injecting an interrupt into the guest.
+Interrupts can be floating (vm ioctl) or per cpu (vcpu ioctl),
+depending on the interrupt type.
+
+Interrupt parameters are passed via kvm_s390_interrupt:
+
+struct kvm_s390_interrupt {
+	__u32 type;
+	__u32 parm;
+	__u64 parm64;
+};
+
+type can be one of the following:
+
+KVM_S390_SIGP_STOP (vcpu) - sigp stop; optional flags in parm
+KVM_S390_PROGRAM_INT (vcpu) - program check; code in parm
+KVM_S390_SIGP_SET_PREFIX (vcpu) - sigp set prefix; prefix address in parm
+KVM_S390_RESTART (vcpu) - restart
+KVM_S390_INT_CLOCK_COMP (vcpu) - clock comparator interrupt
+KVM_S390_INT_CPU_TIMER (vcpu) - CPU timer interrupt
+KVM_S390_INT_VIRTIO (vm) - virtio external interrupt; external interrupt
+			   parameters in parm and parm64
+KVM_S390_INT_SERVICE (vm) - sclp external interrupt; sclp parameter in parm
+KVM_S390_INT_EMERGENCY (vcpu) - sigp emergency; source cpu in parm
+KVM_S390_INT_EXTERNAL_CALL (vcpu) - sigp external call; source cpu in parm
+KVM_S390_INT_IO(ai,cssid,ssid,schid) (vm) - compound value to indicate an
+    I/O interrupt (ai - adapter interrupt; cssid,ssid,schid - subchannel);
+    I/O interruption parameters in parm (subchannel) and parm64 (intparm,
+    interruption subclass)
+KVM_S390_MCHK (vm, vcpu) - machine check interrupt; cr 14 bits in parm,
+			   machine check interrupt code in parm64 (note that
+			   machine checks needing further payload are not
+			   supported by this ioctl)
+
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
+
+4.79 KVM_PPC_GET_HTAB_FD
+
+Capability: KVM_CAP_PPC_HTAB_FD
+Architectures: powerpc
+Type: vm ioctl
+Parameters: Pointer to struct kvm_get_htab_fd (in)
+Returns: file descriptor number (>= 0) on success, -1 on error
+
+This returns a file descriptor that can be used either to read out the
+entries in the guest's hashed page table (HPT), or to write entries to
+initialize the HPT. The returned fd can only be written to if the
+KVM_GET_HTAB_WRITE bit is set in the flags field of the argument, and
+can only be read if that bit is clear. The argument struct looks like
+this:
+
+/* For KVM_PPC_GET_HTAB_FD */
+struct kvm_get_htab_fd {
+	__u64	flags;
+	__u64	start_index;
+	__u64	reserved[2];
+};
+
+/* Values for kvm_get_htab_fd.flags */
+#define KVM_GET_HTAB_BOLTED_ONLY	((__u64)0x1)
+#define KVM_GET_HTAB_WRITE		((__u64)0x2)
+
+The `start_index' field gives the index in the HPT of the entry at
+which to start reading. It is ignored when writing.
+
+Reads on the fd will initially supply information about all
+"interesting" HPT entries. Interesting entries are those with the
+bolted bit set, if the KVM_GET_HTAB_BOLTED_ONLY bit is set, otherwise
+all entries. When the end of the HPT is reached, the read() will
+return. If read() is called again on the fd, it will start again from
+the beginning of the HPT, but will only return HPT entries that have
+changed since they were last read.
+
+Data read or written is structured as a header (8 bytes) followed by a
+series of valid HPT entries (16 bytes each). The header indicates how
+many valid HPT entries there are and how many invalid entries follow
+the valid entries. The invalid entries are not represented explicitly
+in the stream. The header format is:
+
+struct kvm_get_htab_header {
+	__u32	index;
+	__u16	n_valid;
+	__u16	n_invalid;
+};
+
+Writes to the fd create HPT entries starting at the index given in the
+header; first `n_valid' valid entries with contents from the data
+written, then `n_invalid' invalid entries, invalidating any previously
+valid entries found.
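+
+As an illustration only (not part of the ABI description), the read
+stream can be walked as in the following sketch; htab_fd is assumed to
+be the descriptor returned by this ioctl on a powerpc host, and each
+read() is assumed to return whole header-plus-entries records:
+
+	#include <stdio.h>
+	#include <unistd.h>
+	#include <linux/kvm.h>
+
+	char buf[65536];
+	ssize_t len;
+
+	while ((len = read(htab_fd, buf, sizeof(buf))) > 0) {
+		char *p = buf;
+
+		while (p + sizeof(struct kvm_get_htab_header) <= buf + len) {
+			struct kvm_get_htab_header *hdr = (void *)p;
+
+			printf("index %u: %u valid, %u invalid\n",
+			       hdr->index, hdr->n_valid, hdr->n_invalid);
+			/* n_valid HPT entries of 16 bytes follow the header */
+			p += sizeof(*hdr) + (size_t)hdr->n_valid * 16;
+		}
+	}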
+
+4.80 KVM_CREATE_DEVICE
+
+Capability: KVM_CAP_DEVICE_CTRL
+Type: vm ioctl
+Parameters: struct kvm_create_device (in/out)
+Returns: 0 on success, -1 on error
+Errors:
+  ENODEV: The device type is unknown or unsupported
+  EEXIST: Device already created, and this type of device may not
+          be instantiated multiple times
+
+  Other error conditions may be defined by individual device types or
+  have their standard meanings.
+
+Creates an emulated device in the kernel. The file descriptor returned
+in fd can be used with KVM_SET/GET/HAS_DEVICE_ATTR.
+
+If the KVM_CREATE_DEVICE_TEST flag is set, only test whether the
+device type is supported (not necessarily whether it can be created
+in the current vm).
+
+Individual devices should not define flags. Attributes should be used
+for specifying any behavior that is not implied by the device type
+number.
+
+struct kvm_create_device {
+	__u32	type;	/* in: KVM_DEV_TYPE_xxx */
+	__u32	fd;	/* out: device handle */
+	__u32	flags;	/* in: KVM_CREATE_DEVICE_xxx */
+};
+
+4.81 KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR
+
+Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
+            KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+Type: device ioctl, vm ioctl, vcpu ioctl
+Parameters: struct kvm_device_attr
+Returns: 0 on success, -1 on error
+Errors:
+  ENXIO:  The group or attribute is unknown/unsupported for this device
+          or hardware support is missing.
+  EPERM:  The attribute cannot (currently) be accessed this way
+          (e.g. read-only attribute, or attribute that only makes
+          sense when the device is in a different state)
+
+  Other error conditions may be defined by individual device types.
+
+Gets/sets a specified piece of device configuration and/or state. The
+semantics are device-specific. See individual device documentation in
+the "devices" directory. As with ONE_REG, the size of the data
+transferred is defined by the particular attribute.
+
+struct kvm_device_attr {
+	__u32	flags;	/* no flags currently defined */
+	__u32	group;	/* device-defined */
+	__u64	attr;	/* group-defined */
+	__u64	addr;	/* userspace address of attr data */
+};
+
+4.82 KVM_HAS_DEVICE_ATTR
+
+Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
+            KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+Type: device ioctl, vm ioctl, vcpu ioctl
+Parameters: struct kvm_device_attr
+Returns: 0 on success, -1 on error
+Errors:
+  ENXIO:  The group or attribute is unknown/unsupported for this device
+          or hardware support is missing.
+
+Tests whether a device supports a particular attribute. A successful
+return indicates the attribute is implemented. It does not necessarily
+indicate that the attribute can be read or written in the device's
+current state. "addr" is ignored.
+
+4.83 KVM_ARM_VCPU_INIT
+
+Capability: basic
+Architectures: arm, arm64
+Type: vcpu ioctl
+Parameters: struct kvm_vcpu_init (in)
+Returns: 0 on success; -1 on error
+Errors:
+  EINVAL: the target is unknown, or the combination of features is invalid.
+  ENOENT: a features bit specified is unknown.
+
+This tells KVM what type of CPU to present to the guest, and what
+optional features it should have. This will cause a reset of the cpu
+registers to their initial values. If this is not called, KVM_RUN will
+return ENOEXEC for that vcpu.
+
+Note that because some registers reflect machine topology, all vcpus
+should be created before this ioctl is invoked.
+
+Userspace can call this function multiple times for a given vcpu, including
+after the vcpu has been run.
+This will reset the vcpu to its initial state. All calls to this
+function after the initial call must use the same target and same set
+of feature flags, otherwise EINVAL will be returned.
+
+Possible features:
+	- KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
+	  Depends on KVM_CAP_ARM_PSCI. If not set, the CPU will be powered on
+	  and execute guest code when KVM_RUN is called.
+	- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
+	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
+	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
+	  backward compatible with v0.2) for the CPU.
+	  Depends on KVM_CAP_ARM_PSCI_0_2.
+	- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
+	  Depends on KVM_CAP_ARM_PMU_V3.
+
+	- KVM_ARM_VCPU_PTRAUTH_ADDRESS: Enables Address Pointer authentication
+	  for arm64 only.
+	  Depends on KVM_CAP_ARM_PTRAUTH_ADDRESS.
+	  If KVM_CAP_ARM_PTRAUTH_ADDRESS and KVM_CAP_ARM_PTRAUTH_GENERIC are
+	  both present, then both KVM_ARM_VCPU_PTRAUTH_ADDRESS and
+	  KVM_ARM_VCPU_PTRAUTH_GENERIC must be requested or neither must be
+	  requested.
+
+	- KVM_ARM_VCPU_PTRAUTH_GENERIC: Enables Generic Pointer authentication
+	  for arm64 only.
+	  Depends on KVM_CAP_ARM_PTRAUTH_GENERIC.
+	  If KVM_CAP_ARM_PTRAUTH_ADDRESS and KVM_CAP_ARM_PTRAUTH_GENERIC are
+	  both present, then both KVM_ARM_VCPU_PTRAUTH_ADDRESS and
+	  KVM_ARM_VCPU_PTRAUTH_GENERIC must be requested or neither must be
+	  requested.
+
+	- KVM_ARM_VCPU_SVE: Enables SVE for the CPU (arm64 only).
+	  Depends on KVM_CAP_ARM_SVE.
+	  Requires KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE):
+
+	   * After KVM_ARM_VCPU_INIT:
+
+	      - KVM_REG_ARM64_SVE_VLS may be read using KVM_GET_ONE_REG: the
+	        initial value of this pseudo-register indicates the best set of
+	        vector lengths possible for a vcpu on this host.
+
+	   * Before KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE):
+
+	      - KVM_RUN and KVM_GET_REG_LIST are not available;
+
+	      - KVM_GET_ONE_REG and KVM_SET_ONE_REG cannot be used to access
+	        the scalable architectural SVE registers
+	        KVM_REG_ARM64_SVE_ZREG(), KVM_REG_ARM64_SVE_PREG() or
+	        KVM_REG_ARM64_SVE_FFR;
+
+	      - KVM_REG_ARM64_SVE_VLS may optionally be written using
+	        KVM_SET_ONE_REG, to modify the set of vector lengths available
+	        for the vcpu.
+
+	   * After KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE):
+
+	      - the KVM_REG_ARM64_SVE_VLS pseudo-register is immutable, and can
+	        no longer be written using KVM_SET_ONE_REG.
+
+4.84 KVM_ARM_PREFERRED_TARGET
+
+Capability: basic
+Architectures: arm, arm64
+Type: vm ioctl
+Parameters: struct kvm_vcpu_init (out)
+Returns: 0 on success; -1 on error
+Errors:
+  ENODEV: no preferred target available for the host
+
+This queries KVM for the preferred CPU target type which can be emulated
+by KVM on the underlying host.
+
+The ioctl returns a struct kvm_vcpu_init instance containing information
+about the preferred CPU target type and recommended features for it. The
+kvm_vcpu_init->features bitmap returned will have feature bits set if
+the preferred target recommends setting these features, but this is
+not mandatory.
+
+The information returned by this ioctl can be used to prepare an instance
+of struct kvm_vcpu_init for the KVM_ARM_VCPU_INIT ioctl, which will result
+in a VCPU matching the underlying host.
+
+
+4.85 KVM_GET_REG_LIST
+
+Capability: basic
+Architectures: arm, arm64, mips
+Type: vcpu ioctl
+Parameters: struct kvm_reg_list (in/out)
+Returns: 0 on success; -1 on error
+Errors:
+  E2BIG: the reg index list is too big to fit in the array specified by
+         the user (the number required will be written into n).
+
+struct kvm_reg_list {
+	__u64 n; /* number of registers in reg[] */
+	__u64 reg[0];
+};
+
+This ioctl returns the guest registers that are supported for the
+KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
+
+
+4.86 KVM_ARM_SET_DEVICE_ADDR (deprecated)
+
+Capability: KVM_CAP_ARM_SET_DEVICE_ADDR
+Architectures: arm, arm64
+Type: vm ioctl
+Parameters: struct kvm_arm_device_addr (in)
+Returns: 0 on success, -1 on error
+Errors:
+  ENODEV: The device id is unknown
+  ENXIO:  Device not supported on current system
+  EEXIST: Address already set
+  E2BIG:  Address outside guest physical address space
+  EBUSY:  Address overlaps with other device range
+
+struct kvm_arm_device_addr {
+	__u64 id;
+	__u64 addr;
+};
+
+Specify a device address in the guest's physical address space where guests
+can access emulated or directly exposed devices, which the host kernel needs
+to know about. The id field is an architecture specific identifier for a
+specific device.
+
+ARM/arm64 divides the id field into two parts, a device id and an
+address type id specific to the individual device.
+
+  bits:  | 63        ...        32 | 31    ...    16 | 15    ...    0 |
+  field: | 0x00000000              | device id       | addr type id   |
+
+ARM/arm64 currently only require this when using the in-kernel GIC
+support for the hardware VGIC features, using KVM_ARM_DEVICE_VGIC_V2
+as the device id. When setting the base address for the guest's
+mapping of the VGIC virtual CPU and distributor interface, the ioctl
+must be called after calling KVM_CREATE_IRQCHIP, but before calling
+KVM_RUN on any of the VCPUs. Calling this ioctl twice for any of the
+base addresses will return -EEXIST.
+
+Note, this IOCTL is deprecated and the more flexible SET/GET_DEVICE_ATTR API
+should be used instead.
+
+
+4.87 KVM_PPC_RTAS_DEFINE_TOKEN
+
+Capability: KVM_CAP_PPC_RTAS
+Architectures: ppc
+Type: vm ioctl
+Parameters: struct kvm_rtas_token_args
+Returns: 0 on success, -1 on error
+
+Defines a token value for an RTAS (Run Time Abstraction Services)
+service in order to allow it to be handled in the kernel. The
+argument struct gives the name of the service, which must be the name
+of a service that has a kernel-side implementation. If the token
+value is non-zero, it will be associated with that service, and
+subsequent RTAS calls by the guest specifying that token will be
+handled by the kernel. If the token value is 0, then any token
+associated with the service will be forgotten, and subsequent RTAS
+calls by the guest for that service will be passed to userspace to be
+handled.
+
+4.88 KVM_SET_GUEST_DEBUG
+
+Capability: KVM_CAP_SET_GUEST_DEBUG
+Architectures: x86, s390, ppc, arm64
+Type: vcpu ioctl
+Parameters: struct kvm_guest_debug (in)
+Returns: 0 on success; -1 on error
+
+struct kvm_guest_debug {
+	__u32 control;
+	__u32 pad;
+	struct kvm_guest_debug_arch arch;
+};
+
+Set up the processor specific debug registers and configure vcpu for
+handling guest debug events. There are two parts to the structure; the
+first is a control bitfield, which indicates the type of debug events
+to handle when running.
+Common control bits are:
+
+  - KVM_GUESTDBG_ENABLE:     guest debugging is enabled
+  - KVM_GUESTDBG_SINGLESTEP: the next run should single-step
+
+The top 16 bits of the control field are architecture specific control
+flags which can include the following:
+
+  - KVM_GUESTDBG_USE_SW_BP:     using software breakpoints [x86, arm64]
+  - KVM_GUESTDBG_USE_HW_BP:     using hardware breakpoints [x86, s390, arm64]
+  - KVM_GUESTDBG_INJECT_DB:     inject DB type exception [x86]
+  - KVM_GUESTDBG_INJECT_BP:     inject BP type exception [x86]
+  - KVM_GUESTDBG_EXIT_PENDING:  trigger an immediate guest exit [s390]
+
+For example, KVM_GUESTDBG_USE_SW_BP indicates that software breakpoints
+are enabled in memory, so we need to ensure breakpoint exceptions are
+correctly trapped and the KVM run loop exits at the breakpoint rather
+than running off into the normal guest vector. For KVM_GUESTDBG_USE_HW_BP
+we need to ensure the guest vCPU's architecture specific registers are
+updated to the correct (supplied) values.
+
+The second part of the structure is architecture specific and
+typically contains a set of debug registers.
+
+For arm64 the number of debug registers is implementation defined and
+can be determined by querying the KVM_CAP_GUEST_DEBUG_HW_BPS and
+KVM_CAP_GUEST_DEBUG_HW_WPS capabilities which return a positive number
+indicating the number of supported registers.
+
+When a debug event occurs, the main run loop exits with the reason
+KVM_EXIT_DEBUG, with the kvm_debug_exit_arch part of the kvm_run
+structure containing architecture specific debug information.
+
+4.89 KVM_GET_EMULATED_CPUID
+
+Capability: KVM_CAP_EXT_EMUL_CPUID
+Architectures: x86
+Type: system ioctl
+Parameters: struct kvm_cpuid2 (in/out)
+Returns: 0 on success, -1 on error
+
+struct kvm_cpuid2 {
+	__u32 nent;
+	__u32 flags;
+	struct kvm_cpuid_entry2 entries[0];
+};
+
+The member 'flags' is used for passing flags from userspace.
+
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX		BIT(0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC		BIT(1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT		BIT(2)
+
+struct kvm_cpuid_entry2 {
+	__u32 function;
+	__u32 index;
+	__u32 flags;
+	__u32 eax;
+	__u32 ebx;
+	__u32 ecx;
+	__u32 edx;
+	__u32 padding[3];
+};
+
+This ioctl returns x86 cpuid features which are emulated by
+kvm. Userspace can use the information returned by this ioctl to query
+which features are emulated by kvm instead of being present natively.
+
+Userspace invokes KVM_GET_EMULATED_CPUID by passing a kvm_cpuid2
+structure with the 'nent' field indicating the number of entries in
+the variable-size array 'entries'. If the number of entries is too low
+to describe the cpu capabilities, an error (E2BIG) is returned. If the
+number is too high, the 'nent' field is adjusted and an error (ENOMEM)
+is returned. If the number is just right, the 'nent' field is adjusted
+to the number of valid entries in the 'entries' array, which is then
+filled.
+
+The entries returned are the set CPUID bits of the respective features
+which kvm emulates, as returned by the CPUID instruction, with unknown
+or unsupported feature bits cleared.
+
+Features like x2apic, for example, may not be present in the host cpu
+but are exposed by kvm in KVM_GET_SUPPORTED_CPUID because they can be
+emulated efficiently; they are thus not included here.
+
+The fields in each entry are defined as follows:
+
+  function: the eax value used to obtain the entry
+  index: the ecx value used to obtain the entry (for entries that are
+         affected by ecx)
+  flags: an OR of zero or more of the following:
+        KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
+           if the index field is valid
+        KVM_CPUID_FLAG_STATEFUL_FUNC:
+           if cpuid for this function returns different values for successive
+           invocations; there will be several entries with the same function,
+           all with this flag set
+        KVM_CPUID_FLAG_STATE_READ_NEXT:
+           for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
+           the first entry to be read by a cpu
+  eax, ebx, ecx, edx: the values returned by the cpuid instruction for
+         this function/index combination
+
+4.90 KVM_S390_MEM_OP
+
+Capability: KVM_CAP_S390_MEM_OP
+Architectures: s390
+Type: vcpu ioctl
+Parameters: struct kvm_s390_mem_op (in)
+Returns: = 0 on success,
+         < 0 on generic error (e.g. -EFAULT or -ENOMEM),
+         > 0 if an exception occurred while walking the page tables
+
+Read or write data from/to the logical (virtual) memory of a VCPU.
+
+Parameters are specified via the following structure:
+
+struct kvm_s390_mem_op {
+	__u64 gaddr;		/* the guest address */
+	__u64 flags;		/* flags */
+	__u32 size;		/* number of bytes */
+	__u32 op;		/* type of operation */
+	__u64 buf;		/* buffer in userspace */
+	__u8 ar;		/* the access register number */
+	__u8 reserved[31];	/* should be set to 0 */
+};
+
+The type of operation is specified in the "op" field. It is either
+KVM_S390_MEMOP_LOGICAL_READ for reading from logical memory space or
+KVM_S390_MEMOP_LOGICAL_WRITE for writing to logical memory space. The
+KVM_S390_MEMOP_F_CHECK_ONLY flag can be set in the "flags" field to check
+whether the corresponding memory access would create an access exception
+(without touching the data in the memory at the destination). In case an
+access exception occurred while walking the MMU tables of the guest, the
+ioctl returns a positive error number to indicate the type of exception.
+This exception is also raised directly at the corresponding VCPU if the
+flag KVM_S390_MEMOP_F_INJECT_EXCEPTION is set in the "flags" field.
+
+The start address of the memory region has to be specified in the "gaddr"
+field, and the length of the region in the "size" field. "buf" is the buffer
+supplied by the userspace application where the read data should be written
+to for KVM_S390_MEMOP_LOGICAL_READ, or where the data that should be written
+is stored for a KVM_S390_MEMOP_LOGICAL_WRITE. "buf" is unused and can be NULL
+when KVM_S390_MEMOP_F_CHECK_ONLY is specified. "ar" designates the access
+register number to be used.
+
+The "reserved" field is meant for future extensions. It is not used by
+KVM with the currently defined set of flags.
+
+4.91 KVM_S390_GET_SKEYS
+
+Capability: KVM_CAP_S390_SKEYS
+Architectures: s390
+Type: vm ioctl
+Parameters: struct kvm_s390_skeys
+Returns: 0 on success, KVM_S390_GET_KEYS_NONE if guest is not using storage
+         keys, negative value on error
+
+This ioctl is used to get guest storage key values on the s390
+architecture. The ioctl takes parameters via the kvm_s390_skeys struct.
+
+struct kvm_s390_skeys {
+	__u64 start_gfn;
+	__u64 count;
+	__u64 skeydata_addr;
+	__u32 flags;
+	__u32 reserved[9];
+};
+
+The start_gfn field is the number of the first guest frame whose storage keys
+you want to get.
+
+The count field is the number of consecutive frames (starting from start_gfn)
+whose storage keys are to be retrieved.
+The count field must be at least 1 and the maximum allowed value is
+defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range will
+cause the ioctl to return -EINVAL.
+
+The skeydata_addr field is the address of a buffer large enough to hold
+count bytes. This buffer will be filled with storage key data by the ioctl.
+
+4.92 KVM_S390_SET_SKEYS
+
+Capability: KVM_CAP_S390_SKEYS
+Architectures: s390
+Type: vm ioctl
+Parameters: struct kvm_s390_skeys
+Returns: 0 on success, negative value on error
+
+This ioctl is used to set guest storage key values on the s390
+architecture. The ioctl takes parameters via the kvm_s390_skeys struct.
+See the section on KVM_S390_GET_SKEYS for the struct definition.
+
+The start_gfn field is the number of the first guest frame whose storage keys
+you want to set.
+
+The count field is the number of consecutive frames (starting from start_gfn)
+whose storage keys are to be set. The count field must be at least 1 and the
+maximum allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside
+this range will cause the ioctl to return -EINVAL.
+
+The skeydata_addr field is the address of a buffer containing count bytes of
+storage keys. Each byte in the buffer will be set as the storage key for a
+single frame starting at start_gfn for count frames.
+
+Note: If any architecturally invalid key value is found in the given data then
+the ioctl will return -EINVAL.
+
+4.93 KVM_S390_IRQ
+
+Capability: KVM_CAP_S390_INJECT_IRQ
+Architectures: s390
+Type: vcpu ioctl
+Parameters: struct kvm_s390_irq (in)
+Returns: 0 on success, -1 on error
+Errors:
+  EINVAL: interrupt type is invalid
+          type is KVM_S390_SIGP_STOP and flag parameter is invalid value
+          type is KVM_S390_INT_EXTERNAL_CALL and code is bigger
+          than the maximum number of VCPUs
+  EBUSY:  type is KVM_S390_SIGP_SET_PREFIX and vcpu is not stopped
+          type is KVM_S390_SIGP_STOP and a stop irq is already pending
+          type is KVM_S390_INT_EXTERNAL_CALL and an external call interrupt
+          is already pending
+
+Allows injecting an interrupt into the guest.
+
+Using struct kvm_s390_irq as a parameter allows injecting additional
+payload, which is not possible via KVM_S390_INTERRUPT.
+
+Interrupt parameters are passed via kvm_s390_irq:
+
+struct kvm_s390_irq {
+	__u64 type;
+	union {
+		struct kvm_s390_io_info io;
+		struct kvm_s390_ext_info ext;
+		struct kvm_s390_pgm_info pgm;
+		struct kvm_s390_emerg_info emerg;
+		struct kvm_s390_extcall_info extcall;
+		struct kvm_s390_prefix_info prefix;
+		struct kvm_s390_stop_info stop;
+		struct kvm_s390_mchk_info mchk;
+		char reserved[64];
+	} u;
+};
+
+type can be one of the following:
+
+KVM_S390_SIGP_STOP - sigp stop; parameter in .stop
+KVM_S390_PROGRAM_INT - program check; parameters in .pgm
+KVM_S390_SIGP_SET_PREFIX - sigp set prefix; parameters in .prefix
+KVM_S390_RESTART - restart; no parameters
+KVM_S390_INT_CLOCK_COMP - clock comparator interrupt; no parameters
+KVM_S390_INT_CPU_TIMER - CPU timer interrupt; no parameters
+KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg
+KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall
+KVM_S390_MCHK - machine check interrupt; parameters in .mchk
+
+This is an asynchronous vcpu ioctl and can be invoked from any thread.
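+
+As an illustration only, injecting a program check (here an operation
+exception, PGM code 0x01) could look like the following sketch on an
+s390 host; vcpu_fd is assumed and error handling is trimmed:
+
+	#include <stdio.h>
+	#include <sys/ioctl.h>
+	#include <linux/kvm.h>
+
+	struct kvm_s390_irq irq = {
+		.type = KVM_S390_PROGRAM_INT,
+		.u.pgm.code = 0x01,	/* operation exception */
+	};
+
+	if (ioctl(vcpu_fd, KVM_S390_IRQ, &irq) < 0)
+		perror("KVM_S390_IRQ");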
+
+4.94 KVM_S390_GET_IRQ_STATE
+
+Capability: KVM_CAP_S390_IRQ_STATE
+Architectures: s390
+Type: vcpu ioctl
+Parameters: struct kvm_s390_irq_state (out)
+Returns: >= number of bytes copied into buffer,
+         -EINVAL if buffer size is 0,
+         -ENOBUFS if buffer size is too small to fit all pending interrupts,
+         -EFAULT if the buffer address was invalid
+
+This ioctl allows userspace to retrieve the complete state of all currently
+pending interrupts in a single buffer. Use cases include migration
+and introspection. The parameter structure contains the address of a
+userspace buffer and its length:
+
+struct kvm_s390_irq_state {
+	__u64 buf;
+	__u32 flags;        /* will stay unused for compatibility reasons */
+	__u32 len;
+	__u32 reserved[4];  /* will stay unused for compatibility reasons */
+};
+
+Userspace passes in the above struct and for each pending interrupt a
+struct kvm_s390_irq is copied to the provided buffer.
+
+The structure contains a flags and a reserved field for future extensions. As
+the kernel never checked for flags == 0 and QEMU never pre-zeroed flags and
+reserved, these fields cannot be used in the future without breaking
+compatibility.
+
+If -ENOBUFS is returned, the buffer provided was too small and userspace
+may retry with a bigger buffer.
+
+4.95 KVM_S390_SET_IRQ_STATE
+
+Capability: KVM_CAP_S390_IRQ_STATE
+Architectures: s390
+Type: vcpu ioctl
+Parameters: struct kvm_s390_irq_state (in)
+Returns: 0 on success,
+         -EFAULT if the buffer address was invalid,
+         -EINVAL for an invalid buffer length (see below),
+         -EBUSY if there were already interrupts pending,
+         errors occurring when actually injecting the
+          interrupt. See KVM_S390_IRQ.
+
+This ioctl allows userspace to set the complete state of all cpu-local
+interrupts currently pending for the vcpu. It is intended for restoring
+interrupt state after a migration. The input parameter is a userspace buffer
+containing a struct kvm_s390_irq_state:
+
+struct kvm_s390_irq_state {
+	__u64 buf;
+	__u32 flags;        /* will stay unused for compatibility reasons */
+	__u32 len;
+	__u32 reserved[4];  /* will stay unused for compatibility reasons */
+};
+
+The restrictions for flags and reserved apply as well
+(see KVM_S390_GET_IRQ_STATE).
+
+The userspace memory referenced by buf contains a struct kvm_s390_irq
+for each interrupt to be injected into the guest.
+If one of the interrupts could not be injected for some reason the
+ioctl aborts.
+
+len must be a multiple of sizeof(struct kvm_s390_irq). It must be > 0
+and it must not exceed (max_vcpus + 32) * sizeof(struct kvm_s390_irq),
+which is the maximum number of possibly pending cpu-local interrupts.
+
+4.96 KVM_SMI
+
+Capability: KVM_CAP_X86_SMM
+Architectures: x86
+Type: vcpu ioctl
+Parameters: none
+Returns: 0 on success, -1 on error
+
+Queues an SMI on the thread's vcpu.
+
+4.97 KVM_CAP_PPC_MULTITCE
+
+Capability: KVM_CAP_PPC_MULTITCE
+Architectures: ppc
+Type: vm
+
+This capability means the kernel is capable of handling hypercalls
+H_PUT_TCE_INDIRECT and H_STUFF_TCE without passing those to user
+space. This significantly accelerates DMA operations for PPC KVM guests.
+User space should expect that its handlers for these hypercalls
+are not going to be called if user space previously registered LIOBN
+in KVM (via KVM_CREATE_SPAPR_TCE or similar calls).
+
+In order to enable H_PUT_TCE_INDIRECT and H_STUFF_TCE use in the guest,
+user space might have to advertise it for the guest.
+For example, an IBM pSeries (sPAPR) guest starts using them if
+"hcall-multi-tce" is present in the "ibm,hypertas-functions" device-tree
+property.
+
+The hypercalls mentioned above may or may not be processed successfully
+in the kernel based fast path. If they cannot be handled by the kernel,
+they will get passed on to user space. So user space still has to have
+an implementation for these despite the in-kernel acceleration.
+
+This capability is always enabled.
+
+4.98 KVM_CREATE_SPAPR_TCE_64
+
+Capability: KVM_CAP_SPAPR_TCE_64
+Architectures: powerpc
+Type: vm ioctl
+Parameters: struct kvm_create_spapr_tce_64 (in)
+Returns: file descriptor for manipulating the created TCE table
+
+This is an extension for KVM_CAP_SPAPR_TCE, which only supports 32bit
+windows, described in 4.62 KVM_CREATE_SPAPR_TCE.
+
+This capability uses an extended struct in the ioctl interface:
+
+/* for KVM_CAP_SPAPR_TCE_64 */
+struct kvm_create_spapr_tce_64 {
+	__u64 liobn;
+	__u32 page_shift;
+	__u32 flags;
+	__u64 offset;	/* in pages */
+	__u64 size;	/* in pages */
+};
+
+The aim of this extension is to support an additional, bigger DMA window
+with a variable page size.
+KVM_CREATE_SPAPR_TCE_64 receives a 64bit window size, an IOMMU page shift
+and a bus offset of the corresponding DMA window; @size and @offset are
+numbers of IOMMU pages.
+
+@flags are not used at the moment.
+
+The rest of the functionality is identical to KVM_CREATE_SPAPR_TCE.
+
+4.99 KVM_REINJECT_CONTROL
+
+Capability: KVM_CAP_REINJECT_CONTROL
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_reinject_control (in)
+Returns: 0 on success,
+         -EFAULT if struct kvm_reinject_control cannot be read,
+         -ENXIO if KVM_CREATE_PIT or KVM_CREATE_PIT2 didn't succeed earlier.
+
+i8254 (PIT) has two modes, reinject and !reinject. The default is reinject,
+where KVM queues elapsed i8254 ticks and monitors completion of interrupt
+from vector(s) that i8254 injects. Reinject mode dequeues a tick and injects
+its interrupt whenever there isn't a pending interrupt from i8254.
+!reinject mode injects an interrupt as soon as a tick arrives.
+
+struct kvm_reinject_control {
+	__u8 pit_reinject;
+	__u8 reserved[31];
+};
+
+pit_reinject = 0 (!reinject mode) is recommended, unless running an old
+operating system that uses the PIT for timing (e.g. Linux 2.4.x).
+
+4.100 KVM_PPC_CONFIGURE_V3_MMU
+
+Capability: KVM_CAP_PPC_RADIX_MMU or KVM_CAP_PPC_HASH_MMU_V3
+Architectures: ppc
+Type: vm ioctl
+Parameters: struct kvm_ppc_mmuv3_cfg (in)
+Returns: 0 on success,
+         -EFAULT if struct kvm_ppc_mmuv3_cfg cannot be read,
+         -EINVAL if the configuration is invalid
+
+This ioctl controls whether the guest will use radix or HPT (hashed
+page table) translation, and sets the pointer to the process table for
+the guest.
+
+struct kvm_ppc_mmuv3_cfg {
+	__u64	flags;
+	__u64	process_table;
+};
+
+There are two bits that can be set in flags; KVM_PPC_MMUV3_RADIX and
+KVM_PPC_MMUV3_GTSE. KVM_PPC_MMUV3_RADIX, if set, configures the guest
+to use radix tree translation, and if clear, to use HPT translation.
+KVM_PPC_MMUV3_GTSE, if set and if KVM permits it, configures the guest
+to be able to use the global TLB and SLB invalidation instructions;
+if clear, the guest may not use these instructions.
+
+The process_table field specifies the address and size of the guest
+process table, which is in the guest's space. This field is formatted
+as the second doubleword of the partition table entry, as defined in
+the Power ISA V3.00, Book III section 5.7.6.1.
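+
+As an illustration only, configuring a radix guest with GTSE enabled
+might look like the following sketch; vm_fd is assumed, and the
+process-table doubleword (proc_table) is assumed to have been composed
+elsewhere according to the ISA:
+
+	#include <stdio.h>
+	#include <sys/ioctl.h>
+	#include <linux/kvm.h>
+
+	struct kvm_ppc_mmuv3_cfg cfg = {
+		.flags = KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE,
+		.process_table = proc_table,
+	};
+
+	if (ioctl(vm_fd, KVM_PPC_CONFIGURE_V3_MMU, &cfg) < 0)
+		perror("KVM_PPC_CONFIGURE_V3_MMU");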
+
+4.101 KVM_PPC_GET_RMMU_INFO
+
+Capability: KVM_CAP_PPC_RADIX_MMU
+Architectures: ppc
+Type: vm ioctl
+Parameters: struct kvm_ppc_rmmu_info (out)
+Returns: 0 on success,
+	 -EFAULT if struct kvm_ppc_rmmu_info cannot be written,
+	 -EINVAL if no useful information can be returned
+
+This ioctl returns a structure containing two things: (a) a list
+containing supported radix tree geometries, and (b) a list that maps
+page sizes to put in the "AP" (actual page size) field for the tlbie
+(TLB invalidate entry) instruction.
+
+struct kvm_ppc_rmmu_info {
+	struct kvm_ppc_radix_geom {
+		__u8	page_shift;
+		__u8	level_bits[4];
+		__u8	pad[3];
+	}	geometries[8];
+	__u32	ap_encodings[8];
+};
+
+The geometries[] field gives up to 8 supported geometries for the
+radix page table, in terms of the log base 2 of the smallest page
+size, and the number of bits indexed at each level of the tree, from
+the PTE level up to the PGD level in that order. Any unused entries
+will have 0 in the page_shift field.
+
+The ap_encodings field gives the supported page sizes and their AP
+field encodings, encoded with the AP value in the top 3 bits and the
+log base 2 of the page size in the bottom 6 bits.
+
+4.102 KVM_PPC_RESIZE_HPT_PREPARE
+
+Capability: KVM_CAP_SPAPR_RESIZE_HPT
+Architectures: powerpc
+Type: vm ioctl
+Parameters: struct kvm_ppc_resize_hpt (in)
+Returns: 0 on successful completion,
+	 >0 if a new HPT is being prepared, the value is an estimated
+	    number of milliseconds until preparation is complete,
+	 -EFAULT if struct kvm_ppc_resize_hpt cannot be read,
+	 -EINVAL if the supplied shift or flags are invalid,
+	 -ENOMEM if unable to allocate the new HPT,
+	 -ENOSPC if there was a hash collision when moving existing
+	  HPT entries to the new HPT,
+	 -EIO on other error conditions
+
+Used to implement the PAPR extension for runtime resizing of a guest's
+Hashed Page Table (HPT). Specifically this starts, stops or monitors
+the preparation of a new potential HPT for the guest, essentially
+implementing the H_RESIZE_HPT_PREPARE hypercall.
+
+If called with shift > 0 when there is no pending HPT for the guest,
+this begins preparation of a new pending HPT of size 2^(shift) bytes.
+It then returns a positive integer with the estimated number of
+milliseconds until preparation is complete.
+
+If called when there is a pending HPT whose size does not match that
+requested in the parameters, this discards the existing pending HPT and
+creates a new one as above.
+
+If called when there is a pending HPT of the size requested, it will:
+  * If preparation of the pending HPT is already complete, return 0.
+  * If preparation of the pending HPT has failed, return an error
+    code, then discard the pending HPT.
+  * If preparation of the pending HPT is still in progress, return an
+    estimated number of milliseconds until preparation is complete.
+
+If called with shift == 0, discards any currently pending HPT and
+returns 0 (i.e. cancels any in-progress preparation).
+
+flags is reserved for future expansion; currently setting any bits in
+flags will result in -EINVAL.
+
+Normally this will be called repeatedly with the same parameters until
+it returns <= 0. The first call will initiate preparation, subsequent
+ones will monitor preparation until it completes or fails.
+
+struct kvm_ppc_resize_hpt {
+	__u64 flags;
+	__u32 shift;
+	__u32 pad;
+};
+
+4.103 KVM_PPC_RESIZE_HPT_COMMIT
+
+Capability: KVM_CAP_SPAPR_RESIZE_HPT
+Architectures: powerpc
+Type: vm ioctl
+Parameters: struct kvm_ppc_resize_hpt (in)
+Returns: 0 on successful completion,
+	 -EFAULT if struct kvm_ppc_resize_hpt cannot be read,
+	 -EINVAL if the supplied shift or flags are invalid,
+	 -ENXIO if there is no pending HPT, or the pending HPT doesn't
+	  have the requested size,
+	 -EBUSY if the pending HPT is not fully prepared,
+	 -ENOSPC if there was a hash collision when moving existing
+	  HPT entries to the new HPT,
+	 -EIO on other error conditions
+
+Used to implement the PAPR extension for runtime resizing of a guest's
+Hashed Page Table (HPT). Specifically this requests that the guest be
+transferred to working with the new HPT, essentially implementing the
+H_RESIZE_HPT_COMMIT hypercall.
+
+This should only be called after KVM_PPC_RESIZE_HPT_PREPARE has
+returned 0 with the same parameters. In other cases
+KVM_PPC_RESIZE_HPT_COMMIT will return an error (usually -ENXIO or
+-EBUSY, though others may be possible if the preparation was started,
+but failed).
+
+This will have undefined effects on the guest if it has not already
+placed itself in a quiescent state where no vcpu will make MMU enabled
+memory accesses.
+
+On successful completion, the pending HPT will become the guest's active
+HPT and the previous HPT will be discarded.
+
+On failure, the guest will still be operating on its previous HPT.
+
+struct kvm_ppc_resize_hpt {
+	__u64 flags;
+	__u32 shift;
+	__u32 pad;
+};
+
+4.104 KVM_X86_GET_MCE_CAP_SUPPORTED
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: system ioctl
+Parameters: u64 mce_cap (out)
+Returns: 0 on success, -1 on error
+
+Returns supported MCE capabilities. The u64 mce_cap parameter
+has the same format as the MSR_IA32_MCG_CAP register. Supported
+capabilities will have the corresponding bits set.
+
+4.105 KVM_X86_SETUP_MCE
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: u64 mcg_cap (in)
+Returns: 0 on success,
+         -EFAULT if u64 mcg_cap cannot be read,
+         -EINVAL if the requested number of banks is invalid,
+         -EINVAL if requested MCE capability is not supported.
+
+Initializes MCE support for use. The u64 mcg_cap parameter
+has the same format as the MSR_IA32_MCG_CAP register and
+specifies which capabilities should be enabled. The maximum
+supported number of error-reporting banks can be retrieved when
+checking for KVM_CAP_MCE. The supported capabilities can be
+retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED.
+
+4.106 KVM_X86_SET_MCE
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_x86_mce (in)
+Returns: 0 on success,
+         -EFAULT if struct kvm_x86_mce cannot be read,
+         -EINVAL if the bank number is invalid,
+         -EINVAL if VAL bit is not set in status field.
+
+Inject a machine check error (MCE) into the guest. The input
+parameter is:
+
+struct kvm_x86_mce {
+	__u64 status;
+	__u64 addr;
+	__u64 misc;
+	__u64 mcg_status;
+	__u8 bank;
+	__u8 pad1[7];
+	__u64 pad2[3];
+};
+
+If the MCE being reported is an uncorrected error, KVM will
+inject it as an MCE exception into the guest. If the guest
+MCG_STATUS register reports that an MCE is in progress, KVM
+causes a KVM_EXIT_SHUTDOWN vmexit.
+
+Otherwise, if the MCE is a corrected error, KVM will just
+store it in the corresponding bank (provided this bank is
+not holding a previously reported uncorrected error).
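+
+As an illustration only, the three MCE ioctls might be combined as in
+the following sketch; kvm_fd is the /dev/kvm fd, vcpu_fd a vcpu fd, and
+the status bits (VAL, bit 63, and UC, bit 61) are illustrative values:
+
+	#include <sys/ioctl.h>
+	#include <linux/kvm.h>
+
+	__u64 mcg_cap;
+	struct kvm_x86_mce mce = {
+		.status = (1ULL << 63) | (1ULL << 61), /* VAL | UC */
+		.bank = 0,
+	};
+
+	/* Query supported capabilities, enable them, inject the error. */
+	ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mcg_cap);
+	ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap);
+	ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);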
+
+4.107 KVM_S390_GET_CMMA_BITS
+
+Capability: KVM_CAP_S390_CMMA_MIGRATION
+Architectures: s390
+Type: vm ioctl
+Parameters: struct kvm_s390_cmma_log (in, out)
+Returns: 0 on success, a negative value on error
+
+This ioctl is used to get the values of the CMMA bits on the s390
+architecture. It is meant to be used in two scenarios:
+- During live migration to save the CMMA values. Live migration needs
+  to be enabled via the KVM_REQ_START_MIGRATION VM property.
+- To non-destructively peek at the CMMA values, with the flag
+  KVM_S390_CMMA_PEEK set.
+
+The ioctl takes parameters via the kvm_s390_cmma_log struct. The desired
+values are written to a buffer whose location is indicated via the "values"
+member in the kvm_s390_cmma_log struct. The values in the input struct are
+also updated as needed.
+Each CMMA value takes up one byte.
+
+struct kvm_s390_cmma_log {
+	__u64 start_gfn;
+	__u32 count;
+	__u32 flags;
+	union {
+		__u64 remaining;
+		__u64 mask;
+	};
+	__u64 values;
+};
+
+start_gfn is the number of the first guest frame whose CMMA values are
+to be retrieved,
+
+count is the length of the buffer in bytes,
+
+values points to the buffer where the result will be written to.
+
+If count is greater than KVM_S390_SKEYS_MAX, then it is considered to be
+KVM_S390_SKEYS_MAX. KVM_S390_SKEYS_MAX is re-used for consistency with
+other ioctls.
+
+The result is written in the buffer pointed to by the field values, and
+the values of the input parameter are updated as follows.
+
+Depending on the flags, different actions are performed. The only
+supported flag so far is KVM_S390_CMMA_PEEK.
+
+The default behaviour if KVM_S390_CMMA_PEEK is not set is:
+start_gfn will indicate the first page frame whose CMMA bits were dirty.
+It is not necessarily the same as the one passed as input, as clean pages
+are skipped.
+
+count will indicate the number of bytes actually written in the buffer.
+It can (and very often will) be smaller than the input value, since the
+buffer is only filled until 16 bytes of clean values are found (which
+are then not copied in the buffer). Since a CMMA migration block needs
+the base address and the length, for a total of 16 bytes, we will send
+back some clean data if there is some dirty data afterwards, as long as
+the size of the clean data does not exceed the size of the header. This
+allows the amount of data to be saved or transferred over the network to
+be minimized, at the expense of more roundtrips to userspace. The next
+invocation of the ioctl will skip over all the clean values, saving
+potentially more than just the 16 bytes we found.
+
+If KVM_S390_CMMA_PEEK is set:
+the existing storage attributes are read even when not in migration
+mode, and no other action is performed;
+
+the output start_gfn will be equal to the input start_gfn,
+
+the output count will be equal to the input count, except if the end of
+memory has been reached.
+
+In both cases:
+the field "remaining" will indicate the total number of dirty CMMA values
+still remaining, or 0 if KVM_S390_CMMA_PEEK is set and migration mode is
+not enabled.
+
+mask is unused.
+
+values points to the userspace buffer where the result will be stored.
+
+This ioctl can fail with -ENOMEM if not enough memory can be allocated to
+complete the task, with -ENXIO if CMMA is not enabled, with -EINVAL if
+KVM_S390_CMMA_PEEK is not set but migration mode was not enabled, with
+-EFAULT if the userspace address is invalid or if no page table is
+present for the addresses (e.g. when using hugepages).
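+
+As an illustration only, peeking non-destructively at the CMMA values of
+the first 4096 frames could look like this sketch; vm_fd is assumed and
+error handling is trimmed:
+
+	#include <stdio.h>
+	#include <sys/ioctl.h>
+	#include <linux/kvm.h>
+
+	__u8 cmma[4096];
+	struct kvm_s390_cmma_log log = {
+		.start_gfn = 0,
+		.count = sizeof(cmma),
+		.flags = KVM_S390_CMMA_PEEK,
+		.values = (__u64)(unsigned long)cmma,
+	};
+
+	if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) == 0)
+		printf("%u CMMA values starting at gfn %llu\n",
+		       log.count, (unsigned long long)log.start_gfn);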
+
+4.108 KVM_S390_SET_CMMA_BITS
+
+Capability: KVM_CAP_S390_CMMA_MIGRATION
+Architectures: s390
+Type: vm ioctl
+Parameters: struct kvm_s390_cmma_log (in)
+Returns: 0 on success, a negative value on error
+
+This ioctl is used to set the values of the CMMA bits on the s390
+architecture. It is meant to be used during live migration to restore
+the CMMA values, but there are no restrictions on its use.
+The ioctl takes parameters via the kvm_s390_cmma_log struct.
+Each CMMA value takes up one byte.
+
+struct kvm_s390_cmma_log {
+	__u64 start_gfn;
+	__u32 count;
+	__u32 flags;
+	union {
+		__u64 remaining;
+		__u64 mask;
+	};
+	__u64 values;
+};
+
+start_gfn indicates the starting guest frame number,
+
+count indicates how many values are to be considered in the buffer,
+
+flags is not used and must be 0.
+
+mask indicates which PGSTE bits are to be considered.
+
+remaining is not used.
+
+values points to the buffer in userspace where the values to be set are
+stored.
+
+This ioctl can fail with -ENOMEM if not enough memory can be allocated to
+complete the task, with -ENXIO if CMMA is not enabled, with -EINVAL if
+the count field is too large (e.g. more than KVM_S390_CMMA_SIZE_MAX) or
+if the flags field was not 0, with -EFAULT if the userspace address is
+invalid, if invalid pages are written to (e.g. after the end of memory)
+or if no page table is present for the addresses (e.g. when using
+hugepages).
+
+4.109 KVM_PPC_GET_CPU_CHAR
+
+Capability: KVM_CAP_PPC_GET_CPU_CHAR
+Architectures: powerpc
+Type: vm ioctl
+Parameters: struct kvm_ppc_cpu_char (out)
+Returns: 0 on successful completion,
+	 -EFAULT if struct kvm_ppc_cpu_char cannot be written
+
+This ioctl gives userspace information about certain characteristics
+of the CPU relating to speculative execution of instructions and
+possible information leakage resulting from speculative execution (see
+CVE-2017-5715, CVE-2017-5753 and CVE-2017-5754). The information is
+returned in struct kvm_ppc_cpu_char, which looks like this:
+
+struct kvm_ppc_cpu_char {
+	__u64	character;		/* characteristics of the CPU */
+	__u64	behaviour;		/* recommended software behaviour */
+	__u64	character_mask;		/* valid bits in character */
+	__u64	behaviour_mask;		/* valid bits in behaviour */
+};
+
+For extensibility, the character_mask and behaviour_mask fields
+indicate which bits of character and behaviour have been filled in by
+the kernel. If the set of defined bits is extended in the future then
+userspace will be able to tell whether it is running on a kernel that
+knows about the new bits.
+
+The character field describes attributes of the CPU which can help
+with preventing inadvertent information disclosure - specifically,
+whether there is an instruction to flash-invalidate the L1 data cache
+(ori 30,30,0 or mtspr SPRN_TRIG2,rN), whether the L1 data cache is set
+to a mode where entries can only be used by the thread that created
+them, whether the bcctr[l] instruction prevents speculation, and
+whether a speculation barrier instruction (ori 31,31,0) is provided.
+
+The behaviour field describes actions that software should take to
+prevent inadvertent information disclosure, and thus describes which
+vulnerabilities the hardware is subject to; specifically whether the
+L1 data cache should be flushed when returning to user mode from the
+kernel, and whether a speculation barrier should be placed between an
+array bounds check and the array access.
+
+These fields use the same bit definitions as the new
+H_GET_CPU_CHARACTERISTICS hypercall.
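+
+As an illustration only, a sketch of querying these characteristics and
+testing one behaviour bit (vm_fd assumed, error handling trimmed):
+
+	#include <stdio.h>
+	#include <sys/ioctl.h>
+	#include <linux/kvm.h>
+
+	struct kvm_ppc_cpu_char cc;
+
+	if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) == 0 &&
+	    (cc.behaviour & cc.behaviour_mask &
+	     KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR))
+		printf("L1D flush on return to user mode is recommended\n");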
+ +4.110 KVM_MEMORY_ENCRYPT_OP + +Capability: basic +Architectures: x86 +Type: system +Parameters: an opaque platform specific structure (in/out) +Returns: 0 on success; -1 on error + +If the platform supports creating encrypted VMs then this ioctl can be used +for issuing platform-specific memory encryption commands to manage those +encrypted VMs. + +Currently, this ioctl is used for issuing Secure Encrypted Virtualization +(SEV) commands on AMD Processors. The SEV commands are defined in +Documentation/virt/kvm/amd-memory-encryption.rst. + +4.111 KVM_MEMORY_ENCRYPT_REG_REGION + +Capability: basic +Architectures: x86 +Type: system +Parameters: struct kvm_enc_region (in) +Returns: 0 on success; -1 on error + +This ioctl can be used to register a guest memory region which may +contain encrypted data (e.g. guest RAM, SMRAM etc). + +It is used in the SEV-enabled guest. When encryption is enabled, a guest +memory region may contain encrypted data. The SEV memory encryption +engine uses a tweak such that two identical plaintext pages, each at +different locations will have differing ciphertexts. So swapping or +moving ciphertext of those pages will not result in plaintext being +swapped. So relocating (or migrating) physical backing pages for the SEV +guest will require some additional steps. + +Note: The current SEV key management spec does not provide commands to +swap or migrate (move) ciphertext pages. Hence, for now we pin the guest +memory region registered with the ioctl. + +4.112 KVM_MEMORY_ENCRYPT_UNREG_REGION + +Capability: basic +Architectures: x86 +Type: system +Parameters: struct kvm_enc_region (in) +Returns: 0 on success; -1 on error + +This ioctl can be used to unregister the guest memory region registered +with KVM_MEMORY_ENCRYPT_REG_REGION ioctl above. + +4.113 KVM_HYPERV_EVENTFD + +Capability: KVM_CAP_HYPERV_EVENTFD +Architectures: x86 +Type: vm ioctl +Parameters: struct kvm_hyperv_eventfd (in) + +This ioctl (un)registers an eventfd to receive notifications from the guest on +the specified Hyper-V connection id through the SIGNAL_EVENT hypercall, without +causing a user exit. SIGNAL_EVENT hypercall with non-zero event flag number +(bits 24-31) still triggers a KVM_EXIT_HYPERV_HCALL user exit. + +struct kvm_hyperv_eventfd { + __u32 conn_id; + __s32 fd; + __u32 flags; + __u32 padding[3]; +}; + +The conn_id field should fit within 24 bits: + +#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff + +The acceptable values for the flags field are: + +#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0) + +Returns: 0 on success, + -EINVAL if conn_id or flags is outside the allowed range + -ENOENT on deassign if the conn_id isn't registered + -EEXIST on assign if the conn_id is already registered + +4.114 KVM_GET_NESTED_STATE + +Capability: KVM_CAP_NESTED_STATE +Architectures: x86 +Type: vcpu ioctl +Parameters: struct kvm_nested_state (in/out) +Returns: 0 on success, -1 on error +Errors: + E2BIG: the total state size exceeds the value of 'size' specified by + the user; the size required will be written into size. + +struct kvm_nested_state { + __u16 flags; + __u16 format; + __u32 size; + + union { + struct kvm_vmx_nested_state_hdr vmx; + struct kvm_svm_nested_state_hdr svm; + + /* Pad the header to 128 bytes. 
*/ + __u8 pad[120]; + } hdr; + + union { + struct kvm_vmx_nested_state_data vmx[0]; + struct kvm_svm_nested_state_data svm[0]; + } data; +}; + +#define KVM_STATE_NESTED_GUEST_MODE 0x00000001 +#define KVM_STATE_NESTED_RUN_PENDING 0x00000002 +#define KVM_STATE_NESTED_EVMCS 0x00000004 + +#define KVM_STATE_NESTED_FORMAT_VMX 0 +#define KVM_STATE_NESTED_FORMAT_SVM 1 + +#define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000 + +#define KVM_STATE_NESTED_VMX_SMM_GUEST_MODE 0x00000001 +#define KVM_STATE_NESTED_VMX_SMM_VMXON 0x00000002 + +struct kvm_vmx_nested_state_hdr { + __u64 vmxon_pa; + __u64 vmcs12_pa; + + struct { + __u16 flags; + } smm; +}; + +struct kvm_vmx_nested_state_data { + __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; + __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; +}; + +This ioctl copies the vcpu's nested virtualization state from the kernel to +userspace. + +The maximum size of the state can be retrieved by passing KVM_CAP_NESTED_STATE +to the KVM_CHECK_EXTENSION ioctl(). + +4.115 KVM_SET_NESTED_STATE + +Capability: KVM_CAP_NESTED_STATE +Architectures: x86 +Type: vcpu ioctl +Parameters: struct kvm_nested_state (in) +Returns: 0 on success, -1 on error + +This copies the vcpu's kvm_nested_state struct from userspace to the kernel. +For the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE. + +4.116 KVM_(UN)REGISTER_COALESCED_MMIO + +Capability: KVM_CAP_COALESCED_MMIO (for coalesced mmio) + KVM_CAP_COALESCED_PIO (for coalesced pio) +Architectures: all +Type: vm ioctl +Parameters: struct kvm_coalesced_mmio_zone +Returns: 0 on success, < 0 on error + +Coalesced I/O is a performance optimization that defers hardware +register write emulation so that userspace exits are avoided. It is +typically used to reduce the overhead of emulating frequently accessed +hardware registers. + +When a hardware register is configured for coalesced I/O, write accesses +do not exit to userspace and their value is recorded in a ring buffer +that is shared between kernel and userspace. + +Coalesced I/O is used if one or more write accesses to a hardware +register can be deferred until a read or a write to another hardware +register on the same device. This last access will cause a vmexit and +userspace will process accesses from the ring buffer before emulating +it. That will avoid exiting to userspace on repeated writes. + +Coalesced pio is based on coalesced mmio. There is little difference +between coalesced mmio and pio except that coalesced pio records accesses +to I/O ports. + +4.117 KVM_CLEAR_DIRTY_LOG (vm ioctl) + +Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 +Architectures: x86, arm, arm64, mips +Type: vm ioctl +Parameters: struct kvm_dirty_log (in) +Returns: 0 on success, -1 on error + +/* for KVM_CLEAR_DIRTY_LOG */ +struct kvm_clear_dirty_log { + __u32 slot; + __u32 num_pages; + __u64 first_page; + union { + void __user *dirty_bitmap; /* one bit per page */ + __u64 padding; + }; +}; + +The ioctl clears the dirty status of pages in a memory slot, according to +the bitmap that is passed in struct kvm_clear_dirty_log's dirty_bitmap +field. Bit 0 of the bitmap corresponds to page "first_page" in the +memory slot, and num_pages is the size in bits of the input bitmap. +first_page must be a multiple of 64; num_pages must also be a multiple of +64 unless first_page + num_pages is the size of the memory slot. 
For each
+bit that is set in the input bitmap, the corresponding page is marked "clean"
+in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
+(for example via write-protection, or by clearing the dirty bit in
+a page table entry).
+
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specify
+the address space for which you want to return the dirty bitmap.
+They must be less than the value that KVM_CHECK_EXTENSION returns for
+the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+
+This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
+is enabled; for more information, see the description of the capability.
+However, it can always be used as long as KVM_CHECK_EXTENSION confirms
+that KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is present.
+
+4.118 KVM_GET_SUPPORTED_HV_CPUID
+
+Capability: KVM_CAP_HYPERV_CPUID
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_cpuid2 (in/out)
+Returns: 0 on success, -1 on error
+
+struct kvm_cpuid2 {
+ __u32 nent;
+ __u32 padding;
+ struct kvm_cpuid_entry2 entries[0];
+};
+
+struct kvm_cpuid_entry2 {
+ __u32 function;
+ __u32 index;
+ __u32 flags;
+ __u32 eax;
+ __u32 ebx;
+ __u32 ecx;
+ __u32 edx;
+ __u32 padding[3];
+};
+
+This ioctl returns x86 cpuid feature leaves related to Hyper-V emulation in
+KVM. Userspace can use the information returned by this ioctl to construct
+cpuid information presented to guests consuming Hyper-V enlightenments (e.g.
+Windows or Hyper-V guests).
+
+CPUID feature leaves returned by this ioctl are defined by Hyper-V Top Level
+Functional Specification (TLFS). These leaves can't be obtained with
+KVM_GET_SUPPORTED_CPUID ioctl because some of them intersect with KVM feature
+leaves (0x40000000, 0x40000001).
+
+Currently, the following list of CPUID leaves is returned:
+ HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS
+ HYPERV_CPUID_INTERFACE
+ HYPERV_CPUID_VERSION
+ HYPERV_CPUID_FEATURES
+ HYPERV_CPUID_ENLIGHTMENT_INFO
+ HYPERV_CPUID_IMPLEMENT_LIMITS
+ HYPERV_CPUID_NESTED_FEATURES
+
+HYPERV_CPUID_NESTED_FEATURES leaf is only exposed when Enlightened VMCS was
+enabled on the corresponding vCPU (KVM_CAP_HYPERV_ENLIGHTENED_VMCS).
+
+Userspace invokes KVM_GET_SUPPORTED_HV_CPUID by passing a kvm_cpuid2 structure
+with the 'nent' field indicating the number of entries in the variable-size
+array 'entries'. If the number of entries is too low to describe all Hyper-V
+feature leaves, an error (E2BIG) is returned. If the number is greater than
+or equal to the number of Hyper-V feature leaves, the 'nent' field is adjusted
+to the number of valid entries in the 'entries' array, which is then filled.
+
+'index' and 'flags' fields in 'struct kvm_cpuid_entry2' are currently reserved;
+userspace should not expect to get any particular value there.
+
+4.119 KVM_ARM_VCPU_FINALIZE
+
+Architectures: arm, arm64
+Type: vcpu ioctl
+Parameters: int feature (in)
+Returns: 0 on success, -1 on error
+Errors:
+ EPERM: feature not enabled, needs configuration, or already finalized
+ EINVAL: feature unknown or not present
+
+Recognised values for feature:
+ arm64 KVM_ARM_VCPU_SVE (requires KVM_CAP_ARM_SVE)
+
+Finalizes the configuration of the specified vcpu feature.
+
+The vcpu must already have been initialised, enabling the affected feature, by
+means of a successful KVM_ARM_VCPU_INIT call with the appropriate flag set in
+features[].
+
+For affected vcpu features, this is a mandatory step that must be performed
+before the vcpu is fully usable.
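+
+For example, finalizing SVE support might look like the sketch below
+(vcpu_fd is a placeholder, and the SVE vector lengths are assumed to
+have been configured beforehand):
+
+  int feature = KVM_ARM_VCPU_SVE;
+  int ret = ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature);
+
+  /* on failure, ret is -1 with EPERM/EINVAL as listed above */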
+
+Between KVM_ARM_VCPU_INIT and KVM_ARM_VCPU_FINALIZE, the feature may be
+configured by use of ioctls such as KVM_SET_ONE_REG. The exact configuration
+that should be performed and how to do it are feature-dependent.
+
+Other calls that depend on a particular feature being finalized, such as
+KVM_RUN, KVM_GET_REG_LIST, KVM_GET_ONE_REG and KVM_SET_ONE_REG, will fail with
+-EPERM unless the feature has already been finalized by means of a
+KVM_ARM_VCPU_FINALIZE call.
+
+See KVM_ARM_VCPU_INIT for details of vcpu features that require finalization
+using this ioctl.
+
+4.120 KVM_SET_PMU_EVENT_FILTER
+
+Capability: KVM_CAP_PMU_EVENT_FILTER
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_pmu_event_filter (in)
+Returns: 0 on success, -1 on error
+
+struct kvm_pmu_event_filter {
+ __u32 action;
+ __u32 nevents;
+ __u32 fixed_counter_bitmap;
+ __u32 flags;
+ __u32 pad[4];
+ __u64 events[0];
+};
+
+This ioctl restricts the set of PMU events that the guest can program.
+The argument holds a list of events which will be allowed or denied.
+The eventsel+umask of each event the guest attempts to program is compared
+against the events field to determine whether the guest should have access.
+The events field only controls general purpose counters; fixed purpose
+counters are controlled by the fixed_counter_bitmap.
+
+No flags are defined yet; the field must be zero.
+
+Valid values for 'action':
+#define KVM_PMU_EVENT_ALLOW 0
+#define KVM_PMU_EVENT_DENY 1
+
+
+5. The kvm_run structure
+------------------------
+
+Application code obtains a pointer to the kvm_run structure by
+mmap()ing a vcpu fd. From that point, application code can control
+execution by changing fields in kvm_run prior to calling the KVM_RUN
+ioctl, and obtain information about the reason KVM_RUN returned by
+looking up structure members.
+
+struct kvm_run {
+ /* in */
+ __u8 request_interrupt_window;
+
+Request that KVM_RUN return when it becomes possible to inject external
+interrupts into the guest. Useful in conjunction with KVM_INTERRUPT.
+
+ __u8 immediate_exit;
+
+This field is polled once when KVM_RUN starts; if non-zero, KVM_RUN
+exits immediately, returning -EINTR. In the common scenario where a
+signal is used to "kick" a VCPU out of KVM_RUN, this field can be used
+to avoid usage of KVM_SET_SIGNAL_MASK, which has worse scalability.
+Rather than blocking the signal outside KVM_RUN, userspace can set up
+a signal handler that sets run->immediate_exit to a non-zero value.
+
+This field is ignored if KVM_CAP_IMMEDIATE_EXIT is not available.
+
+ __u8 padding1[6];
+
+ /* out */
+ __u32 exit_reason;
+
+When KVM_RUN has returned successfully (return value 0), this informs
+application code why KVM_RUN has returned. Allowable values for this
+field are detailed below.
+
+ __u8 ready_for_interrupt_injection;
+
+If request_interrupt_window has been specified, this field indicates
+that an interrupt can be injected now with KVM_INTERRUPT.
+
+ __u8 if_flag;
+
+The value of the current interrupt flag. Only valid if in-kernel
+local APIC is not used.
+
+ __u16 flags;
+
+More architecture-specific flags detailing state of the VCPU that may
+affect the device's behavior. The only currently defined flag is
+KVM_RUN_X86_SMM, which is valid on x86 machines and is set if the
+VCPU is in system management mode.
+
+ /* in (pre_kvm_run), out (post_kvm_run) */
+ __u64 cr8;
+
+The value of the cr8 register. Only valid if in-kernel local APIC is
+not used. Both input and output.
+
+ __u64 apic_base;
+
+The value of the APIC BASE msr.
Only valid if in-kernel local +APIC is not used. Both input and output. + + union { + /* KVM_EXIT_UNKNOWN */ + struct { + __u64 hardware_exit_reason; + } hw; + +If exit_reason is KVM_EXIT_UNKNOWN, the vcpu has exited due to unknown +reasons. Further architecture-specific information is available in +hardware_exit_reason. + + /* KVM_EXIT_FAIL_ENTRY */ + struct { + __u64 hardware_entry_failure_reason; + } fail_entry; + +If exit_reason is KVM_EXIT_FAIL_ENTRY, the vcpu could not be run due +to unknown reasons. Further architecture-specific information is +available in hardware_entry_failure_reason. + + /* KVM_EXIT_EXCEPTION */ + struct { + __u32 exception; + __u32 error_code; + } ex; + +Unused. + + /* KVM_EXIT_IO */ + struct { +#define KVM_EXIT_IO_IN 0 +#define KVM_EXIT_IO_OUT 1 + __u8 direction; + __u8 size; /* bytes */ + __u16 port; + __u32 count; + __u64 data_offset; /* relative to kvm_run start */ + } io; + +If exit_reason is KVM_EXIT_IO, then the vcpu has +executed a port I/O instruction which could not be satisfied by kvm. +data_offset describes where the data is located (KVM_EXIT_IO_OUT) or +where kvm expects application code to place the data for the next +KVM_RUN invocation (KVM_EXIT_IO_IN). Data format is a packed array. + + /* KVM_EXIT_DEBUG */ + struct { + struct kvm_debug_exit_arch arch; + } debug; + +If the exit_reason is KVM_EXIT_DEBUG, then a vcpu is processing a debug event +for which architecture specific information is returned. + + /* KVM_EXIT_MMIO */ + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } mmio; + +If exit_reason is KVM_EXIT_MMIO, then the vcpu has +executed a memory-mapped I/O instruction which could not be satisfied +by kvm. The 'data' member contains the written data if 'is_write' is +true, and should be filled by application code otherwise. + +The 'data' member contains, in its first 'len' bytes, the value as it would +appear if the VCPU performed a load or store of the appropriate width directly +to the byte array. + +NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_PAPR and + KVM_EXIT_EPR the corresponding +operations are complete (and guest state is consistent) only after userspace +has re-entered the kernel with KVM_RUN. The kernel side will first finish +incomplete operations and then check for pending signals. Userspace +can re-enter the guest with an unmasked signal pending to complete +pending operations. + + /* KVM_EXIT_HYPERCALL */ + struct { + __u64 nr; + __u64 args[6]; + __u64 ret; + __u32 longmode; + __u32 pad; + } hypercall; + +Unused. This was once used for 'hypercall to userspace'. To implement +such functionality, use KVM_EXIT_IO (x86) or KVM_EXIT_MMIO (all except s390). +Note KVM_EXIT_IO is significantly faster than KVM_EXIT_MMIO. + + /* KVM_EXIT_TPR_ACCESS */ + struct { + __u64 rip; + __u32 is_write; + __u32 pad; + } tpr_access; + +To be documented (KVM_TPR_ACCESS_REPORTING). + + /* KVM_EXIT_S390_SIEIC */ + struct { + __u8 icptcode; + __u64 mask; /* psw upper half */ + __u64 addr; /* psw lower half */ + __u16 ipa; + __u32 ipb; + } s390_sieic; + +s390 specific. + + /* KVM_EXIT_S390_RESET */ +#define KVM_S390_RESET_POR 1 +#define KVM_S390_RESET_CLEAR 2 +#define KVM_S390_RESET_SUBSYSTEM 4 +#define KVM_S390_RESET_CPU_INIT 8 +#define KVM_S390_RESET_IPL 16 + __u64 s390_reset_flags; + +s390 specific. + + /* KVM_EXIT_S390_UCONTROL */ + struct { + __u64 trans_exc_code; + __u32 pgm_code; + } s390_ucontrol; + +s390 specific. 
A page fault has occurred for a user controlled virtual
+machine (KVM_VM_S390_UCONTROL) on its host page table that cannot be
+resolved by the kernel.
+The program code and the translation exception code that were placed
+in the cpu's lowcore are presented here as defined by the z Architecture
+Principles of Operation Book in the Chapter for Dynamic Address Translation
+(DAT).
+
+ /* KVM_EXIT_DCR */
+ struct {
+ __u32 dcrn;
+ __u32 data;
+ __u8 is_write;
+ } dcr;
+
+Deprecated - was used for 440 KVM.
+
+ /* KVM_EXIT_OSI */
+ struct {
+ __u64 gprs[32];
+ } osi;
+
+MOL uses a special hypercall interface it calls 'OSI'. To enable it, we catch
+hypercalls and exit with this exit struct that contains all the guest gprs.
+
+If exit_reason is KVM_EXIT_OSI, then the vcpu has triggered such a hypercall.
+Userspace can now handle the hypercall and, when it's done, modify the gprs as
+necessary. Upon guest entry all guest GPRs will then be replaced by the values
+in this struct.
+
+ /* KVM_EXIT_PAPR_HCALL */
+ struct {
+ __u64 nr;
+ __u64 ret;
+ __u64 args[9];
+ } papr_hcall;
+
+This is used on 64-bit PowerPC when emulating a pSeries partition,
+e.g. with the 'pseries' machine type in qemu. It occurs when the
+guest does a hypercall using the 'sc 1' instruction. The 'nr' field
+contains the hypercall number (from the guest R3), and 'args' contains
+the arguments (from the guest R4 - R12). Userspace should put the
+return code in 'ret' and any extra returned values in args[].
+The possible hypercalls are defined in the Power Architecture Platform
+Requirements (PAPR) document available from www.power.org (free
+developer registration required to access it).
+
+ /* KVM_EXIT_S390_TSCH */
+ struct {
+ __u16 subchannel_id;
+ __u16 subchannel_nr;
+ __u32 io_int_parm;
+ __u32 io_int_word;
+ __u32 ipb;
+ __u8 dequeued;
+ } s390_tsch;
+
+s390 specific. This exit occurs when KVM_CAP_S390_CSS_SUPPORT has been enabled
+and TEST SUBCHANNEL was intercepted. If dequeued is set, a pending I/O
+interrupt for the target subchannel has been dequeued and subchannel_id,
+subchannel_nr, io_int_parm and io_int_word contain the parameters for that
+interrupt. ipb is needed for instruction parameter decoding.
+
+ /* KVM_EXIT_EPR */
+ struct {
+ __u32 epr;
+ } epr;
+
+On FSL BookE PowerPC chips, the interrupt controller has a fast
+interrupt acknowledge path to the core. When the core successfully
+delivers an interrupt, it automatically populates the EPR register with
+the interrupt vector number and acknowledges the interrupt inside
+the interrupt controller.
+
+In case the interrupt controller lives in user space, we need to do
+the interrupt acknowledge cycle through it to fetch the next interrupt
+vector to be delivered, using this exit.
+
+It gets triggered whenever KVM_CAP_PPC_EPR is enabled and an
+external interrupt has just been delivered into the guest. User space
+should put the acknowledged interrupt vector into the 'epr' field.
+
+ /* KVM_EXIT_SYSTEM_EVENT */
+ struct {
+#define KVM_SYSTEM_EVENT_SHUTDOWN 1
+#define KVM_SYSTEM_EVENT_RESET 2
+#define KVM_SYSTEM_EVENT_CRASH 3
+ __u32 type;
+ __u64 flags;
+ } system_event;
+
+If exit_reason is KVM_EXIT_SYSTEM_EVENT then the vcpu has triggered
+a system-level event using some architecture specific mechanism (hypercall
+or some special instruction). In case of ARM/ARM64, this is triggered using
+an HVC instruction based PSCI call from the vcpu. The 'type' field describes
+the system-level event type.
The 'flags' field describes architecture
+specific flags for the system-level event.
+
+Valid values for 'type' are:
+ KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the
+ VM. Userspace is not obliged to honour this, and if it does honour
+ it, it does not need to destroy the VM synchronously (ie it may call
+ KVM_RUN again before shutdown finally occurs).
+ KVM_SYSTEM_EVENT_RESET -- the guest has requested a reset of the VM.
+ As with SHUTDOWN, userspace can choose to ignore the request, or
+ to schedule the reset to occur in the future and may call KVM_RUN again.
+ KVM_SYSTEM_EVENT_CRASH -- the guest has crashed and requested crash
+ condition maintenance. Userspace can choose to ignore the request, or
+ to gather a VM memory core dump and/or reset/shutdown the VM.
+
+ /* KVM_EXIT_IOAPIC_EOI */
+ struct {
+ __u8 vector;
+ } eoi;
+
+Indicates that the VCPU's in-kernel local APIC received an EOI for a
+level-triggered IOAPIC interrupt. This exit only triggers when the
+IOAPIC is implemented in userspace (i.e. KVM_CAP_SPLIT_IRQCHIP is enabled);
+the userspace IOAPIC should process the EOI and retrigger the interrupt if
+it is still asserted. Vector is the LAPIC interrupt vector for which the
+EOI was received.
+
+ struct kvm_hyperv_exit {
+#define KVM_EXIT_HYPERV_SYNIC 1
+#define KVM_EXIT_HYPERV_HCALL 2
+ __u32 type;
+ union {
+ struct {
+ __u32 msr;
+ __u64 control;
+ __u64 evt_page;
+ __u64 msg_page;
+ } synic;
+ struct {
+ __u64 input;
+ __u64 result;
+ __u64 params[2];
+ } hcall;
+ } u;
+ };
+ /* KVM_EXIT_HYPERV */
+ struct kvm_hyperv_exit hyperv;
+Indicates that the VCPU exits into userspace to process some tasks
+related to Hyper-V emulation.
+Valid values for 'type' are:
+ KVM_EXIT_HYPERV_SYNIC -- synchronously notify user-space about
+Hyper-V SynIC state change. Notification is used to remap SynIC
+event/message pages and to enable/disable SynIC messages/events processing
+in userspace.
+
+ /* Fix the size of the union. */
+ char padding[256];
+ };
+
+ /*
+ * shared registers between kvm and userspace.
+ * kvm_valid_regs specifies the register classes set by the host
+ * kvm_dirty_regs specifies the register classes dirtied by userspace
+ * struct kvm_sync_regs is architecture specific, as well as the
+ * bits for kvm_valid_regs and kvm_dirty_regs
+ */
+ __u64 kvm_valid_regs;
+ __u64 kvm_dirty_regs;
+ union {
+ struct kvm_sync_regs regs;
+ char padding[SYNC_REGS_SIZE_BYTES];
+ } s;
+
+If KVM_CAP_SYNC_REGS is defined, these fields allow userspace to access
+certain guest registers without having to call SET/GET_*REGS. Thus we can
+avoid some system call overhead if userspace has to handle the exit.
+Userspace can query the validity of the structure by checking
+kvm_valid_regs for specific bits. These bits are architecture specific
+and usually define the validity of a group of registers (e.g. one bit
+for general purpose registers).
+
+Please note that the kernel is allowed to use the kvm_run structure as the
+primary storage for certain register types. Therefore, the kernel may use the
+values in kvm_run even if the corresponding bit in kvm_dirty_regs is not set.
+
+};
+
+
+6. Capabilities that can be enabled on vCPUs
+--------------------------------------------
+
+There are certain capabilities that change the behavior of the virtual CPU or
+the virtual machine when enabled. To enable them, please see section 4.37.
+Below you can find a list of capabilities and what their effect on the vCPU or
+the virtual machine is when enabling them.
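+
+The capabilities below (and those in section 7) are enabled with the
+KVM_ENABLE_CAP ioctl described in section 4.37. A minimal sketch, with
+the capability and file descriptor chosen purely as an example:
+
+  struct kvm_enable_cap cap = {
+          .cap = KVM_CAP_PPC_OSI,
+  };
+
+  ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);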
+ +The following information is provided along with the description: + + Architectures: which instruction set architectures provide this ioctl. + x86 includes both i386 and x86_64. + + Target: whether this is a per-vcpu or per-vm capability. + + Parameters: what parameters are accepted by the capability. + + Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL) + are not detailed, but errors with specific meanings are. + + +6.1 KVM_CAP_PPC_OSI + +Architectures: ppc +Target: vcpu +Parameters: none +Returns: 0 on success; -1 on error + +This capability enables interception of OSI hypercalls that otherwise would +be treated as normal system calls to be injected into the guest. OSI hypercalls +were invented by Mac-on-Linux to have a standardized communication mechanism +between the guest and the host. + +When this capability is enabled, KVM_EXIT_OSI can occur. + + +6.2 KVM_CAP_PPC_PAPR + +Architectures: ppc +Target: vcpu +Parameters: none +Returns: 0 on success; -1 on error + +This capability enables interception of PAPR hypercalls. PAPR hypercalls are +done using the hypercall instruction "sc 1". + +It also sets the guest privilege level to "supervisor" mode. Usually the guest +runs in "hypervisor" privilege mode with a few missing features. + +In addition to the above, it changes the semantics of SDR1. In this mode, the +HTAB address part of SDR1 contains an HVA instead of a GPA, as PAPR keeps the +HTAB invisible to the guest. + +When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur. + + +6.3 KVM_CAP_SW_TLB + +Architectures: ppc +Target: vcpu +Parameters: args[0] is the address of a struct kvm_config_tlb +Returns: 0 on success; -1 on error + +struct kvm_config_tlb { + __u64 params; + __u64 array; + __u32 mmu_type; + __u32 array_len; +}; + +Configures the virtual CPU's TLB array, establishing a shared memory area +between userspace and KVM. The "params" and "array" fields are userspace +addresses of mmu-type-specific data structures. The "array_len" field is an +safety mechanism, and should be set to the size in bytes of the memory that +userspace has reserved for the array. It must be at least the size dictated +by "mmu_type" and "params". + +While KVM_RUN is active, the shared region is under control of KVM. Its +contents are undefined, and any modification by userspace results in +boundedly undefined behavior. + +On return from KVM_RUN, the shared region will reflect the current state of +the guest's TLB. If userspace makes any changes, it must call KVM_DIRTY_TLB +to tell KVM which entries have been changed, prior to calling KVM_RUN again +on this vcpu. + +For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV: + - The "params" field is of type "struct kvm_book3e_206_tlb_params". + - The "array" field points to an array of type "struct + kvm_book3e_206_tlb_entry". + - The array consists of all entries in the first TLB, followed by all + entries in the second TLB. + - Within a TLB, entries are ordered first by increasing set number. Within a + set, entries are ordered by way (increasing ESEL). + - The hash for determining set number in TLB0 is: (MAS2 >> 12) & (num_sets - 1) + where "num_sets" is the tlb_sizes[] value divided by the tlb_ways[] value. + - The tsize field of mas1 shall be set to 4K on TLB0, even though the + hardware ignores this value for TLB0. + +6.4 KVM_CAP_S390_CSS_SUPPORT + +Architectures: s390 +Target: vcpu +Parameters: none +Returns: 0 on success; -1 on error + +This capability enables support for handling of channel I/O instructions. 
+
+TEST PENDING INTERRUPTION and the interrupt portion of TEST SUBCHANNEL are
+handled in-kernel, while the other I/O instructions are passed to userspace.
+
+When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST
+SUBCHANNEL intercepts.
+
+Note that even though this capability is enabled per-vcpu, the complete
+virtual machine is affected.
+
+6.5 KVM_CAP_PPC_EPR
+
+Architectures: ppc
+Target: vcpu
+Parameters: args[0] defines whether the proxy facility is active
+Returns: 0 on success; -1 on error
+
+This capability enables or disables the delivery of interrupts through the
+external proxy facility.
+
+When enabled (args[0] != 0), every time the guest gets an external interrupt
+delivered, it automatically exits into user space with a KVM_EXIT_EPR exit
+to receive the topmost interrupt vector.
+
+When disabled (args[0] == 0), behavior is as if this facility is unsupported.
+
+When this capability is enabled, KVM_EXIT_EPR can occur.
+
+6.6 KVM_CAP_IRQ_MPIC
+
+Architectures: ppc
+Parameters: args[0] is the MPIC device fd
+ args[1] is the MPIC CPU number for this vcpu
+
+This capability connects the vcpu to an in-kernel MPIC device.
+
+6.7 KVM_CAP_IRQ_XICS
+
+Architectures: ppc
+Target: vcpu
+Parameters: args[0] is the XICS device fd
+ args[1] is the XICS CPU number (server ID) for this vcpu
+
+This capability connects the vcpu to an in-kernel XICS device.
+
+6.8 KVM_CAP_S390_IRQCHIP
+
+Architectures: s390
+Target: vm
+Parameters: none
+
+This capability enables the in-kernel irqchip for s390. Please refer to
+"4.24 KVM_CREATE_IRQCHIP" for details.
+
+6.9 KVM_CAP_MIPS_FPU
+
+Architectures: mips
+Target: vcpu
+Parameters: args[0] is reserved for future use (should be 0).
+
+This capability allows the use of the host Floating Point Unit by the guest. It
+allows the Config1.FP bit to be set to enable the FPU in the guest. Once this is
+done the KVM_REG_MIPS_FPR_* and KVM_REG_MIPS_FCR_* registers can be accessed
+(depending on the current guest FPU register mode), and the Status.FR,
+Config5.FRE bits are accessible via the KVM API and also from the guest,
+depending on them being supported by the FPU.
+
+6.10 KVM_CAP_MIPS_MSA
+
+Architectures: mips
+Target: vcpu
+Parameters: args[0] is reserved for future use (should be 0).
+
+This capability allows the use of the MIPS SIMD Architecture (MSA) by the guest.
+It allows the Config3.MSAP bit to be set to enable the use of MSA by the guest.
+Once this is done the KVM_REG_MIPS_VEC_* and KVM_REG_MIPS_MSA_* registers can be
+accessed, and the Config5.MSAEn bit is accessible via the KVM API and also from
+the guest.
+
+6.74 KVM_CAP_SYNC_REGS
+Architectures: s390, x86
+Target: s390: always enabled, x86: vcpu
+Parameters: none
+Returns: x86: KVM_CHECK_EXTENSION returns a bit-array indicating which register
+sets are supported (bitfields defined in arch/x86/include/uapi/asm/kvm.h).
+
+As described above in the kvm_sync_regs struct info in section 5 (kvm_run):
+KVM_CAP_SYNC_REGS "allow[s] userspace to access certain guest registers
+without having to call SET/GET_*REGS". This reduces overhead by eliminating
+repeated ioctl calls for setting and/or getting register values. This is
+particularly important when userspace is making synchronous guest state
+modifications, e.g. when emulating and/or intercepting instructions in
+userspace.
+
+For s390 specifics, please refer to the source code.
+
+For x86:
+- the register sets to be copied out to kvm_run are selectable
+ by userspace (rather than all sets being copied out for every exit).
+
+- vcpu_events are available in addition to regs and sregs.
+
+For x86, the 'kvm_valid_regs' field of struct kvm_run is overloaded to
+function as an input bit-array field set by userspace to indicate the
+specific register sets to be copied out on the next exit.
+
+To indicate when userspace has modified values that should be copied into
+the vCPU, the 'kvm_dirty_regs' bitarray field, which is common to all
+architectures, must be set.
+This is done using the same bitflags as for the 'kvm_valid_regs' field.
+If the dirty bit is not set, then the register set values will not be copied
+into the vCPU even if they've been modified.
+
+Unused bitfields in the bitarrays must be set to zero.
+
+struct kvm_sync_regs {
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+ struct kvm_vcpu_events events;
+};
+
+6.75 KVM_CAP_PPC_IRQ_XIVE
+
+Architectures: ppc
+Target: vcpu
+Parameters: args[0] is the XIVE device fd
+ args[1] is the XIVE CPU number (server ID) for this vcpu
+
+This capability connects the vcpu to an in-kernel XIVE device.
+
+7. Capabilities that can be enabled on VMs
+------------------------------------------
+
+There are certain capabilities that change the behavior of the virtual
+machine when enabled. To enable them, please see section 4.37. Below
+you can find a list of capabilities and what their effect on the VM
+is when enabling them.
+
+The following information is provided along with the description:
+
+ Architectures: which instruction set architectures provide this ioctl.
+ x86 includes both i386 and x86_64.
+
+ Parameters: what parameters are accepted by the capability.
+
+ Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
+ are not detailed, but errors with specific meanings are.
+
+
+7.1 KVM_CAP_PPC_ENABLE_HCALL
+
+Architectures: ppc
+Parameters: args[0] is the sPAPR hcall number
+ args[1] is 0 to disable, 1 to enable in-kernel handling
+
+This capability controls whether individual sPAPR hypercalls (hcalls)
+get handled by the kernel or not. Enabling or disabling in-kernel
+handling of an hcall is effective across the VM. On creation, an
+initial set of hcalls are enabled for in-kernel handling, which
+consists of those hcalls for which in-kernel handlers were implemented
+before this capability was implemented. If disabled, the kernel will
+not attempt to handle the hcall, but will always exit to userspace
+to handle it. Note that it may not make sense to enable some and
+disable others of a group of related hcalls, but KVM does not prevent
+userspace from doing that.
+
+If the hcall number specified is not one that has an in-kernel
+implementation, the KVM_ENABLE_CAP ioctl will fail with an EINVAL
+error.
+
+7.2 KVM_CAP_S390_USER_SIGP
+
+Architectures: s390
+Parameters: none
+
+This capability controls which SIGP orders will be handled completely in user
+space. With this capability enabled, all fast orders will be handled completely
+in the kernel:
+- SENSE
+- SENSE RUNNING
+- EXTERNAL CALL
+- EMERGENCY SIGNAL
+- CONDITIONAL EMERGENCY SIGNAL
+
+All other orders will be handled completely in user space.
+
+Only privileged operation exceptions will be checked for in the kernel (or even
+in the hardware prior to interception). If this capability is not enabled, the
+old way of handling SIGP orders is used (partially in kernel and user space).
+
+7.3 KVM_CAP_S390_VECTOR_REGISTERS
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success, negative value on error
+
+Allows use of the vector registers introduced with the z13 processor, and
+provides for the synchronization between host and user space. Will
+return -EINVAL if the machine does not support vectors.
+
+7.4 KVM_CAP_S390_USER_STSI
+
+Architectures: s390
+Parameters: none
+
+This capability allows post-handlers for the STSI instruction. After
+initial handling in the kernel, KVM exits to user space with
+KVM_EXIT_S390_STSI to allow user space to insert further data.
+
+Before exiting to userspace, kvm handlers should fill in the s390_stsi field of
+vcpu->run:
+struct {
+ __u64 addr;
+ __u8 ar;
+ __u8 reserved;
+ __u8 fc;
+ __u8 sel1;
+ __u16 sel2;
+} s390_stsi;
+
+@addr - guest address of STSI SYSIB
+@fc - function code
+@sel1 - selector 1
+@sel2 - selector 2
+@ar - access register number
+
+KVM handlers should exit to userspace with rc = -EREMOTE.
+
+7.5 KVM_CAP_SPLIT_IRQCHIP
+
+Architectures: x86
+Parameters: args[0] - number of routes reserved for userspace IOAPICs
+Returns: 0 on success, -1 on error
+
+Create a local apic for each processor in the kernel. This can be used
+instead of KVM_CREATE_IRQCHIP if the userspace VMM wishes to emulate the
+IOAPIC and PIC (and also the PIT, even though this has to be enabled
+separately).
+
+This capability also enables in-kernel routing of interrupt requests;
+when KVM_CAP_SPLIT_IRQCHIP is enabled, only routes of KVM_IRQ_ROUTING_MSI
+type are used in the IRQ routing table. The first args[0] MSI routes are
+reserved for the IOAPIC pins. Whenever the LAPIC receives an EOI for these
+routes, a KVM_EXIT_IOAPIC_EOI vmexit will be reported to userspace.
+
+Fails if a VCPU has already been created, or if the irqchip is already in the
+kernel (i.e. KVM_CREATE_IRQCHIP has already been called).
+
+7.6 KVM_CAP_S390_RI
+
+Architectures: s390
+Parameters: none
+
+Allows use of runtime-instrumentation introduced with the zEC12 processor.
+Will return -EINVAL if the machine does not support runtime-instrumentation.
+Will return -EBUSY if a VCPU has already been created.
+
+7.7 KVM_CAP_X2APIC_API
+
+Architectures: x86
+Parameters: args[0] - features that should be enabled
+Returns: 0 on success, -EINVAL when args[0] contains invalid features
+
+Valid feature flags in args[0] are
+
+#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
+#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
+
+Enabling KVM_X2APIC_API_USE_32BIT_IDS changes the behavior of
+KVM_SET_GSI_ROUTING, KVM_SIGNAL_MSI, KVM_SET_LAPIC, and KVM_GET_LAPIC,
+allowing the use of 32-bit APIC IDs. See KVM_CAP_X2APIC_API in their
+respective sections.
+
+KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK must be enabled for x2APIC to work
+in logical mode or with more than 255 VCPUs. Otherwise, KVM treats 0xff
+as a broadcast even in x2APIC mode in order to support physical x2APIC
+without interrupt remapping. This is undesirable in logical mode,
+where 0xff represents CPUs 0-7 in cluster 0.
+
+7.8 KVM_CAP_S390_USER_INSTR0
+
+Architectures: s390
+Parameters: none
+
+With this capability enabled, all illegal instructions 0x0000 (2 bytes) will
+be intercepted and forwarded to user space. User space can use this
+mechanism e.g. to realize 2-byte software breakpoints. The kernel will
+not inject an operation exception for these instructions; user space has
+to take care of that.
+
+This capability can be enabled dynamically even if VCPUs were already
+created and are running.
+
+7.9 KVM_CAP_S390_GS
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success; -EINVAL if the machine does not support
+ guarded storage; -EBUSY if a VCPU has already been created.
+
+Allows use of guarded storage for the KVM guest.
+
+7.10 KVM_CAP_S390_AIS
+
+Architectures: s390
+Parameters: none
+
+Allows use of adapter-interruption suppression.
+Returns: 0 on success; -EBUSY if a VCPU has already been created.
+
+7.11 KVM_CAP_PPC_SMT
+
+Architectures: ppc
+Parameters: vsmt_mode, flags
+
+Enabling this capability on a VM provides userspace with a way to set
+the desired virtual SMT mode (i.e. the number of virtual CPUs per
+virtual core). The virtual SMT mode, vsmt_mode, must be a power of 2
+between 1 and 8. On POWER8, vsmt_mode must also be no greater than
+the number of threads per subcore for the host. Currently flags must
+be 0. A successful call to enable this capability will result in
+vsmt_mode being returned when the KVM_CAP_PPC_SMT capability is
+subsequently queried for the VM. This capability is only supported by
+HV KVM, and can only be set before any VCPUs have been created.
+The KVM_CAP_PPC_SMT_POSSIBLE capability indicates which virtual SMT
+modes are available.
+
+7.12 KVM_CAP_PPC_FWNMI
+
+Architectures: ppc
+Parameters: none
+
+With this capability a machine check exception in the guest address
+space will cause KVM to exit the guest with an NMI exit reason. This
+enables QEMU to build an error log and branch to the guest kernel's
+registered machine check handling routine. Without this capability
+KVM will branch to the guest's 0x200 interrupt vector.
+
+7.13 KVM_CAP_X86_DISABLE_EXITS
+
+Architectures: x86
+Parameters: args[0] defines which exits are disabled
+Returns: 0 on success, -EINVAL when args[0] contains invalid exits
+
+Valid bits in args[0] are
+
+#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0)
+#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
+#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
+#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
+
+Enabling this capability on a VM provides userspace with a way to no
+longer intercept some instructions for improved latency in some
+workloads, and is suggested when vCPUs are associated with dedicated
+physical CPUs. More bits can be added in the future; userspace can
+just pass the KVM_CHECK_EXTENSION result to KVM_ENABLE_CAP to disable
+all such vmexits.
+
+Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
+
+7.14 KVM_CAP_S390_HPAGE_1M
+
+Architectures: s390
+Parameters: none
+Returns: 0 on success, -EINVAL if hpage module parameter was not set
+ or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL
+ flag set
+
+With this capability the KVM support for memory backing with 1m pages
+through hugetlbfs can be enabled for a VM. After the capability is
+enabled, cmma can't be enabled anymore and pfmfi and the storage key
+interpretation are disabled. If cmma has already been enabled or the
+hpage module parameter is not set to 1, -EINVAL is returned.
+
+While it is generally possible to create a huge page backed VM without
+this capability, the VM will not be able to run.
+
+7.15 KVM_CAP_MSR_PLATFORM_INFO
+
+Architectures: x86
+Parameters: args[0] whether feature should be enabled or not
+
+With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise,
+a #GP would be raised when the guest tries to access it. Currently, this
+capability does not enable write permissions of this MSR for the guest.
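+
+A sketch of enabling it (vm_fd is a placeholder; args[0] = 1 requests
+that the feature be turned on):
+
+  struct kvm_enable_cap cap = {
+          .cap = KVM_CAP_MSR_PLATFORM_INFO,
+          .args = { 1 },
+  };
+
+  ioctl(vm_fd, KVM_ENABLE_CAP, &cap);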
+
+7.16 KVM_CAP_PPC_NESTED_HV
+
+Architectures: ppc
+Parameters: none
+Returns: 0 on success, -EINVAL when the implementation doesn't support
+ nested-HV virtualization.
+
+HV-KVM on POWER9 and later systems allows for "nested-HV"
+virtualization, which provides a way for a guest VM to run guests that
+can run using the CPU's supervisor mode (privileged non-hypervisor
+state). Enabling this capability on a VM depends on the CPU having
+the necessary functionality and on the facility being enabled with a
+kvm-hv module parameter.
+
+7.17 KVM_CAP_EXCEPTION_PAYLOAD
+
+Architectures: x86
+Parameters: args[0] whether feature should be enabled or not
+
+With this capability enabled, CR2 will not be modified prior to the
+emulated VM-exit when L1 intercepts a #PF exception that occurs in
+L2. Similarly, for kvm-intel only, DR6 will not be modified prior to
+the emulated VM-exit when L1 intercepts a #DB exception that occurs in
+L2. As a result, when KVM_GET_VCPU_EVENTS reports a pending #PF (or
+#DB) exception for L2, exception.has_payload will be set and the
+faulting address (or the new DR6 bits*) will be reported in the
+exception_payload field. Similarly, when userspace injects a #PF (or
+#DB) into L2 using KVM_SET_VCPU_EVENTS, it is expected to set
+exception.has_payload and to put the faulting address (or the new DR6
+bits*) in the exception_payload field.
+
+This capability also enables exception.pending in struct
+kvm_vcpu_events, which allows userspace to distinguish between pending
+and injected exceptions.
+
+
+* For the new DR6 bits, note that bit 16 is set iff the #DB exception
+ will clear DR6.RTM.
+
+7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
+
+Architectures: x86, arm, arm64, mips
+Parameters: args[0] whether feature should be enabled or not
+
+With this capability enabled, KVM_GET_DIRTY_LOG will not automatically
+clear and write-protect all pages that are returned as dirty.
+Rather, userspace will have to do this operation separately using
+KVM_CLEAR_DIRTY_LOG.
+
+At the cost of a slightly more complicated operation, this provides better
+scalability and responsiveness for two reasons. First, the
+KVM_CLEAR_DIRTY_LOG ioctl can operate on a 64-page granularity rather
+than requiring a sync of a full memslot; this ensures that KVM does not
+take spinlocks for an extended period of time. Second, in some cases a
+large amount of time can pass between a call to KVM_GET_DIRTY_LOG and
+userspace actually using the data in the page. Pages can be modified
+during this time, which is inefficient for both the guest and userspace:
+the guest will incur a higher penalty due to write protection faults,
+while userspace can see false reports of dirty pages. Manual reprotection
+helps reduce this time, improving guest performance and reducing the
+number of dirty log false positives.
+
+KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 was previously available under the name
+KVM_CAP_MANUAL_DIRTY_LOG_PROTECT, but the implementation had bugs that made
+it hard or impossible to use it correctly. The availability of
+KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 signals that those bugs are fixed.
+Userspace should not try to use KVM_CAP_MANUAL_DIRTY_LOG_PROTECT.
+
+8. Other capabilities.
+----------------------
+
+This section lists capabilities that give information about other
+features of the KVM implementation.
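+
+These are queried rather than enabled; for instance (a sketch, with
+kvm_fd an open handle to /dev/kvm):
+
+  /* > 0 means the in-kernel H_RANDOM handler described in 8.1 exists */
+  int have_hwrng = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HWRNG) > 0;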
+
+8.1 KVM_CAP_PPC_HWRNG
+
+Architectures: ppc
+
+This capability, if KVM_CHECK_EXTENSION indicates that it is
+available, means that the kernel has an implementation of the
+H_RANDOM hypercall backed by a hardware random-number generator.
+If present, the kernel H_RANDOM handler can be enabled for guest use
+with the KVM_CAP_PPC_ENABLE_HCALL capability.
+
+8.2 KVM_CAP_HYPERV_SYNIC
+
+Architectures: x86
+
+This capability, if KVM_CHECK_EXTENSION indicates that it is
+available, means that the kernel has an implementation of the
+Hyper-V Synthetic interrupt controller (SynIC). Hyper-V SynIC is
+used to support Windows Hyper-V based guest paravirt drivers (VMBus).
+
+In order to use SynIC, it has to be activated by setting this
+capability via KVM_ENABLE_CAP ioctl on the vcpu fd. Note that this
+will disable the use of APIC hardware virtualization even if supported
+by the CPU, as it's incompatible with SynIC auto-EOI behavior.
+
+8.3 KVM_CAP_PPC_RADIX_MMU
+
+Architectures: ppc
+
+This capability, if KVM_CHECK_EXTENSION indicates that it is
+available, means that the kernel can support guests using the
+radix MMU defined in Power ISA V3.00 (as implemented in the POWER9
+processor).
+
+8.4 KVM_CAP_PPC_HASH_MMU_V3
+
+Architectures: ppc
+
+This capability, if KVM_CHECK_EXTENSION indicates that it is
+available, means that the kernel can support guests using the
+hashed page table MMU defined in Power ISA V3.00 (as implemented in
+the POWER9 processor), including in-memory segment tables.
+
+8.5 KVM_CAP_MIPS_VZ
+
+Architectures: mips
+
+This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
+it is available, means that full hardware assisted virtualization capabilities
+of the hardware are available for use through KVM. An appropriate
+KVM_VM_MIPS_* type must be passed to KVM_CREATE_VM to create a VM which
+utilises it.
+
+If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
+available, it means that the VM is using full hardware assisted virtualization
+capabilities of the hardware. This is useful to check after creating a VM with
+KVM_VM_MIPS_DEFAULT.
+
+The value returned by KVM_CHECK_EXTENSION should be compared against known
+values (see below). All other values are reserved. This is to allow for the
+possibility of other hardware assisted virtualization implementations which
+may be incompatible with the MIPS VZ ASE.
+
+ 0: The trap & emulate implementation is in use to run guest code in user
+ mode. Guest virtual memory segments are rearranged to fit the guest in the
+ user mode address space.
+
+ 1: The MIPS VZ ASE is in use, providing full hardware assisted
+ virtualization, including standard guest virtual memory segments.
+
+8.6 KVM_CAP_MIPS_TE
+
+Architectures: mips
+
+This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
+it is available, means that the trap & emulate implementation is available to
+run guest code in user mode, even if KVM_CAP_MIPS_VZ indicates that hardware
+assisted virtualisation is also available. KVM_VM_MIPS_TE (0) must be passed
+to KVM_CREATE_VM to create a VM which utilises it.
+
+If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
+available, it means that the VM is using trap & emulate.
+
+8.7 KVM_CAP_MIPS_64BIT
+
+Architectures: mips
+
+This capability indicates the supported architecture type of the guest, i.e. the
+supported register and address width.
+
+The values returned when this capability is checked by KVM_CHECK_EXTENSION on a
+kvm VM handle correspond roughly to the CP0_Config.AT register field, and should
+be checked specifically against known values (see below). All other values are
+reserved.
+
+ 0: MIPS32 or microMIPS32.
+ Both registers and addresses are 32-bits wide.
+ It will only be possible to run 32-bit guest code.
+
+ 1: MIPS64 or microMIPS64 with access only to 32-bit compatibility segments.
+ Registers are 64-bits wide, but addresses are 32-bits wide.
+ 64-bit guest code may run but cannot access MIPS64 memory segments.
+ It will also be possible to run 32-bit guest code.
+
+ 2: MIPS64 or microMIPS64 with access to all address segments.
+ Both registers and addresses are 64-bits wide.
+ It will be possible to run 64-bit or 32-bit guest code.
+
+8.9 KVM_CAP_ARM_USER_IRQ
+
+Architectures: arm, arm64
+
+This capability, if KVM_CHECK_EXTENSION indicates that it is available, means
+that if userspace creates a VM without an in-kernel interrupt controller, it
+will be notified of changes to the output level of in-kernel emulated devices,
+which can generate virtual interrupts, presented to the VM.
+For such VMs, on every return to userspace, the kernel
+updates the vcpu's run->s.regs.device_irq_level field to represent the actual
+output level of the device.
+
+Whenever kvm detects a change in the device output level, kvm guarantees at
+least one return to userspace before running the VM. This exit could either
+be a KVM_EXIT_INTR or any other exit event, like KVM_EXIT_MMIO. This way,
+userspace can always sample the device output level and re-compute the state of
+the userspace interrupt controller. Userspace should always check the state
+of run->s.regs.device_irq_level on every kvm exit.
+The value in run->s.regs.device_irq_level can represent both level and edge
+triggered interrupt signals, depending on the device. Edge triggered interrupt
+signals will exit to userspace with the bit in run->s.regs.device_irq_level
+set exactly once per edge signal.
+
+The field run->s.regs.device_irq_level is available independent of
+run->kvm_valid_regs or run->kvm_dirty_regs bits.
+
+If KVM_CAP_ARM_USER_IRQ is supported, the KVM_CHECK_EXTENSION ioctl returns a
+number larger than 0 indicating the version of this capability that is
+implemented, and thereby which bits in run->s.regs.device_irq_level can signal
+values.
+
+Currently the following bits are defined for the device_irq_level bitmap:
+
+ KVM_CAP_ARM_USER_IRQ >= 1:
+
+ KVM_ARM_DEV_EL1_VTIMER - EL1 virtual timer
+ KVM_ARM_DEV_EL1_PTIMER - EL1 physical timer
+ KVM_ARM_DEV_PMU - ARM PMU overflow interrupt signal
+
+Future versions of kvm may implement additional events. These will get
+indicated by returning a higher number from KVM_CHECK_EXTENSION and will be
+listed above.
+
+8.10 KVM_CAP_PPC_SMT_POSSIBLE
+
+Architectures: ppc
+
+Querying this capability returns a bitmap indicating the possible
+virtual SMT modes that can be set using KVM_CAP_PPC_SMT. If bit N
+(counting from the right) is set, then a virtual SMT mode of 2^N is
+available.
+
+8.11 KVM_CAP_HYPERV_SYNIC2
+
+Architectures: x86
+
+This capability enables a newer version of Hyper-V Synthetic interrupt
+controller (SynIC). The only difference with KVM_CAP_HYPERV_SYNIC is that KVM
+doesn't clear SynIC message and event flags pages when they are enabled by
+writing to the respective MSRs.
+
+8.12 KVM_CAP_HYPERV_VP_INDEX
+
+Architectures: x86
+
+This capability indicates that userspace can load HV_X64_MSR_VP_INDEX msr.
Its
+value is used to denote the target vcpu for a SynIC interrupt. For
+compatibility, KVM initializes this msr to KVM's internal vcpu index. When this
+capability is absent, userspace can still query this msr's value.
+
+8.13 KVM_CAP_S390_AIS_MIGRATION
+
+Architectures: s390
+Parameters: none
+
+This capability indicates if the flic device will be able to get/set the
+AIS states for migration via the KVM_DEV_FLIC_AISM_ALL attribute and allows
+userspace to discover this without having to create a flic device.
+
+8.14 KVM_CAP_S390_PSW
+
+Architectures: s390
+
+This capability indicates that the PSW is exposed via the kvm_run structure.
+
+8.15 KVM_CAP_S390_GMAP
+
+Architectures: s390
+
+This capability indicates that the user space memory used as guest mapping can
+be anywhere in the user memory address space, as long as the memory slots are
+aligned and sized to a segment (1MB) boundary.
+
+8.16 KVM_CAP_S390_COW
+
+Architectures: s390
+
+This capability indicates that the user space memory used as guest mapping can
+use copy-on-write semantics as well as dirty pages tracking via read-only page
+tables.
+
+8.17 KVM_CAP_S390_BPB
+
+Architectures: s390
+
+This capability indicates that kvm will implement the interfaces to handle
+reset, migration and nested KVM for branch prediction blocking. The stfle
+facility 82 should not be provided to the guest without this capability.
+
+8.18 KVM_CAP_HYPERV_TLBFLUSH
+
+Architectures: x86
+
+This capability indicates that KVM supports paravirtualized Hyper-V TLB Flush
+hypercalls:
+HvFlushVirtualAddressSpace, HvFlushVirtualAddressSpaceEx,
+HvFlushVirtualAddressList, HvFlushVirtualAddressListEx.
+
+8.19 KVM_CAP_ARM_INJECT_SERROR_ESR
+
+Architectures: arm, arm64
+
+This capability indicates that userspace can specify (via the
+KVM_SET_VCPU_EVENTS ioctl) the syndrome value reported to the guest when it
+takes a virtual SError interrupt exception.
+If KVM advertises this capability, userspace can only specify the ISS field for
+the ESR syndrome. Other parts of the ESR, such as the EC, are generated by the
+CPU when the exception is taken. If this virtual SError is taken to EL1 using
+AArch64, this value will be reported in the ISS field of ESR_ELx.
+
+See KVM_CAP_VCPU_EVENTS for more details.
+
+8.20 KVM_CAP_HYPERV_SEND_IPI
+
+Architectures: x86
+
+This capability indicates that KVM supports paravirtualized Hyper-V IPI send
+hypercalls:
+HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx.
diff --git a/Documentation/virt/kvm/arm/hyp-abi.txt b/Documentation/virt/kvm/arm/hyp-abi.txt
new file mode 100644
index 000000000000..a20a0bee268d
--- /dev/null
+++ b/Documentation/virt/kvm/arm/hyp-abi.txt
@@ -0,0 +1,53 @@
+* Internal ABI between the kernel and HYP
+
+This file documents the interaction between the Linux kernel and the
+hypervisor layer when running Linux as a hypervisor (for example
+KVM). It doesn't cover the interaction of the kernel with the
+hypervisor when running as a guest (under Xen, KVM or any other
+hypervisor), or any hypervisor-specific interaction when the kernel is
+used as a host.
+
+On arm and arm64 (without VHE), the kernel doesn't run in hypervisor
+mode, but still needs to interact with it, allowing a built-in
+hypervisor to be either installed or torn down.
+
+In order to achieve this, the kernel must be booted at HYP (arm) or
+EL2 (arm64), allowing it to install a set of stubs before dropping to
+SVC/EL1. These stubs are accessible by using a 'hvc #0' instruction,
+and only act on individual CPUs.
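+
+On arm64, such a stub hypercall could be issued from C roughly as
+follows (a sketch; the clobber list mirrors the convention described
+below, and the kernel's own low-level helpers remain authoritative):
+
+  static unsigned long hvc_stub_call(unsigned long cmd, unsigned long arg)
+  {
+          register unsigned long x0 asm("x0") = cmd;
+          register unsigned long x1 asm("x1") = arg;
+
+          /* x0 carries the function ID in and the result out */
+          asm volatile("hvc #0"
+                       : "+r" (x0) : "r" (x1)
+                       : "x2", "x3", "x4", "x5", "x6", "x7", "x8",
+                         "x9", "x10", "x11", "x12", "x13", "x14",
+                         "x15", "x16", "x17", "x18", "memory");
+          return x0;
+  }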
+
+Unless specified otherwise, any built-in hypervisor must implement
+these functions (see arch/arm{,64}/include/asm/virt.h):
+
+* r0/x0 = HVC_SET_VECTORS
+ r1/x1 = vectors
+
+ Set HVBAR/VBAR_EL2 to 'vectors' to enable a hypervisor. 'vectors'
+ must be a physical address, and respect the alignment requirements
+ of the architecture. Only implemented by the initial stubs, not by
+ Linux hypervisors.
+
+* r0/x0 = HVC_RESET_VECTORS
+
+ Turn the HYP/EL2 MMU off, and reset HVBAR/VBAR_EL2 to the initial
+ stubs' exception vector value. This effectively disables an existing
+ hypervisor.
+
+* r0/x0 = HVC_SOFT_RESTART
+ r1/x1 = restart address
+ x2 = x0's value when entering the next payload (arm64)
+ x3 = x1's value when entering the next payload (arm64)
+ x4 = x2's value when entering the next payload (arm64)
+
+ Mask all exceptions, disable the MMU, move the arguments into place
+ (arm64 only), and jump to the restart address while at HYP/EL2. This
+ hypercall is not expected to return to its caller.
+
+Any other value of r0/x0 triggers a hypervisor-specific handling,
+which is not documented here.
+
+The return value of a stub hypercall is held by r0/x0, and is 0 on
+success, and HVC_STUB_ERR on error. A stub hypercall is allowed to
+clobber any of the caller-saved registers (x0-x18 on arm64, r0-r3 and
+ip on arm). It is thus recommended to use a function call to perform
+the hypercall.
diff --git a/Documentation/virt/kvm/arm/psci.txt b/Documentation/virt/kvm/arm/psci.txt
new file mode 100644
index 000000000000..559586fc9d37
--- /dev/null
+++ b/Documentation/virt/kvm/arm/psci.txt
@@ -0,0 +1,61 @@
+KVM implements the PSCI (Power State Coordination Interface)
+specification in order to provide services such as CPU on/off, reset
+and power-off to the guest.
+
+The PSCI specification is regularly updated to provide new features,
+and KVM implements these updates if they make sense from a virtualization
+point of view.
+
+This means that a guest booted on two different versions of KVM can
+observe two different "firmware" revisions. This could cause issues if
+a given guest is tied to a particular PSCI revision (unlikely), or if
+a migration causes a different PSCI version to be exposed out of the
+blue to an unsuspecting guest.
+
+In order to remedy this situation, KVM exposes a set of "firmware
+pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
+interface. These registers can be saved/restored by userspace, and set
+to a convenient value if required.
+
+The following register is defined:
+
+* KVM_REG_ARM_PSCI_VERSION:
+
+ - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
+ (and thus has already been initialized)
+ - Returns the current PSCI version on GET_ONE_REG (defaulting to the
+ highest PSCI version implemented by KVM and compatible with v0.2)
+ - Allows any PSCI version implemented by KVM and compatible with
+ v0.2 to be set with SET_ONE_REG
+ - Affects the whole VM (even if the register view is per-vcpu)
+
+* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
+ Holds the state of the firmware support to mitigate CVE-2017-5715, as
+ offered by KVM to the guest via a HVC call. The workaround is described
+ under SMCCC_ARCH_WORKAROUND_1 in [1].
+ Accepted values are:
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL: KVM does not offer
+ firmware support for the workaround. The mitigation status for the
+ guest is unknown.
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL: The workaround HVC call is
+ available to the guest and required for the mitigation.
+    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED: The workaround HVC call
+    is available to the guest, but it is not needed on this VCPU.
+
+* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
+  Holds the state of the firmware support to mitigate CVE-2018-3639, as
+  offered by KVM to the guest via a HVC call. The workaround is described
+  under SMCCC_ARCH_WORKAROUND_2 in [1].
+  Accepted values are:
+    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL: A workaround is not
+    available. KVM does not offer firmware support for the workaround.
+    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN: The workaround state is
+    unknown. KVM does not offer firmware support for the workaround.
+    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL: The workaround is available,
+    and can be disabled by a vCPU. If
+    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED is set, it is active for
+    this vCPU.
+    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED: The workaround is
+    always active on this vCPU or it is not needed.
+
+[1] https://developer.arm.com/-/media/developer/pdf/ARM_DEN_0070A_Firmware_interfaces_for_mitigating_CVE-2017-5715.pdf
diff --git a/Documentation/virt/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst
new file mode 100644
index 000000000000..01b081f6e7ea
--- /dev/null
+++ b/Documentation/virt/kvm/cpuid.rst
@@ -0,0 +1,107 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============
+KVM CPUID bits
+==============
+
+:Author: Glauber Costa
+
+A guest running on a kvm host can check some of its features using
+cpuid. This is not always guaranteed to work, since userspace can
+mask out some, or even all, KVM-related cpuid features before launching
+a guest.
+
+KVM cpuid functions are:
+
+function: KVM_CPUID_SIGNATURE (0x40000000)
+
+returns::
+
+   eax = 0x40000001
+   ebx = 0x4b4d564b
+   ecx = 0x564b4d56
+   edx = 0x4d
+
+Note that this value in ebx, ecx and edx corresponds to the string "KVMKVMKVM".
+The value in eax corresponds to the maximum cpuid function present in this leaf,
+and will be updated if more functions are added in the future.
+Note also that old hosts set eax value to 0x0. This should
+be interpreted as if the value was 0x40000001.
+This function queries the presence of KVM cpuid leaves.
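For illustration, here is a minimal guest-side sketch of this signature check.
It is an example under stated assumptions, not part of the KVM ABI: it relies
on the __cpuid() macro from the GCC/Clang <cpuid.h> header, and the main()
harness and output strings are invented for the example.

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    #define KVM_CPUID_SIGNATURE 0x40000000

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;
            char sig[13];

            __cpuid(KVM_CPUID_SIGNATURE, eax, ebx, ecx, edx);

            /* On a KVM host ebx/ecx/edx spell "KVMKVMKVM\0\0\0". */
            memcpy(sig + 0, &ebx, 4);
            memcpy(sig + 4, &ecx, 4);
            memcpy(sig + 8, &edx, 4);
            sig[12] = '\0';

            if (strcmp(sig, "KVMKVMKVM") == 0) {
                    /* Old hosts report eax == 0; treat it as 0x40000001. */
                    unsigned int max_leaf = eax ? eax : 0x40000001;
                    printf("KVM detected, max KVM cpuid leaf 0x%x\n", max_leaf);
            } else {
                    printf("no KVM signature found\n");
            }
            return 0;
    }

Note that, as stated above, userspace may mask features out, so a signature
match only proves which hypervisor is present, not that any particular
feature is available.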
+
+function: KVM_CPUID_FEATURES (0x40000001)
+
+returns::
+
+   ebx, ecx
+   eax = an OR'ed group of (1 << flag)
+
+where ``flag`` is defined as below:
+
+================================== =========== ================================
+flag                               value       meaning
+================================== =========== ================================
+KVM_FEATURE_CLOCKSOURCE            0           kvmclock available at msrs
+                                               0x11 and 0x12
+
+KVM_FEATURE_NOP_IO_DELAY           1           not necessary to perform delays
+                                               on PIO operations
+
+KVM_FEATURE_MMU_OP                 2           deprecated
+
+KVM_FEATURE_CLOCKSOURCE2           3           kvmclock available at msrs
+                                               0x4b564d00 and 0x4b564d01
+
+KVM_FEATURE_ASYNC_PF               4           async pf can be enabled by
+                                               writing to msr 0x4b564d02
+
+KVM_FEATURE_STEAL_TIME             5           steal time can be enabled by
+                                               writing to msr 0x4b564d03
+
+KVM_FEATURE_PV_EOI                 6           paravirtualized end of interrupt
+                                               handler can be enabled by
+                                               writing to msr 0x4b564d04
+
+KVM_FEATURE_PV_UNHALT              7           guest checks this feature bit
+                                               before enabling paravirtualized
+                                               spinlock support
+
+KVM_FEATURE_PV_TLB_FLUSH           9           guest checks this feature bit
+                                               before enabling paravirtualized
+                                               tlb flush
+
+KVM_FEATURE_ASYNC_PF_VMEXIT        10          paravirtualized async PF VM EXIT
+                                               can be enabled by setting bit 2
+                                               when writing to msr 0x4b564d02
+
+KVM_FEATURE_PV_SEND_IPI            11          guest checks this feature bit
+                                               before enabling paravirtualized
+                                               send IPIs
+
+KVM_FEATURE_PV_POLL_CONTROL        12          host-side polling on HLT can
+                                               be disabled by writing
+                                               to msr 0x4b564d05.
+
+KVM_FEATURE_PV_SCHED_YIELD         13          guest checks this feature bit
+                                               before using paravirtualized
+                                               sched yield.
+
+KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24          host will warn if no guest-side
+                                               per-cpu warps are expected in
+                                               kvmclock
+================================== =========== ================================
+
+::
+
+      edx = an OR'ed group of (1 << flag)
+
+Where ``flag`` here is defined as below:
+
+================== ============ =================================
+flag               value        meaning
+================== ============ =================================
+KVM_HINTS_REALTIME 0            guest checks this feature bit to
+                                determine that vCPUs are never
+                                preempted for an unlimited time,
+                                allowing optimizations
+================== ============ =================================
diff --git a/Documentation/virt/kvm/devices/README b/Documentation/virt/kvm/devices/README
new file mode 100644
index 000000000000..34a69834124a
--- /dev/null
+++ b/Documentation/virt/kvm/devices/README
@@ -0,0 +1 @@
+This directory contains specific device bindings for KVM_CAP_DEVICE_CTRL.
diff --git a/Documentation/virt/kvm/devices/arm-vgic-its.txt b/Documentation/virt/kvm/devices/arm-vgic-its.txt
new file mode 100644
index 000000000000..eeaa95b893a8
--- /dev/null
+++ b/Documentation/virt/kvm/devices/arm-vgic-its.txt
@@ -0,0 +1,181 @@
+ARM Virtual Interrupt Translation Service (ITS)
+===============================================
+
+Device types supported:
+  KVM_DEV_TYPE_ARM_VGIC_ITS    ARM Interrupt Translation Service Controller
+
+The ITS allows MSI(-X) interrupts to be injected into guests. This extension is
+optional. Creating a virtual ITS controller also requires a host GICv3 (see
+arm-vgic-v3.txt), but does not depend on having physical ITS controllers.
+
+There can be multiple ITS controllers per guest; each of them has to have
+a separate, non-overlapping MMIO region.
+
+
+Groups:
+  KVM_DEV_ARM_VGIC_GRP_ADDR
+  Attributes:
+    KVM_VGIC_ITS_ADDR_TYPE (rw, 64-bit)
+      Base address in the guest physical address space of the GICv3 ITS
+      control register frame.
+ This address needs to be 64K aligned and the region covers 128K. + Errors: + -E2BIG: Address outside of addressable IPA range + -EINVAL: Incorrectly aligned address + -EEXIST: Address already configured + -EFAULT: Invalid user pointer for attr->addr. + -ENODEV: Incorrect attribute or the ITS is not supported. + + + KVM_DEV_ARM_VGIC_GRP_CTRL + Attributes: + KVM_DEV_ARM_VGIC_CTRL_INIT + request the initialization of the ITS, no additional parameter in + kvm_device_attr.addr. + + KVM_DEV_ARM_ITS_CTRL_RESET + reset the ITS, no additional parameter in kvm_device_attr.addr. + See "ITS Reset State" section. + + KVM_DEV_ARM_ITS_SAVE_TABLES + save the ITS table data into guest RAM, at the location provisioned + by the guest in corresponding registers/table entries. + + The layout of the tables in guest memory defines an ABI. The entries + are laid out in little endian format as described in the last paragraph. + + KVM_DEV_ARM_ITS_RESTORE_TABLES + restore the ITS tables from guest RAM to ITS internal structures. + + The GICV3 must be restored before the ITS and all ITS registers but + the GITS_CTLR must be restored before restoring the ITS tables. + + The GITS_IIDR read-only register must also be restored before + calling KVM_DEV_ARM_ITS_RESTORE_TABLES as the IIDR revision field + encodes the ABI revision. + + The expected ordering when restoring the GICv3/ITS is described in section + "ITS Restore Sequence". + + Errors: + -ENXIO: ITS not properly configured as required prior to setting + this attribute + -ENOMEM: Memory shortage when allocating ITS internal data + -EINVAL: Inconsistent restored data + -EFAULT: Invalid guest ram access + -EBUSY: One or more VCPUS are running + -EACCES: The virtual ITS is backed by a physical GICv4 ITS, and the + state is not available + + KVM_DEV_ARM_VGIC_GRP_ITS_REGS + Attributes: + The attr field of kvm_device_attr encodes the offset of the + ITS register, relative to the ITS control frame base address + (ITS_base). + + kvm_device_attr.addr points to a __u64 value whatever the width + of the addressed register (32/64 bits). 64 bit registers can only + be accessed with full length. + + Writes to read-only registers are ignored by the kernel except for: + - GITS_CREADR. It must be restored otherwise commands in the queue + will be re-executed after restoring CWRITER. GITS_CREADR must be + restored before restoring the GITS_CTLR which is likely to enable the + ITS. Also it must be restored after GITS_CBASER since a write to + GITS_CBASER resets GITS_CREADR. + - GITS_IIDR. The Revision field encodes the table layout ABI revision. + In the future we might implement direct injection of virtual LPIs. + This will require an upgrade of the table layout and an evolution of + the ABI. GITS_IIDR must be restored before calling + KVM_DEV_ARM_ITS_RESTORE_TABLES. + + For other registers, getting or setting a register has the same + effect as reading/writing the register on real hardware. + Errors: + -ENXIO: Offset does not correspond to any supported register + -EFAULT: Invalid user pointer for attr->addr + -EINVAL: Offset is not 64-bit aligned + -EBUSY: one or more VCPUS are running + + ITS Restore Sequence: + ------------------------- + +The following ordering must be followed when restoring the GIC and the ITS: +a) restore all guest memory and create vcpus +b) restore all redistributors +c) provide the ITS base address + (KVM_DEV_ARM_VGIC_GRP_ADDR) +d) restore the ITS in the following order: + 1. Restore GITS_CBASER + 2. Restore all other GITS_ registers, except GITS_CTLR! 
+ 3. Load the ITS table data (KVM_DEV_ARM_ITS_RESTORE_TABLES) + 4. Restore GITS_CTLR + +Then vcpus can be started. + + ITS Table ABI REV0: + ------------------- + + Revision 0 of the ABI only supports the features of a virtual GICv3, and does + not support a virtual GICv4 with support for direct injection of virtual + interrupts for nested hypervisors. + + The device table and ITT are indexed by the DeviceID and EventID, + respectively. The collection table is not indexed by CollectionID, and the + entries in the collection are listed in no particular order. + All entries are 8 bytes. + + Device Table Entry (DTE): + + bits: | 63| 62 ... 49 | 48 ... 5 | 4 ... 0 | + values: | V | next | ITT_addr | Size | + + where; + - V indicates whether the entry is valid. If not, other fields + are not meaningful. + - next: equals to 0 if this entry is the last one; otherwise it + corresponds to the DeviceID offset to the next DTE, capped by + 2^14 -1. + - ITT_addr matches bits [51:8] of the ITT address (256 Byte aligned). + - Size specifies the supported number of bits for the EventID, + minus one + + Collection Table Entry (CTE): + + bits: | 63| 62 .. 52 | 51 ... 16 | 15 ... 0 | + values: | V | RES0 | RDBase | ICID | + + where: + - V indicates whether the entry is valid. If not, other fields are + not meaningful. + - RES0: reserved field with Should-Be-Zero-or-Preserved behavior. + - RDBase is the PE number (GICR_TYPER.Processor_Number semantic), + - ICID is the collection ID + + Interrupt Translation Entry (ITE): + + bits: | 63 ... 48 | 47 ... 16 | 15 ... 0 | + values: | next | pINTID | ICID | + + where: + - next: equals to 0 if this entry is the last one; otherwise it corresponds + to the EventID offset to the next ITE capped by 2^16 -1. + - pINTID is the physical LPI ID; if zero, it means the entry is not valid + and other fields are not meaningful. + - ICID is the collection ID + + ITS Reset State: + ---------------- + +RESET returns the ITS to the same state that it was when first created and +initialized. When the RESET command returns, the following things are +guaranteed: + +- The ITS is not enabled and quiescent + GITS_CTLR.Enabled = 0 .Quiescent=1 +- There is no internally cached state +- No collection or device table are used + GITS_BASER.Valid = 0 +- GITS_CBASER = 0, GITS_CREADR = 0, GITS_CWRITER = 0 +- The ABI version is unchanged and remains the one set when the ITS + device was first created. diff --git a/Documentation/virt/kvm/devices/arm-vgic-v3.txt b/Documentation/virt/kvm/devices/arm-vgic-v3.txt new file mode 100644 index 000000000000..ff290b43c8e5 --- /dev/null +++ b/Documentation/virt/kvm/devices/arm-vgic-v3.txt @@ -0,0 +1,251 @@ +ARM Virtual Generic Interrupt Controller v3 and later (VGICv3) +============================================================== + + +Device types supported: + KVM_DEV_TYPE_ARM_VGIC_V3 ARM Generic Interrupt Controller v3.0 + +Only one VGIC instance may be instantiated through this API. The created VGIC +will act as the VM interrupt controller, requiring emulated user-space devices +to inject interrupts to the VGIC instead of directly to CPUs. It is not +possible to create both a GICv3 and GICv2 on the same VM. + +Creating a guest GICv3 device requires a host GICv3 as well. + + +Groups: + KVM_DEV_ARM_VGIC_GRP_ADDR + Attributes: + KVM_VGIC_V3_ADDR_TYPE_DIST (rw, 64-bit) + Base address in the guest physical address space of the GICv3 distributor + register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V3. 
+ This address needs to be 64K aligned and the region covers 64 KByte. + + KVM_VGIC_V3_ADDR_TYPE_REDIST (rw, 64-bit) + Base address in the guest physical address space of the GICv3 + redistributor register mappings. There are two 64K pages for each + VCPU and all of the redistributor pages are contiguous. + Only valid for KVM_DEV_TYPE_ARM_VGIC_V3. + This address needs to be 64K aligned. + + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION (rw, 64-bit) + The attribute data pointed to by kvm_device_attr.addr is a __u64 value: + bits: | 63 .... 52 | 51 .... 16 | 15 - 12 |11 - 0 + values: | count | base | flags | index + - index encodes the unique redistributor region index + - flags: reserved for future use, currently 0 + - base field encodes bits [51:16] of the guest physical base address + of the first redistributor in the region. + - count encodes the number of redistributors in the region. Must be + greater than 0. + There are two 64K pages for each redistributor in the region and + redistributors are laid out contiguously within the region. Regions + are filled with redistributors in the index order. The sum of all + region count fields must be greater than or equal to the number of + VCPUs. Redistributor regions must be registered in the incremental + index order, starting from index 0. + The characteristics of a specific redistributor region can be read + by presetting the index field in the attr data. + Only valid for KVM_DEV_TYPE_ARM_VGIC_V3. + + It is invalid to mix calls with KVM_VGIC_V3_ADDR_TYPE_REDIST and + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION attributes. + + Errors: + -E2BIG: Address outside of addressable IPA range + -EINVAL: Incorrectly aligned address, bad redistributor region + count/index, mixed redistributor region attribute usage + -EEXIST: Address already configured + -ENOENT: Attempt to read the characteristics of a non existing + redistributor region + -ENXIO: The group or attribute is unknown/unsupported for this device + or hardware support is missing. + -EFAULT: Invalid user pointer for attr->addr. + + + KVM_DEV_ARM_VGIC_GRP_DIST_REGS + KVM_DEV_ARM_VGIC_GRP_REDIST_REGS + Attributes: + The attr field of kvm_device_attr encodes two values: + bits: | 63 .... 32 | 31 .... 0 | + values: | mpidr | offset | + + All distributor regs are (rw, 32-bit) and kvm_device_attr.addr points to a + __u32 value. 64-bit registers must be accessed by separately accessing the + lower and higher word. + + Writes to read-only registers are ignored by the kernel. + + KVM_DEV_ARM_VGIC_GRP_DIST_REGS accesses the main distributor registers. + KVM_DEV_ARM_VGIC_GRP_REDIST_REGS accesses the redistributor of the CPU + specified by the mpidr. + + The offset is relative to the "[Re]Distributor base address" as defined + in the GICv3/4 specs. Getting or setting such a register has the same + effect as reading or writing the register on real hardware, except for the + following registers: GICD_STATUSR, GICR_STATUSR, GICD_ISPENDR, + GICR_ISPENDR0, GICD_ICPENDR, and GICR_ICPENDR0. These registers behave + differently when accessed via this interface compared to their + architecturally defined behavior to allow software a full view of the + VGIC's internal state. + + The mpidr field is used to specify which + redistributor is accessed. The mpidr is ignored for the distributor. + + The mpidr encoding is based on the affinity information in the + architecture defined MPIDR, and the field is encoded as follows: + | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 
32 | + | Aff3 | Aff2 | Aff1 | Aff0 | + + Note that distributor fields are not banked, but return the same value + regardless of the mpidr used to access the register. + + GICD_IIDR.Revision is updated when the KVM implementation is changed in a + way directly observable by the guest or userspace. Userspace should read + GICD_IIDR from KVM and write back the read value to confirm its expected + behavior is aligned with the KVM implementation. Userspace should set + GICD_IIDR before setting any other registers to ensure the expected + behavior. + + + The GICD_STATUSR and GICR_STATUSR registers are architecturally defined such + that a write of a clear bit has no effect, whereas a write with a set bit + clears that value. To allow userspace to freely set the values of these two + registers, setting the attributes with the register offsets for these two + registers simply sets the non-reserved bits to the value written. + + + Accesses (reads and writes) to the GICD_ISPENDR register region and + GICR_ISPENDR0 registers get/set the value of the latched pending state for + the interrupts. + + This is identical to the value returned by a guest read from ISPENDR for an + edge triggered interrupt, but may differ for level triggered interrupts. + For edge triggered interrupts, once an interrupt becomes pending (whether + because of an edge detected on the input line or because of a guest write + to ISPENDR) this state is "latched", and only cleared when either the + interrupt is activated or when the guest writes to ICPENDR. A level + triggered interrupt may be pending either because the level input is held + high by a device, or because of a guest write to the ISPENDR register. Only + ISPENDR writes are latched; if the device lowers the line level then the + interrupt is no longer pending unless the guest also wrote to ISPENDR, and + conversely writes to ICPENDR or activations of the interrupt do not clear + the pending status if the line level is still being held high. (These + rules are documented in the GICv3 specification descriptions of the ICPENDR + and ISPENDR registers.) For a level triggered interrupt the value accessed + here is that of the latch which is set by ISPENDR and cleared by ICPENDR or + interrupt activation, whereas the value returned by a guest read from + ISPENDR is the logical OR of the latch value and the input line level. + + Raw access to the latch state is provided to userspace so that it can save + and restore the entire GIC internal state (which is defined by the + combination of the current input line level and the latch state, and cannot + be deduced from purely the line level and the value of the ISPENDR + registers). + + Accesses to GICD_ICPENDR register region and GICR_ICPENDR0 registers have + RAZ/WI semantics, meaning that reads always return 0 and writes are always + ignored. + + Errors: + -ENXIO: Getting or setting this register is not yet supported + -EBUSY: One or more VCPUs are running + + + KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS + Attributes: + The attr field of kvm_device_attr encodes two values: + bits: | 63 .... 32 | 31 .... 16 | 15 .... 0 | + values: | mpidr | RES | instr | + + The mpidr field encodes the CPU ID based on the affinity information in the + architecture defined MPIDR, and the field is encoded as follows: + | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 
32 | + | Aff3 | Aff2 | Aff1 | Aff0 | + + The instr field encodes the system register to access based on the fields + defined in the A64 instruction set encoding for system register access + (RES means the bits are reserved for future use and should be zero): + + | 15 ... 14 | 13 ... 11 | 10 ... 7 | 6 ... 3 | 2 ... 0 | + | Op 0 | Op1 | CRn | CRm | Op2 | + + All system regs accessed through this API are (rw, 64-bit) and + kvm_device_attr.addr points to a __u64 value. + + KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS accesses the CPU interface registers for the + CPU specified by the mpidr field. + + CPU interface registers access is not implemented for AArch32 mode. + Error -ENXIO is returned when accessed in AArch32 mode. + Errors: + -ENXIO: Getting or setting this register is not yet supported + -EBUSY: VCPU is running + -EINVAL: Invalid mpidr or register value supplied + + + KVM_DEV_ARM_VGIC_GRP_NR_IRQS + Attributes: + A value describing the number of interrupts (SGI, PPI and SPI) for + this GIC instance, ranging from 64 to 1024, in increments of 32. + + kvm_device_attr.addr points to a __u32 value. + + Errors: + -EINVAL: Value set is out of the expected range + -EBUSY: Value has already be set. + + + KVM_DEV_ARM_VGIC_GRP_CTRL + Attributes: + KVM_DEV_ARM_VGIC_CTRL_INIT + request the initialization of the VGIC, no additional parameter in + kvm_device_attr.addr. + KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES + save all LPI pending bits into guest RAM pending tables. + + The first kB of the pending table is not altered by this operation. + Errors: + -ENXIO: VGIC not properly configured as required prior to calling + this attribute + -ENODEV: no online VCPU + -ENOMEM: memory shortage when allocating vgic internal data + -EFAULT: Invalid guest ram access + -EBUSY: One or more VCPUS are running + + + KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO + Attributes: + The attr field of kvm_device_attr encodes the following values: + bits: | 63 .... 32 | 31 .... 10 | 9 .... 0 | + values: | mpidr | info | vINTID | + + The vINTID specifies which set of IRQs is reported on. + + The info field specifies which information userspace wants to get or set + using this interface. Currently we support the following info values: + + VGIC_LEVEL_INFO_LINE_LEVEL: + Get/Set the input level of the IRQ line for a set of 32 contiguously + numbered interrupts. + vINTID must be a multiple of 32. + + kvm_device_attr.addr points to a __u32 value which will contain a + bitmap where a set bit means the interrupt level is asserted. + + Bit[n] indicates the status for interrupt vINTID + n. + + SGIs and any interrupt with a higher ID than the number of interrupts + supported, will be RAZ/WI. LPIs are always edge-triggered and are + therefore not supported by this interface. + + PPIs are reported per VCPU as specified in the mpidr field, and SPIs are + reported with the same value regardless of the mpidr specified. + + The mpidr field encodes the CPU ID based on the affinity information in the + architecture defined MPIDR, and the field is encoded as follows: + | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 
32 | + | Aff3 | Aff2 | Aff1 | Aff0 | + Errors: + -EINVAL: vINTID is not multiple of 32 or + info field is not VGIC_LEVEL_INFO_LINE_LEVEL diff --git a/Documentation/virt/kvm/devices/arm-vgic.txt b/Documentation/virt/kvm/devices/arm-vgic.txt new file mode 100644 index 000000000000..97b6518148f8 --- /dev/null +++ b/Documentation/virt/kvm/devices/arm-vgic.txt @@ -0,0 +1,127 @@ +ARM Virtual Generic Interrupt Controller v2 (VGIC) +================================================== + +Device types supported: + KVM_DEV_TYPE_ARM_VGIC_V2 ARM Generic Interrupt Controller v2.0 + +Only one VGIC instance may be instantiated through either this API or the +legacy KVM_CREATE_IRQCHIP API. The created VGIC will act as the VM interrupt +controller, requiring emulated user-space devices to inject interrupts to the +VGIC instead of directly to CPUs. + +GICv3 implementations with hardware compatibility support allow creating a +guest GICv2 through this interface. For information on creating a guest GICv3 +device and guest ITS devices, see arm-vgic-v3.txt. It is not possible to +create both a GICv3 and GICv2 device on the same VM. + + +Groups: + KVM_DEV_ARM_VGIC_GRP_ADDR + Attributes: + KVM_VGIC_V2_ADDR_TYPE_DIST (rw, 64-bit) + Base address in the guest physical address space of the GIC distributor + register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V2. + This address needs to be 4K aligned and the region covers 4 KByte. + + KVM_VGIC_V2_ADDR_TYPE_CPU (rw, 64-bit) + Base address in the guest physical address space of the GIC virtual cpu + interface register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V2. + This address needs to be 4K aligned and the region covers 4 KByte. + Errors: + -E2BIG: Address outside of addressable IPA range + -EINVAL: Incorrectly aligned address + -EEXIST: Address already configured + -ENXIO: The group or attribute is unknown/unsupported for this device + or hardware support is missing. + -EFAULT: Invalid user pointer for attr->addr. + + KVM_DEV_ARM_VGIC_GRP_DIST_REGS + Attributes: + The attr field of kvm_device_attr encodes two values: + bits: | 63 .... 40 | 39 .. 32 | 31 .... 0 | + values: | reserved | vcpu_index | offset | + + All distributor regs are (rw, 32-bit) + + The offset is relative to the "Distributor base address" as defined in the + GICv2 specs. Getting or setting such a register has the same effect as + reading or writing the register on the actual hardware from the cpu whose + index is specified with the vcpu_index field. Note that most distributor + fields are not banked, but return the same value regardless of the + vcpu_index used to access the register. + + GICD_IIDR.Revision is updated when the KVM implementation of an emulated + GICv2 is changed in a way directly observable by the guest or userspace. + Userspace should read GICD_IIDR from KVM and write back the read value to + confirm its expected behavior is aligned with the KVM implementation. + Userspace should set GICD_IIDR before setting any other registers (both + KVM_DEV_ARM_VGIC_GRP_DIST_REGS and KVM_DEV_ARM_VGIC_GRP_CPU_REGS) to ensure + the expected behavior. Unless GICD_IIDR has been set from userspace, writes + to the interrupt group registers (GICD_IGROUPR) are ignored. + Errors: + -ENXIO: Getting or setting this register is not yet supported + -EBUSY: One or more VCPUs are running + -EINVAL: Invalid vcpu_index supplied + + KVM_DEV_ARM_VGIC_GRP_CPU_REGS + Attributes: + The attr field of kvm_device_attr encodes two values: + bits: | 63 .... 40 | 39 .. 32 | 31 .... 
0 | + values: | reserved | vcpu_index | offset | + + All CPU interface regs are (rw, 32-bit) + + The offset specifies the offset from the "CPU interface base address" as + defined in the GICv2 specs. Getting or setting such a register has the + same effect as reading or writing the register on the actual hardware. + + The Active Priorities Registers APRn are implementation defined, so we set a + fixed format for our implementation that fits with the model of a "GICv2 + implementation without the security extensions" which we present to the + guest. This interface always exposes four register APR[0-3] describing the + maximum possible 128 preemption levels. The semantics of the register + indicate if any interrupts in a given preemption level are in the active + state by setting the corresponding bit. + + Thus, preemption level X has one or more active interrupts if and only if: + + APRn[X mod 32] == 0b1, where n = X / 32 + + Bits for undefined preemption levels are RAZ/WI. + + Note that this differs from a CPU's view of the APRs on hardware in which + a GIC without the security extensions expose group 0 and group 1 active + priorities in separate register groups, whereas we show a combined view + similar to GICv2's GICH_APR. + + For historical reasons and to provide ABI compatibility with userspace we + export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask + field in the lower 5 bits of a word, meaning that userspace must always + use the lower 5 bits to communicate with the KVM device and must shift the + value left by 3 places to obtain the actual priority mask level. + + Errors: + -ENXIO: Getting or setting this register is not yet supported + -EBUSY: One or more VCPUs are running + -EINVAL: Invalid vcpu_index supplied + + KVM_DEV_ARM_VGIC_GRP_NR_IRQS + Attributes: + A value describing the number of interrupts (SGI, PPI and SPI) for + this GIC instance, ranging from 64 to 1024, in increments of 32. + + Errors: + -EINVAL: Value set is out of the expected range + -EBUSY: Value has already be set, or GIC has already been initialized + with default values. + + KVM_DEV_ARM_VGIC_GRP_CTRL + Attributes: + KVM_DEV_ARM_VGIC_CTRL_INIT + request the initialization of the VGIC or ITS, no additional parameter + in kvm_device_attr.addr. + Errors: + -ENXIO: VGIC not properly configured as required prior to calling + this attribute + -ENODEV: no online VCPU + -ENOMEM: memory shortage when allocating vgic internal data diff --git a/Documentation/virt/kvm/devices/mpic.txt b/Documentation/virt/kvm/devices/mpic.txt new file mode 100644 index 000000000000..8257397adc3c --- /dev/null +++ b/Documentation/virt/kvm/devices/mpic.txt @@ -0,0 +1,53 @@ +MPIC interrupt controller +========================= + +Device types supported: + KVM_DEV_TYPE_FSL_MPIC_20 Freescale MPIC v2.0 + KVM_DEV_TYPE_FSL_MPIC_42 Freescale MPIC v4.2 + +Only one MPIC instance, of any type, may be instantiated. The created +MPIC will act as the system interrupt controller, connecting to each +vcpu's interrupt inputs. + +Groups: + KVM_DEV_MPIC_GRP_MISC + Attributes: + KVM_DEV_MPIC_BASE_ADDR (rw, 64-bit) + Base address of the 256 KiB MPIC register space. Must be + naturally aligned. A value of zero disables the mapping. + Reset value is zero. + + KVM_DEV_MPIC_GRP_REGISTER (rw, 32-bit) + Access an MPIC register, as if the access were made from the guest. + "attr" is the byte offset into the MPIC register space. Accesses + must be 4-byte aligned. 
+ + MSIs may be signaled by using this attribute group to write + to the relevant MSIIR. + + KVM_DEV_MPIC_GRP_IRQ_ACTIVE (rw, 32-bit) + IRQ input line for each standard openpic source. 0 is inactive and 1 + is active, regardless of interrupt sense. + + For edge-triggered interrupts: Writing 1 is considered an activating + edge, and writing 0 is ignored. Reading returns 1 if a previously + signaled edge has not been acknowledged, and 0 otherwise. + + "attr" is the IRQ number. IRQ numbers for standard sources are the + byte offset of the relevant IVPR from EIVPR0, divided by 32. + +IRQ Routing: + + The MPIC emulation supports IRQ routing. Only a single MPIC device can + be instantiated. Once that device has been created, it's available as + irqchip id 0. + + This irqchip 0 has 256 interrupt pins, which expose the interrupts in + the main array of interrupt sources (a.k.a. "SRC" interrupts). + + The numbering is the same as the MPIC device tree binding -- based on + the register offset from the beginning of the sources array, without + regard to any subdivisions in chip documentation such as "internal" + or "external" interrupts. + + Access to non-SRC interrupts is not implemented through IRQ routing mechanisms. diff --git a/Documentation/virt/kvm/devices/s390_flic.txt b/Documentation/virt/kvm/devices/s390_flic.txt new file mode 100644 index 000000000000..a4e20a090174 --- /dev/null +++ b/Documentation/virt/kvm/devices/s390_flic.txt @@ -0,0 +1,163 @@ +FLIC (floating interrupt controller) +==================================== + +FLIC handles floating (non per-cpu) interrupts, i.e. I/O, service and some +machine check interruptions. All interrupts are stored in a per-vm list of +pending interrupts. FLIC performs operations on this list. + +Only one FLIC instance may be instantiated. + +FLIC provides support to +- add interrupts (KVM_DEV_FLIC_ENQUEUE) +- inspect currently pending interrupts (KVM_FLIC_GET_ALL_IRQS) +- purge all pending floating interrupts (KVM_DEV_FLIC_CLEAR_IRQS) +- purge one pending floating I/O interrupt (KVM_DEV_FLIC_CLEAR_IO_IRQ) +- enable/disable for the guest transparent async page faults +- register and modify adapter interrupt sources (KVM_DEV_FLIC_ADAPTER_*) +- modify AIS (adapter-interruption-suppression) mode state (KVM_DEV_FLIC_AISM) +- inject adapter interrupts on a specified adapter (KVM_DEV_FLIC_AIRQ_INJECT) +- get/set all AIS mode states (KVM_DEV_FLIC_AISM_ALL) + +Groups: + KVM_DEV_FLIC_ENQUEUE + Passes a buffer and length into the kernel which are then injected into + the list of pending interrupts. + attr->addr contains the pointer to the buffer and attr->attr contains + the length of the buffer. + The format of the data structure kvm_s390_irq as it is copied from userspace + is defined in usr/include/linux/kvm.h. + + KVM_DEV_FLIC_GET_ALL_IRQS + Copies all floating interrupts into a buffer provided by userspace. + When the buffer is too small it returns -ENOMEM, which is the indication + for userspace to try again with a bigger buffer. + -ENOBUFS is returned when the allocation of a kernelspace buffer has + failed. + -EFAULT is returned when copying data to userspace failed. + All interrupts remain pending, i.e. are not deleted from the list of + currently pending interrupts. + attr->addr contains the userspace address of the buffer into which all + interrupt data will be copied. + attr->attr contains the size of the buffer in bytes. + + KVM_DEV_FLIC_CLEAR_IRQS + Simply deletes all elements from the list of currently pending floating + interrupts. 
No interrupts are injected into the guest. + + KVM_DEV_FLIC_CLEAR_IO_IRQ + Deletes one (if any) I/O interrupt for a subchannel identified by the + subsystem identification word passed via the buffer specified by + attr->addr (address) and attr->attr (length). + + KVM_DEV_FLIC_APF_ENABLE + Enables async page faults for the guest. So in case of a major page fault + the host is allowed to handle this async and continues the guest. + + KVM_DEV_FLIC_APF_DISABLE_WAIT + Disables async page faults for the guest and waits until already pending + async page faults are done. This is necessary to trigger a completion interrupt + for every init interrupt before migrating the interrupt list. + + KVM_DEV_FLIC_ADAPTER_REGISTER + Register an I/O adapter interrupt source. Takes a kvm_s390_io_adapter + describing the adapter to register: + +struct kvm_s390_io_adapter { + __u32 id; + __u8 isc; + __u8 maskable; + __u8 swap; + __u8 flags; +}; + + id contains the unique id for the adapter, isc the I/O interruption subclass + to use, maskable whether this adapter may be masked (interrupts turned off), + swap whether the indicators need to be byte swapped, and flags contains + further characteristics of the adapter. + Currently defined values for 'flags' are: + - KVM_S390_ADAPTER_SUPPRESSIBLE: adapter is subject to AIS + (adapter-interrupt-suppression) facility. This flag only has an effect if + the AIS capability is enabled. + Unknown flag values are ignored. + + + KVM_DEV_FLIC_ADAPTER_MODIFY + Modifies attributes of an existing I/O adapter interrupt source. Takes + a kvm_s390_io_adapter_req specifying the adapter and the operation: + +struct kvm_s390_io_adapter_req { + __u32 id; + __u8 type; + __u8 mask; + __u16 pad0; + __u64 addr; +}; + + id specifies the adapter and type the operation. The supported operations + are: + + KVM_S390_IO_ADAPTER_MASK + mask or unmask the adapter, as specified in mask + + KVM_S390_IO_ADAPTER_MAP + perform a gmap translation for the guest address provided in addr, + pin a userspace page for the translated address and add it to the + list of mappings + Note: A new mapping will be created unconditionally; therefore, + the calling code should avoid making duplicate mappings. + + KVM_S390_IO_ADAPTER_UNMAP + release a userspace page for the translated address specified in addr + from the list of mappings + + KVM_DEV_FLIC_AISM + modify the adapter-interruption-suppression mode for a given isc if the + AIS capability is enabled. Takes a kvm_s390_ais_req describing: + +struct kvm_s390_ais_req { + __u8 isc; + __u16 mode; +}; + + isc contains the target I/O interruption subclass, mode the target + adapter-interruption-suppression mode. The following modes are + currently supported: + - KVM_S390_AIS_MODE_ALL: ALL-Interruptions Mode, i.e. airq injection + is always allowed; + - KVM_S390_AIS_MODE_SINGLE: SINGLE-Interruption Mode, i.e. airq + injection is only allowed once and the following adapter interrupts + will be suppressed until the mode is set again to ALL-Interruptions + or SINGLE-Interruption mode. + + KVM_DEV_FLIC_AIRQ_INJECT + Inject adapter interrupts on a specified adapter. + attr->attr contains the unique id for the adapter, which allows for + adapter-specific checks and actions. + For adapters subject to AIS, handle the airq injection suppression for + an isc according to the adapter-interruption-suppression mode on condition + that the AIS capability is enabled. + + KVM_DEV_FLIC_AISM_ALL + Gets or sets the adapter-interruption-suppression mode for all ISCs. 
Takes
+  a kvm_s390_ais_all describing:
+
+struct kvm_s390_ais_all {
+       __u8 simm; /* Single-Interruption-Mode mask */
+       __u8 nimm; /* No-Interruption-Mode mask */
+};
+
+  simm contains the Single-Interruption-Mode mask for all ISCs, nimm contains
+  the No-Interruption-Mode mask for all ISCs. Each bit in simm and nimm
+  corresponds to an ISC (MSB0 bit 0 to ISC 0 and so on). The combination of
+  the simm bit and the nimm bit gives the AIS mode for an ISC.
+
+  KVM_DEV_FLIC_AISM_ALL is indicated by KVM_CAP_S390_AIS_MIGRATION.
+
+Note: The KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR device ioctls executed on
+FLIC with an unknown group or attribute give the error code EINVAL (instead of
+ENXIO, as specified in the API documentation). It is therefore not possible to
+conclude that a FLIC operation is unavailable based on the error code resulting
+from a usage attempt.
+
+Note: The KVM_DEV_FLIC_CLEAR_IO_IRQ ioctl will return EINVAL in case a zero
+schid is specified.
diff --git a/Documentation/virt/kvm/devices/vcpu.txt b/Documentation/virt/kvm/devices/vcpu.txt
new file mode 100644
index 000000000000..2b5dab16c4f2
--- /dev/null
+++ b/Documentation/virt/kvm/devices/vcpu.txt
@@ -0,0 +1,62 @@
+Generic vcpu interface
+======================
+
+The virtual cpu "device" also accepts the ioctls KVM_SET_DEVICE_ATTR,
+KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same
+struct kvm_device_attr as other devices, but targets VCPU-wide settings
+and controls.
+
+The groups and attributes per virtual cpu, if any, are architecture specific.
+
+1. GROUP: KVM_ARM_VCPU_PMU_V3_CTRL
+Architectures: ARM64
+
+1.1. ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_IRQ
+Parameters: in kvm_device_attr.addr the address for the PMU overflow interrupt
+            is a pointer to an int
+Returns: -EBUSY: The PMU overflow interrupt is already set
+         -ENXIO: The overflow interrupt is not set when attempting to get it
+         -ENODEV: PMUv3 not supported
+         -EINVAL: Invalid PMU overflow interrupt number supplied or
+                  trying to set the IRQ number without using an in-kernel
+                  irqchip.
+
+A value describing the PMUv3 (Performance Monitor Unit v3) overflow interrupt
+number for this vcpu. This interrupt can be a PPI or an SPI, but the interrupt
+type must be the same for each vcpu. As a PPI, the interrupt number is the same
+for all vcpus, while as an SPI it must be a separate number per vcpu.
+
+1.2 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_INIT
+Parameters: no additional parameter in kvm_device_attr.addr
+Returns: -ENODEV: PMUv3 not supported or GIC not initialized
+         -ENXIO: PMUv3 not properly configured or in-kernel irqchip not
+                 configured as required prior to calling this attribute
+         -EBUSY: PMUv3 already initialized
+
+Request the initialization of the PMUv3. If using the PMUv3 with an in-kernel
+virtual GIC implementation, this must be done after initializing the in-kernel
+irqchip.
+
+
+2. GROUP: KVM_ARM_VCPU_TIMER_CTRL
+Architectures: ARM, ARM64
+
+2.1. ATTRIBUTE: KVM_ARM_VCPU_TIMER_IRQ_VTIMER
+2.2. ATTRIBUTE: KVM_ARM_VCPU_TIMER_IRQ_PTIMER
+Parameters: in kvm_device_attr.addr the address for the timer interrupt is a
+            pointer to an int
+Returns: -EINVAL: Invalid timer interrupt number
+         -EBUSY: One or more VCPUs have already run
+
+A value describing the architected timer interrupt number when connected to an
+in-kernel virtual GIC. This must be a PPI (16 <= intid < 32). Setting the
+attribute overrides the default values (see below).
+ +KVM_ARM_VCPU_TIMER_IRQ_VTIMER: The EL1 virtual timer intid (default: 27) +KVM_ARM_VCPU_TIMER_IRQ_PTIMER: The EL1 physical timer intid (default: 30) + +Setting the same PPI for different timers will prevent the VCPUs from running. +Setting the interrupt number on a VCPU configures all VCPUs created at that +time to use the number provided for a given timer, overwriting any previously +configured values on other VCPUs. Userspace should configure the interrupt +numbers on at least one VCPU after creating all VCPUs and before running any +VCPUs. diff --git a/Documentation/virt/kvm/devices/vfio.txt b/Documentation/virt/kvm/devices/vfio.txt new file mode 100644 index 000000000000..528c77c8022c --- /dev/null +++ b/Documentation/virt/kvm/devices/vfio.txt @@ -0,0 +1,36 @@ +VFIO virtual device +=================== + +Device types supported: + KVM_DEV_TYPE_VFIO + +Only one VFIO instance may be created per VM. The created device +tracks VFIO groups in use by the VM and features of those groups +important to the correctness and acceleration of the VM. As groups +are enabled and disabled for use by the VM, KVM should be updated +about their presence. When registered with KVM, a reference to the +VFIO-group is held by KVM. + +Groups: + KVM_DEV_VFIO_GROUP + +KVM_DEV_VFIO_GROUP attributes: + KVM_DEV_VFIO_GROUP_ADD: Add a VFIO group to VFIO-KVM device tracking + kvm_device_attr.addr points to an int32_t file descriptor + for the VFIO group. + KVM_DEV_VFIO_GROUP_DEL: Remove a VFIO group from VFIO-KVM device tracking + kvm_device_attr.addr points to an int32_t file descriptor + for the VFIO group. + KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: attaches a guest visible TCE table + allocated by sPAPR KVM. + kvm_device_attr.addr points to a struct: + + struct kvm_vfio_spapr_tce { + __s32 groupfd; + __s32 tablefd; + }; + + where + @groupfd is a file descriptor for a VFIO group; + @tablefd is a file descriptor for a TCE table allocated via + KVM_CREATE_SPAPR_TCE. diff --git a/Documentation/virt/kvm/devices/vm.txt b/Documentation/virt/kvm/devices/vm.txt new file mode 100644 index 000000000000..4ffb82b02468 --- /dev/null +++ b/Documentation/virt/kvm/devices/vm.txt @@ -0,0 +1,270 @@ +Generic vm interface +==================================== + +The virtual machine "device" also accepts the ioctls KVM_SET_DEVICE_ATTR, +KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same +struct kvm_device_attr as other devices, but targets VM-wide settings +and controls. + +The groups and attributes per virtual machine, if any, are architecture +specific. + +1. GROUP: KVM_S390_VM_MEM_CTRL +Architectures: s390 + +1.1. ATTRIBUTE: KVM_S390_VM_MEM_ENABLE_CMMA +Parameters: none +Returns: -EBUSY if a vcpu is already defined, otherwise 0 + +Enables Collaborative Memory Management Assist (CMMA) for the virtual machine. + +1.2. ATTRIBUTE: KVM_S390_VM_MEM_CLR_CMMA +Parameters: none +Returns: -EINVAL if CMMA was not enabled + 0 otherwise + +Clear the CMMA status for all guest pages, so any pages the guest marked +as unused are again used any may not be reclaimed by the host. + +1.3. 
ATTRIBUTE: KVM_S390_VM_MEM_LIMIT_SIZE
+Parameters: in attr->addr the address for the new limit of guest memory
+Returns: -EFAULT if the given address is not accessible
+         -EINVAL if the virtual machine is of type UCONTROL
+         -E2BIG if the given guest memory is too big for that machine
+         -EBUSY if a vcpu is already defined
+         -ENOMEM if not enough memory is available for a new shadow guest mapping
+          0 otherwise
+
+Allows userspace to query the actual limit and set a new limit for
+the maximum guest memory size. The limit will be rounded up to
+2048 MB, 4096 GB, 8192 TB respectively, as this limit is governed by
+the number of page table levels. In the case that there is no limit we will set
+the limit to KVM_S390_NO_MEM_LIMIT (U64_MAX).
+
+2. GROUP: KVM_S390_VM_CPU_MODEL
+Architectures: s390
+
+2.1. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE (r/o)
+
+Allows user space to retrieve machine and kvm specific cpu related information:
+
+struct kvm_s390_vm_cpu_machine {
+       __u64 cpuid;           # CPUID of host
+       __u32 ibc;             # IBC level range offered by host
+       __u8  pad[4];
+       __u64 fac_mask[256];   # set of cpu facilities enabled by KVM
+       __u64 fac_list[256];   # set of cpu facilities offered by host
+}
+
+Parameters: address of buffer to store the machine related cpu data
+            of type struct kvm_s390_vm_cpu_machine*
+Returns:    -EFAULT if the given address is not accessible from kernel space
+            -ENOMEM if not enough memory is available to process the ioctl
+            0 in case of success
+
+2.2. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR (r/w)
+
+Allows user space to retrieve or request to change cpu related information for a vcpu:
+
+struct kvm_s390_vm_cpu_processor {
+       __u64 cpuid;           # CPUID currently (to be) used by this vcpu
+       __u16 ibc;             # IBC level currently (to be) used by this vcpu
+       __u8  pad[6];
+       __u64 fac_list[256];   # set of cpu facilities currently (to be) used
+                              # by this vcpu
+}
+
+KVM does not enforce or limit the cpu model data in any form. Take the
+information retrieved by means of KVM_S390_VM_CPU_MACHINE as a hint for
+reasonable configuration setups. Instruction interceptions triggered by
+additionally set facility bits that are not handled by KVM need to be
+implemented in the VM driver code.
+
+Parameters: address of buffer to store/set the processor related cpu
+            data of type struct kvm_s390_vm_cpu_processor*.
+Returns:    -EBUSY in case 1 or more vcpus are already activated (only in write case)
+            -EFAULT if the given address is not accessible from kernel space
+            -ENOMEM if not enough memory is available to process the ioctl
+            0 in case of success
+
+2.3. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE_FEAT (r/o)
+
+Allows user space to retrieve available cpu features. A feature is available if
+provided by the hardware and supported by kvm. In theory, cpu features could
+even be completely emulated by kvm.
+
+struct kvm_s390_vm_cpu_feat {
+       __u64 feat[16]; # Bitmap (1 = feature available), MSB 0 bit numbering
+};
+
+Parameters: address of a buffer to load the feature list from.
+Returns:    -EFAULT if the given address is not accessible from kernel space.
+            0 in case of success.
+
+2.4. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR_FEAT (r/w)
+
+Allows user space to retrieve or change enabled cpu features for all VCPUs of a
+VM. Features that are not available cannot be enabled.
+
+See 2.3. for a description of the parameter struct.
+
+Parameters: address of a buffer to store/load the feature list from.
+Returns:    -EFAULT if the given address is not accessible from kernel space.
+            -EINVAL if a cpu feature that is not available is to be enabled.
+ -EBUSY if at least one VCPU has already been defined. + 0 in case of success. + +2.5. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE_SUBFUNC (r/o) + +Allows user space to retrieve available cpu subfunctions without any filtering +done by a set IBC. These subfunctions are indicated to the guest VCPU via +query or "test bit" subfunctions and used e.g. by cpacf functions, plo and ptff. + +A subfunction block is only valid if KVM_S390_VM_CPU_MACHINE contains the +STFL(E) bit introducing the affected instruction. If the affected instruction +indicates subfunctions via a "query subfunction", the response block is +contained in the returned struct. If the affected instruction +indicates subfunctions via a "test bit" mechanism, the subfunction codes are +contained in the returned struct in MSB 0 bit numbering. + +struct kvm_s390_vm_cpu_subfunc { + u8 plo[32]; # always valid (ESA/390 feature) + u8 ptff[16]; # valid with TOD-clock steering + u8 kmac[16]; # valid with Message-Security-Assist + u8 kmc[16]; # valid with Message-Security-Assist + u8 km[16]; # valid with Message-Security-Assist + u8 kimd[16]; # valid with Message-Security-Assist + u8 klmd[16]; # valid with Message-Security-Assist + u8 pckmo[16]; # valid with Message-Security-Assist-Extension 3 + u8 kmctr[16]; # valid with Message-Security-Assist-Extension 4 + u8 kmf[16]; # valid with Message-Security-Assist-Extension 4 + u8 kmo[16]; # valid with Message-Security-Assist-Extension 4 + u8 pcc[16]; # valid with Message-Security-Assist-Extension 4 + u8 ppno[16]; # valid with Message-Security-Assist-Extension 5 + u8 kma[16]; # valid with Message-Security-Assist-Extension 8 + u8 kdsa[16]; # valid with Message-Security-Assist-Extension 9 + u8 reserved[1792]; # reserved for future instructions +}; + +Parameters: address of a buffer to load the subfunction blocks from. +Returns: -EFAULT if the given address is not accessible from kernel space. + 0 in case of success. + +2.6. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR_SUBFUNC (r/w) + +Allows user space to retrieve or change cpu subfunctions to be indicated for +all VCPUs of a VM. This attribute will only be available if kernel and +hardware support are in place. + +The kernel uses the configured subfunction blocks for indication to +the guest. A subfunction block will only be used if the associated STFL(E) bit +has not been disabled by user space (so the instruction to be queried is +actually available for the guest). + +As long as no data has been written, a read will fail. The IBC will be used +to determine available subfunctions in this case, this will guarantee backward +compatibility. + +See 2.5. for a description of the parameter struct. + +Parameters: address of a buffer to store/load the subfunction blocks from. +Returns: -EFAULT if the given address is not accessible from kernel space. + -EINVAL when reading, if there was no write yet. + -EBUSY if at least one VCPU has already been defined. + 0 in case of success. + +3. GROUP: KVM_S390_VM_TOD +Architectures: s390 + +3.1. ATTRIBUTE: KVM_S390_VM_TOD_HIGH + +Allows user space to set/get the TOD clock extension (u8) (superseded by +KVM_S390_VM_TOD_EXT). + +Parameters: address of a buffer in user space to store the data (u8) to +Returns: -EFAULT if the given address is not accessible from kernel space + -EINVAL if setting the TOD clock extension to != 0 is not supported + +3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW + +Allows user space to set/get bits 0-63 of the TOD clock register as defined in +the POP (u64). 
+ +Parameters: address of a buffer in user space to store the data (u64) to +Returns: -EFAULT if the given address is not accessible from kernel space + +3.3. ATTRIBUTE: KVM_S390_VM_TOD_EXT +Allows user space to set/get bits 0-63 of the TOD clock register as defined in +the POP (u64). If the guest CPU model supports the TOD clock extension (u8), it +also allows user space to get/set it. If the guest CPU model does not support +it, it is stored as 0 and not allowed to be set to a value != 0. + +Parameters: address of a buffer in user space to store the data + (kvm_s390_vm_tod_clock) to +Returns: -EFAULT if the given address is not accessible from kernel space + -EINVAL if setting the TOD clock extension to != 0 is not supported + +4. GROUP: KVM_S390_VM_CRYPTO +Architectures: s390 + +4.1. ATTRIBUTE: KVM_S390_VM_CRYPTO_ENABLE_AES_KW (w/o) + +Allows user space to enable aes key wrapping, including generating a new +wrapping key. + +Parameters: none +Returns: 0 + +4.2. ATTRIBUTE: KVM_S390_VM_CRYPTO_ENABLE_DEA_KW (w/o) + +Allows user space to enable dea key wrapping, including generating a new +wrapping key. + +Parameters: none +Returns: 0 + +4.3. ATTRIBUTE: KVM_S390_VM_CRYPTO_DISABLE_AES_KW (w/o) + +Allows user space to disable aes key wrapping, clearing the wrapping key. + +Parameters: none +Returns: 0 + +4.4. ATTRIBUTE: KVM_S390_VM_CRYPTO_DISABLE_DEA_KW (w/o) + +Allows user space to disable dea key wrapping, clearing the wrapping key. + +Parameters: none +Returns: 0 + +5. GROUP: KVM_S390_VM_MIGRATION +Architectures: s390 + +5.1. ATTRIBUTE: KVM_S390_VM_MIGRATION_STOP (w/o) + +Allows userspace to stop migration mode, needed for PGSTE migration. +Setting this attribute when migration mode is not active will have no +effects. + +Parameters: none +Returns: 0 + +5.2. ATTRIBUTE: KVM_S390_VM_MIGRATION_START (w/o) + +Allows userspace to start migration mode, needed for PGSTE migration. +Setting this attribute when migration mode is already active will have +no effects. + +Parameters: none +Returns: -ENOMEM if there is not enough free memory to start migration mode + -EINVAL if the state of the VM is invalid (e.g. no memory defined) + 0 in case of success. + +5.3. ATTRIBUTE: KVM_S390_VM_MIGRATION_STATUS (r/o) + +Allows userspace to query the status of migration mode. + +Parameters: address of a buffer in user space to store the data (u64) to; + the data itself is either 0 if migration mode is disabled or 1 + if it is enabled +Returns: -EFAULT if the given address is not accessible from kernel space + 0 in case of success. diff --git a/Documentation/virt/kvm/devices/xics.txt b/Documentation/virt/kvm/devices/xics.txt new file mode 100644 index 000000000000..42864935ac5d --- /dev/null +++ b/Documentation/virt/kvm/devices/xics.txt @@ -0,0 +1,66 @@ +XICS interrupt controller + +Device type supported: KVM_DEV_TYPE_XICS + +Groups: + KVM_DEV_XICS_SOURCES + Attributes: One per interrupt source, indexed by the source number. + +This device emulates the XICS (eXternal Interrupt Controller +Specification) defined in PAPR. The XICS has a set of interrupt +sources, each identified by a 20-bit source number, and a set of +Interrupt Control Presentation (ICP) entities, also called "servers", +each associated with a virtual CPU. + +The ICP entities are created by enabling the KVM_CAP_IRQ_ARCH +capability for each vcpu, specifying KVM_CAP_IRQ_XICS in args[0] and +the interrupt server number (i.e. the vcpu number from the XICS's +point of view) in args[1] of the kvm_enable_cap struct. 
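For illustration, a hedged sketch of that per-vcpu call follows. The encoding
shown is the one commonly used by userspace (for example QEMU): the XICS
device fd returned by KVM_CREATE_DEVICE goes in args[0] and the server number
in args[1]. vcpu_fd, xics_fd and server are placeholders for values the caller
already holds; verify the encoding against your kernel headers before relying
on it.

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int connect_vcpu_to_xics(int vcpu_fd, int xics_fd,
                                    unsigned long server)
    {
            struct kvm_enable_cap cap;

            memset(&cap, 0, sizeof(cap));
            cap.cap = KVM_CAP_IRQ_XICS;
            /* fd from KVM_CREATE_DEVICE with KVM_DEV_TYPE_XICS */
            cap.args[0] = xics_fd;
            /* this vcpu's interrupt server number */
            cap.args[1] = server;

            return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
    }

On success the vcpu is connected to the XICS as an ICP, whose state then
becomes accessible through the ONE_REG interface described next.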
Each ICP has
+64 bits of state which can be read and written using the
+KVM_GET_ONE_REG and KVM_SET_ONE_REG ioctls on the vcpu. The 64 bit
+state word has the following bitfields, starting at the
+least-significant end of the word:
+
+* Unused, 16 bits
+
+* Pending interrupt priority, 8 bits
+  Zero is the highest priority, 255 means no interrupt is pending.
+
+* Pending IPI (inter-processor interrupt) priority, 8 bits
+  Zero is the highest priority, 255 means no IPI is pending.
+
+* Pending interrupt source number, 24 bits
+  Zero means no interrupt pending, 2 means an IPI is pending
+
+* Current processor priority, 8 bits
+  Zero is the highest priority, meaning no interrupts can be
+  delivered, and 255 is the lowest priority.
+
+Each source has 64 bits of state that can be read and written using
+the KVM_GET_DEVICE_ATTR and KVM_SET_DEVICE_ATTR ioctls, specifying the
+KVM_DEV_XICS_SOURCES attribute group, with the attribute number being
+the interrupt source number. The 64 bit state word has the following
+bitfields, starting from the least-significant end of the word:
+
+* Destination (server number), 32 bits
+  This specifies where the interrupt should be sent, and is the
+  interrupt server number specified for the destination vcpu.
+
+* Priority, 8 bits
+  This is the priority specified for this interrupt source, where 0 is
+  the highest priority and 255 is the lowest. An interrupt with a
+  priority of 255 will never be delivered.
+
+* Level sensitive flag, 1 bit
+  This bit is 1 for a level-sensitive interrupt source, or 0 for
+  edge-sensitive (or MSI).
+
+* Masked flag, 1 bit
+  This bit is set to 1 if the interrupt is masked (cannot be delivered
+  regardless of its priority), for example by the ibm,int-off RTAS
+  call, or 0 if it is not masked.
+
+* Pending flag, 1 bit
+  This bit is 1 if the source has a pending interrupt, otherwise 0.
+
+Only one XICS instance may be created per VM.
diff --git a/Documentation/virt/kvm/devices/xive.txt b/Documentation/virt/kvm/devices/xive.txt
new file mode 100644
index 000000000000..9a24a4525253
--- /dev/null
+++ b/Documentation/virt/kvm/devices/xive.txt
@@ -0,0 +1,197 @@
+POWER9 eXternal Interrupt Virtualization Engine (XIVE Gen1)
+===========================================================
+
+Device types supported:
+  KVM_DEV_TYPE_XIVE     POWER9 XIVE Interrupt Controller generation 1
+
+This device acts as a VM interrupt controller. It provides the KVM
+interface to configure the interrupt sources of a VM in the underlying
+POWER9 XIVE interrupt controller.
+
+Only one XIVE instance may be instantiated. A guest XIVE device
+requires a POWER9 host and the guest OS should have support for the
+XIVE native exploitation interrupt mode. If not, it should run using
+the legacy interrupt mode, referred to as XICS (POWER7/8).
+
+* Device Mappings
+
+  The KVM device exposes different MMIO ranges of the XIVE HW which
+  are required for interrupt management. These are exposed to the
+  guest in VMAs populated with a custom VM fault handler.
+
+  1. Thread Interrupt Management Area (TIMA)
+
+  Each thread has an associated Thread Interrupt Management context
+  composed of a set of registers. These registers let the thread
+  handle priority management and interrupt acknowledgment. The most
+  important are:
+
+      - Interrupt Pending Buffer     (IPB)
+      - Current Processor Priority   (CPPR)
+      - Notification Source Register (NSR)
+
+  They are exposed to software in four different pages, each providing
+  a view with a different privilege.
+The first page is for the
+physical thread context and the second for the hypervisor. Only the
+third (operating system) and the fourth (user level) are exposed to
+the guest.
+
+  2. Event State Buffer (ESB)
+
+  Each source is associated with an Event State Buffer (ESB) with an
+  even/odd pair of pages which provide commands to manage the source:
+  to trigger, to EOI, or to turn off the source, for instance.
+
+  3. Device pass-through
+
+  When a device is passed-through into the guest, the source
+  interrupts are from a different HW controller (PHB4) and the ESB
+  pages exposed to the guest should accommodate this change.
+
+  The passthru_irq helpers, kvmppc_xive_set_mapped() and
+  kvmppc_xive_clr_mapped(), are called when the device HW irqs are
+  mapped into or unmapped from the guest IRQ number space. The KVM
+  device extends these helpers to clear the ESB pages of the guest IRQ
+  number being mapped and then lets the VM fault handler repopulate.
+  The handler will insert the ESB page corresponding to the HW
+  interrupt of the device being passed-through, or the initial IPI ESB
+  page if the device has been removed.
+
+  The ESB remapping is fully transparent to the guest and the OS
+  device driver. All handling is done within VFIO and the above
+  helpers in KVM-PPC.
+
+* Groups:
+
+  1. KVM_DEV_XIVE_GRP_CTRL
+  Provides global controls on the device
+  Attributes:
+    1.1 KVM_DEV_XIVE_RESET (write only)
+    Resets the interrupt controller configuration for sources and event
+    queues. To be used by kexec and kdump.
+    Errors: none
+
+    1.2 KVM_DEV_XIVE_EQ_SYNC (write only)
+    Syncs all the sources and queues and marks the EQ pages dirty. This
+    is to make sure that a consistent memory state is captured when
+    migrating the VM.
+    Errors: none
+
+  2. KVM_DEV_XIVE_GRP_SOURCE (write only)
+  Initializes a new source in the XIVE device and masks it.
+  Attributes:
+    Interrupt source number  (64-bit)
+  The kvm_device_attr.addr points to a __u64 value:
+  bits:     | 63   ....  2 |   1   |   0
+  values:   |    unused    | level | type
+  - type:  0:MSI 1:LSI
+  - level: assertion level in case of an LSI.
+  Errors:
+    -E2BIG:  Interrupt source number is out of range
+    -ENOMEM: Could not create a new source block
+    -EFAULT: Invalid user pointer for attr->addr.
+    -ENXIO:  Could not allocate underlying HW interrupt
+
+  3. KVM_DEV_XIVE_GRP_SOURCE_CONFIG (write only)
+  Configures source targeting
+  Attributes:
+    Interrupt source number  (64-bit)
+  The kvm_device_attr.addr points to a __u64 value:
+  bits:     | 63   ....  33 |  32  | 31 .. 3 |  2 .. 0
+  values:   |     eisn      | mask |  server | priority
+  - priority: 0-7 interrupt priority level
+  - server: CPU number chosen to handle the interrupt
+  - mask: mask flag (unused)
+  - eisn: Effective Interrupt Source Number
+  Errors:
+    -ENOENT: Unknown source number
+    -EINVAL: Source number not initialized
+    -EINVAL: Invalid priority
+    -EINVAL: Invalid CPU number.
+    -EFAULT: Invalid user pointer for attr->addr.
+    -ENXIO:  CPU event queues not configured or configuration of the
+             underlying HW interrupt failed
+    -EBUSY:  No CPU available to serve interrupt
+
+  4. KVM_DEV_XIVE_GRP_EQ_CONFIG (read-write)
+  Configures an event queue of a CPU
+  Attributes:
+    EQ descriptor identifier (64-bit)
+  The EQ descriptor identifier is a tuple (server, priority):
+  bits:     | 63   ....  32 | 31 .. 3 |  2 .. 0
+  values:   |    unused     |  server | priority
+  The kvm_device_attr.addr points to:
+    struct kvm_ppc_xive_eq {
+	__u32 flags;
+	__u32 qshift;
+	__u64 qaddr;
+	__u32 qtoggle;
+	__u32 qindex;
+	__u8  pad[40];
+    };
+  - flags: queue flags
+    KVM_XIVE_EQ_ALWAYS_NOTIFY (required)
+	forces notification without using the coalescing mechanism
+	provided by the XIVE END ESBs.
+  - qshift: queue size (power of 2)
+  - qaddr: real address of queue
+  - qtoggle: current queue toggle bit
+  - qindex: current queue index
+  - pad: reserved for future use
+  Errors:
+    -ENOENT: Invalid CPU number
+    -EINVAL: Invalid priority
+    -EINVAL: Invalid flags
+    -EINVAL: Invalid queue size
+    -EINVAL: Invalid queue address
+    -EFAULT: Invalid user pointer for attr->addr.
+    -EIO:    Configuration of the underlying HW failed
+
+  5. KVM_DEV_XIVE_GRP_SOURCE_SYNC (write only)
+  Synchronizes the source to flush event notifications
+  Attributes:
+    Interrupt source number  (64-bit)
+  Errors:
+    -ENOENT: Unknown source number
+    -EINVAL: Source number not initialized
+
+* VCPU state
+
+  The XIVE IC maintains VP interrupt state in an internal structure
+  called the NVT. When a VP is not dispatched on a HW processor
+  thread, this structure can be updated by HW if the VP is the target
+  of an event notification.
+
+  It is important for migration to capture the cached IPB from the NVT
+  as it synthesizes the priorities of the pending interrupts. We
+  capture a bit more to report debug information.
+
+  KVM_REG_PPC_VP_STATE (2 * 64bits)
+  bits:     |  63  ....  32  |  31  ....  0  |
+  values:   |   TIMA word0   |   TIMA word1  |
+  bits:     | 127       ..........       64  |
+  values:   |            unused              |
+
+* Migration:
+
+  Saving the state of a VM using the XIVE native exploitation mode
+  should follow a specific sequence. When the VM is stopped:
+
+  1. Mask all sources (PQ=01) to stop the flow of events.
+
+  2. Sync the XIVE device with the KVM control KVM_DEV_XIVE_EQ_SYNC to
+  flush any in-flight event notification and to stabilize the EQs. At
+  this stage, the EQ pages are marked dirty to make sure they are
+  transferred in the migration sequence.
+
+  3. Capture the state of the source targeting, the EQ configuration
+  and the state of the thread interrupt context registers.
+
+  Restore is similar:
+
+  1. Restore the EQ configuration, as targeting depends on it.
+  2. Restore targeting.
+  3. Restore the thread interrupt contexts.
+  4. Restore the source states.
+  5. Let the vCPU run.
diff --git a/Documentation/virt/kvm/halt-polling.txt b/Documentation/virt/kvm/halt-polling.txt
new file mode 100644
index 000000000000..4f791b128dd2
--- /dev/null
+++ b/Documentation/virt/kvm/halt-polling.txt
@@ -0,0 +1,136 @@
+The KVM halt polling system
+===========================
+
+The KVM halt polling system provides a feature within KVM whereby the latency
+of a guest can, under some circumstances, be reduced by polling in the host
+for some time period after the guest has elected to no longer run by ceding.
+That is, when a guest vcpu has ceded, or in the case of powerpc when all of the
+vcpus of a single vcore have ceded, the host kernel polls for wakeup conditions
+before giving up the cpu to the scheduler in order to let something else run.
+
+Polling provides a latency advantage in cases where the guest can be run again
+very quickly by at least saving us a trip through the scheduler, normally on
+the order of a few microseconds, although performance benefits are workload
+dependent.
+In the event that no wakeup source arrives during the polling interval, or
+some other task on the runqueue becomes runnable, the scheduler is invoked.
+Thus halt polling is especially useful on workloads with very short wakeup
+periods, where the time spent halt polling is minimised and the time savings
+of not invoking the scheduler are significant.
+
+The generic halt polling code is implemented in:
+
+	virt/kvm/kvm_main.c: kvm_vcpu_block()
+
+The powerpc kvm-hv specific case is implemented in:
+
+	arch/powerpc/kvm/book3s_hv.c: kvmppc_vcore_blocked()
+
+Halt Polling Interval
+=====================
+
+The maximum time for which to poll before invoking the scheduler, referred to
+as the halt polling interval, is increased and decreased based on the perceived
+effectiveness of the polling in an attempt to limit pointless polling.
+This value is stored in either the vcpu struct:
+
+	kvm_vcpu->halt_poll_ns
+
+or in the case of powerpc kvm-hv, in the vcore struct:
+
+	kvmppc_vcore->halt_poll_ns
+
+Thus this is a per vcpu (or vcore) value.
+
+During polling, if a wakeup source is received within the halt polling interval,
+the interval is left unchanged. In the event that a wakeup source isn't
+received during the polling interval (and thus schedule is invoked) there are
+two options: either the polling interval and total block time[0] were less than
+the global max polling interval (see module params below), or the total block
+time was greater than the global max polling interval.
+
+In the event that both the polling interval and total block time were less than
+the global max polling interval, then the polling interval can be increased in
+the hope that next time, during the longer polling interval, the wakeup source
+will arrive while the host is polling and the latency benefits will be realised.
+The polling interval is grown in the function grow_halt_poll_ns(), where it is
+multiplied by the module parameter halt_poll_ns_grow; when growing from zero it
+starts at halt_poll_ns_grow_start (see the sketch below).
+
+In the event that the total block time was greater than the global max polling
+interval, then the host will never poll for long enough (limited by the global
+max) to wake up during the polling interval, so it may as well be shrunk in
+order to avoid pointless polling. The polling interval is shrunk in the
+function shrink_halt_poll_ns() and is divided by the module parameter
+halt_poll_ns_shrink, or set to 0 iff halt_poll_ns_shrink == 0.
+
+It is worth noting that this adjustment process attempts to home in on some
+steady state polling interval, but will only really do a good job for wakeups
+which come at an approximately constant rate; otherwise there will be constant
+adjustment of the polling interval.
+
+[0] total block time: the time between when the halt polling function is
+                      invoked and a wakeup source received (irrespective of
+                      whether the scheduler is invoked within that function).
+
+Module Parameters
+=================
+
+The kvm module has 3 tuneable module parameters to adjust the global max
+polling interval as well as the rate at which the polling interval is grown and
+shrunk. These variables are defined in include/linux/kvm_host.h and as module
+parameters in virt/kvm/kvm_main.c, or arch/powerpc/kvm/book3s_hv.c in the
+powerpc kvm-hv case.
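+
+As a rough sketch of the grow/shrink behaviour controlled by these parameters
+(illustrative pseudocode only; the in-kernel helpers differ in details such as
+capping and tracing):
+
+	unsigned int grow_poll_ns(unsigned int old)
+	{
+		unsigned int val = old * halt_poll_ns_grow;	/* default 2 */
+
+		if (val < halt_poll_ns_grow_start)	/* default 10000 */
+			val = halt_poll_ns_grow_start;
+		if (val > halt_poll_ns)			/* global max */
+			val = halt_poll_ns;
+		return val;
+	}
+
+	unsigned int shrink_poll_ns(unsigned int old)
+	{
+		if (halt_poll_ns_shrink == 0)
+			return 0;
+		return old / halt_poll_ns_shrink;
+	}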
+
+Module Parameter	|   Description		    |	     Default Value
+--------------------------------------------------------------------------------
+halt_poll_ns		| The global max polling    | KVM_HALT_POLL_NS_DEFAULT
+			| interval which defines    |
+			| the ceiling value of the  |
+			| polling interval for      | (per arch value)
+			| each vcpu.		    |
+--------------------------------------------------------------------------------
+halt_poll_ns_grow	| The value by which the    | 2
+			| halt polling interval is  |
+			| multiplied in the	    |
+			| grow_halt_poll_ns()	    |
+			| function.		    |
+--------------------------------------------------------------------------------
+halt_poll_ns_grow_start | The initial value to grow | 10000
+			| to from zero in the	    |
+			| grow_halt_poll_ns()	    |
+			| function.		    |
+--------------------------------------------------------------------------------
+halt_poll_ns_shrink	| The value by which the    | 0
+			| halt polling interval is  |
+			| divided in the	    |
+			| shrink_halt_poll_ns()     |
+			| function.		    |
+--------------------------------------------------------------------------------
+
+These module parameters can be set from the sysfs files in:
+
+	/sys/module/kvm/parameters/
+
+Note that these module parameters are system-wide values and cannot be
+tuned on a per-VM basis.
+
+Further Notes
+=============
+
+- Care should be taken when setting the halt_poll_ns module parameter as a
+large value has the potential to drive the cpu usage to 100% on a machine which
+would be almost entirely idle otherwise. This is because even if a guest has
+wakeups during which very little work is done and which are quite far apart, if
+the period is shorter than the global max polling interval (halt_poll_ns) then
+the host will always poll for the entire block time and thus cpu utilisation
+will go to 100%.
+
+- Halt polling essentially presents a trade-off between power usage and latency,
+and the module parameters should be used to tune this trade-off. Idle cpu time
+is essentially converted to host kernel time with the aim of decreasing latency
+when entering the guest.
+
+- Halt polling will only be conducted by the host when no other tasks are
+runnable on that cpu; otherwise the polling will cease immediately and
+schedule will be invoked to allow that other task to run. Thus halt polling
+doesn't allow a guest to mount a denial of service attack on the cpu.
diff --git a/Documentation/virt/kvm/hypercalls.txt b/Documentation/virt/kvm/hypercalls.txt
new file mode 100644
index 000000000000..5f6d291bd004
--- /dev/null
+++ b/Documentation/virt/kvm/hypercalls.txt
@@ -0,0 +1,154 @@
+Linux KVM Hypercall:
+====================
+X86:
+ KVM hypercalls use a three-byte instruction sequence, either vmcall or
+ vmmcall. The hypervisor can replace it with instructions that are
+ guaranteed to be supported.
+
+ Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
+ The hypercall number should be placed in rax and the return value will be
+ placed in rax. No other registers will be clobbered unless explicitly stated
+ by the particular hypercall.
+
+S390:
+  R2-R7 are used for parameters 1-6. In addition, R1 is used for the
+  hypercall number. The return value is written to R2.
+
+  S390 uses the diagnose instruction (0x500) as the hypercall, along with
+  the hypercall number in R1.
+
+  For further information on the S390 diagnose call as supported by KVM,
+  refer to Documentation/virt/kvm/s390-diag.txt.
+
+PowerPC:
+  It uses R3-R10 for arguments, with the hypercall number in R11. R4-R11 are
+  used as output registers. The return value is placed in R3.
+
+  KVM hypercalls use a 4-byte opcode, which is patched via the
+  'hypercall-instructions' property inside the device tree's /hypervisor node.
+  For more information refer to Documentation/virt/kvm/ppc-pv.txt.
+
+MIPS:
+  KVM hypercalls use the HYPCALL instruction with code 0 and the hypercall
+  number in $2 (v0). Up to four arguments may be placed in $4-$7 (a0-a3) and
+  the return value is placed in $2 (v0).
+
+KVM Hypercalls Documentation
+============================
+The template for each hypercall is:
+1. Hypercall name.
+2. Architecture(s)
+3. Status (deprecated, obsolete, active)
+4. Purpose
+
+1. KVM_HC_VAPIC_POLL_IRQ
+------------------------
+Architecture: x86
+Status: active
+Purpose: Trigger guest exit so that the host can check for pending
+interrupts on reentry.
+
+2. KVM_HC_MMU_OP
+------------------------
+Architecture: x86
+Status: deprecated
+Purpose: Support MMU operations such as writing to PTEs,
+flushing the TLB, and releasing page tables.
+
+3. KVM_HC_FEATURES
+------------------------
+Architecture: PPC
+Status: active
+Purpose: Expose hypercall availability to the guest. On x86 platforms, cpuid
+is used to enumerate which hypercalls are available. On PPC, either a device
+tree based lookup (which is also what ePAPR dictates) or a KVM-specific
+enumeration mechanism (this hypercall) can be used.
+
+4. KVM_HC_PPC_MAP_MAGIC_PAGE
+------------------------
+Architecture: PPC
+Status: active
+Purpose: To enable communication between the hypervisor and guest there is a
+shared page that contains parts of supervisor-visible register state.
+The guest can map this shared page to access its supervisor register state
+through memory using this hypercall.
+
+5. KVM_HC_KICK_CPU
+------------------------
+Architecture: x86
+Status: active
+Purpose: Hypercall used to wake up a vcpu from HLT state
+Usage example: A vcpu of a paravirtualized guest that is busywaiting in guest
+kernel mode for an event to occur (e.g. a spinlock to become available) can
+execute the HLT instruction once it has busy-waited for more than a threshold
+time interval. Execution of the HLT instruction would cause the hypervisor to
+put the vcpu to sleep until the occurrence of an appropriate event. Another
+vcpu of the same guest can wake up the sleeping vcpu by issuing the
+KVM_HC_KICK_CPU hypercall, specifying the APIC ID (a1) of the vcpu to be woken
+up. An additional argument (a0) is reserved for future use.
+
+6. KVM_HC_CLOCK_PAIRING
+------------------------
+Architecture: x86
+Status: active
+Purpose: Hypercall used to synchronize host and guest clocks.
+Usage:
+
+a0: guest physical address where the host copies a
+"struct kvm_clock_pairing" structure.
+
+a1: clock_type; at the moment only KVM_CLOCK_PAIRING_WALLCLOCK (0)
+is supported (corresponding to the host's CLOCK_REALTIME clock).
+
+		struct kvm_clock_pairing {
+			__s64 sec;
+			__s64 nsec;
+			__u64 tsc;
+			__u32 flags;
+			__u32 pad[9];
+		};
+
+       Where:
+               * sec: seconds from clock_type clock.
+               * nsec: nanoseconds from clock_type clock.
+               * tsc: guest TSC value used to calculate sec/nsec pair
+               * flags: flags, unused (0) at the moment.
+
+The hypercall lets a guest compute a precise timestamp across
+host and guest. The guest can use the returned TSC value to
+compute the CLOCK_REALTIME for its clock, at the same instant.
+
+Returns KVM_EOPNOTSUPP if the host does not use the TSC clocksource,
+or if the clock type is different from KVM_CLOCK_PAIRING_WALLCLOCK.
+
+7. KVM_HC_SEND_IPI
+------------------------
+Architecture: x86
+Status: active
+Purpose: Send IPIs to multiple vCPUs.
+
+a0: lower part of the bitmap of destination APIC IDs
+a1: higher part of the bitmap of destination APIC IDs
+a2: the lowest APIC ID in bitmap
+a3: APIC ICR
+
+The hypercall lets a guest send multicast IPIs, with at most 128
+destinations per hypercall in 64-bit mode and 64 vCPUs per
+hypercall in 32-bit mode. The destinations are represented by a
+bitmap contained in the first two arguments (a0 and a1). Bit 0 of
+a0 corresponds to the APIC ID in the third argument (a2), bit 1
+corresponds to the APIC ID a2+1, and so on.
+
+Returns the number of CPUs to which the IPIs were delivered successfully.
+
+8. KVM_HC_SCHED_YIELD
+------------------------
+Architecture: x86
+Status: active
+Purpose: Hypercall used to yield if the IPI target vCPU is preempted
+
+a0: destination APIC ID
+
+Usage example: When sending a call-function IPI-many to vCPUs, yield if
+any of the IPI target vCPUs was preempted.
diff --git a/Documentation/virt/kvm/index.rst b/Documentation/virt/kvm/index.rst
new file mode 100644
index 000000000000..0b206a06f5be
--- /dev/null
+++ b/Documentation/virt/kvm/index.rst
@@ -0,0 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===
+KVM
+===
+
+.. toctree::
+   :maxdepth: 2
+
+   amd-memory-encryption
+   cpuid
diff --git a/Documentation/virt/kvm/locking.txt b/Documentation/virt/kvm/locking.txt
new file mode 100644
index 000000000000..635cd6eaf714
--- /dev/null
+++ b/Documentation/virt/kvm/locking.txt
@@ -0,0 +1,215 @@
+KVM Lock Overview
+=================
+
+1. Acquisition Orders
+---------------------
+
+The acquisition orders for mutexes are as follows:
+
+- kvm->lock is taken outside vcpu->mutex
+
+- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
+
+- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
+  them together is quite rare.
+
+On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
+
+Everything else is a leaf: no other lock is taken inside the critical
+sections.
+
+2. Exception
+------------
+
+Fast page fault:
+
+Fast page fault is the fast path which fixes the guest page fault out of
+the mmu-lock on x86. Currently, the page fault can be fast in one of the
+following two cases:
+
+1. Access Tracking: The SPTE is not present, but it is marked for access
+tracking, i.e. the SPTE_SPECIAL_MASK is set. That means we need to
+restore the saved R/X bits. This is described in more detail below.
+
+2. Write-Protection: The SPTE is present and the fault is
+caused by write-protect. That means we just need to change the W bit of the
+spte.
+
+What we use to avoid all these races is the SPTE_HOST_WRITEABLE bit and
+the SPTE_MMU_WRITEABLE bit on the spte:
+- SPTE_HOST_WRITEABLE means the gfn is writable on the host.
+- SPTE_MMU_WRITEABLE means the gfn is writable on the mmu. The bit is set
+  when the gfn is writable on the guest mmu and it is not write-protected by
+  shadow page write-protection.
+
+On the fast page fault path, we will use cmpxchg to atomically set the spte W
+bit if spte.SPTE_HOST_WRITEABLE = 1 and spte.SPTE_WRITE_PROTECT = 1, or
+restore the saved R/X bits if the VMX_EPT_TRACK_ACCESS mask is set, or both.
+This is safe because any change to these bits can be detected by cmpxchg.
+
+But we need to carefully check these cases:
+1): The mapping from gfn to pfn
+The mapping from gfn to pfn may change, since we can only ensure the pfn
+is not changed during the cmpxchg.
+This is an ABA problem; for example, the following case can happen:
+
+At the beginning:
+gpte = gfn1
+gfn1 is mapped to pfn1 on the host
+spte is the shadow page table entry corresponding with gpte and
+spte = pfn1
+
+   VCPU 0                                VCPU 1
+on fast page fault path:
+
+   old_spte = *spte;
+                                         pfn1 is swapped out:
+                                            spte = 0;
+
+                                         pfn1 is re-alloced for gfn2.
+
+                                         gpte is changed to point to
+                                         gfn2 by the guest:
+                                            spte = pfn1;
+
+   if (cmpxchg(spte, old_spte, old_spte+W))
+	mark_page_dirty(vcpu->kvm, gfn1)
+	     OOPS!!!
+
+We dirty-log for gfn1, which means gfn2 is lost from the dirty bitmap.
+
+For a direct sp, we can easily avoid it since the spte of the direct sp is
+fixed to the gfn. For an indirect sp, before we do the cmpxchg, we call
+gfn_to_pfn_atomic() to pin the gfn to the pfn, because after
+gfn_to_pfn_atomic():
+- We have held the refcount of the pfn, which means the pfn can not be freed
+  and reused for another gfn.
+- The pfn is writable, which means it can not be shared between different gfns
+  by KSM.
+
+Then, we can ensure the dirty bitmap is correctly set for a gfn.
+
+Currently, to simplify things, we disable fast page fault for indirect
+shadow pages.
+
+2): Dirty bit tracking
+In the original code, the spte can be fast updated (non-atomically) if the
+spte is read-only and the Accessed bit has already been set, since the
+Accessed bit and Dirty bit can not be lost.
+
+But it is not true after fast page fault, since the spte can be marked
+writable between reading the spte and updating the spte. Consider the
+following case:
+
+At the beginning:
+spte.W = 0
+spte.Accessed = 1
+
+   VCPU 0                                VCPU 1
+In mmu_spte_clear_track_bits():
+
+   old_spte = *spte;
+
+   /* 'if' condition is satisfied. */
+   if (old_spte.Accessed == 1 &&
+        old_spte.W == 0)
+      spte = 0ull;
+                                         on fast page fault path:
+                                             spte.W = 1
+                                         memory write on the spte:
+                                             spte.Dirty = 1
+
+   else
+      old_spte = xchg(spte, 0ull)
+
+   if (old_spte.Accessed == 1)
+      kvm_set_pfn_accessed(spte.pfn);
+   if (old_spte.Dirty == 1)
+      kvm_set_pfn_dirty(spte.pfn);
+      OOPS!!!
+
+The Dirty bit is lost in this case.
+
+In order to avoid this kind of issue, we always treat the spte as "volatile"
+if it can be updated out of mmu-lock (see spte_has_volatile_bits()); this
+means the spte is always updated atomically in this case.
+
+3): Flushing TLBs due to spte updates
+If the spte is updated from writable to read-only, we should flush all TLBs,
+otherwise rmap_write_protect will find a read-only spte, even though the
+writable spte might be cached on a CPU's TLB.
+
+As mentioned before, the spte can be updated to writable out of mmu-lock on
+the fast page fault path. In order to easily audit the path, we check in
+mmu_spte_update() whether TLBs need to be flushed for this reason, since it
+is a common function for updating the spte (present -> present).
+
+Since the spte is "volatile" if it can be updated out of mmu-lock, we always
+update the spte atomically, so the race caused by fast page fault can be
+avoided; see the comments in spte_has_volatile_bits() and mmu_spte_update().
+
+Lockless Access Tracking:
+
+This is used for Intel CPUs that are using EPT but do not support the EPT A/D
+bits. In this case, when the KVM MMU notifier is called to track accesses to a
+page (via kvm_mmu_notifier_clear_flush_young), it marks the PTE as not-present
+by clearing the RWX bits in the PTE and storing the original R & X bits in
+some unused/ignored bits. In addition, the SPTE_SPECIAL_MASK is also set on the
+PTE (using the ignored bit 62).
+When the VM tries to access the page later on,
+a fault is generated and the fast page fault mechanism described above is used
+to atomically restore the PTE to a Present state. The W bit is not saved when
+the PTE is marked for access tracking; during restoration to the Present
+state, the W bit is set depending on whether or not it was a write access. If
+it wasn't, then the W bit will remain clear until a write access happens, at
+which time it will be set using the Dirty tracking mechanism described above.
+
+3. Reference
+------------
+
+Name:		kvm_lock
+Type:		mutex
+Arch:		any
+Protects:	- vm_list
+
+Name:		kvm_count_lock
+Type:		raw_spinlock_t
+Arch:		any
+Protects:	- hardware virtualization enable/disable
+Comment:	'raw' because hardware enabling/disabling must be atomic with
+		respect to migration.
+
+Name:		kvm_arch::tsc_write_lock
+Type:		raw_spinlock
+Arch:		x86
+Protects:	- kvm_arch::{last_tsc_write,last_tsc_nsec,last_tsc_offset}
+		- tsc offset in vmcb
+Comment:	'raw' because updating the tsc offsets must not be preempted.
+
+Name:		kvm->mmu_lock
+Type:		spinlock_t
+Arch:		any
+Protects:	- shadow page/shadow tlb entry
+Comment:	it is a spinlock since it is used in the mmu notifier.
+
+Name:		kvm->srcu
+Type:		srcu lock
+Arch:		any
+Protects:	- kvm->memslots
+		- kvm->buses
+Comment:	The srcu read lock must be held while accessing memslots (e.g.
+		when using gfn_to_* functions) and while accessing the in-kernel
+		MMIO/PIO address->device structure mapping (kvm->buses).
+		The srcu index can be stored in kvm_vcpu->srcu_idx per vcpu
+		if it is needed by multiple functions.
+
+Name:		blocked_vcpu_on_cpu_lock
+Type:		spinlock_t
+Arch:		x86
+Protects:	blocked_vcpu_on_cpu
+Comment:	This is a per-CPU lock and it is used for VT-d posted-interrupts.
+		When VT-d posted-interrupts is supported and the VM has assigned
+		devices, we put the blocked vCPU on the list blocked_vcpu_on_cpu,
+		protected by blocked_vcpu_on_cpu_lock. When VT-d hardware issues
+		a wakeup notification event (because an external interrupt from
+		an assigned device arrived), we find the vCPU on the list and
+		wake it up.
diff --git a/Documentation/virt/kvm/mmu.txt b/Documentation/virt/kvm/mmu.txt
new file mode 100644
index 000000000000..1b9880dfba0a
--- /dev/null
+++ b/Documentation/virt/kvm/mmu.txt
@@ -0,0 +1,449 @@
+The x86 kvm shadow mmu
+======================
+
+The mmu (in arch/x86/kvm, files mmu.[ch] and paging_tmpl.h) is responsible
+for presenting a standard x86 mmu to the guest, while translating guest
+physical addresses to host physical addresses.
+
+The mmu code attempts to satisfy the following requirements:
+
+- correctness: the guest should not be able to determine that it is running
+               on an emulated mmu except for timing (we attempt to comply
+               with the specification, not emulate the characteristics of
+               a particular implementation such as tlb size)
+- security: the guest must not be able to touch host memory not assigned
+            to it
+- performance: minimize the performance penalty imposed by the mmu
+- scaling: need to scale to large memory and large vcpu guests
+- hardware: support the full range of x86 virtualization hardware
+- integration: Linux memory management code must be in control of guest memory
+               so that swapping, page migration, page merging, transparent
+               hugepages, and similar features work without change
+- dirty tracking: report writes to guest memory to enable live migration
+                  and framebuffer-based displays
+- footprint: keep the amount of pinned kernel memory low (most memory
+             should be shrinkable)
+- reliability: avoid multipage or GFP_ATOMIC allocations
+
+Acronyms
+========
+
+pfn   host page frame number
+hpa   host physical address
+hva   host virtual address
+gfn   guest frame number
+gpa   guest physical address
+gva   guest virtual address
+ngpa  nested guest physical address
+ngva  nested guest virtual address
+pte   page table entry (used also to refer generically to paging structure
+      entries)
+gpte  guest pte (referring to gfns)
+spte  shadow pte (referring to pfns)
+tdp   two dimensional paging (vendor neutral term for NPT and EPT)
+
+Virtual and real hardware supported
+===================================
+
+The mmu supports first-generation mmu hardware, which allows an atomic switch
+of the current paging mode and cr3 during guest entry, as well as
+two-dimensional paging (AMD's NPT and Intel's EPT). The emulated hardware
+it exposes is the traditional 2/3/4 level x86 mmu, with support for global
+pages, pae, pse, pse36, cr0.wp, and 1GB pages. The emulated hardware is also
+able to expose NPT-capable hardware on NPT-capable hosts.
+
+Translation
+===========
+
+The primary job of the mmu is to program the processor's mmu to translate
+addresses for the guest. Different translations are required at different
+times:
+
+- when guest paging is disabled, we translate guest physical addresses to
+  host physical addresses (gpa->hpa)
+- when guest paging is enabled, we translate guest virtual addresses, to
+  guest physical addresses, to host physical addresses (gva->gpa->hpa)
+- when the guest launches a guest of its own, we translate nested guest
+  virtual addresses, to nested guest physical addresses, to guest physical
+  addresses, to host physical addresses (ngva->ngpa->gpa->hpa)
+
+The primary challenge is to encode between 1 and 3 translations into hardware
+that supports only 1 (traditional) and 2 (tdp) translations. When the
+number of required translations matches the hardware, the mmu operates in
+direct mode; otherwise it operates in shadow mode (see below).
+
+Memory
+======
+
+Guest memory (gpa) is part of the user address space of the process that is
+using kvm. Userspace defines the translation between guest addresses and user
+addresses (gpa->hva); note that two gpas may alias to the same hva, but not
+vice versa.
+
+These hvas may be backed using any method available to the host: anonymous
+memory, file backed memory, and device memory. Memory might be paged by the
+host at any time.
+
+Events
+======
+
+The mmu is driven by events, some from the guest, some from the host.
+ +Guest generated events: +- writes to control registers (especially cr3) +- invlpg/invlpga instruction execution +- access to missing or protected translations + +Host generated events: +- changes in the gpa->hpa translation (either through gpa->hva changes or + through hva->hpa changes) +- memory pressure (the shrinker) + +Shadow pages +============ + +The principal data structure is the shadow page, 'struct kvm_mmu_page'. A +shadow page contains 512 sptes, which can be either leaf or nonleaf sptes. A +shadow page may contain a mix of leaf and nonleaf sptes. + +A nonleaf spte allows the hardware mmu to reach the leaf pages and +is not related to a translation directly. It points to other shadow pages. + +A leaf spte corresponds to either one or two translations encoded into +one paging structure entry. These are always the lowest level of the +translation stack, with optional higher level translations left to NPT/EPT. +Leaf ptes point at guest pages. + +The following table shows translations encoded by leaf ptes, with higher-level +translations in parentheses: + + Non-nested guests: + nonpaging: gpa->hpa + paging: gva->gpa->hpa + paging, tdp: (gva->)gpa->hpa + Nested guests: + non-tdp: ngva->gpa->hpa (*) + tdp: (ngva->)ngpa->gpa->hpa + +(*) the guest hypervisor will encode the ngva->gpa translation into its page + tables if npt is not present + +Shadow pages contain the following information: + role.level: + The level in the shadow paging hierarchy that this shadow page belongs to. + 1=4k sptes, 2=2M sptes, 3=1G sptes, etc. + role.direct: + If set, leaf sptes reachable from this page are for a linear range. + Examples include real mode translation, large guest pages backed by small + host pages, and gpa->hpa translations when NPT or EPT is active. + The linear range starts at (gfn << PAGE_SHIFT) and its size is determined + by role.level (2MB for first level, 1GB for second level, 0.5TB for third + level, 256TB for fourth level) + If clear, this page corresponds to a guest page table denoted by the gfn + field. + role.quadrant: + When role.gpte_is_8_bytes=0, the guest uses 32-bit gptes while the host uses 64-bit + sptes. That means a guest page table contains more ptes than the host, + so multiple shadow pages are needed to shadow one guest page. + For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the + first or second 512-gpte block in the guest page table. For second-level + page tables, each 32-bit gpte is converted to two 64-bit sptes + (since each first-level guest page is shadowed by two first-level + shadow pages) so role.quadrant takes values in the range 0..3. Each + quadrant maps 1GB virtual address space. + role.access: + Inherited guest access permissions in the form uwx. Note execute + permission is positive, not negative. + role.invalid: + The page is invalid and should not be used. It is a root page that is + currently pinned (by a cpu hardware register pointing to it); once it is + unpinned it will be destroyed. + role.gpte_is_8_bytes: + Reflects the size of the guest PTE for which the page is valid, i.e. '1' + if 64-bit gptes are in use, '0' if 32-bit gptes are in use. + role.nxe: + Contains the value of efer.nxe for which the page is valid. + role.cr0_wp: + Contains the value of cr0.wp for which the page is valid. + role.smep_andnot_wp: + Contains the value of cr4.smep && !cr0.wp for which the page is valid + (pages for which this is true are different from other pages; see the + treatment of cr0.wp=0 below). 
+  role.smap_andnot_wp:
+    Contains the value of cr4.smap && !cr0.wp for which the page is valid
+    (pages for which this is true are different from other pages; see the
+    treatment of cr0.wp=0 below).
+  role.ept_sp:
+    This is a virtual flag to denote a shadowed nested EPT page.  ept_sp
+    is true if "cr0_wp && smap_andnot_wp", an otherwise invalid combination.
+  role.smm:
+    Is 1 if the page is valid in system management mode.  This field
+    determines which of the kvm_memslots array was used to build this
+    shadow page; it is also used to go back from a struct kvm_mmu_page
+    to a memslot, through the kvm_memslots_for_spte_role macro and
+    __gfn_to_memslot.
+  role.ad_disabled:
+    Is 1 if the MMU instance cannot use A/D bits.  EPT did not have A/D
+    bits before Haswell; shadow EPT page tables also cannot use A/D bits
+    if the L1 hypervisor does not enable them.
+  gfn:
+    Either the guest page table containing the translations shadowed by this
+    page, or the base page frame for linear translations.  See role.direct.
+  spt:
+    A pageful of 64-bit sptes containing the translations for this page.
+    Accessed by both kvm and hardware.
+    The page pointed to by spt will have its page->private pointing back
+    at the shadow page structure.
+    sptes in spt point either at guest pages, or at lower-level shadow pages.
+    Specifically, if sp1 and sp2 are shadow pages, then sp1->spt[n] may point
+    at __pa(sp2->spt).  sp2 will point back at sp1 through parent_pte.
+    The spt array forms a DAG structure with the shadow page as a node, and
+    guest pages as leaves.
+  gfns:
+    An array of 512 guest frame numbers, one for each present pte.  Used to
+    perform a reverse map from a pte to a gfn.  When role.direct is set, any
+    element of this array can be calculated from the gfn field when needed;
+    in this case, the array of gfns is not allocated.  See role.direct and gfn.
+  root_count:
+    A counter keeping track of how many hardware registers (guest cr3 or
+    pdptrs) are now pointing at the page.  While this counter is nonzero, the
+    page cannot be destroyed.  See role.invalid.
+  parent_ptes:
+    The reverse mapping for the pte/ptes pointing at this page's spt.  If
+    parent_ptes bit 0 is zero, only one spte points at this page and
+    parent_ptes points at this single spte; otherwise, there exist multiple
+    sptes pointing at this page and (parent_ptes & ~0x1) points at a data
+    structure with a list of parent sptes.
+  unsync:
+    If true, then the translations in this page may not match the guest's
+    translation.  This is equivalent to the state of the tlb when a pte is
+    changed but before the tlb entry is flushed.  Accordingly, unsync ptes
+    are synchronized when the guest executes invlpg or flushes its tlb by
+    other means.  Valid for leaf pages.
+  unsync_children:
+    How many sptes in the page point at pages that are unsync (or have
+    unsynchronized children).
+  unsync_child_bitmap:
+    A bitmap indicating which sptes in spt point (directly or indirectly) at
+    pages that may be unsynchronized.  Used to quickly locate all unsynchronized
+    pages reachable from a given page.
+  clear_spte_count:
+    Only present on 32-bit hosts, where a 64-bit spte cannot be written
+    atomically.  The reader uses this while running outside the MMU lock
+    to detect in-progress updates and retry them until the writer has
+    finished the write.
+  write_flooding_count:
+    A guest may write to a page table many times, causing a lot of
+    emulations if the page needs to be write-protected (see "Synchronized
+    and unsynchronized pages" below).
+    Leaf pages can be unsynchronized
+    so that they do not trigger frequent emulation, but this is not
+    possible for non-leaf pages.  This field counts the number of emulations
+    since the last time the page table was actually used; if emulation
+    is triggered too frequently on this page, KVM will unmap the page
+    to avoid emulation in the future.
+
+Reverse map
+===========
+
+The mmu maintains a reverse mapping whereby all ptes mapping a page can be
+reached given its gfn.  This is used, for example, when swapping out a page.
+
+Synchronized and unsynchronized pages
+=====================================
+
+The guest uses two events to synchronize its tlb and page tables: tlb flushes
+and page invalidations (invlpg).
+
+A tlb flush means that we need to synchronize all sptes reachable from the
+guest's cr3.  This is expensive, so we keep all guest page tables write
+protected, and synchronize sptes to gptes when a gpte is written.
+
+A special case is when a guest page table is reachable from the current
+guest cr3.  In this case, the guest is obliged to issue an invlpg instruction
+before using the translation.  We take advantage of that by removing write
+protection from the guest page, and allowing the guest to modify it freely.
+We synchronize modified gptes when the guest invokes invlpg.  This reduces
+the amount of emulation we have to do when the guest modifies multiple gptes,
+or when a guest page is no longer used as a page table and is used for
+random guest data.
+
+As a side effect we have to resynchronize all reachable unsynchronized shadow
+pages on a tlb flush.
+
+
+Reaction to events
+==================
+
+- guest page fault (or npt page fault, or ept violation)
+
+This is the most complicated event.  The cause of a page fault can be:
+
+  - a true guest fault (the guest translation won't allow the access) (*)
+  - access to a missing translation
+  - access to a protected translation
+    - when logging dirty pages, memory is write protected
+    - synchronized shadow pages are write protected (*)
+  - access to untranslatable memory (mmio)
+
+  (*) not applicable in direct mode
+
+Handling a page fault is performed as follows:
+
+ - if the RSV bit of the error code is set, the page fault is caused by the
+   guest accessing MMIO and cached MMIO information is available.
+   - walk shadow page table
+   - check for valid generation number in the spte (see "Fast invalidation of
+     MMIO sptes" below)
+   - cache the information to vcpu->arch.mmio_gva, vcpu->arch.access and
+     vcpu->arch.mmio_gfn, and call the emulator
+ - If both P bit and R/W bit of the error code are set, this could possibly
+   be handled as a "fast page fault" (fixed without taking the MMU lock).  See
+   the description in Documentation/virt/kvm/locking.txt.
+ - if needed, walk the guest page tables to determine the guest translation
+   (gva->gpa or ngpa->gpa)
+   - if permissions are insufficient, reflect the fault back to the guest
+ - determine the host page
+   - if this is an mmio request, there is no host page; cache the info to
+     vcpu->arch.mmio_gva, vcpu->arch.access and vcpu->arch.mmio_gfn
+ - walk the shadow page table to find the spte for the translation,
+   instantiating missing intermediate page tables as necessary
+   - If this is an mmio request, cache the mmio info to the spte and set some
+     reserved bit on the spte (see callers of kvm_mmu_set_mmio_spte_mask)
+ - try to unsynchronize the page
+   - if successful, we can let the guest continue and modify the gpte
+ - emulate the instruction
+   - if failed, unshadow the page and let the guest continue
+ - update any translations that were modified by the instruction
+
+invlpg handling:
+
+  - walk the shadow page hierarchy and drop affected translations
+  - try to reinstantiate the indicated translation in the hope that the
+    guest will use it in the near future
+
+Guest control register updates:
+
+- mov to cr3
+  - look up new shadow roots
+  - synchronize newly reachable shadow pages
+
+- mov to cr0/cr4/efer
+  - set up mmu context for new paging mode
+  - look up new shadow roots
+  - synchronize newly reachable shadow pages
+
+Host translation updates:
+
+  - mmu notifier called with updated hva
+  - look up affected sptes through reverse map
+  - drop (or update) translations
+
+Emulating cr0.wp
+================
+
+If tdp is not enabled, the host must keep cr0.wp=1 so page write protection
+works for the guest kernel, not guest userspace.  When the guest
+cr0.wp=1, this does not present a problem.  However when the guest cr0.wp=0,
+we cannot map the permissions for gpte.u=1, gpte.w=0 to any spte (the
+semantics require allowing any guest kernel access plus user read access).
+
+We handle this by mapping the permissions to two possible sptes, depending
+on fault type:
+
+- kernel write fault: spte.u=0, spte.w=1 (allows full kernel access,
+  disallows user access)
+- read fault: spte.u=1, spte.w=0 (allows full read access, disallows kernel
+  write access)
+
+(user write faults generate a #PF)
+
+In the first case there are two additional complications:
+- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
+  the kernel may now execute it.  We handle this by also setting spte.nx.
+  If we get a user fetch or read fault, we'll change spte.u=1 and
+  spte.nx=gpte.nx back.  For this to work, KVM forces EFER.NX to 1 when
+  shadow paging is in use.
+- if CR4.SMAP is disabled: since the page has been changed to a kernel
+  page, it can not be reused when CR4.SMAP is enabled.  We set
+  CR4.SMAP && !CR0.WP into the shadow page's role to avoid this case.  Note,
+  here we do not care about the case where CR4.SMAP is enabled, since KVM
+  will directly inject a #PF to the guest due to the failed permission check.
+
+To prevent an spte that was converted into a kernel page with cr0.wp=0
+from being written by the kernel after cr0.wp has changed to 1, we make
+the value of cr0.wp part of the page role.  This means that an spte created
+with one value of cr0.wp cannot be used when cr0.wp has a different value -
+it will simply be missed by the shadow page lookup code.  A similar issue
+exists when an spte created with cr0.wp=0 and cr4.smep=0 is used after
+changing cr4.smep to 1.  To avoid this, the value of !cr0.wp && cr4.smep
+is also made a part of the page role.
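+
+As an illustration of the two mappings above, the fault-time decision could
+be sketched as follows (pseudocode only; the variable and field names are
+made up and do not correspond to KVM's actual code):
+
+  /* guest runs with cr0.wp=0 and the gpte has u=1, w=0 */
+  if (fault_is_write && !fault_from_user) {
+          spte.u = 0;     /* hide the page from user mode */
+          spte.w = 1;     /* let the guest kernel write */
+  } else if (fault_is_read) {
+          spte.u = 1;     /* user and kernel may read */
+          spte.w = 0;     /* a later kernel write faults again and
+                             switches back to the first mapping */
+  }
+  /* user write faults are reflected to the guest as #PF */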
+
+Large pages
+===========
+
+The mmu supports all combinations of large and small guest and host pages.
+Supported page sizes include 4k, 2M, 4M, and 1G.  4M pages are treated as
+two separate 2M pages, on both guest and host, since the mmu always uses PAE
+paging.
+
+To instantiate a large spte, four constraints must be satisfied:
+
+- the spte must point to a large host page
+- the guest pte must be a large pte of at least equivalent size (if tdp is
+  enabled, there is no guest pte and this condition is satisfied)
+- if the spte will be writeable, the large page frame may not overlap any
+  write-protected pages
+- the guest page must be wholly contained by a single memory slot
+
+To check the last two conditions, the mmu maintains a ->disallow_lpage set of
+arrays for each memory slot and large page size.  Every write protected page
+causes its disallow_lpage to be incremented, thus preventing instantiation of
+a large spte.  The frames at the end of an unaligned memory slot have
+artificially inflated ->disallow_lpages so they can never be instantiated.
+
+Fast invalidation of MMIO sptes
+===============================
+
+As mentioned in "Reaction to events" above, kvm will cache MMIO
+information in leaf sptes.  When a new memslot is added or an existing
+memslot is changed, this information may become stale and needs to be
+invalidated.  Invalidating it by walking all shadow pages would require
+holding the MMU lock for the whole walk, so a generation number scheme
+is used instead to keep invalidation scalable.
+
+MMIO sptes have a few spare bits, which are used to store a
+generation number.  The global generation number is stored in
+kvm_memslots(kvm)->generation, and increased whenever guest memory info
+changes.
+
+When KVM finds an MMIO spte, it checks the generation number of the spte.
+If the generation number of the spte does not equal the global generation
+number, it will ignore the cached MMIO information and handle the page
+fault through the slow path.
+
+Since only 19 bits are used to store the generation number in the MMIO spte,
+all pages are zapped when there is an overflow.
+
+Unfortunately, a single memory access might access kvm_memslots(kvm) multiple
+times, the last one happening when the generation number is retrieved and
+stored into the MMIO spte.  Thus, the MMIO spte might be created based on
+out-of-date information, but with an up-to-date generation number.
+
+To avoid this, the generation number is incremented again after synchronize_srcu
+returns; thus, bit 63 of kvm_memslots(kvm)->generation is set to 1 only during a
+memslot update, while some SRCU readers might be using the old copy.  We do not
+want to use an MMIO spte created with an odd generation number, and we can do
+this without losing a bit in the MMIO spte.  The "update in-progress" bit of the
+generation is not stored in the MMIO spte, and so is implicitly zero when the
+generation is extracted out of the spte.  If KVM is unlucky and creates an MMIO
+spte while an update is in-progress, the next access to the spte will always be
+a cache miss.  For example, a subsequent access during the update window will
+miss due to the in-progress flag diverging, while an access after the update
+window closes will have a higher generation number (as compared to the spte).
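+
+In pseudocode, the check works roughly as follows (illustrative only; the
+helper and constant names here are made up, the real code lives in
+arch/x86/kvm/mmu.c):
+
+  bool mmio_spte_is_valid(struct kvm *kvm, u64 spte)
+  {
+          u64 gen = kvm_memslots(kvm)->generation;
+
+          if (gen & UPDATE_IN_PROGRESS)   /* bit 63: memslot update running */
+                  return false;           /* always take the slow path */
+
+          /* compare only the generation bits that fit in the spte */
+          return mmio_spte_generation(spte) == (gen & MMIO_GEN_MASK);
+  }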
+
+
+Further reading
+===============
+
+- NPT presentation from KVM Forum 2008
+  http://www.linux-kvm.org/images/c/c8/KvmForum2008%24kdf2008_21.pdf
+
diff --git a/Documentation/virt/kvm/msr.txt b/Documentation/virt/kvm/msr.txt
new file mode 100644
index 000000000000..df1f4338b3ca
--- /dev/null
+++ b/Documentation/virt/kvm/msr.txt
@@ -0,0 +1,284 @@
+KVM-specific MSRs.
+Glauber Costa, Red Hat Inc, 2010
+=====================================================
+
+KVM makes use of some custom MSRs to service some requests.
+
+Custom MSRs have a range reserved for them, which goes from
+0x4b564d00 to 0x4b564dff. There are MSRs outside this area,
+but they are deprecated and their use is discouraged.
+
+Custom MSR list
+---------------
+
+The currently supported custom MSR list is:
+
+MSR_KVM_WALL_CLOCK_NEW:   0x4b564d00
+
+	data: 4-byte aligned physical address of a memory area which must be
+	in guest RAM. This memory is expected to hold a copy of the following
+	structure:
+
+	struct pvclock_wall_clock {
+		u32   version;
+		u32   sec;
+		u32   nsec;
+	} __attribute__((__packed__));
+
+	whose data will be filled in by the hypervisor. The hypervisor is only
+	guaranteed to update this data at the moment of MSR write.
+	Users that want to reliably query this information more than once have
+	to write more than once to this MSR. Fields have the following meanings:
+
+	version: guest has to check version before and after grabbing
+	time information and check that they are both equal and even.
+	An odd version indicates an in-progress update.
+
+	sec: number of seconds for wallclock at time of boot.
+
+	nsec: number of nanoseconds for wallclock at time of boot.
+
+	In order to get the current wallclock time, the system_time from
+	MSR_KVM_SYSTEM_TIME_NEW needs to be added.
+
+	Note that although MSRs are per-CPU entities, the effect of this
+	particular MSR is global.
+
+	Availability of this MSR must be checked via bit 3 in the 0x40000001
+	cpuid leaf prior to usage.
+
+MSR_KVM_SYSTEM_TIME_NEW:  0x4b564d01
+
+	data: 4-byte aligned physical address of a memory area which must be in
+	guest RAM, plus an enable bit in bit 0. This memory is expected to hold
+	a copy of the following structure:
+
+	struct pvclock_vcpu_time_info {
+		u32   version;
+		u32   pad0;
+		u64   tsc_timestamp;
+		u64   system_time;
+		u32   tsc_to_system_mul;
+		s8    tsc_shift;
+		u8    flags;
+		u8    pad[2];
+	} __attribute__((__packed__)); /* 32 bytes */
+
+	whose data will be filled in by the hypervisor periodically. Only one
+	write, or registration, is needed for each VCPU. The interval between
+	updates of this structure is arbitrary and implementation-dependent.
+	The hypervisor may update this structure at any time it sees fit until
+	anything with bit0 == 0 is written to it.
+
+	Fields have the following meanings:
+
+	version: guest has to check version before and after grabbing
+	time information and check that they are both equal and even.
+	An odd version indicates an in-progress update.
+
+	tsc_timestamp: the tsc value of the current VCPU at the time
+	of the update of this structure. Guests can subtract this value
+	from the current tsc to derive a notion of elapsed time since the
+	structure update.
+
+	system_time: a host notion of monotonic time, including sleep
+	time at the time this structure was last updated. Unit is
+	nanoseconds.
+
+	tsc_to_system_mul: multiplier to be used when converting a
+	tsc-related quantity to nanoseconds
+
+	tsc_shift: shift to be used when converting a tsc-related
+	quantity to nanoseconds.
+	This shift will ensure that
+	multiplication with tsc_to_system_mul does not overflow.
+	A positive value denotes a left shift, a negative value
+	a right shift.
+
+	The conversion from tsc to nanoseconds involves an additional
+	right shift by 32 bits. With this information, guests can
+	derive per-CPU time by doing:
+
+		time = (current_tsc - tsc_timestamp)
+		if (tsc_shift >= 0)
+			time <<= tsc_shift;
+		else
+			time >>= -tsc_shift;
+		time = (time * tsc_to_system_mul) >> 32
+		time = time + system_time
+
+	flags: bits in this field indicate extended capabilities
+	coordinated between the guest and the hypervisor. Availability
+	of specific flags has to be checked in the 0x40000001 cpuid leaf.
+	Current flags are:
+
+		 flag bit   | cpuid bit    | meaning
+		-------------------------------------------------------------
+			    |	           | time measures taken across
+		     0      |	   24      | multiple cpus are guaranteed to
+			    |		   | be monotonic
+		-------------------------------------------------------------
+			    |		   | guest vcpu has been paused by
+		     1	    |	  N/A	   | the host
+			    |		   | See 4.70 in api.txt
+		-------------------------------------------------------------
+
+	Availability of this MSR must be checked via bit 3 in the 0x40000001
+	cpuid leaf prior to usage.
+
+
+MSR_KVM_WALL_CLOCK:  0x11
+
+	data and functioning: same as MSR_KVM_WALL_CLOCK_NEW. Use that instead.
+
+	This MSR falls outside the reserved KVM range and may be removed in the
+	future. Its usage is deprecated.
+
+	Availability of this MSR must be checked via bit 0 in the 0x40000001
+	cpuid leaf prior to usage.
+
+MSR_KVM_SYSTEM_TIME: 0x12
+
+	data and functioning: same as MSR_KVM_SYSTEM_TIME_NEW. Use that instead.
+
+	This MSR falls outside the reserved KVM range and may be removed in the
+	future. Its usage is deprecated.
+
+	Availability of this MSR must be checked via bit 0 in the 0x40000001
+	cpuid leaf prior to usage.
+
+	The suggested algorithm for detecting kvmclock presence is then:
+
+	if (!kvm_para_available())    /* refer to cpuid.txt */
+		return NON_PRESENT;
+
+	flags = cpuid_eax(0x40000001);
+	if (flags & (1 << 3)) {        /* bit 3: the new MSRs */
+		msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
+		msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
+		return PRESENT;
+	} else if (flags & (1 << 0)) { /* bit 0: the old MSRs */
+		msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
+		msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
+		return PRESENT;
+	} else
+		return NON_PRESENT;
+
+MSR_KVM_ASYNC_PF_EN: 0x4b564d02
+	data: Bits 63-6 hold the 64-byte aligned physical address of a
+	64 byte memory area which must be in guest RAM and must be
+	zeroed. Bits 5-3 are reserved and should be zero. Bit 0 is 1
+	when asynchronous page faults are enabled on the vcpu, 0 when
+	disabled. Bit 1 is 1 if asynchronous page faults can be injected
+	when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults
+	are delivered to L1 as #PF vmexits.  Bit 2 can be set only if
+	KVM_FEATURE_ASYNC_PF_VMEXIT is present in CPUID.
+
+	The first 4 bytes of the 64 byte memory location will be written to by
+	the hypervisor at the time of asynchronous page fault (APF)
+	injection to indicate the type of asynchronous page fault. A value
+	of 1 means that the page referred to by the page fault is not
+	present. A value of 2 means that the page is now available. Disabling
+	interrupts inhibits APFs. The guest must not enable interrupts
+	before the reason is read, or it may be overwritten by another
+	APF. Since APF uses the same exception vector as a regular page
+	fault, the guest must reset the reason to 0 before it does
+	something that can generate a normal page fault.  If the APF
+	reason is 0 during a page fault, it is a regular page fault.
+
+	During delivery of a type 1 APF, cr2 contains a token that will
+	be used to notify the guest when the missing page becomes
+	available. When the page becomes available, a type 2 APF is sent
+	with cr2 set to the token associated with the page. There is a
+	special kind of token, 0xffffffff, which tells the vcpu that it
+	should wake up all processes waiting for APFs and that no
+	individual type 2 APFs will be sent.
+
+	If APF is disabled while there are outstanding APFs, they will
+	not be delivered.
+
+	Currently a type 2 APF will always be delivered on the same vcpu as
+	the type 1 APF was, but the guest should not rely on that.
+
+MSR_KVM_STEAL_TIME: 0x4b564d03
+
+	data: 64-byte aligned physical address of a memory area which must be
+	in guest RAM, plus an enable bit in bit 0. This memory is expected to
+	hold a copy of the following structure:
+
+	struct kvm_steal_time {
+		__u64 steal;
+		__u32 version;
+		__u32 flags;
+		__u8  preempted;
+		__u8  u8_pad[3];
+		__u32 pad[11];
+	}
+
+	whose data will be filled in by the hypervisor periodically. Only one
+	write, or registration, is needed for each VCPU. The interval between
+	updates of this structure is arbitrary and implementation-dependent.
+	The hypervisor may update this structure at any time it sees fit until
+	anything with bit0 == 0 is written to it. The guest is required to make
+	sure this structure is initialized to zero.
+
+	Fields have the following meanings:
+
+	version: a sequence counter. In other words, the guest has to check
+	this field before and after grabbing time information and make
+	sure they are both equal and even. An odd version indicates an
+	in-progress update.
+
+	flags: At this point, always zero. May be used to indicate
+	changes in this structure in the future.
+
+	steal: the amount of time in which this vCPU did not run, in
+	nanoseconds. Time during which the vcpu is idle will not be
+	reported as steal time.
+
+	preempted: indicates whether the vCPU that owns this struct has
+	been preempted. Non-zero values mean the vCPU has been preempted.
+	Zero means the vCPU is not preempted. NOTE: it is always zero if
+	the hypervisor doesn't support this field.
+
+MSR_KVM_EOI_EN: 0x4b564d04
+	data: Bit 0 is 1 when PV end of interrupt is enabled on the vcpu; 0
+	when disabled.  Bit 1 is reserved and must be zero.  When PV end of
+	interrupt is enabled (bit 0 set), bits 63-2 hold a 4-byte aligned
+	physical address of a 4 byte memory area which must be in guest RAM and
+	must be zeroed.
+
+	The first, least significant bit of the 4 byte memory location will be
+	written to by the hypervisor, typically at the time of interrupt
+	injection.  A value of 1 means that the guest can skip writing EOI to
+	the apic (using MSR or MMIO write); instead, it is sufficient to signal
+	EOI by clearing the bit in guest memory - this location will
+	later be polled by the hypervisor.
+	A value of 0 means that the EOI write is required.
+
+	It is always safe for the guest to ignore the optimization and perform
+	the APIC EOI write anyway.
+
+	The hypervisor is guaranteed to only modify this least
+	significant bit while in the current VCPU context; this means that the
+	guest does not need to use either a lock prefix or memory ordering
+	primitives to synchronise with the hypervisor.
+
+	However, the hypervisor can set and clear this memory bit at any time.
+	Therefore, to make sure the hypervisor does not interrupt the guest
+	and clear the least significant bit in the memory area in the window
+	between the guest testing it (to detect whether it can skip the EOI
+	apic write) and the guest clearing it (to signal EOI to the
+	hypervisor), the guest must both read the least significant bit in
+	the memory area and clear it using a single CPU instruction, such as
+	test-and-clear or compare-and-exchange.
+
+MSR_KVM_POLL_CONTROL: 0x4b564d05
+	Control host-side polling.
+
+	data: Bit 0 enables (1) or disables (0) host-side HLT polling logic.
+
+	KVM guests can request the host not to poll on HLT, for example if
+	they are performing polling themselves.
+
diff --git a/Documentation/virt/kvm/nested-vmx.txt b/Documentation/virt/kvm/nested-vmx.txt
new file mode 100644
index 000000000000..97eb1353e962
--- /dev/null
+++ b/Documentation/virt/kvm/nested-vmx.txt
@@ -0,0 +1,240 @@
+Nested VMX
+==========
+
+Overview
+--------
+
+On Intel processors, KVM uses Intel's VMX (Virtual-Machine eXtensions)
+to easily and efficiently run guest operating systems. Normally, these guests
+*cannot* themselves be hypervisors running their own guests, because in VMX,
+guests cannot use VMX instructions.
+
+The "Nested VMX" feature adds this missing capability - of running guest
+hypervisors (which use VMX) with their own nested guests. It does so by
+allowing a guest to use VMX instructions, and correctly and efficiently
+emulating them using the single level of VMX available in the hardware.
+
+We describe in much greater detail the theory behind the nested VMX feature,
+its implementation and its performance characteristics, in the OSDI 2010 paper
+"The Turtles Project: Design and Implementation of Nested Virtualization",
+available at:
+
+	http://www.usenix.org/events/osdi10/tech/full_papers/Ben-Yehuda.pdf
+
+
+Terminology
+-----------
+
+Single-level virtualization has two levels - the host (KVM) and the guests.
+In nested virtualization, we have three levels: The host (KVM), which we call
+L0, the guest hypervisor, which we call L1, and its nested guest, which we
+call L2.
+
+
+Running nested VMX
+------------------
+
+The nested VMX feature is disabled by default. It can be enabled by giving
+the "nested=1" option to the kvm-intel module.
+
+No modifications are required to user space (qemu). However, qemu's default
+emulated CPU type (qemu64) does not list the "VMX" CPU feature, so it must be
+explicitly enabled, by giving qemu one of the following options:
+
+     -cpu host              (emulated CPU has all features of the real CPU)
+
+     -cpu qemu64,+vmx       (add just the vmx feature to a named CPU type)
+
+
+ABIs
+----
+
+Nested VMX aims to present a standard and (eventually) fully-functional VMX
+implementation for a guest hypervisor to use. As such, the official
+specification of the ABI that it provides is Intel's VMX specification,
+namely volume 3B of their "Intel 64 and IA-32 Architectures Software
+Developer's Manual". Not all of VMX's features are currently fully supported,
+but the goal is to eventually support them all, starting with the VMX features
+which are used in practice by popular hypervisors (KVM and others).
+
+As a VMX implementation, nested VMX presents a VMCS structure to L1.
+As mandated by the spec, other than the two fields revision_id and abort,
+this structure is *opaque* to its user, who is not supposed to know or care
+about its internal structure.
Rather, the structure is accessed through the +VMREAD and VMWRITE instructions. +Still, for debugging purposes, KVM developers might be interested to know the +internals of this structure; This is struct vmcs12 from arch/x86/kvm/vmx.c. + +The name "vmcs12" refers to the VMCS that L1 builds for L2. In the code we +also have "vmcs01", the VMCS that L0 built for L1, and "vmcs02" is the VMCS +which L0 builds to actually run L2 - how this is done is explained in the +aforementioned paper. + +For convenience, we repeat the content of struct vmcs12 here. If the internals +of this structure changes, this can break live migration across KVM versions. +VMCS12_REVISION (from vmx.c) should be changed if struct vmcs12 or its inner +struct shadow_vmcs is ever changed. + + typedef u64 natural_width; + struct __packed vmcs12 { + /* According to the Intel spec, a VMCS region must start with + * these two user-visible fields */ + u32 revision_id; + u32 abort; + + u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */ + u32 padding[7]; /* room for future expansion */ + + u64 io_bitmap_a; + u64 io_bitmap_b; + u64 msr_bitmap; + u64 vm_exit_msr_store_addr; + u64 vm_exit_msr_load_addr; + u64 vm_entry_msr_load_addr; + u64 tsc_offset; + u64 virtual_apic_page_addr; + u64 apic_access_addr; + u64 ept_pointer; + u64 guest_physical_address; + u64 vmcs_link_pointer; + u64 guest_ia32_debugctl; + u64 guest_ia32_pat; + u64 guest_ia32_efer; + u64 guest_pdptr0; + u64 guest_pdptr1; + u64 guest_pdptr2; + u64 guest_pdptr3; + u64 host_ia32_pat; + u64 host_ia32_efer; + u64 padding64[8]; /* room for future expansion */ + natural_width cr0_guest_host_mask; + natural_width cr4_guest_host_mask; + natural_width cr0_read_shadow; + natural_width cr4_read_shadow; + natural_width cr3_target_value0; + natural_width cr3_target_value1; + natural_width cr3_target_value2; + natural_width cr3_target_value3; + natural_width exit_qualification; + natural_width guest_linear_address; + natural_width guest_cr0; + natural_width guest_cr3; + natural_width guest_cr4; + natural_width guest_es_base; + natural_width guest_cs_base; + natural_width guest_ss_base; + natural_width guest_ds_base; + natural_width guest_fs_base; + natural_width guest_gs_base; + natural_width guest_ldtr_base; + natural_width guest_tr_base; + natural_width guest_gdtr_base; + natural_width guest_idtr_base; + natural_width guest_dr7; + natural_width guest_rsp; + natural_width guest_rip; + natural_width guest_rflags; + natural_width guest_pending_dbg_exceptions; + natural_width guest_sysenter_esp; + natural_width guest_sysenter_eip; + natural_width host_cr0; + natural_width host_cr3; + natural_width host_cr4; + natural_width host_fs_base; + natural_width host_gs_base; + natural_width host_tr_base; + natural_width host_gdtr_base; + natural_width host_idtr_base; + natural_width host_ia32_sysenter_esp; + natural_width host_ia32_sysenter_eip; + natural_width host_rsp; + natural_width host_rip; + natural_width paddingl[8]; /* room for future expansion */ + u32 pin_based_vm_exec_control; + u32 cpu_based_vm_exec_control; + u32 exception_bitmap; + u32 page_fault_error_code_mask; + u32 page_fault_error_code_match; + u32 cr3_target_count; + u32 vm_exit_controls; + u32 vm_exit_msr_store_count; + u32 vm_exit_msr_load_count; + u32 vm_entry_controls; + u32 vm_entry_msr_load_count; + u32 vm_entry_intr_info_field; + u32 vm_entry_exception_error_code; + u32 vm_entry_instruction_len; + u32 tpr_threshold; + u32 secondary_vm_exec_control; + u32 vm_instruction_error; + u32 vm_exit_reason; + u32 
vm_exit_intr_info;
+ u32 vm_exit_intr_error_code;
+ u32 idt_vectoring_info_field;
+ u32 idt_vectoring_error_code;
+ u32 vm_exit_instruction_len;
+ u32 vmx_instruction_info;
+ u32 guest_es_limit;
+ u32 guest_cs_limit;
+ u32 guest_ss_limit;
+ u32 guest_ds_limit;
+ u32 guest_fs_limit;
+ u32 guest_gs_limit;
+ u32 guest_ldtr_limit;
+ u32 guest_tr_limit;
+ u32 guest_gdtr_limit;
+ u32 guest_idtr_limit;
+ u32 guest_es_ar_bytes;
+ u32 guest_cs_ar_bytes;
+ u32 guest_ss_ar_bytes;
+ u32 guest_ds_ar_bytes;
+ u32 guest_fs_ar_bytes;
+ u32 guest_gs_ar_bytes;
+ u32 guest_ldtr_ar_bytes;
+ u32 guest_tr_ar_bytes;
+ u32 guest_interruptibility_info;
+ u32 guest_activity_state;
+ u32 guest_sysenter_cs;
+ u32 host_ia32_sysenter_cs;
+ u32 padding32[8]; /* room for future expansion */
+ u16 virtual_processor_id;
+ u16 guest_es_selector;
+ u16 guest_cs_selector;
+ u16 guest_ss_selector;
+ u16 guest_ds_selector;
+ u16 guest_fs_selector;
+ u16 guest_gs_selector;
+ u16 guest_ldtr_selector;
+ u16 guest_tr_selector;
+ u16 host_es_selector;
+ u16 host_cs_selector;
+ u16 host_ss_selector;
+ u16 host_ds_selector;
+ u16 host_fs_selector;
+ u16 host_gs_selector;
+ u16 host_tr_selector;
+ };
+
+
+Authors
+-------
+
+These patches were written by:
+ Abel Gordon, abelg il.ibm.com
+ Nadav Har'El, nyh il.ibm.com
+ Orit Wasserman, oritw il.ibm.com
+ Ben-Ami Yassor, benami il.ibm.com
+ Muli Ben-Yehuda, muli il.ibm.com
+
+With contributions by:
+ Anthony Liguori, aliguori us.ibm.com
+ Mike Day, mdday us.ibm.com
+ Michael Factor, factor il.ibm.com
+ Zvi Dubitzky, dubi il.ibm.com
+
+And valuable reviews by:
+ Avi Kivity, avi redhat.com
+ Gleb Natapov, gleb redhat.com
+ Marcelo Tosatti, mtosatti redhat.com
+ Kevin Tian, kevin.tian intel.com
+ and others.
diff --git a/Documentation/virt/kvm/ppc-pv.txt b/Documentation/virt/kvm/ppc-pv.txt
new file mode 100644
index 000000000000..e26115ce4258
--- /dev/null
+++ b/Documentation/virt/kvm/ppc-pv.txt
@@ -0,0 +1,212 @@
+The PPC KVM paravirtual interface
+=================================
+
+The basic execution principle by which KVM on PowerPC works is to run all
+kernel space code in PR=1, i.e. in user mode. This way we trap all privileged
+instructions and can emulate them accordingly.
+
+Unfortunately that is also the downfall. There are quite a few privileged
+instructions that needlessly return us to the hypervisor even though they
+could be handled differently.
+
+This is what the PPC PV interface helps with. It takes privileged instructions
+and transforms them into unprivileged ones with some help from the hypervisor.
+This cuts down virtualization costs by about 50% on some of my benchmarks.
+
+The code for that interface can be found in arch/powerpc/kernel/kvm*
+
+Querying for existence
+======================
+
+To find out if we're running on KVM or not, we leverage the device tree. When
+Linux is running on KVM, a node /hypervisor exists. That node contains a
+compatible property with the value "linux,kvm".
+
+Once you have determined you're running under a PV capable KVM, you can use
+hypercalls as described below.
+
+KVM hypercalls
+==============
+
+Inside the device tree's /hypervisor node there's a property called
+'hypercall-instructions'. This property contains at most 4 opcodes that make
+up the hypercall. To call a hypercall, just call these instructions.
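+
+As an illustration, the detection step might look roughly like this in
+kernel code (a sketch only; the helper name is made up, and it assumes the
+standard kernel device tree API):
+
+ #include <linux/of.h>
+
+ static bool running_on_pv_kvm(void)
+ {
+         struct device_node *hyper;
+         bool ret;
+
+         hyper = of_find_node_by_path("/hypervisor");
+         if (!hyper)
+                 return false;
+
+         /* A PV capable KVM advertises "linux,kvm" here. */
+         ret = of_device_is_compatible(hyper, "linux,kvm");
+         of_node_put(hyper);
+         return ret;
+ }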
+
+The hypercall parameters are as follows:
+
+ Register  IN                 OUT
+
+ r0        -                  volatile
+ r3        1st parameter      Return code
+ r4        2nd parameter      1st output value
+ r5        3rd parameter      2nd output value
+ r6        4th parameter      3rd output value
+ r7        5th parameter      4th output value
+ r8        6th parameter      5th output value
+ r9        7th parameter      6th output value
+ r10       8th parameter      7th output value
+ r11       hypercall number   8th output value
+ r12       -                  volatile
+
+Hypercall definitions are shared in generic code, so the same hypercall numbers
+apply for x86 and powerpc alike, with the exception that each KVM hypercall
+also needs to be ORed with the KVM vendor code, which is (42 << 16).
+
+Return codes can be as follows:
+
+ Code  Meaning
+
+ 0     Success
+ 12    Hypercall not implemented
+ <0    Error
+
+The magic page
+==============
+
+To enable communication between the hypervisor and guest there is a new shared
+page that contains parts of supervisor visible register state. The guest can
+map this shared page using the KVM hypercall KVM_HC_PPC_MAP_MAGIC_PAGE.
+
+Once this hypercall is issued, the guest always gets the magic page mapped at
+the desired location. The first parameter indicates the effective address when
+the MMU is enabled. The second parameter indicates the address in real mode, if
+applicable to the target. For now, we always map the page to -4096. This way we
+can access it using absolute load and store functions. The following
+instruction reads the first field of the magic page:
+
+ ld rX, -4096(0)
+
+The interface is designed to be extensible should there be a need later to add
+additional registers to the magic page. If you add fields to the magic page,
+also define a new hypercall feature to indicate that the host can give you more
+registers. Make use of the additional features only if the host supports them.
+
+The magic page layout is described by struct kvm_vcpu_arch_shared
+in arch/powerpc/include/asm/kvm_para.h.
+
+Magic page features
+===================
+
+When mapping the magic page using the KVM hypercall KVM_HC_PPC_MAP_MAGIC_PAGE,
+a second return value is passed to the guest. This second return value contains
+a bitmap of the features available inside the magic page.
+
+The following enhancements to the magic page are currently available:
+
+ KVM_MAGIC_FEAT_SR             Maps SR registers r/w in the magic page
+ KVM_MAGIC_FEAT_MAS0_TO_SPRG7  Maps MASn, ESR, PIR and high SPRGs
+
+For enhanced features in the magic page, please check for the existence of
+each feature before using it!
+
+Magic page flags
+================
+
+In addition to features that indicate whether a host is capable of a particular
+feature, we also have a channel for a guest to tell the host whether it's
+capable of something. This is what we call "flags".
+
+Flags are passed to the host in the low 12 bits of the Effective Address.
+
+The following flags are currently available for a guest to expose:
+
+ MAGIC_PAGE_FLAG_NOT_MAPPED_NX  Guest handles NX bits correctly wrt magic page
+
+MSR bits
+========
+
+The MSR contains bits that require hypervisor intervention and bits that do
+not require direct hypervisor intervention because they only get interpreted
+when entering the guest or don't have any impact on the hypervisor's behavior.
+
+The following bits are safe to be set inside the guest:
+
+ MSR_EE
+ MSR_RI
+
+If any other bit changes in the MSR, please still use mtmsr(d).
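+
+Tying these pieces together, mapping the magic page and honoring the
+feature bitmap might look as follows. This is a sketch only:
+kvm_hypercall2() is a hypothetical wrapper around the device tree's
+'hypercall-instructions' sequence, taking the hypercall number plus two
+parameters and storing the first output value (r4) through its last
+argument.
+
+ #define KVM_VENDOR_CODE (42 << 16)
+
+ static unsigned long magic_page_features;
+
+ /* Hypothetical: r11 = nr, r3/r4 = parameters; returns r3 and
+  * stores r4 (the feature bitmap here) through @out. */
+ long kvm_hypercall2(unsigned long nr, unsigned long p1,
+                     unsigned long p2, unsigned long *out);
+
+ static void map_magic_page(unsigned long ea, unsigned long ra)
+ {
+         unsigned long features;
+
+         /* KVM hypercall numbers must be ORed with the vendor code. */
+         if (kvm_hypercall2(KVM_HC_PPC_MAP_MAGIC_PAGE | KVM_VENDOR_CODE,
+                            ea, ra, &features))
+                 return;
+
+         /* Use enhancements such as the r/w mapped SRs only if the
+          * host advertised them. */
+         magic_page_features = features;
+ }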
+
+Patched instructions
+====================
+
+The "ld" and "std" instructions are transformed to "lwz" and "stw" instructions
+respectively on 32 bit systems, with an added offset of 4 to accommodate big
+endianness.
+
+The following is a list of the mappings the Linux kernel performs when running
+as a guest. Implementing any of those mappings is optional, as the instruction
+traps also act on the shared page. So calling privileged instructions still
+works as before.
+
+From            To
+====            ==
+
+mfmsr rX        ld rX, magic_page->msr
+mfsprg rX, 0    ld rX, magic_page->sprg0
+mfsprg rX, 1    ld rX, magic_page->sprg1
+mfsprg rX, 2    ld rX, magic_page->sprg2
+mfsprg rX, 3    ld rX, magic_page->sprg3
+mfsrr0 rX       ld rX, magic_page->srr0
+mfsrr1 rX       ld rX, magic_page->srr1
+mfdar rX        ld rX, magic_page->dar
+mfdsisr rX      lwz rX, magic_page->dsisr
+
+mtmsr rX        std rX, magic_page->msr
+mtsprg 0, rX    std rX, magic_page->sprg0
+mtsprg 1, rX    std rX, magic_page->sprg1
+mtsprg 2, rX    std rX, magic_page->sprg2
+mtsprg 3, rX    std rX, magic_page->sprg3
+mtsrr0 rX       std rX, magic_page->srr0
+mtsrr1 rX       std rX, magic_page->srr1
+mtdar rX        std rX, magic_page->dar
+mtdsisr rX      stw rX, magic_page->dsisr
+
+tlbsync         nop
+
+mtmsrd rX, 0    b <special mtmsr section>
+mtmsr rX        b <special mtmsr section>
+
+mtmsrd rX, 1    b <special mtmsrd section>
+
+[Book3S only]
+mtsrin rX, rY   b <special mtsrin section>
+
+[BookE only]
+wrteei [0|1]    b <special wrteei section>
+
+
+Some instructions require more logic to determine what's going on than a load
+or store instruction can deliver. To enable patching of those, we keep some
+RAM around where we can live translate instructions to. What happens is the
+following:
+
+ 1) copy emulation code to memory
+ 2) patch that code to fit the emulated instruction
+ 3) patch that code to return to the original pc + 4
+ 4) patch the original instruction to branch to the new code
+
+That way we can inject an arbitrary amount of code as replacement for a single
+instruction. This allows us to check for pending interrupts when setting EE=1,
+for example.
+
+Hypercall ABIs in KVM on PowerPC
+================================
+1) KVM hypercalls (ePAPR)
+
+These are ePAPR compliant hypercall implementations (mentioned above). Even
+generic hypercalls are implemented here, like the ePAPR idle hcall. These are
+available on all targets.
+
+2) PAPR hypercalls
+
+PAPR hypercalls are needed to run server PowerPC PAPR guests (-M pseries in
+QEMU). These are the same hypercalls that pHyp, the POWER hypervisor,
+implements. Some of them are handled in the kernel, some are handled in user
+space. This is only available on book3s_64.
+
+3) OSI hypercalls
+
+Mac-on-Linux is another user of KVM on PowerPC, which has had its own hypercall
+interface since long before KVM. This is supported to maintain compatibility.
+All these hypercalls get forwarded to user space. This is only useful on
+book3s_32, but can be used with book3s_64 as well.
diff --git a/Documentation/virt/kvm/review-checklist.txt b/Documentation/virt/kvm/review-checklist.txt
new file mode 100644
index 000000000000..499af499e296
--- /dev/null
+++ b/Documentation/virt/kvm/review-checklist.txt
@@ -0,0 +1,38 @@
+Review checklist for kvm patches
+================================
+
+1. The patch must follow Documentation/process/coding-style.rst and
+ Documentation/process/submitting-patches.rst.
+
+2. Patches should be against the kvm.git master branch.
+
+3. If the patch introduces or modifies a new userspace API:
+ - the API must be documented in Documentation/virt/kvm/api.txt
+ - the API must be discoverable using KVM_CHECK_EXTENSION
+
+4. New state must include support for save/restore.
+
+5.
New features must default to off (userspace should explicitly request them). + Performance improvements can and should default to on. + +6. New cpu features should be exposed via KVM_GET_SUPPORTED_CPUID2 + +7. Emulator changes should be accompanied by unit tests for qemu-kvm.git + kvm/test directory. + +8. Changes should be vendor neutral when possible. Changes to common code + are better than duplicating changes to vendor code. + +9. Similarly, prefer changes to arch independent code than to arch dependent + code. + +10. User/kernel interfaces and guest/host interfaces must be 64-bit clean + (all variables and sizes naturally aligned on 64-bit; use specific types + only - u64 rather than ulong). + +11. New guest visible features must either be documented in a hardware manual + or be accompanied by documentation. + +12. Features must be robust against reset and kexec - for example, shared + host/guest memory must be unshared to prevent the host from writing to + guest memory that the guest has not reserved for this purpose. diff --git a/Documentation/virt/kvm/s390-diag.txt b/Documentation/virt/kvm/s390-diag.txt new file mode 100644 index 000000000000..7c52e5f8b210 --- /dev/null +++ b/Documentation/virt/kvm/s390-diag.txt @@ -0,0 +1,83 @@ +The s390 DIAGNOSE call on KVM +============================= + +KVM on s390 supports the DIAGNOSE call for making hypercalls, both for +native hypercalls and for selected hypercalls found on other s390 +hypervisors. + +Note that bits are numbered as by the usual s390 convention (most significant +bit on the left). + + +General remarks +--------------- + +DIAGNOSE calls by the guest cause a mandatory intercept. This implies +all supported DIAGNOSE calls need to be handled by either KVM or its +userspace. + +All DIAGNOSE calls supported by KVM use the RS-a format: + +-------------------------------------- +| '83' | R1 | R3 | B2 | D2 | +-------------------------------------- +0 8 12 16 20 31 + +The second-operand address (obtained by the base/displacement calculation) +is not used to address data. Instead, bits 48-63 of this address specify +the function code, and bits 0-47 are ignored. + +The supported DIAGNOSE function codes vary by the userspace used. For +DIAGNOSE function codes not specific to KVM, please refer to the +documentation for the s390 hypervisors defining them. + + +DIAGNOSE function code 'X'500' - KVM virtio functions +----------------------------------------------------- + +If the function code specifies 0x500, various virtio-related functions +are performed. + +General register 1 contains the virtio subfunction code. Supported +virtio subfunctions depend on KVM's userspace. Generally, userspace +provides either s390-virtio (subcodes 0-2) or virtio-ccw (subcode 3). + +Upon completion of the DIAGNOSE instruction, general register 2 contains +the function's return code, which is either a return code or a subcode +specific value. + +Subcode 0 - s390-virtio notification and early console printk + Handled by userspace. + +Subcode 1 - s390-virtio reset + Handled by userspace. + +Subcode 2 - s390-virtio set status + Handled by userspace. + +Subcode 3 - virtio-ccw notification + Handled by either userspace or KVM (ioeventfd case). + + General register 2 contains a subchannel-identification word denoting + the subchannel of the virtio-ccw proxy device to be notified. + + General register 3 contains the number of the virtqueue to be notified. + + General register 4 contains a 64bit identifier for KVM usage (the + kvm_io_bus cookie). 
If general register 4 does not contain a valid + identifier, it is ignored. + + After completion of the DIAGNOSE call, general register 2 may contain + a 64bit identifier (in the kvm_io_bus cookie case), or a negative + error value, if an internal error occurred. + + See also the virtio standard for a discussion of this hypercall. + + +DIAGNOSE function code 'X'501 - KVM breakpoint +---------------------------------------------- + +If the function code specifies 0x501, breakpoint functions may be performed. +This function code is handled by userspace. + +This diagnose function code has no subfunctions and uses no parameters. diff --git a/Documentation/virt/kvm/timekeeping.txt b/Documentation/virt/kvm/timekeeping.txt new file mode 100644 index 000000000000..76808a17ad84 --- /dev/null +++ b/Documentation/virt/kvm/timekeeping.txt @@ -0,0 +1,612 @@ + + Timekeeping Virtualization for X86-Based Architectures + + Zachary Amsden + Copyright (c) 2010, Red Hat. All rights reserved. + +1) Overview +2) Timing Devices +3) TSC Hardware +4) Virtualization Problems + +========================================================================= + +1) Overview + +One of the most complicated parts of the X86 platform, and specifically, +the virtualization of this platform is the plethora of timing devices available +and the complexity of emulating those devices. In addition, virtualization of +time introduces a new set of challenges because it introduces a multiplexed +division of time beyond the control of the guest CPU. + +First, we will describe the various timekeeping hardware available, then +present some of the problems which arise and solutions available, giving +specific recommendations for certain classes of KVM guests. + +The purpose of this document is to collect data and information relevant to +timekeeping which may be difficult to find elsewhere, specifically, +information relevant to KVM and hardware-based virtualization. + +========================================================================= + +2) Timing Devices + +First we discuss the basic hardware devices available. TSC and the related +KVM clock are special enough to warrant a full exposition and are described in +the following section. + +2.1) i8254 - PIT + +One of the first timer devices available is the programmable interrupt timer, +or PIT. The PIT has a fixed frequency 1.193182 MHz base clock and three +channels which can be programmed to deliver periodic or one-shot interrupts. +These three channels can be configured in different modes and have individual +counters. Channel 1 and 2 were not available for general use in the original +IBM PC, and historically were connected to control RAM refresh and the PC +speaker. Now the PIT is typically integrated as part of an emulated chipset +and a separate physical PIT is not used. + +The PIT uses I/O ports 0x40 - 0x43. Access to the 16-bit counters is done +using single or multiple byte access to the I/O ports. There are 6 modes +available, but not all modes are available to all timers, as only timer 2 +has a connected gate input, required for modes 1 and 5. The gate line is +controlled by port 61h, bit 0, as illustrated in the following diagram. 
+ + -------------- ---------------- +| | | | +| 1.1932 MHz |---------->| CLOCK OUT | ---------> IRQ 0 +| Clock | | | | + -------------- | +->| GATE TIMER 0 | + | ---------------- + | + | ---------------- + | | | + |------>| CLOCK OUT | ---------> 66.3 KHZ DRAM + | | | (aka /dev/null) + | +->| GATE TIMER 1 | + | ---------------- + | + | ---------------- + | | | + |------>| CLOCK OUT | ---------> Port 61h, bit 5 + | | | +Port 61h, bit 0 ---------->| GATE TIMER 2 | \_.---- ____ + ---------------- _| )--|LPF|---Speaker + / *---- \___/ +Port 61h, bit 1 -----------------------------------/ + +The timer modes are now described. + +Mode 0: Single Timeout. This is a one-shot software timeout that counts down + when the gate is high (always true for timers 0 and 1). When the count + reaches zero, the output goes high. + +Mode 1: Triggered One-shot. The output is initially set high. When the gate + line is set high, a countdown is initiated (which does not stop if the gate is + lowered), during which the output is set low. When the count reaches zero, + the output goes high. + +Mode 2: Rate Generator. The output is initially set high. When the countdown + reaches 1, the output goes low for one count and then returns high. The value + is reloaded and the countdown automatically resumes. If the gate line goes + low, the count is halted. If the output is low when the gate is lowered, the + output automatically goes high (this only affects timer 2). + +Mode 3: Square Wave. This generates a high / low square wave. The count + determines the length of the pulse, which alternates between high and low + when zero is reached. The count only proceeds when gate is high and is + automatically reloaded on reaching zero. The count is decremented twice at + each clock to generate a full high / low cycle at the full periodic rate. + If the count is even, the clock remains high for N/2 counts and low for N/2 + counts; if the clock is odd, the clock is high for (N+1)/2 counts and low + for (N-1)/2 counts. Only even values are latched by the counter, so odd + values are not observed when reading. This is the intended mode for timer 2, + which generates sine-like tones by low-pass filtering the square wave output. + +Mode 4: Software Strobe. After programming this mode and loading the counter, + the output remains high until the counter reaches zero. Then the output + goes low for 1 clock cycle and returns high. The counter is not reloaded. + Counting only occurs when gate is high. + +Mode 5: Hardware Strobe. After programming and loading the counter, the + output remains high. When the gate is raised, a countdown is initiated + (which does not stop if the gate is lowered). When the counter reaches zero, + the output goes low for 1 clock cycle and then returns high. The counter is + not reloaded. + +In addition to normal binary counting, the PIT supports BCD counting. The +command port, 0x43 is used to set the counter and mode for each of the three +timers. + +PIT commands, issued to port 0x43, using the following bit encoding: + +Bit 7-4: Command (See table below) +Bit 3-1: Mode (000 = Mode 0, 101 = Mode 5, 11X = undefined) +Bit 0 : Binary (0) / BCD (1) + +Command table: + +0000 - Latch Timer 0 count for port 0x40 + sample and hold the count to be read in port 0x40; + additional commands ignored until counter is read; + mode bits ignored. 
+ +0001 - Set Timer 0 LSB mode for port 0x40 + set timer to read LSB only and force MSB to zero; + mode bits set timer mode + +0010 - Set Timer 0 MSB mode for port 0x40 + set timer to read MSB only and force LSB to zero; + mode bits set timer mode + +0011 - Set Timer 0 16-bit mode for port 0x40 + set timer to read / write LSB first, then MSB; + mode bits set timer mode + +0100 - Latch Timer 1 count for port 0x41 - as described above +0101 - Set Timer 1 LSB mode for port 0x41 - as described above +0110 - Set Timer 1 MSB mode for port 0x41 - as described above +0111 - Set Timer 1 16-bit mode for port 0x41 - as described above + +1000 - Latch Timer 2 count for port 0x42 - as described above +1001 - Set Timer 2 LSB mode for port 0x42 - as described above +1010 - Set Timer 2 MSB mode for port 0x42 - as described above +1011 - Set Timer 2 16-bit mode for port 0x42 as described above + +1101 - General counter latch + Latch combination of counters into corresponding ports + Bit 3 = Counter 2 + Bit 2 = Counter 1 + Bit 1 = Counter 0 + Bit 0 = Unused + +1110 - Latch timer status + Latch combination of counter mode into corresponding ports + Bit 3 = Counter 2 + Bit 2 = Counter 1 + Bit 1 = Counter 0 + + The output of ports 0x40-0x42 following this command will be: + + Bit 7 = Output pin + Bit 6 = Count loaded (0 if timer has expired) + Bit 5-4 = Read / Write mode + 01 = MSB only + 10 = LSB only + 11 = LSB / MSB (16-bit) + Bit 3-1 = Mode + Bit 0 = Binary (0) / BCD mode (1) + +2.2) RTC + +The second device which was available in the original PC was the MC146818 real +time clock. The original device is now obsolete, and usually emulated by the +system chipset, sometimes by an HPET and some frankenstein IRQ routing. + +The RTC is accessed through CMOS variables, which uses an index register to +control which bytes are read. Since there is only one index register, read +of the CMOS and read of the RTC require lock protection (in addition, it is +dangerous to allow userspace utilities such as hwclock to have direct RTC +access, as they could corrupt kernel reads and writes of CMOS memory). + +The RTC generates an interrupt which is usually routed to IRQ 8. The interrupt +can function as a periodic timer, an additional once a day alarm, and can issue +interrupts after an update of the CMOS registers by the MC146818 is complete. +The type of interrupt is signalled in the RTC status registers. + +The RTC will update the current time fields by battery power even while the +system is off. The current time fields should not be read while an update is +in progress, as indicated in the status register. + +The clock uses a 32.768kHz crystal, so bits 6-4 of register A should be +programmed to a 32kHz divider if the RTC is to count seconds. 
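+
+A minimal read sequence honoring the update-in-progress bit (bit 7 of
+Register A in the map below) might look like this - a sketch that assumes
+the standard PC index/data ports 0x70/0x71 and kernel-style outb()/inb(),
+and ignores the locking concerns noted above:
+
+ static unsigned char cmos_read(unsigned char reg)
+ {
+         outb(reg, 0x70);        /* index register */
+         return inb(0x71);       /* data register */
+ }
+
+ static unsigned char rtc_read_second(void)
+ {
+         /* Wait out a running update cycle... */
+         while (cmos_read(0x0A) & 0x80)
+                 ;
+         /* ...then read the current second (BCD). */
+         return cmos_read(0x00);
+ }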
+
+This is the RAM map originally used for the RTC/CMOS:
+
+Location  Size  Description
+------------------------------------------
+00h       byte  Current second (BCD)
+01h       byte  Seconds alarm (BCD)
+02h       byte  Current minute (BCD)
+03h       byte  Minutes alarm (BCD)
+04h       byte  Current hour (BCD)
+05h       byte  Hours alarm (BCD)
+06h       byte  Current day of week (BCD)
+07h       byte  Current day of month (BCD)
+08h       byte  Current month (BCD)
+09h       byte  Current year (BCD)
+0Ah       byte  Register A
+ bit 7   = Update in progress
+ bit 6-4 = Divider for clock
+  000 = 4.194 MHz
+  001 = 1.049 MHz
+  010 = 32 kHz
+  10X = test modes
+  110 = reset / disable
+  111 = reset / disable
+ bit 3-0 = Rate selection for periodic interrupt
+  000 = periodic timer disabled
+  001 = 3.90625 us
+  010 = 7.8125 us
+  011 = .122070 ms
+  100 = .244141 ms
+   ...
+  1101 = 125 ms
+  1110 = 250 ms
+  1111 = 500 ms
+0Bh       byte  Register B
+ bit 7 = Run (0) / Halt (1)
+ bit 6 = Periodic interrupt enable
+ bit 5 = Alarm interrupt enable
+ bit 4 = Update-ended interrupt enable
+ bit 3 = Square wave interrupt enable
+ bit 2 = BCD calendar (0) / Binary (1)
+ bit 1 = 12-hour mode (0) / 24-hour mode (1)
+ bit 0 = 0 (DST off) / 1 (DST enabled)
+0Ch       byte  Register C (read only)
+ bit 7 = interrupt request flag (IRQF)
+ bit 6 = periodic interrupt flag (PF)
+ bit 5 = alarm interrupt flag (AF)
+ bit 4 = update interrupt flag (UF)
+ bit 3-0 = reserved
+0Dh       byte  Register D (read only)
+ bit 7 = RTC has power
+ bit 6-0 = reserved
+32h       byte  Current century BCD (*)
+ (*) location vendor specific and now determined from ACPI global tables
+
+2.3) APIC
+
+On Pentium and later processors, an on-board timer is available to each CPU
+as part of the Advanced Programmable Interrupt Controller. The APIC is
+accessed through memory-mapped registers and provides interrupt service to
+each CPU, used for IPIs and local timer interrupts.
+
+Although in theory the APIC is a safe and stable source for local interrupts,
+in practice, many bugs and glitches have occurred due to the special nature of
+the APIC CPU-local memory-mapped hardware. Beware that CPU errata may affect
+the use of the APIC and that workarounds may be required. In addition, some of
+these workarounds pose unique constraints for virtualization - requiring either
+extra overhead incurred from extra reads of memory-mapped I/O or additional
+functionality that may be more computationally expensive to implement.
+
+Since the APIC is documented quite well in the Intel and AMD manuals, we will
+avoid repetition of the detail here. It should be pointed out that the APIC
+timer is programmed through the LVT (local vector table) register, is capable
+of one-shot or periodic operation, and is based on the bus clock divided down
+by the programmable divider register.
+
+2.4) HPET
+
+HPET is quite complex, and was originally intended to replace the PIT / RTC
+support of the X86 PC. It remains to be seen whether that will be the case, as
+the de facto standard of PC hardware is to emulate these older devices. Some
+systems designated as legacy free may support only the HPET as a hardware
+timer device.
+
+The HPET spec is rather loose and vague, requiring at least 3 hardware timers,
+but allowing implementation freedom to support many more. It also imposes no
+fixed rate on the timer frequency, but does impose some extremal values on
+frequency, error and slew.
+
+In general, the HPET is recommended as a high precision (compared to PIT / RTC)
+time source which is independent of local variation (as there is only one HPET
+in any given system).
The HPET is also memory-mapped, and its presence is +indicated through ACPI tables by the BIOS. + +Detailed specification of the HPET is beyond the current scope of this +document, as it is also very well documented elsewhere. + +2.5) Offboard Timers + +Several cards, both proprietary (watchdog boards) and commonplace (e1000) have +timing chips built into the cards which may have registers which are accessible +to kernel or user drivers. To the author's knowledge, using these to generate +a clocksource for a Linux or other kernel has not yet been attempted and is in +general frowned upon as not playing by the agreed rules of the game. Such a +timer device would require additional support to be virtualized properly and is +not considered important at this time as no known operating system does this. + +========================================================================= + +3) TSC Hardware + +The TSC or time stamp counter is relatively simple in theory; it counts +instruction cycles issued by the processor, which can be used as a measure of +time. In practice, due to a number of problems, it is the most complicated +timekeeping device to use. + +The TSC is represented internally as a 64-bit MSR which can be read with the +RDMSR, RDTSC, or RDTSCP (when available) instructions. In the past, hardware +limitations made it possible to write the TSC, but generally on old hardware it +was only possible to write the low 32-bits of the 64-bit counter, and the upper +32-bits of the counter were cleared. Now, however, on Intel processors family +0Fh, for models 3, 4 and 6, and family 06h, models e and f, this restriction +has been lifted and all 64-bits are writable. On AMD systems, the ability to +write the TSC MSR is not an architectural guarantee. + +The TSC is accessible from CPL-0 and conditionally, for CPL > 0 software by +means of the CR4.TSD bit, which when enabled, disables CPL > 0 TSC access. + +Some vendors have implemented an additional instruction, RDTSCP, which returns +atomically not just the TSC, but an indicator which corresponds to the +processor number. This can be used to index into an array of TSC variables to +determine offset information in SMP systems where TSCs are not synchronized. +The presence of this instruction must be determined by consulting CPUID feature +bits. + +Both VMX and SVM provide extension fields in the virtualization hardware which +allows the guest visible TSC to be offset by a constant. Newer implementations +promise to allow the TSC to additionally be scaled, but this hardware is not +yet widely available. + +3.1) TSC synchronization + +The TSC is a CPU-local clock in most implementations. This means, on SMP +platforms, the TSCs of different CPUs may start at different times depending +on when the CPUs are powered on. Generally, CPUs on the same die will share +the same clock, however, this is not always the case. + +The BIOS may attempt to resynchronize the TSCs during the poweron process and +the operating system or other system software may attempt to do this as well. +Several hardware limitations make the problem worse - if it is not possible to +write the full 64-bits of the TSC, it may be impossible to match the TSC in +newly arriving CPUs to that of the rest of the system, resulting in +unsynchronized TSCs. 
This may be done by BIOS or system software, but in +practice, getting a perfectly synchronized TSC will not be possible unless all +values are read from the same clock, which generally only is possible on single +socket systems or those with special hardware support. + +3.2) TSC and CPU hotplug + +As touched on already, CPUs which arrive later than the boot time of the system +may not have a TSC value that is synchronized with the rest of the system. +Either system software, BIOS, or SMM code may actually try to establish the TSC +to a value matching the rest of the system, but a perfect match is usually not +a guarantee. This can have the effect of bringing a system from a state where +TSC is synchronized back to a state where TSC synchronization flaws, however +small, may be exposed to the OS and any virtualization environment. + +3.3) TSC and multi-socket / NUMA + +Multi-socket systems, especially large multi-socket systems are likely to have +individual clocksources rather than a single, universally distributed clock. +Since these clocks are driven by different crystals, they will not have +perfectly matched frequency, and temperature and electrical variations will +cause the CPU clocks, and thus the TSCs to drift over time. Depending on the +exact clock and bus design, the drift may or may not be fixed in absolute +error, and may accumulate over time. + +In addition, very large systems may deliberately slew the clocks of individual +cores. This technique, known as spread-spectrum clocking, reduces EMI at the +clock frequency and harmonics of it, which may be required to pass FCC +standards for telecommunications and computer equipment. + +It is recommended not to trust the TSCs to remain synchronized on NUMA or +multiple socket systems for these reasons. + +3.4) TSC and C-states + +C-states, or idling states of the processor, especially C1E and deeper sleep +states may be problematic for TSC as well. The TSC may stop advancing in such +a state, resulting in a TSC which is behind that of other CPUs when execution +is resumed. Such CPUs must be detected and flagged by the operating system +based on CPU and chipset identifications. + +The TSC in such a case may be corrected by catching it up to a known external +clocksource. + +3.5) TSC frequency change / P-states + +To make things slightly more interesting, some CPUs may change frequency. They +may or may not run the TSC at the same rate, and because the frequency change +may be staggered or slewed, at some points in time, the TSC rate may not be +known other than falling within a range of values. In this case, the TSC will +not be a stable time source, and must be calibrated against a known, stable, +external clock to be a usable source of time. + +Whether the TSC runs at a constant rate or scales with the P-state is model +dependent and must be determined by inspecting CPUID, chipset or vendor +specific MSR fields. + +In addition, some vendors have known bugs where the P-state is actually +compensated for properly during normal operation, but when the processor is +inactive, the P-state may be raised temporarily to service cache misses from +other processors. In such cases, the TSC on halted CPUs could advance faster +than that of non-halted processors. AMD Turion processors are known to have +this problem. + +3.6) TSC and STPCLK / T-states + +External signals given to the processor may also have the effect of stopping +the TSC. 
This is typically done for thermal emergency power control to prevent
+an overheating condition, and typically, there is no way to detect that this
+condition has happened.
+
+3.7) TSC virtualization - VMX
+
+VMX provides conditional trapping of RDTSC, RDMSR, WRMSR and RDTSCP
+instructions, which is enough for full virtualization of TSC in any manner. In
+addition, VMX allows passing through the host TSC plus an additional TSC_OFFSET
+field specified in the VMCS. Special instructions must be used to read and
+write the VMCS field.
+
+3.8) TSC virtualization - SVM
+
+SVM provides conditional trapping of RDTSC, RDMSR, WRMSR and RDTSCP
+instructions, which is enough for full virtualization of TSC in any manner. In
+addition, SVM allows passing through the host TSC plus an additional offset
+field specified in the SVM control block.
+
+3.9) TSC feature bits in Linux
+
+In summary, there is no way to guarantee the TSC remains in perfect
+synchronization unless it is explicitly guaranteed by the architecture. Even
+if so, the TSCs in multi-socket or NUMA systems may still run independently
+despite being locally consistent.
+
+The following feature bits are used by Linux to signal various TSC attributes,
+but they can only be taken to be meaningful for UP or single node systems.
+
+X86_FEATURE_TSC          : The TSC is available in hardware
+X86_FEATURE_RDTSCP       : The RDTSCP instruction is available
+X86_FEATURE_CONSTANT_TSC : The TSC rate is unchanged with P-states
+X86_FEATURE_NONSTOP_TSC  : The TSC does not stop in C-states
+X86_FEATURE_TSC_RELIABLE : TSC sync checks are skipped (VMware)
+
+=========================================================================
+
+4) Virtualization Problems
+
+Timekeeping is especially problematic for virtualization because a number of
+challenges arise. The most obvious problem is that time is now shared between
+the host and, potentially, a number of virtual machines. Thus the virtual
+operating system does not run with 100% usage of the CPU, despite the fact that
+it may very well make that assumption. It may expect it to remain true to very
+exacting bounds when interrupt sources are disabled, but in reality only its
+virtual interrupt sources are disabled, and the machine may still be preempted
+at any time. This causes problems as the passage of real time, the injection
+of machine interrupts and the associated clock sources are no longer completely
+synchronized with real time.
+
+This same problem can occur on native hardware to a degree, as SMM code may
+steal cycles from the OS on X86 systems where SMM mode is used by the BIOS,
+but not in such an extreme fashion. However, the fact that SMM mode may cause
+similar problems to virtualization makes it a good justification for solving
+many of these problems on bare metal.
+
+4.1) Interrupt clocking
+
+One of the most immediate problems that occurs with legacy operating systems
+is that the system timekeeping routines are often designed to keep track of
+time by counting periodic interrupts. These interrupts may come from the PIT
+or the RTC, but the problem is the same: the host virtualization engine may not
+be able to deliver the proper number of interrupts per second, and so guest
+time may fall behind. This is especially problematic if a high interrupt rate
+is selected, such as 1000 Hz, which is unfortunately the default for many Linux
+guests.
+
+There are three approaches to solving this problem; first, it may be possible
+to simply ignore it.
Guests which have a separate time source for tracking +'wall clock' or 'real time' may not need any adjustment of their interrupts to +maintain proper time. If this is not sufficient, it may be necessary to inject +additional interrupts into the guest in order to increase the effective +interrupt rate. This approach leads to complications in extreme conditions, +where host load or guest lag is too much to compensate for, and thus another +solution to the problem has risen: the guest may need to become aware of lost +ticks and compensate for them internally. Although promising in theory, the +implementation of this policy in Linux has been extremely error prone, and a +number of buggy variants of lost tick compensation are distributed across +commonly used Linux systems. + +Windows uses periodic RTC clocking as a means of keeping time internally, and +thus requires interrupt slewing to keep proper time. It does use a low enough +rate (ed: is it 18.2 Hz?) however that it has not yet been a problem in +practice. + +4.2) TSC sampling and serialization + +As the highest precision time source available, the cycle counter of the CPU +has aroused much interest from developers. As explained above, this timer has +many problems unique to its nature as a local, potentially unstable and +potentially unsynchronized source. One issue which is not unique to the TSC, +but is highlighted because of its very precise nature is sampling delay. By +definition, the counter, once read is already old. However, it is also +possible for the counter to be read ahead of the actual use of the result. +This is a consequence of the superscalar execution of the instruction stream, +which may execute instructions out of order. Such execution is called +non-serialized. Forcing serialized execution is necessary for precise +measurement with the TSC, and requires a serializing instruction, such as CPUID +or an MSR read. + +Since CPUID may actually be virtualized by a trap and emulate mechanism, this +serialization can pose a performance issue for hardware virtualization. An +accurate time stamp counter reading may therefore not always be available, and +it may be necessary for an implementation to guard against "backwards" reads of +the TSC as seen from other CPUs, even in an otherwise perfectly synchronized +system. + +4.3) Timespec aliasing + +Additionally, this lack of serialization from the TSC poses another challenge +when using results of the TSC when measured against another time source. As +the TSC is much higher precision, many possible values of the TSC may be read +while another clock is still expressing the same value. + +That is, you may read (T,T+10) while external clock C maintains the same value. +Due to non-serialized reads, you may actually end up with a range which +fluctuates - from (T-1.. T+10). Thus, any time calculated from a TSC, but +calibrated against an external value may have a range of valid values. +Re-calibrating this computation may actually cause time, as computed after the +calibration, to go backwards, compared with time computed before the +calibration. + +This problem is particularly pronounced with an internal time source in Linux, +the kernel time, which is expressed in the theoretically high resolution +timespec - but which advances in much larger granularity intervals, sometimes +at the rate of jiffies, and possibly in catchup modes, at a much larger step. 
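+
+(For reference, the serialized TSC read discussed in section 4.2 is
+commonly written as below - a sketch using GCC inline assembly. LFENCE
+orders RDTSC against preceding loads on Intel CPUs and on AMD CPUs where
+the kernel has made LFENCE dispatch-serializing; CPUID is the fully
+serializing, but slower and trap-prone, alternative.)
+
+ static inline unsigned long long rdtsc_serialized(void)
+ {
+         unsigned int lo, hi;
+
+         /* Keep the TSC read from executing ahead of earlier
+          * instructions in the pipeline. */
+         asm volatile("lfence; rdtsc" : "=a" (lo), "=d" (hi));
+         return ((unsigned long long)hi << 32) | lo;
+ }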
+ +This aliasing requires care in the computation and recalibration of kvmclock +and any other values derived from TSC computation (such as TSC virtualization +itself). + +4.4) Migration + +Migration of a virtual machine raises problems for timekeeping in two ways. +First, the migration itself may take time, during which interrupts cannot be +delivered, and after which, the guest time may need to be caught up. NTP may +be able to help to some degree here, as the clock correction required is +typically small enough to fall in the NTP-correctable window. + +An additional concern is that timers based off the TSC (or HPET, if the raw bus +clock is exposed) may now be running at different rates, requiring compensation +in some way in the hypervisor by virtualizing these timers. In addition, +migrating to a faster machine may preclude the use of a passthrough TSC, as a +faster clock cannot be made visible to a guest without the potential of time +advancing faster than usual. A slower clock is less of a problem, as it can +always be caught up to the original rate. KVM clock avoids these problems by +simply storing multipliers and offsets against the TSC for the guest to convert +back into nanosecond resolution values. + +4.5) Scheduling + +Since scheduling may be based on precise timing and firing of interrupts, the +scheduling algorithms of an operating system may be adversely affected by +virtualization. In theory, the effect is random and should be universally +distributed, but in contrived as well as real scenarios (guest device access, +causes of virtualization exits, possible context switch), this may not always +be the case. The effect of this has not been well studied. + +In an attempt to work around this, several implementations have provided a +paravirtualized scheduler clock, which reveals the true amount of CPU time for +which a virtual machine has been running. + +4.6) Watchdogs + +Watchdog timers, such as the lock detector in Linux may fire accidentally when +running under hardware virtualization due to timer interrupts being delayed or +misinterpretation of the passage of real time. Usually, these warnings are +spurious and can be ignored, but in some circumstances it may be necessary to +disable such detection. + +4.7) Delays and precision timing + +Precise timing and delays may not be possible in a virtualized system. This +can happen if the system is controlling physical hardware, or issues delays to +compensate for slower I/O to and from devices. The first issue is not solvable +in general for a virtualized system; hardware control software can't be +adequately virtualized without a full real-time operating system, which would +require an RT aware virtualization platform. + +The second issue may cause performance problems, but this is unlikely to be a +significant issue. In many cases these delays may be eliminated through +configuration or paravirtualization. + +4.8) Covert channels and leaks + +In addition to the above problems, time information will inevitably leak to the +guest about the host in anything but a perfect implementation of virtualized +time. This may allow the guest to infer the presence of a hypervisor (as in a +red-pill type detection), and it may allow information to leak between guests +by using CPU utilization itself as a signalling channel. Preventing such +problems would require completely isolated virtual time which may not track +real time any longer. 
This may be useful in certain security or QA contexts, +but in general isn't recommended for real-world deployment scenarios. diff --git a/Documentation/virt/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst new file mode 100644 index 000000000000..5feb3706a7ae --- /dev/null +++ b/Documentation/virt/kvm/vcpu-requests.rst @@ -0,0 +1,307 @@ +================= +KVM VCPU Requests +================= + +Overview +======== + +KVM supports an internal API enabling threads to request a VCPU thread to +perform some activity. For example, a thread may request a VCPU to flush +its TLB with a VCPU request. The API consists of the following functions:: + + /* Check if any requests are pending for VCPU @vcpu. */ + bool kvm_request_pending(struct kvm_vcpu *vcpu); + + /* Check if VCPU @vcpu has request @req pending. */ + bool kvm_test_request(int req, struct kvm_vcpu *vcpu); + + /* Clear request @req for VCPU @vcpu. */ + void kvm_clear_request(int req, struct kvm_vcpu *vcpu); + + /* + * Check if VCPU @vcpu has request @req pending. When the request is + * pending it will be cleared and a memory barrier, which pairs with + * another in kvm_make_request(), will be issued. + */ + bool kvm_check_request(int req, struct kvm_vcpu *vcpu); + + /* + * Make request @req of VCPU @vcpu. Issues a memory barrier, which pairs + * with another in kvm_check_request(), prior to setting the request. + */ + void kvm_make_request(int req, struct kvm_vcpu *vcpu); + + /* Make request @req of all VCPUs of the VM with struct kvm @kvm. */ + bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); + +Typically a requester wants the VCPU to perform the activity as soon +as possible after making the request. This means most requests +(kvm_make_request() calls) are followed by a call to kvm_vcpu_kick(), +and kvm_make_all_cpus_request() has the kicking of all VCPUs built +into it. + +VCPU Kicks +---------- + +The goal of a VCPU kick is to bring a VCPU thread out of guest mode in +order to perform some KVM maintenance. To do so, an IPI is sent, forcing +a guest mode exit. However, a VCPU thread may not be in guest mode at the +time of the kick. Therefore, depending on the mode and state of the VCPU +thread, there are two other actions a kick may take. All three actions +are listed below: + +1) Send an IPI. This forces a guest mode exit. +2) Waking a sleeping VCPU. Sleeping VCPUs are VCPU threads outside guest + mode that wait on waitqueues. Waking them removes the threads from + the waitqueues, allowing the threads to run again. This behavior + may be suppressed, see KVM_REQUEST_NO_WAKEUP below. +3) Nothing. When the VCPU is not in guest mode and the VCPU thread is not + sleeping, then there is nothing to do. + +VCPU Mode +--------- + +VCPUs have a mode state, ``vcpu->mode``, that is used to track whether the +guest is running in guest mode or not, as well as some specific +outside guest mode states. The architecture may use ``vcpu->mode`` to +ensure VCPU requests are seen by VCPUs (see "Ensuring Requests Are Seen"), +as well as to avoid sending unnecessary IPIs (see "IPI Reduction"), and +even to ensure IPI acknowledgements are waited upon (see "Waiting for +Acknowledgements"). The following modes are defined: + +OUTSIDE_GUEST_MODE + + The VCPU thread is outside guest mode. + +IN_GUEST_MODE + + The VCPU thread is in guest mode. + +EXITING_GUEST_MODE + + The VCPU thread is transitioning from IN_GUEST_MODE to + OUTSIDE_GUEST_MODE. 
+ +READING_SHADOW_PAGE_TABLES + + The VCPU thread is outside guest mode, but it wants the sender of + certain VCPU requests, namely KVM_REQ_TLB_FLUSH, to wait until the VCPU + thread is done reading the page tables. + +VCPU Request Internals +====================== + +VCPU requests are simply bit indices of the ``vcpu->requests`` bitmap. +This means general bitops, like those documented in [atomic-ops]_ could +also be used, e.g. :: + + clear_bit(KVM_REQ_UNHALT & KVM_REQUEST_MASK, &vcpu->requests); + +However, VCPU request users should refrain from doing so, as it would +break the abstraction. The first 8 bits are reserved for architecture +independent requests, all additional bits are available for architecture +dependent requests. + +Architecture Independent Requests +--------------------------------- + +KVM_REQ_TLB_FLUSH + + KVM's common MMU notifier may need to flush all of a guest's TLB + entries, calling kvm_flush_remote_tlbs() to do so. Architectures that + choose to use the common kvm_flush_remote_tlbs() implementation will + need to handle this VCPU request. + +KVM_REQ_MMU_RELOAD + + When shadow page tables are used and memory slots are removed it's + necessary to inform each VCPU to completely refresh the tables. This + request is used for that. + +KVM_REQ_PENDING_TIMER + + This request may be made from a timer handler run on the host on behalf + of a VCPU. It informs the VCPU thread to inject a timer interrupt. + +KVM_REQ_UNHALT + + This request may be made from the KVM common function kvm_vcpu_block(), + which is used to emulate an instruction that causes a CPU to halt until + one of an architectural specific set of events and/or interrupts is + received (determined by checking kvm_arch_vcpu_runnable()). When that + event or interrupt arrives kvm_vcpu_block() makes the request. This is + in contrast to when kvm_vcpu_block() returns due to any other reason, + such as a pending signal, which does not indicate the VCPU's halt + emulation should stop, and therefore does not make the request. + +KVM_REQUEST_MASK +---------------- + +VCPU requests should be masked by KVM_REQUEST_MASK before using them with +bitops. This is because only the lower 8 bits are used to represent the +request's number. The upper bits are used as flags. Currently only two +flags are defined. + +VCPU Request Flags +------------------ + +KVM_REQUEST_NO_WAKEUP + + This flag is applied to requests that only need immediate attention + from VCPUs running in guest mode. That is, sleeping VCPUs do not need + to be awaken for these requests. Sleeping VCPUs will handle the + requests when they are awaken later for some other reason. + +KVM_REQUEST_WAIT + + When requests with this flag are made with kvm_make_all_cpus_request(), + then the caller will wait for each VCPU to acknowledge its IPI before + proceeding. This flag only applies to VCPUs that would receive IPIs. + If, for example, the VCPU is sleeping, so no IPI is necessary, then + the requesting thread does not wait. This means that this flag may be + safely combined with KVM_REQUEST_NO_WAKEUP. See "Waiting for + Acknowledgements" for more information about requests with + KVM_REQUEST_WAIT. + +VCPU Requests with Associated State +=================================== + +Requesters that want the receiving VCPU to handle new state need to ensure +the newly written state is observable to the receiving VCPU thread's CPU +by the time it observes the request. 
This means a write memory barrier +must be inserted after writing the new state and before setting the VCPU +request bit. Additionally, on the receiving VCPU thread's side, a +corresponding read barrier must be inserted after reading the request bit +and before proceeding to read the new state associated with it. See +scenario 3, Message and Flag, of [lwn-mb]_ and the kernel documentation +[memory-barriers]_. + +The pair of functions, kvm_check_request() and kvm_make_request(), provide +the memory barriers, allowing this requirement to be handled internally by +the API. + +Ensuring Requests Are Seen +========================== + +When making requests to VCPUs, we want to avoid the receiving VCPU +executing in guest mode for an arbitrary long time without handling the +request. We can be sure this won't happen as long as we ensure the VCPU +thread checks kvm_request_pending() before entering guest mode and that a +kick will send an IPI to force an exit from guest mode when necessary. +Extra care must be taken to cover the period after the VCPU thread's last +kvm_request_pending() check and before it has entered guest mode, as kick +IPIs will only trigger guest mode exits for VCPU threads that are in guest +mode or at least have already disabled interrupts in order to prepare to +enter guest mode. This means that an optimized implementation (see "IPI +Reduction") must be certain when it's safe to not send the IPI. One +solution, which all architectures except s390 apply, is to: + +- set ``vcpu->mode`` to IN_GUEST_MODE between disabling the interrupts and + the last kvm_request_pending() check; +- enable interrupts atomically when entering the guest. + +This solution also requires memory barriers to be placed carefully in both +the requesting thread and the receiving VCPU. With the memory barriers we +can exclude the possibility of a VCPU thread observing +!kvm_request_pending() on its last check and then not receiving an IPI for +the next request made of it, even if the request is made immediately after +the check. This is done by way of the Dekker memory barrier pattern +(scenario 10 of [lwn-mb]_). As the Dekker pattern requires two variables, +this solution pairs ``vcpu->mode`` with ``vcpu->requests``. Substituting +them into the pattern gives:: + + CPU1 CPU2 + ================= ================= + local_irq_disable(); + WRITE_ONCE(vcpu->mode, IN_GUEST_MODE); kvm_make_request(REQ, vcpu); + smp_mb(); smp_mb(); + if (kvm_request_pending(vcpu)) { if (READ_ONCE(vcpu->mode) == + IN_GUEST_MODE) { + ...abort guest entry... ...send IPI... + } } + +As stated above, the IPI is only useful for VCPU threads in guest mode or +that have already disabled interrupts. This is why this specific case of +the Dekker pattern has been extended to disable interrupts before setting +``vcpu->mode`` to IN_GUEST_MODE. WRITE_ONCE() and READ_ONCE() are used to +pedantically implement the memory barrier pattern, guaranteeing the +compiler doesn't interfere with ``vcpu->mode``'s carefully planned +accesses. + +IPI Reduction +------------- + +As only one IPI is needed to get a VCPU to check for any/all requests, +then they may be coalesced. This is easily done by having the first IPI +sending kick also change the VCPU mode to something !IN_GUEST_MODE. The +transitional state, EXITING_GUEST_MODE, is used for this purpose. 
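+
+Putting this together, the canonical requester and receiver sides look
+roughly as follows (a sketch only; ``KVM_REQ_EXAMPLE`` and the
+``example_state`` field are made-up names standing in for a real request
+and its associated state)::
+
+    /* Requesting thread: write the state, then make the request and
+     * kick.  kvm_make_request() provides the write barrier ordering
+     * the state update before the request bit. */
+    vcpu->arch.example_state = new_value;
+    kvm_make_request(KVM_REQ_EXAMPLE, vcpu);
+    kvm_vcpu_kick(vcpu);
+
+    /* VCPU thread, while processing kvm_request_pending() before
+     * guest entry: kvm_check_request() provides the read barrier
+     * ordering the request check before the state read. */
+    if (kvm_check_request(KVM_REQ_EXAMPLE, vcpu))
+        handle_example(vcpu, vcpu->arch.example_state);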
+ +Waiting for Acknowledgements +---------------------------- + +Some requests, those with the KVM_REQUEST_WAIT flag set, require IPIs to +be sent, and the acknowledgements to be waited upon, even when the target +VCPU threads are in modes other than IN_GUEST_MODE. For example, one case +is when a target VCPU thread is in READING_SHADOW_PAGE_TABLES mode, which +is set after disabling interrupts. To support these cases, the +KVM_REQUEST_WAIT flag changes the condition for sending an IPI from +checking that the VCPU is IN_GUEST_MODE to checking that it is not +OUTSIDE_GUEST_MODE. + +Request-less VCPU Kicks +----------------------- + +As the determination of whether or not to send an IPI depends on the +two-variable Dekker memory barrier pattern, then it's clear that +request-less VCPU kicks are almost never correct. Without the assurance +that a non-IPI generating kick will still result in an action by the +receiving VCPU, as the final kvm_request_pending() check does for +request-accompanying kicks, then the kick may not do anything useful at +all. If, for instance, a request-less kick was made to a VCPU that was +just about to set its mode to IN_GUEST_MODE, meaning no IPI is sent, then +the VCPU thread may continue its entry without actually having done +whatever it was the kick was meant to initiate. + +One exception is x86's posted interrupt mechanism. In this case, however, +even the request-less VCPU kick is coupled with the same +local_irq_disable() + smp_mb() pattern described above; the ON bit +(Outstanding Notification) in the posted interrupt descriptor takes the +role of ``vcpu->requests``. When sending a posted interrupt, PIR.ON is +set before reading ``vcpu->mode``; dually, in the VCPU thread, +vmx_sync_pir_to_irr() reads PIR after setting ``vcpu->mode`` to +IN_GUEST_MODE. + +Additional Considerations +========================= + +Sleeping VCPUs +-------------- + +VCPU threads may need to consider requests before and/or after calling +functions that may put them to sleep, e.g. kvm_vcpu_block(). Whether they +do or not, and, if they do, which requests need consideration, is +architecture dependent. kvm_vcpu_block() calls kvm_arch_vcpu_runnable() +to check if it should awaken. One reason to do so is to provide +architectures a function where requests may be checked if necessary. + +Clearing Requests +----------------- + +Generally it only makes sense for the receiving VCPU thread to clear a +request. However, in some circumstances, such as when the requesting +thread and the receiving VCPU thread are executed serially, such as when +they are the same thread, or when they are using some form of concurrency +control to temporarily execute synchronously, then it's possible to know +that the request may be cleared immediately, rather than waiting for the +receiving VCPU thread to handle the request in VCPU RUN. The only current +examples of this are kvm_vcpu_block() calls made by VCPUs to block +themselves. A possible side-effect of that call is to make the +KVM_REQ_UNHALT request, which may then be cleared immediately when the +VCPU returns from the call. + +References +========== + +.. [atomic-ops] Documentation/core-api/atomic_ops.rst +.. [memory-barriers] Documentation/memory-barriers.txt +.. [lwn-mb] https://lwn.net/Articles/573436/ diff --git a/Documentation/virt/paravirt_ops.rst b/Documentation/virt/paravirt_ops.rst new file mode 100644 index 000000000000..6b789d27cead --- /dev/null +++ b/Documentation/virt/paravirt_ops.rst @@ -0,0 +1,35 @@ +.. 
SPDX-License-Identifier: GPL-2.0 + +============ +Paravirt_ops +============ + +Linux provides support for different hypervisor virtualization technologies. +Historically different binary kernels would be required in order to support +different hypervisors, this restriction was removed with pv_ops. +Linux pv_ops is a virtualization API which enables support for different +hypervisors. It allows each hypervisor to override critical operations and +allows a single kernel binary to run on all supported execution environments +including native machine -- without any hypervisors. + +pv_ops provides a set of function pointers which represent operations +corresponding to low level critical instructions and high level +functionalities in various areas. pv-ops allows for optimizations at run +time by enabling binary patching of the low-ops critical operations +at boot time. + +pv_ops operations are classified into three categories: + +- simple indirect call + These operations correspond to high level functionality where it is + known that the overhead of indirect call isn't very important. + +- indirect call which allows optimization with binary patch + Usually these operations correspond to low level critical instructions. They + are called frequently and are performance critical. The overhead is + very important. + +- a set of macros for hand written assembly code + Hand written assembly codes (.S files) also need paravirtualization + because they include sensitive instructions or some of code paths in + them are very performance critical. diff --git a/Documentation/virt/uml/UserModeLinux-HOWTO.txt b/Documentation/virt/uml/UserModeLinux-HOWTO.txt new file mode 100644 index 000000000000..87b80f589e1c --- /dev/null +++ b/Documentation/virt/uml/UserModeLinux-HOWTO.txt @@ -0,0 +1,4589 @@ + User Mode Linux HOWTO + User Mode Linux Core Team + Mon Nov 18 14:16:16 EST 2002 + + This document describes the use and abuse of Jeff Dike's User Mode + Linux: a port of the Linux kernel as a normal Intel Linux process. + ______________________________________________________________________ + + Table of Contents + + 1. Introduction + + 1.1 How is User Mode Linux Different? + 1.2 Why Would I Want User Mode Linux? + + 2. Compiling the kernel and modules + + 2.1 Compiling the kernel + 2.2 Compiling and installing kernel modules + 2.3 Compiling and installing uml_utilities + + 3. Running UML and logging in + + 3.1 Running UML + 3.2 Logging in + 3.3 Examples + + 4. UML on 2G/2G hosts + + 4.1 Introduction + 4.2 The problem + 4.3 The solution + + 5. Setting up serial lines and consoles + + 5.1 Specifying the device + 5.2 Specifying the channel + 5.3 Examples + + 6. Setting up the network + + 6.1 General setup + 6.2 Userspace daemons + 6.3 Specifying ethernet addresses + 6.4 UML interface setup + 6.5 Multicast + 6.6 TUN/TAP with the uml_net helper + 6.7 TUN/TAP with a preconfigured tap device + 6.8 Ethertap + 6.9 The switch daemon + 6.10 Slip + 6.11 Slirp + 6.12 pcap + 6.13 Setting up the host yourself + + 7. Sharing Filesystems between Virtual Machines + + 7.1 A warning + 7.2 Using layered block devices + 7.3 Note! + 7.4 Another warning + 7.5 uml_moo : Merging a COW file with its backing file + + 8. Creating filesystems + + 8.1 Create the filesystem file + 8.2 Assign the file to a UML device + 8.3 Creating and mounting the filesystem + + 9. Host file access + + 9.1 Using hostfs + 9.2 hostfs as the root filesystem + 9.3 Building hostfs + + 10. 
The Management Console + 10.1 version + 10.2 halt and reboot + 10.3 config + 10.4 remove + 10.5 sysrq + 10.6 help + 10.7 cad + 10.8 stop + 10.9 go + + 11. Kernel debugging + + 11.1 Starting the kernel under gdb + 11.2 Examining sleeping processes + 11.3 Running ddd on UML + 11.4 Debugging modules + 11.5 Attaching gdb to the kernel + 11.6 Using alternate debuggers + + 12. Kernel debugging examples + + 12.1 The case of the hung fsck + 12.2 Episode 2: The case of the hung fsck + + 13. What to do when UML doesn't work + + 13.1 Strange compilation errors when you build from source + 13.2 (obsolete) + 13.3 A variety of panics and hangs with /tmp on a reiserfs filesystem + 13.4 The compile fails with errors about conflicting types for 'open', 'dup', and 'waitpid' + 13.5 UML doesn't work when /tmp is an NFS filesystem + 13.6 UML hangs on boot when compiled with gprof support + 13.7 syslogd dies with a SIGTERM on startup + 13.8 TUN/TAP networking doesn't work on a 2.4 host + 13.9 You can network to the host but not to other machines on the net + 13.10 I have no root and I want to scream + 13.11 UML build conflict between ptrace.h and ucontext.h + 13.12 The UML BogoMips is exactly half the host's BogoMips + 13.13 When you run UML, it immediately segfaults + 13.14 xterms appear, then immediately disappear + 13.15 Any other panic, hang, or strange behavior + + 14. Diagnosing Problems + + 14.1 Case 1 : Normal kernel panics + 14.2 Case 2 : Tracing thread panics + 14.3 Case 3 : Tracing thread panics caused by other threads + 14.4 Case 4 : Hangs + + 15. Thanks + + 15.1 Code and Documentation + 15.2 Flushing out bugs + 15.3 Buglets and clean-ups + 15.4 Case Studies + 15.5 Other contributions + + + ______________________________________________________________________ + + 1. Introduction + + Welcome to User Mode Linux. It's going to be fun. + + + + 1.1. How is User Mode Linux Different? + + Normally, the Linux Kernel talks straight to your hardware (video + card, keyboard, hard drives, etc), and any programs which run ask the + kernel to operate the hardware, like so: + + + + +-----------+-----------+----+ + | Process 1 | Process 2 | ...| + +-----------+-----------+----+ + | Linux Kernel | + +----------------------------+ + | Hardware | + +----------------------------+ + + + + + The User Mode Linux Kernel is different; instead of talking to the + hardware, it talks to a `real' Linux kernel (called the `host kernel' + from now on), like any other program. Programs can then run inside + User-Mode Linux as if they were running under a normal kernel, like + so: + + + + +----------------+ + | Process 2 | ...| + +-----------+----------------+ + | Process 1 | User-Mode Linux| + +----------------------------+ + | Linux Kernel | + +----------------------------+ + | Hardware | + +----------------------------+ + + + + + + 1.2. Why Would I Want User Mode Linux? + + + 1. If User Mode Linux crashes, your host kernel is still fine. + + 2. You can run a usermode kernel as a non-root user. + + 3. You can debug the User Mode Linux like any normal process. + + 4. You can run gprof (profiling) and gcov (coverage testing). + + 5. You can play with your kernel without breaking things. + + 6. You can use it as a sandbox for testing new apps. + + 7. You can try new development kernels safely. + + 8. You can run different distributions simultaneously. + + 9. It's extremely fun. + + + + + + 2. Compiling the kernel and modules + + + + + 2.1. 
Compiling the kernel + + + Compiling the user mode kernel is just like compiling any other + kernel. Let's go through the steps, using 2.4.0-prerelease (current + as of this writing) as an example: + + + 1. Download the latest UML patch from + + the download page + . + + + 3. Make a directory and unpack the kernel into it. + + + + host% + mkdir ~/uml + + + + + + + host% + cd ~/uml + + + + + + + host% + tar -xzvf linux-2.4.0-prerelease.tar.bz2 + + + + + + + 4. Apply the patch using + + + + host% + cd ~/uml/linux + + + + host% + bzcat uml-patch-2.4.0-prerelease.bz2 | patch -p1 + + + + + + + 5. Run your favorite config; `make xconfig ARCH=um' is the most + convenient. `make config ARCH=um' and 'make menuconfig ARCH=um' + will work as well. The defaults will give you a useful kernel. If + you want to change something, go ahead, it probably won't hurt + anything. + + + Note: If the host is configured with a 2G/2G address space split + rather than the usual 3G/1G split, then the packaged UML binaries + will not run. They will immediately segfault. See ``UML on 2G/2G + hosts'' for the scoop on running UML on your system. + + + + 6. Finish with `make linux ARCH=um': the result is a file called + `linux' in the top directory of your source tree. + + Make sure that you don't build this kernel in /usr/src/linux. On some + distributions, /usr/include/asm is a link into this pool. The user- + mode build changes the other end of that link, and things that include + stop compiling. + + The sources are also available from cvs at the project's cvs page, + which has directions on getting the sources. You can also browse the + CVS pool from there. + + If you get the CVS sources, you will have to check them out into an + empty directory. You will then have to copy each file into the + corresponding directory in the appropriate kernel pool. + + If you don't have the latest kernel pool, you can get the + corresponding user-mode sources with + + + host% cvs co -r v_2_3_x linux + + + + + where 'x' is the version in your pool. Note that you will not get the + bug fixes and enhancements that have gone into subsequent releases. + + + 2.2. Compiling and installing kernel modules + + UML modules are built in the same way as the native kernel (with the + exception of the 'ARCH=um' that you always need for UML): + + + host% make modules ARCH=um + + + + + Any modules that you want to load into this kernel need to be built in + the user-mode pool. Modules from the native kernel won't work. + + You can install them by using ftp or something to copy them into the + virtual machine and dropping them into /lib/modules/`uname -r`. + + You can also get the kernel build process to install them as follows: + + 1. with the kernel not booted, mount the root filesystem in the top + level of the kernel pool: + + + host% mount root_fs mnt -o loop + + + + + + + 2. run + + + host% + make modules_install INSTALL_MOD_PATH=`pwd`/mnt ARCH=um + + + + + + + 3. unmount the filesystem + + + host% umount mnt + + + + + + + 4. boot the kernel on it + + + When the system is booted, you can use insmod as usual to get the + modules into the kernel. A number of things have been loaded into UML + as modules, especially filesystems and network protocols and filters, + so most symbols which need to be exported probably already are. + However, if you do find symbols that need exporting, let us + know, and + they'll be "taken care of". + + + + 2.3. 
Compiling and installing uml_utilities + + Many features of the UML kernel require a user-space helper program, + so a uml_utilities package is distributed separately from the kernel + patch which provides these helpers. Included within this is: + + o port-helper - Used by consoles which connect to xterms or ports + + o tunctl - Configuration tool to create and delete tap devices + + o uml_net - Setuid binary for automatic tap device configuration + + o uml_switch - User-space virtual switch required for daemon + transport + + The uml_utilities tree is compiled with: + + + host# + make && make install + + + + + Note that UML kernel patches may require a specific version of the + uml_utilities distribution. If you don't keep up with the mailing + lists, ensure that you have the latest release of uml_utilities if you + are experiencing problems with your UML kernel, particularly when + dealing with consoles or command-line switches to the helper programs + + + + + + + + + 3. Running UML and logging in + + + + 3.1. Running UML + + It runs on 2.2.15 or later, and all 2.4 kernels. + + + Booting UML is straightforward. Simply run 'linux': it will try to + mount the file `root_fs' in the current directory. You do not need to + run it as root. If your root filesystem is not named `root_fs', then + you need to put a `ubd0=root_fs_whatever' switch on the linux command + line. + + + You will need a filesystem to boot UML from. There are a number + available for download from here . There are also several tools + which can be + used to generate UML-compatible filesystem images from media. + The kernel will boot up and present you with a login prompt. + + + Note: If the host is configured with a 2G/2G address space split + rather than the usual 3G/1G split, then the packaged UML binaries will + not run. They will immediately segfault. See ``UML on 2G/2G hosts'' + for the scoop on running UML on your system. + + + + 3.2. Logging in + + + + The prepackaged filesystems have a root account with password 'root' + and a user account with password 'user'. The login banner will + generally tell you how to log in. So, you log in and you will find + yourself inside a little virtual machine. Our filesystems have a + variety of commands and utilities installed (and it is fairly easy to + add more), so you will have a lot of tools with which to poke around + the system. + + There are a couple of other ways to log in: + + o On a virtual console + + + + Each virtual console that is configured (i.e. the device exists in + /dev and /etc/inittab runs a getty on it) will come up in its own + xterm. If you get tired of the xterms, read ``Setting up serial + lines and consoles'' to see how to attach the consoles to + something else, like host ptys. + + + + o Over the serial line + + + In the boot output, find a line that looks like: + + + + serial line 0 assigned pty /dev/ptyp1 + + + + + Attach your favorite terminal program to the corresponding tty. I.e. + for minicom, the command would be + + + host% minicom -o -p /dev/ttyp1 + + + + + + + o Over the net + + + If the network is running, then you can telnet to the virtual + machine and log in to it. See ``Setting up the network'' to learn + about setting up a virtual network. + + When you're done using it, run halt, and the kernel will bring itself + down and the process will exit. + + + 3.3. Examples + + Here are some examples of UML in action: + + o A login session + + o A virtual network + + + + + + + + 4. UML on 2G/2G hosts + + + + + 4.1. 
Introduction + + + Most Linux machines are configured so that the kernel occupies the + upper 1G (0xc0000000 - 0xffffffff) of the 4G address space and + processes use the lower 3G (0x00000000 - 0xbfffffff). However, some + machine are configured with a 2G/2G split, with the kernel occupying + the upper 2G (0x80000000 - 0xffffffff) and processes using the lower + 2G (0x00000000 - 0x7fffffff). + + + + + 4.2. The problem + + + The prebuilt UML binaries on this site will not run on 2G/2G hosts + because UML occupies the upper .5G of the 3G process address space + (0xa0000000 - 0xbfffffff). Obviously, on 2G/2G hosts, this is right + in the middle of the kernel address space, so UML won't even load - it + will immediately segfault. + + + + + 4.3. The solution + + + The fix for this is to rebuild UML from source after enabling + CONFIG_HOST_2G_2G (under 'General Setup'). This will cause UML to + load itself in the top .5G of that smaller process address space, + where it will run fine. See ``Compiling the kernel and modules'' if + you need help building UML from source. + + + + + + + + + + + 5. Setting up serial lines and consoles + + + It is possible to attach UML serial lines and consoles to many types + of host I/O channels by specifying them on the command line. + + + You can attach them to host ptys, ttys, file descriptors, and ports. + This allows you to do things like + + o have a UML console appear on an unused host console, + + o hook two virtual machines together by having one attach to a pty + and having the other attach to the corresponding tty + + o make a virtual machine accessible from the net by attaching a + console to a port on the host. + + + The general format of the command line option is device=channel. + + + + 5.1. Specifying the device + + Devices are specified with "con" or "ssl" (console or serial line, + respectively), optionally with a device number if you are talking + about a specific device. + + + Using just "con" or "ssl" describes all of the consoles or serial + lines. If you want to talk about console #3 or serial line #10, they + would be "con3" and "ssl10", respectively. + + + A specific device name will override a less general "con=" or "ssl=". + So, for example, you can assign a pty to each of the serial lines + except for the first two like this: + + + ssl=pty ssl0=tty:/dev/tty0 ssl1=tty:/dev/tty1 + + + + + The specificity of the device name is all that matters; order on the + command line is irrelevant. + + + + 5.2. Specifying the channel + + There are a number of different types of channels to attach a UML + device to, each with a different way of specifying exactly what to + attach to. + + o pseudo-terminals - device=pty pts terminals - device=pts + + + This will cause UML to allocate a free host pseudo-terminal for the + device. The terminal that it got will be announced in the boot + log. You access it by attaching a terminal program to the + corresponding tty: + + o screen /dev/pts/n + + o screen /dev/ttyxx + + o minicom -o -p /dev/ttyxx - minicom seems not able to handle pts + devices + + o kermit - start it up, 'open' the device, then 'connect' + + + + + + o terminals - device=tty:tty device file + + + This will make UML attach the device to the specified tty (i.e + + + con1=tty:/dev/tty3 + + + + + will attach UML's console 1 to the host's /dev/tty3). If the tty that + you specify is the slave end of a tty/pty pair, something else must + have already opened the corresponding pty in order for this to work. 
+ + + + + + o xterms - device=xterm + + + UML will run an xterm and the device will be attached to it. + + + + + + o Port - device=port:port number + + + This will attach the UML devices to the specified host port. + Attaching console 1 to the host's port 9000 would be done like + this: + + + con1=port:9000 + + + + + Attaching all the serial lines to that port would be done similarly: + + + ssl=port:9000 + + + + + You access these devices by telnetting to that port. Each active tel- + net session gets a different device. If there are more telnets to a + port than UML devices attached to it, then the extra telnet sessions + will block until an existing telnet detaches, or until another device + becomes active (i.e. by being activated in /etc/inittab). + + This channel has the advantage that you can both attach multiple UML + devices to it and know how to access them without reading the UML boot + log. It is also unique in allowing access to a UML from remote + machines without requiring that the UML be networked. This could be + useful in allowing public access to UMLs because they would be + accessible from the net, but wouldn't need any kind of network + filtering or access control because they would have no network access. + + + If you attach the main console to a portal, then the UML boot will + appear to hang. In reality, it's waiting for a telnet to connect, at + which point the boot will proceed. + + + + + + o already-existing file descriptors - device=file descriptor + + + If you set up a file descriptor on the UML command line, you can + attach a UML device to it. This is most commonly used to put the + main console back on stdin and stdout after assigning all the other + consoles to something else: + + + con0=fd:0,fd:1 con=pts + + + + + + + + + o Nothing - device=null + + + This allows the device to be opened, in contrast to 'none', but + reads will block, and writes will succeed and the data will be + thrown out. + + + + + + o None - device=none + + + This causes the device to disappear. + + + + You can also specify different input and output channels for a device + by putting a comma between them: + + + ssl3=tty:/dev/tty2,xterm + + + + + will cause serial line 3 to accept input on the host's /dev/tty2 and + display output on an xterm. That's a silly example - the most common + use of this syntax is to reattach the main console to stdin and stdout + as shown above. + + + If you decide to move the main console away from stdin/stdout, the + initial boot output will appear in the terminal that you're running + UML in. However, once the console driver has been officially + initialized, then the boot output will start appearing wherever you + specified that console 0 should be. That device will receive all + subsequent output. + + + + 5.3. Examples + + There are a number of interesting things you can do with this + capability. + + + First, this is how you get rid of those bleeding console xterms by + attaching them to host ptys: + + + con=pty con0=fd:0,fd:1 + + + + + This will make a UML console take over an unused host virtual console, + so that when you switch to it, you will see the UML login prompt + rather than the host login prompt: + + + con1=tty:/dev/tty6 + + + + + You can attach two virtual machines together with what amounts to a + serial line as follows: + + Run one UML with a serial line attached to a pty - + + + ssl1=pty + + + + + Look at the boot log to see what pty it got (this example will assume + that it got /dev/ptyp1). 
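+
+  The message will resemble the one shown earlier; for serial line 1 it
+  should look something like:
+
+
+       serial line 1 assigned pty /dev/ptyp1
+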
+ + Boot the other UML with a serial line attached to the corresponding + tty - + + + ssl1=tty:/dev/ttyp1 + + + + + Log in, make sure that it has no getty on that serial line, attach a + terminal program like minicom to it, and you should see the login + prompt of the other virtual machine. + + + 6. Setting up the network + + + + This page describes how to set up the various transports and to + provide a UML instance with network access to the host, other machines + on the local net, and the rest of the net. + + + As of 2.4.5, UML networking has been completely redone to make it much + easier to set up, fix bugs, and add new features. + + + There is a new helper, uml_net, which does the host setup that + requires root privileges. + + + There are currently five transport types available for a UML virtual + machine to exchange packets with other hosts: + + o ethertap + + o TUN/TAP + + o Multicast + + o a switch daemon + + o slip + + o slirp + + o pcap + + The TUN/TAP, ethertap, slip, and slirp transports allow a UML + instance to exchange packets with the host. They may be directed + to the host or the host may just act as a router to provide access + to other physical or virtual machines. + + + The pcap transport is a synthetic read-only interface, using the + libpcap binary to collect packets from interfaces on the host and + filter them. This is useful for building preconfigured traffic + monitors or sniffers. + + + The daemon and multicast transports provide a completely virtual + network to other virtual machines. This network is completely + disconnected from the physical network unless one of the virtual + machines on it is acting as a gateway. + + + With so many host transports, which one should you use? Here's when + you should use each one: + + o ethertap - if you want access to the host networking and it is + running 2.2 + + o TUN/TAP - if you want access to the host networking and it is + running 2.4. Also, the TUN/TAP transport is able to use a + preconfigured device, allowing it to avoid using the setuid uml_net + helper, which is a security advantage. + + o Multicast - if you want a purely virtual network and you don't want + to set up anything but the UML + + o a switch daemon - if you want a purely virtual network and you + don't mind running the daemon in order to get somewhat better + performance + + o slip - there is no particular reason to run the slip backend unless + ethertap and TUN/TAP are just not available for some reason + + o slirp - if you don't have root access on the host to setup + networking, or if you don't want to allocate an IP to your UML + + o pcap - not much use for actual network connectivity, but great for + monitoring traffic on the host + + Ethertap is available on 2.4 and works fine. TUN/TAP is preferred + to it because it has better performance and ethertap is officially + considered obsolete in 2.4. Also, the root helper only needs to + run occasionally for TUN/TAP, rather than handling every packet, as + it does with ethertap. This is a slight security advantage since + it provides fewer opportunities for a nasty UML user to somehow + exploit the helper's root privileges. + + + 6.1. General setup + + First, you must have the virtual network enabled in your UML. If are + running a prebuilt kernel from this site, everything is already + enabled. If you build the kernel yourself, under the "Network device + support" menu, enable "Network device support", and then the three + transports. 
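+
+  Concretely, using the same configuration commands as in ``Compiling
+  the kernel and modules'', that means something like
+
+
+       host% make menuconfig ARCH=um
+
+
+  and then turning on, under "Network device support", the virtual
+  network device and each transport you intend to use (the exact option
+  labels vary between UML versions).
+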
+ + + The next step is to provide a network device to the virtual machine. + This is done by describing it on the kernel command line. + + The general format is + + + eth = , + + + + + For example, a virtual ethernet device may be attached to a host + ethertap device as follows: + + + eth0=ethertap,tap0,fe:fd:0:0:0:1,192.168.0.254 + + + + + This sets up eth0 inside the virtual machine to attach itself to the + host /dev/tap0, assigns it an ethernet address, and assigns the host + tap0 interface an IP address. + + + + Note that the IP address you assign to the host end of the tap device + must be different than the IP you assign to the eth device inside UML. + If you are short on IPs and don't want to consume two per UML, then + you can reuse the host's eth IP address for the host ends of the tap + devices. Internally, the UMLs must still get unique IPs for their eth + devices. You can also give the UMLs non-routable IPs (192.168.x.x or + 10.x.x.x) and have the host masquerade them. This will let outgoing + connections work, but incoming connections won't without more work, + such as port forwarding from the host. + Also note that when you configure the host side of an interface, it is + only acting as a gateway. It will respond to pings sent to it + locally, but is not useful to do that since it's a host interface. + You are not talking to the UML when you ping that interface and get a + response. + + + You can also add devices to a UML and remove them at runtime. See the + ``The Management Console'' page for details. + + + The sections below describe this in more detail. + + + Once you've decided how you're going to set up the devices, you boot + UML, log in, configure the UML side of the devices, and set up routes + to the outside world. At that point, you will be able to talk to any + other machines, physical or virtual, on the net. + + + If ifconfig inside UML fails and the network refuses to come up, run + tell you what went wrong. + + + + 6.2. Userspace daemons + + You will likely need the setuid helper, or the switch daemon, or both. + They are both installed with the RPM and deb, so if you've installed + either, you can skip the rest of this section. + + + If not, then you need to check them out of CVS, build them, and + install them. The helper is uml_net, in CVS /tools/uml_net, and the + daemon is uml_switch, in CVS /tools/uml_router. They are both built + with a plain 'make'. Both need to be installed in a directory that's + in your path - /usr/bin is recommend. On top of that, uml_net needs + to be setuid root. + + + + 6.3. Specifying ethernet addresses + + Below, you will see that the TUN/TAP, ethertap, and daemon interfaces + allow you to specify hardware addresses for the virtual ethernet + devices. This is generally not necessary. If you don't have a + specific reason to do it, you probably shouldn't. If one is not + specified on the command line, the driver will assign one based on the + device IP address. It will provide the address fe:fd:nn:nn:nn:nn + where nn.nn.nn.nn is the device IP address. This is nearly always + sufficient to guarantee a unique hardware address for the device. 
A + couple of exceptions are: + + o Another set of virtual ethernet devices are on the same network and + they are assigned hardware addresses using a different scheme which + may conflict with the UML IP address-based scheme + + o You aren't going to use the device for IP networking, so you don't + assign the device an IP address + + If you let the driver provide the hardware address, you should make + sure that the device IP address is known before the interface is + brought up. So, inside UML, this will guarantee that: + + + + UML# + ifconfig eth0 192.168.0.250 up + + + + + If you decide to assign the hardware address yourself, make sure that + the first byte of the address is even. Addresses with an odd first + byte are broadcast addresses, which you don't want assigned to a + device. + + + + 6.4. UML interface setup + + Once the network devices have been described on the command line, you + should boot UML and log in. + + + The first thing to do is bring the interface up: + + + UML# ifconfig ethn ip-address up + + + + + You should be able to ping the host at this point. + + + To reach the rest of the world, you should set a default route to the + host: + + + UML# route add default gw host ip + + + + + Again, with host ip of 192.168.0.4: + + + UML# route add default gw 192.168.0.4 + + + + + This page used to recommend setting a network route to your local net. + This is wrong, because it will cause UML to try to figure out hardware + addresses of the local machines by arping on the interface to the + host. Since that interface is basically a single strand of ethernet + with two nodes on it (UML and the host) and arp requests don't cross + networks, they will fail to elicit any responses. So, what you want + is for UML to just blindly throw all packets at the host and let it + figure out what to do with them, which is what leaving out the network + route and adding the default route does. + + + Note: If you can't communicate with other hosts on your physical + ethernet, it's probably because of a network route that's + automatically set up. If you run 'route -n' and see a route that + looks like this: + + + + + Destination Gateway Genmask Flags Metric Ref Use Iface + 192.168.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0 + + + + + with a mask that's not 255.255.255.255, then replace it with a route + to your host: + + + UML# + route del -net 192.168.0.0 dev eth0 netmask 255.255.255.0 + + + + + + + UML# + route add -host 192.168.0.4 dev eth0 + + + + + This, plus the default route to the host, will allow UML to exchange + packets with any machine on your ethernet. + + + + 6.5. Multicast + + The simplest way to set up a virtual network between multiple UMLs is + to use the mcast transport. This was written by Harald Welte and is + present in UML version 2.4.5-5um and later. Your system must have + multicast enabled in the kernel and there must be a multicast-capable + network device on the host. Normally, this is eth0, but if there is + no ethernet card on the host, then you will likely get strange error + messages when you bring the device up inside UML. + + + To use it, run two UMLs with + + + eth0=mcast + + + + + on their command lines. Log in, configure the ethernet device in each + machine with different IP addresses: + + + UML1# ifconfig eth0 192.168.0.254 + + + + + + + UML2# ifconfig eth0 192.168.0.253 + + + + + and they should be able to talk to each other. 
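+
+  A quick way to verify the link, using the example addresses above, is
+  to ping one machine from the other:
+
+
+       UML1# ping -c 3 192.168.0.253
+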
+ + The full set of command line options for this transport are + + + + ethn=mcast,ethernet address,multicast + address,multicast port,ttl + + + + + Harald's original README is here and explains these in detail, as well as + some other issues. + + There is also a related point-to-point only "ucast" transport. + This is useful when your network does not support multicast, and + all network connections are simple point to point links. + + The full set of command line options for this transport are + + + ethn=ucast,ethernet address,remote address,listen port,remote port + + + + + 6.6. TUN/TAP with the uml_net helper + + TUN/TAP is the preferred mechanism on 2.4 to exchange packets with the + host. The TUN/TAP backend has been in UML since 2.4.9-3um. + + + The easiest way to get up and running is to let the setuid uml_net + helper do the host setup for you. This involves insmod-ing the tun.o + module if necessary, configuring the device, and setting up IP + forwarding, routing, and proxy arp. If you are new to UML networking, + do this first. If you're concerned about the security implications of + the setuid helper, use it to get up and running, then read the next + section to see how to have UML use a preconfigured tap device, which + avoids the use of uml_net. + + + If you specify an IP address for the host side of the device, the + uml_net helper will do all necessary setup on the host - the only + requirement is that TUN/TAP be available, either built in to the host + kernel or as the tun.o module. + + The format of the command line switch to attach a device to a TUN/TAP + device is + + + eth =tuntap,,, + + + + + For example, this argument will attach the UML's eth0 to the next + available tap device and assign an ethernet address to it based on its + IP address + + + eth0=tuntap,,,192.168.0.254 + + + + + + + Note that the IP address that must be used for the eth device inside + UML is fixed by the routing and proxy arp that is set up on the + TUN/TAP device on the host. You can use a different one, but it won't + work because reply packets won't reach the UML. This is a feature. + It prevents a nasty UML user from doing things like setting the UML IP + to the same as the network's nameserver or mail server. + + + There are a couple potential problems with running the TUN/TAP + transport on a 2.4 host kernel + + o TUN/TAP seems not to work on 2.4.3 and earlier. Upgrade the host + kernel or use the ethertap transport. + + o With an upgraded kernel, TUN/TAP may fail with + + + File descriptor in bad state + + + + + This is due to a header mismatch between the upgraded kernel and the + kernel that was originally installed on the machine. The fix is to + make sure that /usr/src/linux points to the headers for the running + kernel. + + These were pointed out by Tim Robinson in + name="this uml- + user post"> . + + + + 6.7. TUN/TAP with a preconfigured tap device + + If you prefer not to have UML use uml_net (which is somewhat + insecure), with UML 2.4.17-11, you can set up a TUN/TAP device + beforehand. The setup needs to be done as root, but once that's done, + there is no need for root assistance. Setting up the device is done + as follows: + + o Create the device with tunctl (available from the UML utilities + tarball) + + + + + host# tunctl -u uid + + + + + where uid is the user id or username that UML will be run as. This + will tell you what device was created. 
+ + o Configure the device IP (change IP addresses and device name to + suit) + + + + + host# ifconfig tap0 192.168.0.254 up + + + + + + o Set up routing and arping if desired - this is my recipe, there are + other ways of doing the same thing + + + host# + bash -c 'echo 1 > /proc/sys/net/ipv4/ip_forward' + + host# + route add -host 192.168.0.253 dev tap0 + + + + + + + host# + bash -c 'echo 1 > /proc/sys/net/ipv4/conf/tap0/proxy_arp' + + + + + + + host# + arp -Ds 192.168.0.253 eth0 pub + + + + + Note that this must be done every time the host boots - this configu- + ration is not stored across host reboots. So, it's probably a good + idea to stick it in an rc file. An even better idea would be a little + utility which reads the information from a config file and sets up + devices at boot time. + + o Rather than using up two IPs and ARPing for one of them, you can + also provide direct access to your LAN by the UML by using a + bridge. + + + host# + brctl addbr br0 + + + + + + + host# + ifconfig eth0 0.0.0.0 promisc up + + + + + + + host# + ifconfig tap0 0.0.0.0 promisc up + + + + + + + host# + ifconfig br0 192.168.0.1 netmask 255.255.255.0 up + + + + + + + + host# + brctl stp br0 off + + + + + + + host# + brctl setfd br0 1 + + + + + + + host# + brctl sethello br0 1 + + + + + + + host# + brctl addif br0 eth0 + + + + + + + host# + brctl addif br0 tap0 + + + + + Note that 'br0' should be setup using ifconfig with the existing IP + address of eth0, as eth0 no longer has its own IP. + + o + + + Also, the /dev/net/tun device must be writable by the user running + UML in order for the UML to use the device that's been configured + for it. The simplest thing to do is + + + host# chmod 666 /dev/net/tun + + + + + Making it world-writable looks bad, but it seems not to be + exploitable as a security hole. However, it does allow anyone to cre- + ate useless tap devices (useless because they can't configure them), + which is a DOS attack. A somewhat more secure alternative would to be + to create a group containing all the users who have preconfigured tap + devices and chgrp /dev/net/tun to that group with mode 664 or 660. + + + o Once the device is set up, run UML with 'eth0=tuntap,device name' + (i.e. 'eth0=tuntap,tap0') on the command line (or do it with the + mconsole config command). + + o Bring the eth device up in UML and you're in business. + + If you don't want that tap device any more, you can make it non- + persistent with + + + host# tunctl -d tap device + + + + + Finally, tunctl has a -b (for brief mode) switch which causes it to + output only the name of the tap device it created. This makes it + suitable for capture by a script: + + + host# TAP=`tunctl -u 1000 -b` + + + + + + + 6.8. Ethertap + + Ethertap is the general mechanism on 2.2 for userspace processes to + exchange packets with the kernel. + + + + To use this transport, you need to describe the virtual network device + on the UML command line. The general format for this is + + + eth =ethertap, , , + + + + + So, the previous example + + + eth0=ethertap,tap0,fe:fd:0:0:0:1,192.168.0.254 + + + + + attaches the UML eth0 device to the host /dev/tap0, assigns it the + ethernet address fe:fd:0:0:0:1, and assigns the IP address + 192.168.0.254 to the tap device. + + + + The tap device is mandatory, but the others are optional. If the + ethernet address is omitted, one will be assigned to it. 
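+
+  For example, a minimal form along the lines of
+
+
+       eth0=ethertap,tap0
+
+
+  should attach eth0 to the host /dev/tap0 and let the driver pick the
+  hardware address; with no tap IP given, the host-side setup is left
+  to you.
+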
+ + + The presence of the tap IP address will cause the helper to run and do + whatever host setup is needed to allow the virtual machine to + communicate with the outside world. If you're not sure you know what + you're doing, this is the way to go. + + + If it is absent, then you must configure the tap device and whatever + arping and routing you will need on the host. However, even in this + case, the uml_net helper still needs to be in your path and it must be + setuid root if you're not running UML as root. This is because the + tap device doesn't support SIGIO, which UML needs in order to use + something as a source of input. So, the helper is used as a + convenient asynchronous IO thread. + + If you're using the uml_net helper, you can ignore the following host + setup - uml_net will do it for you. You just need to make sure you + have ethertap available, either built in to the host kernel or + available as a module. + + + If you want to set things up yourself, you need to make sure that the + appropriate /dev entry exists. If it doesn't, become root and create + it as follows: + + + mknod /dev/tap c 36 + 16 + + + + + For example, this is how to create /dev/tap0: + + + mknod /dev/tap0 c 36 0 + 16 + + + + + You also need to make sure that the host kernel has ethertap support. + If ethertap is enabled as a module, you apparently need to insmod + ethertap once for each ethertap device you want to enable. So, + + + host# + insmod ethertap + + + + + will give you the tap0 interface. To get the tap1 interface, you need + to run + + + host# + insmod ethertap unit=1 -o ethertap1 + + + + + + + + 6.9. The switch daemon + + Note: This is the daemon formerly known as uml_router, but which was + renamed so the network weenies of the world would stop growling at me. + + + The switch daemon, uml_switch, provides a mechanism for creating a + totally virtual network. By default, it provides no connection to the + host network (but see -tap, below). + + + The first thing you need to do is run the daemon. Running it with no + arguments will make it listen on a default pair of unix domain + sockets. + + + If you want it to listen on a different pair of sockets, use + + + -unix control socket data socket + + + + + + If you want it to act as a hub rather than a switch, use + + + -hub + + + + + + If you want the switch to be connected to host networking (allowing + the umls to get access to the outside world through the host), use + + + -tap tap0 + + + + + + Note that the tap device must be preconfigured (see "TUN/TAP with a + preconfigured tap device", above). If you're using a different tap + device than tap0, specify that instead of tap0. + + + uml_switch can be backgrounded as follows + + + host% + uml_switch [ options ] < /dev/null > /dev/null + + + + + The reason it doesn't background by default is that it listens to + stdin for EOF. When it sees that, it exits. + + + The general format of the kernel command line switch is + + + + ethn=daemon,ethernet address,socket + type,control socket,data socket + + + + + You can leave off everything except the 'daemon'. You only need to + specify the ethernet address if the one that will be assigned to it + isn't acceptable for some reason. The rest of the arguments describe + how to communicate with the daemon. You should only specify them if + you told the daemon to use different sockets than the default. 
So, if + you ran the daemon with no arguments, running the UML on the same + machine with + eth0=daemon + + + + + will cause the eth0 driver to attach itself to the daemon correctly. + + + + 6.10. Slip + + Slip is another, less general, mechanism for a process to communicate + with the host networking. In contrast to the ethertap interface, + which exchanges ethernet frames with the host and can be used to + transport any higher-level protocol, it can only be used to transport + IP. + + + The general format of the command line switch is + + + + ethn=slip,slip IP + + + + + The slip IP argument is the IP address that will be assigned to the + host end of the slip device. If it is specified, the helper will run + and will set up the host so that the virtual machine can reach it and + the rest of the network. + + + There are some oddities with this interface that you should be aware + of. You should only specify one slip device on a given virtual + machine, and its name inside UML will be 'umn', not 'eth0' or whatever + you specified on the command line. These problems will be fixed at + some point. + + + + 6.11. Slirp + + slirp uses an external program, usually /usr/bin/slirp, to provide IP + only networking connectivity through the host. This is similar to IP + masquerading with a firewall, although the translation is performed in + user-space, rather than by the kernel. As slirp does not set up any + interfaces on the host, or changes routing, slirp does not require + root access or setuid binaries on the host. + + + The general format of the command line switch for slirp is: + + + + ethn=slirp,ethernet address,slirp path + + + + + The ethernet address is optional, as UML will set up the interface + with an ethernet address based upon the initial IP address of the + interface. The slirp path is generally /usr/bin/slirp, although it + will depend on distribution. + + + The slirp program can have a number of options passed to the command + line and we can't add them to the UML command line, as they will be + parsed incorrectly. Instead, a wrapper shell script can be written or + the options inserted into the /.slirprc file. More information on + all of the slirp options can be found in its man pages. + + + The eth0 interface on UML should be set up with the IP 10.2.0.15, + although you can use anything as long as it is not used by a network + you will be connecting to. The default route on UML should be set to + use + + + UML# + route add default dev eth0 + + + + + slirp provides a number of useful IP addresses which can be used by + UML, such as 10.0.2.3 which is an alias for the DNS server specified + in /etc/resolv.conf on the host or the IP given in the 'dns' option + for slirp. + + + Even with a baudrate setting higher than 115200, the slirp connection + is limited to 115200. If you need it to go faster, the slirp binary + needs to be compiled with FULL_BOLT defined in config.h. + + + + 6.12. pcap + + The pcap transport is attached to a UML ethernet device on the command + line or with uml_mconsole with the following syntax: + + + + ethn=pcap,host interface,filter + expression,option1,option2 + + + + + The expression and options are optional. + + + The interface is whatever network device on the host you want to + sniff. The expression is a pcap filter expression, which is also what + tcpdump uses, so if you know how to specify tcpdump filters, you will + use the same expressions here. 
The options are up to two of + 'promisc', control whether pcap puts the host interface into + promiscuous mode. 'optimize' and 'nooptimize' control whether the pcap + expression optimizer is used. + + + Example: + + + + eth0=pcap,eth0,tcp + + eth1=pcap,eth0,!tcp + + + + will cause the UML eth0 to emit all tcp packets on the host eth0 and + the UML eth1 to emit all non-tcp packets on the host eth0. + + + + 6.13. Setting up the host yourself + + If you don't specify an address for the host side of the ethertap or + slip device, UML won't do any setup on the host. So this is what is + needed to get things working (the examples use a host-side IP of + 192.168.0.251 and a UML-side IP of 192.168.0.250 - adjust to suit your + own network): + + o The device needs to be configured with its IP address. Tap devices + are also configured with an mtu of 1484. Slip devices are + configured with a point-to-point address pointing at the UML ip + address. + + + host# ifconfig tap0 arp mtu 1484 192.168.0.251 up + + + + + + + host# + ifconfig sl0 192.168.0.251 pointopoint 192.168.0.250 up + + + + + + o If a tap device is being set up, a route is set to the UML IP. + + + UML# route add -host 192.168.0.250 gw 192.168.0.251 + + + + + + o To allow other hosts on your network to see the virtual machine, + proxy arp is set up for it. + + + host# arp -Ds 192.168.0.250 eth0 pub + + + + + + o Finally, the host is set up to route packets. + + + host# echo 1 > /proc/sys/net/ipv4/ip_forward + + + + + + + + + + + 7. Sharing Filesystems between Virtual Machines + + + + + 7.1. A warning + + Don't attempt to share filesystems simply by booting two UMLs from the + same file. That's the same thing as booting two physical machines + from a shared disk. It will result in filesystem corruption. + + + + 7.2. Using layered block devices + + The way to share a filesystem between two virtual machines is to use + the copy-on-write (COW) layering capability of the ubd block driver. + As of 2.4.6-2um, the driver supports layering a read-write private + device over a read-only shared device. A machine's writes are stored + in the private device, while reads come from either device - the + private one if the requested block is valid in it, the shared one if + not. Using this scheme, the majority of data which is unchanged is + shared between an arbitrary number of virtual machines, each of which + has a much smaller file containing the changes that it has made. With + a large number of UMLs booting from a large root filesystem, this + leads to a huge disk space saving. It will also help performance, + since the host will be able to cache the shared data using a much + smaller amount of memory, so UML disk requests will be served from the + host's memory rather than its disks. + + + + + To add a copy-on-write layer to an existing block device file, simply + add the name of the COW file to the appropriate ubd switch: + + + ubd0=root_fs_cow,root_fs_debian_22 + + + + + where 'root_fs_cow' is the private COW file and 'root_fs_debian_22' is + the existing shared filesystem. The COW file need not exist. If it + doesn't, the driver will create and initialize it. Once the COW file + has been initialized, it can be used on its own on the command line: + + + ubd0=root_fs_cow + + + + + The name of the backing file is stored in the COW file header, so it + would be redundant to continue specifying it on the command line. + + + + 7.3. Note! 
+ + When checking the size of the COW file in order to see the gobs of + space that you're saving, make sure you use 'ls -ls' to see the actual + disk consumption rather than the length of the file. The COW file is + sparse, so the length will be very different from the disk usage. + Here is a 'ls -l' of a COW file and backing file from one boot and + shutdown: + host% ls -l cow.debian debian2.2 + -rw-r--r-- 1 jdike jdike 492504064 Aug 6 21:16 cow.debian + -rwxrw-rw- 1 jdike jdike 537919488 Aug 6 20:42 debian2.2 + + + + + Doesn't look like much saved space, does it? Well, here's 'ls -ls': + + + host% ls -ls cow.debian debian2.2 + 880 -rw-r--r-- 1 jdike jdike 492504064 Aug 6 21:16 cow.debian + 525832 -rwxrw-rw- 1 jdike jdike 537919488 Aug 6 20:42 debian2.2 + + + + + Now, you can see that the COW file has less than a meg of disk, rather + than 492 meg. + + + + 7.4. Another warning + + Once a filesystem is being used as a readonly backing file for a COW + file, do not boot directly from it or modify it in any way. Doing so + will invalidate any COW files that are using it. The mtime and size + of the backing file are stored in the COW file header at its creation, + and they must continue to match. If they don't, the driver will + refuse to use the COW file. + + + + + If you attempt to evade this restriction by changing either the + backing file or the COW header by hand, you will get a corrupted + filesystem. + + + + + Among other things, this means that upgrading the distribution in a + backing file and expecting that all of the COW files using it will see + the upgrade will not work. + + + + + 7.5. uml_moo : Merging a COW file with its backing file + + Depending on how you use UML and COW devices, it may be advisable to + merge the changes in the COW file into the backing file every once in + a while. + + + + + The utility that does this is uml_moo. Its usage is + + + host% uml_moo COW file new backing file + + + + + There's no need to specify the backing file since that information is + already in the COW file header. If you're paranoid, boot the new + merged file, and if you're happy with it, move it over the old backing + file. + + + + + uml_moo creates a new backing file by default as a safety measure. It + also has a destructive merge option which will merge the COW file + directly into its current backing file. This is really only usable + when the backing file only has one COW file associated with it. If + there are multiple COWs associated with a backing file, a -d merge of + one of them will invalidate all of the others. However, it is + convenient if you're short of disk space, and it should also be + noticeably faster than a non-destructive merge. + + + + + uml_moo is installed with the UML deb and RPM. If you didn't install + UML from one of those packages, you can also get it from the UML + utilities tar file in tools/moo. + + + + + + + + + 8. Creating filesystems + + + You may want to create and mount new UML filesystems, either because + your root filesystem isn't large enough or because you want to use a + filesystem other than ext2. + + + This was written on the occasion of reiserfs being included in the + 2.4.1 kernel pool, and therefore the 2.4.1 UML, so the examples will + talk about reiserfs. This information is generic, and the examples + should be easy to translate to the filesystem of your choice. + + + 8.1. Create the filesystem file + + dd is your friend. All you need to do is tell dd to create an empty + file of the appropriate size. 
I usually make it sparse to save time + and to avoid allocating disk space until it's actually used. For + example, the following command will create a sparse 100 meg file full + of zeroes. + + + host% + dd if=/dev/zero of=new_filesystem seek=100 count=1 bs=1M + + + + + + + 8.2. Assign the file to a UML device + + Add an argument like the following to the UML command line: + + ubd4=new_filesystem + + + + + making sure that you use an unassigned ubd device number. + + + + 8.3. Creating and mounting the filesystem + + Make sure that the filesystem is available, either by being built into + the kernel, or available as a module, then boot up UML and log in. If + the root filesystem doesn't have the filesystem utilities (mkfs, fsck, + etc), then get them into UML by way of the net or hostfs. + + + Make the new filesystem on the device assigned to the new file: + + + host# mkreiserfs /dev/ubd/4 + + + <----------- MKREISERFSv2 -----------> + + ReiserFS version 3.6.25 + Block size 4096 bytes + Block count 25856 + Used blocks 8212 + Journal - 8192 blocks (18-8209), journal header is in block 8210 + Bitmaps: 17 + Root block 8211 + Hash function "r5" + ATTENTION: ALL DATA WILL BE LOST ON '/dev/ubd/4'! (y/n)y + journal size 8192 (from 18) + Initializing journal - 0%....20%....40%....60%....80%....100% + Syncing..done. + + + + + Now, mount it: + + + UML# + mount /dev/ubd/4 /mnt + + + + + and you're in business. + + + + + + + + + + 9. Host file access + + + If you want to access files on the host machine from inside UML, you + can treat it as a separate machine and either nfs mount directories + from the host or copy files into the virtual machine with scp or rcp. + However, since UML is running on the host, it can access those + files just like any other process and make them available inside the + virtual machine without needing to use the network. + + + This is now possible with the hostfs virtual filesystem. With it, you + can mount a host directory into the UML filesystem and access the + files contained in it just as you would on the host. + + + 9.1. Using hostfs + + To begin with, make sure that hostfs is available inside the virtual + machine with + + + UML# cat /proc/filesystems + + + + . hostfs should be listed. If it's not, either rebuild the kernel + with hostfs configured into it or make sure that hostfs is built as a + module and available inside the virtual machine, and insmod it. + + + Now all you need to do is run mount: + + + UML# mount none /mnt/host -t hostfs + + + + + will mount the host's / on the virtual machine's /mnt/host. + + + If you don't want to mount the host root directory, then you can + specify a subdirectory to mount with the -o switch to mount: + + + UML# mount none /mnt/home -t hostfs -o /home + + + + + will mount the hosts's /home on the virtual machine's /mnt/home. + + + + 9.2. hostfs as the root filesystem + + It's possible to boot from a directory hierarchy on the host using + hostfs rather than using the standard filesystem in a file. + + To start, you need that hierarchy. The easiest way is to loop mount + an existing root_fs file: + + + host# mount root_fs uml_root_dir -o loop + + + + + You need to change the filesystem type of / in etc/fstab to be + 'hostfs', so that line looks like this: + + /dev/ubd/0 / hostfs defaults 1 1 + + + + + Then you need to chown to yourself all the files in that directory + that are owned by root. This worked for me: + + + host# find . 
-uid 0 -exec chown jdike {} \;
+
+
+       Next, make sure that your UML kernel has hostfs compiled in, not as a
+       module.  Then run UML with the boot device pointing at that directory:
+
+
+       ubd0=/path/to/uml/root/directory
+
+
+       UML should then boot as it does normally.
+
+
+       9.3.  Building hostfs
+
+       If you need to build hostfs because it's not in your kernel, you have
+       two choices:
+
+
+       o  Compiling hostfs into the kernel:
+
+          Reconfigure the kernel and set the 'Host filesystem' option to 'y'.
+
+       o  Compiling hostfs as a module:
+
+          Reconfigure the kernel and set the 'Host filesystem' option to
+          'm'.  The module will be in arch/um/fs/hostfs/hostfs.o.  Install
+          that in /lib/modules/`uname -r`/fs in the virtual machine, boot it
+          up, and
+
+
+       UML# insmod hostfs
+
+
+
+       10.  The Management Console
+
+
+       The UML management console is a low-level interface to the kernel,
+       somewhat like the i386 SysRq interface.  Since there is a full-blown
+       operating system under UML, there is much greater flexibility possible
+       than with the SysRq mechanism.
+
+
+       There are a number of things you can do with the mconsole interface:
+
+       o  get the kernel version
+
+       o  add and remove devices
+
+       o  halt or reboot the machine
+
+       o  send SysRq commands
+
+       o  pause and resume the UML
+
+
+       You need the mconsole client (uml_mconsole), which is present in CVS
+       (/tools/mconsole) in 2.4.5-9um and later, and will be in the RPM in
+       2.4.6.
+
+
+       You also need CONFIG_MCONSOLE (under 'General Setup') enabled in UML.
+       When you boot UML, you'll see a line like:
+
+
+       mconsole initialized on /home/jdike/.uml/umlNJ32yL/mconsole
+
+
+       If you specify a unique machine id on the UML command line, i.e.
+
+
+       umid=debian
+
+
+       you'll see this instead:
+
+
+       mconsole initialized on /home/jdike/.uml/debian/mconsole
+
+
+       That file is the socket that uml_mconsole will use to communicate with
+       UML.  Run it with either the umid or the full path as its argument:
+
+
+       host% uml_mconsole debian
+
+
+       or
+
+
+       host% uml_mconsole /home/jdike/.uml/debian/mconsole
+
+
+       You'll get a prompt, at which you can run one of these commands:
+
+       o  version
+
+       o  halt
+
+       o  reboot
+
+       o  config
+
+       o  remove
+
+       o  sysrq
+
+       o  help
+
+       o  cad
+
+       o  stop
+
+       o  go
+
+
+       10.1.  version
+
+       This takes no arguments.  It prints the UML version.
+
+
+       (mconsole)  version
+       OK Linux usermode 2.4.5-9um #1 Wed Jun 20 22:47:08 EDT 2001 i686
+
+
+       There are a couple of actual uses for this.  It's a simple no-op which
+       can be used to check that a UML is running.  It's also a way of
+       sending an interrupt to the UML.  This is sometimes useful on SMP
+       hosts, where there's a bug which causes signals to UML to be lost,
+       often causing it to appear to hang.  Sending such a UML the mconsole
+       version command is a good way to 'wake it up' before networking has
+       been enabled, since it does not otherwise affect the functioning of
+       the UML.
+
+
+       10.2.  halt and reboot
+
+       These take no arguments.  They shut the machine down immediately, with
+       no syncing of disks and no clean shutdown of userspace.  So, they are
+       pretty close to crashing the machine.
+
+
+       (mconsole)  halt
+       OK
+
+
+       10.3.  config
+
+       "config" adds a new device to the virtual machine.  Currently the ubd
+       and network drivers support this.  It takes one argument, which is the
+       device to add, with the same syntax as the kernel command line.
+
+
+       (mconsole)
+       config ubd3=/home/jdike/incoming/roots/root_fs_debian22
+
+       OK
+       (mconsole)  config eth1=mcast
+       OK
+
+
+       10.4.  
remove
+
+       "remove" deletes a device from the system.  Its argument is just the
+       name of the device to be removed.  The device must be idle in whatever
+       sense the driver considers necessary.  In the case of the ubd driver,
+       the removed block device must not be mounted, swapped on, or otherwise
+       open, and in the case of the network driver, the device must be down.
+
+
+       (mconsole)  remove ubd3
+       OK
+       (mconsole)  remove eth1
+       OK
+
+
+       10.5.  sysrq
+
+       This takes one argument, which is a single letter.  It calls the
+       generic kernel's SysRq driver, which does whatever is called for by
+       that argument.  See the SysRq documentation in
+       Documentation/admin-guide/sysrq.rst in your favorite kernel tree to
+       see what letters are valid and what they do.
+
+
+       10.6.  help
+
+       "help" returns a string listing the valid commands and what each one
+       does.
+
+
+       10.7.  cad
+
+       This invokes the Ctrl-Alt-Del action on init.  What exactly this ends
+       up doing is up to /etc/inittab.  Normally, it reboots the machine.
+       With UML, this is usually not desired, so if a halt would be better,
+       then find the section of inittab that looks like this
+
+
+       # What to do when CTRL-ALT-DEL is pressed.
+       ca:12345:ctrlaltdel:/sbin/shutdown -t1 -a -r now
+
+
+       and change the command to halt.
+
+
+       10.8.  stop
+
+       This puts the UML in a loop reading mconsole requests until a 'go'
+       mconsole command is received.  This is very useful for making backups
+       of UML filesystems, as the UML can be stopped, then synced via 'sysrq
+       s', so that everything is written to the filesystem.  You can then
+       copy the filesystem and send the UML 'go' via mconsole.
+
+
+       Note that a UML running with more than one CPU will have problems
+       after you send the 'stop' command, as only one CPU will be held in a
+       mconsole loop and all others will continue as normal.  This is a bug,
+       and will be fixed.
+
+
+       10.9.  go
+
+       This resumes a UML after being paused by a 'stop' command.  Note that
+       when the UML has resumed, TCP connections may have timed out and if
+       the UML is paused for a long period of time, crond might go a little
+       crazy, running all the jobs it didn't do earlier.
+
+
+
+       11.  Kernel debugging
+
+
+       Note: The interface that makes the debugging described here possible
+       is present in 2.4.0-test6 kernels and later.
+
+
+       Since the user-mode kernel runs as a normal Linux process, it is
+       possible to debug it with gdb almost like any other process.  It is
+       slightly different because the kernel's threads are already being
+       ptraced for system call interception, so gdb can't ptrace them.
+       However, a mechanism has been added to work around that problem.
+
+
+       In order to debug the kernel, you need to build it from source.  See
+       ``Compiling the kernel and modules''  for information on doing that.
+       Make sure that you enable CONFIG_DEBUGSYM and CONFIG_PT_PROXY during
+       the config.  These will compile the kernel with -g, and enable the
+       ptrace proxy so that gdb works with UML, respectively.
+
+
+       11.1.  Starting the kernel under gdb
+
+       You can have the kernel running under the control of gdb from the
+       beginning by putting 'debug' on the command line.  You will get an
+       xterm with gdb running inside it.  The kernel will send some commands
+       to gdb which will leave it stopped at the beginning of start_kernel.
+       At this point, you can get things going with 'next', 'step', or
+       'cont'.
+
+
+       There is a transcript of a debugging session on the project web site,
+       with breakpoints being set in the scheduler and in an interrupt
+       handler.
+
+       11.2.  
Examining sleeping processes + + Not every bug is evident in the currently running process. Sometimes, + processes hang in the kernel when they shouldn't because they've + deadlocked on a semaphore or something similar. In this case, when + you ^C gdb and get a backtrace, you will see the idle thread, which + isn't very relevant. + + + What you want is the stack of whatever process is sleeping when it + shouldn't be. You need to figure out which process that is, which is + generally fairly easy. Then you need to get its host process id, + which you can do either by looking at ps on the host or at + task.thread.extern_pid in gdb. + + + Now what you do is this: + + o detach from the current thread + + + (UML gdb) det + + + + + + o attach to the thread you are interested in + + + (UML gdb) att + + + + + + o look at its stack and anything else of interest + + + (UML gdb) bt + + + + + Note that you can't do anything at this point that requires that a + process execute, e.g. calling a function + + o when you're done looking at that process, reattach to the current + thread and continue it + + + (UML gdb) + att 1 + + + + + + + (UML gdb) + c + + + + + Here, specifying any pid which is not the process id of a UML thread + will cause gdb to reattach to the current thread. I commonly use 1, + but any other invalid pid would work. + + + + 11.3. Running ddd on UML + + ddd works on UML, but requires a special kludge. The process goes + like this: + + o Start ddd + + + host% ddd linux + + + + + + o With ps, get the pid of the gdb that ddd started. You can ask the + gdb to tell you, but for some reason that confuses things and + causes a hang. + + o run UML with 'debug=parent gdb-pid=' added to the command line + - it will just sit there after you hit return + + o type 'att 1' to the ddd gdb and you will see something like + + + 0xa013dc51 in __kill () + + + (gdb) + + + + + + o At this point, type 'c', UML will boot up, and you can use ddd just + as you do on any other process. + + + + 11.4. Debugging modules + + gdb has support for debugging code which is dynamically loaded into + the process. This support is what is needed to debug kernel modules + under UML. + + + Using that support is somewhat complicated. You have to tell gdb what + object file you just loaded into UML and where in memory it is. Then, + it can read the symbol table, and figure out where all the symbols are + from the load address that you provided. It gets more interesting + when you load the module again (i.e. after an rmmod). You have to + tell gdb to forget about all its symbols, including the main UML ones + for some reason, then load then all back in again. + + + There's an easy way and a hard way to do this. The easy way is to use + the umlgdb expect script written by Chandan Kudige. It basically + automates the process for you. + + + First, you must tell it where your modules are. There is a list in + the script that looks like this: + set MODULE_PATHS { + "fat" "/usr/src/uml/linux-2.4.18/fs/fat/fat.o" + "isofs" "/usr/src/uml/linux-2.4.18/fs/isofs/isofs.o" + "minix" "/usr/src/uml/linux-2.4.18/fs/minix/minix.o" + } + + + + + You change that to list the names and paths of the modules that you + are going to debug. Then you run it from the toplevel directory of + your UML pool and it basically tells you what to do: + + + + + ******** GDB pid is 21903 ******** + Start UML as: ./linux debug gdb-pid=21903 + + + + GNU gdb 5.0rh-5 Red Hat Linux 7.1 + Copyright 2001 Free Software Foundation, Inc. 
+ GDB is free software, covered by the GNU General Public License, and you are + welcome to change it and/or distribute copies of it under certain conditions. + Type "show copying" to see the conditions. + There is absolutely no warranty for GDB. Type "show warranty" for details. + This GDB was configured as "i386-redhat-linux"... + (gdb) b sys_init_module + Breakpoint 1 at 0xa0011923: file module.c, line 349. + (gdb) att 1 + + + + + After you run UML and it sits there doing nothing, you hit return at + the 'att 1' and continue it: + + + Attaching to program: /home/jdike/linux/2.4/um/./linux, process 1 + 0xa00f4221 in __kill () + (UML gdb) c + Continuing. + + + + + At this point, you debug normally. When you insmod something, the + expect magic will kick in and you'll see something like: + + + + + + + + + + + + + + + + + + *** Module hostfs loaded *** + Breakpoint 1, sys_init_module (name_user=0x805abb0 "hostfs", + mod_user=0x8070e00) at module.c:349 + 349 char *name, *n_name, *name_tmp = NULL; + (UML gdb) finish + Run till exit from #0 sys_init_module (name_user=0x805abb0 "hostfs", + mod_user=0x8070e00) at module.c:349 + 0xa00e2e23 in execute_syscall (r=0xa8140284) at syscall_kern.c:411 + 411 else res = EXECUTE_SYSCALL(syscall, regs); + Value returned is $1 = 0 + (UML gdb) + p/x (int)module_list + module_list->size_of_struct + + $2 = 0xa9021054 + (UML gdb) symbol-file ./linux + Load new symbol table from "./linux"? (y or n) y + Reading symbols from ./linux... + done. + (UML gdb) + add-symbol-file /home/jdike/linux/2.4/um/arch/um/fs/hostfs/hostfs.o 0xa9021054 + + add symbol table from file "/home/jdike/linux/2.4/um/arch/um/fs/hostfs/hostfs.o" at + .text_addr = 0xa9021054 + (y or n) y + + Reading symbols from /home/jdike/linux/2.4/um/arch/um/fs/hostfs/hostfs.o... + done. + (UML gdb) p *module_list + $1 = {size_of_struct = 84, next = 0xa0178720, name = 0xa9022de0 "hostfs", + size = 9016, uc = {usecount = {counter = 0}, pad = 0}, flags = 1, + nsyms = 57, ndeps = 0, syms = 0xa9023170, deps = 0x0, refs = 0x0, + init = 0xa90221f0 , cleanup = 0xa902222c , + ex_table_start = 0x0, ex_table_end = 0x0, persist_start = 0x0, + persist_end = 0x0, can_unload = 0, runsize = 0, kallsyms_start = 0x0, + kallsyms_end = 0x0, + archdata_start = 0x1b855
, + archdata_end = 0xe5890000
, + kernel_data = 0xf689c35d
} + >> Finished loading symbols for hostfs ... + + + + + That's the easy way. It's highly recommended. The hard way is + described below in case you're interested in what's going on. + + + Boot the kernel under the debugger and load the module with insmod or + modprobe. With gdb, do: + + + (UML gdb) p module_list + + + + + This is a list of modules that have been loaded into the kernel, with + the most recently loaded module first. Normally, the module you want + is at module_list. If it's not, walk down the next links, looking at + the name fields until find the module you want to debug. Take the + address of that structure, and add module.size_of_struct (which in + 2.4.10 kernels is 96 (0x60)) to it. Gdb can make this hard addition + for you :-): + + + + (UML gdb) + printf "%#x\n", (int)module_list module_list->size_of_struct + + + + + The offset from the module start occasionally changes (before 2.4.0, + it was module.size_of_struct + 4), so it's a good idea to check the + init and cleanup addresses once in a while, as describe below. Now + do: + + + (UML gdb) + add-symbol-file /path/to/module/on/host that_address + + + + + Tell gdb you really want to do it, and you're in business. + + + If there's any doubt that you got the offset right, like breakpoints + appear not to work, or they're appearing in the wrong place, you can + check it by looking at the module structure. The init and cleanup + fields should look like: + + + init = 0x588066b0 , cleanup = 0x588066c0 + + + + + with no offsets on the symbol names. If the names are right, but they + are offset, then the offset tells you how much you need to add to the + address you gave to add-symbol-file. + + + When you want to load in a new version of the module, you need to get + gdb to forget about the old one. The only way I've found to do that + is to tell gdb to forget about all symbols that it knows about: + + + (UML gdb) symbol-file + + + + + Then reload the symbols from the kernel binary: + + + (UML gdb) symbol-file /path/to/kernel + + + + + and repeat the process above. You'll also need to re-enable break- + points. They were disabled when you dumped all the symbols because + gdb couldn't figure out where they should go. + + + + 11.5. Attaching gdb to the kernel + + If you don't have the kernel running under gdb, you can attach gdb to + it later by sending the tracing thread a SIGUSR1. The first line of + the console output identifies its pid: + tracing thread pid = 20093 + + + + + When you send it the signal: + + + host% kill -USR1 20093 + + + + + you will get an xterm with gdb running in it. + + + If you have the mconsole compiled into UML, then the mconsole client + can be used to start gdb: + + + (mconsole) (mconsole) config gdb=xterm + + + + + will fire up an xterm with gdb running in it. + + + + 11.6. Using alternate debuggers + + UML has support for attaching to an already running debugger rather + than starting gdb itself. This is present in CVS as of 17 Apr 2001. + I sent it to Alan for inclusion in the ac tree, and it will be in my + 2.4.4 release. + + + This is useful when gdb is a subprocess of some UI, such as emacs or + ddd. It can also be used to run debuggers other than gdb on UML. + Below is an example of using strace as an alternate debugger. + + + To do this, you need to get the pid of the debugger and pass it in + with the + + + If you are using gdb under some UI, then tell it to 'att 1', and + you'll find yourself attached to UML. 
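+
+       For example, a session might go like this (the pid 12345 is made up -
+       substitute whatever pid ps reports for the gdb that your UI started):
+
+
+       host% ps ax | grep "gdb linux"
+       12345 pts/2    S      0:00 gdb linux
+       host% ./linux debug=parent gdb-pid=12345
+
+
+       Then type 'att 1' at that gdb's prompt, and UML will boot under its
+       control.
+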
+ + + If you are using something other than gdb as your debugger, then + you'll need to get it to do the equivalent of 'att 1' if it doesn't do + it automatically. + + + An example of an alternate debugger is strace. You can strace the + actual kernel as follows: + + o Run the following in a shell + + + host% + sh -c 'echo pid=$$; echo -n hit return; read x; exec strace -p 1 -o strace.out' + + + + o Run UML with 'debug' and 'gdb-pid=' with the pid printed out + by the previous command + + o Hit return in the shell, and UML will start running, and strace + output will start accumulating in the output file. + + Note that this is different from running + + + host% strace ./linux + + + + + That will strace only the main UML thread, the tracing thread, which + doesn't do any of the actual kernel work. It just oversees the vir- + tual machine. In contrast, using strace as described above will show + you the low-level activity of the virtual machine. + + + + + + 12. Kernel debugging examples + + 12.1. The case of the hung fsck + + When booting up the kernel, fsck failed, and dropped me into a shell + to fix things up. I ran fsck -y, which hung: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Setting hostname uml [ OK ] + Checking root filesystem + /dev/fhd0 was not cleanly unmounted, check forced. + Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780. + + /dev/fhd0: UNEXPECTED INCONSISTENCY; RUN fsck MANUALLY. + (i.e., without -a or -p options) + [ FAILED ] + + *** An error occurred during the file system check. + *** Dropping you to a shell; the system will reboot + *** when you leave the shell. + Give root password for maintenance + (or type Control-D for normal startup): + + [root@uml /root]# fsck -y /dev/fhd0 + fsck -y /dev/fhd0 + Parallelizing fsck version 1.14 (9-Jan-1999) + e2fsck 1.14, 9-Jan-1999 for EXT2 FS 0.5b, 95/08/09 + /dev/fhd0 contains a file system with errors, check forced. + Pass 1: Checking inodes, blocks, and sizes + Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780. Ignore error? yes + + Inode 19780, i_blocks is 1548, should be 540. Fix? yes + + Pass 2: Checking directory structure + Error reading block 49405 (Attempt to read block from filesystem resulted in short read). Ignore error? yes + + Directory inode 11858, block 0, offset 0: directory corrupted + Salvage? yes + + Missing '.' in directory inode 11858. + Fix? yes + + Missing '..' in directory inode 11858. + Fix? yes + + + + + + The standard drill in this sort of situation is to fire up gdb on the + signal thread, which, in this case, was pid 1935. In another window, + I run gdb and attach pid 1935. + + + + + ~/linux/2.3.26/um 1016: gdb linux + GNU gdb 4.17.0.11 with Linux support + Copyright 1998 Free Software Foundation, Inc. + GDB is free software, covered by the GNU General Public License, and you are + welcome to change it and/or distribute copies of it under certain conditions. + Type "show copying" to see the conditions. + There is absolutely no warranty for GDB. Type "show warranty" for details. + This GDB was configured as "i386-redhat-linux"... 
+ + (gdb) att 1935 + Attaching to program `/home/dike/linux/2.3.26/um/linux', Pid 1935 + 0x100756d9 in __wait4 () + + + + + + + Let's see what's currently running: + + + + (gdb) p current_task.pid + $1 = 0 + + + + + + It's the idle thread, which means that fsck went to sleep for some + reason and never woke up. + + + Let's guess that the last process in the process list is fsck: + + + + (gdb) p current_task.prev_task.comm + $13 = "fsck.ext2\000\000\000\000\000\000" + + + + + + It is, so let's see what it thinks it's up to: + + + + (gdb) p current_task.prev_task.thread + $14 = {extern_pid = 1980, tracing = 0, want_tracing = 0, forking = 0, + kernel_stack_page = 0, signal_stack = 1342627840, syscall = {id = 4, args = { + 3, 134973440, 1024, 0, 1024}, have_result = 0, result = 50590720}, + request = {op = 2, u = {exec = {ip = 1350467584, sp = 2952789424}, fork = { + regs = {1350467584, 2952789424, 0 }, sigstack = 0, + pid = 0}, switch_to = 0x507e8000, thread = {proc = 0x507e8000, + arg = 0xaffffdb0, flags = 0, new_pid = 0}, input_request = { + op = 1350467584, fd = -1342177872, proc = 0, pid = 0}}}} + + + + + + The interesting things here are the fact that its .thread.syscall.id + is __NR_write (see the big switch in arch/um/kernel/syscall_kern.c or + the defines in include/asm-um/arch/unistd.h), and that it never + returned. Also, its .request.op is OP_SWITCH (see + arch/um/include/user_util.h). These mean that it went into a write, + and, for some reason, called schedule(). + + + The fact that it never returned from write means that its stack should + be fairly interesting. Its pid is 1980 (.thread.extern_pid). That + process is being ptraced by the signal thread, so it must be detached + before gdb can attach it: + + + + + + + + + + + (gdb) call detach(1980) + + Program received signal SIGSEGV, Segmentation fault. + + The program being debugged stopped while in a function called from GDB. + When the function (detach) is done executing, GDB will silently + stop (instead of continuing to evaluate the expression containing + the function call). + (gdb) call detach(1980) + $15 = 0 + + + + + + The first detach segfaults for some reason, and the second one + succeeds. + + + Now I detach from the signal thread, attach to the fsck thread, and + look at its stack: + + + (gdb) det + Detaching from program: /home/dike/linux/2.3.26/um/linux Pid 1935 + (gdb) att 1980 + Attaching to program `/home/dike/linux/2.3.26/um/linux', Pid 1980 + 0x10070451 in __kill () + (gdb) bt + #0 0x10070451 in __kill () + #1 0x10068ccd in usr1_pid (pid=1980) at process.c:30 + #2 0x1006a03f in _switch_to (prev=0x50072000, next=0x507e8000) + at process_kern.c:156 + #3 0x1006a052 in switch_to (prev=0x50072000, next=0x507e8000, last=0x50072000) + at process_kern.c:161 + #4 0x10001d12 in schedule () at core.c:777 + #5 0x1006a744 in __down (sem=0x507d241c) at semaphore.c:71 + #6 0x1006aa10 in __down_failed () at semaphore.c:157 + #7 0x1006c5d8 in segv_handler (sc=0x5006e940) at trap_user.c:174 + #8 0x1006c5ec in kern_segv_handler (sig=11) at trap_user.c:182 + #9 + #10 0x10155404 in errno () + #11 0x1006c0aa in segv (address=1342179328, is_write=2) at trap_kern.c:50 + #12 0x1006c5d8 in segv_handler (sc=0x5006eaf8) at trap_user.c:174 + #13 0x1006c5ec in kern_segv_handler (sig=11) at trap_user.c:182 + #14 + #15 0xc0fd in ?? () + #16 0x10016647 in sys_write (fd=3, + buf=0x80b8800
, count=1024) + at read_write.c:159 + #17 0x1006d5b3 in execute_syscall (syscall=4, args=0x5006ef08) + at syscall_kern.c:254 + #18 0x1006af87 in really_do_syscall (sig=12) at syscall_user.c:35 + #19 + #20 0x400dc8b0 in ?? () + + + + + + The interesting things here are : + + o There are two segfaults on this stack (frames 9 and 14) + + o The first faulting address (frame 11) is 0x50000800 + + (gdb) p (void *)1342179328 + $16 = (void *) 0x50000800 + + + + + + The initial faulting address is interesting because it is on the idle + thread's stack. I had been seeing the idle thread segfault for no + apparent reason, and the cause looked like stack corruption. In hopes + of catching the culprit in the act, I had turned off all protections + to that stack while the idle thread wasn't running. This apparently + tripped that trap. + + + However, the more immediate problem is that second segfault and I'm + going to concentrate on that. First, I want to see where the fault + happened, so I have to go look at the sigcontent struct in frame 8: + + + + (gdb) up + #1 0x10068ccd in usr1_pid (pid=1980) at process.c:30 + 30 kill(pid, SIGUSR1); + (gdb) + #2 0x1006a03f in _switch_to (prev=0x50072000, next=0x507e8000) + at process_kern.c:156 + 156 usr1_pid(getpid()); + (gdb) + #3 0x1006a052 in switch_to (prev=0x50072000, next=0x507e8000, last=0x50072000) + at process_kern.c:161 + 161 _switch_to(prev, next); + (gdb) + #4 0x10001d12 in schedule () at core.c:777 + 777 switch_to(prev, next, prev); + (gdb) + #5 0x1006a744 in __down (sem=0x507d241c) at semaphore.c:71 + 71 schedule(); + (gdb) + #6 0x1006aa10 in __down_failed () at semaphore.c:157 + 157 } + (gdb) + #7 0x1006c5d8 in segv_handler (sc=0x5006e940) at trap_user.c:174 + 174 segv(sc->cr2, sc->err & 2); + (gdb) + #8 0x1006c5ec in kern_segv_handler (sig=11) at trap_user.c:182 + 182 segv_handler(sc); + (gdb) p *sc + Cannot access memory at address 0x0. + + + + + That's not very useful, so I'll try a more manual method: + + + (gdb) p *((struct sigcontext *) (&sig + 1)) + $19 = {gs = 0, __gsh = 0, fs = 0, __fsh = 0, es = 43, __esh = 0, ds = 43, + __dsh = 0, edi = 1342179328, esi = 1350378548, ebp = 1342630440, + esp = 1342630420, ebx = 1348150624, edx = 1280, ecx = 0, eax = 0, + trapno = 14, err = 4, eip = 268480945, cs = 35, __csh = 0, eflags = 66118, + esp_at_signal = 1342630420, ss = 43, __ssh = 0, fpstate = 0x0, oldmask = 0, + cr2 = 1280} + + + + The ip is in handle_mm_fault: + + + (gdb) p (void *)268480945 + $20 = (void *) 0x1000b1b1 + (gdb) i sym $20 + handle_mm_fault + 57 in section .text + + + + + + Specifically, it's in pte_alloc: + + + (gdb) i line *$20 + Line 124 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h" + starts at address 0x1000b1b1 + and ends at 0x1000b1b7 . + + + + + + To find where in handle_mm_fault this is, I'll jump forward in the + code until I see an address in that procedure: + + + + (gdb) i line *0x1000b1c0 + Line 126 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h" + starts at address 0x1000b1b7 + and ends at 0x1000b1c3 . + (gdb) i line *0x1000b1d0 + Line 131 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h" + starts at address 0x1000b1d0 + and ends at 0x1000b1da . + (gdb) i line *0x1000b1e0 + Line 61 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h" + starts at address 0x1000b1da + and ends at 0x1000b1e1 . + (gdb) i line *0x1000b1f0 + Line 134 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h" + starts at address 0x1000b1f0 + and ends at 0x1000b200 . 
+ (gdb) i line *0x1000b200 + Line 135 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h" + starts at address 0x1000b200 + and ends at 0x1000b208 . + (gdb) i line *0x1000b210 + Line 139 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h" + starts at address 0x1000b210 + and ends at 0x1000b219 . + (gdb) i line *0x1000b220 + Line 1168 of "memory.c" starts at address 0x1000b21e + and ends at 0x1000b222 . + + + + + + Something is apparently wrong with the page tables or vma_structs, so + lets go back to frame 11 and have a look at them: + + + + #11 0x1006c0aa in segv (address=1342179328, is_write=2) at trap_kern.c:50 + 50 handle_mm_fault(current, vma, address, is_write); + (gdb) call pgd_offset_proc(vma->vm_mm, address) + $22 = (pgd_t *) 0x80a548c + + + + + + That's pretty bogus. Page tables aren't supposed to be in process + text or data areas. Let's see what's in the vma: + + + (gdb) p *vma + $23 = {vm_mm = 0x507d2434, vm_start = 0, vm_end = 134512640, + vm_next = 0x80a4f8c, vm_page_prot = {pgprot = 0}, vm_flags = 31200, + vm_avl_height = 2058, vm_avl_left = 0x80a8c94, vm_avl_right = 0x80d1000, + vm_next_share = 0xaffffdb0, vm_pprev_share = 0xaffffe63, + vm_ops = 0xaffffe7a, vm_pgoff = 2952789626, vm_file = 0xafffffec, + vm_private_data = 0x62} + (gdb) p *vma.vm_mm + $24 = {mmap = 0x507d2434, mmap_avl = 0x0, mmap_cache = 0x8048000, + pgd = 0x80a4f8c, mm_users = {counter = 0}, mm_count = {counter = 134904288}, + map_count = 134909076, mmap_sem = {count = {counter = 135073792}, + sleepers = -1342177872, wait = {lock = , + task_list = {next = 0xaffffe63, prev = 0xaffffe7a}, + __magic = -1342177670, __creator = -1342177300}, __magic = 98}, + page_table_lock = {}, context = 138, start_code = 0, end_code = 0, + start_data = 0, end_data = 0, start_brk = 0, brk = 0, start_stack = 0, + arg_start = 0, arg_end = 0, env_start = 0, env_end = 0, rss = 1350381536, + total_vm = 0, locked_vm = 0, def_flags = 0, cpu_vm_mask = 0, swap_cnt = 0, + swap_address = 0, segments = 0x0} + + + + + + This also pretty bogus. With all of the 0x80xxxxx and 0xaffffxxx + addresses, this is looking like a stack was plonked down on top of + these structures. 
Maybe it's a stack overflow from the next page: + + + + (gdb) p vma + $25 = (struct vm_area_struct *) 0x507d2434 + + + + + + That's towards the lower quarter of the page, so that would have to + have been pretty heavy stack overflow: + + + + + + + + + + + + + + + (gdb) x/100x $25 + 0x507d2434: 0x507d2434 0x00000000 0x08048000 0x080a4f8c + 0x507d2444: 0x00000000 0x080a79e0 0x080a8c94 0x080d1000 + 0x507d2454: 0xaffffdb0 0xaffffe63 0xaffffe7a 0xaffffe7a + 0x507d2464: 0xafffffec 0x00000062 0x0000008a 0x00000000 + 0x507d2474: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d2484: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d2494: 0x00000000 0x00000000 0x507d2fe0 0x00000000 + 0x507d24a4: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d24b4: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d24c4: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d24d4: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d24e4: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d24f4: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d2504: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d2514: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d2524: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d2534: 0x00000000 0x00000000 0x507d25dc 0x00000000 + 0x507d2544: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d2554: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d2564: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d2574: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d2584: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d2594: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d25a4: 0x00000000 0x00000000 0x00000000 0x00000000 + 0x507d25b4: 0x00000000 0x00000000 0x00000000 0x00000000 + + + + + + It's not stack overflow. The only "stack-like" piece of this data is + the vma_struct itself. + + + At this point, I don't see any avenues to pursue, so I just have to + admit that I have no idea what's going on. What I will do, though, is + stick a trap on the segfault handler which will stop if it sees any + writes to the idle thread's stack. That was the thing that happened + first, and it may be that if I can catch it immediately, what's going + on will be somewhat clearer. + + + 12.2. Episode 2: The case of the hung fsck + + After setting a trap in the SEGV handler for accesses to the signal + thread's stack, I reran the kernel. + + + fsck hung again, this time by hitting the trap: + + + + + + + + + + + + + + + + + Setting hostname uml [ OK ] + Checking root filesystem + /dev/fhd0 contains a file system with errors, check forced. + Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780. + + /dev/fhd0: UNEXPECTED INCONSISTENCY; RUN fsck MANUALLY. + (i.e., without -a or -p options) + [ FAILED ] + + *** An error occurred during the file system check. + *** Dropping you to a shell; the system will reboot + *** when you leave the shell. + Give root password for maintenance + (or type Control-D for normal startup): + + [root@uml /root]# fsck -y /dev/fhd0 + fsck -y /dev/fhd0 + Parallelizing fsck version 1.14 (9-Jan-1999) + e2fsck 1.14, 9-Jan-1999 for EXT2 FS 0.5b, 95/08/09 + /dev/fhd0 contains a file system with errors, check forced. + Pass 1: Checking inodes, blocks, and sizes + Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780. Ignore error? 
yes + + Pass 2: Checking directory structure + Error reading block 49405 (Attempt to read block from filesystem resulted in short read). Ignore error? yes + + Directory inode 11858, block 0, offset 0: directory corrupted + Salvage? yes + + Missing '.' in directory inode 11858. + Fix? yes + + Missing '..' in directory inode 11858. + Fix? yes + + Untested (4127) [100fe44c]: trap_kern.c line 31 + + + + + + I need to get the signal thread to detach from pid 4127 so that I can + attach to it with gdb. This is done by sending it a SIGUSR1, which is + caught by the signal thread, which detaches the process: + + + kill -USR1 4127 + + + + + + Now I can run gdb on it: + + + + + + + + + + + + + + ~/linux/2.3.26/um 1034: gdb linux + GNU gdb 4.17.0.11 with Linux support + Copyright 1998 Free Software Foundation, Inc. + GDB is free software, covered by the GNU General Public License, and you are + welcome to change it and/or distribute copies of it under certain conditions. + Type "show copying" to see the conditions. + There is absolutely no warranty for GDB. Type "show warranty" for details. + This GDB was configured as "i386-redhat-linux"... + (gdb) att 4127 + Attaching to program `/home/dike/linux/2.3.26/um/linux', Pid 4127 + 0x10075891 in __libc_nanosleep () + + + + + + The backtrace shows that it was in a write and that the fault address + (address in frame 3) is 0x50000800, which is right in the middle of + the signal thread's stack page: + + + (gdb) bt + #0 0x10075891 in __libc_nanosleep () + #1 0x1007584d in __sleep (seconds=1000000) + at ../sysdeps/unix/sysv/linux/sleep.c:78 + #2 0x1006ce9a in stop () at user_util.c:191 + #3 0x1006bf88 in segv (address=1342179328, is_write=2) at trap_kern.c:31 + #4 0x1006c628 in segv_handler (sc=0x5006eaf8) at trap_user.c:174 + #5 0x1006c63c in kern_segv_handler (sig=11) at trap_user.c:182 + #6 + #7 0xc0fd in ?? () + #8 0x10016647 in sys_write (fd=3, buf=0x80b8800 "R.", count=1024) + at read_write.c:159 + #9 0x1006d603 in execute_syscall (syscall=4, args=0x5006ef08) + at syscall_kern.c:254 + #10 0x1006af87 in really_do_syscall (sig=12) at syscall_user.c:35 + #11 + #12 0x400dc8b0 in ?? () + #13 + #14 0x400dc8b0 in ?? () + #15 0x80545fd in ?? () + #16 0x804daae in ?? () + #17 0x8054334 in ?? () + #18 0x804d23e in ?? () + #19 0x8049632 in ?? () + #20 0x80491d2 in ?? () + #21 0x80596b5 in ?? () + (gdb) p (void *)1342179328 + $3 = (void *) 0x50000800 + + + + + + Going up the stack to the segv_handler frame and looking at where in + the code the access happened shows that it happened near line 110 of + block_dev.c: + + + + + + + + + + (gdb) up + #1 0x1007584d in __sleep (seconds=1000000) + at ../sysdeps/unix/sysv/linux/sleep.c:78 + ../sysdeps/unix/sysv/linux/sleep.c:78: No such file or directory. 
+ (gdb) + #2 0x1006ce9a in stop () at user_util.c:191 + 191 while(1) sleep(1000000); + (gdb) + #3 0x1006bf88 in segv (address=1342179328, is_write=2) at trap_kern.c:31 + 31 KERN_UNTESTED(); + (gdb) + #4 0x1006c628 in segv_handler (sc=0x5006eaf8) at trap_user.c:174 + 174 segv(sc->cr2, sc->err & 2); + (gdb) p *sc + $1 = {gs = 0, __gsh = 0, fs = 0, __fsh = 0, es = 43, __esh = 0, ds = 43, + __dsh = 0, edi = 1342179328, esi = 134973440, ebp = 1342631484, + esp = 1342630864, ebx = 256, edx = 0, ecx = 256, eax = 1024, trapno = 14, + err = 6, eip = 268550834, cs = 35, __csh = 0, eflags = 66070, + esp_at_signal = 1342630864, ss = 43, __ssh = 0, fpstate = 0x0, oldmask = 0, + cr2 = 1342179328} + (gdb) p (void *)268550834 + $2 = (void *) 0x1001c2b2 + (gdb) i sym $2 + block_write + 1090 in section .text + (gdb) i line *$2 + Line 209 of "/home/dike/linux/2.3.26/um/include/asm/arch/string.h" + starts at address 0x1001c2a1 + and ends at 0x1001c2bf . + (gdb) i line *0x1001c2c0 + Line 110 of "block_dev.c" starts at address 0x1001c2bf + and ends at 0x1001c2e3 . + + + + + + Looking at the source shows that the fault happened during a call to + copy_from_user to copy the data into the kernel: + + + 107 count -= chars; + 108 copy_from_user(p,buf,chars); + 109 p += chars; + 110 buf += chars; + + + + + + p is the pointer which must contain 0x50000800, since buf contains + 0x80b8800 (frame 8 above). It is defined as: + + + p = offset + bh->b_data; + + + + + + I need to figure out what bh is, and it just so happens that bh is + passed as an argument to mark_buffer_uptodate and mark_buffer_dirty a + few lines later, so I do a little disassembly: + + + + + (gdb) disas 0x1001c2bf 0x1001c2e0 + Dump of assembler code from 0x1001c2bf to 0x1001c2d0: + 0x1001c2bf : addl %eax,0xc(%ebp) + 0x1001c2c2 : movl 0xfffffdd4(%ebp),%edx + 0x1001c2c8 : btsl $0x0,0x18(%edx) + 0x1001c2cd : btsl $0x1,0x18(%edx) + 0x1001c2d2 : sbbl %ecx,%ecx + 0x1001c2d4 : testl %ecx,%ecx + 0x1001c2d6 : jne 0x1001c2e3 + 0x1001c2d8 : pushl $0x0 + 0x1001c2da : pushl %edx + 0x1001c2db : call 0x1001819c <__mark_buffer_dirty> + End of assembler dump. + + + + + + At that point, bh is in %edx (address 0x1001c2da), which is calculated + at 0x1001c2c2 as %ebp + 0xfffffdd4, so I figure exactly what that is, + taking %ebp from the sigcontext_struct above: + + + (gdb) p (void *)1342631484 + $5 = (void *) 0x5006ee3c + (gdb) p 0x5006ee3c+0xfffffdd4 + $6 = 1342630928 + (gdb) p (void *)$6 + $7 = (void *) 0x5006ec10 + (gdb) p *((void **)$7) + $8 = (void *) 0x50100200 + + + + + + Now, I look at the structure to see what's in it, and particularly, + what its b_data field contains: + + + (gdb) p *((struct buffer_head *)0x50100200) + $13 = {b_next = 0x50289380, b_blocknr = 49405, b_size = 1024, b_list = 0, + b_dev = 15872, b_count = {counter = 1}, b_rdev = 15872, b_state = 24, + b_flushtime = 0, b_next_free = 0x501001a0, b_prev_free = 0x50100260, + b_this_page = 0x501001a0, b_reqnext = 0x0, b_pprev = 0x507fcf58, + b_data = 0x50000800 "", b_page = 0x50004000, + b_end_io = 0x10017f60 , b_dev_id = 0x0, + b_rsector = 98810, b_wait = {lock = , + task_list = {next = 0x50100248, prev = 0x50100248}, __magic = 1343226448, + __creator = 0}, b_kiobuf = 0x0} + + + + + + The b_data field is indeed 0x50000800, so the question becomes how + that happened. The rest of the structure looks fine, so this probably + is not a case of data corruption. It happened on purpose somehow. + + + The b_page field is a pointer to the page_struct representing the + 0x50000000 page. 
Looking at it shows the kernel's idea of the state + of that page: + + + + (gdb) p *$13.b_page + $17 = {list = {next = 0x50004a5c, prev = 0x100c5174}, mapping = 0x0, + index = 0, next_hash = 0x0, count = {counter = 1}, flags = 132, lru = { + next = 0x50008460, prev = 0x50019350}, wait = { + lock = , task_list = {next = 0x50004024, + prev = 0x50004024}, __magic = 1342193708, __creator = 0}, + pprev_hash = 0x0, buffers = 0x501002c0, virtual = 1342177280, + zone = 0x100c5160} + + + + + + Some sanity-checking: the virtual field shows the "virtual" address of + this page, which in this kernel is the same as its "physical" address, + and the page_struct itself should be mem_map[0], since it represents + the first page of memory: + + + + (gdb) p (void *)1342177280 + $18 = (void *) 0x50000000 + (gdb) p mem_map + $19 = (mem_map_t *) 0x50004000 + + + + + + These check out fine. + + + Now to check out the page_struct itself. In particular, the flags + field shows whether the page is considered free or not: + + + (gdb) p (void *)132 + $21 = (void *) 0x84 + + + + + + The "reserved" bit is the high bit, which is definitely not set, so + the kernel considers the signal stack page to be free and available to + be used. + + + At this point, I jump to conclusions and start looking at my early + boot code, because that's where that page is supposed to be reserved. + + + In my setup_arch procedure, I have the following code which looks just + fine: + + + + bootmap_size = init_bootmem(start_pfn, end_pfn - start_pfn); + free_bootmem(__pa(low_physmem) + bootmap_size, high_physmem - low_physmem); + + + + + + Two stack pages have already been allocated, and low_physmem points to + the third page, which is the beginning of free memory. + The init_bootmem call declares the entire memory to the boot memory + manager, which marks it all reserved. The free_bootmem call frees up + all of it, except for the first two pages. This looks correct to me. + + + So, I decide to see init_bootmem run and make sure that it is marking + those first two pages as reserved. I never get that far. + + + Stepping into init_bootmem, and looking at bootmem_map before looking + at what it contains shows the following: + + + + (gdb) p bootmem_map + $3 = (void *) 0x50000000 + + + + + + Aha! The light dawns. That first page is doing double duty as a + stack and as the boot memory map. The last thing that the boot memory + manager does is to free the pages used by its memory map, so this page + is getting freed even its marked as reserved. + + + The fix was to initialize the boot memory manager before allocating + those two stack pages, and then allocate them through the boot memory + manager. After doing this, and fixing a couple of subsequent buglets, + the stack corruption problem disappeared. + + + + + + 13. What to do when UML doesn't work + + + + + 13.1. Strange compilation errors when you build from source + + As of test11, it is necessary to have "ARCH=um" in the environment or + on the make command line for all steps in building UML, including + clean, distclean, or mrproper, config, menuconfig, or xconfig, dep, + and linux. If you forget for any of them, the i386 build seems to + contaminate the UML build. If this happens, start from scratch with + + + host% + make mrproper ARCH=um + + + + + and repeat the build process with ARCH=um on all the steps. + + + See ``Compiling the kernel and modules'' for more details. + + + Another cause of strange compilation errors is building UML in + /usr/src/linux. 
If you do this, the first thing you need to do is
+       clean up the mess you made.  The /usr/src/linux/asm link will now
+       point to /usr/src/linux/asm-um.  Make it point back to
+       /usr/src/linux/asm-i386.  Then, move your UML pool someplace else and
+       build it there.  Also see below, where a more specific set of symptoms
+       is described.
+
+
+       13.3.  A variety of panics and hangs with /tmp on a reiserfs
+       filesystem
+
+       I saw this on reiserfs 3.5.21 and it seems to be fixed in 3.5.27.
+       Panics preceded by
+
+
+       Detaching pid nnnn
+
+
+       are diagnostic of this problem.  This is a reiserfs bug which causes a
+       thread to occasionally read stale data from a mmapped page shared with
+       another thread.  The fix is to upgrade the filesystem or to have /tmp
+       be an ext2 filesystem.
+
+
+       13.4.  The compile fails with errors about conflicting types for
+       'open', 'dup', and 'waitpid'
+
+       This happens when you build in /usr/src/linux.  The UML build makes
+       the include/asm link point to include/asm-um.  /usr/include/asm points
+       to /usr/src/linux/include/asm, so when that link gets moved, files
+       which need to include the asm-i386 versions of headers get the
+       incompatible asm-um versions.  The fix is to move the include/asm link
+       back to include/asm-i386 and to do UML builds someplace else.
+
+
+       13.5.  UML doesn't work when /tmp is an NFS filesystem
+
+       This seems to be a situation similar to the ReiserFS problem above.
+       Some versions of NFS seem not to handle mmap correctly, which UML
+       depends on.  The workaround is to have /tmp be a non-NFS directory.
+
+
+       13.6.  UML hangs on boot when compiled with gprof support
+
+       If you build UML with gprof support and, early in the boot, it does
+       this:
+
+
+       kernel BUG at page_alloc.c:100!
+
+
+       you have a buggy gcc.  You can work around the problem by removing
+       UM_FASTCALL from CFLAGS in arch/um/Makefile-i386.  This will open up
+       another bug, but that one is fairly hard to reproduce.
+
+
+       13.7.  syslogd dies with a SIGTERM on startup
+
+       The exact boot error depends on the distribution that you're booting,
+       but Debian produces this:
+
+
+       /etc/rc2.d/S10sysklogd: line 49:    93 Terminated
+       start-stop-daemon --start --quiet --exec /sbin/syslogd -- $SYSLOGD
+
+
+       This is a syslogd bug.  There's a race between a parent process
+       installing a signal handler and its child sending the signal.  See
+       this uml-devel post for the details.
+
+
+       13.8.  TUN/TAP networking doesn't work on a 2.4 host
+
+       There are a couple of problems which were pointed out by Tim Robinson:
+
+       o  It doesn't work on hosts running 2.4.7 (or thereabouts) or earlier.
+          The fix is to upgrade to something more recent and then read the
+          next item.
+
+       o  If you see
+
+
+       File descriptor in bad state
+
+
+       when you bring up the device inside UML, you have a header mismatch
+       between the original kernel and the upgraded one.  Make /usr/src/linux
+       point at the new headers.  This will only be a problem if you build
+       uml_net yourself.
+
+
+       13.9.  You can network to the host but not to other machines on the
+       net
+
+       If you can connect to the host, and the host can connect to UML, but
+       you cannot connect to any other machines, then you may need to enable
+       IP Masquerading on the host.  Usually this is only experienced when
+       using private IP addresses (192.168.x.x or 10.x.x.x) for host/UML
+       networking, rather than the public address space that your host is
+       connected to.  
UML does not enable IP Masquerading, so you will need + to create a static rule to enable it: + + + host% + iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE + + + + + Replace eth0 with the interface that you use to talk to the rest of + the world. + + + Documentation on IP Masquerading, and SNAT, can be found at + www.netfilter.org . + + + If you can reach the local net, but not the outside Internet, then + that is usually a routing problem. The UML needs a default route: + + + UML# + route add default gw gateway IP + + + + + The gateway IP can be any machine on the local net that knows how to + reach the outside world. Usually, this is the host or the local net- + work's gateway. + + + Occasionally, we hear from someone who can reach some machines, but + not others on the same net, or who can reach some ports on other + machines, but not others. These are usually caused by strange + firewalling somewhere between the UML and the other box. You track + this down by running tcpdump on every interface the packets travel + over and see where they disappear. When you find a machine that takes + the packets in, but does not send them onward, that's the culprit. + + + + 13.10. I have no root and I want to scream + + Thanks to Birgit Wahlich for telling me about this strange one. It + turns out that there's a limit of six environment variables on the + kernel command line. When that limit is reached or exceeded, argument + processing stops, which means that the 'root=' argument that UML + usually adds is not seen. So, the filesystem has no idea what the + root device is, so it panics. + + + The fix is to put less stuff on the command line. Glomming all your + setup variables into one is probably the best way to go. + + + + 13.11. UML build conflict between ptrace.h and ucontext.h + + On some older systems, /usr/include/asm/ptrace.h and + /usr/include/sys/ucontext.h define the same names. So, when they're + included together, the defines from one completely mess up the parsing + of the other, producing errors like: + /usr/include/sys/ucontext.h:47: parse error before + `10' + + + + + plus a pile of warnings. + + + This is a libc botch, which has since been fixed, and I don't see any + way around it besides upgrading. + + + + 13.12. The UML BogoMips is exactly half the host's BogoMips + + On i386 kernels, there are two ways of running the loop that is used + to calculate the BogoMips rating, using the TSC if it's there or using + a one-instruction loop. The TSC produces twice the BogoMips as the + loop. UML uses the loop, since it has nothing resembling a TSC, and + will get almost exactly the same BogoMips as a host using the loop. + However, on a host with a TSC, its BogoMips will be double the loop + BogoMips, and therefore double the UML BogoMips. + + + + 13.13. When you run UML, it immediately segfaults + + If the host is configured with the 2G/2G address space split, that's + why. See ``UML on 2G/2G hosts'' for the details on getting UML to + run on your host. + + + + 13.14. xterms appear, then immediately disappear + + If you're running an up to date kernel with an old release of + uml_utilities, the port-helper program will not work properly, so + xterms will exit straight after they appear. The solution is to + upgrade to the latest release of uml_utilities. Usually this problem + occurs when you have installed a packaged release of UML then compiled + your own development kernel without upgrading the uml_utilities from + the source distribution. + + + + 13.15. 
Any other panic, hang, or strange behavior
+
+       If you're seeing truly strange behavior, such as hangs or panics that
+       happen in random places, or you try running the debugger to see what's
+       happening and it acts strangely, then it could be a problem in the
+       host kernel.  If you're not running a stock Linus or -ac kernel, then
+       try that.  An early version of the preemption patch and a 2.4.10 SuSE
+       kernel have caused very strange problems in UML.
+
+
+       Otherwise, let me know about it.  Send a message to one of the UML
+       mailing lists - either the developer list - user-mode-linux-devel at
+       lists dot sourceforge dot net (subscription info) or the user list -
+       user-mode-linux-user at lists dot sourceforge dot net (subscription
+       info), whichever you prefer.  Don't assume that everyone knows about
+       it and that a fix is imminent.
+
+
+       If you want to be super-helpful, read ``Diagnosing Problems''  and
+       follow the instructions contained therein.
+
+
+       14.  Diagnosing Problems
+
+
+       If you get UML to crash, hang, or otherwise misbehave, you should
+       report this on one of the project mailing lists, either the developer
+       list - user-mode-linux-devel at lists dot sourceforge dot net
+       (subscription info) or the user list - user-mode-linux-user at lists
+       dot sourceforge dot net (subscription info).  When you do, it is
+       likely that I will want more information.  So, it would be helpful to
+       read the stuff below, do whatever is applicable in your case, and
+       report the results to the list.
+
+
+       For any diagnosis, you're going to need to build a debugging kernel.
+       The binaries from this site aren't debuggable.  If you haven't done
+       this before, read about ``Compiling the kernel and modules''  and
+       ``Kernel debugging''  first.
+
+
+       14.1.  Case 1 : Normal kernel panics
+
+       The most common case is for a normal thread to panic.  To debug this,
+       you will need to run it under the debugger (add 'debug' to the command
+       line).  An xterm will start up with gdb running inside it.  Continue
+       it when it stops in start_kernel and make it crash.  Now ^C gdb and
+       get a backtrace.
+
+
+       If the panic was a "Kernel mode fault", then there will be a segv
+       frame on the stack and I'm going to want some more information.  The
+       stack might look something like this:
+
+
+       (UML gdb)  backtrace
+       #0  0x1009bf76 in __sigprocmask (how=1, set=0x5f347940, oset=0x0)
+           at ../sysdeps/unix/sysv/linux/sigprocmask.c:49
+       #1  0x10091411 in change_sig (signal=10, on=1) at process.c:218
+       #2  0x10094785 in timer_handler (sig=26) at time_kern.c:32
+       #3  0x1009bf38 in __restore ()
+           at ../sysdeps/unix/sysv/linux/i386/sigaction.c:125
+       #4  0x1009534c in segv (address=8, ip=268849158, is_write=2, is_user=0)
+           at trap_kern.c:66
+       #5  0x10095c04 in segv_handler (sig=11) at trap_user.c:285
+       #6  0x1009bf38 in __restore ()
+
+
+       I'm going to want to see the symbol and line information for the value
+       of ip in the segv frame.  In this case, you would do the following:
+
+
+       (UML gdb)  i sym 268849158
+
+
+       and
+
+
+       (UML gdb)  i line *268849158
+
+
+       The reason for this is that the __restore frame right above the
+       segv_handler frame is hiding the frame that actually segfaulted.  So,
+       I have to get that information from the faulting ip.
+
+
+       14.2.  Case 2 : Tracing thread panics
+
+       The less common and more painful case is when the tracing thread
+       panics.  In this case, the kernel debugger will be useless because it
+       needs a healthy tracing thread in order to work.  The first thing to
+       do is get a backtrace from the tracing thread.  This is done by
This is done by + figuring out what its pid is, firing up gdb, and attaching it to that + pid. You can figure out the tracing thread pid by looking at the + first line of the console output, which will look like this: + + + tracing thread pid = 15851 + + + + + or by running ps on the host and finding the line that looks like + this: + + + jdike 15851 4.5 0.4 132568 1104 pts/0 S 21:34 0:05 ./linux [(tracing thread)] + + + + + If the panic was 'segfault in signals', then follow the instructions + above for collecting information about the location of the seg fault. + + + If the tracing thread flaked out all by itself, then send that + backtrace in and wait for our crack debugging team to fix the problem. + + + 14.3. Case 3 : Tracing thread panics caused by other threads + + However, there are cases where the misbehavior of another thread + caused the problem. The most common panic of this type is: + + + wait_for_stop failed to wait for to stop with + + + + + In this case, you'll need to get a backtrace from the process men- + tioned in the panic, which is complicated by the fact that the kernel + debugger is defunct and without some fancy footwork, another gdb can't + attach to it. So, this is how the fancy footwork goes: + + In a shell: + + + host% kill -STOP pid + + + + + Run gdb on the tracing thread as described in case 2 and do: + + + (host gdb) call detach(pid) + + + If you get a segfault, do it again. It always works the second time. + + Detach from the tracing thread and attach to that other thread: + + + (host gdb) detach + + + + + + + (host gdb) attach pid + + + + + If gdb hangs when attaching to that process, go back to a shell and + do: + + + host% + kill -CONT pid + + + + + And then get the backtrace: + + + (host gdb) backtrace + + + + + + 14.4. Case 4 : Hangs + + Hangs seem to be fairly rare, but they sometimes happen. When a hang + happens, we need a backtrace from the offending process. Run the + kernel debugger as described in case 1 and get a backtrace. If the + current process is not the idle thread, then send in the backtrace. + You can tell that it's the idle thread if the stack looks like this: + + + #0 0x100b1401 in __libc_nanosleep () + #1 0x100a2885 in idle_sleep (secs=10) at time.c:122 + #2 0x100a546f in do_idle () at process_kern.c:445 + #3 0x100a5508 in cpu_idle () at process_kern.c:471 + #4 0x100ec18f in start_kernel () at init/main.c:592 + #5 0x100a3e10 in start_kernel_proc (unused=0x0) at um_arch.c:71 + #6 0x100a383f in signal_tramp (arg=0x100a3dd8) at trap_user.c:50 + + + + + If this is the case, then some other process is at fault, and went to + sleep when it shouldn't have. Run ps on the host and figure out which + process should not have gone to sleep and stayed asleep. Then attach + to it with gdb and get a backtrace as described in case 3. + + + + + + + 15. Thanks + + + A number of people have helped this project in various ways, and this + page gives recognition where recognition is due. + + + If you're listed here and you would prefer a real link on your name, + or no link at all, instead of the despammed email address pseudo-link, + let me know. + + + If you're not listed here and you think maybe you should be, please + let me know that as well. I try to get everyone, but sometimes my + bookkeeping lapses and I forget about contributions. + + + 15.1. 
Code and Documentation + + Rusty Russell - + + o wrote the HOWTO + + o prodded me into making this project official and putting it on + SourceForge + + o came up with the way cool UML logo + + o redid the config process + + + Peter Moulder - Fixed my config and build + processes, and added some useful code to the block driver + + + Bill Stearns - + + o HOWTO updates + + o lots of bug reports + + o lots of testing + + o dedicated a box (uml.ists.dartmouth.edu) to support UML development + + o wrote the mkrootfs script, which allows bootable filesystems of + RPM-based distributions to be cranked out + + o cranked out a large number of filesystems with said script + + + Jim Leu - Wrote the virtual ethernet driver + and associated usermode tools + + Lars Brinkhoff - Contributed the ptrace + proxy from his own project to allow easier + kernel debugging + + + Andrea Arcangeli - Redid some of the early boot + code so that it would work on machines with Large File Support + + + Chris Emerson - Did + the first UML port to Linux/ppc + + + Harald Welte - Wrote the multicast + transport for the network driver + + + Jorgen Cederlof - Added special file support to hostfs + + + Greg Lonnon - Changed the ubd driver + to allow it to layer a COW file on a shared read-only filesystem and + wrote the iomem emulation support + + + Henrik Nordstrom - Provided a variety + of patches, fixes, and clues + + + Lennert Buytenhek - Contributed various patches, a rewrite of the + network driver, the first implementation of the mconsole driver, and + did the bulk of the work needed to get SMP working again. + + + Yon Uriarte - Fixed the TUN/TAP network backend while I slept. + + + Adam Heath - Made a bunch of nice cleanups to the initialization code, + plus various other small patches. + + + Matt Zimmerman - Matt volunteered to be the UML Debian maintainer and + is doing a real nice job of it. He also noticed and fixed a number of + actually and potentially exploitable security holes in uml_net. Plus + the occasional patch. I like patches. + + + James McMechan - James seems to have taken over maintenance of the ubd + driver and is doing a nice job of it. + + + Chandan Kudige - wrote the umlgdb script which automates the reloading + of module symbols. + + + Steve Schmidtke - wrote the UML slirp transport and hostaudio drivers, + enabling UML processes to access audio devices on the host. He also + submitted patches for the slip transport and lots of other things. + + + David Coulson - + + o Set up the usermodelinux.org site, + which is a great way of keeping the UML user community on top of + UML goings-on. + + o Site documentation and updates + + o Nifty little UML management daemon UMLd + + + o Lots of testing and bug reports + + + + + 15.2. Flushing out bugs + + + + o Yuri Pudgorodsky + + o Gerald Britton + + o Ian Wehrman + + o Gord Lamb + + o Eugene Koontz + + o John H. Hartman + + o Anders Karlsson + + o Daniel Phillips + + o John Fremlin + + o Rainer Burgstaller + + o James Stevenson + + o Matt Clay + + o Cliff Jefferies + + o Geoff Hoff + + o Lennert Buytenhek + + o Al Viro + + o Frank Klingenhoefer + + o Livio Baldini Soares + + o Jon Burgess + + o Petru Paler + + o Paul + + o Chris Reahard + + o Sverker Nilsson + + o Gong Su + + o johan verrept + + o Bjorn Eriksson + + o Lorenzo Allegrucci + + o Muli Ben-Yehuda + + o David Mansfield + + o Howard Goff + + o Mike Anderson + + o John Byrne + + o Sapan J. Batia + + o Iris Huang + + o Jan Hudec + + o Voluspa + + + + + 15.3. 
Buglets and clean-ups + + + + o Dave Zarzycki + + o Adam Lazur + + o Boria Feigin + + o Brian J. Murrell + + o JS + + o Roman Zippel + + o Wil Cooley + + o Ayelet Shemesh + + o Will Dyson + + o Sverker Nilsson + + o dvorak + + o v.naga srinivas + + o Shlomi Fish + + o Roger Binns + + o johan verrept + + o MrChuoi + + o Peter Cleve + + o Vincent Guffens + + o Nathan Scott + + o Patrick Caulfield + + o jbearce + + o Catalin Marinas + + o Shane Spencer + + o Zou Min + + + o Ryan Boder + + o Lorenzo Colitti + + o Gwendal Grignou + + o Andre' Breiler + + o Tsutomu Yasuda + + + + 15.4. Case Studies + + + o Jon Wright + + o William McEwan + + o Michael Richardson + + + + 15.5. Other contributions + + + Bill Carr made the Red Hat mkrootfs script + work with RH 6.2. + + Michael Jennings sent in some material which + is now gracing the top of the index page of this site. + + SGI (and more specifically Ralf Baechle ) gave me an account on oss.sgi.com + . The bandwidth there made it possible to + produce most of the filesystems available on the project download + page. + + Laurent Bonnaud took the old grotty + Debian filesystem that I've been distributing and updated it to 2.2. + It is now available by itself here. + + Rik van Riel gave me some ftp space on ftp.nl.linux.org so I can make + releases even when Sourceforge is broken. + + Rodrigo de Castro looked at my broken pte code and told me what was + wrong with it, letting me fix a long-standing (several weeks) and + serious set of bugs. + + Chris Reahard built a specialized root filesystem for running a DNS + server jailed inside UML. It's available from the download + page in the Jail + Filesystems section. + + + + + + + + + + + + diff --git a/Documentation/virtual/index.rst b/Documentation/virtual/index.rst deleted file mode 100644 index 062ffb527043..000000000000 --- a/Documentation/virtual/index.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. SPDX-License-Identifier: GPL-2.0 - -============================ -Linux Virtualization Support -============================ - -.. toctree:: - :maxdepth: 2 - - kvm/index - paravirt_ops - -.. only:: html and subproject - - Indices - ======= - - * :ref:`genindex` diff --git a/Documentation/virtual/kvm/amd-memory-encryption.rst b/Documentation/virtual/kvm/amd-memory-encryption.rst deleted file mode 100644 index d18c97b4e140..000000000000 --- a/Documentation/virtual/kvm/amd-memory-encryption.rst +++ /dev/null @@ -1,250 +0,0 @@ -====================================== -Secure Encrypted Virtualization (SEV) -====================================== - -Overview -======== - -Secure Encrypted Virtualization (SEV) is a feature found on AMD processors. - -SEV is an extension to the AMD-V architecture which supports running -virtual machines (VMs) under the control of a hypervisor. When enabled, -the memory contents of a VM will be transparently encrypted with a key -unique to that VM. - -The hypervisor can determine the SEV support through the CPUID -instruction. The CPUID function 0x8000001f reports information related -to SEV:: - - 0x8000001f[eax]: - Bit[1] indicates support for SEV - ... 
- [ecx]: - Bits[31:0] Number of encrypted guests supported simultaneously - -If support for SEV is present, MSR 0xc001_0010 (MSR_K8_SYSCFG) and MSR 0xc001_0015 -(MSR_K7_HWCR) can be used to determine if it can be enabled:: - - 0xc001_0010: - Bit[23] 1 = memory encryption can be enabled - 0 = memory encryption can not be enabled - - 0xc001_0015: - Bit[0] 1 = memory encryption can be enabled - 0 = memory encryption can not be enabled - -When SEV support is available, it can be enabled in a specific VM by -setting the SEV bit before executing VMRUN.:: - - VMCB[0x90]: - Bit[1] 1 = SEV is enabled - 0 = SEV is disabled - -SEV hardware uses ASIDs to associate a memory encryption key with a VM. -Hence, the ASID for the SEV-enabled guests must be from 1 to a maximum value -defined in the CPUID 0x8000001f[ecx] field. - -SEV Key Management -================== - -The SEV guest key management is handled by a separate processor called the AMD -Secure Processor (AMD-SP). Firmware running inside the AMD-SP provides a secure -key management interface to perform common hypervisor activities such as -encrypting bootstrap code, snapshot, migrating and debugging the guest. For more -information, see the SEV Key Management spec [api-spec]_ - -KVM implements the following commands to support common lifecycle events of SEV -guests, such as launching, running, snapshotting, migrating and decommissioning. - -1. KVM_SEV_INIT ---------------- - -The KVM_SEV_INIT command is used by the hypervisor to initialize the SEV platform -context. In a typical workflow, this command should be the first command issued. - -Returns: 0 on success, -negative on error - -2. KVM_SEV_LAUNCH_START ------------------------ - -The KVM_SEV_LAUNCH_START command is used for creating the memory encryption -context. To create the encryption context, user must provide a guest policy, -the owner's public Diffie-Hellman (PDH) key and session information. - -Parameters: struct kvm_sev_launch_start (in/out) - -Returns: 0 on success, -negative on error - -:: - - struct kvm_sev_launch_start { - __u32 handle; /* if zero then firmware creates a new handle */ - __u32 policy; /* guest's policy */ - - __u64 dh_uaddr; /* userspace address pointing to the guest owner's PDH key */ - __u32 dh_len; - - __u64 session_addr; /* userspace address which points to the guest session information */ - __u32 session_len; - }; - -On success, the 'handle' field contains a new handle and on error, a negative value. - -For more details, see SEV spec Section 6.2. - -3. KVM_SEV_LAUNCH_UPDATE_DATA ------------------------------ - -The KVM_SEV_LAUNCH_UPDATE_DATA is used for encrypting a memory region. It also -calculates a measurement of the memory contents. The measurement is a signature -of the memory contents that can be sent to the guest owner as an attestation -that the memory was encrypted correctly by the firmware. - -Parameters (in): struct kvm_sev_launch_update_data - -Returns: 0 on success, -negative on error - -:: - - struct kvm_sev_launch_update { - __u64 uaddr; /* userspace address to be encrypted (must be 16-byte aligned) */ - __u32 len; /* length of the data to be encrypted (must be 16-byte aligned) */ - }; - -For more details, see SEV spec Section 6.3. - -4. KVM_SEV_LAUNCH_MEASURE -------------------------- - -The KVM_SEV_LAUNCH_MEASURE command is used to retrieve the measurement of the -data encrypted by the KVM_SEV_LAUNCH_UPDATE_DATA command. The guest owner may -wait to provide the guest with confidential information until it can verify the -measurement. 
Since the guest owner knows the initial contents of the guest at -boot, the measurement can be verified by comparing it to what the guest owner -expects. - -Parameters (in): struct kvm_sev_launch_measure - -Returns: 0 on success, -negative on error - -:: - - struct kvm_sev_launch_measure { - __u64 uaddr; /* where to copy the measurement */ - __u32 len; /* length of measurement blob */ - }; - -For more details on the measurement verification flow, see SEV spec Section 6.4. - -5. KVM_SEV_LAUNCH_FINISH ------------------------- - -After completion of the launch flow, the KVM_SEV_LAUNCH_FINISH command can be -issued to make the guest ready for the execution. - -Returns: 0 on success, -negative on error - -6. KVM_SEV_GUEST_STATUS ------------------------ - -The KVM_SEV_GUEST_STATUS command is used to retrieve status information about a -SEV-enabled guest. - -Parameters (out): struct kvm_sev_guest_status - -Returns: 0 on success, -negative on error - -:: - - struct kvm_sev_guest_status { - __u32 handle; /* guest handle */ - __u32 policy; /* guest policy */ - __u8 state; /* guest state (see enum below) */ - }; - -SEV guest state: - -:: - - enum { - SEV_STATE_INVALID = 0; - SEV_STATE_LAUNCHING, /* guest is currently being launched */ - SEV_STATE_SECRET, /* guest is being launched and ready to accept the ciphertext data */ - SEV_STATE_RUNNING, /* guest is fully launched and running */ - SEV_STATE_RECEIVING, /* guest is being migrated in from another SEV machine */ - SEV_STATE_SENDING /* guest is getting migrated out to another SEV machine */ - }; - -7. KVM_SEV_DBG_DECRYPT ----------------------- - -The KVM_SEV_DEBUG_DECRYPT command can be used by the hypervisor to request the -firmware to decrypt the data at the given memory region. - -Parameters (in): struct kvm_sev_dbg - -Returns: 0 on success, -negative on error - -:: - - struct kvm_sev_dbg { - __u64 src_uaddr; /* userspace address of data to decrypt */ - __u64 dst_uaddr; /* userspace address of destination */ - __u32 len; /* length of memory region to decrypt */ - }; - -The command returns an error if the guest policy does not allow debugging. - -8. KVM_SEV_DBG_ENCRYPT ----------------------- - -The KVM_SEV_DEBUG_ENCRYPT command can be used by the hypervisor to request the -firmware to encrypt the data at the given memory region. - -Parameters (in): struct kvm_sev_dbg - -Returns: 0 on success, -negative on error - -:: - - struct kvm_sev_dbg { - __u64 src_uaddr; /* userspace address of data to encrypt */ - __u64 dst_uaddr; /* userspace address of destination */ - __u32 len; /* length of memory region to encrypt */ - }; - -The command returns an error if the guest policy does not allow debugging. - -9. KVM_SEV_LAUNCH_SECRET ------------------------- - -The KVM_SEV_LAUNCH_SECRET command can be used by the hypervisor to inject secret -data after the measurement has been validated by the guest owner. - -Parameters (in): struct kvm_sev_launch_secret - -Returns: 0 on success, -negative on error - -:: - - struct kvm_sev_launch_secret { - __u64 hdr_uaddr; /* userspace address containing the packet header */ - __u32 hdr_len; - - __u64 guest_uaddr; /* the guest memory region where the secret should be injected */ - __u32 guest_len; - - __u64 trans_uaddr; /* the hypervisor memory region which contains the secret */ - __u32 trans_len; - }; - -References -========== - - -See [white-paper]_, [api-spec]_, [amd-apm]_ and [kvm-forum]_ for more info. - -.. 
[white-paper] http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf -.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM_API_Specification.pdf -.. [amd-apm] http://support.amd.com/TechDocs/24593.pdf (section 15.34) -.. [kvm-forum] http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt deleted file mode 100644 index e54a3f51ddc5..000000000000 --- a/Documentation/virtual/kvm/api.txt +++ /dev/null @@ -1,5296 +0,0 @@ -The Definitive KVM (Kernel-based Virtual Machine) API Documentation -=================================================================== - -1. General description ---------------------- - -The kvm API is a set of ioctls that are issued to control various aspects -of a virtual machine. The ioctls belong to three classes: - - - System ioctls: These query and set global attributes which affect the - whole kvm subsystem. In addition a system ioctl is used to create - virtual machines. - - - VM ioctls: These query and set attributes that affect an entire virtual - machine, for example memory layout. In addition a VM ioctl is used to - create virtual cpus (vcpus) and devices. - - VM ioctls must be issued from the same process (address space) that was - used to create the VM. - - - vcpu ioctls: These query and set attributes that control the operation - of a single virtual cpu. - - vcpu ioctls should be issued from the same thread that was used to create - the vcpu, except for asynchronous vcpu ioctls that are marked as such in - the documentation. Otherwise, the first ioctl after switching threads - could see a performance impact. - - - device ioctls: These query and set attributes that control the operation - of a single device. - - device ioctls must be issued from the same process (address space) that - was used to create the VM. - -2. File descriptors ------------------- - -The kvm API is centered around file descriptors. An initial -open("/dev/kvm") obtains a handle to the kvm subsystem; this handle -can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this -handle will create a VM file descriptor which can be used to issue VM -ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will -create a virtual cpu or device and return a file descriptor pointing to -the new resource. Finally, ioctls on a vcpu or device fd can be used -to control the vcpu or device. For vcpus, this includes the important -task of actually running guest code. - -In general file descriptors can be migrated among processes by means -of fork() and the SCM_RIGHTS facility of unix domain sockets. These -kinds of tricks are explicitly not supported by kvm. While they will -not cause harm to the host, their actual behavior is not guaranteed by -the API. See "General description" for details on the ioctl usage -model that is supported by KVM. - -It is important to note that although VM ioctls may only be issued from -the process that created the VM, a VM's lifecycle is associated with its -file descriptor, not its creator (process). In other words, the VM and -its resources, *including the associated address space*, are not freed -until the last reference to the VM's file descriptor has been released. -For example, if fork() is issued after ioctl(KVM_CREATE_VM), the VM will -not be freed until both the parent (original) process and its child have -put their references to the VM's file descriptor. 
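To make the descriptor hierarchy concrete, here is a minimal sketch in C (our illustration, not part of the original file; error handling is trimmed and the variable names are arbitrary):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int kvm, vm, vcpu;

    kvm = open("/dev/kvm", O_RDWR);        /* system fd */
    if (ioctl(kvm, KVM_GET_API_VERSION, 0) != 12)
            return 1;                      /* see section 4.1 */
    vm = ioctl(kvm, KVM_CREATE_VM, 0);     /* VM fd, machine type 0 */
    vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);  /* vcpu fd for vcpu id 0 */

Each step returns a new file descriptor (or -1 on error), and each descriptor accepts only the ioctl class documented for it in section 4.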
- -Because a VM's resources are not freed until the last reference to its -file descriptor is released, creating additional references to a VM via -fork(), dup(), etc... without careful consideration is strongly -discouraged and may have unwanted side effects, e.g. memory allocated -by and on behalf of the VM's process may not be freed/unaccounted when -the VM is shut down. - - -3. Extensions ------------- - -As of Linux 2.6.22, the KVM ABI has been stabilized: no backward -incompatible changes are allowed. However, there is an extension -facility that allows backward-compatible extensions to the API to be -queried and used. - -The extension mechanism is not based on the Linux version number. -Instead, kvm defines extension identifiers and a facility to query -whether a particular extension identifier is available. If it is, a -set of ioctls is available for application use. - - -4. API description ------------------ - -This section describes ioctls that can be used to control kvm guests. -For each ioctl, the following information is provided along with a -description: - - Capability: which KVM extension provides this ioctl. Can be 'basic', - which means that it will be provided by any kernel that supports - API version 12 (see section 4.1), a KVM_CAP_xyz constant, which - means availability needs to be checked with KVM_CHECK_EXTENSION - (see section 4.4), or 'none' which means that while not all kernels - support this ioctl, there's no capability bit to check its - availability: for kernels that don't support the ioctl, - the ioctl returns -ENOTTY. - - Architectures: which instruction set architectures provide this ioctl. - x86 includes both i386 and x86_64. - - Type: system, vm, or vcpu. - - Parameters: what parameters are accepted by the ioctl. - - Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL) - are not detailed, but errors with specific meanings are. - - -4.1 KVM_GET_API_VERSION - -Capability: basic -Architectures: all -Type: system ioctl -Parameters: none -Returns: the constant KVM_API_VERSION (=12) - -This identifies the API version as the stable kvm API. It is not -expected that this number will change. However, Linux 2.6.20 and -2.6.21 report earlier versions; these are not documented and not -supported. Applications should refuse to run if KVM_GET_API_VERSION -returns a value other than 12. If this check passes, all ioctls -described as 'basic' will be available. - - -4.2 KVM_CREATE_VM - -Capability: basic -Architectures: all -Type: system ioctl -Parameters: machine type identifier (KVM_VM_*) -Returns: a VM fd that can be used to control the new virtual machine. - -The new VM has no virtual cpus and no memory. -You probably want to use 0 as machine type. - -In order to create user controlled virtual machines on S390, check -KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as -privileged user (CAP_SYS_ADMIN). - -To use hardware assisted virtualization on MIPS (VZ ASE) rather than -the default trap & emulate implementation (which changes the virtual -memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the -flag KVM_VM_MIPS_VZ. - - -On arm64, the physical address size for a VM (IPA Size limit) is limited -to 40 bits by default. The limit can be configured if the host supports the -extension KVM_CAP_ARM_VM_IPA_SIZE. When supported, use -KVM_VM_TYPE_ARM_IPA_SIZE(IPA_Bits) to set the size in the machine type -identifier, where IPA_Bits is the maximum width of any physical -address used by the VM. 
The IPA_Bits is encoded in bits[7-0] of the -machine type identifier. - -e.g., to configure a guest to use a 48-bit physical address size: - - vm_fd = ioctl(dev_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(48)); - -The requested size (IPA_Bits) must be: - 0 - Implies default size, 40 bits (for backward compatibility) - - or - - N - Implies N bits, where N is a positive integer such that, - 32 <= N <= Host_IPA_Limit - -Host_IPA_Limit is the maximum possible value for IPA_Bits on the host and -is dependent on the CPU capability and the kernel configuration. The limit can -be retrieved using KVM_CAP_ARM_VM_IPA_SIZE of the KVM_CHECK_EXTENSION -ioctl() at run-time. - -Please note that configuring the IPA size does not affect the capability -exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects -the size of the address translated by the stage2 level (guest physical to -host physical address translations). - - -4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST - -Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST -Architectures: x86 -Type: system ioctl -Parameters: struct kvm_msr_list (in/out) -Returns: 0 on success; -1 on error -Errors: - EFAULT: the msr index list cannot be read from or written to - E2BIG: the msr index list is too big to fit in the array specified by - the user. - -struct kvm_msr_list { - __u32 nmsrs; /* number of msrs in entries */ - __u32 indices[0]; -}; - -The user fills in the size of the indices array in nmsrs, and in return -kvm adjusts nmsrs to reflect the actual number of msrs and fills in the -indices array with their numbers. - -KVM_GET_MSR_INDEX_LIST returns the guest msrs that are supported. The list -varies by kvm version and host processor, but does not change otherwise. - -Note: if kvm indicates support for MCE (KVM_CAP_MCE), then the MCE bank MSRs are -not returned in the MSR list, as different vcpus can have a different number -of banks, as set via the KVM_X86_SETUP_MCE ioctl. - -KVM_GET_MSR_FEATURE_INDEX_LIST returns the list of MSRs that can be passed -to the KVM_GET_MSRS system ioctl. This lets userspace probe host capabilities -and processor features that are exposed via MSRs (e.g., VMX capabilities). -This list also varies by kvm version and host processor, but does not change -otherwise. - - -4.4 KVM_CHECK_EXTENSION - -Capability: basic, KVM_CAP_CHECK_EXTENSION_VM for vm ioctl -Architectures: all -Type: system ioctl, vm ioctl -Parameters: extension identifier (KVM_CAP_*) -Returns: 0 if unsupported; 1 (or some other positive integer) if supported - -The API allows the application to query about extensions to the core -kvm API. Userspace passes an extension identifier (an integer) and -receives an integer that describes the extension availability. -Generally 0 means no and 1 means yes, but some extensions may report -additional information in the integer return value. - -Based on their initialization different VMs may have different capabilities. -It is thus encouraged to use the vm ioctl to query for capabilities (available -with KVM_CAP_CHECK_EXTENSION_VM on the vm fd). - -4.5 KVM_GET_VCPU_MMAP_SIZE - -Capability: basic -Architectures: all -Type: system ioctl -Parameters: none -Returns: size of vcpu mmap area, in bytes - -The KVM_RUN ioctl (cf.) communicates with userspace via a shared -memory region. This ioctl returns the size of that region. See the -KVM_RUN documentation for details. 
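Tying sections 4.1-4.5 together, a sketch of how userspace might map the shared region and enter the run loop (reusing the hypothetical kvm and vcpu descriptors from the section 2 sketch; KVM_RUN itself is documented in 4.10):

    #include <sys/mman.h>

    int mmap_size = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
    struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                               MAP_SHARED, vcpu, 0);

    for (;;) {
            ioctl(vcpu, KVM_RUN, 0);
            switch (run->exit_reason) {
            /* e.g. KVM_EXIT_IO, KVM_EXIT_MMIO, ... */
            }
    }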
- - -4.6 KVM_SET_MEMORY_REGION - -Capability: basic -Architectures: all -Type: vm ioctl -Parameters: struct kvm_memory_region (in) -Returns: 0 on success, -1 on error - -This ioctl is obsolete and has been removed. - - -4.7 KVM_CREATE_VCPU - -Capability: basic -Architectures: all -Type: vm ioctl -Parameters: vcpu id (apic id on x86) -Returns: vcpu fd on success, -1 on error - -This API adds a vcpu to a virtual machine. No more than max_vcpus may be added. -The vcpu id is an integer in the range [0, max_vcpu_id). - -The recommended max_vcpus value can be retrieved using the KVM_CAP_NR_VCPUS of -the KVM_CHECK_EXTENSION ioctl() at run-time. -The maximum possible value for max_vcpus can be retrieved using the -KVM_CAP_MAX_VCPUS of the KVM_CHECK_EXTENSION ioctl() at run-time. - -If the KVM_CAP_NR_VCPUS does not exist, you should assume that max_vcpus is 4 -cpus max. -If the KVM_CAP_MAX_VCPUS does not exist, you should assume that max_vcpus is -same as the value returned from KVM_CAP_NR_VCPUS. - -The maximum possible value for max_vcpu_id can be retrieved using the -KVM_CAP_MAX_VCPU_ID of the KVM_CHECK_EXTENSION ioctl() at run-time. - -If the KVM_CAP_MAX_VCPU_ID does not exist, you should assume that max_vcpu_id -is the same as the value returned from KVM_CAP_MAX_VCPUS. - -On powerpc using book3s_hv mode, the vcpus are mapped onto virtual -threads in one or more virtual CPU cores. (This is because the -hardware requires all the hardware threads in a CPU core to be in the -same partition.) The KVM_CAP_PPC_SMT capability indicates the number -of vcpus per virtual core (vcore). The vcore id is obtained by -dividing the vcpu id by the number of vcpus per vcore. The vcpus in a -given vcore will always be in the same physical core as each other -(though that might be a different physical core from time to time). -Userspace can control the threading (SMT) mode of the guest by its -allocation of vcpu ids. For example, if userspace wants -single-threaded guest vcpus, it should make all vcpu ids be a multiple -of the number of vcpus per vcore. - -For virtual cpus that have been created with S390 user controlled virtual -machines, the resulting vcpu fd can be memory mapped at page offset -KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of the virtual -cpu's hardware control block. - - -4.8 KVM_GET_DIRTY_LOG (vm ioctl) - -Capability: basic -Architectures: all -Type: vm ioctl -Parameters: struct kvm_dirty_log (in/out) -Returns: 0 on success, -1 on error - -/* for KVM_GET_DIRTY_LOG */ -struct kvm_dirty_log { - __u32 slot; - __u32 padding; - union { - void __user *dirty_bitmap; /* one bit per page */ - __u64 padding; - }; -}; - -Given a memory slot, return a bitmap containing any pages dirtied -since the last call to this ioctl. Bit 0 is the first page in the -memory slot. Ensure the entire structure is cleared to avoid padding -issues. - -If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies -the address space for which you want to return the dirty bitmap. -They must be less than the value that KVM_CHECK_EXTENSION returns for -the KVM_CAP_MULTI_ADDRESS_SPACE capability. - -The bits in the dirty bitmap are cleared before the ioctl returns, unless -KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled. For more information, -see the description of the capability. - -4.9 KVM_SET_MEMORY_ALIAS - -Capability: basic -Architectures: x86 -Type: vm ioctl -Parameters: struct kvm_memory_alias (in) -Returns: 0 (success), -1 (error) - -This ioctl is obsolete and has been removed. 
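Returning to KVM_GET_DIRTY_LOG (4.8) for a moment, here is a sketch of the retrieval flow; npages is our placeholder for the page count of a slot that was created with KVM_MEM_LOG_DIRTY_PAGES (see 4.35), and error handling is omitted:

    __u64 bitmap[(npages + 63) / 64];   /* one bit per page */
    struct kvm_dirty_log log = { 0 };   /* clears the padding, too */

    log.slot = 0;                       /* slot id in bits 0-15 */
    log.dirty_bitmap = bitmap;
    ioctl(vm, KVM_GET_DIRTY_LOG, &log); /* 0 on success */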
- - -4.10 KVM_RUN - -Capability: basic -Architectures: all -Type: vcpu ioctl -Parameters: none -Returns: 0 on success, -1 on error -Errors: - EINTR: an unmasked signal is pending - -This ioctl is used to run a guest virtual cpu. While there are no -explicit parameters, there is an implicit parameter block that can be -obtained by mmap()ing the vcpu fd at offset 0, with the size given by -KVM_GET_VCPU_MMAP_SIZE. The parameter block is formatted as a 'struct -kvm_run' (see below). - - -4.11 KVM_GET_REGS - -Capability: basic -Architectures: all except ARM, arm64 -Type: vcpu ioctl -Parameters: struct kvm_regs (out) -Returns: 0 on success, -1 on error - -Reads the general purpose registers from the vcpu. - -/* x86 */ -struct kvm_regs { - /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ - __u64 rax, rbx, rcx, rdx; - __u64 rsi, rdi, rsp, rbp; - __u64 r8, r9, r10, r11; - __u64 r12, r13, r14, r15; - __u64 rip, rflags; -}; - -/* mips */ -struct kvm_regs { - /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ - __u64 gpr[32]; - __u64 hi; - __u64 lo; - __u64 pc; -}; - - -4.12 KVM_SET_REGS - -Capability: basic -Architectures: all except ARM, arm64 -Type: vcpu ioctl -Parameters: struct kvm_regs (in) -Returns: 0 on success, -1 on error - -Writes the general purpose registers into the vcpu. - -See KVM_GET_REGS for the data structure. - - -4.13 KVM_GET_SREGS - -Capability: basic -Architectures: x86, ppc -Type: vcpu ioctl -Parameters: struct kvm_sregs (out) -Returns: 0 on success, -1 on error - -Reads special registers from the vcpu. - -/* x86 */ -struct kvm_sregs { - struct kvm_segment cs, ds, es, fs, gs, ss; - struct kvm_segment tr, ldt; - struct kvm_dtable gdt, idt; - __u64 cr0, cr2, cr3, cr4, cr8; - __u64 efer; - __u64 apic_base; - __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64]; -}; - -/* ppc -- see arch/powerpc/include/uapi/asm/kvm.h */ - -interrupt_bitmap is a bitmap of pending external interrupts. At most -one bit may be set. This interrupt has been acknowledged by the APIC -but not yet injected into the cpu core. - - -4.14 KVM_SET_SREGS - -Capability: basic -Architectures: x86, ppc -Type: vcpu ioctl -Parameters: struct kvm_sregs (in) -Returns: 0 on success, -1 on error - -Writes special registers into the vcpu. See KVM_GET_SREGS for the -data structures. - - -4.15 KVM_TRANSLATE - -Capability: basic -Architectures: x86 -Type: vcpu ioctl -Parameters: struct kvm_translation (in/out) -Returns: 0 on success, -1 on error - -Translates a virtual address according to the vcpu's current address -translation mode. - -struct kvm_translation { - /* in */ - __u64 linear_address; - - /* out */ - __u64 physical_address; - __u8 valid; - __u8 writeable; - __u8 usermode; - __u8 pad[5]; -}; - - -4.16 KVM_INTERRUPT - -Capability: basic -Architectures: x86, ppc, mips -Type: vcpu ioctl -Parameters: struct kvm_interrupt (in) -Returns: 0 on success, negative on failure. - -Queues a hardware interrupt vector to be injected. - -/* for KVM_INTERRUPT */ -struct kvm_interrupt { - /* in */ - __u32 irq; -}; - -X86: - -Returns: 0 on success, - -EEXIST if an interrupt is already enqueued - -EINVAL if the irq number is invalid - -ENXIO if the PIC is in the kernel - -EFAULT if the pointer is invalid - -Note 'irq' is an interrupt vector, not an interrupt pin or line. This -ioctl is useful if the in-kernel PIC is not used. - -PPC: - -Queues an external interrupt to be injected. 
This ioctl is overloaded -with 3 different irq values: - -a) KVM_INTERRUPT_SET - - This injects an edge type external interrupt into the guest once it's ready - to receive interrupts. When injected, the interrupt is done. - -b) KVM_INTERRUPT_UNSET - - This unsets any pending interrupt. - - Only available with KVM_CAP_PPC_UNSET_IRQ. - -c) KVM_INTERRUPT_SET_LEVEL - - This injects a level type external interrupt into the guest context. The - interrupt stays pending until a specific ioctl with KVM_INTERRUPT_UNSET - is triggered. - - Only available with KVM_CAP_PPC_IRQ_LEVEL. - -Note that any value for 'irq' other than the ones stated above is invalid -and incurs unexpected behavior. - -This is an asynchronous vcpu ioctl and can be invoked from any thread. - -MIPS: - -Queues an external interrupt to be injected into the virtual CPU. A negative -interrupt number dequeues the interrupt. - -This is an asynchronous vcpu ioctl and can be invoked from any thread. - - -4.17 KVM_DEBUG_GUEST - -Capability: basic -Architectures: none -Type: vcpu ioctl -Parameters: none -Returns: -1 on error - -Support for this has been removed. Use KVM_SET_GUEST_DEBUG instead. - - -4.18 KVM_GET_MSRS - -Capability: basic (vcpu), KVM_CAP_GET_MSR_FEATURES (system) -Architectures: x86 -Type: system ioctl, vcpu ioctl -Parameters: struct kvm_msrs (in/out) -Returns: number of msrs successfully returned; - -1 on error - -When used as a system ioctl: -Reads the values of MSR-based features that are available for the VM. This -is similar to KVM_GET_SUPPORTED_CPUID, but it returns MSR indices and values. -The list of msr-based features can be obtained using KVM_GET_MSR_FEATURE_INDEX_LIST -in a system ioctl. - -When used as a vcpu ioctl: -Reads model-specific registers from the vcpu. Supported msr indices can -be obtained using KVM_GET_MSR_INDEX_LIST in a system ioctl. - -struct kvm_msrs { - __u32 nmsrs; /* number of msrs in entries */ - __u32 pad; - - struct kvm_msr_entry entries[0]; -}; - -struct kvm_msr_entry { - __u32 index; - __u32 reserved; - __u64 data; -}; - -Application code should set the 'nmsrs' member (which indicates the -size of the entries array) and the 'index' member of each array entry. -kvm will fill in the 'data' member. - - -4.19 KVM_SET_MSRS - -Capability: basic -Architectures: x86 -Type: vcpu ioctl -Parameters: struct kvm_msrs (in) -Returns: 0 on success, -1 on error - -Writes model-specific registers to the vcpu. See KVM_GET_MSRS for the -data structures. - -Application code should set the 'nmsrs' member (which indicates the -size of the entries array), and the 'index' and 'data' members of each -array entry. - - -4.20 KVM_SET_CPUID - -Capability: basic -Architectures: x86 -Type: vcpu ioctl -Parameters: struct kvm_cpuid (in) -Returns: 0 on success, -1 on error - -Defines the vcpu responses to the cpuid instruction. Applications -should use the KVM_SET_CPUID2 ioctl if available. - - -struct kvm_cpuid_entry { - __u32 function; - __u32 eax; - __u32 ebx; - __u32 ecx; - __u32 edx; - __u32 padding; -}; - -/* for KVM_SET_CPUID */ -struct kvm_cpuid { - __u32 nent; - __u32 padding; - struct kvm_cpuid_entry entries[0]; -}; - - -4.21 KVM_SET_SIGNAL_MASK - -Capability: basic -Architectures: all -Type: vcpu ioctl -Parameters: struct kvm_signal_mask (in) -Returns: 0 on success, -1 on error - -Defines which signals are blocked during execution of KVM_RUN. This -signal mask temporarily overrides the thread's signal mask. 
Any -unblocked signal received (except SIGKILL and SIGSTOP, which retain -their traditional behaviour) will cause KVM_RUN to return with -EINTR. - -Note the signal will only be delivered if not blocked by the original -signal mask. - -/* for KVM_SET_SIGNAL_MASK */ -struct kvm_signal_mask { - __u32 len; - __u8 sigset[0]; -}; - - -4.22 KVM_GET_FPU - -Capability: basic -Architectures: x86 -Type: vcpu ioctl -Parameters: struct kvm_fpu (out) -Returns: 0 on success, -1 on error - -Reads the floating point state from the vcpu. - -/* for KVM_GET_FPU and KVM_SET_FPU */ -struct kvm_fpu { - __u8 fpr[8][16]; - __u16 fcw; - __u16 fsw; - __u8 ftwx; /* in fxsave format */ - __u8 pad1; - __u16 last_opcode; - __u64 last_ip; - __u64 last_dp; - __u8 xmm[16][16]; - __u32 mxcsr; - __u32 pad2; -}; - - -4.23 KVM_SET_FPU - -Capability: basic -Architectures: x86 -Type: vcpu ioctl -Parameters: struct kvm_fpu (in) -Returns: 0 on success, -1 on error - -Writes the floating point state to the vcpu. - -/* for KVM_GET_FPU and KVM_SET_FPU */ -struct kvm_fpu { - __u8 fpr[8][16]; - __u16 fcw; - __u16 fsw; - __u8 ftwx; /* in fxsave format */ - __u8 pad1; - __u16 last_opcode; - __u64 last_ip; - __u64 last_dp; - __u8 xmm[16][16]; - __u32 mxcsr; - __u32 pad2; -}; - - -4.24 KVM_CREATE_IRQCHIP - -Capability: KVM_CAP_IRQCHIP, KVM_CAP_S390_IRQCHIP (s390) -Architectures: x86, ARM, arm64, s390 -Type: vm ioctl -Parameters: none -Returns: 0 on success, -1 on error - -Creates an interrupt controller model in the kernel. -On x86, creates a virtual ioapic, a virtual PIC (two PICs, nested), and sets up -future vcpus to have a local APIC. IRQ routing for GSIs 0-15 is set to both -PIC and IOAPIC; GSI 16-23 only go to the IOAPIC. -On ARM/arm64, a GICv2 is created. Any other GIC versions require the usage of -KVM_CREATE_DEVICE, which also supports creating a GICv2. Using -KVM_CREATE_DEVICE is preferred over KVM_CREATE_IRQCHIP for GICv2. -On s390, a dummy irq routing table is created. - -Note that on s390 the KVM_CAP_S390_IRQCHIP vm capability needs to be enabled -before KVM_CREATE_IRQCHIP can be used. - - -4.25 KVM_IRQ_LINE - -Capability: KVM_CAP_IRQCHIP -Architectures: x86, arm, arm64 -Type: vm ioctl -Parameters: struct kvm_irq_level -Returns: 0 on success, -1 on error - -Sets the level of a GSI input to the interrupt controller model in the kernel. -On some architectures it is required that an interrupt controller model has -been previously created with KVM_CREATE_IRQCHIP. Note that edge-triggered -interrupts require the level to be set to 1 and then back to 0. - -On real hardware, interrupt pins can be active-low or active-high. This -does not matter for the level field of struct kvm_irq_level: 1 always -means active (asserted), 0 means inactive (deasserted). - -x86 allows the operating system to program the interrupt polarity -(active-low/active-high) for level-triggered interrupts, and KVM used -to consider the polarity. However, due to bitrot in the handling of -active-low interrupts, the above convention is now valid on x86 too. -This is signaled by KVM_CAP_X86_IOAPIC_POLARITY_IGNORED. Userspace -should not present interrupts to the guest as active-low unless this -capability is present (or unless it is not using the in-kernel irqchip, -of course). - - -ARM/arm64 can signal an interrupt either at the CPU level, or at the -in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to -use PPIs designated for specific cpus. The irq field is interpreted -like this: - -  bits: | 31 ... 24 | 23 ... 16 | 15 ... 
0 | - field: | irq_type | vcpu_index | irq_id | - -The irq_type field has the following values: -- irq_type[0]: out-of-kernel GIC: irq_id 0 is IRQ, irq_id 1 is FIQ -- irq_type[1]: in-kernel GIC: SPI, irq_id between 32 and 1019 (incl.) - (the vcpu_index field is ignored) -- irq_type[2]: in-kernel GIC: PPI, irq_id between 16 and 31 (incl.) - -(The irq_id field thus corresponds nicely to the IRQ ID in the ARM GIC specs) - -In both cases, level is used to assert/deassert the line. - -struct kvm_irq_level { - union { - __u32 irq; /* GSI */ - __s32 status; /* not used for KVM_IRQ_LEVEL */ - }; - __u32 level; /* 0 or 1 */ -}; - - -4.26 KVM_GET_IRQCHIP - -Capability: KVM_CAP_IRQCHIP -Architectures: x86 -Type: vm ioctl -Parameters: struct kvm_irqchip (in/out) -Returns: 0 on success, -1 on error - -Reads the state of a kernel interrupt controller created with -KVM_CREATE_IRQCHIP into a buffer provided by the caller. - -struct kvm_irqchip { - __u32 chip_id; /* 0 = PIC1, 1 = PIC2, 2 = IOAPIC */ - __u32 pad; - union { - char dummy[512]; /* reserving space */ - struct kvm_pic_state pic; - struct kvm_ioapic_state ioapic; - } chip; -}; - - -4.27 KVM_SET_IRQCHIP - -Capability: KVM_CAP_IRQCHIP -Architectures: x86 -Type: vm ioctl -Parameters: struct kvm_irqchip (in) -Returns: 0 on success, -1 on error - -Sets the state of a kernel interrupt controller created with -KVM_CREATE_IRQCHIP from a buffer provided by the caller. - -struct kvm_irqchip { - __u32 chip_id; /* 0 = PIC1, 1 = PIC2, 2 = IOAPIC */ - __u32 pad; - union { - char dummy[512]; /* reserving space */ - struct kvm_pic_state pic; - struct kvm_ioapic_state ioapic; - } chip; -}; - - -4.28 KVM_XEN_HVM_CONFIG - -Capability: KVM_CAP_XEN_HVM -Architectures: x86 -Type: vm ioctl -Parameters: struct kvm_xen_hvm_config (in) -Returns: 0 on success, -1 on error - -Sets the MSR that the Xen HVM guest uses to initialize its hypercall -page, and provides the starting address and size of the hypercall -blobs in userspace. When the guest writes the MSR, kvm copies one -page of a blob (32- or 64-bit, depending on the vcpu mode) to guest -memory. - -struct kvm_xen_hvm_config { - __u32 flags; - __u32 msr; - __u64 blob_addr_32; - __u64 blob_addr_64; - __u8 blob_size_32; - __u8 blob_size_64; - __u8 pad2[30]; -}; - - -4.29 KVM_GET_CLOCK - -Capability: KVM_CAP_ADJUST_CLOCK -Architectures: x86 -Type: vm ioctl -Parameters: struct kvm_clock_data (out) -Returns: 0 on success, -1 on error - -Gets the current timestamp of kvmclock as seen by the current guest. In -conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios -such as migration. - -When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the -set of bits that KVM can return in struct kvm_clock_data's flag member. - -The only flag defined now is KVM_CLOCK_TSC_STABLE. If set, the returned -value is the exact kvmclock value seen by all VCPUs at the instant -when KVM_GET_CLOCK was called. If clear, the returned value is simply -CLOCK_MONOTONIC plus a constant offset; the offset can be modified -with KVM_SET_CLOCK. KVM will try to make all VCPUs follow this clock, -but the exact value read by each VCPU could differ, because the host -TSC is not stable. 
- -struct kvm_clock_data { - __u64 clock; /* kvmclock current value */ - __u32 flags; - __u32 pad[9]; -}; - - -4.30 KVM_SET_CLOCK - -Capability: KVM_CAP_ADJUST_CLOCK -Architectures: x86 -Type: vm ioctl -Parameters: struct kvm_clock_data (in) -Returns: 0 on success, -1 on error - -Sets the current timestamp of kvmclock to the value specified in its parameter. -In conjunction with KVM_GET_CLOCK, it is used to ensure monotonicity on scenarios -such as migration. - -struct kvm_clock_data { - __u64 clock; /* kvmclock current value */ - __u32 flags; - __u32 pad[9]; -}; - - -4.31 KVM_GET_VCPU_EVENTS - -Capability: KVM_CAP_VCPU_EVENTS -Extended by: KVM_CAP_INTR_SHADOW -Architectures: x86, arm, arm64 -Type: vcpu ioctl -Parameters: struct kvm_vcpu_event (out) -Returns: 0 on success, -1 on error - -X86: - -Gets currently pending exceptions, interrupts, and NMIs as well as related -states of the vcpu. - -struct kvm_vcpu_events { - struct { - __u8 injected; - __u8 nr; - __u8 has_error_code; - __u8 pending; - __u32 error_code; - } exception; - struct { - __u8 injected; - __u8 nr; - __u8 soft; - __u8 shadow; - } interrupt; - struct { - __u8 injected; - __u8 pending; - __u8 masked; - __u8 pad; - } nmi; - __u32 sipi_vector; - __u32 flags; - struct { - __u8 smm; - __u8 pending; - __u8 smm_inside_nmi; - __u8 latched_init; - } smi; - __u8 reserved[27]; - __u8 exception_has_payload; - __u64 exception_payload; -}; - -The following bits are defined in the flags field: - -- KVM_VCPUEVENT_VALID_SHADOW may be set to signal that - interrupt.shadow contains a valid state. - -- KVM_VCPUEVENT_VALID_SMM may be set to signal that smi contains a - valid state. - -- KVM_VCPUEVENT_VALID_PAYLOAD may be set to signal that the - exception_has_payload, exception_payload, and exception.pending - fields contain a valid state. This bit will be set whenever - KVM_CAP_EXCEPTION_PAYLOAD is enabled. - -ARM/ARM64: - -If the guest accesses a device that is being emulated by the host kernel in -such a way that a real device would generate a physical SError, KVM may make -a virtual SError pending for that VCPU. This system error interrupt remains -pending until the guest takes the exception by unmasking PSTATE.A. - -Running the VCPU may cause it to take a pending SError, or make an access that -causes an SError to become pending. The event's description is only valid while -the VPCU is not running. - -This API provides a way to read and write the pending 'event' state that is not -visible to the guest. To save, restore or migrate a VCPU the struct representing -the state can be read then written using this GET/SET API, along with the other -guest-visible registers. It is not possible to 'cancel' an SError that has been -made pending. - -A device being emulated in user-space may also wish to generate an SError. To do -this the events structure can be populated by user-space. The current state -should be read first, to ensure no existing SError is pending. If an existing -SError is pending, the architecture's 'Multiple SError interrupts' rules should -be followed. (2.5.3 of DDI0587.a "ARM Reliability, Availability, and -Serviceability (RAS) Specification"). - -SError exceptions always have an ESR value. Some CPUs have the ability to -specify what the virtual SError's ESR value should be. These systems will -advertise KVM_CAP_ARM_INJECT_SERROR_ESR. In this case exception.has_esr will -always have a non-zero value when read, and the agent making an SError pending -should specify the ISS field in the lower 24 bits of exception.serror_esr. 
If -the system supports KVM_CAP_ARM_INJECT_SERROR_ESR, but user-space sets the events -with exception.has_esr as zero, KVM will choose an ESR. - -Specifying exception.has_esr on a system that does not support it will return --EINVAL. Setting anything other than the lower 24bits of exception.serror_esr -will return -EINVAL. - -struct kvm_vcpu_events { - struct { - __u8 serror_pending; - __u8 serror_has_esr; - /* Align it to 8 bytes */ - __u8 pad[6]; - __u64 serror_esr; - } exception; - __u32 reserved[12]; -}; - -4.32 KVM_SET_VCPU_EVENTS - -Capability: KVM_CAP_VCPU_EVENTS -Extended by: KVM_CAP_INTR_SHADOW -Architectures: x86, arm, arm64 -Type: vcpu ioctl -Parameters: struct kvm_vcpu_event (in) -Returns: 0 on success, -1 on error - -X86: - -Set pending exceptions, interrupts, and NMIs as well as related states of the -vcpu. - -See KVM_GET_VCPU_EVENTS for the data structure. - -Fields that may be modified asynchronously by running VCPUs can be excluded -from the update. These fields are nmi.pending, sipi_vector, smi.smm, -smi.pending. Keep the corresponding bits in the flags field cleared to -suppress overwriting the current in-kernel state. The bits are: - -KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel -KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector -KVM_VCPUEVENT_VALID_SMM - transfer the smi sub-struct. - -If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in -the flags field to signal that interrupt.shadow contains a valid state and -shall be written into the VCPU. - -KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available. - -If KVM_CAP_EXCEPTION_PAYLOAD is enabled, KVM_VCPUEVENT_VALID_PAYLOAD -can be set in the flags field to signal that the -exception_has_payload, exception_payload, and exception.pending fields -contain a valid state and shall be written into the VCPU. - -ARM/ARM64: - -Set the pending SError exception state for this VCPU. It is not possible to -'cancel' an Serror that has been made pending. - -See KVM_GET_VCPU_EVENTS for the data structure. - - -4.33 KVM_GET_DEBUGREGS - -Capability: KVM_CAP_DEBUGREGS -Architectures: x86 -Type: vm ioctl -Parameters: struct kvm_debugregs (out) -Returns: 0 on success, -1 on error - -Reads debug registers from the vcpu. - -struct kvm_debugregs { - __u64 db[4]; - __u64 dr6; - __u64 dr7; - __u64 flags; - __u64 reserved[9]; -}; - - -4.34 KVM_SET_DEBUGREGS - -Capability: KVM_CAP_DEBUGREGS -Architectures: x86 -Type: vm ioctl -Parameters: struct kvm_debugregs (in) -Returns: 0 on success, -1 on error - -Writes debug registers into the vcpu. - -See KVM_GET_DEBUGREGS for the data structure. The flags field is unused -yet and must be cleared on entry. - - -4.35 KVM_SET_USER_MEMORY_REGION - -Capability: KVM_CAP_USER_MEMORY -Architectures: all -Type: vm ioctl -Parameters: struct kvm_userspace_memory_region (in) -Returns: 0 on success, -1 on error - -struct kvm_userspace_memory_region { - __u32 slot; - __u32 flags; - __u64 guest_phys_addr; - __u64 memory_size; /* bytes */ - __u64 userspace_addr; /* start of the userspace allocated memory */ -}; - -/* for kvm_memory_region::flags */ -#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0) -#define KVM_MEM_READONLY (1UL << 1) - -This ioctl allows the user to create, modify or delete a guest physical -memory slot. Bits 0-15 of "slot" specify the slot id and this value -should be less than the maximum number of user memory slots supported per -VM. The maximum allowed slots can be queried using KVM_CAP_NR_MEMSLOTS. 
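As an illustration, a sketch that creates one slot backed by anonymous memory (the 2 MiB size, slot 0 and guest physical address 0 are made-up example values; error handling omitted):

    #include <sys/mman.h>

    void *mem = mmap(NULL, 2 << 20, PROT_READ | PROT_WRITE,
                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    struct kvm_userspace_memory_region region = {
            .slot = 0,
            .flags = KVM_MEM_LOG_DIRTY_PAGES, /* optional; enables dirty tracking */
            .guest_phys_addr = 0,
            .memory_size = 2 << 20,           /* bytes */
            .userspace_addr = (unsigned long)mem,
    };
    ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region);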
-Slots may not overlap in guest physical address space. - -If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of "slot" -specifies the address space which is being modified. They must be -less than the value that KVM_CHECK_EXTENSION returns for the -KVM_CAP_MULTI_ADDRESS_SPACE capability. Slots in separate address spaces -are unrelated; the restriction on overlapping slots only applies within -each address space. - -Deleting a slot is done by passing zero for memory_size. When changing -an existing slot, it may be moved in the guest physical memory space, -or its flags may be modified, but it may not be resized. - -Memory for the region is taken starting at the address denoted by the -field userspace_addr, which must point at user addressable memory for -the entire memory slot size. Any object may back this memory, including -anonymous memory, ordinary files, and hugetlbfs. - -It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr -be identical. This allows large pages in the guest to be backed by large -pages in the host. - -The flags field supports two flags: KVM_MEM_LOG_DIRTY_PAGES and -KVM_MEM_READONLY. The former can be set to instruct KVM to keep track of -writes to memory within the slot. See KVM_GET_DIRTY_LOG ioctl to know how to -use it. The latter can be set, if KVM_CAP_READONLY_MEM capability allows it, -to make a new slot read-only. In this case, writes to this memory will be -posted to userspace as KVM_EXIT_MMIO exits. - -When the KVM_CAP_SYNC_MMU capability is available, changes in the backing of -the memory region are automatically reflected into the guest. For example, an -mmap() that affects the region will be made visible immediately. Another -example is madvise(MADV_DROP). - -It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl. -The KVM_SET_MEMORY_REGION does not allow fine grained control over memory -allocation and is deprecated. - - -4.36 KVM_SET_TSS_ADDR - -Capability: KVM_CAP_SET_TSS_ADDR -Architectures: x86 -Type: vm ioctl -Parameters: unsigned long tss_address (in) -Returns: 0 on success, -1 on error - -This ioctl defines the physical address of a three-page region in the guest -physical address space. The region must be within the first 4GB of the -guest physical address space and must not conflict with any memory slot -or any mmio address. The guest may malfunction if it accesses this memory -region. - -This ioctl is required on Intel-based hosts. This is needed on Intel hardware -because of a quirk in the virtualization implementation (see the internals -documentation when it pops into existence). - - -4.37 KVM_ENABLE_CAP - -Capability: KVM_CAP_ENABLE_CAP -Architectures: mips, ppc, s390 -Type: vcpu ioctl -Parameters: struct kvm_enable_cap (in) -Returns: 0 on success; -1 on error - -Capability: KVM_CAP_ENABLE_CAP_VM -Architectures: all -Type: vcpu ioctl -Parameters: struct kvm_enable_cap (in) -Returns: 0 on success; -1 on error - -+Not all extensions are enabled by default. Using this ioctl the application -can enable an extension, making it available to the guest. - -On systems that do not support this ioctl, it always fails. On systems that -do support it, it only works for extensions that are supported for enablement. - -To check if a capability can be enabled, the KVM_CHECK_EXTENSION ioctl should -be used. - -struct kvm_enable_cap { - /* in */ - __u32 cap; - -The capability that is supposed to get enabled. - - __u32 flags; - -A bitfield indicating future enhancements. Has to be 0 for now. 
- - __u64 args[4]; - -Arguments for enabling a feature. If a feature needs initial values to -function properly, this is the place to put them. - - __u8 pad[64]; -}; - -The vcpu ioctl should be used for vcpu-specific capabilities, the vm ioctl -for vm-wide capabilities. - -4.38 KVM_GET_MP_STATE - -Capability: KVM_CAP_MP_STATE -Architectures: x86, s390, arm, arm64 -Type: vcpu ioctl -Parameters: struct kvm_mp_state (out) -Returns: 0 on success; -1 on error - -struct kvm_mp_state { - __u32 mp_state; -}; - -Returns the vcpu's current "multiprocessing state" (though also valid on -uniprocessor guests). - -Possible values are: - - - KVM_MP_STATE_RUNNABLE: the vcpu is currently running [x86,arm/arm64] - - KVM_MP_STATE_UNINITIALIZED: the vcpu is an application processor (AP) - which has not yet received an INIT signal [x86] - - KVM_MP_STATE_INIT_RECEIVED: the vcpu has received an INIT signal, and is - now ready for a SIPI [x86] - - KVM_MP_STATE_HALTED: the vcpu has executed a HLT instruction and - is waiting for an interrupt [x86] - - KVM_MP_STATE_SIPI_RECEIVED: the vcpu has just received a SIPI (vector - accessible via KVM_GET_VCPU_EVENTS) [x86] - - KVM_MP_STATE_STOPPED: the vcpu is stopped [s390,arm/arm64] - - KVM_MP_STATE_CHECK_STOP: the vcpu is in a special error state [s390] - - KVM_MP_STATE_OPERATING: the vcpu is operating (running or halted) - [s390] - - KVM_MP_STATE_LOAD: the vcpu is in a special load/startup state - [s390] - -On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an -in-kernel irqchip, the multiprocessing state must be maintained by userspace on -these architectures. - -For arm/arm64: - -The only states that are valid are KVM_MP_STATE_STOPPED and -KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not. - -4.39 KVM_SET_MP_STATE - -Capability: KVM_CAP_MP_STATE -Architectures: x86, s390, arm, arm64 -Type: vcpu ioctl -Parameters: struct kvm_mp_state (in) -Returns: 0 on success; -1 on error - -Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for -arguments. - -On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an -in-kernel irqchip, the multiprocessing state must be maintained by userspace on -these architectures. - -For arm/arm64: - -The only states that are valid are KVM_MP_STATE_STOPPED and -KVM_MP_STATE_RUNNABLE which reflect if the vcpu should be paused or not. - -4.40 KVM_SET_IDENTITY_MAP_ADDR - -Capability: KVM_CAP_SET_IDENTITY_MAP_ADDR -Architectures: x86 -Type: vm ioctl -Parameters: unsigned long identity (in) -Returns: 0 on success, -1 on error - -This ioctl defines the physical address of a one-page region in the guest -physical address space. The region must be within the first 4GB of the -guest physical address space and must not conflict with any memory slot -or any mmio address. The guest may malfunction if it accesses this memory -region. - -Setting the address to 0 will result in resetting the address to its default -(0xfffbc000). - -This ioctl is required on Intel-based hosts. This is needed on Intel hardware -because of a quirk in the virtualization implementation (see the internals -documentation when it pops into existence). - -Fails if any VCPU has already been created. - -4.41 KVM_SET_BOOT_CPU_ID - -Capability: KVM_CAP_SET_BOOT_CPU_ID -Architectures: x86 -Type: vm ioctl -Parameters: unsigned long vcpu_id -Returns: 0 on success, -1 on error - -Define which vcpu is the Bootstrap Processor (BSP). Values are the same -as the vcpu id in KVM_CREATE_VCPU. 
If this ioctl is not called, the default -is vcpu 0. - - -4.42 KVM_GET_XSAVE - -Capability: KVM_CAP_XSAVE -Architectures: x86 -Type: vcpu ioctl -Parameters: struct kvm_xsave (out) -Returns: 0 on success, -1 on error - -struct kvm_xsave { - __u32 region[1024]; -}; - -This ioctl would copy current vcpu's xsave struct to the userspace. - - -4.43 KVM_SET_XSAVE - -Capability: KVM_CAP_XSAVE -Architectures: x86 -Type: vcpu ioctl -Parameters: struct kvm_xsave (in) -Returns: 0 on success, -1 on error - -struct kvm_xsave { - __u32 region[1024]; -}; - -This ioctl would copy userspace's xsave struct to the kernel. - - -4.44 KVM_GET_XCRS - -Capability: KVM_CAP_XCRS -Architectures: x86 -Type: vcpu ioctl -Parameters: struct kvm_xcrs (out) -Returns: 0 on success, -1 on error - -struct kvm_xcr { - __u32 xcr; - __u32 reserved; - __u64 value; -}; - -struct kvm_xcrs { - __u32 nr_xcrs; - __u32 flags; - struct kvm_xcr xcrs[KVM_MAX_XCRS]; - __u64 padding[16]; -}; - -This ioctl would copy current vcpu's xcrs to the userspace. - - -4.45 KVM_SET_XCRS - -Capability: KVM_CAP_XCRS -Architectures: x86 -Type: vcpu ioctl -Parameters: struct kvm_xcrs (in) -Returns: 0 on success, -1 on error - -struct kvm_xcr { - __u32 xcr; - __u32 reserved; - __u64 value; -}; - -struct kvm_xcrs { - __u32 nr_xcrs; - __u32 flags; - struct kvm_xcr xcrs[KVM_MAX_XCRS]; - __u64 padding[16]; -}; - -This ioctl would set vcpu's xcr to the value userspace specified. - - -4.46 KVM_GET_SUPPORTED_CPUID - -Capability: KVM_CAP_EXT_CPUID -Architectures: x86 -Type: system ioctl -Parameters: struct kvm_cpuid2 (in/out) -Returns: 0 on success, -1 on error - -struct kvm_cpuid2 { - __u32 nent; - __u32 padding; - struct kvm_cpuid_entry2 entries[0]; -}; - -#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX BIT(0) -#define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1) -#define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2) - -struct kvm_cpuid_entry2 { - __u32 function; - __u32 index; - __u32 flags; - __u32 eax; - __u32 ebx; - __u32 ecx; - __u32 edx; - __u32 padding[3]; -}; - -This ioctl returns x86 cpuid features which are supported by both the -hardware and kvm in its default configuration. Userspace can use the -information returned by this ioctl to construct cpuid information (for -KVM_SET_CPUID2) that is consistent with hardware, kernel, and -userspace capabilities, and with user requirements (for example, the -user may wish to constrain cpuid to emulate older hardware, or for -feature consistency across a cluster). - -Note that certain capabilities, such as KVM_CAP_X86_DISABLE_EXITS, may -expose cpuid features (e.g. MONITOR) which are not supported by kvm in -its default configuration. If userspace enables such capabilities, it -is responsible for modifying the results of this ioctl appropriately. - -Userspace invokes KVM_GET_SUPPORTED_CPUID by passing a kvm_cpuid2 structure -with the 'nent' field indicating the number of entries in the variable-size -array 'entries'. If the number of entries is too low to describe the cpu -capabilities, an error (E2BIG) is returned. If the number is too high, -the 'nent' field is adjusted and an error (ENOMEM) is returned. If the -number is just right, the 'nent' field is adjusted to the number of valid -entries in the 'entries' array, which is then filled. - -The entries returned are the host cpuid as returned by the cpuid instruction, -with unknown or unsupported features masked out. Some features (for example, -x2apic), may not be present in the host cpu, but are exposed by kvm if it can -emulate them efficiently. 
The fields in each entry are defined as follows: - - function: the eax value used to obtain the entry - index: the ecx value used to obtain the entry (for entries that are - affected by ecx) - flags: an OR of zero or more of the following: - KVM_CPUID_FLAG_SIGNIFCANT_INDEX: - if the index field is valid - KVM_CPUID_FLAG_STATEFUL_FUNC: - if cpuid for this function returns different values for successive - invocations; there will be several entries with the same function, - all with this flag set - KVM_CPUID_FLAG_STATE_READ_NEXT: - for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is - the first entry to be read by a cpu - eax, ebx, ecx, edx: the values returned by the cpuid instruction for - this function/index combination - -The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned -as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC -support. Instead it is reported via - - ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER) - -if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the -feature in userspace, then you can enable the feature for KVM_SET_CPUID2. - - -4.47 KVM_PPC_GET_PVINFO - -Capability: KVM_CAP_PPC_GET_PVINFO -Architectures: ppc -Type: vm ioctl -Parameters: struct kvm_ppc_pvinfo (out) -Returns: 0 on success, !0 on error - -struct kvm_ppc_pvinfo { - __u32 flags; - __u32 hcall[4]; - __u8 pad[108]; -}; - -This ioctl fetches PV specific information that need to be passed to the guest -using the device tree or other means from vm context. - -The hcall array defines 4 instructions that make up a hypercall. - -If any additional field gets added to this structure later on, a bit for that -additional piece of information will be set in the flags bitmap. - -The flags bitmap is defined as: - - /* the host supports the ePAPR idle hcall - #define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0) - -4.52 KVM_SET_GSI_ROUTING - -Capability: KVM_CAP_IRQ_ROUTING -Architectures: x86 s390 arm arm64 -Type: vm ioctl -Parameters: struct kvm_irq_routing (in) -Returns: 0 on success, -1 on error - -Sets the GSI routing table entries, overwriting any previously set entries. - -On arm/arm64, GSI routing has the following limitation: -- GSI routing does not apply to KVM_IRQ_LINE but only to KVM_IRQFD. - -struct kvm_irq_routing { - __u32 nr; - __u32 flags; - struct kvm_irq_routing_entry entries[0]; -}; - -No flags are specified so far, the corresponding field must be set to zero. - -struct kvm_irq_routing_entry { - __u32 gsi; - __u32 type; - __u32 flags; - __u32 pad; - union { - struct kvm_irq_routing_irqchip irqchip; - struct kvm_irq_routing_msi msi; - struct kvm_irq_routing_s390_adapter adapter; - struct kvm_irq_routing_hv_sint hv_sint; - __u32 pad[8]; - } u; -}; - -/* gsi routing entry types */ -#define KVM_IRQ_ROUTING_IRQCHIP 1 -#define KVM_IRQ_ROUTING_MSI 2 -#define KVM_IRQ_ROUTING_S390_ADAPTER 3 -#define KVM_IRQ_ROUTING_HV_SINT 4 - -flags: -- KVM_MSI_VALID_DEVID: used along with KVM_IRQ_ROUTING_MSI routing entry - type, specifies that the devid field contains a valid value. The per-VM - KVM_CAP_MSI_DEVID capability advertises the requirement to provide - the device ID. If this capability is not available, userspace should - never set the KVM_MSI_VALID_DEVID flag as the ioctl might fail. 
-- zero otherwise
-
-struct kvm_irq_routing_irqchip {
-	__u32 irqchip;
-	__u32 pin;
-};
-
-struct kvm_irq_routing_msi {
-	__u32 address_lo;
-	__u32 address_hi;
-	__u32 data;
-	union {
-		__u32 pad;
-		__u32 devid;
-	};
-};
-
-If KVM_MSI_VALID_DEVID is set, devid contains a unique device identifier
-for the device that wrote the MSI message.  For PCI, this is usually a
-BDF identifier in the lower 16 bits.
-
-On x86, address_hi is ignored unless the KVM_X2APIC_API_USE_32BIT_IDS
-feature of KVM_CAP_X2APIC_API capability is enabled.  If it is enabled,
-address_hi bits 31-8 provide bits 31-8 of the destination id.  Bits 7-0 of
-address_hi must be zero.
-
-struct kvm_irq_routing_s390_adapter {
-	__u64 ind_addr;
-	__u64 summary_addr;
-	__u64 ind_offset;
-	__u32 summary_offset;
-	__u32 adapter_id;
-};
-
-struct kvm_irq_routing_hv_sint {
-	__u32 vcpu;
-	__u32 sint;
-};
-
-
-4.55 KVM_SET_TSC_KHZ
-
-Capability: KVM_CAP_TSC_CONTROL
-Architectures: x86
-Type: vcpu ioctl
-Parameters: virtual tsc_khz
-Returns: 0 on success, -1 on error
-
-Specifies the tsc frequency for the virtual machine.  The unit of the
-frequency is KHz.
-
-
-4.56 KVM_GET_TSC_KHZ
-
-Capability: KVM_CAP_GET_TSC_KHZ
-Architectures: x86
-Type: vcpu ioctl
-Parameters: none
-Returns: virtual tsc-khz on success, negative value on error
-
-Returns the tsc frequency of the guest.  The unit of the return value is
-KHz.  If the host has an unstable tsc, this ioctl returns -EIO as an error
-instead.
-
-
-4.57 KVM_GET_LAPIC
-
-Capability: KVM_CAP_IRQCHIP
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_lapic_state (out)
-Returns: 0 on success, -1 on error
-
-#define KVM_APIC_REG_SIZE 0x400
-struct kvm_lapic_state {
-	char regs[KVM_APIC_REG_SIZE];
-};
-
-Reads the Local APIC registers and copies them into the provided argument.
-The data format and layout are the same as documented in the architecture
-manual.
-
-If KVM_X2APIC_API_USE_32BIT_IDS feature of KVM_CAP_X2APIC_API is
-enabled, then the format of APIC_ID register depends on the APIC mode
-(reported by MSR_IA32_APICBASE) of its VCPU.  x2APIC stores APIC ID in
-the APIC_ID register (bytes 32-35).  xAPIC only allows an 8-bit APIC ID
-which is stored in bits 31-24 of the APIC register, or equivalently in
-byte 35 of struct kvm_lapic_state's regs field.  KVM_GET_LAPIC must then
-be called after MSR_IA32_APICBASE has been set with KVM_SET_MSR.
-
-If KVM_X2APIC_API_USE_32BIT_IDS feature is disabled, struct kvm_lapic_state
-always uses xAPIC format.
-
-
-4.58 KVM_SET_LAPIC
-
-Capability: KVM_CAP_IRQCHIP
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_lapic_state (in)
-Returns: 0 on success, -1 on error
-
-#define KVM_APIC_REG_SIZE 0x400
-struct kvm_lapic_state {
-	char regs[KVM_APIC_REG_SIZE];
-};
-
-Copies the input argument into the Local APIC registers.  The data format
-and layout are the same as documented in the architecture manual.
-
-The format of the APIC ID register (bytes 32-35 of struct kvm_lapic_state's
-regs field) depends on the state of the KVM_CAP_X2APIC_API capability.
-See the note in KVM_GET_LAPIC.
-
-
-4.59 KVM_IOEVENTFD
-
-Capability: KVM_CAP_IOEVENTFD
-Architectures: all
-Type: vm ioctl
-Parameters: struct kvm_ioeventfd (in)
-Returns: 0 on success, !0 on error
-
-This ioctl attaches or detaches an ioeventfd to a legal pio/mmio address
-within the guest.  A guest write to the registered address will signal the
-provided event instead of triggering an exit.
-
-struct kvm_ioeventfd {
-	__u64 datamatch;
-	__u64 addr;        /* legal pio/mmio address */
-	__u32 len;         /* 0, 1, 2, 4, or 8 bytes */
-	__s32 fd;
-	__u32 flags;
-	__u8  pad[36];
-};
-
-For the special case of virtio-ccw devices on s390, the ioevent is matched
-to a subchannel/virtqueue tuple instead.
-
-The following flags are defined:
-
-#define KVM_IOEVENTFD_FLAG_DATAMATCH (1 << kvm_ioeventfd_flag_nr_datamatch)
-#define KVM_IOEVENTFD_FLAG_PIO       (1 << kvm_ioeventfd_flag_nr_pio)
-#define KVM_IOEVENTFD_FLAG_DEASSIGN  (1 << kvm_ioeventfd_flag_nr_deassign)
-#define KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY \
-	(1 << kvm_ioeventfd_flag_nr_virtio_ccw_notify)
-
-If the datamatch flag is set, the event will be signaled only if the value
-written to the registered address is equal to datamatch in struct
-kvm_ioeventfd.
-
-For virtio-ccw devices, addr contains the subchannel id and datamatch the
-virtqueue index.
-
-With KVM_CAP_IOEVENTFD_ANY_LENGTH, a zero length ioeventfd is allowed, and
-the kernel will ignore the length of the guest write, which may result in a
-faster vmexit.  The speedup may only apply to specific architectures, but
-the ioeventfd will work anyway.
-
-4.60 KVM_DIRTY_TLB
-
-Capability: KVM_CAP_SW_TLB
-Architectures: ppc
-Type: vcpu ioctl
-Parameters: struct kvm_dirty_tlb (in)
-Returns: 0 on success, -1 on error
-
-struct kvm_dirty_tlb {
-	__u64 bitmap;
-	__u32 num_dirty;
-};
-
-This must be called whenever userspace has changed an entry in the shared
-TLB, prior to calling KVM_RUN on the associated vcpu.
-
-The "bitmap" field is the userspace address of an array.  This array
-consists of a number of bits, equal to the total number of TLB entries as
-determined by the last successful call to KVM_CONFIG_TLB, rounded up to the
-nearest multiple of 64.
-
-Each bit corresponds to one TLB entry, ordered the same as in the shared TLB
-array.
-
-The array is little-endian: bit 0 is the least significant bit of the
-first byte, bit 8 is the least significant bit of the second byte, etc.
-This avoids any complications with differing word sizes.
-
-The "num_dirty" field is a performance hint for KVM to determine whether it
-should skip processing the bitmap and just invalidate everything.  It must
-be set to the number of set bits in the bitmap.
-
-
-4.62 KVM_CREATE_SPAPR_TCE
-
-Capability: KVM_CAP_SPAPR_TCE
-Architectures: powerpc
-Type: vm ioctl
-Parameters: struct kvm_create_spapr_tce (in)
-Returns: file descriptor for manipulating the created TCE table
-
-This creates a virtual TCE (translation control entry) table, which
-is an IOMMU for PAPR-style virtual I/O.  It is used to translate
-logical addresses used in virtual I/O into guest physical addresses,
-and provides a scatter/gather capability for PAPR virtual I/O.
-
-/* for KVM_CAP_SPAPR_TCE */
-struct kvm_create_spapr_tce {
-	__u64 liobn;
-	__u32 window_size;
-};
-
-The liobn field gives the logical IO bus number for which to create a
-TCE table.  The window_size field specifies the size of the DMA window
-which this TCE table will translate - the table will contain one 64-bit
-TCE entry for every 4kiB of the DMA window.
-
-When the guest issues an H_PUT_TCE hcall on a liobn for which a TCE
-table has been created using this ioctl(), the kernel will handle it
-in real mode, updating the TCE table.  H_PUT_TCE calls for other
-liobns will cause a vm exit and must be handled by userspace.
-
-The return value is a file descriptor which can be passed to mmap(2)
-to map the created TCE table into userspace.  This lets userspace read
-the entries written by kernel-handled H_PUT_TCE calls, and also lets
-userspace update the TCE table directly, which is useful in some
-circumstances.
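-A minimal sketch of this create-and-map sequence (hypothetical helper;
-the 256MiB window is chosen arbitrarily, and error handling is omitted):
-
-	#include <sys/ioctl.h>
-	#include <sys/mman.h>
-	#include <linux/kvm.h>
-
-	/* Create a TCE table and mmap it; returns the table fd. */
-	static int map_tce_table(int vm_fd, __u64 liobn, __u64 **table)
-	{
-		struct kvm_create_spapr_tce args = {
-			.liobn = liobn,
-			.window_size = 256 << 20,	/* 256MiB DMA window */
-		};
-		/* one 64-bit TCE entry per 4kiB of DMA window */
-		size_t len = (args.window_size >> 12) * sizeof(__u64);
-		int fd = ioctl(vm_fd, KVM_CREATE_SPAPR_TCE, &args);
-
-		*table = mmap(NULL, len, PROT_READ | PROT_WRITE,
-			      MAP_SHARED, fd, 0);
-		return fd;
-	}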
-
-
-4.63 KVM_ALLOCATE_RMA
-
-Capability: KVM_CAP_PPC_RMA
-Architectures: powerpc
-Type: vm ioctl
-Parameters: struct kvm_allocate_rma (out)
-Returns: file descriptor for mapping the allocated RMA
-
-This allocates a Real Mode Area (RMA) from the pool allocated at boot
-time by the kernel.  An RMA is a physically-contiguous, aligned region
-of memory used on older POWER processors to provide the memory which
-will be accessed by real-mode (MMU off) accesses in a KVM guest.
-POWER processors support a set of sizes for the RMA that usually
-includes 64MB, 128MB, 256MB and some larger powers of two.
-
-/* for KVM_ALLOCATE_RMA */
-struct kvm_allocate_rma {
-	__u64 rma_size;
-};
-
-The return value is a file descriptor which can be passed to mmap(2)
-to map the allocated RMA into userspace.  The mapped area can then be
-passed to the KVM_SET_USER_MEMORY_REGION ioctl to establish it as the
-RMA for a virtual machine.  The size of the RMA in bytes (which is
-fixed at host kernel boot time) is returned in the rma_size field of
-the argument structure.
-
-The KVM_CAP_PPC_RMA capability is 1 or 2 if the KVM_ALLOCATE_RMA ioctl
-is supported; 2 if the processor requires all virtual machines to have
-an RMA, or 1 if the processor can use an RMA but doesn't require it,
-because it supports the Virtual RMA (VRMA) facility.
-
-
-4.64 KVM_NMI
-
-Capability: KVM_CAP_USER_NMI
-Architectures: x86
-Type: vcpu ioctl
-Parameters: none
-Returns: 0 on success, -1 on error
-
-Queues an NMI on the thread's vcpu.  Note this is well defined only
-when KVM_CREATE_IRQCHIP has not been called, since this is an interface
-between the virtual cpu core and virtual local APIC.  After KVM_CREATE_IRQCHIP
-has been called, this interface is completely emulated within the kernel.
-
-To use this to emulate the LINT1 input with KVM_CREATE_IRQCHIP, use the
-following algorithm:
-
-  - pause the vcpu
-  - read the local APIC's state (KVM_GET_LAPIC)
-  - check whether changing LINT1 will queue an NMI (see the LVT entry for LINT1)
-  - if so, issue KVM_NMI
-  - resume the vcpu
-
-Some guests configure the LINT1 NMI input to cause a panic, aiding in
-debugging.
-
-
-4.65 KVM_S390_UCAS_MAP
-
-Capability: KVM_CAP_S390_UCONTROL
-Architectures: s390
-Type: vcpu ioctl
-Parameters: struct kvm_s390_ucas_mapping (in)
-Returns: 0 in case of success
-
-The parameter is defined like this:
-	struct kvm_s390_ucas_mapping {
-		__u64 user_addr;
-		__u64 vcpu_addr;
-		__u64 length;
-	};
-
-This ioctl maps the memory at "user_addr" with the length "length" to
-the vcpu's address space starting at "vcpu_addr".  All parameters need to
-be 1 megabyte aligned.
-
-
-4.66 KVM_S390_UCAS_UNMAP
-
-Capability: KVM_CAP_S390_UCONTROL
-Architectures: s390
-Type: vcpu ioctl
-Parameters: struct kvm_s390_ucas_mapping (in)
-Returns: 0 in case of success
-
-The parameter is defined like this:
-	struct kvm_s390_ucas_mapping {
-		__u64 user_addr;
-		__u64 vcpu_addr;
-		__u64 length;
-	};
-
-This ioctl unmaps the memory in the vcpu's address space starting at
-"vcpu_addr" with the length "length".  The field "user_addr" is ignored.
-All parameters need to be 1 megabyte aligned.
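-A minimal sketch of a matching map/unmap pair for these two ioctls
-(hypothetical helper; error handling omitted; all three values must be
-1 megabyte aligned as stated above):
-
-	#include <sys/ioctl.h>
-	#include <linux/kvm.h>
-
-	#define SEG	(1UL << 20)	/* the 1MB alignment unit */
-
-	static void ucas_map_then_unmap(int vcpu_fd,
-					__u64 user_addr, __u64 vcpu_addr)
-	{
-		struct kvm_s390_ucas_mapping map = {
-			.user_addr = user_addr,	/* 1MB aligned */
-			.vcpu_addr = vcpu_addr,	/* 1MB aligned */
-			.length    = SEG,
-		};
-
-		ioctl(vcpu_fd, KVM_S390_UCAS_MAP, &map);
-		/* ... run the vcpu ... */
-		/* user_addr is ignored for the unmap */
-		ioctl(vcpu_fd, KVM_S390_UCAS_UNMAP, &map);
-	}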
-
-
-4.67 KVM_S390_VCPU_FAULT
-
-Capability: KVM_CAP_S390_UCONTROL
-Architectures: s390
-Type: vcpu ioctl
-Parameters: vcpu absolute address (in)
-Returns: 0 in case of success
-
-This call creates a page table entry on the virtual cpu's address space
-(for user controlled virtual machines) or the virtual machine's address
-space (for regular virtual machines).  This only works for minor faults,
-thus it's recommended to access the subject memory page via the user page
-table upfront.  This is useful to handle validity intercepts for user
-controlled virtual machines to fault in the virtual cpu's lowcore pages
-prior to calling the KVM_RUN ioctl.
-
-
-4.68 KVM_SET_ONE_REG
-
-Capability: KVM_CAP_ONE_REG
-Architectures: all
-Type: vcpu ioctl
-Parameters: struct kvm_one_reg (in)
-Returns: 0 on success, negative value on failure
-Errors:
-  ENOENT:   no such register
-  EINVAL:   invalid register ID, or no such register
-  EPERM:    (arm64) register access not allowed before vcpu finalization
-(These error codes are indicative only: do not rely on a specific error
-code being returned in a specific situation.)
-
-struct kvm_one_reg {
-	__u64 id;
-	__u64 addr;
-};
-
-Using this ioctl, a single vcpu register can be set to a specific value
-defined by user space with the passed-in struct kvm_one_reg, where id
-refers to the register identifier as described below and addr is a pointer
-to a variable with the respective size.  There can be architecture agnostic
-and architecture specific registers.  Each has its own range of operation
-and its own constants and width.  To keep track of the implemented
-registers, find a list below:
-
- Arch | Register | Width (bits)
- | |
- PPC | KVM_REG_PPC_HIOR | 64
- PPC | KVM_REG_PPC_IAC1 | 64
- PPC | KVM_REG_PPC_IAC2 | 64
- PPC | KVM_REG_PPC_IAC3 | 64
- PPC | KVM_REG_PPC_IAC4 | 64
- PPC | KVM_REG_PPC_DAC1 | 64
- PPC | KVM_REG_PPC_DAC2 | 64
- PPC | KVM_REG_PPC_DABR | 64
- PPC | KVM_REG_PPC_DSCR | 64
- PPC | KVM_REG_PPC_PURR | 64
- PPC | KVM_REG_PPC_SPURR | 64
- PPC | KVM_REG_PPC_DAR | 64
- PPC | KVM_REG_PPC_DSISR | 32
- PPC | KVM_REG_PPC_AMR | 64
- PPC | KVM_REG_PPC_UAMOR | 64
- PPC | KVM_REG_PPC_MMCR0 | 64
- PPC | KVM_REG_PPC_MMCR1 | 64
- PPC | KVM_REG_PPC_MMCRA | 64
- PPC | KVM_REG_PPC_MMCR2 | 64
- PPC | KVM_REG_PPC_MMCRS | 64
- PPC | KVM_REG_PPC_SIAR | 64
- PPC | KVM_REG_PPC_SDAR | 64
- PPC | KVM_REG_PPC_SIER | 64
- PPC | KVM_REG_PPC_PMC1 | 32
- PPC | KVM_REG_PPC_PMC2 | 32
- PPC | KVM_REG_PPC_PMC3 | 32
- PPC | KVM_REG_PPC_PMC4 | 32
- PPC | KVM_REG_PPC_PMC5 | 32
- PPC | KVM_REG_PPC_PMC6 | 32
- PPC | KVM_REG_PPC_PMC7 | 32
- PPC | KVM_REG_PPC_PMC8 | 32
- PPC | KVM_REG_PPC_FPR0 | 64
- ...
- PPC | KVM_REG_PPC_FPR31 | 64
- PPC | KVM_REG_PPC_VR0 | 128
- ...
- PPC | KVM_REG_PPC_VR31 | 128
- PPC | KVM_REG_PPC_VSR0 | 128
- ...
- PPC | KVM_REG_PPC_VSR31 | 128 - PPC | KVM_REG_PPC_FPSCR | 64 - PPC | KVM_REG_PPC_VSCR | 32 - PPC | KVM_REG_PPC_VPA_ADDR | 64 - PPC | KVM_REG_PPC_VPA_SLB | 128 - PPC | KVM_REG_PPC_VPA_DTL | 128 - PPC | KVM_REG_PPC_EPCR | 32 - PPC | KVM_REG_PPC_EPR | 32 - PPC | KVM_REG_PPC_TCR | 32 - PPC | KVM_REG_PPC_TSR | 32 - PPC | KVM_REG_PPC_OR_TSR | 32 - PPC | KVM_REG_PPC_CLEAR_TSR | 32 - PPC | KVM_REG_PPC_MAS0 | 32 - PPC | KVM_REG_PPC_MAS1 | 32 - PPC | KVM_REG_PPC_MAS2 | 64 - PPC | KVM_REG_PPC_MAS7_3 | 64 - PPC | KVM_REG_PPC_MAS4 | 32 - PPC | KVM_REG_PPC_MAS6 | 32 - PPC | KVM_REG_PPC_MMUCFG | 32 - PPC | KVM_REG_PPC_TLB0CFG | 32 - PPC | KVM_REG_PPC_TLB1CFG | 32 - PPC | KVM_REG_PPC_TLB2CFG | 32 - PPC | KVM_REG_PPC_TLB3CFG | 32 - PPC | KVM_REG_PPC_TLB0PS | 32 - PPC | KVM_REG_PPC_TLB1PS | 32 - PPC | KVM_REG_PPC_TLB2PS | 32 - PPC | KVM_REG_PPC_TLB3PS | 32 - PPC | KVM_REG_PPC_EPTCFG | 32 - PPC | KVM_REG_PPC_ICP_STATE | 64 - PPC | KVM_REG_PPC_VP_STATE | 128 - PPC | KVM_REG_PPC_TB_OFFSET | 64 - PPC | KVM_REG_PPC_SPMC1 | 32 - PPC | KVM_REG_PPC_SPMC2 | 32 - PPC | KVM_REG_PPC_IAMR | 64 - PPC | KVM_REG_PPC_TFHAR | 64 - PPC | KVM_REG_PPC_TFIAR | 64 - PPC | KVM_REG_PPC_TEXASR | 64 - PPC | KVM_REG_PPC_FSCR | 64 - PPC | KVM_REG_PPC_PSPB | 32 - PPC | KVM_REG_PPC_EBBHR | 64 - PPC | KVM_REG_PPC_EBBRR | 64 - PPC | KVM_REG_PPC_BESCR | 64 - PPC | KVM_REG_PPC_TAR | 64 - PPC | KVM_REG_PPC_DPDES | 64 - PPC | KVM_REG_PPC_DAWR | 64 - PPC | KVM_REG_PPC_DAWRX | 64 - PPC | KVM_REG_PPC_CIABR | 64 - PPC | KVM_REG_PPC_IC | 64 - PPC | KVM_REG_PPC_VTB | 64 - PPC | KVM_REG_PPC_CSIGR | 64 - PPC | KVM_REG_PPC_TACR | 64 - PPC | KVM_REG_PPC_TCSCR | 64 - PPC | KVM_REG_PPC_PID | 64 - PPC | KVM_REG_PPC_ACOP | 64 - PPC | KVM_REG_PPC_VRSAVE | 32 - PPC | KVM_REG_PPC_LPCR | 32 - PPC | KVM_REG_PPC_LPCR_64 | 64 - PPC | KVM_REG_PPC_PPR | 64 - PPC | KVM_REG_PPC_ARCH_COMPAT | 32 - PPC | KVM_REG_PPC_DABRX | 32 - PPC | KVM_REG_PPC_WORT | 64 - PPC | KVM_REG_PPC_SPRG9 | 64 - PPC | KVM_REG_PPC_DBSR | 32 - PPC | KVM_REG_PPC_TIDR | 64 - PPC | KVM_REG_PPC_PSSCR | 64 - PPC | KVM_REG_PPC_DEC_EXPIRY | 64 - PPC | KVM_REG_PPC_PTCR | 64 - PPC | KVM_REG_PPC_TM_GPR0 | 64 - ... - PPC | KVM_REG_PPC_TM_GPR31 | 64 - PPC | KVM_REG_PPC_TM_VSR0 | 128 - ... - PPC | KVM_REG_PPC_TM_VSR63 | 128 - PPC | KVM_REG_PPC_TM_CR | 64 - PPC | KVM_REG_PPC_TM_LR | 64 - PPC | KVM_REG_PPC_TM_CTR | 64 - PPC | KVM_REG_PPC_TM_FPSCR | 64 - PPC | KVM_REG_PPC_TM_AMR | 64 - PPC | KVM_REG_PPC_TM_PPR | 64 - PPC | KVM_REG_PPC_TM_VRSAVE | 64 - PPC | KVM_REG_PPC_TM_VSCR | 32 - PPC | KVM_REG_PPC_TM_DSCR | 64 - PPC | KVM_REG_PPC_TM_TAR | 64 - PPC | KVM_REG_PPC_TM_XER | 64 - | | - MIPS | KVM_REG_MIPS_R0 | 64 - ... 
- MIPS | KVM_REG_MIPS_R31 | 64
- MIPS | KVM_REG_MIPS_HI | 64
- MIPS | KVM_REG_MIPS_LO | 64
- MIPS | KVM_REG_MIPS_PC | 64
- MIPS | KVM_REG_MIPS_CP0_INDEX | 32
- MIPS | KVM_REG_MIPS_CP0_ENTRYLO0 | 64
- MIPS | KVM_REG_MIPS_CP0_ENTRYLO1 | 64
- MIPS | KVM_REG_MIPS_CP0_CONTEXT | 64
- MIPS | KVM_REG_MIPS_CP0_CONTEXTCONFIG | 32
- MIPS | KVM_REG_MIPS_CP0_USERLOCAL | 64
- MIPS | KVM_REG_MIPS_CP0_XCONTEXTCONFIG | 64
- MIPS | KVM_REG_MIPS_CP0_PAGEMASK | 32
- MIPS | KVM_REG_MIPS_CP0_PAGEGRAIN | 32
- MIPS | KVM_REG_MIPS_CP0_SEGCTL0 | 64
- MIPS | KVM_REG_MIPS_CP0_SEGCTL1 | 64
- MIPS | KVM_REG_MIPS_CP0_SEGCTL2 | 64
- MIPS | KVM_REG_MIPS_CP0_PWBASE | 64
- MIPS | KVM_REG_MIPS_CP0_PWFIELD | 64
- MIPS | KVM_REG_MIPS_CP0_PWSIZE | 64
- MIPS | KVM_REG_MIPS_CP0_WIRED | 32
- MIPS | KVM_REG_MIPS_CP0_PWCTL | 32
- MIPS | KVM_REG_MIPS_CP0_HWRENA | 32
- MIPS | KVM_REG_MIPS_CP0_BADVADDR | 64
- MIPS | KVM_REG_MIPS_CP0_BADINSTR | 32
- MIPS | KVM_REG_MIPS_CP0_BADINSTRP | 32
- MIPS | KVM_REG_MIPS_CP0_COUNT | 32
- MIPS | KVM_REG_MIPS_CP0_ENTRYHI | 64
- MIPS | KVM_REG_MIPS_CP0_COMPARE | 32
- MIPS | KVM_REG_MIPS_CP0_STATUS | 32
- MIPS | KVM_REG_MIPS_CP0_INTCTL | 32
- MIPS | KVM_REG_MIPS_CP0_CAUSE | 32
- MIPS | KVM_REG_MIPS_CP0_EPC | 64
- MIPS | KVM_REG_MIPS_CP0_PRID | 32
- MIPS | KVM_REG_MIPS_CP0_EBASE | 64
- MIPS | KVM_REG_MIPS_CP0_CONFIG | 32
- MIPS | KVM_REG_MIPS_CP0_CONFIG1 | 32
- MIPS | KVM_REG_MIPS_CP0_CONFIG2 | 32
- MIPS | KVM_REG_MIPS_CP0_CONFIG3 | 32
- MIPS | KVM_REG_MIPS_CP0_CONFIG4 | 32
- MIPS | KVM_REG_MIPS_CP0_CONFIG5 | 32
- MIPS | KVM_REG_MIPS_CP0_CONFIG7 | 32
- MIPS | KVM_REG_MIPS_CP0_XCONTEXT | 64
- MIPS | KVM_REG_MIPS_CP0_ERROREPC | 64
- MIPS | KVM_REG_MIPS_CP0_KSCRATCH1 | 64
- MIPS | KVM_REG_MIPS_CP0_KSCRATCH2 | 64
- MIPS | KVM_REG_MIPS_CP0_KSCRATCH3 | 64
- MIPS | KVM_REG_MIPS_CP0_KSCRATCH4 | 64
- MIPS | KVM_REG_MIPS_CP0_KSCRATCH5 | 64
- MIPS | KVM_REG_MIPS_CP0_KSCRATCH6 | 64
- MIPS | KVM_REG_MIPS_CP0_MAAR(0..63) | 64
- MIPS | KVM_REG_MIPS_COUNT_CTL | 64
- MIPS | KVM_REG_MIPS_COUNT_RESUME | 64
- MIPS | KVM_REG_MIPS_COUNT_HZ | 64
- MIPS | KVM_REG_MIPS_FPR_32(0..31) | 32
- MIPS | KVM_REG_MIPS_FPR_64(0..31) | 64
- MIPS | KVM_REG_MIPS_VEC_128(0..31) | 128
- MIPS | KVM_REG_MIPS_FCR_IR | 32
- MIPS | KVM_REG_MIPS_FCR_CSR | 32
- MIPS | KVM_REG_MIPS_MSA_IR | 32
- MIPS | KVM_REG_MIPS_MSA_CSR | 32
-
-ARM registers are mapped using the lower 32 bits.  The upper 16 of that
-is the register group type, or coprocessor number:
-
-ARM core registers have the following id bit patterns:
-  0x4020 0000 0010 <index into the kvm_regs struct:16>
-
-ARM 32-bit CP15 registers have the following id bit patterns:
-  0x4020 0000 000F <zero:1> <crn:4> <crm:4> <opc1:4> <opc2:3>
-
-ARM 64-bit CP15 registers have the following id bit patterns:
-  0x4030 0000 000F <zero:1> <zero:4> <crm:4> <opc1:4> <zero:3>
-
-ARM CCSIDR registers are demultiplexed by CSSELR value:
-  0x4020 0000 0011 00 <csselr:8>
-
-ARM 32-bit VFP control registers have the following id bit patterns:
-  0x4020 0000 0012 1 <regno:12>
-
-ARM 64-bit FP registers have the following id bit patterns:
-  0x4030 0000 0012 0 <regno:12>
-
-ARM firmware pseudo-registers have the following bit pattern:
-  0x4030 0000 0014 <regno:16>
-
-
-arm64 registers are mapped using the lower 32 bits.  The upper 16 of
-that is the register group type, or coprocessor number:
-
-arm64 core/FP-SIMD registers have the following id bit patterns.  Note
-that the size of the access is variable, as the kvm_regs structure
-contains elements ranging from 32 to 128 bits.  The index is a 32bit
-value in the kvm_regs structure seen as a 32bit array.
-  0x60x0 0000 0010 <index into the kvm_regs struct:16>
-
-Specifically:
-    Encoding            Register  Bits  kvm_regs member
-----------------------------------------------------------------
-  0x6030 0000 0010 0000 X0          64  regs.regs[0]
-  0x6030 0000 0010 0002 X1          64  regs.regs[1]
-  ...
-  0x6030 0000 0010 003c X30         64  regs.regs[30]
-  0x6030 0000 0010 003e SP          64  regs.sp
-  0x6030 0000 0010 0040 PC          64  regs.pc
-  0x6030 0000 0010 0042 PSTATE      64  regs.pstate
-  0x6030 0000 0010 0044 SP_EL1      64  sp_el1
-  0x6030 0000 0010 0046 ELR_EL1     64  elr_el1
-  0x6030 0000 0010 0048 SPSR_EL1    64  spsr[KVM_SPSR_EL1] (alias SPSR_SVC)
-  0x6030 0000 0010 004a SPSR_ABT    64  spsr[KVM_SPSR_ABT]
-  0x6030 0000 0010 004c SPSR_UND    64  spsr[KVM_SPSR_UND]
-  0x6030 0000 0010 004e SPSR_IRQ    64  spsr[KVM_SPSR_IRQ]
-  0x6030 0000 0010 0050 SPSR_FIQ    64  spsr[KVM_SPSR_FIQ]
-  0x6040 0000 0010 0054 V0         128  fp_regs.vregs[0]    (*)
-  0x6040 0000 0010 0058 V1         128  fp_regs.vregs[1]    (*)
-  ...
-  0x6040 0000 0010 00d0 V31        128  fp_regs.vregs[31]   (*)
-  0x6020 0000 0010 00d4 FPSR        32  fp_regs.fpsr
-  0x6020 0000 0010 00d5 FPCR        32  fp_regs.fpcr
-
-(*) These encodings are not accepted for SVE-enabled vcpus.  See
-    KVM_ARM_VCPU_INIT.
-
-    The equivalent register content can be accessed via bits [127:0] of
-    the corresponding SVE Zn registers instead for vcpus that have SVE
-    enabled (see below).
-
-arm64 CCSIDR registers are demultiplexed by CSSELR value:
-  0x6020 0000 0011 00 <csselr:8>
-
-arm64 system registers have the following id bit patterns:
-  0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
-
-arm64 firmware pseudo-registers have the following bit pattern:
-  0x6030 0000 0014 <regno:16>
-
-arm64 SVE registers have the following bit patterns:
-  0x6080 0000 0015 00 <n:5> <slice:5>   Zn bits[2048*slice + 2047 : 2048*slice]
-  0x6050 0000 0015 04 <n:4> <slice:5>   Pn bits[256*slice + 255 : 256*slice]
-  0x6050 0000 0015 060 <slice:5>        FFR bits[256*slice + 255 : 256*slice]
-  0x6060 0000 0015 ffff                 KVM_REG_ARM64_SVE_VLS pseudo-register
-
-Access to register IDs where 2048 * slice >= 128 * max_vq will fail with
-ENOENT.  max_vq is the vcpu's maximum supported vector length in 128-bit
-quadwords: see (**) below.
-
-These registers are only accessible on vcpus for which SVE is enabled.
-See KVM_ARM_VCPU_INIT for details.
-
-In addition, except for KVM_REG_ARM64_SVE_VLS, these registers are not
-accessible until the vcpu's SVE configuration has been finalized
-using KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE).  See KVM_ARM_VCPU_INIT
-and KVM_ARM_VCPU_FINALIZE for more information about this procedure.
-
-KVM_REG_ARM64_SVE_VLS is a pseudo-register that allows the set of vector
-lengths supported by the vcpu to be discovered and configured by
-userspace.  When transferred to or from user memory via KVM_GET_ONE_REG
-or KVM_SET_ONE_REG, the value of this register is of type
-__u64[KVM_ARM64_SVE_VLS_WORDS], and encodes the set of vector lengths as
-follows:
-
-__u64 vector_lengths[KVM_ARM64_SVE_VLS_WORDS];
-
-if (vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX &&
-    ((vector_lengths[(vq - KVM_ARM64_SVE_VQ_MIN) / 64] >>
-		((vq - KVM_ARM64_SVE_VQ_MIN) % 64)) & 1))
-	/* Vector length vq * 16 bytes supported */
-else
-	/* Vector length vq * 16 bytes not supported */
-
-(**) The maximum value vq for which the above condition is true is
-max_vq.  This is the maximum vector length available to the guest on
-this vcpu, and determines which register slices are visible through
-this ioctl interface.
-
-(See Documentation/arm64/sve.rst for an explanation of the "vq"
-nomenclature.)
-
-KVM_REG_ARM64_SVE_VLS is only accessible after KVM_ARM_VCPU_INIT.
-KVM_ARM_VCPU_INIT initialises it to the best set of vector lengths that
-the host supports.
-
-Userspace may subsequently modify it if desired until the vcpu's SVE
-configuration is finalized using KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE).
-
-Apart from simply removing all vector lengths from the host set that
-exceed some value, support for arbitrarily chosen sets of vector lengths
-is hardware-dependent and may not be available.  Attempting to configure
-an invalid set of vector lengths via KVM_SET_ONE_REG will fail with
-EINVAL.
-
-After the vcpu's SVE configuration is finalized, further attempts to
-write this register will fail with EPERM.
-
-
-MIPS registers are mapped using the lower 32 bits.  The upper 16 of that is
-the register group type:
-
-MIPS core registers (see above) have the following id bit patterns:
-  0x7030 0000 0000 <reg:16>
-
-MIPS CP0 registers (see KVM_REG_MIPS_CP0_* above) have the following id bit
-patterns depending on whether they're 32-bit or 64-bit registers:
-  0x7020 0000 0001 00 <reg:5> <sel:3>   (32-bit)
-  0x7030 0000 0001 00 <reg:5> <sel:3>   (64-bit)
-
-Note: KVM_REG_MIPS_CP0_ENTRYLO0 and KVM_REG_MIPS_CP0_ENTRYLO1 are the MIPS64
-versions of the EntryLo registers regardless of the word size of the host
-hardware, host kernel, guest, and whether XPA is present in the guest, i.e.
-with the RI and XI bits (if they exist) in bits 63 and 62 respectively, and
-the PFNX field starting at bit 30.
-
-MIPS MAARs (see KVM_REG_MIPS_CP0_MAAR(*) above) have the following id bit
-patterns:
-  0x7030 0000 0001 01 <reg:8>
-
-MIPS KVM control registers (see above) have the following id bit patterns:
-  0x7030 0000 0002 <reg:16>
-
-MIPS FPU registers (see KVM_REG_MIPS_FPR_{32,64}() above) have the following
-id bit patterns depending on the size of the register being accessed.  They
-are always accessed according to the current guest FPU mode (Status.FR and
-Config5.FRE), i.e. as the guest would see them, and they become unpredictable
-if the guest FPU mode is changed.  MIPS SIMD Architecture (MSA) vector
-registers (see KVM_REG_MIPS_VEC_128() above) have similar patterns as they
-overlap the FPU registers:
-  0x7020 0000 0003 00 <0:3> <reg:5>   (32-bit FPU registers)
-  0x7030 0000 0003 00 <0:3> <reg:5>   (64-bit FPU registers)
-  0x7040 0000 0003 00 <0:3> <reg:5>   (128-bit MSA vector registers)
-
-MIPS FPU control registers (see KVM_REG_MIPS_FCR_{IR,CSR} above) have the
-following id bit patterns:
-  0x7020 0000 0003 01 <0:3> <reg:5>
-
-MIPS MSA control registers (see KVM_REG_MIPS_MSA_{IR,CSR} above) have the
-following id bit patterns:
-  0x7020 0000 0003 02 <0:3> <reg:5>
-
-
-4.69 KVM_GET_ONE_REG
-
-Capability: KVM_CAP_ONE_REG
-Architectures: all
-Type: vcpu ioctl
-Parameters: struct kvm_one_reg (in and out)
-Returns: 0 on success, negative value on failure
-Errors include:
-  ENOENT:   no such register
-  EINVAL:   invalid register ID, or no such register
-  EPERM:    (arm64) register access not allowed before vcpu finalization
-(These error codes are indicative only: do not rely on a specific error
-code being returned in a specific situation.)
-
-This ioctl allows userspace to receive the value of a single register
-implemented in a vcpu.  The register to read is indicated by the "id" field
-of the kvm_one_reg struct passed in.  On success, the register value can be
-found at the memory location pointed to by "addr".
-
-The list of registers accessible using this interface is identical to the
-list in 4.68.
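-As a concrete example of 4.68/4.69, this sketch reads X0 of an arm64
-vcpu using the 0x6030 0000 0010 0000 encoding from the table above
-(hypothetical helper; error handling omitted):
-
-	#include <sys/ioctl.h>
-	#include <linux/kvm.h>
-
-	static __u64 read_x0(int vcpu_fd)
-	{
-		__u64 val = 0;
-		struct kvm_one_reg reg = {
-			/* arm64 core register X0: 64-bit, regs.regs[0] */
-			.id   = 0x6030000000100000ULL,
-			.addr = (__u64)(unsigned long)&val,
-		};
-
-		ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
-		return val;
-	}
-
-The same id used with KVM_SET_ONE_REG writes the register instead.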
-
-
-4.70 KVM_KVMCLOCK_CTRL
-
-Capability: KVM_CAP_KVMCLOCK_CTRL
-Architectures: Any that implement pvclocks (currently x86 only)
-Type: vcpu ioctl
-Parameters: None
-Returns: 0 on success, -1 on error
-
-This signals to the host kernel that the specified guest is being paused by
-userspace.  The host will set a flag in the pvclock structure that is checked
-from the soft lockup watchdog.  The flag is part of the pvclock structure that
-is shared between guest and host, specifically the second bit of the flags
-field of the pvclock_vcpu_time_info structure.  It will be set exclusively by
-the host and read/cleared exclusively by the guest.  The guest operation of
-checking and clearing the flag must be an atomic operation, so
-load-link/store-conditional, or an equivalent, must be used.  There are two
-cases where the guest will clear the flag: when the soft lockup watchdog
-timer resets itself or when a soft lockup is detected.  This ioctl can be
-called any time after pausing the vcpu, but before it is resumed.
-
-
-4.71 KVM_SIGNAL_MSI
-
-Capability: KVM_CAP_SIGNAL_MSI
-Architectures: x86 arm arm64
-Type: vm ioctl
-Parameters: struct kvm_msi (in)
-Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
-
-Directly inject an MSI message.  Only valid with an in-kernel irqchip that
-handles MSI messages.
-
-struct kvm_msi {
-	__u32 address_lo;
-	__u32 address_hi;
-	__u32 data;
-	__u32 flags;
-	__u32 devid;
-	__u8  pad[12];
-};
-
-flags: KVM_MSI_VALID_DEVID: devid contains a valid value.  The per-VM
-  KVM_CAP_MSI_DEVID capability advertises the requirement to provide
-  the device ID.  If this capability is not available, userspace
-  should never set the KVM_MSI_VALID_DEVID flag as the ioctl might fail.
-
-If KVM_MSI_VALID_DEVID is set, devid contains a unique device identifier
-for the device that wrote the MSI message.  For PCI, this is usually a
-BDF identifier in the lower 16 bits.
-
-On x86, address_hi is ignored unless the KVM_X2APIC_API_USE_32BIT_IDS
-feature of KVM_CAP_X2APIC_API capability is enabled.  If it is enabled,
-address_hi bits 31-8 provide bits 31-8 of the destination id.  Bits 7-0 of
-address_hi must be zero.
-
-
-4.71 KVM_CREATE_PIT2
-
-Capability: KVM_CAP_PIT2
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_pit_config (in)
-Returns: 0 on success, -1 on error
-
-Creates an in-kernel device model for the i8254 PIT.  This call is only valid
-after enabling in-kernel irqchip support via KVM_CREATE_IRQCHIP.  The
-following parameters have to be passed:
-
-struct kvm_pit_config {
-	__u32 flags;
-	__u32 pad[15];
-};
-
-Valid flags are:
-
-#define KVM_PIT_SPEAKER_DUMMY     1 /* emulate speaker port stub */
-
-PIT timer interrupts may use a per-VM kernel thread for injection.  If it
-exists, this thread will have a name of the following pattern:
-
-kvm-pit/<owner process pid>
-
-When running a guest with elevated priorities, the scheduling parameters of
-this thread may have to be adjusted accordingly.
-
-This IOCTL replaces the obsolete KVM_CREATE_PIT.
-
-
-4.72 KVM_GET_PIT2
-
-Capability: KVM_CAP_PIT_STATE2
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_pit_state2 (out)
-Returns: 0 on success, -1 on error
-
-Retrieves the state of the in-kernel PIT model.  Only valid after
-KVM_CREATE_PIT2.
The state is returned in the following structure:
-
-struct kvm_pit_state2 {
-	struct kvm_pit_channel_state channels[3];
-	__u32 flags;
-	__u32 reserved[9];
-};
-
-Valid flags are:
-
-/* disable PIT in HPET legacy mode */
-#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
-
-This IOCTL replaces the obsolete KVM_GET_PIT.
-
-
-4.73 KVM_SET_PIT2
-
-Capability: KVM_CAP_PIT_STATE2
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_pit_state2 (in)
-Returns: 0 on success, -1 on error
-
-Sets the state of the in-kernel PIT model.  Only valid after KVM_CREATE_PIT2.
-See KVM_GET_PIT2 for details on struct kvm_pit_state2.
-
-This IOCTL replaces the obsolete KVM_SET_PIT.
-
-
-4.74 KVM_PPC_GET_SMMU_INFO
-
-Capability: KVM_CAP_PPC_GET_SMMU_INFO
-Architectures: powerpc
-Type: vm ioctl
-Parameters: None
-Returns: 0 on success, -1 on error
-
-This populates and returns a structure describing the features of
-the "Server" class MMU emulation supported by KVM.
-This can in turn be used by userspace to generate the appropriate
-device-tree properties for the guest operating system.
-
-The structure contains some global information, followed by an
-array of supported segment page sizes:
-
-      struct kvm_ppc_smmu_info {
-	     __u64 flags;
-	     __u32 slb_size;
-	     __u32 pad;
-	     struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
-      };
-
-The supported flags are:
-
-    - KVM_PPC_PAGE_SIZES_REAL:
-        When that flag is set, guest page sizes must "fit" the backing
-        store page sizes.  When not set, any page size in the list can
-        be used regardless of how they are backed by userspace.
-
-    - KVM_PPC_1T_SEGMENTS
-        The emulated MMU supports 1T segments in addition to the
-        standard 256M ones.
-
-    - KVM_PPC_NO_HASH
-        This flag indicates that HPT guests are not supported by KVM,
-        thus all guests must use radix MMU mode.
-
-The "slb_size" field indicates how many SLB entries are supported.
-
-The "sps" array contains 8 entries indicating the supported base
-page sizes for a segment in increasing order.  Each entry is defined
-as follows:
-
-   struct kvm_ppc_one_seg_page_size {
-	__u32 page_shift;	/* Base page shift of segment (or 0) */
-	__u32 slb_enc;		/* SLB encoding for BookS */
-	struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
-   };
-
-An entry with a "page_shift" of 0 is unused.  Because the array is
-organized in increasing order, a lookup can stop when encountering
-such an entry.
-
-The "slb_enc" field provides the encoding to use in the SLB for the
-page size.  The bits are in positions such that the value can directly
-be OR'ed into the "vsid" argument of the slbmte instruction.
-
-The "enc" array is a list which for each of those segment base page
-sizes provides the list of supported actual page sizes (which can
-only be larger than or equal to the base page size), along with the
-corresponding encoding in the hash PTE.  Similarly, the array has
-8 entries sorted by increasing sizes and an entry with a "0" shift
-is an empty entry and a terminator:
-
-   struct kvm_ppc_one_page_size {
-	__u32 page_shift;	/* Page shift (or 0) */
-	__u32 pte_enc;		/* Encoding in the HPTE (>>12) */
-   };
-
-The "pte_enc" field provides a value that can be OR'ed into the hash
-PTE's RPN field (i.e., it needs to be shifted left by 12 to OR it
-into the second double word of the hash PTE).
-
-
-4.75 KVM_IRQFD
-
-Capability: KVM_CAP_IRQFD
-Architectures: x86 s390 arm arm64
-Type: vm ioctl
-Parameters: struct kvm_irqfd (in)
-Returns: 0 on success, -1 on error
-
-Allows setting an eventfd to directly trigger a guest interrupt.
-kvm_irqfd.fd specifies the file descriptor to use as the eventfd and
-kvm_irqfd.gsi specifies the irqchip pin toggled by this event.  When
-an event is triggered on the eventfd, an interrupt is injected into
-the guest using the specified gsi pin.  The irqfd is removed using
-the KVM_IRQFD_FLAG_DEASSIGN flag, specifying both kvm_irqfd.fd
-and kvm_irqfd.gsi.
-
-With KVM_CAP_IRQFD_RESAMPLE, KVM_IRQFD supports a de-assert and notify
-mechanism allowing emulation of level-triggered, irqfd-based
-interrupts.  When KVM_IRQFD_FLAG_RESAMPLE is set the user must pass an
-additional eventfd in the kvm_irqfd.resamplefd field.  When operating
-in resample mode, posting of an interrupt through kvm_irqfd.fd asserts
-the specified gsi in the irqchip.  When the irqchip is resampled, such
-as from an EOI, the gsi is de-asserted and the user is notified via
-kvm_irqfd.resamplefd.  It is the user's responsibility to re-queue
-the interrupt if the device making use of it still requires service.
-Note that closing the resamplefd is not sufficient to disable the
-irqfd.  The KVM_IRQFD_FLAG_RESAMPLE flag is only necessary on assignment
-and need not be specified with KVM_IRQFD_FLAG_DEASSIGN.
-
-On arm/arm64, where gsi routing is supported, the following can happen:
-- in case no routing entry is associated with this gsi, injection fails
-- in case the gsi is associated with an irqchip routing entry,
-  irqchip.pin + 32 corresponds to the injected SPI ID.
-- in case the gsi is associated with an MSI routing entry, the MSI
-  message and device ID are translated into an LPI (support restricted
-  to GICv3 ITS in-kernel emulation).
-
-4.76 KVM_PPC_ALLOCATE_HTAB
-
-Capability: KVM_CAP_PPC_ALLOC_HTAB
-Architectures: powerpc
-Type: vm ioctl
-Parameters: Pointer to u32 containing hash table order (in/out)
-Returns: 0 on success, -1 on error
-
-This requests the host kernel to allocate an MMU hash table for a
-guest using the PAPR paravirtualization interface.  This only does
-anything if the kernel is configured to use the Book 3S HV style of
-virtualization.  Otherwise the capability doesn't exist and the ioctl
-returns an ENOTTY error.  The rest of this description assumes Book 3S
-HV.
-
-There must be no vcpus running when this ioctl is called; if there
-are, it will do nothing and return an EBUSY error.
-
-The parameter is a pointer to a 32-bit unsigned integer variable
-containing the order (log base 2) of the desired size of the hash
-table, which must be between 18 and 46.  On successful return from the
-ioctl, the value will not be changed by the kernel.
-
-If no hash table has been allocated when any vcpu is asked to run
-(with the KVM_RUN ioctl), the host kernel will allocate a
-default-sized hash table (16 MB).
-
-If this ioctl is called when a hash table has already been allocated,
-with a different order from the existing hash table, the existing hash
-table will be freed and a new one allocated.  If this ioctl is called
-when a hash table of the same order as specified has already been
-allocated, the kernel will clear out the existing hash table (zero all
-HPTEs).  In either case, if the guest is using the virtualized real-mode
-area (VRMA) facility, the kernel will re-create the VRMA HPTEs on the
-next KVM_RUN of any vcpu.
-
-4.77 KVM_S390_INTERRUPT
-
-Capability: basic
-Architectures: s390
-Type: vm ioctl, vcpu ioctl
-Parameters: struct kvm_s390_interrupt (in)
-Returns: 0 on success, -1 on error
-
-Allows userspace to inject an interrupt into the guest.
Interrupts can be floating -(vm ioctl) or per cpu (vcpu ioctl), depending on the interrupt type. - -Interrupt parameters are passed via kvm_s390_interrupt: - -struct kvm_s390_interrupt { - __u32 type; - __u32 parm; - __u64 parm64; -}; - -type can be one of the following: - -KVM_S390_SIGP_STOP (vcpu) - sigp stop; optional flags in parm -KVM_S390_PROGRAM_INT (vcpu) - program check; code in parm -KVM_S390_SIGP_SET_PREFIX (vcpu) - sigp set prefix; prefix address in parm -KVM_S390_RESTART (vcpu) - restart -KVM_S390_INT_CLOCK_COMP (vcpu) - clock comparator interrupt -KVM_S390_INT_CPU_TIMER (vcpu) - CPU timer interrupt -KVM_S390_INT_VIRTIO (vm) - virtio external interrupt; external interrupt - parameters in parm and parm64 -KVM_S390_INT_SERVICE (vm) - sclp external interrupt; sclp parameter in parm -KVM_S390_INT_EMERGENCY (vcpu) - sigp emergency; source cpu in parm -KVM_S390_INT_EXTERNAL_CALL (vcpu) - sigp external call; source cpu in parm -KVM_S390_INT_IO(ai,cssid,ssid,schid) (vm) - compound value to indicate an - I/O interrupt (ai - adapter interrupt; cssid,ssid,schid - subchannel); - I/O interruption parameters in parm (subchannel) and parm64 (intparm, - interruption subclass) -KVM_S390_MCHK (vm, vcpu) - machine check interrupt; cr 14 bits in parm, - machine check interrupt code in parm64 (note that - machine checks needing further payload are not - supported by this ioctl) - -This is an asynchronous vcpu ioctl and can be invoked from any thread. - -4.78 KVM_PPC_GET_HTAB_FD - -Capability: KVM_CAP_PPC_HTAB_FD -Architectures: powerpc -Type: vm ioctl -Parameters: Pointer to struct kvm_get_htab_fd (in) -Returns: file descriptor number (>= 0) on success, -1 on error - -This returns a file descriptor that can be used either to read out the -entries in the guest's hashed page table (HPT), or to write entries to -initialize the HPT. The returned fd can only be written to if the -KVM_GET_HTAB_WRITE bit is set in the flags field of the argument, and -can only be read if that bit is clear. The argument struct looks like -this: - -/* For KVM_PPC_GET_HTAB_FD */ -struct kvm_get_htab_fd { - __u64 flags; - __u64 start_index; - __u64 reserved[2]; -}; - -/* Values for kvm_get_htab_fd.flags */ -#define KVM_GET_HTAB_BOLTED_ONLY ((__u64)0x1) -#define KVM_GET_HTAB_WRITE ((__u64)0x2) - -The `start_index' field gives the index in the HPT of the entry at -which to start reading. It is ignored when writing. - -Reads on the fd will initially supply information about all -"interesting" HPT entries. Interesting entries are those with the -bolted bit set, if the KVM_GET_HTAB_BOLTED_ONLY bit is set, otherwise -all entries. When the end of the HPT is reached, the read() will -return. If read() is called again on the fd, it will start again from -the beginning of the HPT, but will only return HPT entries that have -changed since they were last read. - -Data read or written is structured as a header (8 bytes) followed by a -series of valid HPT entries (16 bytes) each. The header indicates how -many valid HPT entries there are and how many invalid entries follow -the valid entries. The invalid entries are not represented explicitly -in the stream. The header format is: - -struct kvm_get_htab_header { - __u32 index; - __u16 n_valid; - __u16 n_invalid; -}; - -Writes to the fd create HPT entries starting at the index given in the -header; first `n_valid' valid entries with contents from the data -written, then `n_invalid' invalid entries, invalidating any previously -valid entries found. 
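-A sketch of the read side of this stream format (hypothetical helper;
-the buffer size is arbitrary, error handling is omitted, and it is
-assumed that read() returns 0 once there is nothing new to report):
-
-	#include <unistd.h>
-	#include <sys/ioctl.h>
-	#include <linux/kvm.h>
-
-	static void walk_htab(int vm_fd)
-	{
-		struct kvm_get_htab_fd args = { .flags = 0, .start_index = 0 };
-		int fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &args);
-		char buf[16384];
-		ssize_t len;
-
-		while ((len = read(fd, buf, sizeof(buf))) > 0) {
-			char *p = buf;
-
-			while (p < buf + len) {
-				struct kvm_get_htab_header *hdr = (void *)p;
-
-				/* n_valid 16-byte HPTEs follow the 8-byte
-				 * header; the n_invalid entries after them
-				 * are implicit in the stream. */
-				p += sizeof(*hdr) + hdr->n_valid * 16;
-				/* ... consume hdr->index, the HPTEs and
-				 * hdr->n_invalid here ... */
-			}
-		}
-		close(fd);
-	}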
- -4.79 KVM_CREATE_DEVICE - -Capability: KVM_CAP_DEVICE_CTRL -Type: vm ioctl -Parameters: struct kvm_create_device (in/out) -Returns: 0 on success, -1 on error -Errors: - ENODEV: The device type is unknown or unsupported - EEXIST: Device already created, and this type of device may not - be instantiated multiple times - - Other error conditions may be defined by individual device types or - have their standard meanings. - -Creates an emulated device in the kernel. The file descriptor returned -in fd can be used with KVM_SET/GET/HAS_DEVICE_ATTR. - -If the KVM_CREATE_DEVICE_TEST flag is set, only test whether the -device type is supported (not necessarily whether it can be created -in the current vm). - -Individual devices should not define flags. Attributes should be used -for specifying any behavior that is not implied by the device type -number. - -struct kvm_create_device { - __u32 type; /* in: KVM_DEV_TYPE_xxx */ - __u32 fd; /* out: device handle */ - __u32 flags; /* in: KVM_CREATE_DEVICE_xxx */ -}; - -4.80 KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR - -Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device, - KVM_CAP_VCPU_ATTRIBUTES for vcpu device -Type: device ioctl, vm ioctl, vcpu ioctl -Parameters: struct kvm_device_attr -Returns: 0 on success, -1 on error -Errors: - ENXIO: The group or attribute is unknown/unsupported for this device - or hardware support is missing. - EPERM: The attribute cannot (currently) be accessed this way - (e.g. read-only attribute, or attribute that only makes - sense when the device is in a different state) - - Other error conditions may be defined by individual device types. - -Gets/sets a specified piece of device configuration and/or state. The -semantics are device-specific. See individual device documentation in -the "devices" directory. As with ONE_REG, the size of the data -transferred is defined by the particular attribute. - -struct kvm_device_attr { - __u32 flags; /* no flags currently defined */ - __u32 group; /* device-defined */ - __u64 attr; /* group-defined */ - __u64 addr; /* userspace address of attr data */ -}; - -4.81 KVM_HAS_DEVICE_ATTR - -Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device, - KVM_CAP_VCPU_ATTRIBUTES for vcpu device -Type: device ioctl, vm ioctl, vcpu ioctl -Parameters: struct kvm_device_attr -Returns: 0 on success, -1 on error -Errors: - ENXIO: The group or attribute is unknown/unsupported for this device - or hardware support is missing. - -Tests whether a device supports a particular attribute. A successful -return indicates the attribute is implemented. It does not necessarily -indicate that the attribute can be read or written in the device's -current state. "addr" is ignored. - -4.82 KVM_ARM_VCPU_INIT - -Capability: basic -Architectures: arm, arm64 -Type: vcpu ioctl -Parameters: struct kvm_vcpu_init (in) -Returns: 0 on success; -1 on error -Errors: -  EINVAL:    the target is unknown, or the combination of features is invalid. -  ENOENT:    a features bit specified is unknown. - -This tells KVM what type of CPU to present to the guest, and what -optional features it should have.  This will cause a reset of the cpu -registers to their initial values.  If this is not called, KVM_RUN will -return ENOEXEC for that vcpu. - -Note that because some registers reflect machine topology, all vcpus -should be created before this ioctl is invoked. - -Userspace can call this function multiple times for a given vcpu, including -after the vcpu has been run. 
This will reset the vcpu to its initial
-state.  All calls to this function after the initial call must use the same
-target and same set of feature flags, otherwise EINVAL will be returned.
-
-Possible features:
-	- KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
-	  Depends on KVM_CAP_ARM_PSCI.  If not set, the CPU will be powered on
-	  and execute guest code when KVM_RUN is called.
-	- KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
-	  Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
-	- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
-	  backward compatible with v0.2) for the CPU.
-	  Depends on KVM_CAP_ARM_PSCI_0_2.
-	- KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
-	  Depends on KVM_CAP_ARM_PMU_V3.
-
-	- KVM_ARM_VCPU_PTRAUTH_ADDRESS: Enables Address Pointer authentication
-	  for arm64 only.
-	  Depends on KVM_CAP_ARM_PTRAUTH_ADDRESS.
-	  If KVM_CAP_ARM_PTRAUTH_ADDRESS and KVM_CAP_ARM_PTRAUTH_GENERIC are
-	  both present, then both KVM_ARM_VCPU_PTRAUTH_ADDRESS and
-	  KVM_ARM_VCPU_PTRAUTH_GENERIC must be requested or neither must be
-	  requested.
-
-	- KVM_ARM_VCPU_PTRAUTH_GENERIC: Enables Generic Pointer authentication
-	  for arm64 only.
-	  Depends on KVM_CAP_ARM_PTRAUTH_GENERIC.
-	  If KVM_CAP_ARM_PTRAUTH_ADDRESS and KVM_CAP_ARM_PTRAUTH_GENERIC are
-	  both present, then both KVM_ARM_VCPU_PTRAUTH_ADDRESS and
-	  KVM_ARM_VCPU_PTRAUTH_GENERIC must be requested or neither must be
-	  requested.
-
-	- KVM_ARM_VCPU_SVE: Enables SVE for the CPU (arm64 only).
-	  Depends on KVM_CAP_ARM_SVE.
-	  Requires KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE):
-
-	   * After KVM_ARM_VCPU_INIT:
-
-	      - KVM_REG_ARM64_SVE_VLS may be read using KVM_GET_ONE_REG: the
-	        initial value of this pseudo-register indicates the best set of
-	        vector lengths possible for a vcpu on this host.
-
-	   * Before KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE):
-
-	      - KVM_RUN and KVM_GET_REG_LIST are not available;
-
-	      - KVM_GET_ONE_REG and KVM_SET_ONE_REG cannot be used to access
-	        the scalable architectural SVE registers
-	        KVM_REG_ARM64_SVE_ZREG(), KVM_REG_ARM64_SVE_PREG() or
-	        KVM_REG_ARM64_SVE_FFR;
-
-	      - KVM_REG_ARM64_SVE_VLS may optionally be written using
-	        KVM_SET_ONE_REG, to modify the set of vector lengths available
-	        for the vcpu.
-
-	   * After KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE):
-
-	      - the KVM_REG_ARM64_SVE_VLS pseudo-register is immutable, and can
-	        no longer be written using KVM_SET_ONE_REG.
-
-4.83 KVM_ARM_PREFERRED_TARGET
-
-Capability: basic
-Architectures: arm, arm64
-Type: vm ioctl
-Parameters: struct kvm_vcpu_init (out)
-Returns: 0 on success; -1 on error
-Errors:
-  ENODEV:    no preferred target available for the host
-
-This queries KVM for the preferred CPU target type which can be emulated
-by KVM on the underlying host.
-
-The ioctl returns a struct kvm_vcpu_init instance containing information
-about the preferred CPU target type and recommended features for it.  The
-kvm_vcpu_init->features bitmap returned will have feature bits set if
-the preferred target recommends setting these features, but this is
-not mandatory.
-
-The information returned by this ioctl can be used to prepare an instance
-of struct kvm_vcpu_init for the KVM_ARM_VCPU_INIT ioctl, which will result
-in a VCPU matching the underlying host.
-
-
-4.84 KVM_GET_REG_LIST
-
-Capability: basic
-Architectures: arm, arm64, mips
-Type: vcpu ioctl
-Parameters: struct kvm_reg_list (in/out)
-Returns: 0 on success; -1 on error
-Errors:
-  E2BIG:     the reg index list is too big to fit in the array specified by
-             the user (the number required will be written into n).
-
-struct kvm_reg_list {
-	__u64 n; /* number of registers in reg[] */
-	__u64 reg[0];
-};
-
-This ioctl returns the guest registers that are supported for the
-KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
-
-
-4.85 KVM_ARM_SET_DEVICE_ADDR (deprecated)
-
-Capability: KVM_CAP_ARM_SET_DEVICE_ADDR
-Architectures: arm, arm64
-Type: vm ioctl
-Parameters: struct kvm_arm_device_addr (in)
-Returns: 0 on success, -1 on error
-Errors:
-  ENODEV: The device id is unknown
-  ENXIO:  Device not supported on current system
-  EEXIST: Address already set
-  E2BIG:  Address outside guest physical address space
-  EBUSY:  Address overlaps with other device range
-
-struct kvm_arm_device_addr {
-	__u64 id;
-	__u64 addr;
-};
-
-Specify a device address in the guest's physical address space where guests
-can access emulated or directly exposed devices, which the host kernel needs
-to know about.  The id field is an architecture specific identifier for a
-specific device.
-
-ARM/arm64 divides the id field into two parts, a device id and an
-address type id specific to the individual device.
-
-  bits:  | 63        ...       32 | 31    ...    16 | 15    ...    0 |
-  field: | 0x00000000             | device id       | addr type id   |
-
-ARM/arm64 currently only require this when using the in-kernel GIC
-support for the hardware VGIC features, using KVM_ARM_DEVICE_VGIC_V2
-as the device id.  When setting the base address for the guest's
-mapping of the VGIC virtual CPU and distributor interface, the ioctl
-must be called after calling KVM_CREATE_IRQCHIP, but before calling
-KVM_RUN on any of the VCPUs.  Calling this ioctl twice for any of the
-base addresses will return -EEXIST.
-
-Note, this IOCTL is deprecated and the more flexible SET/GET_DEVICE_ATTR API
-should be used instead.
-
-
-4.86 KVM_PPC_RTAS_DEFINE_TOKEN
-
-Capability: KVM_CAP_PPC_RTAS
-Architectures: ppc
-Type: vm ioctl
-Parameters: struct kvm_rtas_token_args
-Returns: 0 on success, -1 on error
-
-Defines a token value for an RTAS (Run Time Abstraction Services)
-service in order to allow it to be handled in the kernel.  The
-argument struct gives the name of the service, which must be the name
-of a service that has a kernel-side implementation.  If the token
-value is non-zero, it will be associated with that service, and
-subsequent RTAS calls by the guest specifying that token will be
-handled by the kernel.  If the token value is 0, then any token
-associated with the service will be forgotten, and subsequent RTAS
-calls by the guest for that service will be passed to userspace to be
-handled.
-
-4.87 KVM_SET_GUEST_DEBUG
-
-Capability: KVM_CAP_SET_GUEST_DEBUG
-Architectures: x86, s390, ppc, arm64
-Type: vcpu ioctl
-Parameters: struct kvm_guest_debug (in)
-Returns: 0 on success; -1 on error
-
-struct kvm_guest_debug {
-	__u32 control;
-	__u32 pad;
-	struct kvm_guest_debug_arch arch;
-};
-
-Set up the processor specific debug registers and configure the vcpu for
-handling guest debug events.  There are two parts to the structure; the
-first, a control bitfield, indicates the type of debug events to handle
-when running.
Common control bits are:
-
-  - KVM_GUESTDBG_ENABLE:        guest debugging is enabled
-  - KVM_GUESTDBG_SINGLESTEP:    the next run should single-step
-
-The top 16 bits of the control field are architecture specific control
-flags which can include the following:
-
-  - KVM_GUESTDBG_USE_SW_BP:     using software breakpoints [x86, arm64]
-  - KVM_GUESTDBG_USE_HW_BP:     using hardware breakpoints [x86, s390, arm64]
-  - KVM_GUESTDBG_INJECT_DB:     inject DB type exception [x86]
-  - KVM_GUESTDBG_INJECT_BP:     inject BP type exception [x86]
-  - KVM_GUESTDBG_EXIT_PENDING:  trigger an immediate guest exit [s390]
-
-For example, KVM_GUESTDBG_USE_SW_BP indicates that software breakpoints
-are enabled in memory, so we need to ensure breakpoint exceptions are
-correctly trapped and the KVM run loop exits at the breakpoint instead of
-running off into the normal guest vector.  For KVM_GUESTDBG_USE_HW_BP
-we need to ensure the guest vCPU's architecture specific registers are
-updated to the correct (supplied) values.
-
-The second part of the structure is architecture specific and
-typically contains a set of debug registers.
-
-For arm64 the number of debug registers is implementation defined and
-can be determined by querying the KVM_CAP_GUEST_DEBUG_HW_BPS and
-KVM_CAP_GUEST_DEBUG_HW_WPS capabilities which return a positive number
-indicating the number of supported registers.
-
-When debug events occur, the main run loop exits with the reason
-KVM_EXIT_DEBUG, with the kvm_debug_exit_arch part of the kvm_run
-structure containing architecture specific debug information.
-
-4.88 KVM_GET_EMULATED_CPUID
-
-Capability: KVM_CAP_EXT_EMUL_CPUID
-Architectures: x86
-Type: system ioctl
-Parameters: struct kvm_cpuid2 (in/out)
-Returns: 0 on success, -1 on error
-
-struct kvm_cpuid2 {
-	__u32 nent;
-	__u32 flags;
-	struct kvm_cpuid_entry2 entries[0];
-};
-
-The member 'flags' is used for passing flags from userspace.
-
-#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX		BIT(0)
-#define KVM_CPUID_FLAG_STATEFUL_FUNC		BIT(1)
-#define KVM_CPUID_FLAG_STATE_READ_NEXT		BIT(2)
-
-struct kvm_cpuid_entry2 {
-	__u32 function;
-	__u32 index;
-	__u32 flags;
-	__u32 eax;
-	__u32 ebx;
-	__u32 ecx;
-	__u32 edx;
-	__u32 padding[3];
-};
-
-This ioctl returns x86 cpuid features which are emulated by
-kvm.  Userspace can use the information returned by this ioctl to query
-which features are emulated by kvm instead of being present natively.
-
-Userspace invokes KVM_GET_EMULATED_CPUID by passing a kvm_cpuid2
-structure with the 'nent' field indicating the number of entries in
-the variable-size array 'entries'.  If the number of entries is too low
-to describe the cpu capabilities, an error (E2BIG) is returned.  If the
-number is too high, the 'nent' field is adjusted and an error (ENOMEM)
-is returned.  If the number is just right, the 'nent' field is adjusted
-to the number of valid entries in the 'entries' array, which is then
-filled.
-
-The entries returned are the set CPUID bits of the respective features
-which kvm emulates, as returned by the CPUID instruction, with unknown
-or unsupported feature bits cleared.
-
-Features like x2apic, for example, may not be present in the host cpu
-but are exposed by kvm in KVM_GET_SUPPORTED_CPUID because they can be
-emulated efficiently and are thus not included here.
-
-The fields in each entry are defined as follows:
-
-  function: the eax value used to obtain the entry
-  index: the ecx value used to obtain the entry (for entries that are
-         affected by ecx)
-  flags: an OR of zero or more of the following:
-        KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
-           if the index field is valid
-        KVM_CPUID_FLAG_STATEFUL_FUNC:
-           if cpuid for this function returns different values for successive
-           invocations; there will be several entries with the same function,
-           all with this flag set
-        KVM_CPUID_FLAG_STATE_READ_NEXT:
-           for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
-           the first entry to be read by a cpu
-   eax, ebx, ecx, edx: the values returned by the cpuid instruction for
-         this function/index combination
-
-4.89 KVM_S390_MEM_OP
-
-Capability: KVM_CAP_S390_MEM_OP
-Architectures: s390
-Type: vcpu ioctl
-Parameters: struct kvm_s390_mem_op (in)
-Returns: = 0 on success,
-         < 0 on generic error (e.g. -EFAULT or -ENOMEM),
-         > 0 if an exception occurred while walking the page tables
-
-Read or write data from/to the logical (virtual) memory of a VCPU.
-
-Parameters are specified via the following structure:
-
-struct kvm_s390_mem_op {
-	__u64 gaddr;		/* the guest address */
-	__u64 flags;		/* flags */
-	__u32 size;		/* number of bytes */
-	__u32 op;		/* type of operation */
-	__u64 buf;		/* buffer in userspace */
-	__u8 ar;		/* the access register number */
-	__u8 reserved[31];	/* should be set to 0 */
-};
-
-The type of operation is specified in the "op" field.  It is either
-KVM_S390_MEMOP_LOGICAL_READ for reading from logical memory space or
-KVM_S390_MEMOP_LOGICAL_WRITE for writing to logical memory space.  The
-KVM_S390_MEMOP_F_CHECK_ONLY flag can be set in the "flags" field to check
-whether the corresponding memory access would create an access exception
-(without touching the data in the memory at the destination).  In case an
-access exception occurs while walking the MMU tables of the guest, the
-ioctl returns a positive error number to indicate the type of exception.
-This exception is also raised directly at the corresponding VCPU if the
-flag KVM_S390_MEMOP_F_INJECT_EXCEPTION is set in the "flags" field.
-
-The start address of the memory region has to be specified in the "gaddr"
-field, and the length of the region in the "size" field.  "buf" is the buffer
-supplied by the userspace application where the read data should be written
-to for KVM_S390_MEMOP_LOGICAL_READ, or where the data that should be written
-is stored for a KVM_S390_MEMOP_LOGICAL_WRITE.  "buf" is unused and can be NULL
-when KVM_S390_MEMOP_F_CHECK_ONLY is specified.  "ar" designates the access
-register number to be used.
-
-The "reserved" field is meant for future extensions.  It is not used by
-KVM with the currently defined set of flags.
-
-4.90 KVM_S390_GET_SKEYS
-
-Capability: KVM_CAP_S390_SKEYS
-Architectures: s390
-Type: vm ioctl
-Parameters: struct kvm_s390_skeys
-Returns: 0 on success, KVM_S390_GET_SKEYS_NONE if guest is not using storage
-         keys, negative value on error
-
-This ioctl is used to get guest storage key values on the s390
-architecture.  The ioctl takes parameters via the kvm_s390_skeys struct.
-
-struct kvm_s390_skeys {
-	__u64 start_gfn;
-	__u64 count;
-	__u64 skeydata_addr;
-	__u32 flags;
-	__u32 reserved[9];
-};
-
-The start_gfn field is the number of the first guest frame whose storage keys
-you want to get.
-
-The count field is the number of consecutive frames (starting from start_gfn)
-whose storage keys to get.
The count field must be at least 1 and the maximum
-allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX.  Values outside this
-range will cause the ioctl to return -EINVAL.
-
-The skeydata_addr field is the address of a buffer large enough to hold count
-bytes.  This buffer will be filled with storage key data by the ioctl.
-
-4.91 KVM_S390_SET_SKEYS
-
-Capability: KVM_CAP_S390_SKEYS
-Architectures: s390
-Type: vm ioctl
-Parameters: struct kvm_s390_skeys
-Returns: 0 on success, negative value on error
-
-This ioctl is used to set guest storage key values on the s390
-architecture.  The ioctl takes parameters via the kvm_s390_skeys struct.
-See section on KVM_S390_GET_SKEYS for struct definition.
-
-The start_gfn field is the number of the first guest frame whose storage keys
-you want to set.
-
-The count field is the number of consecutive frames (starting from start_gfn)
-whose storage keys to set.  The count field must be at least 1 and the maximum
-allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX.  Values outside this
-range will cause the ioctl to return -EINVAL.
-
-The skeydata_addr field is the address of a buffer containing count bytes of
-storage keys.  Each byte in the buffer will be set as the storage key for a
-single frame starting at start_gfn for count frames.
-
-Note: If any architecturally invalid key value is found in the given data then
-the ioctl will return -EINVAL.
-
-4.92 KVM_S390_IRQ
-
-Capability: KVM_CAP_S390_INJECT_IRQ
-Architectures: s390
-Type: vcpu ioctl
-Parameters: struct kvm_s390_irq (in)
-Returns: 0 on success, -1 on error
-Errors:
-  EINVAL: interrupt type is invalid
-          type is KVM_S390_SIGP_STOP and flag parameter is invalid value
-          type is KVM_S390_INT_EXTERNAL_CALL and code is bigger
-          than the maximum number of VCPUs
-  EBUSY:  type is KVM_S390_SIGP_SET_PREFIX and vcpu is not stopped
-          type is KVM_S390_SIGP_STOP and a stop irq is already pending
-          type is KVM_S390_INT_EXTERNAL_CALL and an external call interrupt
-          is already pending
-
-Allows userspace to inject an interrupt into the guest.
-
-Using struct kvm_s390_irq as a parameter allows injecting additional
-payload which is not possible via KVM_S390_INTERRUPT.
-
-Interrupt parameters are passed via kvm_s390_irq:
-
-struct kvm_s390_irq {
-	__u64 type;
-	union {
-		struct kvm_s390_io_info io;
-		struct kvm_s390_ext_info ext;
-		struct kvm_s390_pgm_info pgm;
-		struct kvm_s390_emerg_info emerg;
-		struct kvm_s390_extcall_info extcall;
-		struct kvm_s390_prefix_info prefix;
-		struct kvm_s390_stop_info stop;
-		struct kvm_s390_mchk_info mchk;
-		char reserved[64];
-	} u;
-};
-
-type can be one of the following:
-
-KVM_S390_SIGP_STOP - sigp stop; parameter in .stop
-KVM_S390_PROGRAM_INT - program check; parameters in .pgm
-KVM_S390_SIGP_SET_PREFIX - sigp set prefix; parameters in .prefix
-KVM_S390_RESTART - restart; no parameters
-KVM_S390_INT_CLOCK_COMP - clock comparator interrupt; no parameters
-KVM_S390_INT_CPU_TIMER - CPU timer interrupt; no parameters
-KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg
-KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall
-KVM_S390_MCHK - machine check interrupt; parameters in .mchk
-
-This is an asynchronous vcpu ioctl and can be invoked from any thread.
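-A minimal sketch of injecting one of the parameterless types listed
-above (hypothetical helper; error handling left to the caller):
-
-	#include <string.h>
-	#include <sys/ioctl.h>
-	#include <linux/kvm.h>
-
-	static int inject_restart(int vcpu_fd)
-	{
-		struct kvm_s390_irq irq;
-
-		memset(&irq, 0, sizeof(irq));	/* clear the payload union */
-		irq.type = KVM_S390_RESTART;	/* restart; no parameters */
-		return ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
-	}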
- -4.94 KVM_S390_GET_IRQ_STATE - -Capability: KVM_CAP_S390_IRQ_STATE -Architectures: s390 -Type: vcpu ioctl -Parameters: struct kvm_s390_irq_state (out) -Returns: >= number of bytes copied into buffer, - -EINVAL if buffer size is 0, - -ENOBUFS if buffer size is too small to fit all pending interrupts, - -EFAULT if the buffer address was invalid - -This ioctl allows userspace to retrieve the complete state of all currently -pending interrupts in a single buffer. Use cases include migration -and introspection. The parameter structure contains the address of a -userspace buffer and its length: - -struct kvm_s390_irq_state { - __u64 buf; - __u32 flags; /* will stay unused for compatibility reasons */ - __u32 len; - __u32 reserved[4]; /* will stay unused for compatibility reasons */ -}; - -Userspace passes in the above struct and for each pending interrupt a -struct kvm_s390_irq is copied to the provided buffer. - -The structure contains a flags and a reserved field for future extensions. As -the kernel never checked for flags == 0 and QEMU never pre-zeroed flags and -reserved, these fields can not be used in the future without breaking -compatibility. - -If -ENOBUFS is returned the buffer provided was too small and userspace -may retry with a bigger buffer. - -4.95 KVM_S390_SET_IRQ_STATE - -Capability: KVM_CAP_S390_IRQ_STATE -Architectures: s390 -Type: vcpu ioctl -Parameters: struct kvm_s390_irq_state (in) -Returns: 0 on success, - -EFAULT if the buffer address was invalid, - -EINVAL for an invalid buffer length (see below), - -EBUSY if there were already interrupts pending, - errors occurring when actually injecting the - interrupt. See KVM_S390_IRQ. - -This ioctl allows userspace to set the complete state of all cpu-local -interrupts currently pending for the vcpu. It is intended for restoring -interrupt state after a migration. The input parameter is a userspace buffer -containing a struct kvm_s390_irq_state: - -struct kvm_s390_irq_state { - __u64 buf; - __u32 flags; /* will stay unused for compatibility reasons */ - __u32 len; - __u32 reserved[4]; /* will stay unused for compatibility reasons */ -}; - -The restrictions for flags and reserved apply as well. -(see KVM_S390_GET_IRQ_STATE) - -The userspace memory referenced by buf contains a struct kvm_s390_irq -for each interrupt to be injected into the guest. -If one of the interrupts could not be injected for some reason the -ioctl aborts. - -len must be a multiple of sizeof(struct kvm_s390_irq). It must be > 0 -and it must not exceed (max_vcpus + 32) * sizeof(struct kvm_s390_irq), -which is the maximum number of possibly pending cpu-local interrupts. - -4.96 KVM_SMI - -Capability: KVM_CAP_X86_SMM -Architectures: x86 -Type: vcpu ioctl -Parameters: none -Returns: 0 on success, -1 on error - -Queues an SMI on the thread's vcpu. - -4.97 KVM_CAP_PPC_MULTITCE - -Capability: KVM_CAP_PPC_MULTITCE -Architectures: ppc -Type: vm - -This capability means the kernel is capable of handling hypercalls -H_PUT_TCE_INDIRECT and H_STUFF_TCE without passing those into the user -space. This significantly accelerates DMA operations for PPC KVM guests. -User space should expect that its handlers for these hypercalls -are not going to be called if user space previously registered LIOBN -in KVM (via KVM_CREATE_SPAPR_TCE or similar calls). - -In order to enable H_PUT_TCE_INDIRECT and H_STUFF_TCE use in the guest, -user space might have to advertise it for the guest. 
For example, -IBM pSeries (sPAPR) guest starts using them if "hcall-multi-tce" is -present in the "ibm,hypertas-functions" device-tree property. - -The hypercalls mentioned above may or may not be processed successfully -in the kernel based fast path. If they can not be handled by the kernel, -they will get passed on to user space. So user space still has to have -an implementation for these despite the in kernel acceleration. - -This capability is always enabled. - -4.98 KVM_CREATE_SPAPR_TCE_64 - -Capability: KVM_CAP_SPAPR_TCE_64 -Architectures: powerpc -Type: vm ioctl -Parameters: struct kvm_create_spapr_tce_64 (in) -Returns: file descriptor for manipulating the created TCE table - -This is an extension for KVM_CAP_SPAPR_TCE which only supports 32bit -windows, described in 4.62 KVM_CREATE_SPAPR_TCE - -This capability uses extended struct in ioctl interface: - -/* for KVM_CAP_SPAPR_TCE_64 */ -struct kvm_create_spapr_tce_64 { - __u64 liobn; - __u32 page_shift; - __u32 flags; - __u64 offset; /* in pages */ - __u64 size; /* in pages */ -}; - -The aim of extension is to support an additional bigger DMA window with -a variable page size. -KVM_CREATE_SPAPR_TCE_64 receives a 64bit window size, an IOMMU page shift and -a bus offset of the corresponding DMA window, @size and @offset are numbers -of IOMMU pages. - -@flags are not used at the moment. - -The rest of functionality is identical to KVM_CREATE_SPAPR_TCE. - -4.99 KVM_REINJECT_CONTROL - -Capability: KVM_CAP_REINJECT_CONTROL -Architectures: x86 -Type: vm ioctl -Parameters: struct kvm_reinject_control (in) -Returns: 0 on success, - -EFAULT if struct kvm_reinject_control cannot be read, - -ENXIO if KVM_CREATE_PIT or KVM_CREATE_PIT2 didn't succeed earlier. - -i8254 (PIT) has two modes, reinject and !reinject. The default is reinject, -where KVM queues elapsed i8254 ticks and monitors completion of interrupt from -vector(s) that i8254 injects. Reinject mode dequeues a tick and injects its -interrupt whenever there isn't a pending interrupt from i8254. -!reinject mode injects an interrupt as soon as a tick arrives. - -struct kvm_reinject_control { - __u8 pit_reinject; - __u8 reserved[31]; -}; - -pit_reinject = 0 (!reinject mode) is recommended, unless running an old -operating system that uses the PIT for timing (e.g. Linux 2.4.x). - -4.100 KVM_PPC_CONFIGURE_V3_MMU - -Capability: KVM_CAP_PPC_RADIX_MMU or KVM_CAP_PPC_HASH_MMU_V3 -Architectures: ppc -Type: vm ioctl -Parameters: struct kvm_ppc_mmuv3_cfg (in) -Returns: 0 on success, - -EFAULT if struct kvm_ppc_mmuv3_cfg cannot be read, - -EINVAL if the configuration is invalid - -This ioctl controls whether the guest will use radix or HPT (hashed -page table) translation, and sets the pointer to the process table for -the guest. - -struct kvm_ppc_mmuv3_cfg { - __u64 flags; - __u64 process_table; -}; - -There are two bits that can be set in flags; KVM_PPC_MMUV3_RADIX and -KVM_PPC_MMUV3_GTSE. KVM_PPC_MMUV3_RADIX, if set, configures the guest -to use radix tree translation, and if clear, to use HPT translation. -KVM_PPC_MMUV3_GTSE, if set and if KVM permits it, configures the guest -to be able to use the global TLB and SLB invalidation instructions; -if clear, the guest may not use these instructions. - -The process_table field specifies the address and size of the guest -process table, which is in the guest's space. This field is formatted -as the second doubleword of the partition table entry, as defined in -the Power ISA V3.00, Book III section 5.7.6.1. 
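-
-For illustration, a minimal sketch of configuring a radix guest with this
-ioctl; "vm_fd" and "process_table" (formatted as described above) are
-assumed to be supplied by the caller:
-
-struct kvm_ppc_mmuv3_cfg cfg = {
-	.flags         = KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE,
-	.process_table = process_table,
-};
-
-if (ioctl(vm_fd, KVM_PPC_CONFIGURE_V3_MMU, &cfg) < 0)
-	/* -EINVAL: the requested configuration is invalid */;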
-
-4.101 KVM_PPC_GET_RMMU_INFO
-
-Capability: KVM_CAP_PPC_RADIX_MMU
-Architectures: ppc
-Type: vm ioctl
-Parameters: struct kvm_ppc_rmmu_info (out)
-Returns: 0 on success,
-	 -EFAULT if struct kvm_ppc_rmmu_info cannot be written,
-	 -EINVAL if no useful information can be returned
-
-This ioctl returns a structure containing two things: (a) a list
-containing supported radix tree geometries, and (b) a list that maps
-page sizes to put in the "AP" (actual page size) field for the tlbie
-(TLB invalidate entry) instruction.
-
-struct kvm_ppc_rmmu_info {
-	struct kvm_ppc_radix_geom {
-		__u8	page_shift;
-		__u8	level_bits[4];
-		__u8	pad[3];
-	}	geometries[8];
-	__u32	ap_encodings[8];
-};
-
-The geometries[] field gives up to 8 supported geometries for the
-radix page table, in terms of the log base 2 of the smallest page
-size, and the number of bits indexed at each level of the tree, from
-the PTE level up to the PGD level in that order. Any unused entries
-will have 0 in the page_shift field.
-
-The ap_encodings field gives the supported page sizes and their AP field
-encodings, encoded with the AP value in the top 3 bits and the log
-base 2 of the page size in the bottom 6 bits.
-
-4.102 KVM_PPC_RESIZE_HPT_PREPARE
-
-Capability: KVM_CAP_SPAPR_RESIZE_HPT
-Architectures: powerpc
-Type: vm ioctl
-Parameters: struct kvm_ppc_resize_hpt (in)
-Returns: 0 on successful completion,
-	 >0 if a new HPT is being prepared, the value is an estimated
-         number of milliseconds until preparation is complete
-         -EFAULT if struct kvm_ppc_resize_hpt cannot be read,
-         -EINVAL if the supplied shift or flags are invalid
-         -ENOMEM if unable to allocate the new HPT
-         -ENOSPC if there was a hash collision when moving existing
-                  HPT entries to the new HPT
-         -EIO on other error conditions
-
-Used to implement the PAPR extension for runtime resizing of a guest's
-Hashed Page Table (HPT). Specifically this starts, stops or monitors
-the preparation of a new potential HPT for the guest, essentially
-implementing the H_RESIZE_HPT_PREPARE hypercall.
-
-If called with shift > 0 when there is no pending HPT for the guest,
-this begins preparation of a new pending HPT of size 2^(shift) bytes.
-It then returns a positive integer with the estimated number of
-milliseconds until preparation is complete.
-
-If called when there is a pending HPT whose size does not match that
-requested in the parameters, this discards the existing pending HPT and
-creates a new one as above.
-
-If called when there is a pending HPT of the size requested, this will:
-  * If preparation of the pending HPT is already complete, return 0
-  * If preparation of the pending HPT has failed, return an error
-    code, then discard the pending HPT.
-  * If preparation of the pending HPT is still in progress, return an
-    estimated number of milliseconds until preparation is complete.
-
-If called with shift == 0, discards any currently pending HPT and
-returns 0 (i.e. cancels any in-progress preparation).
-
-flags is reserved for future expansion, currently setting any bits in
-flags will result in -EINVAL.
-
-Normally this will be called repeatedly with the same parameters until
-it returns <= 0. The first call will initiate preparation, subsequent
-ones will monitor preparation until it completes or fails.
-
-struct kvm_ppc_resize_hpt {
-	__u64 flags;
-	__u32 shift;
-	__u32 pad;
-};
-
-4.103 KVM_PPC_RESIZE_HPT_COMMIT
-
-Capability: KVM_CAP_SPAPR_RESIZE_HPT
-Architectures: powerpc
-Type: vm ioctl
-Parameters: struct kvm_ppc_resize_hpt (in)
-Returns: 0 on successful completion,
-	 -EFAULT if struct kvm_ppc_resize_hpt cannot be read,
-	 -EINVAL if the supplied shift or flags are invalid
-	 -ENXIO if there is no pending HPT, or the pending HPT doesn't
-                 have the requested size
-	 -EBUSY if the pending HPT is not fully prepared
-	 -ENOSPC if there was a hash collision when moving existing
-                  HPT entries to the new HPT
-	 -EIO on other error conditions
-
-Used to implement the PAPR extension for runtime resizing of a guest's
-Hashed Page Table (HPT). Specifically this requests that the guest be
-transferred to working with the new HPT, essentially implementing the
-H_RESIZE_HPT_COMMIT hypercall.
-
-This should only be called after KVM_PPC_RESIZE_HPT_PREPARE has
-returned 0 with the same parameters. In other cases
-KVM_PPC_RESIZE_HPT_COMMIT will return an error (usually -ENXIO or
--EBUSY, though others may be possible if the preparation was started,
-but failed).
-
-This will have undefined effects on the guest if it has not already
-placed itself in a quiescent state where no vcpu will make MMU enabled
-memory accesses.
-
-On successful completion, the pending HPT will become the guest's active
-HPT and the previous HPT will be discarded.
-
-On failure, the guest will still be operating on its previous HPT.
-
-struct kvm_ppc_resize_hpt {
-	__u64 flags;
-	__u32 shift;
-	__u32 pad;
-};
-
-4.104 KVM_X86_GET_MCE_CAP_SUPPORTED
-
-Capability: KVM_CAP_MCE
-Architectures: x86
-Type: system ioctl
-Parameters: u64 mce_cap (out)
-Returns: 0 on success, -1 on error
-
-Returns supported MCE capabilities. The u64 mce_cap parameter
-has the same format as the MSR_IA32_MCG_CAP register. Supported
-capabilities will have the corresponding bits set.
-
-4.105 KVM_X86_SETUP_MCE
-
-Capability: KVM_CAP_MCE
-Architectures: x86
-Type: vcpu ioctl
-Parameters: u64 mcg_cap (in)
-Returns: 0 on success,
-	 -EFAULT if u64 mcg_cap cannot be read,
-	 -EINVAL if the requested number of banks is invalid,
-	 -EINVAL if requested MCE capability is not supported.
-
-Initializes MCE support for use. The u64 mcg_cap parameter
-has the same format as the MSR_IA32_MCG_CAP register and
-specifies which capabilities should be enabled. The maximum
-supported number of error-reporting banks can be retrieved when
-checking for KVM_CAP_MCE. The supported capabilities can be
-retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED.
-
-4.106 KVM_X86_SET_MCE
-
-Capability: KVM_CAP_MCE
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_x86_mce (in)
-Returns: 0 on success,
-	 -EFAULT if struct kvm_x86_mce cannot be read,
-	 -EINVAL if the bank number is invalid,
-	 -EINVAL if VAL bit is not set in status field.
-
-Inject a machine check error (MCE) into the guest. The input
-parameter is:
-
-struct kvm_x86_mce {
-	__u64 status;
-	__u64 addr;
-	__u64 misc;
-	__u64 mcg_status;
-	__u8 bank;
-	__u8 pad1[7];
-	__u64 pad2[3];
-};
-
-If the MCE being reported is an uncorrected error, KVM will
-inject it as an MCE exception into the guest. If the guest
-MCG_STATUS register reports that an MCE is in progress, KVM
-causes a KVM_EXIT_SHUTDOWN vmexit.
-
-Otherwise, if the MCE is a corrected error, KVM will just
-store it in the corresponding bank (provided this bank is
-not holding a previously reported uncorrected error).
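-
-For illustration, a sketch of injecting a corrected error into bank 0; the
-VAL bit of the status field (bit 63 in the MCG architecture, an assumption
-of this sketch) must be set or the ioctl fails with -EINVAL as noted above
-("vcpu_fd" is assumed to be an open vcpu file descriptor):
-
-struct kvm_x86_mce mce = {
-	.status = 1ULL << 63,	/* VAL: the error information is valid */
-	.bank   = 0,
-};
-
-if (ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce) < 0)
-	/* handle -EFAULT/-EINVAL as described above */;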
-
-4.107 KVM_S390_GET_CMMA_BITS
-
-Capability: KVM_CAP_S390_CMMA_MIGRATION
-Architectures: s390
-Type: vm ioctl
-Parameters: struct kvm_s390_cmma_log (in, out)
-Returns: 0 on success, a negative value on error
-
-This ioctl is used to get the values of the CMMA bits on the s390
-architecture. It is meant to be used in two scenarios:
-- During live migration to save the CMMA values. Live migration needs
-  to be enabled via the KVM_REQ_START_MIGRATION VM property.
-- To non-destructively peek at the CMMA values, with the flag
-  KVM_S390_CMMA_PEEK set.
-
-The ioctl takes parameters via the kvm_s390_cmma_log struct. The desired
-values are written to a buffer whose location is indicated via the "values"
-member in the kvm_s390_cmma_log struct. The values in the input struct are
-also updated as needed.
-Each CMMA value takes up one byte.
-
-struct kvm_s390_cmma_log {
-	__u64 start_gfn;
-	__u32 count;
-	__u32 flags;
-	union {
-		__u64 remaining;
-		__u64 mask;
-	};
-	__u64 values;
-};
-
-start_gfn is the number of the first guest frame whose CMMA values are
-to be retrieved,
-
-count is the length of the buffer in bytes,
-
-values points to the buffer where the result will be written.
-
-If count is greater than KVM_S390_SKEYS_MAX, then it is considered to be
-KVM_S390_SKEYS_MAX. KVM_S390_SKEYS_MAX is re-used for consistency with
-other ioctls.
-
-The result is written in the buffer pointed to by the field values, and
-the values of the input parameter are updated as follows.
-
-Depending on the flags, different actions are performed. The only
-supported flag so far is KVM_S390_CMMA_PEEK.
-
-The default behaviour if KVM_S390_CMMA_PEEK is not set is:
-start_gfn will indicate the first page frame whose CMMA bits were dirty.
-It is not necessarily the same as the one passed as input, as clean pages
-are skipped.
-
-count will indicate the number of bytes actually written in the buffer.
-It can (and very often will) be smaller than the input value, since the
-buffer is only filled until 16 bytes of clean values are found (which
-are then not copied in the buffer). Since a CMMA migration block needs
-the base address and the length, for a total of 16 bytes, we will send
-back some clean data if there is some dirty data afterwards, as long as
-the size of the clean data does not exceed the size of the header. This
-makes it possible to minimize the amount of data to be saved or transferred
-over the network at the expense of more roundtrips to userspace. The next
-invocation of the ioctl will skip over all the clean values, saving
-potentially more than just the 16 bytes we found.
-
-If KVM_S390_CMMA_PEEK is set:
-the existing storage attributes are read even when not in migration
-mode, and no other action is performed;
-
-the output start_gfn will be equal to the input start_gfn,
-
-the output count will be equal to the input count, except if the end of
-memory has been reached.
-
-In both cases:
-the field "remaining" will indicate the total number of dirty CMMA values
-still remaining, or 0 if KVM_S390_CMMA_PEEK is set and migration mode is
-not enabled.
-
-mask is unused.
-
-values points to the userspace buffer where the result will be stored.
-
-This ioctl can fail with -ENOMEM if not enough memory can be allocated to
-complete the task, with -ENXIO if CMMA is not enabled, with -EINVAL if
-KVM_S390_CMMA_PEEK is not set but migration mode was not enabled, with
--EFAULT if the userspace address is invalid or if no page table is
-present for the addresses (e.g. when using hugepages).
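-
-For illustration, a sketch of non-destructively peeking at the first CMMA
-values; "vm_fd" is assumed to be an open VM file descriptor:
-
-__u8 buf[4096];
-struct kvm_s390_cmma_log log = {
-	.start_gfn = 0,
-	.count     = sizeof(buf),
-	.flags     = KVM_S390_CMMA_PEEK,
-	.values    = (__u64)(unsigned long)buf,
-};
-
-if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) == 0)
-	/* log.count CMMA values are now available in buf */;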
- -4.108 KVM_S390_SET_CMMA_BITS - -Capability: KVM_CAP_S390_CMMA_MIGRATION -Architectures: s390 -Type: vm ioctl -Parameters: struct kvm_s390_cmma_log (in) -Returns: 0 on success, a negative value on error - -This ioctl is used to set the values of the CMMA bits on the s390 -architecture. It is meant to be used during live migration to restore -the CMMA values, but there are no restrictions on its use. -The ioctl takes parameters via the kvm_s390_cmma_values struct. -Each CMMA value takes up one byte. - -struct kvm_s390_cmma_log { - __u64 start_gfn; - __u32 count; - __u32 flags; - union { - __u64 remaining; - __u64 mask; - }; - __u64 values; -}; - -start_gfn indicates the starting guest frame number, - -count indicates how many values are to be considered in the buffer, - -flags is not used and must be 0. - -mask indicates which PGSTE bits are to be considered. - -remaining is not used. - -values points to the buffer in userspace where to store the values. - -This ioctl can fail with -ENOMEM if not enough memory can be allocated to -complete the task, with -ENXIO if CMMA is not enabled, with -EINVAL if -the count field is too large (e.g. more than KVM_S390_CMMA_SIZE_MAX) or -if the flags field was not 0, with -EFAULT if the userspace address is -invalid, if invalid pages are written to (e.g. after the end of memory) -or if no page table is present for the addresses (e.g. when using -hugepages). - -4.109 KVM_PPC_GET_CPU_CHAR - -Capability: KVM_CAP_PPC_GET_CPU_CHAR -Architectures: powerpc -Type: vm ioctl -Parameters: struct kvm_ppc_cpu_char (out) -Returns: 0 on successful completion - -EFAULT if struct kvm_ppc_cpu_char cannot be written - -This ioctl gives userspace information about certain characteristics -of the CPU relating to speculative execution of instructions and -possible information leakage resulting from speculative execution (see -CVE-2017-5715, CVE-2017-5753 and CVE-2017-5754). The information is -returned in struct kvm_ppc_cpu_char, which looks like this: - -struct kvm_ppc_cpu_char { - __u64 character; /* characteristics of the CPU */ - __u64 behaviour; /* recommended software behaviour */ - __u64 character_mask; /* valid bits in character */ - __u64 behaviour_mask; /* valid bits in behaviour */ -}; - -For extensibility, the character_mask and behaviour_mask fields -indicate which bits of character and behaviour have been filled in by -the kernel. If the set of defined bits is extended in future then -userspace will be able to tell whether it is running on a kernel that -knows about the new bits. - -The character field describes attributes of the CPU which can help -with preventing inadvertent information disclosure - specifically, -whether there is an instruction to flash-invalidate the L1 data cache -(ori 30,30,0 or mtspr SPRN_TRIG2,rN), whether the L1 data cache is set -to a mode where entries can only be used by the thread that created -them, whether the bcctr[l] instruction prevents speculation, and -whether a speculation barrier instruction (ori 31,31,0) is provided. - -The behaviour field describes actions that software should take to -prevent inadvertent information disclosure, and thus describes which -vulnerabilities the hardware is subject to; specifically whether the -L1 data cache should be flushed when returning to user mode from the -kernel, and whether a speculation barrier should be placed between an -array bounds check and the array access. - -These fields use the same bit definitions as the new -H_GET_CPU_CHARACTERISTICS hypercall. 
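-
-For illustration, a sketch of querying these characteristics ("vm_fd" is
-assumed to be an open VM file descriptor); only bits set in the mask
-fields are meaningful:
-
-struct kvm_ppc_cpu_char cc;
-
-if (ioctl(vm_fd, KVM_PPC_GET_CPU_CHAR, &cc) == 0) {
-	__u64 character = cc.character & cc.character_mask;
-	__u64 behaviour = cc.behaviour & cc.behaviour_mask;
-	/* inspect the masked character/behaviour bits */
-}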
- -4.110 KVM_MEMORY_ENCRYPT_OP - -Capability: basic -Architectures: x86 -Type: system -Parameters: an opaque platform specific structure (in/out) -Returns: 0 on success; -1 on error - -If the platform supports creating encrypted VMs then this ioctl can be used -for issuing platform-specific memory encryption commands to manage those -encrypted VMs. - -Currently, this ioctl is used for issuing Secure Encrypted Virtualization -(SEV) commands on AMD Processors. The SEV commands are defined in -Documentation/virtual/kvm/amd-memory-encryption.rst. - -4.111 KVM_MEMORY_ENCRYPT_REG_REGION - -Capability: basic -Architectures: x86 -Type: system -Parameters: struct kvm_enc_region (in) -Returns: 0 on success; -1 on error - -This ioctl can be used to register a guest memory region which may -contain encrypted data (e.g. guest RAM, SMRAM etc). - -It is used in the SEV-enabled guest. When encryption is enabled, a guest -memory region may contain encrypted data. The SEV memory encryption -engine uses a tweak such that two identical plaintext pages, each at -different locations will have differing ciphertexts. So swapping or -moving ciphertext of those pages will not result in plaintext being -swapped. So relocating (or migrating) physical backing pages for the SEV -guest will require some additional steps. - -Note: The current SEV key management spec does not provide commands to -swap or migrate (move) ciphertext pages. Hence, for now we pin the guest -memory region registered with the ioctl. - -4.112 KVM_MEMORY_ENCRYPT_UNREG_REGION - -Capability: basic -Architectures: x86 -Type: system -Parameters: struct kvm_enc_region (in) -Returns: 0 on success; -1 on error - -This ioctl can be used to unregister the guest memory region registered -with KVM_MEMORY_ENCRYPT_REG_REGION ioctl above. - -4.113 KVM_HYPERV_EVENTFD - -Capability: KVM_CAP_HYPERV_EVENTFD -Architectures: x86 -Type: vm ioctl -Parameters: struct kvm_hyperv_eventfd (in) - -This ioctl (un)registers an eventfd to receive notifications from the guest on -the specified Hyper-V connection id through the SIGNAL_EVENT hypercall, without -causing a user exit. SIGNAL_EVENT hypercall with non-zero event flag number -(bits 24-31) still triggers a KVM_EXIT_HYPERV_HCALL user exit. - -struct kvm_hyperv_eventfd { - __u32 conn_id; - __s32 fd; - __u32 flags; - __u32 padding[3]; -}; - -The conn_id field should fit within 24 bits: - -#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff - -The acceptable values for the flags field are: - -#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0) - -Returns: 0 on success, - -EINVAL if conn_id or flags is outside the allowed range - -ENOENT on deassign if the conn_id isn't registered - -EEXIST on assign if the conn_id is already registered - -4.114 KVM_GET_NESTED_STATE - -Capability: KVM_CAP_NESTED_STATE -Architectures: x86 -Type: vcpu ioctl -Parameters: struct kvm_nested_state (in/out) -Returns: 0 on success, -1 on error -Errors: - E2BIG: the total state size exceeds the value of 'size' specified by - the user; the size required will be written into size. - -struct kvm_nested_state { - __u16 flags; - __u16 format; - __u32 size; - - union { - struct kvm_vmx_nested_state_hdr vmx; - struct kvm_svm_nested_state_hdr svm; - - /* Pad the header to 128 bytes. 
*/ - __u8 pad[120]; - } hdr; - - union { - struct kvm_vmx_nested_state_data vmx[0]; - struct kvm_svm_nested_state_data svm[0]; - } data; -}; - -#define KVM_STATE_NESTED_GUEST_MODE 0x00000001 -#define KVM_STATE_NESTED_RUN_PENDING 0x00000002 -#define KVM_STATE_NESTED_EVMCS 0x00000004 - -#define KVM_STATE_NESTED_FORMAT_VMX 0 -#define KVM_STATE_NESTED_FORMAT_SVM 1 - -#define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000 - -#define KVM_STATE_NESTED_VMX_SMM_GUEST_MODE 0x00000001 -#define KVM_STATE_NESTED_VMX_SMM_VMXON 0x00000002 - -struct kvm_vmx_nested_state_hdr { - __u64 vmxon_pa; - __u64 vmcs12_pa; - - struct { - __u16 flags; - } smm; -}; - -struct kvm_vmx_nested_state_data { - __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; - __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; -}; - -This ioctl copies the vcpu's nested virtualization state from the kernel to -userspace. - -The maximum size of the state can be retrieved by passing KVM_CAP_NESTED_STATE -to the KVM_CHECK_EXTENSION ioctl(). - -4.115 KVM_SET_NESTED_STATE - -Capability: KVM_CAP_NESTED_STATE -Architectures: x86 -Type: vcpu ioctl -Parameters: struct kvm_nested_state (in) -Returns: 0 on success, -1 on error - -This copies the vcpu's kvm_nested_state struct from userspace to the kernel. -For the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE. - -4.116 KVM_(UN)REGISTER_COALESCED_MMIO - -Capability: KVM_CAP_COALESCED_MMIO (for coalesced mmio) - KVM_CAP_COALESCED_PIO (for coalesced pio) -Architectures: all -Type: vm ioctl -Parameters: struct kvm_coalesced_mmio_zone -Returns: 0 on success, < 0 on error - -Coalesced I/O is a performance optimization that defers hardware -register write emulation so that userspace exits are avoided. It is -typically used to reduce the overhead of emulating frequently accessed -hardware registers. - -When a hardware register is configured for coalesced I/O, write accesses -do not exit to userspace and their value is recorded in a ring buffer -that is shared between kernel and userspace. - -Coalesced I/O is used if one or more write accesses to a hardware -register can be deferred until a read or a write to another hardware -register on the same device. This last access will cause a vmexit and -userspace will process accesses from the ring buffer before emulating -it. That will avoid exiting to userspace on repeated writes. - -Coalesced pio is based on coalesced mmio. There is little difference -between coalesced mmio and pio except that coalesced pio records accesses -to I/O ports. - -4.117 KVM_CLEAR_DIRTY_LOG (vm ioctl) - -Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 -Architectures: x86, arm, arm64, mips -Type: vm ioctl -Parameters: struct kvm_dirty_log (in) -Returns: 0 on success, -1 on error - -/* for KVM_CLEAR_DIRTY_LOG */ -struct kvm_clear_dirty_log { - __u32 slot; - __u32 num_pages; - __u64 first_page; - union { - void __user *dirty_bitmap; /* one bit per page */ - __u64 padding; - }; -}; - -The ioctl clears the dirty status of pages in a memory slot, according to -the bitmap that is passed in struct kvm_clear_dirty_log's dirty_bitmap -field. Bit 0 of the bitmap corresponds to page "first_page" in the -memory slot, and num_pages is the size in bits of the input bitmap. -first_page must be a multiple of 64; num_pages must also be a multiple of -64 unless first_page + num_pages is the size of the memory slot. 
For each
-bit that is set in the input bitmap, the corresponding page is marked "clean"
-in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
-(for example via write-protection, or by clearing the dirty bit in
-a page table entry).
-
-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specify
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
-
-This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
-is enabled; for more information, see the description of the capability.
-However, it can always be used as long as KVM_CHECK_EXTENSION confirms
-that KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is present.
-
-4.118 KVM_GET_SUPPORTED_HV_CPUID
-
-Capability: KVM_CAP_HYPERV_CPUID
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_cpuid2 (in/out)
-Returns: 0 on success, -1 on error
-
-struct kvm_cpuid2 {
-	__u32 nent;
-	__u32 padding;
-	struct kvm_cpuid_entry2 entries[0];
-};
-
-struct kvm_cpuid_entry2 {
-	__u32 function;
-	__u32 index;
-	__u32 flags;
-	__u32 eax;
-	__u32 ebx;
-	__u32 ecx;
-	__u32 edx;
-	__u32 padding[3];
-};
-
-This ioctl returns x86 cpuid feature leaves related to Hyper-V emulation in
-KVM. Userspace can use the information returned by this ioctl to construct
-cpuid information presented to guests consuming Hyper-V enlightenments (e.g.
-Windows or Hyper-V guests).
-
-CPUID feature leaves returned by this ioctl are defined by Hyper-V Top Level
-Functional Specification (TLFS). These leaves can't be obtained with
-KVM_GET_SUPPORTED_CPUID ioctl because some of them intersect with KVM feature
-leaves (0x40000000, 0x40000001).
-
-Currently, the following list of CPUID leaves is returned:
- HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS
- HYPERV_CPUID_INTERFACE
- HYPERV_CPUID_VERSION
- HYPERV_CPUID_FEATURES
- HYPERV_CPUID_ENLIGHTMENT_INFO
- HYPERV_CPUID_IMPLEMENT_LIMITS
- HYPERV_CPUID_NESTED_FEATURES
-
-HYPERV_CPUID_NESTED_FEATURES leaf is only exposed when Enlightened VMCS was
-enabled on the corresponding vCPU (KVM_CAP_HYPERV_ENLIGHTENED_VMCS).
-
-Userspace invokes KVM_GET_SUPPORTED_HV_CPUID by passing a kvm_cpuid2 structure
-with the 'nent' field indicating the number of entries in the variable-size
-array 'entries'. If the number of entries is too low to describe all Hyper-V
-feature leaves, an error (E2BIG) is returned. If the number is greater than or
-equal to the number of Hyper-V feature leaves, the 'nent' field is adjusted to
-the number of valid entries in the 'entries' array, which is then filled.
-
-'index' and 'flags' fields in 'struct kvm_cpuid_entry2' are currently reserved;
-userspace should not expect to get any particular value there.
-
-4.119 KVM_ARM_VCPU_FINALIZE
-
-Architectures: arm, arm64
-Type: vcpu ioctl
-Parameters: int feature (in)
-Returns: 0 on success, -1 on error
-Errors:
-  EPERM: feature not enabled, needs configuration, or already finalized
-  EINVAL: feature unknown or not present
-
-Recognised values for feature:
-  arm64  KVM_ARM_VCPU_SVE (requires KVM_CAP_ARM_SVE)
-
-Finalizes the configuration of the specified vcpu feature.
-
-The vcpu must already have been initialised, enabling the affected feature, by
-means of a successful KVM_ARM_VCPU_INIT call with the appropriate flag set in
-features[].
-
-For affected vcpu features, this is a mandatory step that must be performed
-before the vcpu is fully usable.
-
-Between KVM_ARM_VCPU_INIT and KVM_ARM_VCPU_FINALIZE, the feature may be
-configured by use of ioctls such as KVM_SET_ONE_REG. The exact configuration
-that should be performed and how to do it are feature-dependent.
-
-Other calls that depend on a particular feature being finalized, such as
-KVM_RUN, KVM_GET_REG_LIST, KVM_GET_ONE_REG and KVM_SET_ONE_REG, will fail with
--EPERM unless the feature has already been finalized by means of a
-KVM_ARM_VCPU_FINALIZE call.
-
-See KVM_ARM_VCPU_INIT for details of vcpu features that require finalization
-using this ioctl.
-
-4.120 KVM_SET_PMU_EVENT_FILTER
-
-Capability: KVM_CAP_PMU_EVENT_FILTER
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_pmu_event_filter (in)
-Returns: 0 on success, -1 on error
-
-struct kvm_pmu_event_filter {
-	__u32 action;
-	__u32 nevents;
-	__u32 fixed_counter_bitmap;
-	__u32 flags;
-	__u32 pad[4];
-	__u64 events[0];
-};
-
-This ioctl restricts the set of PMU events that the guest can program.
-The argument holds a list of events which will be allowed or denied.
-The eventsel+umask of each event the guest attempts to program is compared
-against the events field to determine whether the guest should have access.
-The events field only controls general purpose counters; fixed purpose
-counters are controlled by the fixed_counter_bitmap.
-
-No flags are defined yet; the field must be zero.
-
-Valid values for 'action':
-#define KVM_PMU_EVENT_ALLOW 0
-#define KVM_PMU_EVENT_DENY 1
-
-
-5. The kvm_run structure
-------------------------
-
-Application code obtains a pointer to the kvm_run structure by
-mmap()ing a vcpu fd. From that point, application code can control
-execution by changing fields in kvm_run prior to calling the KVM_RUN
-ioctl, and obtain information about the reason KVM_RUN returned by
-looking up structure members.
-
-struct kvm_run {
-	/* in */
-	__u8 request_interrupt_window;
-
-Request that KVM_RUN return when it becomes possible to inject external
-interrupts into the guest. Useful in conjunction with KVM_INTERRUPT.
-
-	__u8 immediate_exit;
-
-This field is polled once when KVM_RUN starts; if non-zero, KVM_RUN
-exits immediately, returning -EINTR. In the common scenario where a
-signal is used to "kick" a VCPU out of KVM_RUN, this field can be used
-to avoid usage of KVM_SET_SIGNAL_MASK, which has worse scalability.
-Rather than blocking the signal outside KVM_RUN, userspace can set up
-a signal handler that sets run->immediate_exit to a non-zero value.
-
-This field is ignored if KVM_CAP_IMMEDIATE_EXIT is not available.
-
-	__u8 padding1[6];
-
-	/* out */
-	__u32 exit_reason;
-
-When KVM_RUN has returned successfully (return value 0), this informs
-application code why KVM_RUN has returned. Allowable values for this
-field are detailed below.
-
-	__u8 ready_for_interrupt_injection;
-
-If request_interrupt_window has been specified, this field indicates
-an interrupt can be injected now with KVM_INTERRUPT.
-
-	__u8 if_flag;
-
-The value of the current interrupt flag. Only valid if in-kernel
-local APIC is not used.
-
-	__u16 flags;
-
-More architecture-specific flags detailing state of the VCPU that may
-affect the device's behavior. The only currently defined flag is
-KVM_RUN_X86_SMM, which is valid on x86 machines and is set if the
-VCPU is in system management mode.
-
-	/* in (pre_kvm_run), out (post_kvm_run) */
-	__u64 cr8;
-
-The value of the cr8 register. Only valid if in-kernel local APIC is
-not used. Both input and output.
-
-	__u64 apic_base;
-
-The value of the APIC BASE msr.
Only valid if in-kernel local -APIC is not used. Both input and output. - - union { - /* KVM_EXIT_UNKNOWN */ - struct { - __u64 hardware_exit_reason; - } hw; - -If exit_reason is KVM_EXIT_UNKNOWN, the vcpu has exited due to unknown -reasons. Further architecture-specific information is available in -hardware_exit_reason. - - /* KVM_EXIT_FAIL_ENTRY */ - struct { - __u64 hardware_entry_failure_reason; - } fail_entry; - -If exit_reason is KVM_EXIT_FAIL_ENTRY, the vcpu could not be run due -to unknown reasons. Further architecture-specific information is -available in hardware_entry_failure_reason. - - /* KVM_EXIT_EXCEPTION */ - struct { - __u32 exception; - __u32 error_code; - } ex; - -Unused. - - /* KVM_EXIT_IO */ - struct { -#define KVM_EXIT_IO_IN 0 -#define KVM_EXIT_IO_OUT 1 - __u8 direction; - __u8 size; /* bytes */ - __u16 port; - __u32 count; - __u64 data_offset; /* relative to kvm_run start */ - } io; - -If exit_reason is KVM_EXIT_IO, then the vcpu has -executed a port I/O instruction which could not be satisfied by kvm. -data_offset describes where the data is located (KVM_EXIT_IO_OUT) or -where kvm expects application code to place the data for the next -KVM_RUN invocation (KVM_EXIT_IO_IN). Data format is a packed array. - - /* KVM_EXIT_DEBUG */ - struct { - struct kvm_debug_exit_arch arch; - } debug; - -If the exit_reason is KVM_EXIT_DEBUG, then a vcpu is processing a debug event -for which architecture specific information is returned. - - /* KVM_EXIT_MMIO */ - struct { - __u64 phys_addr; - __u8 data[8]; - __u32 len; - __u8 is_write; - } mmio; - -If exit_reason is KVM_EXIT_MMIO, then the vcpu has -executed a memory-mapped I/O instruction which could not be satisfied -by kvm. The 'data' member contains the written data if 'is_write' is -true, and should be filled by application code otherwise. - -The 'data' member contains, in its first 'len' bytes, the value as it would -appear if the VCPU performed a load or store of the appropriate width directly -to the byte array. - -NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_PAPR and - KVM_EXIT_EPR the corresponding -operations are complete (and guest state is consistent) only after userspace -has re-entered the kernel with KVM_RUN. The kernel side will first finish -incomplete operations and then check for pending signals. Userspace -can re-enter the guest with an unmasked signal pending to complete -pending operations. - - /* KVM_EXIT_HYPERCALL */ - struct { - __u64 nr; - __u64 args[6]; - __u64 ret; - __u32 longmode; - __u32 pad; - } hypercall; - -Unused. This was once used for 'hypercall to userspace'. To implement -such functionality, use KVM_EXIT_IO (x86) or KVM_EXIT_MMIO (all except s390). -Note KVM_EXIT_IO is significantly faster than KVM_EXIT_MMIO. - - /* KVM_EXIT_TPR_ACCESS */ - struct { - __u64 rip; - __u32 is_write; - __u32 pad; - } tpr_access; - -To be documented (KVM_TPR_ACCESS_REPORTING). - - /* KVM_EXIT_S390_SIEIC */ - struct { - __u8 icptcode; - __u64 mask; /* psw upper half */ - __u64 addr; /* psw lower half */ - __u16 ipa; - __u32 ipb; - } s390_sieic; - -s390 specific. - - /* KVM_EXIT_S390_RESET */ -#define KVM_S390_RESET_POR 1 -#define KVM_S390_RESET_CLEAR 2 -#define KVM_S390_RESET_SUBSYSTEM 4 -#define KVM_S390_RESET_CPU_INIT 8 -#define KVM_S390_RESET_IPL 16 - __u64 s390_reset_flags; - -s390 specific. - - /* KVM_EXIT_S390_UCONTROL */ - struct { - __u64 trans_exc_code; - __u32 pgm_code; - } s390_ucontrol; - -s390 specific. 
A page fault has occurred for a user controlled virtual
-machine (KVM_VM_S390_UCONTROL) on its host page table that cannot be
-resolved by the kernel.
-The program code and the translation exception code that were placed
-in the cpu's lowcore are presented here as defined by the z Architecture
-Principles of Operation Book in the Chapter for Dynamic Address Translation
-(DAT).
-
-		/* KVM_EXIT_DCR */
-		struct {
-			__u32 dcrn;
-			__u32 data;
-			__u8  is_write;
-		} dcr;
-
-Deprecated - was used for 440 KVM.
-
-		/* KVM_EXIT_OSI */
-		struct {
-			__u64 gprs[32];
-		} osi;
-
-MOL uses a special hypercall interface it calls 'OSI'. To enable it, we catch
-hypercalls and exit with this exit struct that contains all the guest gprs.
-
-If exit_reason is KVM_EXIT_OSI, then the vcpu has triggered such a hypercall.
-Userspace can now handle the hypercall and, when it is done, modify the gprs
-as necessary. Upon guest entry all guest GPRs will then be replaced by the
-values in this struct.
-
-		/* KVM_EXIT_PAPR_HCALL */
-		struct {
-			__u64 nr;
-			__u64 ret;
-			__u64 args[9];
-		} papr_hcall;
-
-This is used on 64-bit PowerPC when emulating a pSeries partition,
-e.g. with the 'pseries' machine type in qemu. It occurs when the
-guest does a hypercall using the 'sc 1' instruction. The 'nr' field
-contains the hypercall number (from the guest R3), and 'args' contains
-the arguments (from the guest R4 - R12). Userspace should put the
-return code in 'ret' and any extra returned values in args[].
-The possible hypercalls are defined in the Power Architecture Platform
-Requirements (PAPR) document available from www.power.org (free
-developer registration required to access it).
-
-		/* KVM_EXIT_S390_TSCH */
-		struct {
-			__u16 subchannel_id;
-			__u16 subchannel_nr;
-			__u32 io_int_parm;
-			__u32 io_int_word;
-			__u32 ipb;
-			__u8 dequeued;
-		} s390_tsch;
-
-s390 specific. This exit occurs when KVM_CAP_S390_CSS_SUPPORT has been enabled
-and TEST SUBCHANNEL was intercepted. If dequeued is set, a pending I/O
-interrupt for the target subchannel has been dequeued and subchannel_id,
-subchannel_nr, io_int_parm and io_int_word contain the parameters for that
-interrupt. ipb is needed for instruction parameter decoding.
-
-		/* KVM_EXIT_EPR */
-		struct {
-			__u32 epr;
-		} epr;
-
-On FSL BookE PowerPC chips, the interrupt controller has a fast
-interrupt acknowledge path to the core. When the core successfully
-delivers an interrupt, it automatically populates the EPR register with
-the interrupt vector number and acknowledges the interrupt inside
-the interrupt controller.
-
-In case the interrupt controller lives in user space, we need to do
-the interrupt acknowledge cycle through it to fetch the next interrupt
-vector to be delivered, using this exit.
-
-It gets triggered whenever KVM_CAP_PPC_EPR is enabled and an
-external interrupt has just been delivered into the guest. User space
-should put the acknowledged interrupt vector into the 'epr' field.
-
-		/* KVM_EXIT_SYSTEM_EVENT */
-		struct {
-#define KVM_SYSTEM_EVENT_SHUTDOWN       1
-#define KVM_SYSTEM_EVENT_RESET          2
-#define KVM_SYSTEM_EVENT_CRASH          3
-			__u32 type;
-			__u64 flags;
-		} system_event;
-
-If exit_reason is KVM_EXIT_SYSTEM_EVENT then the vcpu has triggered
-a system-level event using some architecture specific mechanism (hypercall
-or some special instruction). In case of ARM/ARM64, this is triggered using
-an HVC instruction based PSCI call from the vcpu. The 'type' field describes
-the system-level event type.
The 'flags' field describes architecture
-specific flags for the system-level event.
-
-Valid values for 'type' are:
-  KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the
-   VM. Userspace is not obliged to honour this, and if it does honour
-   this does not need to destroy the VM synchronously (i.e. it may call
-   KVM_RUN again before shutdown finally occurs).
-  KVM_SYSTEM_EVENT_RESET -- the guest has requested a reset of the VM.
-   As with SHUTDOWN, userspace can choose to ignore the request, or
-   to schedule the reset to occur in the future and may call KVM_RUN again.
-  KVM_SYSTEM_EVENT_CRASH -- a guest crash has occurred and the guest
-   has requested that the crash condition be maintained. Userspace can
-   choose to ignore the request, or to gather a VM memory core dump and/or
-   reset/shut down the VM.
-
-		/* KVM_EXIT_IOAPIC_EOI */
-		struct {
-			__u8 vector;
-		} eoi;
-
-Indicates that the VCPU's in-kernel local APIC received an EOI for a
-level-triggered IOAPIC interrupt. This exit only triggers when the
-IOAPIC is implemented in userspace (i.e. KVM_CAP_SPLIT_IRQCHIP is enabled);
-the userspace IOAPIC should process the EOI and retrigger the interrupt if
-it is still asserted. Vector is the LAPIC interrupt vector for which the
-EOI was received.
-
-		struct kvm_hyperv_exit {
-#define KVM_EXIT_HYPERV_SYNIC          1
-#define KVM_EXIT_HYPERV_HCALL          2
-			__u32 type;
-			union {
-				struct {
-					__u32 msr;
-					__u64 control;
-					__u64 evt_page;
-					__u64 msg_page;
-				} synic;
-				struct {
-					__u64 input;
-					__u64 result;
-					__u64 params[2];
-				} hcall;
-			} u;
-		};
-		/* KVM_EXIT_HYPERV */
-		struct kvm_hyperv_exit hyperv;
-Indicates that the VCPU exits into userspace to process some tasks
-related to Hyper-V emulation.
-Valid values for 'type' are:
-	KVM_EXIT_HYPERV_SYNIC -- synchronously notify user-space about
-Hyper-V SynIC state change. Notification is used to remap SynIC
-event/message pages and to enable/disable SynIC messages/events processing
-in userspace.
-
-		/* Fix the size of the union. */
-		char padding[256];
-	};
-
-	/*
-	 * shared registers between kvm and userspace.
-	 * kvm_valid_regs specifies the register classes set by the host
-	 * kvm_dirty_regs specifies the register classes dirtied by userspace
-	 * struct kvm_sync_regs is architecture specific, as well as the
-	 * bits for kvm_valid_regs and kvm_dirty_regs
-	 */
-	__u64 kvm_valid_regs;
-	__u64 kvm_dirty_regs;
-	union {
-		struct kvm_sync_regs regs;
-		char padding[SYNC_REGS_SIZE_BYTES];
-	} s;
-
-If KVM_CAP_SYNC_REGS is defined, these fields allow userspace to access
-certain guest registers without having to call SET/GET_*REGS. Thus we can
-avoid some system call overhead if userspace has to handle the exit.
-Userspace can query the validity of the structure by checking
-kvm_valid_regs for specific bits. These bits are architecture specific
-and usually define the validity of a group of registers (e.g. one bit
-for general purpose registers).
-
-Please note that the kernel is allowed to use the kvm_run structure as the
-primary storage for certain register types. Therefore, the kernel may use the
-values in kvm_run even if the corresponding bit in kvm_dirty_regs is not set.
-
-};
-
-
-
-6. Capabilities that can be enabled on vCPUs
---------------------------------------------
-
-There are certain capabilities that change the behavior of the virtual CPU or
-the virtual machine when enabled. To enable them, please see section 4.37.
-Below you can find a list of capabilities and what their effect on the vCPU or
-the virtual machine is when enabling them.
-
-The following information is provided along with the description:
-
-  Architectures: which instruction set architectures provide this ioctl.
-      x86 includes both i386 and x86_64.
-
-  Target: whether this is a per-vcpu or per-vm capability.
-
-  Parameters: what parameters are accepted by the capability.
-
-  Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
-      are not detailed, but errors with specific meanings are.
-
-
-6.1 KVM_CAP_PPC_OSI
-
-Architectures: ppc
-Target: vcpu
-Parameters: none
-Returns: 0 on success; -1 on error
-
-This capability enables interception of OSI hypercalls that otherwise would
-be treated as normal system calls to be injected into the guest. OSI hypercalls
-were invented by Mac-on-Linux to have a standardized communication mechanism
-between the guest and the host.
-
-When this capability is enabled, KVM_EXIT_OSI can occur.
-
-
-6.2 KVM_CAP_PPC_PAPR
-
-Architectures: ppc
-Target: vcpu
-Parameters: none
-Returns: 0 on success; -1 on error
-
-This capability enables interception of PAPR hypercalls. PAPR hypercalls are
-done using the hypercall instruction "sc 1".
-
-It also sets the guest privilege level to "supervisor" mode. Usually the guest
-runs in "hypervisor" privilege mode with a few missing features.
-
-In addition to the above, it changes the semantics of SDR1. In this mode, the
-HTAB address part of SDR1 contains an HVA instead of a GPA, as PAPR keeps the
-HTAB invisible to the guest.
-
-When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur.
-
-
-6.3 KVM_CAP_SW_TLB
-
-Architectures: ppc
-Target: vcpu
-Parameters: args[0] is the address of a struct kvm_config_tlb
-Returns: 0 on success; -1 on error
-
-struct kvm_config_tlb {
-	__u64 params;
-	__u64 array;
-	__u32 mmu_type;
-	__u32 array_len;
-};
-
-Configures the virtual CPU's TLB array, establishing a shared memory area
-between userspace and KVM. The "params" and "array" fields are userspace
-addresses of mmu-type-specific data structures. The "array_len" field is a
-safety mechanism, and should be set to the size in bytes of the memory that
-userspace has reserved for the array. It must be at least the size dictated
-by "mmu_type" and "params".
-
-While KVM_RUN is active, the shared region is under control of KVM. Its
-contents are undefined, and any modification by userspace results in
-boundedly undefined behavior.
-
-On return from KVM_RUN, the shared region will reflect the current state of
-the guest's TLB. If userspace makes any changes, it must call KVM_DIRTY_TLB
-to tell KVM which entries have been changed, prior to calling KVM_RUN again
-on this vcpu.
-
-For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
- - The "params" field is of type "struct kvm_book3e_206_tlb_params".
- - The "array" field points to an array of type "struct
-   kvm_book3e_206_tlb_entry".
- - The array consists of all entries in the first TLB, followed by all
-   entries in the second TLB.
- - Within a TLB, entries are ordered first by increasing set number. Within a
-   set, entries are ordered by way (increasing ESEL).
- - The hash for determining set number in TLB0 is: (MAS2 >> 12) & (num_sets - 1)
-   where "num_sets" is the tlb_sizes[] value divided by the tlb_ways[] value.
- - The tsize field of mas1 shall be set to 4K on TLB0, even though the
-   hardware ignores this value for TLB0.
-
-6.4 KVM_CAP_S390_CSS_SUPPORT
-
-Architectures: s390
-Target: vcpu
-Parameters: none
-Returns: 0 on success; -1 on error
-
-This capability enables support for handling of channel I/O instructions.
-
-TEST PENDING INTERRUPTION and the interrupt portion of TEST SUBCHANNEL are
-handled in-kernel, while the other I/O instructions are passed to userspace.
-
-When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST
-SUBCHANNEL intercepts.
-
-Note that even though this capability is enabled per-vcpu, the complete
-virtual machine is affected.
-
-6.5 KVM_CAP_PPC_EPR
-
-Architectures: ppc
-Target: vcpu
-Parameters: args[0] defines whether the proxy facility is active
-Returns: 0 on success; -1 on error
-
-This capability enables or disables the delivery of interrupts through the
-external proxy facility.
-
-When enabled (args[0] != 0), every time the guest gets an external interrupt
-delivered, it automatically exits into user space with a KVM_EXIT_EPR exit
-to receive the topmost interrupt vector.
-
-When disabled (args[0] == 0), behavior is as if this facility is unsupported.
-
-When this capability is enabled, KVM_EXIT_EPR can occur.
-
-6.6 KVM_CAP_IRQ_MPIC
-
-Architectures: ppc
-Parameters: args[0] is the MPIC device fd
-            args[1] is the MPIC CPU number for this vcpu
-
-This capability connects the vcpu to an in-kernel MPIC device.
-
-6.7 KVM_CAP_IRQ_XICS
-
-Architectures: ppc
-Target: vcpu
-Parameters: args[0] is the XICS device fd
-            args[1] is the XICS CPU number (server ID) for this vcpu
-
-This capability connects the vcpu to an in-kernel XICS device.
-
-6.8 KVM_CAP_S390_IRQCHIP
-
-Architectures: s390
-Target: vm
-Parameters: none
-
-This capability enables the in-kernel irqchip for s390. Please refer to
-"4.24 KVM_CREATE_IRQCHIP" for details.
-
-6.9 KVM_CAP_MIPS_FPU
-
-Architectures: mips
-Target: vcpu
-Parameters: args[0] is reserved for future use (should be 0).
-
-This capability allows the use of the host Floating Point Unit by the guest. It
-allows the Config1.FP bit to be set to enable the FPU in the guest. Once this is
-done the KVM_REG_MIPS_FPR_* and KVM_REG_MIPS_FCR_* registers can be accessed
-(depending on the current guest FPU register mode), and the Status.FR,
-Config5.FRE bits are accessible via the KVM API and also from the guest,
-depending on them being supported by the FPU.
-
-6.10 KVM_CAP_MIPS_MSA
-
-Architectures: mips
-Target: vcpu
-Parameters: args[0] is reserved for future use (should be 0).
-
-This capability allows the use of the MIPS SIMD Architecture (MSA) by the guest.
-It allows the Config3.MSAP bit to be set to enable the use of MSA by the guest.
-Once this is done the KVM_REG_MIPS_VEC_* and KVM_REG_MIPS_MSA_* registers can be
-accessed, and the Config5.MSAEn bit is accessible via the KVM API and also from
-the guest.
-
-6.74 KVM_CAP_SYNC_REGS
-Architectures: s390, x86
-Target: s390: always enabled, x86: vcpu
-Parameters: none
-Returns: x86: KVM_CHECK_EXTENSION returns a bit-array indicating which register
-sets are supported (bitfields defined in arch/x86/include/uapi/asm/kvm.h).
-
-As described above in the kvm_sync_regs struct info in section 5 (kvm_run):
-KVM_CAP_SYNC_REGS "allow[s] userspace to access certain guest registers
-without having to call SET/GET_*REGS". This reduces overhead by eliminating
-repeated ioctl calls for setting and/or getting register values. This is
-particularly important when userspace is making synchronous guest state
-modifications, e.g. when emulating and/or intercepting instructions in
-userspace.
-
-For s390 specifics, please refer to the source code.
-
-For x86:
-- the register sets to be copied out to kvm_run are selectable
-  by userspace (rather than all sets being copied out for every exit).
-
-- vcpu_events are available in addition to regs and sregs.
-
-For x86, the 'kvm_valid_regs' field of struct kvm_run is overloaded to
-function as an input bit-array field set by userspace to indicate the
-specific register sets to be copied out on the next exit.
-
-To indicate when userspace has modified values that should be copied into
-the vCPU, the all-architecture bitarray field 'kvm_dirty_regs' must be set.
-This is done using the same bitflags as for the 'kvm_valid_regs' field.
-If the dirty bit is not set, then the register set values will not be copied
-into the vCPU even if they've been modified.
-
-Unused bitfields in the bitarrays must be set to zero.
-
-struct kvm_sync_regs {
-	struct kvm_regs regs;
-	struct kvm_sregs sregs;
-	struct kvm_vcpu_events events;
-};
-
-6.75 KVM_CAP_PPC_IRQ_XIVE
-
-Architectures: ppc
-Target: vcpu
-Parameters: args[0] is the XIVE device fd
-            args[1] is the XIVE CPU number (server ID) for this vcpu
-
-This capability connects the vcpu to an in-kernel XIVE device.
-
-7. Capabilities that can be enabled on VMs
-------------------------------------------
-
-There are certain capabilities that change the behavior of the virtual
-machine when enabled. To enable them, please see section 4.37. Below
-you can find a list of capabilities and what their effect on the VM
-is when enabling them.
-
-The following information is provided along with the description:
-
-  Architectures: which instruction set architectures provide this ioctl.
-      x86 includes both i386 and x86_64.
-
-  Parameters: what parameters are accepted by the capability.
-
-  Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
-      are not detailed, but errors with specific meanings are.
-
-
-7.1 KVM_CAP_PPC_ENABLE_HCALL
-
-Architectures: ppc
-Parameters: args[0] is the sPAPR hcall number
-            args[1] is 0 to disable, 1 to enable in-kernel handling
-
-This capability controls whether individual sPAPR hypercalls (hcalls)
-get handled by the kernel or not. Enabling or disabling in-kernel
-handling of an hcall is effective across the VM. On creation, an
-initial set of hcalls is enabled for in-kernel handling, which
-consists of those hcalls for which in-kernel handlers were implemented
-before this capability was implemented. If disabled, the kernel will
-not attempt to handle the hcall, but will always exit to userspace
-to handle it. Note that it may not make sense to enable some and
-disable others of a group of related hcalls, but KVM does not prevent
-userspace from doing that.
-
-If the hcall number specified is not one that has an in-kernel
-implementation, the KVM_ENABLE_CAP ioctl will fail with an EINVAL
-error.
-
-7.2 KVM_CAP_S390_USER_SIGP
-
-Architectures: s390
-Parameters: none
-
-This capability controls which SIGP orders will be handled completely in user
-space. With this capability enabled, all fast orders will be handled completely
-in the kernel:
-- SENSE
-- SENSE RUNNING
-- EXTERNAL CALL
-- EMERGENCY SIGNAL
-- CONDITIONAL EMERGENCY SIGNAL
-
-All other orders will be handled completely in user space.
-
-Only privileged operation exceptions will be checked for in the kernel (or even
-in the hardware prior to interception). If this capability is not enabled, the
-old way of handling SIGP orders is used (partially in kernel and user space).
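-
-As with the other capabilities in this section, this one is enabled through
-KVM_ENABLE_CAP (see section 4.37). A minimal sketch, with "vm_fd" assumed to
-be an open VM file descriptor:
-
-struct kvm_enable_cap cap = {
-	.cap = KVM_CAP_S390_USER_SIGP,
-};
-
-if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
-	/* the capability is unsupported or the arguments are invalid */;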
-
-7.3 KVM_CAP_S390_VECTOR_REGISTERS
-
-Architectures: s390
-Parameters: none
-Returns: 0 on success, negative value on error
-
-Allows use of the vector registers introduced with the z13 processor, and
-provides for the synchronization between host and user space. Will
-return -EINVAL if the machine does not support vectors.
-
-7.4 KVM_CAP_S390_USER_STSI
-
-Architectures: s390
-Parameters: none
-
-This capability allows post-handlers for the STSI instruction. After
-initial handling in the kernel, KVM exits to user space with
-KVM_EXIT_S390_STSI to allow user space to insert further data.
-
-Before exiting to userspace, kvm handlers should fill in s390_stsi field of
-vcpu->run:
-struct {
-	__u64 addr;
-	__u8 ar;
-	__u8 reserved;
-	__u8 fc;
-	__u8 sel1;
-	__u16 sel2;
-} s390_stsi;
-
-@addr - guest address of STSI SYSIB
-@fc   - function code
-@sel1 - selector 1
-@sel2 - selector 2
-@ar   - access register number
-
-KVM handlers should exit to userspace with rc = -EREMOTE.
-
-7.5 KVM_CAP_SPLIT_IRQCHIP
-
-Architectures: x86
-Parameters: args[0] - number of routes reserved for userspace IOAPICs
-Returns: 0 on success, -1 on error
-
-Create a local apic for each processor in the kernel. This can be used
-instead of KVM_CREATE_IRQCHIP if the userspace VMM wishes to emulate the
-IOAPIC and PIC (and also the PIT, even though this has to be enabled
-separately).
-
-This capability also enables in kernel routing of interrupt requests;
-when KVM_CAP_SPLIT_IRQCHIP is enabled, only routes of KVM_IRQ_ROUTING_MSI
-type are used in the IRQ routing table. The first args[0] MSI routes are
-reserved for the IOAPIC pins. Whenever the LAPIC receives an EOI for these
-routes, a KVM_EXIT_IOAPIC_EOI vmexit will be reported to userspace.
-
-Fails if VCPU has already been created, or if the irqchip is already in the
-kernel (i.e. KVM_CREATE_IRQCHIP has already been called).
-
-7.6 KVM_CAP_S390_RI
-
-Architectures: s390
-Parameters: none
-
-Allows use of runtime-instrumentation introduced with the zEC12 processor.
-Will return -EINVAL if the machine does not support runtime-instrumentation.
-Will return -EBUSY if a VCPU has already been created.
-
-7.7 KVM_CAP_X2APIC_API
-
-Architectures: x86
-Parameters: args[0] - features that should be enabled
-Returns: 0 on success, -EINVAL when args[0] contains invalid features
-
-Valid feature flags in args[0] are
-
-#define KVM_X2APIC_API_USE_32BIT_IDS            (1ULL << 0)
-#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK  (1ULL << 1)
-
-Enabling KVM_X2APIC_API_USE_32BIT_IDS changes the behavior of
-KVM_SET_GSI_ROUTING, KVM_SIGNAL_MSI, KVM_SET_LAPIC, and KVM_GET_LAPIC,
-allowing the use of 32-bit APIC IDs. See KVM_CAP_X2APIC_API in their
-respective sections.
-
-KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK must be enabled for x2APIC to work
-in logical mode or with more than 255 VCPUs. Otherwise, KVM treats 0xff
-as a broadcast even in x2APIC mode in order to support physical x2APIC
-without interrupt remapping. This is undesirable in logical mode,
-where 0xff represents CPUs 0-7 in cluster 0.
-
-7.8 KVM_CAP_S390_USER_INSTR0
-
-Architectures: s390
-Parameters: none
-
-With this capability enabled, all illegal instructions 0x0000 (2 bytes) will
-be intercepted and forwarded to user space. User space can use this
-mechanism e.g. to realize 2-byte software breakpoints. The kernel will
-not inject an operation exception for these instructions; user space has
-to take care of that.
-
-This capability can be enabled dynamically even if VCPUs were already
-created and are running.
-
-7.9 KVM_CAP_S390_GS
-
-Architectures: s390
-Parameters: none
-Returns: 0 on success; -EINVAL if the machine does not support
-         guarded storage; -EBUSY if a VCPU has already been created.
-
-Allows use of guarded storage for the KVM guest.
-
-7.10 KVM_CAP_S390_AIS
-
-Architectures: s390
-Parameters: none
-
-Allows use of adapter-interruption suppression.
-Returns: 0 on success; -EBUSY if a VCPU has already been created.
-
-7.11 KVM_CAP_PPC_SMT
-
-Architectures: ppc
-Parameters: vsmt_mode, flags
-
-Enabling this capability on a VM provides userspace with a way to set
-the desired virtual SMT mode (i.e. the number of virtual CPUs per
-virtual core).  The virtual SMT mode, vsmt_mode, must be a power of 2
-between 1 and 8.  On POWER8, vsmt_mode must also be no greater than
-the number of threads per subcore for the host.  Currently flags must
-be 0.  A successful call to enable this capability will result in
-vsmt_mode being returned when the KVM_CAP_PPC_SMT capability is
-subsequently queried for the VM.  This capability is only supported by
-HV KVM, and can only be set before any VCPUs have been created.
-The KVM_CAP_PPC_SMT_POSSIBLE capability indicates which virtual SMT
-modes are available.
-
-7.12 KVM_CAP_PPC_FWNMI
-
-Architectures: ppc
-Parameters: none
-
-With this capability a machine check exception in the guest address
-space will cause KVM to exit the guest with NMI exit reason.  This
-enables QEMU to build an error log and branch to the guest kernel's
-registered machine check handling routine.  Without this capability
-KVM will branch to the guest's 0x200 interrupt vector.
-
-7.13 KVM_CAP_X86_DISABLE_EXITS
-
-Architectures: x86
-Parameters: args[0] defines which exits are disabled
-Returns: 0 on success, -EINVAL when args[0] contains invalid exits
-
-Valid bits in args[0] are
-
-#define KVM_X86_DISABLE_EXITS_MWAIT            (1 << 0)
-#define KVM_X86_DISABLE_EXITS_HLT              (1 << 1)
-#define KVM_X86_DISABLE_EXITS_PAUSE            (1 << 2)
-#define KVM_X86_DISABLE_EXITS_CSTATE           (1 << 3)
-
-Enabling this capability on a VM provides userspace with a way to no
-longer intercept some instructions for improved latency in some
-workloads, and is suggested when vCPUs are associated to dedicated
-physical CPUs.  More bits can be added in the future; userspace can
-just pass the KVM_CHECK_EXTENSION result to KVM_ENABLE_CAP to disable
-all such vmexits.
-
-Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
-
-7.14 KVM_CAP_S390_HPAGE_1M
-
-Architectures: s390
-Parameters: none
-Returns: 0 on success, -EINVAL if hpage module parameter was not set
-         or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL
-         flag set
-
-With this capability the KVM support for memory backing with 1m pages
-through hugetlbfs can be enabled for a VM. After the capability is
-enabled, cmma can't be enabled anymore and pfmfi and the storage key
-interpretation are disabled. If cmma has already been enabled or the
-hpage module parameter is not set to 1, -EINVAL is returned.
-
-While it is generally possible to create a huge page backed VM without
-this capability, the VM will not be able to run.
-
-7.15 KVM_CAP_MSR_PLATFORM_INFO
-
-Architectures: x86
-Parameters: args[0] whether feature should be enabled or not
-
-With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise,
-a #GP would be raised when the guest tries to access it. Currently, this
-capability does not enable write permissions of this MSR for the guest.
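As 7.13 notes, the KVM_CHECK_EXTENSION result can be passed straight back to
KVM_ENABLE_CAP to disable every exit this kernel supports; a sketch (kvm_fd
being the open /dev/kvm descriptor and vm_fd the VM descriptor, both
assumptions of this example):

    /* Sketch: disable all intercepts the kernel allows us to, as
     * suggested when vCPUs are pinned to dedicated physical CPUs. */
    static int disable_all_exits(int kvm_fd, int vm_fd)
    {
            int allowed;
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_X86_DISABLE_EXITS,
            };

            allowed = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
                            KVM_CAP_X86_DISABLE_EXITS);
            if (allowed <= 0)
                    return allowed;        /* capability absent */

            cap.args[0] = allowed;
            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }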
-
-7.16 KVM_CAP_PPC_NESTED_HV
-
-Architectures: ppc
-Parameters: none
-Returns: 0 on success, -EINVAL when the implementation doesn't support
-         nested-HV virtualization.
-
-HV-KVM on POWER9 and later systems allows for "nested-HV"
-virtualization, which provides a way for a guest VM to run guests that
-can run using the CPU's supervisor mode (privileged non-hypervisor
-state).  Enabling this capability on a VM depends on the CPU having
-the necessary functionality and on the facility being enabled with a
-kvm-hv module parameter.
-
-7.17 KVM_CAP_EXCEPTION_PAYLOAD
-
-Architectures: x86
-Parameters: args[0] whether feature should be enabled or not
-
-With this capability enabled, CR2 will not be modified prior to the
-emulated VM-exit when L1 intercepts a #PF exception that occurs in
-L2. Similarly, for kvm-intel only, DR6 will not be modified prior to
-the emulated VM-exit when L1 intercepts a #DB exception that occurs in
-L2. As a result, when KVM_GET_VCPU_EVENTS reports a pending #PF (or
-#DB) exception for L2, exception.has_payload will be set and the
-faulting address (or the new DR6 bits*) will be reported in the
-exception_payload field. Similarly, when userspace injects a #PF (or
-#DB) into L2 using KVM_SET_VCPU_EVENTS, it is expected to set
-exception.has_payload and to put the faulting address (or the new DR6
-bits*) in the exception_payload field.
-
-This capability also enables exception.pending in struct
-kvm_vcpu_events, which allows userspace to distinguish between pending
-and injected exceptions.
-
-
-* For the new DR6 bits, note that bit 16 is set iff the #DB exception
-  will clear DR6.RTM.
-
-7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
-
-Architectures: x86, arm, arm64, mips
-Parameters: args[0] whether feature should be enabled or not
-
-With this capability enabled, KVM_GET_DIRTY_LOG will not automatically
-clear and write-protect all pages that are returned as dirty.
-Rather, userspace will have to do this operation separately using
-KVM_CLEAR_DIRTY_LOG.
-
-At the cost of a slightly more complicated operation, this provides better
-scalability and responsiveness for two reasons.  First,
-KVM_CLEAR_DIRTY_LOG ioctl can operate on a 64-page granularity rather
-than requiring to sync a full memslot; this ensures that KVM does not
-take spinlocks for an extended period of time.  Second, in some cases a
-large amount of time can pass between a call to KVM_GET_DIRTY_LOG and
-userspace actually using the data in the page.  Pages can be modified
-during this time, which is inefficient for both the guest and userspace:
-the guest will incur a higher penalty due to write protection faults,
-while userspace can see false reports of dirty pages.  Manual reprotection
-helps reduce this time, improving guest performance and reducing the
-number of dirty log false positives.
-
-KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 was previously available under the name
-KVM_CAP_MANUAL_DIRTY_LOG_PROTECT, but the implementation had bugs that make
-it hard or impossible to use it correctly.  The availability of
-KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 signals that those bugs are fixed.
-Userspace should not try to use KVM_CAP_MANUAL_DIRTY_LOG_PROTECT.
-
-8. Other capabilities.
-----------------------
-
-This section lists capabilities that give information about other
-features of the KVM implementation.
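The informational capabilities below are only ever queried, never enabled;
probing one reduces to a single ioctl (a sketch, with kvm_fd assumed to be an
open /dev/kvm descriptor and KVM_CAP_PPC_HWRNG chosen purely as an example):

    /* Sketch: a positive return value means the capability is present. */
    int supported = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_HWRNG);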
-
-8.1 KVM_CAP_PPC_HWRNG
-
-Architectures: ppc
-
-This capability, if KVM_CHECK_EXTENSION indicates that it is
-available, means that the kernel has an implementation of the
-H_RANDOM hypercall backed by a hardware random-number generator.
-If present, the kernel H_RANDOM handler can be enabled for guest use
-with the KVM_CAP_PPC_ENABLE_HCALL capability.
-
-8.2 KVM_CAP_HYPERV_SYNIC
-
-Architectures: x86
-This capability, if KVM_CHECK_EXTENSION indicates that it is
-available, means that the kernel has an implementation of the
-Hyper-V Synthetic interrupt controller (SynIC).  Hyper-V SynIC is
-used to support Windows Hyper-V based guest paravirt drivers (VMBus).
-
-In order to use SynIC, it has to be activated by setting this
-capability via KVM_ENABLE_CAP ioctl on the vcpu fd. Note that this
-will disable the use of APIC hardware virtualization even if supported
-by the CPU, as it's incompatible with SynIC auto-EOI behavior.
-
-8.3 KVM_CAP_PPC_RADIX_MMU
-
-Architectures: ppc
-
-This capability, if KVM_CHECK_EXTENSION indicates that it is
-available, means that the kernel can support guests using the
-radix MMU defined in Power ISA V3.00 (as implemented in the POWER9
-processor).
-
-8.4 KVM_CAP_PPC_HASH_MMU_V3
-
-Architectures: ppc
-
-This capability, if KVM_CHECK_EXTENSION indicates that it is
-available, means that the kernel can support guests using the
-hashed page table MMU defined in Power ISA V3.00 (as implemented in
-the POWER9 processor), including in-memory segment tables.
-
-8.5 KVM_CAP_MIPS_VZ
-
-Architectures: mips
-
-This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
-it is available, means that full hardware assisted virtualization capabilities
-of the hardware are available for use through KVM.  An appropriate
-KVM_VM_MIPS_* type must be passed to KVM_CREATE_VM to create a VM which
-utilises it.
-
-If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
-available, it means that the VM is using full hardware assisted virtualization
-capabilities of the hardware.  This is useful to check after creating a VM
-with KVM_VM_MIPS_DEFAULT.
-
-The value returned by KVM_CHECK_EXTENSION should be compared against known
-values (see below).  All other values are reserved.  This is to allow for the
-possibility of other hardware assisted virtualization implementations which
-may be incompatible with the MIPS VZ ASE.
-
- 0: The trap & emulate implementation is in use to run guest code in user
-    mode.  Guest virtual memory segments are rearranged to fit the guest in
-    the user mode address space.
-
- 1: The MIPS VZ ASE is in use, providing full hardware assisted
-    virtualization, including standard guest virtual memory segments.
-
-8.6 KVM_CAP_MIPS_TE
-
-Architectures: mips
-
-This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
-it is available, means that the trap & emulate implementation is available to
-run guest code in user mode, even if KVM_CAP_MIPS_VZ indicates that hardware
-assisted virtualisation is also available.  KVM_VM_MIPS_TE (0) must be passed
-to KVM_CREATE_VM to create a VM which utilises it.
-
-If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
-available, it means that the VM is using trap & emulate.
-
-8.7 KVM_CAP_MIPS_64BIT
-
-Architectures: mips
-
-This capability indicates the supported architecture type of the guest, i.e.
-the supported register and address width.
-
-The values returned when this capability is checked by KVM_CHECK_EXTENSION
-on a kvm VM handle correspond roughly to the CP0_Config.AT register field,
-and should be checked specifically against known values (see below).  All
-other values are reserved.
-
- 0: MIPS32 or microMIPS32.
-    Both registers and addresses are 32-bits wide.
-    It will only be possible to run 32-bit guest code.
-
- 1: MIPS64 or microMIPS64 with access only to 32-bit compatibility segments.
-    Registers are 64-bits wide, but addresses are 32-bits wide.
-    64-bit guest code may run but cannot access MIPS64 memory segments.
-    It will also be possible to run 32-bit guest code.
-
- 2: MIPS64 or microMIPS64 with access to all address segments.
-    Both registers and addresses are 64-bits wide.
-    It will be possible to run 64-bit or 32-bit guest code.
-
-8.9 KVM_CAP_ARM_USER_IRQ
-
-Architectures: arm, arm64
-This capability, if KVM_CHECK_EXTENSION indicates that it is available, means
-that if userspace creates a VM without an in-kernel interrupt controller, it
-will be notified of changes to the output level of in-kernel emulated devices,
-which can generate virtual interrupts, presented to the VM.
-For such VMs, on every return to userspace, the kernel
-updates the vcpu's run->s.regs.device_irq_level field to represent the actual
-output level of the device.
-
-Whenever kvm detects a change in the device output level, kvm guarantees at
-least one return to userspace before running the VM.  This exit could either
-be a KVM_EXIT_INTR or any other exit event, like KVM_EXIT_MMIO. This way,
-userspace can always sample the device output level and re-compute the state of
-the userspace interrupt controller.  Userspace should always check the state
-of run->s.regs.device_irq_level on every kvm exit.
-The value in run->s.regs.device_irq_level can represent both level and edge
-triggered interrupt signals, depending on the device.  Edge triggered interrupt
-signals will exit to userspace with the bit in run->s.regs.device_irq_level
-set exactly once per edge signal.
-
-The field run->s.regs.device_irq_level is available independent of
-run->kvm_valid_regs or run->kvm_dirty_regs bits.
-
-If KVM_CAP_ARM_USER_IRQ is supported, the KVM_CHECK_EXTENSION ioctl returns a
-number larger than 0 indicating the version of this capability is implemented
-and thereby which bits in run->s.regs.device_irq_level can signal values.
-
-Currently the following bits are defined for the device_irq_level bitmap:
-
-  KVM_CAP_ARM_USER_IRQ >= 1:
-
-    KVM_ARM_DEV_EL1_VTIMER -  EL1 virtual timer
-    KVM_ARM_DEV_EL1_PTIMER -  EL1 physical timer
-    KVM_ARM_DEV_PMU        -  ARM PMU overflow interrupt signal
-
-Future versions of kvm may implement additional events. These will get
-indicated by returning a higher number from KVM_CHECK_EXTENSION and will be
-listed above.
-
-8.10 KVM_CAP_PPC_SMT_POSSIBLE
-
-Architectures: ppc
-
-Querying this capability returns a bitmap indicating the possible
-virtual SMT modes that can be set using KVM_CAP_PPC_SMT.  If bit N
-(counting from the right) is set, then a virtual SMT mode of 2^N is
-available.
-
-8.11 KVM_CAP_HYPERV_SYNIC2
-
-Architectures: x86
-
-This capability enables a newer version of Hyper-V Synthetic interrupt
-controller (SynIC).  The only difference with KVM_CAP_HYPERV_SYNIC is that KVM
-doesn't clear SynIC message and event flags pages when they are enabled by
-writing to the respective MSRs.
-
-8.12 KVM_CAP_HYPERV_VP_INDEX
-
-Architectures: x86
-
-This capability indicates that userspace can load the HV_X64_MSR_VP_INDEX msr.
-Its value is used to denote the target vcpu for a SynIC interrupt.  For
-compatibility, KVM initializes this msr to KVM's internal vcpu index.  When
-this capability is absent, userspace can still query this msr's value.
-
-8.13 KVM_CAP_S390_AIS_MIGRATION
-
-Architectures: s390
-Parameters: none
-
-This capability indicates if the flic device will be able to get/set the
-AIS states for migration via the KVM_DEV_FLIC_AISM_ALL attribute and allows
-userspace to discover this without having to create a flic device.
-
-8.14 KVM_CAP_S390_PSW
-
-Architectures: s390
-
-This capability indicates that the PSW is exposed via the kvm_run structure.
-
-8.15 KVM_CAP_S390_GMAP
-
-Architectures: s390
-
-This capability indicates that the user space memory used as guest mapping can
-be anywhere in the user memory address space, as long as the memory slots are
-aligned and sized to a segment (1MB) boundary.
-
-8.16 KVM_CAP_S390_COW
-
-Architectures: s390
-
-This capability indicates that the user space memory used as guest mapping can
-use copy-on-write semantics as well as dirty pages tracking via read-only page
-tables.
-
-8.17 KVM_CAP_S390_BPB
-
-Architectures: s390
-
-This capability indicates that kvm will implement the interfaces to handle
-reset, migration and nested KVM for branch prediction blocking. The stfle
-facility 82 should not be provided to the guest without this capability.
-
-8.18 KVM_CAP_HYPERV_TLBFLUSH
-
-Architectures: x86
-
-This capability indicates that KVM supports paravirtualized Hyper-V TLB Flush
-hypercalls:
-HvFlushVirtualAddressSpace, HvFlushVirtualAddressSpaceEx,
-HvFlushVirtualAddressList, HvFlushVirtualAddressListEx.
-
-8.19 KVM_CAP_ARM_INJECT_SERROR_ESR
-
-Architectures: arm, arm64
-
-This capability indicates that userspace can specify (via the
-KVM_SET_VCPU_EVENTS ioctl) the syndrome value reported to the guest when it
-takes a virtual SError interrupt exception.
-If KVM advertises this capability, userspace can only specify the ISS field
-for the ESR syndrome. Other parts of the ESR, such as the EC, are generated
-by the CPU when the exception is taken. If this virtual SError is taken to
-EL1 using AArch64, this value will be reported in the ISS field of ESR_ELx.
-
-See KVM_CAP_VCPU_EVENTS for more details.
-
-8.20 KVM_CAP_HYPERV_SEND_IPI
-
-Architectures: x86
-
-This capability indicates that KVM supports paravirtualized Hyper-V IPI send
-hypercalls:
-HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx.
diff --git a/Documentation/virtual/kvm/arm/hyp-abi.txt b/Documentation/virtual/kvm/arm/hyp-abi.txt
deleted file mode 100644
index a20a0bee268d..000000000000
--- a/Documentation/virtual/kvm/arm/hyp-abi.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-* Internal ABI between the kernel and HYP
-
-This file documents the interaction between the Linux kernel and the
-hypervisor layer when running Linux as a hypervisor (for example
-KVM). It doesn't cover the interaction of the kernel with the
-hypervisor when running as a guest (under Xen, KVM or any other
-hypervisor), or any hypervisor-specific interaction when the kernel is
-used as a host.
-
-On arm and arm64 (without VHE), the kernel doesn't run in hypervisor
-mode, but still needs to interact with it, allowing a built-in
-hypervisor to be either installed or torn down.
-
-In order to achieve this, the kernel must be booted at HYP (arm) or
-EL2 (arm64), allowing it to install a set of stubs before dropping to
-SVC/EL1.  These stubs are accessible by using a 'hvc #0' instruction,
-and only act on individual CPUs.
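Because the stubs may clobber caller-saved registers (see the note at the end
of this file), a C wrapper around the hypercall is the usual approach; here is
a minimal arm64 sketch of such a call, assuming the function IDs listed below
from asm/virt.h:

    /* Sketch: issue a stub hypercall.  x0 carries the function ID in and
     * the result (0 or HVC_STUB_ERR) out; x1 carries one argument. */
    static unsigned long hvc_stub_call(unsigned long fn, unsigned long arg)
    {
            register unsigned long x0 asm("x0") = fn;
            register unsigned long x1 asm("x1") = arg;

            asm volatile("hvc #0"
                         : "+r" (x0)
                         : "r" (x1)
                         : "x2", "x3", "x4", "x5", "x6", "x7", "x8",
                           "x9", "x10", "x11", "x12", "x13", "x14",
                           "x15", "x16", "x17", "x18", "memory");
            return x0;
    }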
-
-Unless specified otherwise, any built-in hypervisor must implement
-these functions (see arch/arm{,64}/include/asm/virt.h):
-
-* r0/x0 = HVC_SET_VECTORS
-  r1/x1 = vectors
-
-  Set HVBAR/VBAR_EL2 to 'vectors' to enable a hypervisor. 'vectors'
-  must be a physical address, and respect the alignment requirements
-  of the architecture. Only implemented by the initial stubs, not by
-  Linux hypervisors.
-
-* r0/x0 = HVC_RESET_VECTORS
-
-  Turn HYP/EL2 MMU off, and reset HVBAR/VBAR_EL2 to the initial
-  stubs' exception vector value. This effectively disables an existing
-  hypervisor.
-
-* r0/x0 = HVC_SOFT_RESTART
-  r1/x1 = restart address
-  x2 = x0's value when entering the next payload (arm64)
-  x3 = x1's value when entering the next payload (arm64)
-  x4 = x2's value when entering the next payload (arm64)
-
-  Mask all exceptions, disable the MMU, move the arguments into place
-  (arm64 only), and jump to the restart address while at HYP/EL2. This
-  hypercall is not expected to return to its caller.
-
-Any other value of r0/x0 triggers a hypervisor-specific handling,
-which is not documented here.
-
-The return value of a stub hypercall is held by r0/x0, and is 0 on
-success, and HVC_STUB_ERR on error. A stub hypercall is allowed to
-clobber any of the caller-saved registers (x0-x18 on arm64, r0-r3 and
-ip on arm). It is thus recommended to use a function call to perform
-the hypercall.
diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virtual/kvm/arm/psci.txt
deleted file mode 100644
index 559586fc9d37..000000000000
--- a/Documentation/virtual/kvm/arm/psci.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-KVM implements the PSCI (Power State Coordination Interface)
-specification in order to provide services such as CPU on/off, reset
-and power-off to the guest.
-
-The PSCI specification is regularly updated to provide new features,
-and KVM implements these updates if they make sense from a virtualization
-point of view.
-
-This means that a guest booted on two different versions of KVM can
-observe two different "firmware" revisions. This could cause issues if
-a given guest is tied to a particular PSCI revision (unlikely), or if
-a migration causes a different PSCI version to be exposed out of the
-blue to an unsuspecting guest.
-
-In order to remedy this situation, KVM exposes a set of "firmware
-pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
-interface. These registers can be saved/restored by userspace, and set
-to a convenient value if required.
-
-The following register is defined:
-
-* KVM_REG_ARM_PSCI_VERSION:
-
-  - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
-    (and thus has already been initialized)
-  - Returns the current PSCI version on GET_ONE_REG (defaulting to the
-    highest PSCI version implemented by KVM and compatible with v0.2)
-  - Allows any PSCI version implemented by KVM and compatible with
-    v0.2 to be set with SET_ONE_REG
-  - Affects the whole VM (even if the register view is per-vcpu)
-
-* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
-  Holds the state of the firmware support to mitigate CVE-2017-5715, as
-  offered by KVM to the guest via a HVC call. The workaround is described
-  under SMCCC_ARCH_WORKAROUND_1 in [1].
-  Accepted values are:
-    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL: KVM does not offer
-      firmware support for the workaround. The mitigation status for the
-      guest is unknown.
-    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL: The workaround HVC call is
-      available to the guest and required for the mitigation.
-    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED: The workaround HVC call
-      is available to the guest, but it is not needed on this VCPU.
-
-* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
-  Holds the state of the firmware support to mitigate CVE-2018-3639, as
-  offered by KVM to the guest via a HVC call. The workaround is described
-  under SMCCC_ARCH_WORKAROUND_2 in [1].
-  Accepted values are:
-    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL: A workaround is not
-      available. KVM does not offer firmware support for the workaround.
-    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN: The workaround state is
-      unknown. KVM does not offer firmware support for the workaround.
-    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL: The workaround is available,
-      and can be disabled by a vCPU. If
-      KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED is set, it is active for
-      this vCPU.
-    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED: The workaround is
-      always active on this vCPU or it is not needed.
-
-[1] https://developer.arm.com/-/media/developer/pdf/ARM_DEN_0070A_Firmware_interfaces_for_mitigating_CVE-2017-5715.pdf
diff --git a/Documentation/virtual/kvm/cpuid.rst b/Documentation/virtual/kvm/cpuid.rst
deleted file mode 100644
index 01b081f6e7ea..000000000000
--- a/Documentation/virtual/kvm/cpuid.rst
+++ /dev/null
@@ -1,107 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-==============
-KVM CPUID bits
-==============
-
-:Author: Glauber Costa
-
-A guest running on a kvm host can check some of its features using
-cpuid. This is not always guaranteed to work, since userspace can
-mask-out some, or even all, KVM-related cpuid features before launching
-a guest.
-
-KVM cpuid functions are:
-
-function: KVM_CPUID_SIGNATURE (0x40000000)
-
-returns::
-
-   eax = 0x40000001
-   ebx = 0x4b4d564b
-   ecx = 0x564b4d56
-   edx = 0x4d
-
-Note that this value in ebx, ecx and edx corresponds to the string
-"KVMKVMKVM".  The value in eax corresponds to the maximum cpuid function
-present in this leaf, and will be updated if more functions are added in
-the future.  Note also that old hosts set the eax value to 0x0.  This
-should be interpreted as if the value was 0x40000001.  This function
-queries the presence of KVM cpuid leaves.
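As an aside, a guest can probe for this leaf with a few lines of C (a sketch
only; a robust implementation would first check the hypervisor-present bit,
CPUID.1:ECX[31], which is omitted here for brevity):

    #include <cpuid.h>
    #include <string.h>

    /* Sketch: read leaf 0x40000000 and compare the 12-byte signature
     * assembled from ebx/ecx/edx against "KVMKVMKVM\0\0\0". */
    static int running_on_kvm(void)
    {
            unsigned int eax, ebx, ecx, edx;
            char sig[12];

            __cpuid(0x40000000, eax, ebx, ecx, edx);
            memcpy(sig + 0, &ebx, 4);
            memcpy(sig + 4, &ecx, 4);
            memcpy(sig + 8, &edx, 4);

            return memcmp(sig, "KVMKVMKVM\0\0\0", 12) == 0;
    }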
-
-function: KVM_CPUID_FEATURES (0x40000001)
-
-returns::
-
-   ebx, ecx
-   eax = an OR'ed group of (1 << flag)
-
-where ``flag`` is defined as below:
-
-================================== =========== ================================
-flag                               value       meaning
-================================== =========== ================================
-KVM_FEATURE_CLOCKSOURCE            0           kvmclock available at msrs
-                                               0x11 and 0x12
-
-KVM_FEATURE_NOP_IO_DELAY           1           not necessary to perform delays
-                                               on PIO operations
-
-KVM_FEATURE_MMU_OP                 2           deprecated
-
-KVM_FEATURE_CLOCKSOURCE2           3           kvmclock available at msrs
-                                               0x4b564d00 and 0x4b564d01
-
-KVM_FEATURE_ASYNC_PF               4           async pf can be enabled by
-                                               writing to msr 0x4b564d02
-
-KVM_FEATURE_STEAL_TIME             5           steal time can be enabled by
-                                               writing to msr 0x4b564d03
-
-KVM_FEATURE_PV_EOI                 6           paravirtualized end of interrupt
-                                               handler can be enabled by
-                                               writing to msr 0x4b564d04
-
-KVM_FEATURE_PV_UNHALT              7           guest checks this feature bit
-                                               before enabling paravirtualized
-                                               spinlock support
-
-KVM_FEATURE_PV_TLB_FLUSH           9           guest checks this feature bit
-                                               before enabling paravirtualized
-                                               tlb flush
-
-KVM_FEATURE_ASYNC_PF_VMEXIT        10          paravirtualized async PF VM EXIT
-                                               can be enabled by setting bit 2
-                                               when writing to msr 0x4b564d02
-
-KVM_FEATURE_PV_SEND_IPI            11          guest checks this feature bit
-                                               before enabling paravirtualized
-                                               send IPIs
-
-KVM_FEATURE_PV_POLL_CONTROL        12          host-side polling on HLT can
-                                               be disabled by writing
-                                               to msr 0x4b564d05.
-
-KVM_FEATURE_PV_SCHED_YIELD         13          guest checks this feature bit
-                                               before using paravirtualized
-                                               sched yield.
-
-KVM_FEATURE_CLOCKSOURCE_STABLE_BIT 24          host will warn if no guest-side
-                                               per-cpu warps are expected in
-                                               kvmclock
-================================== =========== ================================
-
-::
-
-   edx = an OR'ed group of (1 << flag)
-
-Where ``flag`` here is defined as below:
-
-================== ============ =================================
-flag               value        meaning
-================== ============ =================================
-KVM_HINTS_REALTIME 0            guest checks this feature bit to
-                                determine that vCPUs are never
-                                preempted for an unlimited time
-                                allowing optimizations
-================== ============ =================================
diff --git a/Documentation/virtual/kvm/devices/README b/Documentation/virtual/kvm/devices/README
deleted file mode 100644
index 34a69834124a..000000000000
--- a/Documentation/virtual/kvm/devices/README
+++ /dev/null
@@ -1 +0,0 @@
-This directory contains specific device bindings for KVM_CAP_DEVICE_CTRL.
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-its.txt b/Documentation/virtual/kvm/devices/arm-vgic-its.txt
deleted file mode 100644
index eeaa95b893a8..000000000000
--- a/Documentation/virtual/kvm/devices/arm-vgic-its.txt
+++ /dev/null
@@ -1,181 +0,0 @@
-ARM Virtual Interrupt Translation Service (ITS)
-===============================================
-
-Device types supported:
-  KVM_DEV_TYPE_ARM_VGIC_ITS    ARM Interrupt Translation Service Controller
-
-The ITS allows MSI(-X) interrupts to be injected into guests. This extension is
-optional.  Creating a virtual ITS controller also requires a host GICv3 (see
-arm-vgic-v3.txt), but does not depend on having physical ITS controllers.
-
-There can be multiple ITS controllers per guest, each of which has to have
-a separate, non-overlapping MMIO region.
-
-
-Groups:
-  KVM_DEV_ARM_VGIC_GRP_ADDR
-  Attributes:
-    KVM_VGIC_ITS_ADDR_TYPE (rw, 64-bit)
-      Base address in the guest physical address space of the GICv3 ITS
-      control register frame.
- This address needs to be 64K aligned and the region covers 128K. - Errors: - -E2BIG: Address outside of addressable IPA range - -EINVAL: Incorrectly aligned address - -EEXIST: Address already configured - -EFAULT: Invalid user pointer for attr->addr. - -ENODEV: Incorrect attribute or the ITS is not supported. - - - KVM_DEV_ARM_VGIC_GRP_CTRL - Attributes: - KVM_DEV_ARM_VGIC_CTRL_INIT - request the initialization of the ITS, no additional parameter in - kvm_device_attr.addr. - - KVM_DEV_ARM_ITS_CTRL_RESET - reset the ITS, no additional parameter in kvm_device_attr.addr. - See "ITS Reset State" section. - - KVM_DEV_ARM_ITS_SAVE_TABLES - save the ITS table data into guest RAM, at the location provisioned - by the guest in corresponding registers/table entries. - - The layout of the tables in guest memory defines an ABI. The entries - are laid out in little endian format as described in the last paragraph. - - KVM_DEV_ARM_ITS_RESTORE_TABLES - restore the ITS tables from guest RAM to ITS internal structures. - - The GICV3 must be restored before the ITS and all ITS registers but - the GITS_CTLR must be restored before restoring the ITS tables. - - The GITS_IIDR read-only register must also be restored before - calling KVM_DEV_ARM_ITS_RESTORE_TABLES as the IIDR revision field - encodes the ABI revision. - - The expected ordering when restoring the GICv3/ITS is described in section - "ITS Restore Sequence". - - Errors: - -ENXIO: ITS not properly configured as required prior to setting - this attribute - -ENOMEM: Memory shortage when allocating ITS internal data - -EINVAL: Inconsistent restored data - -EFAULT: Invalid guest ram access - -EBUSY: One or more VCPUS are running - -EACCES: The virtual ITS is backed by a physical GICv4 ITS, and the - state is not available - - KVM_DEV_ARM_VGIC_GRP_ITS_REGS - Attributes: - The attr field of kvm_device_attr encodes the offset of the - ITS register, relative to the ITS control frame base address - (ITS_base). - - kvm_device_attr.addr points to a __u64 value whatever the width - of the addressed register (32/64 bits). 64 bit registers can only - be accessed with full length. - - Writes to read-only registers are ignored by the kernel except for: - - GITS_CREADR. It must be restored otherwise commands in the queue - will be re-executed after restoring CWRITER. GITS_CREADR must be - restored before restoring the GITS_CTLR which is likely to enable the - ITS. Also it must be restored after GITS_CBASER since a write to - GITS_CBASER resets GITS_CREADR. - - GITS_IIDR. The Revision field encodes the table layout ABI revision. - In the future we might implement direct injection of virtual LPIs. - This will require an upgrade of the table layout and an evolution of - the ABI. GITS_IIDR must be restored before calling - KVM_DEV_ARM_ITS_RESTORE_TABLES. - - For other registers, getting or setting a register has the same - effect as reading/writing the register on real hardware. - Errors: - -ENXIO: Offset does not correspond to any supported register - -EFAULT: Invalid user pointer for attr->addr - -EINVAL: Offset is not 64-bit aligned - -EBUSY: one or more VCPUS are running - - ITS Restore Sequence: - ------------------------- - -The following ordering must be followed when restoring the GIC and the ITS: -a) restore all guest memory and create vcpus -b) restore all redistributors -c) provide the ITS base address - (KVM_DEV_ARM_VGIC_GRP_ADDR) -d) restore the ITS in the following order: - 1. Restore GITS_CBASER - 2. Restore all other GITS_ registers, except GITS_CTLR! 
-   3. Load the ITS table data (KVM_DEV_ARM_ITS_RESTORE_TABLES)
-   4. Restore GITS_CTLR
-
-Then vcpus can be started.
-
- ITS Table ABI REV0:
- -------------------
-
- Revision 0 of the ABI only supports the features of a virtual GICv3, and does
- not support a virtual GICv4 with support for direct injection of virtual
- interrupts for nested hypervisors.
-
- The device table and ITT are indexed by the DeviceID and EventID,
- respectively. The collection table is not indexed by CollectionID, and the
- entries in the collection are listed in no particular order.
- All entries are 8 bytes.
-
- Device Table Entry (DTE):
-
- bits:     | 63| 62 ... 49 | 48 ... 5 | 4 ... 0 |
- values:   | V |   next    | ITT_addr |  Size   |
-
- where:
- - V indicates whether the entry is valid. If not, other fields
-   are not meaningful.
- - next: equals 0 if this entry is the last one; otherwise it
-   corresponds to the DeviceID offset to the next DTE, capped by
-   2^14 - 1.
- - ITT_addr matches bits [51:8] of the ITT address (256 Byte aligned).
- - Size specifies the supported number of bits for the EventID,
-   minus one.
-
- Collection Table Entry (CTE):
-
- bits:     | 63| 62 ..  52  | 51 ... 16 | 15 ... 0 |
- values:   | V |    RES0    |  RDBase   |   ICID   |
-
- where:
- - V indicates whether the entry is valid. If not, other fields are
-   not meaningful.
- - RES0: reserved field with Should-Be-Zero-or-Preserved behavior.
- - RDBase is the PE number (GICR_TYPER.Processor_Number semantic),
- - ICID is the collection ID
-
- Interrupt Translation Entry (ITE):
-
- bits:     | 63 ... 48 | 47 ... 16 | 15 ... 0 |
- values:   |    next   |   pINTID  |   ICID   |
-
- where:
- - next: equals 0 if this entry is the last one; otherwise it corresponds
-   to the EventID offset to the next ITE, capped by 2^16 - 1.
- - pINTID is the physical LPI ID; if zero, it means the entry is not valid
-   and other fields are not meaningful.
- - ICID is the collection ID
-
- ITS Reset State:
- ----------------
-
-RESET returns the ITS to the same state that it was when first created and
-initialized. When the RESET command returns, the following things are
-guaranteed:
-
-- The ITS is not enabled and quiescent
-  GITS_CTLR.Enabled = 0, GITS_CTLR.Quiescent = 1
-- There is no internally cached state
-- No collection or device table are used
-  GITS_BASER<n>.Valid = 0
-- GITS_CBASER = 0, GITS_CREADR = 0, GITS_CWRITER = 0
-- The ABI version is unchanged and remains the one set when the ITS
-  device was first created.
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt b/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
deleted file mode 100644
index ff290b43c8e5..000000000000
--- a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
+++ /dev/null
@@ -1,251 +0,0 @@
-ARM Virtual Generic Interrupt Controller v3 and later (VGICv3)
-==============================================================
-
-
-Device types supported:
-  KVM_DEV_TYPE_ARM_VGIC_V3     ARM Generic Interrupt Controller v3.0
-
-Only one VGIC instance may be instantiated through this API. The created VGIC
-will act as the VM interrupt controller, requiring emulated user-space devices
-to inject interrupts to the VGIC instead of directly to CPUs. It is not
-possible to create both a GICv3 and GICv2 on the same VM.
-
-Creating a guest GICv3 device requires a host GICv3 as well.
-
-
-Groups:
-  KVM_DEV_ARM_VGIC_GRP_ADDR
-   Attributes:
-    KVM_VGIC_V3_ADDR_TYPE_DIST (rw, 64-bit)
-      Base address in the guest physical address space of the GICv3 distributor
-      register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
- This address needs to be 64K aligned and the region covers 64 KByte. - - KVM_VGIC_V3_ADDR_TYPE_REDIST (rw, 64-bit) - Base address in the guest physical address space of the GICv3 - redistributor register mappings. There are two 64K pages for each - VCPU and all of the redistributor pages are contiguous. - Only valid for KVM_DEV_TYPE_ARM_VGIC_V3. - This address needs to be 64K aligned. - - KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION (rw, 64-bit) - The attribute data pointed to by kvm_device_attr.addr is a __u64 value: - bits: | 63 .... 52 | 51 .... 16 | 15 - 12 |11 - 0 - values: | count | base | flags | index - - index encodes the unique redistributor region index - - flags: reserved for future use, currently 0 - - base field encodes bits [51:16] of the guest physical base address - of the first redistributor in the region. - - count encodes the number of redistributors in the region. Must be - greater than 0. - There are two 64K pages for each redistributor in the region and - redistributors are laid out contiguously within the region. Regions - are filled with redistributors in the index order. The sum of all - region count fields must be greater than or equal to the number of - VCPUs. Redistributor regions must be registered in the incremental - index order, starting from index 0. - The characteristics of a specific redistributor region can be read - by presetting the index field in the attr data. - Only valid for KVM_DEV_TYPE_ARM_VGIC_V3. - - It is invalid to mix calls with KVM_VGIC_V3_ADDR_TYPE_REDIST and - KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION attributes. - - Errors: - -E2BIG: Address outside of addressable IPA range - -EINVAL: Incorrectly aligned address, bad redistributor region - count/index, mixed redistributor region attribute usage - -EEXIST: Address already configured - -ENOENT: Attempt to read the characteristics of a non existing - redistributor region - -ENXIO: The group or attribute is unknown/unsupported for this device - or hardware support is missing. - -EFAULT: Invalid user pointer for attr->addr. - - - KVM_DEV_ARM_VGIC_GRP_DIST_REGS - KVM_DEV_ARM_VGIC_GRP_REDIST_REGS - Attributes: - The attr field of kvm_device_attr encodes two values: - bits: | 63 .... 32 | 31 .... 0 | - values: | mpidr | offset | - - All distributor regs are (rw, 32-bit) and kvm_device_attr.addr points to a - __u32 value. 64-bit registers must be accessed by separately accessing the - lower and higher word. - - Writes to read-only registers are ignored by the kernel. - - KVM_DEV_ARM_VGIC_GRP_DIST_REGS accesses the main distributor registers. - KVM_DEV_ARM_VGIC_GRP_REDIST_REGS accesses the redistributor of the CPU - specified by the mpidr. - - The offset is relative to the "[Re]Distributor base address" as defined - in the GICv3/4 specs. Getting or setting such a register has the same - effect as reading or writing the register on real hardware, except for the - following registers: GICD_STATUSR, GICR_STATUSR, GICD_ISPENDR, - GICR_ISPENDR0, GICD_ICPENDR, and GICR_ICPENDR0. These registers behave - differently when accessed via this interface compared to their - architecturally defined behavior to allow software a full view of the - VGIC's internal state. - - The mpidr field is used to specify which - redistributor is accessed. The mpidr is ignored for the distributor. - - The mpidr encoding is based on the affinity information in the - architecture defined MPIDR, and the field is encoded as follows: - | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 
32 | - | Aff3 | Aff2 | Aff1 | Aff0 | - - Note that distributor fields are not banked, but return the same value - regardless of the mpidr used to access the register. - - GICD_IIDR.Revision is updated when the KVM implementation is changed in a - way directly observable by the guest or userspace. Userspace should read - GICD_IIDR from KVM and write back the read value to confirm its expected - behavior is aligned with the KVM implementation. Userspace should set - GICD_IIDR before setting any other registers to ensure the expected - behavior. - - - The GICD_STATUSR and GICR_STATUSR registers are architecturally defined such - that a write of a clear bit has no effect, whereas a write with a set bit - clears that value. To allow userspace to freely set the values of these two - registers, setting the attributes with the register offsets for these two - registers simply sets the non-reserved bits to the value written. - - - Accesses (reads and writes) to the GICD_ISPENDR register region and - GICR_ISPENDR0 registers get/set the value of the latched pending state for - the interrupts. - - This is identical to the value returned by a guest read from ISPENDR for an - edge triggered interrupt, but may differ for level triggered interrupts. - For edge triggered interrupts, once an interrupt becomes pending (whether - because of an edge detected on the input line or because of a guest write - to ISPENDR) this state is "latched", and only cleared when either the - interrupt is activated or when the guest writes to ICPENDR. A level - triggered interrupt may be pending either because the level input is held - high by a device, or because of a guest write to the ISPENDR register. Only - ISPENDR writes are latched; if the device lowers the line level then the - interrupt is no longer pending unless the guest also wrote to ISPENDR, and - conversely writes to ICPENDR or activations of the interrupt do not clear - the pending status if the line level is still being held high. (These - rules are documented in the GICv3 specification descriptions of the ICPENDR - and ISPENDR registers.) For a level triggered interrupt the value accessed - here is that of the latch which is set by ISPENDR and cleared by ICPENDR or - interrupt activation, whereas the value returned by a guest read from - ISPENDR is the logical OR of the latch value and the input line level. - - Raw access to the latch state is provided to userspace so that it can save - and restore the entire GIC internal state (which is defined by the - combination of the current input line level and the latch state, and cannot - be deduced from purely the line level and the value of the ISPENDR - registers). - - Accesses to GICD_ICPENDR register region and GICR_ICPENDR0 registers have - RAZ/WI semantics, meaning that reads always return 0 and writes are always - ignored. - - Errors: - -ENXIO: Getting or setting this register is not yet supported - -EBUSY: One or more VCPUs are running - - - KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS - Attributes: - The attr field of kvm_device_attr encodes two values: - bits: | 63 .... 32 | 31 .... 16 | 15 .... 0 | - values: | mpidr | RES | instr | - - The mpidr field encodes the CPU ID based on the affinity information in the - architecture defined MPIDR, and the field is encoded as follows: - | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 
32 | - | Aff3 | Aff2 | Aff1 | Aff0 | - - The instr field encodes the system register to access based on the fields - defined in the A64 instruction set encoding for system register access - (RES means the bits are reserved for future use and should be zero): - - | 15 ... 14 | 13 ... 11 | 10 ... 7 | 6 ... 3 | 2 ... 0 | - | Op 0 | Op1 | CRn | CRm | Op2 | - - All system regs accessed through this API are (rw, 64-bit) and - kvm_device_attr.addr points to a __u64 value. - - KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS accesses the CPU interface registers for the - CPU specified by the mpidr field. - - CPU interface registers access is not implemented for AArch32 mode. - Error -ENXIO is returned when accessed in AArch32 mode. - Errors: - -ENXIO: Getting or setting this register is not yet supported - -EBUSY: VCPU is running - -EINVAL: Invalid mpidr or register value supplied - - - KVM_DEV_ARM_VGIC_GRP_NR_IRQS - Attributes: - A value describing the number of interrupts (SGI, PPI and SPI) for - this GIC instance, ranging from 64 to 1024, in increments of 32. - - kvm_device_attr.addr points to a __u32 value. - - Errors: - -EINVAL: Value set is out of the expected range - -EBUSY: Value has already be set. - - - KVM_DEV_ARM_VGIC_GRP_CTRL - Attributes: - KVM_DEV_ARM_VGIC_CTRL_INIT - request the initialization of the VGIC, no additional parameter in - kvm_device_attr.addr. - KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES - save all LPI pending bits into guest RAM pending tables. - - The first kB of the pending table is not altered by this operation. - Errors: - -ENXIO: VGIC not properly configured as required prior to calling - this attribute - -ENODEV: no online VCPU - -ENOMEM: memory shortage when allocating vgic internal data - -EFAULT: Invalid guest ram access - -EBUSY: One or more VCPUS are running - - - KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO - Attributes: - The attr field of kvm_device_attr encodes the following values: - bits: | 63 .... 32 | 31 .... 10 | 9 .... 0 | - values: | mpidr | info | vINTID | - - The vINTID specifies which set of IRQs is reported on. - - The info field specifies which information userspace wants to get or set - using this interface. Currently we support the following info values: - - VGIC_LEVEL_INFO_LINE_LEVEL: - Get/Set the input level of the IRQ line for a set of 32 contiguously - numbered interrupts. - vINTID must be a multiple of 32. - - kvm_device_attr.addr points to a __u32 value which will contain a - bitmap where a set bit means the interrupt level is asserted. - - Bit[n] indicates the status for interrupt vINTID + n. - - SGIs and any interrupt with a higher ID than the number of interrupts - supported, will be RAZ/WI. LPIs are always edge-triggered and are - therefore not supported by this interface. - - PPIs are reported per VCPU as specified in the mpidr field, and SPIs are - reported with the same value regardless of the mpidr specified. - - The mpidr field encodes the CPU ID based on the affinity information in the - architecture defined MPIDR, and the field is encoded as follows: - | 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 
32 | - | Aff3 | Aff2 | Aff1 | Aff0 | - Errors: - -EINVAL: vINTID is not multiple of 32 or - info field is not VGIC_LEVEL_INFO_LINE_LEVEL diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virtual/kvm/devices/arm-vgic.txt deleted file mode 100644 index 97b6518148f8..000000000000 --- a/Documentation/virtual/kvm/devices/arm-vgic.txt +++ /dev/null @@ -1,127 +0,0 @@ -ARM Virtual Generic Interrupt Controller v2 (VGIC) -================================================== - -Device types supported: - KVM_DEV_TYPE_ARM_VGIC_V2 ARM Generic Interrupt Controller v2.0 - -Only one VGIC instance may be instantiated through either this API or the -legacy KVM_CREATE_IRQCHIP API. The created VGIC will act as the VM interrupt -controller, requiring emulated user-space devices to inject interrupts to the -VGIC instead of directly to CPUs. - -GICv3 implementations with hardware compatibility support allow creating a -guest GICv2 through this interface. For information on creating a guest GICv3 -device and guest ITS devices, see arm-vgic-v3.txt. It is not possible to -create both a GICv3 and GICv2 device on the same VM. - - -Groups: - KVM_DEV_ARM_VGIC_GRP_ADDR - Attributes: - KVM_VGIC_V2_ADDR_TYPE_DIST (rw, 64-bit) - Base address in the guest physical address space of the GIC distributor - register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V2. - This address needs to be 4K aligned and the region covers 4 KByte. - - KVM_VGIC_V2_ADDR_TYPE_CPU (rw, 64-bit) - Base address in the guest physical address space of the GIC virtual cpu - interface register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V2. - This address needs to be 4K aligned and the region covers 4 KByte. - Errors: - -E2BIG: Address outside of addressable IPA range - -EINVAL: Incorrectly aligned address - -EEXIST: Address already configured - -ENXIO: The group or attribute is unknown/unsupported for this device - or hardware support is missing. - -EFAULT: Invalid user pointer for attr->addr. - - KVM_DEV_ARM_VGIC_GRP_DIST_REGS - Attributes: - The attr field of kvm_device_attr encodes two values: - bits: | 63 .... 40 | 39 .. 32 | 31 .... 0 | - values: | reserved | vcpu_index | offset | - - All distributor regs are (rw, 32-bit) - - The offset is relative to the "Distributor base address" as defined in the - GICv2 specs. Getting or setting such a register has the same effect as - reading or writing the register on the actual hardware from the cpu whose - index is specified with the vcpu_index field. Note that most distributor - fields are not banked, but return the same value regardless of the - vcpu_index used to access the register. - - GICD_IIDR.Revision is updated when the KVM implementation of an emulated - GICv2 is changed in a way directly observable by the guest or userspace. - Userspace should read GICD_IIDR from KVM and write back the read value to - confirm its expected behavior is aligned with the KVM implementation. - Userspace should set GICD_IIDR before setting any other registers (both - KVM_DEV_ARM_VGIC_GRP_DIST_REGS and KVM_DEV_ARM_VGIC_GRP_CPU_REGS) to ensure - the expected behavior. Unless GICD_IIDR has been set from userspace, writes - to the interrupt group registers (GICD_IGROUPR) are ignored. - Errors: - -ENXIO: Getting or setting this register is not yet supported - -EBUSY: One or more VCPUs are running - -EINVAL: Invalid vcpu_index supplied - - KVM_DEV_ARM_VGIC_GRP_CPU_REGS - Attributes: - The attr field of kvm_device_attr encodes two values: - bits: | 63 .... 40 | 39 .. 32 | 31 .... 
0 | - values: | reserved | vcpu_index | offset | - - All CPU interface regs are (rw, 32-bit) - - The offset specifies the offset from the "CPU interface base address" as - defined in the GICv2 specs. Getting or setting such a register has the - same effect as reading or writing the register on the actual hardware. - - The Active Priorities Registers APRn are implementation defined, so we set a - fixed format for our implementation that fits with the model of a "GICv2 - implementation without the security extensions" which we present to the - guest. This interface always exposes four register APR[0-3] describing the - maximum possible 128 preemption levels. The semantics of the register - indicate if any interrupts in a given preemption level are in the active - state by setting the corresponding bit. - - Thus, preemption level X has one or more active interrupts if and only if: - - APRn[X mod 32] == 0b1, where n = X / 32 - - Bits for undefined preemption levels are RAZ/WI. - - Note that this differs from a CPU's view of the APRs on hardware in which - a GIC without the security extensions expose group 0 and group 1 active - priorities in separate register groups, whereas we show a combined view - similar to GICv2's GICH_APR. - - For historical reasons and to provide ABI compatibility with userspace we - export the GICC_PMR register in the format of the GICH_VMCR.VMPriMask - field in the lower 5 bits of a word, meaning that userspace must always - use the lower 5 bits to communicate with the KVM device and must shift the - value left by 3 places to obtain the actual priority mask level. - - Errors: - -ENXIO: Getting or setting this register is not yet supported - -EBUSY: One or more VCPUs are running - -EINVAL: Invalid vcpu_index supplied - - KVM_DEV_ARM_VGIC_GRP_NR_IRQS - Attributes: - A value describing the number of interrupts (SGI, PPI and SPI) for - this GIC instance, ranging from 64 to 1024, in increments of 32. - - Errors: - -EINVAL: Value set is out of the expected range - -EBUSY: Value has already be set, or GIC has already been initialized - with default values. - - KVM_DEV_ARM_VGIC_GRP_CTRL - Attributes: - KVM_DEV_ARM_VGIC_CTRL_INIT - request the initialization of the VGIC or ITS, no additional parameter - in kvm_device_attr.addr. - Errors: - -ENXIO: VGIC not properly configured as required prior to calling - this attribute - -ENODEV: no online VCPU - -ENOMEM: memory shortage when allocating vgic internal data diff --git a/Documentation/virtual/kvm/devices/mpic.txt b/Documentation/virtual/kvm/devices/mpic.txt deleted file mode 100644 index 8257397adc3c..000000000000 --- a/Documentation/virtual/kvm/devices/mpic.txt +++ /dev/null @@ -1,53 +0,0 @@ -MPIC interrupt controller -========================= - -Device types supported: - KVM_DEV_TYPE_FSL_MPIC_20 Freescale MPIC v2.0 - KVM_DEV_TYPE_FSL_MPIC_42 Freescale MPIC v4.2 - -Only one MPIC instance, of any type, may be instantiated. The created -MPIC will act as the system interrupt controller, connecting to each -vcpu's interrupt inputs. - -Groups: - KVM_DEV_MPIC_GRP_MISC - Attributes: - KVM_DEV_MPIC_BASE_ADDR (rw, 64-bit) - Base address of the 256 KiB MPIC register space. Must be - naturally aligned. A value of zero disables the mapping. - Reset value is zero. - - KVM_DEV_MPIC_GRP_REGISTER (rw, 32-bit) - Access an MPIC register, as if the access were made from the guest. - "attr" is the byte offset into the MPIC register space. Accesses - must be 4-byte aligned. 
- - MSIs may be signaled by using this attribute group to write - to the relevant MSIIR. - - KVM_DEV_MPIC_GRP_IRQ_ACTIVE (rw, 32-bit) - IRQ input line for each standard openpic source. 0 is inactive and 1 - is active, regardless of interrupt sense. - - For edge-triggered interrupts: Writing 1 is considered an activating - edge, and writing 0 is ignored. Reading returns 1 if a previously - signaled edge has not been acknowledged, and 0 otherwise. - - "attr" is the IRQ number. IRQ numbers for standard sources are the - byte offset of the relevant IVPR from EIVPR0, divided by 32. - -IRQ Routing: - - The MPIC emulation supports IRQ routing. Only a single MPIC device can - be instantiated. Once that device has been created, it's available as - irqchip id 0. - - This irqchip 0 has 256 interrupt pins, which expose the interrupts in - the main array of interrupt sources (a.k.a. "SRC" interrupts). - - The numbering is the same as the MPIC device tree binding -- based on - the register offset from the beginning of the sources array, without - regard to any subdivisions in chip documentation such as "internal" - or "external" interrupts. - - Access to non-SRC interrupts is not implemented through IRQ routing mechanisms. diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt deleted file mode 100644 index a4e20a090174..000000000000 --- a/Documentation/virtual/kvm/devices/s390_flic.txt +++ /dev/null @@ -1,163 +0,0 @@ -FLIC (floating interrupt controller) -==================================== - -FLIC handles floating (non per-cpu) interrupts, i.e. I/O, service and some -machine check interruptions. All interrupts are stored in a per-vm list of -pending interrupts. FLIC performs operations on this list. - -Only one FLIC instance may be instantiated. - -FLIC provides support to -- add interrupts (KVM_DEV_FLIC_ENQUEUE) -- inspect currently pending interrupts (KVM_FLIC_GET_ALL_IRQS) -- purge all pending floating interrupts (KVM_DEV_FLIC_CLEAR_IRQS) -- purge one pending floating I/O interrupt (KVM_DEV_FLIC_CLEAR_IO_IRQ) -- enable/disable for the guest transparent async page faults -- register and modify adapter interrupt sources (KVM_DEV_FLIC_ADAPTER_*) -- modify AIS (adapter-interruption-suppression) mode state (KVM_DEV_FLIC_AISM) -- inject adapter interrupts on a specified adapter (KVM_DEV_FLIC_AIRQ_INJECT) -- get/set all AIS mode states (KVM_DEV_FLIC_AISM_ALL) - -Groups: - KVM_DEV_FLIC_ENQUEUE - Passes a buffer and length into the kernel which are then injected into - the list of pending interrupts. - attr->addr contains the pointer to the buffer and attr->attr contains - the length of the buffer. - The format of the data structure kvm_s390_irq as it is copied from userspace - is defined in usr/include/linux/kvm.h. - - KVM_DEV_FLIC_GET_ALL_IRQS - Copies all floating interrupts into a buffer provided by userspace. - When the buffer is too small it returns -ENOMEM, which is the indication - for userspace to try again with a bigger buffer. - -ENOBUFS is returned when the allocation of a kernelspace buffer has - failed. - -EFAULT is returned when copying data to userspace failed. - All interrupts remain pending, i.e. are not deleted from the list of - currently pending interrupts. - attr->addr contains the userspace address of the buffer into which all - interrupt data will be copied. - attr->attr contains the size of the buffer in bytes. - - KVM_DEV_FLIC_CLEAR_IRQS - Simply deletes all elements from the list of currently pending floating - interrupts. 
No interrupts are injected into the guest. - - KVM_DEV_FLIC_CLEAR_IO_IRQ - Deletes one (if any) I/O interrupt for a subchannel identified by the - subsystem identification word passed via the buffer specified by - attr->addr (address) and attr->attr (length). - - KVM_DEV_FLIC_APF_ENABLE - Enables async page faults for the guest. So in case of a major page fault - the host is allowed to handle this async and continues the guest. - - KVM_DEV_FLIC_APF_DISABLE_WAIT - Disables async page faults for the guest and waits until already pending - async page faults are done. This is necessary to trigger a completion interrupt - for every init interrupt before migrating the interrupt list. - - KVM_DEV_FLIC_ADAPTER_REGISTER - Register an I/O adapter interrupt source. Takes a kvm_s390_io_adapter - describing the adapter to register: - -struct kvm_s390_io_adapter { - __u32 id; - __u8 isc; - __u8 maskable; - __u8 swap; - __u8 flags; -}; - - id contains the unique id for the adapter, isc the I/O interruption subclass - to use, maskable whether this adapter may be masked (interrupts turned off), - swap whether the indicators need to be byte swapped, and flags contains - further characteristics of the adapter. - Currently defined values for 'flags' are: - - KVM_S390_ADAPTER_SUPPRESSIBLE: adapter is subject to AIS - (adapter-interrupt-suppression) facility. This flag only has an effect if - the AIS capability is enabled. - Unknown flag values are ignored. - - - KVM_DEV_FLIC_ADAPTER_MODIFY - Modifies attributes of an existing I/O adapter interrupt source. Takes - a kvm_s390_io_adapter_req specifying the adapter and the operation: - -struct kvm_s390_io_adapter_req { - __u32 id; - __u8 type; - __u8 mask; - __u16 pad0; - __u64 addr; -}; - - id specifies the adapter and type the operation. The supported operations - are: - - KVM_S390_IO_ADAPTER_MASK - mask or unmask the adapter, as specified in mask - - KVM_S390_IO_ADAPTER_MAP - perform a gmap translation for the guest address provided in addr, - pin a userspace page for the translated address and add it to the - list of mappings - Note: A new mapping will be created unconditionally; therefore, - the calling code should avoid making duplicate mappings. - - KVM_S390_IO_ADAPTER_UNMAP - release a userspace page for the translated address specified in addr - from the list of mappings - - KVM_DEV_FLIC_AISM - modify the adapter-interruption-suppression mode for a given isc if the - AIS capability is enabled. Takes a kvm_s390_ais_req describing: - -struct kvm_s390_ais_req { - __u8 isc; - __u16 mode; -}; - - isc contains the target I/O interruption subclass, mode the target - adapter-interruption-suppression mode. The following modes are - currently supported: - - KVM_S390_AIS_MODE_ALL: ALL-Interruptions Mode, i.e. airq injection - is always allowed; - - KVM_S390_AIS_MODE_SINGLE: SINGLE-Interruption Mode, i.e. airq - injection is only allowed once and the following adapter interrupts - will be suppressed until the mode is set again to ALL-Interruptions - or SINGLE-Interruption mode. - - KVM_DEV_FLIC_AIRQ_INJECT - Inject adapter interrupts on a specified adapter. - attr->attr contains the unique id for the adapter, which allows for - adapter-specific checks and actions. - For adapters subject to AIS, handle the airq injection suppression for - an isc according to the adapter-interruption-suppression mode on condition - that the AIS capability is enabled. - - KVM_DEV_FLIC_AISM_ALL - Gets or sets the adapter-interruption-suppression mode for all ISCs. 
-
-Note: The KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR device ioctls executed on
-FLIC with an unknown group or attribute give the error code EINVAL (instead of
-ENXIO, as specified in the API documentation). It is not possible to conclude
-that a FLIC operation is unavailable based on the error code resulting from a
-usage attempt.
-
-Note: The KVM_DEV_FLIC_CLEAR_IO_IRQ ioctl will return EINVAL in case a zero
-schid is specified.
diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virtual/kvm/devices/vcpu.txt
deleted file mode 100644
index 2b5dab16c4f2..000000000000
--- a/Documentation/virtual/kvm/devices/vcpu.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-Generic vcpu interface
-====================================
-
-The virtual cpu "device" also accepts the ioctls KVM_SET_DEVICE_ATTR,
-KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same struct
-kvm_device_attr as other devices, but targets VCPU-wide settings and controls.
-
-The groups and attributes per virtual cpu, if any, are architecture specific.
-
-1. GROUP: KVM_ARM_VCPU_PMU_V3_CTRL
-Architectures: ARM64
-
-1.1. ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_IRQ
-Parameters: in kvm_device_attr.addr the address for PMU overflow interrupt is a
-            pointer to an int
-Returns: -EBUSY: The PMU overflow interrupt is already set
-         -ENXIO: The overflow interrupt is not set when attempting to get it
-         -ENODEV: PMUv3 not supported
-         -EINVAL: Invalid PMU overflow interrupt number supplied or
-                  trying to set the IRQ number without using an in-kernel
-                  irqchip.
-
-A value describing the PMUv3 (Performance Monitor Unit v3) overflow interrupt
-number for this vcpu. This interrupt could be a PPI or SPI, but the interrupt
-type must be the same for each vcpu. As a PPI, the interrupt number is the same
-for all vcpus, while as an SPI it must be a separate number per vcpu.
-
-1.2 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_INIT
-Parameters: no additional parameter in kvm_device_attr.addr
-Returns: -ENODEV: PMUv3 not supported or GIC not initialized
-         -ENXIO: PMUv3 not properly configured or in-kernel irqchip not
-                 configured as required prior to calling this attribute
-         -EBUSY: PMUv3 already initialized
-
-Request the initialization of the PMUv3. If using the PMUv3 with an in-kernel
-virtual GIC implementation, this must be done after initializing the in-kernel
-irqchip.
-
-
-2. GROUP: KVM_ARM_VCPU_TIMER_CTRL
-Architectures: ARM,ARM64
-
-2.1. ATTRIBUTE: KVM_ARM_VCPU_TIMER_IRQ_VTIMER
-2.2. ATTRIBUTE: KVM_ARM_VCPU_TIMER_IRQ_PTIMER
-Parameters: in kvm_device_attr.addr the address for the timer interrupt is a
-            pointer to an int
-Returns: -EINVAL: Invalid timer interrupt number
-         -EBUSY:  One or more VCPUs have already run
-
-A value describing the architected timer interrupt number when connected to an
-in-kernel virtual GIC. These must be a PPI (16 <= intid < 32). Setting the
-attribute overrides the default values (see below).
-
-KVM_ARM_VCPU_TIMER_IRQ_VTIMER: The EL1 virtual timer intid (default: 27)
-KVM_ARM_VCPU_TIMER_IRQ_PTIMER: The EL1 physical timer intid (default: 30)
-
-Setting the same PPI for different timers will prevent the VCPUs from running.
-Setting the interrupt number on a VCPU configures all VCPUs created at that
-time to use the number provided for a given timer, overwriting any previously
-configured values on other VCPUs. Userspace should configure the interrupt
-numbers on at least one VCPU after creating all VCPUs and before running any
-VCPUs.
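-
-For instance, a sketch of setting the virtual timer PPI on one vCPU (vcpu_fd
-is assumed to be an open vCPU file descriptor):
-
-	int intid = 27;			/* default EL1 virtual timer PPI */
-	struct kvm_device_attr attr = {
-		.group = KVM_ARM_VCPU_TIMER_CTRL,
-		.attr  = KVM_ARM_VCPU_TIMER_IRQ_VTIMER,
-		.addr  = (__u64)&intid,	/* pointer to an int */
-	};
-
-	if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
-		perror("KVM_ARM_VCPU_TIMER_IRQ_VTIMER");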
diff --git a/Documentation/virtual/kvm/devices/vfio.txt b/Documentation/virtual/kvm/devices/vfio.txt
deleted file mode 100644
index 528c77c8022c..000000000000
--- a/Documentation/virtual/kvm/devices/vfio.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-VFIO virtual device
-===================
-
-Device types supported:
-  KVM_DEV_TYPE_VFIO
-
-Only one VFIO instance may be created per VM. The created device
-tracks VFIO groups in use by the VM and features of those groups
-important to the correctness and acceleration of the VM. As groups
-are enabled and disabled for use by the VM, KVM should be updated
-about their presence. When registered with KVM, a reference to the
-VFIO-group is held by KVM.
-
-Groups:
-  KVM_DEV_VFIO_GROUP
-
-KVM_DEV_VFIO_GROUP attributes:
-  KVM_DEV_VFIO_GROUP_ADD: Add a VFIO group to VFIO-KVM device tracking
-	kvm_device_attr.addr points to an int32_t file descriptor
-	for the VFIO group.
-  KVM_DEV_VFIO_GROUP_DEL: Remove a VFIO group from VFIO-KVM device tracking
-	kvm_device_attr.addr points to an int32_t file descriptor
-	for the VFIO group.
-  KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: attaches a guest visible TCE table
-	allocated by sPAPR KVM.
-	kvm_device_attr.addr points to a struct:
-
-	struct kvm_vfio_spapr_tce {
-		__s32	groupfd;
-		__s32	tablefd;
-	};
-
-	where
-	@groupfd is a file descriptor for a VFIO group;
-	@tablefd is a file descriptor for a TCE table allocated via
-		 KVM_CREATE_SPAPR_TCE.
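-
-A minimal sketch of registering a group (vfio_kvm_fd is assumed to come from
-KVM_CREATE_DEVICE with KVM_DEV_TYPE_VFIO, group_fd from opening the VFIO
-group node):
-
-	int32_t group_fd = /* fd of the VFIO group, assumed already open */;
-	struct kvm_device_attr attr = {
-		.group = KVM_DEV_VFIO_GROUP,
-		.attr  = KVM_DEV_VFIO_GROUP_ADD,
-		.addr  = (__u64)&group_fd,	/* pointer to an int32_t fd */
-	};
-
-	if (ioctl(vfio_kvm_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
-		perror("KVM_DEV_VFIO_GROUP_ADD");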
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virtual/kvm/devices/vm.txt
deleted file mode 100644
index 4ffb82b02468..000000000000
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ /dev/null
@@ -1,270 +0,0 @@
-Generic vm interface
-====================================
-
-The virtual machine "device" also accepts the ioctls KVM_SET_DEVICE_ATTR,
-KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same
-struct kvm_device_attr as other devices, but targets VM-wide settings
-and controls.
-
-The groups and attributes per virtual machine, if any, are architecture
-specific.
-
-1. GROUP: KVM_S390_VM_MEM_CTRL
-Architectures: s390
-
-1.1. ATTRIBUTE: KVM_S390_VM_MEM_ENABLE_CMMA
-Parameters: none
-Returns: -EBUSY if a vcpu is already defined, otherwise 0
-
-Enables Collaborative Memory Management Assist (CMMA) for the virtual machine.
-
-1.2. ATTRIBUTE: KVM_S390_VM_MEM_CLR_CMMA
-Parameters: none
-Returns: -EINVAL if CMMA was not enabled
-         0 otherwise
-
-Clear the CMMA status for all guest pages, so any pages the guest marked
-as unused are used again and may not be reclaimed by the host.
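-
-As a sketch, enabling CMMA (1.1) before any vcpus exist looks like this
-(vm_fd is assumed to be the VM file descriptor):
-
-	struct kvm_device_attr attr = {
-		.group = KVM_S390_VM_MEM_CTRL,
-		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
-	};
-
-	if (ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
-		perror("KVM_S390_VM_MEM_ENABLE_CMMA"); /* -EBUSY once vcpus exist */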
-
-1.3. ATTRIBUTE: KVM_S390_VM_MEM_LIMIT_SIZE
-Parameters: in attr->addr the address for the new limit of guest memory
-Returns: -EFAULT if the given address is not accessible
-         -EINVAL if the virtual machine is of type UCONTROL
-         -E2BIG if the given guest memory is too big for that machine
-         -EBUSY if a vcpu is already defined
-         -ENOMEM if not enough memory is available for a new shadow guest mapping
-          0 otherwise
-
-Allows userspace to query the actual limit and set a new limit for
-the maximum guest memory size. The limit will be rounded up to
-2048 MB, 4096 GB, 8192 TB respectively, as this limit is governed by
-the number of page table levels. In the case that there is no limit we will set
-the limit to KVM_S390_NO_MEM_LIMIT (U64_MAX).
-
-2. GROUP: KVM_S390_VM_CPU_MODEL
-Architectures: s390
-
-2.1. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE (r/o)
-
-Allows user space to retrieve machine and kvm specific cpu related information:
-
-struct kvm_s390_vm_cpu_machine {
-       __u64 cpuid;           # CPUID of host
-       __u32 ibc;             # IBC level range offered by host
-       __u8  pad[4];
-       __u64 fac_mask[256];   # set of cpu facilities enabled by KVM
-       __u64 fac_list[256];   # set of cpu facilities offered by host
-}
-
-Parameters: address of buffer to store the machine related cpu data
-            of type struct kvm_s390_vm_cpu_machine*
-Returns:    -EFAULT if the given address is not accessible from kernel space
-	    -ENOMEM if not enough memory is available to process the ioctl
-	    0 in case of success
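-
-A sketch of reading the machine properties from 2.1 (vm_fd again assumed to
-be the VM file descriptor):
-
-	struct kvm_s390_vm_cpu_machine machine;
-	struct kvm_device_attr attr = {
-		.group = KVM_S390_VM_CPU_MODEL,
-		.attr  = KVM_S390_VM_CPU_MACHINE,
-		.addr  = (__u64)&machine,
-	};
-
-	if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr) == 0)
-		; /* machine.fac_list now holds the host-offered facilities */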
-
-2.2. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR (r/w)
-
-Allows user space to retrieve or request to change cpu related information for a vcpu:
-
-struct kvm_s390_vm_cpu_processor {
-       __u64 cpuid;           # CPUID currently (to be) used by this vcpu
-       __u16 ibc;             # IBC level currently (to be) used by this vcpu
-       __u8  pad[6];
-       __u64 fac_list[256];   # set of cpu facilities currently (to be) used
-                              # by this vcpu
-}
-
-KVM does not enforce or limit the cpu model data in any form. Take the information
-retrieved by means of KVM_S390_VM_CPU_MACHINE as a hint for reasonable configuration
-setups. Instruction interceptions triggered by additionally set facility bits that
-are not handled by KVM need to be implemented in the VM driver code.
-
-Parameters: address of buffer to store/set the processor related cpu
-            data of type struct kvm_s390_vm_cpu_processor*.
-Returns:    -EBUSY in case 1 or more vcpus are already activated (only in write case)
-	    -EFAULT if the given address is not accessible from kernel space
-	    -ENOMEM if not enough memory is available to process the ioctl
-	    0 in case of success
-
-2.3. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE_FEAT (r/o)
-
-Allows user space to retrieve available cpu features. A feature is available if
-provided by the hardware and supported by kvm. In theory, cpu features could
-even be completely emulated by kvm.
-
-struct kvm_s390_vm_cpu_feat {
-        __u64 feat[16]; # Bitmap (1 = feature available), MSB 0 bit numbering
-};
-
-Parameters: address of a buffer to load the feature list from.
-Returns:    -EFAULT if the given address is not accessible from kernel space.
-	    0 in case of success.
-
-2.4. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR_FEAT (r/w)
-
-Allows user space to retrieve or change enabled cpu features for all VCPUs of a
-VM. Features that are not available cannot be enabled.
-
-See 2.3. for a description of the parameter struct.
-
-Parameters: address of a buffer to store/load the feature list from.
-Returns:    -EFAULT if the given address is not accessible from kernel space.
-	    -EINVAL if a cpu feature that is not available is to be enabled.
-	    -EBUSY if at least one VCPU has already been defined.
-	    0 in case of success.
-
-2.5. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE_SUBFUNC (r/o)
-
-Allows user space to retrieve available cpu subfunctions without any filtering
-done by a set IBC. These subfunctions are indicated to the guest VCPU via
-query or "test bit" subfunctions and are used e.g. by cpacf functions, plo and ptff.
-
-A subfunction block is only valid if KVM_S390_VM_CPU_MACHINE contains the
-STFL(E) bit introducing the affected instruction. If the affected instruction
-indicates subfunctions via a "query subfunction", the response block is
-contained in the returned struct. If the affected instruction
-indicates subfunctions via a "test bit" mechanism, the subfunction codes are
-contained in the returned struct in MSB 0 bit numbering.
-
-struct kvm_s390_vm_cpu_subfunc {
-       u8 plo[32];           # always valid (ESA/390 feature)
-       u8 ptff[16];          # valid with TOD-clock steering
-       u8 kmac[16];          # valid with Message-Security-Assist
-       u8 kmc[16];           # valid with Message-Security-Assist
-       u8 km[16];            # valid with Message-Security-Assist
-       u8 kimd[16];          # valid with Message-Security-Assist
-       u8 klmd[16];          # valid with Message-Security-Assist
-       u8 pckmo[16];         # valid with Message-Security-Assist-Extension 3
-       u8 kmctr[16];         # valid with Message-Security-Assist-Extension 4
-       u8 kmf[16];           # valid with Message-Security-Assist-Extension 4
-       u8 kmo[16];           # valid with Message-Security-Assist-Extension 4
-       u8 pcc[16];           # valid with Message-Security-Assist-Extension 4
-       u8 ppno[16];          # valid with Message-Security-Assist-Extension 5
-       u8 kma[16];           # valid with Message-Security-Assist-Extension 8
-       u8 kdsa[16];          # valid with Message-Security-Assist-Extension 9
-       u8 reserved[1792];    # reserved for future instructions
-};
-
-Parameters: address of a buffer to load the subfunction blocks from.
-Returns:    -EFAULT if the given address is not accessible from kernel space.
-	    0 in case of success.
-
-2.6. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR_SUBFUNC (r/w)
-
-Allows user space to retrieve or change cpu subfunctions to be indicated for
-all VCPUs of a VM. This attribute will only be available if kernel and
-hardware support are in place.
-
-The kernel uses the configured subfunction blocks for indication to
-the guest. A subfunction block will only be used if the associated STFL(E) bit
-has not been disabled by user space (so the instruction to be queried is
-actually available for the guest).
-
-As long as no data has been written, a read will fail. The IBC will be used
-to determine available subfunctions in this case; this guarantees backward
-compatibility.
-
-See 2.5. for a description of the parameter struct.
-
-Parameters: address of a buffer to store/load the subfunction blocks from.
-Returns:    -EFAULT if the given address is not accessible from kernel space.
-	    -EINVAL when reading, if there was no write yet.
-	    -EBUSY if at least one VCPU has already been defined.
-	    0 in case of success.
-
-3. GROUP: KVM_S390_VM_TOD
-Architectures: s390
-
-3.1. ATTRIBUTE: KVM_S390_VM_TOD_HIGH
-
-Allows user space to set/get the TOD clock extension (u8) (superseded by
-KVM_S390_VM_TOD_EXT).
-
-Parameters: address of a buffer in user space to store the data (u8) to
-Returns:    -EFAULT if the given address is not accessible from kernel space
-	    -EINVAL if setting the TOD clock extension to != 0 is not supported
-
-3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW
-
-Allows user space to set/get bits 0-63 of the TOD clock register as defined in
-the POP (u64).
-
-Parameters: address of a buffer in user space to store the data (u64) to
-Returns:    -EFAULT if the given address is not accessible from kernel space
-
-3.3. ATTRIBUTE: KVM_S390_VM_TOD_EXT
-Allows user space to set/get bits 0-63 of the TOD clock register as defined in
-the POP (u64). If the guest CPU model supports the TOD clock extension (u8), it
-also allows user space to get/set it. If the guest CPU model does not support
-it, it is stored as 0 and not allowed to be set to a value != 0.
-
-Parameters: address of a buffer in user space to store the data
-            (kvm_s390_vm_tod_clock) to
-Returns:    -EFAULT if the given address is not accessible from kernel space
-	    -EINVAL if setting the TOD clock extension to != 0 is not supported
-
-4. GROUP: KVM_S390_VM_CRYPTO
-Architectures: s390
-
-4.1. ATTRIBUTE: KVM_S390_VM_CRYPTO_ENABLE_AES_KW (w/o)
-
-Allows user space to enable aes key wrapping, including generating a new
-wrapping key.
-
-Parameters: none
-Returns:    0
-
-4.2. ATTRIBUTE: KVM_S390_VM_CRYPTO_ENABLE_DEA_KW (w/o)
-
-Allows user space to enable dea key wrapping, including generating a new
-wrapping key.
-
-Parameters: none
-Returns:    0
-
-4.3. ATTRIBUTE: KVM_S390_VM_CRYPTO_DISABLE_AES_KW (w/o)
-
-Allows user space to disable aes key wrapping, clearing the wrapping key.
-
-Parameters: none
-Returns:    0
-
-4.4. ATTRIBUTE: KVM_S390_VM_CRYPTO_DISABLE_DEA_KW (w/o)
-
-Allows user space to disable dea key wrapping, clearing the wrapping key.
-
-Parameters: none
-Returns:    0
-
-5. GROUP: KVM_S390_VM_MIGRATION
-Architectures: s390
-
-5.1. ATTRIBUTE: KVM_S390_VM_MIGRATION_STOP (w/o)
-
-Allows userspace to stop migration mode, needed for PGSTE migration.
-Setting this attribute when migration mode is not active will have no
-effect.
-
-Parameters: none
-Returns:    0
-
-5.2. ATTRIBUTE: KVM_S390_VM_MIGRATION_START (w/o)
-
-Allows userspace to start migration mode, needed for PGSTE migration.
-Setting this attribute when migration mode is already active will have
-no effect.
-
-Parameters: none
-Returns:    -ENOMEM if there is not enough free memory to start migration mode
-	    -EINVAL if the state of the VM is invalid (e.g. no memory defined)
-	    0 in case of success.
-
-5.3. ATTRIBUTE: KVM_S390_VM_MIGRATION_STATUS (r/o)
-
-Allows userspace to query the status of migration mode.
-
-Parameters: address of a buffer in user space to store the data (u64) to;
-            the data itself is either 0 if migration mode is disabled or 1
-            if it is enabled
-Returns:    -EFAULT if the given address is not accessible from kernel space
-	    0 in case of success.
diff --git a/Documentation/virtual/kvm/devices/xics.txt b/Documentation/virtual/kvm/devices/xics.txt
deleted file mode 100644
index 42864935ac5d..000000000000
--- a/Documentation/virtual/kvm/devices/xics.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-XICS interrupt controller
-
-Device type supported: KVM_DEV_TYPE_XICS
-
-Groups:
-  KVM_DEV_XICS_SOURCES
-  Attributes: One per interrupt source, indexed by the source number.
-
-This device emulates the XICS (eXternal Interrupt Controller
-Specification) defined in PAPR. The XICS has a set of interrupt
-sources, each identified by a 20-bit source number, and a set of
-Interrupt Control Presentation (ICP) entities, also called "servers",
-each associated with a virtual CPU.
-
-The ICP entities are created by enabling the KVM_CAP_IRQ_ARCH
-capability for each vcpu, specifying KVM_CAP_IRQ_XICS in args[0] and
-the interrupt server number (i.e. the vcpu number from the XICS's
-point of view) in args[1] of the kvm_enable_cap struct.
-
-Each ICP has 64 bits of state which can be read and written using the
-KVM_GET_ONE_REG and KVM_SET_ONE_REG ioctls on the vcpu. The 64 bit
-state word has the following bitfields, starting at the
-least-significant end of the word:
-
-* Unused, 16 bits
-
-* Pending interrupt priority, 8 bits
-  Zero is the highest priority, 255 means no interrupt is pending.
-
-* Pending IPI (inter-processor interrupt) priority, 8 bits
-  Zero is the highest priority, 255 means no IPI is pending.
-
-* Pending interrupt source number, 24 bits
-  Zero means no interrupt pending, 2 means an IPI is pending
-
-* Current processor priority, 8 bits
-  Zero is the highest priority, meaning no interrupts can be
-  delivered, and 255 is the lowest priority.
-
-Each source has 64 bits of state that can be read and written using
-the KVM_GET_DEVICE_ATTR and KVM_SET_DEVICE_ATTR ioctls, specifying the
-KVM_DEV_XICS_SOURCES attribute group, with the attribute number being
-the interrupt source number. The 64 bit state word has the following
-bitfields, starting from the least-significant end of the word:
-
-* Destination (server number), 32 bits
-  This specifies where the interrupt should be sent, and is the
-  interrupt server number specified for the destination vcpu.
-
-* Priority, 8 bits
-  This is the priority specified for this interrupt source, where 0 is
-  the highest priority and 255 is the lowest. An interrupt with a
-  priority of 255 will never be delivered.
-
-* Level sensitive flag, 1 bit
-  This bit is 1 for a level-sensitive interrupt source, or 0 for
-  edge-sensitive (or MSI).
-
-* Masked flag, 1 bit
-  This bit is set to 1 if the interrupt is masked (cannot be delivered
-  regardless of its priority), for example by the ibm,int-off RTAS
-  call, or 0 if it is not masked.
-
-* Pending flag, 1 bit
-  This bit is 1 if the source has a pending interrupt, otherwise 0.
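-
-A sketch of reading one source's state word (xics_fd is assumed to come from
-KVM_CREATE_DEVICE with KVM_DEV_TYPE_XICS; KVM_DEV_XICS_GRP_SOURCES is the
-uapi name of the sources group, and the source number is a placeholder):
-
-	__u64 state;
-	__u64 source_number = 16;	/* placeholder 20-bit source number */
-	struct kvm_device_attr attr = {
-		.group = KVM_DEV_XICS_GRP_SOURCES,
-		.attr  = source_number,
-		.addr  = (__u64)&state,
-	};
-
-	if (ioctl(xics_fd, KVM_GET_DEVICE_ATTR, &attr) == 0)
-		; /* decode destination/priority/flags from 'state' as above */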
-
-Only one XICS instance may be created per VM.
diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virtual/kvm/devices/xive.txt
deleted file mode 100644
index 9a24a4525253..000000000000
--- a/Documentation/virtual/kvm/devices/xive.txt
+++ /dev/null
@@ -1,197 +0,0 @@
-POWER9 eXternal Interrupt Virtualization Engine (XIVE Gen1)
-==========================================================
-
-Device types supported:
-  KVM_DEV_TYPE_XIVE     POWER9 XIVE Interrupt Controller generation 1
-
-This device acts as a VM interrupt controller. It provides the KVM
-interface to configure the interrupt sources of a VM in the underlying
-POWER9 XIVE interrupt controller.
-
-Only one XIVE instance may be instantiated. A guest XIVE device
-requires a POWER9 host and the guest OS should have support for the
-XIVE native exploitation interrupt mode. If not, it should run using
-the legacy interrupt mode, referred to as XICS (POWER7/8).
-
-* Device Mappings
-
-  The KVM device exposes different MMIO ranges of the XIVE HW which
-  are required for interrupt management. These are exposed to the
-  guest in VMAs populated with a custom VM fault handler.
-
-  1. Thread Interrupt Management Area (TIMA)
-
-  Each thread has an associated Thread Interrupt Management context
-  composed of a set of registers. These registers let the thread
-  handle priority management and interrupt acknowledgment. The most
-  important are:
-
-      - Interrupt Pending Buffer     (IPB)
-      - Current Processor Priority   (CPPR)
-      - Notification Source Register (NSR)
-
-  They are exposed to software in four different pages each proposing
-  a view with a different privilege. The first page is for the
-  physical thread context and the second for the hypervisor. Only the
-  third (operating system) and the fourth (user level) are exposed to
-  the guest.
-
-  2. Event State Buffer (ESB)
-
-  Each source is associated with an Event State Buffer (ESB) with
-  an even/odd pair of pages which provides commands to
-  manage the source: to trigger, to EOI, to turn off the source for
-  instance.
-
-  3. Device pass-through
-
-  When a device is passed-through into the guest, the source
-  interrupts are from a different HW controller (PHB4) and the ESB
-  pages exposed to the guest should accommodate this change.
-
-  The passthru_irq helpers, kvmppc_xive_set_mapped() and
-  kvmppc_xive_clr_mapped() are called when the device HW irqs are
-  mapped into or unmapped from the guest IRQ number space. The KVM
-  device extends these helpers to clear the ESB pages of the guest IRQ
-  number being mapped and then lets the VM fault handler repopulate.
-  The handler will insert the ESB page corresponding to the HW
-  interrupt of the device being passed-through or the initial IPI ESB
-  page if the device has been removed.
-
-  The ESB remapping is fully transparent to the guest and the OS
-  device driver. All handling is done within VFIO and the above
-  helpers in KVM-PPC.
-
-* Groups:
-
-  1. KVM_DEV_XIVE_GRP_CTRL
-  Provides global controls on the device
-  Attributes:
-    1.1 KVM_DEV_XIVE_RESET (write only)
-    Resets the interrupt controller configuration for sources and event
-    queues. To be used by kexec and kdump.
-    Errors: none
-
-    1.2 KVM_DEV_XIVE_EQ_SYNC (write only)
-    Sync all the sources and queues and mark the EQ pages dirty. This is
-    to make sure that a consistent memory state is captured when
-    migrating the VM.
-    Errors: none
-
-  2. KVM_DEV_XIVE_GRP_SOURCE (write only)
-  Initializes a new source in the XIVE device and masks it.
-  Attributes:
-    Interrupt source number  (64-bit)
-  The kvm_device_attr.addr points to a __u64 value:
-    bits:     | 63   ....  2 |   1   |   0
-    values:   |    unused    | level | type
-  - type:  0:MSI 1:LSI
-  - level: assertion level in case of an LSI.
-  Errors:
-    -E2BIG:  Interrupt source number is out of range
-    -ENOMEM: Could not create a new source block
-    -EFAULT: Invalid user pointer for attr->addr.
-    -ENXIO:  Could not allocate underlying HW interrupt
-
-  3. KVM_DEV_XIVE_GRP_SOURCE_CONFIG (write only)
-  Configures source targeting
-  Attributes:
-    Interrupt source number  (64-bit)
-  The kvm_device_attr.addr points to a __u64 value:
-    bits:     | 63   ....  33 |  32  | 31 .. 3 |  2 .. 0
-    values:   |    eisn       | mask |  server | priority
-  - priority: 0-7 interrupt priority level
-  - server: CPU number chosen to handle the interrupt
-  - mask: mask flag (unused)
-  - eisn: Effective Interrupt Source Number
-  Errors:
-    -ENOENT: Unknown source number
-    -EINVAL: Not initialized source number
-    -EINVAL: Invalid priority
-    -EINVAL: Invalid CPU number.
-    -EFAULT: Invalid user pointer for attr->addr.
-    -ENXIO:  CPU event queues not configured or configuration of the
-             underlying HW interrupt failed
-    -EBUSY:  No CPU available to serve interrupt
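-
-  A sketch of targeting a source using the layout above (xive_fd is assumed
-  to come from KVM_CREATE_DEVICE with KVM_DEV_TYPE_XIVE; the eisn, server,
-  priority and source number are placeholder values):
-
-	__u64 source_number = 32;
-	__u64 eisn = 32, server = 0, priority = 5;	/* placeholders */
-	__u64 cfg = (eisn << 33) | (server << 3) | priority;
-	struct kvm_device_attr attr = {
-		.group = KVM_DEV_XIVE_GRP_SOURCE_CONFIG,
-		.attr  = source_number,
-		.addr  = (__u64)&cfg,
-	};
-
-	if (ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
-		perror("KVM_DEV_XIVE_GRP_SOURCE_CONFIG");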
-
-  4. KVM_DEV_XIVE_GRP_EQ_CONFIG (read-write)
-  Configures an event queue of a CPU
-  Attributes:
-    EQ descriptor identifier (64-bit)
-  The EQ descriptor identifier is a tuple (server, priority):
-    bits:     | 63   ....  32 | 31 .. 3 |  2 .. 0
-    values:   |    unused     |  server | priority
-  The kvm_device_attr.addr points to:
-    struct kvm_ppc_xive_eq {
-	__u32 flags;
-	__u32 qshift;
-	__u64 qaddr;
-	__u32 qtoggle;
-	__u32 qindex;
-	__u8  pad[40];
-    };
-  - flags: queue flags
-    KVM_XIVE_EQ_ALWAYS_NOTIFY (required)
-	forces notification without using the coalescing mechanism
-	provided by the XIVE END ESBs.
-  - qshift: queue size (power of 2)
-  - qaddr: real address of queue
-  - qtoggle: current queue toggle bit
-  - qindex: current queue index
-  - pad: reserved for future use
-  Errors:
-    -ENOENT: Invalid CPU number
-    -EINVAL: Invalid priority
-    -EINVAL: Invalid flags
-    -EINVAL: Invalid queue size
-    -EINVAL: Invalid queue address
-    -EFAULT: Invalid user pointer for attr->addr.
-    -EIO:    Configuration of the underlying HW failed
-
-  5. KVM_DEV_XIVE_GRP_SOURCE_SYNC (write only)
-  Synchronize the source to flush event notifications
-  Attributes:
-    Interrupt source number  (64-bit)
-  Errors:
-    -ENOENT: Unknown source number
-    -EINVAL: Not initialized source number
-
-* VCPU state
-
-  The XIVE IC maintains VP interrupt state in an internal structure
-  called the NVT. When a VP is not dispatched on a HW processor
-  thread, this structure can be updated by HW if the VP is the target
-  of an event notification.
-
-  It is important for migration to capture the cached IPB from the NVT
-  as it synthesizes the priorities of the pending interrupts. We
-  capture a bit more to report debug information.
-
-  KVM_REG_PPC_VP_STATE (2 * 64bits)
-    bits:     |  63  ....  32  |  31  ....  0  |
-    values:   |   TIMA word0   |   TIMA word1  |
-    bits:     | 127       ..........       64  |
-    values:   |            unused              |
-
-* Migration:
-
-  Saving the state of a VM using the XIVE native exploitation mode
-  should follow a specific sequence. When the VM is stopped:
-
-  1. Mask all sources (PQ=01) to stop the flow of events.
-
-  2. Sync the XIVE device with the KVM control KVM_DEV_XIVE_EQ_SYNC to
-  flush any in-flight event notification and to stabilize the EQs. At
-  this stage, the EQ pages are marked dirty to make sure they are
-  transferred in the migration sequence.
-
-  3. Capture the state of the source targeting, the EQs configuration
-  and the state of thread interrupt context registers.
-
-  Restore is similar:
-
-  1. Restore the EQ configuration, as targeting depends on it.
-  2. Restore targeting
-  3. Restore the thread interrupt contexts
-  4. Restore the source states
-  5. Let the vCPU run
diff --git a/Documentation/virtual/kvm/halt-polling.txt b/Documentation/virtual/kvm/halt-polling.txt
deleted file mode 100644
index 4f791b128dd2..000000000000
--- a/Documentation/virtual/kvm/halt-polling.txt
+++ /dev/null
@@ -1,136 +0,0 @@
-The KVM halt polling system
-===========================
-
-The KVM halt polling system provides a feature within KVM whereby the latency
-of a guest can, under some circumstances, be reduced by polling in the host
-for some time period after the guest has elected to no longer run by ceding.
-That is, when a guest vcpu has ceded, or in the case of powerpc when all of the
-vcpus of a single vcore have ceded, the host kernel polls for wakeup conditions
-before giving up the cpu to the scheduler in order to let something else run.
-
-Polling provides a latency advantage in cases where the guest can be run again
-very quickly by at least saving us a trip through the scheduler, normally on
-the order of a few micro-seconds, although performance benefits are workload
-dependent.
In the event that no wakeup source arrives during the polling
-interval or some other task on the runqueue is runnable, the scheduler is
-invoked. Thus halt polling is especially useful on workloads with very short
-wakeup periods where the time spent halt polling is minimised and the time
-savings of not invoking the scheduler are distinguishable.
-
-The generic halt polling code is implemented in:
-
-	virt/kvm/kvm_main.c: kvm_vcpu_block()
-
-The powerpc kvm-hv specific case is implemented in:
-
-	arch/powerpc/kvm/book3s_hv.c: kvmppc_vcore_blocked()
-
-Halt Polling Interval
-=====================
-
-The maximum time for which to poll before invoking the scheduler, referred to
-as the halt polling interval, is increased and decreased based on the perceived
-effectiveness of the polling in an attempt to limit pointless polling.
-This value is stored in either the vcpu struct:
-
-	kvm_vcpu->halt_poll_ns
-
-or in the case of powerpc kvm-hv, in the vcore struct:
-
-	kvmppc_vcore->halt_poll_ns
-
-Thus this is a per vcpu (or vcore) value.
-
-During polling if a wakeup source is received within the halt polling interval,
-the interval is left unchanged. In the event that a wakeup source isn't
-received during the polling interval (and thus schedule is invoked) there are
-two options: either the polling interval and total block time[0] were less than
-the global max polling interval (see module params below), or the total block
-time was greater than the global max polling interval.
-
-In the event that both the polling interval and total block time were less than
-the global max polling interval then the polling interval can be increased in
-the hope that next time during the longer polling interval the wake up source
-will be received while the host is polling and the latency benefits will be
-obtained. The polling interval is grown in the function grow_halt_poll_ns() and
-is multiplied by the module parameters halt_poll_ns_grow and
-halt_poll_ns_grow_start.
-
-In the event that the total block time was greater than the global max polling
-interval then the host will never poll for long enough (limited by the global
-max) to wakeup during the polling interval so it may as well be shrunk in order
-to avoid pointless polling. The polling interval is shrunk in the function
-shrink_halt_poll_ns() and is divided by the module parameter
-halt_poll_ns_shrink, or set to 0 iff halt_poll_ns_shrink == 0.
-
-It is worth noting that this adjustment process attempts to home in on some
-steady state polling interval but will only really do a good job for wakeups
-which come at an approximately constant rate, otherwise there will be constant
-adjustment of the polling interval.
-
-[0] total block time: the time between when the halt polling function is
-		      invoked and a wakeup source received (irrespective of
-		      whether the scheduler is invoked within that function).
-
-Module Parameters
-=================
-
-The kvm module has 3 tuneable module parameters to adjust the global max
-polling interval as well as the rate at which the polling interval is grown and
-shrunk. These variables are defined in include/linux/kvm_host.h and as module
-parameters in virt/kvm/kvm_main.c, or arch/powerpc/kvm/book3s_hv.c in the
-powerpc kvm-hv case.
-
-Module Parameter	|   Description		    |	     Default Value
---------------------------------------------------------------------------------
-halt_poll_ns		| The global max polling    | KVM_HALT_POLL_NS_DEFAULT
-			| interval which defines    |
-			| the ceiling value of the  |
-			| polling interval for      | (per arch value)
-			| each vcpu.		    |
---------------------------------------------------------------------------------
-halt_poll_ns_grow	| The value by which the    | 2
-			| halt polling interval is  |
-			| multiplied in the	    |
-			| grow_halt_poll_ns()	    |
-			| function.		    |
---------------------------------------------------------------------------------
-halt_poll_ns_grow_start | The initial value to grow | 10000
-			| to from zero in the	    |
-			| grow_halt_poll_ns()	    |
-			| function.		    |
---------------------------------------------------------------------------------
-halt_poll_ns_shrink	| The value by which the    | 0
-			| halt polling interval is  |
-			| divided in the	    |
-			| shrink_halt_poll_ns()	    |
-			| function.		    |
---------------------------------------------------------------------------------
-
-These module parameters can be set via the sysfs files in:
-
-	/sys/module/kvm/parameters/
-
-Note: these module parameters are system wide values and are not able to
-      be tuned on a per vm basis.
-
-Further Notes
-=============
-
-- Care should be taken when setting the halt_poll_ns module parameter as a
-large value has the potential to drive the cpu usage to 100% on a machine which
-would be almost entirely idle otherwise. This is because even if a guest has
-wakeups during which very little work is done and which are quite far apart, if
-the period is shorter than the global max polling interval (halt_poll_ns) then
-the host will always poll for the entire block time and thus cpu utilisation
-will go to 100%.
-
-- Halt polling essentially presents a trade off between power usage and latency
-and the module parameters should be used to tune the affinity for this. Idle
-cpu time is essentially converted to host kernel time with the aim of decreasing
-latency when entering the guest.
-
-- Halt polling will only be conducted by the host when no other tasks are
-runnable on that cpu, otherwise the polling will cease immediately and
-schedule will be invoked to allow that other task to run. Thus this doesn't
-allow a guest to mount a denial of service attack on the cpu.
diff --git a/Documentation/virtual/kvm/hypercalls.txt b/Documentation/virtual/kvm/hypercalls.txt
deleted file mode 100644
index da210651f714..000000000000
--- a/Documentation/virtual/kvm/hypercalls.txt
+++ /dev/null
@@ -1,154 +0,0 @@
-Linux KVM Hypercall:
-===================
-X86:
- KVM Hypercalls have a three-byte sequence of either the vmcall or the vmmcall
- instruction. The hypervisor can replace it with instructions that are
- guaranteed to be supported.
-
- Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
- The hypercall number should be placed in rax and the return value will be
- placed in rax. No other registers will be clobbered unless explicitly stated
- by the particular hypercall.
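-
- A guest-side sketch of this calling convention (the helper name is made up;
- the vmcall mnemonic assumes an Intel CPU, and KVM patches in vmmcall on AMD):
-
-	static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
-	{
-		long ret;
-
-		asm volatile("vmcall"
-			     : "=a"(ret)
-			     : "a"(nr), "b"(p1)
-			     : "memory");
-		return ret;
-	}
-
- The hypercalls documented below are invoked this way.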
-
-S390:
-  R2-R7 are used for parameters 1-6. In addition, R1 is used for hypercall
-  number. The return value is written to R2.
-
-  S390 uses diagnose instruction as hypercall (0x500) along with hypercall
-  number in R1.
-
-  For further information on the S390 diagnose call as supported by KVM,
-  refer to Documentation/virtual/kvm/s390-diag.txt.
-
- PowerPC:
-  It uses R3-R10 and hypercall number in R11. R4-R11 are used as output registers.
-  Return value is placed in R3.
-
-  KVM hypercalls use a 4-byte opcode that is patched with the
-  'hypercall-instructions' property inside the device tree's /hypervisor node.
-  For more information refer to Documentation/virtual/kvm/ppc-pv.txt
-
-MIPS:
-  KVM hypercalls use the HYPCALL instruction with code 0 and the hypercall
-  number in $2 (v0). Up to four arguments may be placed in $4-$7 (a0-a3) and
-  the return value is placed in $2 (v0).
-
-KVM Hypercalls Documentation
-===========================
-The template for each hypercall is:
-1. Hypercall name.
-2. Architecture(s)
-3. Status (deprecated, obsolete, active)
-4. Purpose
-
-1. KVM_HC_VAPIC_POLL_IRQ
-------------------------
-Architecture: x86
-Status: active
-Purpose: Trigger guest exit so that the host can check for pending
-interrupts on reentry.
-
-2. KVM_HC_MMU_OP
-------------------------
-Architecture: x86
-Status: deprecated.
-Purpose: Support MMU operations such as writing to PTE,
-flushing TLB, release PT.
-
-3. KVM_HC_FEATURES
-------------------------
-Architecture: PPC
-Status: active
-Purpose: Expose hypercall availability to the guest. On x86 platforms, cpuid
-is used to enumerate which hypercalls are available. On PPC, either device
-tree based lookup (which is also what EPAPR dictates) OR KVM specific
-enumeration mechanism (which is this hypercall) can be used.
-
-4. KVM_HC_PPC_MAP_MAGIC_PAGE
-------------------------
-Architecture: PPC
-Status: active
-Purpose: To enable communication between the hypervisor and guest there is a
-shared page that contains parts of supervisor visible register state.
-The guest can map this shared page to access its supervisor register through
-memory using this hypercall.
-
-5. KVM_HC_KICK_CPU
-------------------------
-Architecture: x86
-Status: active
-Purpose: Hypercall used to wake up a vcpu from HLT state
-Usage example: A vcpu of a paravirtualized guest that is busywaiting in guest
-kernel mode for an event to occur (ex: a spinlock to become available) can
-execute HLT instruction once it has busy-waited for more than a threshold
-time-interval. Execution of HLT instruction would cause the hypervisor to put
-the vcpu to sleep until occurrence of an appropriate event. Another vcpu of the
-same guest can wakeup the sleeping vcpu by issuing KVM_HC_KICK_CPU hypercall,
-specifying APIC ID (a1) of the vcpu to be woken up. An additional argument (a0)
-is used in the hypercall for future use.
-
-
-6. KVM_HC_CLOCK_PAIRING
-------------------------
-Architecture: x86
-Status: active
-Purpose: Hypercall used to synchronize host and guest clocks.
-Usage:
-
-a0: guest physical address where host copies
-"struct kvm_clock_pairing" structure.
-
-a1: clock_type, ATM only KVM_CLOCK_PAIRING_WALLCLOCK (0)
-is supported (corresponding to the host's CLOCK_REALTIME clock).
-
-		struct kvm_clock_pairing {
-			__s64 sec;
-			__s64 nsec;
-			__u64 tsc;
-			__u32 flags;
-			__u32 pad[9];
-		};
-
-       Where:
-               * sec: seconds from clock_type clock.
-               * nsec: nanoseconds from clock_type clock.
-               * tsc: guest TSC value used to calculate sec/nsec pair
-               * flags: flags, unused (0) at the moment.
-
-The hypercall lets a guest compute a precise timestamp across
-host and guest. The guest can use the returned TSC value to
-compute the CLOCK_REALTIME for its clock, at the same instant.
-
-Returns KVM_EOPNOTSUPP if the host does not use TSC clocksource,
-or if clock type is different than KVM_CLOCK_PAIRING_WALLCLOCK.
-
-7. KVM_HC_SEND_IPI
-------------------------
-Architecture: x86
-Status: active
-Purpose: Send IPIs to multiple vCPUs.
-
-a0: lower part of the bitmap of destination APIC IDs
-a1: higher part of the bitmap of destination APIC IDs
-a2: the lowest APIC ID in bitmap
-a3: APIC ICR
-
-The hypercall lets a guest send multicast IPIs, with at most 128
-destinations per hypercall in 64-bit mode and 64 vCPUs per
-hypercall in 32-bit mode. The destinations are represented by a
-bitmap contained in the first two arguments (a0 and a1). Bit 0 of
-a0 corresponds to the APIC ID in the third argument (a2), bit 1
-corresponds to the APIC ID a2+1, and so on.
-
-Returns the number of CPUs to which the IPIs were delivered successfully.
-
-8. KVM_HC_SCHED_YIELD
-------------------------
-Architecture: x86
-Status: active
-Purpose: Hypercall used to yield if the IPI target vCPU is preempted
-
-a0: destination APIC ID
-
-Usage example: When sending a call-function IPI-many to vCPUs, yield if
-any of the IPI target vCPUs was preempted.
diff --git a/Documentation/virtual/kvm/index.rst b/Documentation/virtual/kvm/index.rst
deleted file mode 100644
index 0b206a06f5be..000000000000
--- a/Documentation/virtual/kvm/index.rst
+++ /dev/null
@@ -1,11 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-===
-KVM
-===
-
-.. toctree::
-   :maxdepth: 2
-
-   amd-memory-encryption
-   cpuid
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
deleted file mode 100644
index 635cd6eaf714..000000000000
--- a/Documentation/virtual/kvm/locking.txt
+++ /dev/null
@@ -1,215 +0,0 @@
-KVM Lock Overview
-=================
-
-1. Acquisition Orders
----------------------
-
-The acquisition orders for mutexes are as follows:
-
-- kvm->lock is taken outside vcpu->mutex
-
-- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
-
-- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
-  them together is quite rare.
-
-On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
-
-Everything else is a leaf: no other lock is taken inside the critical
-sections.
-
-2. Exception
-------------
-
-Fast page fault:
-
-Fast page fault is the fast path which fixes the guest page fault out of
-the mmu-lock on x86. Currently, the page fault can be fast in one of the
-following two cases:
-
-1. Access Tracking: The SPTE is not present, but it is marked for access
-tracking i.e. the SPTE_SPECIAL_MASK is set. That means we need to
-restore the saved R/X bits. This is described in more detail below.
-
-2. Write-Protection: The SPTE is present and the fault is
-caused by write-protect. That means we just need to change the W bit of the
-spte.
-
-What we use to avoid all the races is the SPTE_HOST_WRITEABLE bit and
-SPTE_MMU_WRITEABLE bit on the spte:
-- SPTE_HOST_WRITEABLE means the gfn is writable on host.
-- SPTE_MMU_WRITEABLE means the gfn is writable on mmu. The bit is set when
-  the gfn is writable on guest mmu and it is not write-protected by shadow
-  page write-protection.
-
-On the fast page fault path, we use cmpxchg to atomically set the spte W
-bit if spte.SPTE_HOST_WRITEABLE = 1 and spte.SPTE_WRITE_PROTECT = 1, or
-restore the saved R/X bits if VMX_EPT_TRACK_ACCESS mask is set, or both. This
-is safe because any change to these bits can be detected by cmpxchg.
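-
-A minimal illustration of that lockless update (mask names as used in
-arch/x86/kvm/mmu.c of this era; illustrative only, not the exact kernel code):
-
-	u64 old_spte = READ_ONCE(*sptep);
-
-	if ((old_spte & SPTE_HOST_WRITEABLE) && (old_spte & SPTE_MMU_WRITEABLE))
-		/* publish the W bit; bail out and retry the fault on failure */
-		cmpxchg64(sptep, old_spte, old_spte | PT_WRITABLE_MASK);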
-
-But we need to carefully check these cases:
-
-1): The mapping from gfn to pfn
-The mapping from gfn to pfn may be changed since we can only ensure the pfn
-is not changed during cmpxchg. This is an ABA problem; for example, the
-following case will happen:
-
-At the beginning:
-gpte = gfn1
-gfn1 is mapped to pfn1 on host
-spte is the shadow page table entry corresponding with gpte and
-spte = pfn1
-
-   VCPU 0                           VCPU 1
-on fast page fault path:
-
-   old_spte = *spte;
-                                    pfn1 is swapped out:
-                                       spte = 0;
-
-                                    pfn1 is re-alloced for gfn2.
-
-                                    gpte is changed to point to
-                                    gfn2 by the guest:
-                                       spte = pfn1;
-
-   if (cmpxchg(spte, old_spte, old_spte+W))
-	mark_page_dirty(vcpu->kvm, gfn1)
-             OOPS!!!
-
-We dirty-log for gfn1; that means gfn2 is lost in the dirty bitmap.
-
-For direct sp, we can easily avoid it since the spte of direct sp is fixed
-to gfn. For indirect sp, before we do cmpxchg, we call gfn_to_pfn_atomic()
-to pin gfn to pfn, because after gfn_to_pfn_atomic():
-- We have held the refcount of pfn; that means the pfn can not be freed and
-  be reused for another gfn.
-- The pfn is writable; that means it can not be shared between different gfns
-  by KSM.
-
-Then, we can ensure the dirty bitmap is correctly set for a gfn.
-
-Currently, to simplify things, we disable fast page fault for
-indirect shadow pages.
-
-2): Dirty bit tracking
-In the original code, the spte can be fast updated (non-atomically) if the
-spte is read-only and the Accessed bit has already been set since the
-Accessed bit and Dirty bit can not be lost.
-
-But it is not true after fast page fault since the spte can be marked
-writable between reading spte and updating spte, as in the following case:
-
-At the beginning:
-spte.W = 0
-spte.Accessed = 1
-
-   VCPU 0                           VCPU 1
-In mmu_spte_clear_track_bits():
-
-   old_spte = *spte;
-
-   /* 'if' condition is satisfied. */
-   if (old_spte.Accessed == 1 &&
-        old_spte.W == 0)
-      spte = 0ull;
-                                         on fast page fault path:
-                                             spte.W = 1
-                                         memory write on the spte:
-                                             spte.Dirty = 1
-
-
-   else
-      old_spte = xchg(spte, 0ull)
-
-
-   if (old_spte.Accessed == 1)
-      kvm_set_pfn_accessed(spte.pfn);
-   if (old_spte.Dirty == 1)
-      kvm_set_pfn_dirty(spte.pfn);
-      OOPS!!!
-
-The Dirty bit is lost in this case.
-
-In order to avoid this kind of issue, we always treat the spte as "volatile"
-if it can be updated out of mmu-lock (see spte_has_volatile_bits()); it means
-the spte is always atomically updated in this case.
-
-3): flush tlbs due to spte update
-If the spte is updated from writable to readonly, we should flush all TLBs,
-otherwise rmap_write_protect will find a read-only spte, even though the
-writable spte might be cached on a CPU's TLB.
-
-As mentioned before, the spte can be updated to writable out of mmu-lock on
-the fast page fault path. In order to easily audit the path, we check in
-mmu_spte_update() whether TLBs need to be flushed for this reason, since this
-is a common function to update spte (present -> present).
-
-Since the spte is "volatile" if it can be updated out of mmu-lock, we always
-atomically update the spte, and the race caused by fast page fault can be
-avoided; see the comments in spte_has_volatile_bits() and mmu_spte_update().
-
-Lockless Access Tracking:
-
-This is used for Intel CPUs that are using EPT but do not support the EPT A/D
-bits. In this case, when the KVM MMU notifier is called to track accesses to a
-page (via kvm_mmu_notifier_clear_flush_young), it marks the PTE as not-present
-by clearing the RWX bits in the PTE and storing the original R & X bits in
-some unused/ignored bits. In addition, the SPTE_SPECIAL_MASK is also set on the
-PTE (using the ignored bit 62).
When the VM tries to access the page later on,
-a fault is generated and the fast page fault mechanism described above is used
-to atomically restore the PTE to a Present state. The W bit is not saved when
-the PTE is marked for access tracking and during restoration to the Present
-state, the W bit is set depending on whether or not it was a write access. If
-it wasn't, then the W bit will remain clear until a write access happens, at
-which time it will be set using the Dirty tracking mechanism described above.
-
-3. Reference
-------------
-
-Name:		kvm_lock
-Type:		mutex
-Arch:		any
-Protects:	- vm_list
-
-Name:		kvm_count_lock
-Type:		raw_spinlock_t
-Arch:		any
-Protects:	- hardware virtualization enable/disable
-Comment:	'raw' because hardware enabling/disabling must be atomic w.r.t.
-		migration.
-
-Name:		kvm_arch::tsc_write_lock
-Type:		raw_spinlock
-Arch:		x86
-Protects:	- kvm_arch::{last_tsc_write,last_tsc_nsec,last_tsc_offset}
-		- tsc offset in vmcb
-Comment:	'raw' because updating the tsc offsets must not be preempted.
-
-Name:		kvm->mmu_lock
-Type:		spinlock_t
-Arch:		any
-Protects:	-shadow page/shadow tlb entry
-Comment:	it is a spinlock since it is used in mmu notifier.
-
-Name:		kvm->srcu
-Type:		srcu lock
-Arch:		any
-Protects:	- kvm->memslots
-		- kvm->buses
-Comment:	The srcu read lock must be held while accessing memslots (e.g.
-		when using gfn_to_* functions) and while accessing in-kernel
-		MMIO/PIO address->device structure mapping (kvm->buses).
-		The srcu index can be stored in kvm_vcpu->srcu_idx per vcpu
-		if it is needed by multiple functions.
-
-Name:		blocked_vcpu_on_cpu_lock
-Type:		spinlock_t
-Arch:		x86
-Protects:	blocked_vcpu_on_cpu
-Comment:	This is a per-CPU lock and it is used for VT-d posted-interrupts.
-		When VT-d posted-interrupts are supported and the VM has assigned
-		devices, we put the blocked vCPU on the list blocked_vcpu_on_cpu
-		protected by blocked_vcpu_on_cpu_lock. When the VT-d hardware
-		issues a wakeup notification event (because an external interrupt
-		from the assigned devices has arrived), we find the vCPU on the
-		list and wake it up.
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
deleted file mode 100644
index 2efe0efc516e..000000000000
--- a/Documentation/virtual/kvm/mmu.txt
+++ /dev/null
@@ -1,449 +0,0 @@
-The x86 kvm shadow mmu
-======================
-
-The mmu (in arch/x86/kvm, files mmu.[ch] and paging_tmpl.h) is responsible
-for presenting a standard x86 mmu to the guest, while translating guest
-physical addresses to host physical addresses.
-
-The mmu code attempts to satisfy the following requirements:
-
-- correctness: the guest should not be able to determine that it is running
-               on an emulated mmu except for timing (we attempt to comply
-               with the specification, not emulate the characteristics of
-               a particular implementation such as tlb size)
-- security:    the guest must not be able to touch host memory not assigned
-               to it
-- performance: minimize the performance penalty imposed by the mmu
-- scaling:     need to scale to large memory and large vcpu guests
-- hardware:    support the full range of x86 virtualization hardware
-- integration: Linux memory management code must be in control of guest memory
-               so that swapping, page migration, page merging, transparent
-               hugepages, and similar features work without change
-- dirty tracking: report writes to guest memory to enable live migration
-               and framebuffer-based displays
-- footprint:   keep the amount of pinned kernel memory low (most memory
-               should be shrinkable)
-- reliability: avoid multipage or GFP_ATOMIC allocations
-
-Acronyms
-========
-
-pfn   host page frame number
-hpa   host physical address
-hva   host virtual address
-gfn   guest frame number
-gpa   guest physical address
-gva   guest virtual address
-ngpa  nested guest physical address
-ngva  nested guest virtual address
-pte   page table entry (used also to refer generically to paging structure
-      entries)
-gpte  guest pte (referring to gfns)
-spte  shadow pte (referring to pfns)
-tdp   two dimensional paging (vendor neutral term for NPT and EPT)
-
-Virtual and real hardware supported
-===================================
-
-The mmu supports first-generation mmu hardware, which allows an atomic switch
-of the current paging mode and cr3 during guest entry, as well as
-two-dimensional paging (AMD's NPT and Intel's EPT). The emulated hardware
-it exposes is the traditional 2/3/4 level x86 mmu, with support for global
-pages, pae, pse, pse36, cr0.wp, and 1GB pages. The emulated hardware is also
-able to expose NPT capable hardware on NPT capable hosts.
-
-Translation
-===========
-
-The primary job of the mmu is to program the processor's mmu to translate
-addresses for the guest. Different translations are required at different
-times:
-
-- when guest paging is disabled, we translate guest physical addresses to
-  host physical addresses (gpa->hpa)
-- when guest paging is enabled, we translate guest virtual addresses, to
-  guest physical addresses, to host physical addresses (gva->gpa->hpa)
-- when the guest launches a guest of its own, we translate nested guest
-  virtual addresses, to nested guest physical addresses, to guest physical
-  addresses, to host physical addresses (ngva->ngpa->gpa->hpa)
-
-The primary challenge is to encode between 1 and 3 translations into hardware
-that supports only 1 (traditional) and 2 (tdp) translations. When the
-number of required translations matches the hardware, the mmu operates in
-direct mode; otherwise it operates in shadow mode (see below).
-
-Memory
-======
-
-Guest memory (gpa) is part of the user address space of the process that is
-using kvm. Userspace defines the translation between guest addresses and user
-addresses (gpa->hva); note that two gpas may alias to the same hva, but not
-vice versa.
-
-These hvas may be backed using any method available to the host: anonymous
-memory, file backed memory, and device memory. Memory might be paged by the
-host at any time.
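-
-For example, userspace typically establishes a gpa->hva mapping like this
-(vm_fd and an mmap()ed region are assumed):
-
-	struct kvm_userspace_memory_region region = {
-		.slot            = 0,
-		.guest_phys_addr = 0,			/* gpa base */
-		.memory_size     = mem_size,
-		.userspace_addr  = (__u64)hva_base,	/* e.g. from mmap() */
-	};
-
-	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
-		perror("KVM_SET_USER_MEMORY_REGION");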
-
-Events
-======
-
-The mmu is driven by events, some from the guest, some from the host.
-
-Guest generated events:
-- writes to control registers (especially cr3)
-- invlpg/invlpga instruction execution
-- access to missing or protected translations
-
-Host generated events:
-- changes in the gpa->hpa translation (either through gpa->hva changes or
-  through hva->hpa changes)
-- memory pressure (the shrinker)
-
-Shadow pages
-============
-
-The principal data structure is the shadow page, 'struct kvm_mmu_page'. A
-shadow page contains 512 sptes, which can be either leaf or nonleaf sptes. A
-shadow page may contain a mix of leaf and nonleaf sptes.
-
-A nonleaf spte allows the hardware mmu to reach the leaf pages and
-is not related to a translation directly. It points to other shadow pages.
-
-A leaf spte corresponds to either one or two translations encoded into
-one paging structure entry. These are always the lowest level of the
-translation stack, with optional higher level translations left to NPT/EPT.
-Leaf ptes point at guest pages.
-
-The following table shows translations encoded by leaf ptes, with higher-level
-translations in parentheses:
-
- Non-nested guests:
-  nonpaging:     gpa->hpa
-  paging:        gva->gpa->hpa
-  paging, tdp:   (gva->)gpa->hpa
- Nested guests:
-  non-tdp:       ngva->gpa->hpa  (*)
-  tdp:           (ngva->)ngpa->gpa->hpa
-
-(*) the guest hypervisor will encode the ngva->gpa translation into its page
-    tables if npt is not present
-
-Shadow pages contain the following information:
-  role.level:
-    The level in the shadow paging hierarchy that this shadow page belongs to.
-    1=4k sptes, 2=2M sptes, 3=1G sptes, etc.
-  role.direct:
-    If set, leaf sptes reachable from this page are for a linear range.
-    Examples include real mode translation, large guest pages backed by small
-    host pages, and gpa->hpa translations when NPT or EPT is active.
-    The linear range starts at (gfn << PAGE_SHIFT) and its size is determined
-    by role.level (2MB for first level, 1GB for second level, 0.5TB for third
-    level, 256TB for fourth level)
-    If clear, this page corresponds to a guest page table denoted by the gfn
-    field.
-  role.quadrant:
-    When role.gpte_is_8_bytes=0, the guest uses 32-bit gptes while the host uses 64-bit
-    sptes. That means a guest page table contains more ptes than the host,
-    so multiple shadow pages are needed to shadow one guest page.
-    For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the
-    first or second 512-gpte block in the guest page table. For second-level
-    page tables, each 32-bit gpte is converted to two 64-bit sptes
-    (since each first-level guest page is shadowed by two first-level
-    shadow pages) so role.quadrant takes values in the range 0..3. Each
-    quadrant maps 1GB virtual address space.
-  role.access:
-    Inherited guest access permissions in the form uwx. Note execute
-    permission is positive, not negative.
-  role.invalid:
-    The page is invalid and should not be used. It is a root page that is
-    currently pinned (by a cpu hardware register pointing to it); once it is
-    unpinned it will be destroyed.
-  role.gpte_is_8_bytes:
-    Reflects the size of the guest PTE for which the page is valid, i.e. '1'
-    if 64-bit gptes are in use, '0' if 32-bit gptes are in use.
-  role.nxe:
-    Contains the value of efer.nxe for which the page is valid.
-  role.cr0_wp:
-    Contains the value of cr0.wp for which the page is valid.
-  role.smep_andnot_wp:
-    Contains the value of cr4.smep && !cr0.wp for which the page is valid
-    (pages for which this is true are different from other pages; see the
-    treatment of cr0.wp=0 below).
-  role.smap_andnot_wp:
-    Contains the value of cr4.smap && !cr0.wp for which the page is valid
-    (pages for which this is true are different from other pages; see the
-    treatment of cr0.wp=0 below).
-  role.ept_sp:
-    This is a virtual flag to denote a shadowed nested EPT page. ept_sp
-    is true if "cr0_wp && smap_andnot_wp", an otherwise invalid combination.
-  role.smm:
-    Is 1 if the page is valid in system management mode. This field
-    determines which of the kvm_memslots array was used to build this
-    shadow page; it is also used to go back from a struct kvm_mmu_page
-    to a memslot, through the kvm_memslots_for_spte_role macro and
-    __gfn_to_memslot.
-  role.ad_disabled:
-    Is 1 if the MMU instance cannot use A/D bits. EPT did not have A/D
-    bits before Haswell; shadow EPT page tables also cannot use A/D bits
-    if the L1 hypervisor does not enable them.
-  gfn:
-    Either the guest page table containing the translations shadowed by this
-    page, or the base page frame for linear translations. See role.direct.
-  spt:
-    A pageful of 64-bit sptes containing the translations for this page.
-    Accessed by both kvm and hardware.
-    The page pointed to by spt will have its page->private pointing back
-    at the shadow page structure.
-    sptes in spt point either at guest pages, or at lower-level shadow pages.
-    Specifically, if sp1 and sp2 are shadow pages, then sp1->spt[n] may point
-    at __pa(sp2->spt). sp2 will point back at sp1 through parent_pte.
-    The spt array forms a DAG structure with the shadow page as a node, and
-    guest pages as leaves.
-  gfns:
-    An array of 512 guest frame numbers, one for each present pte. Used to
-    perform a reverse map from a pte to a gfn. When role.direct is set, any
-    element of this array can be calculated from the gfn field when used; in
-    this case, the array of gfns is not allocated. See role.direct and gfn.
-  root_count:
-    A counter keeping track of how many hardware registers (guest cr3 or
-    pdptrs) are now pointing at the page. While this counter is nonzero, the
-    page cannot be destroyed. See role.invalid.
-  parent_ptes:
-    The reverse mapping for the pte/ptes pointing at this page's spt. If
-    parent_ptes bit 0 is zero, only one spte points at this page and
-    parent_ptes points at this single spte, otherwise, there exist multiple
-    sptes pointing at this page and (parent_ptes & ~0x1) points at a data
-    structure with a list of parent sptes.
-  unsync:
-    If true, then the translations in this page may not match the guest's
-    translation. This is equivalent to the state of the tlb when a pte is
-    changed but before the tlb entry is flushed. Accordingly, unsync ptes
-    are synchronized when the guest executes invlpg or flushes its tlb by
-    other means. Valid for leaf pages.
-  unsync_children:
-    How many sptes in the page point at pages that are unsync (or have
-    unsynchronized children).
-  unsync_child_bitmap:
-    A bitmap indicating which sptes in spt point (directly or indirectly) at
-    pages that may be unsynchronized. Used to quickly locate all unsynchronized
-    pages reachable from a given page.
-  clear_spte_count:
-    Only present on 32-bit hosts, where a 64-bit spte cannot be written
-    atomically. The reader uses this while running out of the MMU lock
-    to detect in-progress updates and retry them until the writer has
-    finished the write.
-  write_flooding_count:
-    A guest may write to a page table many times, causing a lot of
-    emulations if the page needs to be write-protected (see "Synchronized
-    and unsynchronized pages" below).
-    so that they do not trigger frequent emulation, but this is not
-    possible for non-leafs. This field counts the number of emulations
-    since the last time the page table was actually used; if emulation
-    is triggered too frequently on this page, KVM will unmap the page
-    to avoid emulation in the future.
-
-Reverse map
-===========
-
-The mmu maintains a reverse mapping whereby all ptes mapping a page can be
-reached given its gfn. This is used, for example, when swapping out a page.
-
-Synchronized and unsynchronized pages
-=====================================
-
-The guest uses two events to synchronize its tlb and page tables: tlb flushes
-and page invalidations (invlpg).
-
-A tlb flush means that we need to synchronize all sptes reachable from the
-guest's cr3. This is expensive, so we keep all guest page tables write
-protected, and synchronize sptes to gptes when a gpte is written.
-
-A special case is when a guest page table is reachable from the current
-guest cr3. In this case, the guest is obliged to issue an invlpg instruction
-before using the translation. We take advantage of that by removing write
-protection from the guest page, and allowing the guest to modify it freely.
-We synchronize modified gptes when the guest invokes invlpg. This reduces
-the amount of emulation we have to do when the guest modifies multiple gptes,
-or when a guest page is no longer used as a page table and is used for
-random guest data.
-
-As a side effect we have to resynchronize all reachable unsynchronized shadow
-pages on a tlb flush.
-
-
-Reaction to events
-==================
-
-- guest page fault (or npt page fault, or ept violation)
-
-This is the most complicated event. The cause of a page fault can be:
-
-  - a true guest fault (the guest translation won't allow the access) (*)
-  - access to a missing translation
-  - access to a protected translation
-    - when logging dirty pages, memory is write protected
-    - synchronized shadow pages are write protected (*)
-  - access to untranslatable memory (mmio)
-
-  (*) not applicable in direct mode
-
-Handling a page fault is performed as follows:
-
- - if the RSV bit of the error code is set, the page fault is caused by guest
-   accessing MMIO and cached MMIO information is available.
-   - walk shadow page table
-   - check for valid generation number in the spte (see "Fast invalidation of
-     MMIO sptes" below)
-   - cache the information to vcpu->arch.mmio_gva, vcpu->arch.access and
-     vcpu->arch.mmio_gfn, and call the emulator
- - If both P bit and R/W bit of error code are set, this could possibly
-   be handled as a "fast page fault" (fixed without taking the MMU lock). See
-   the description in Documentation/virtual/kvm/locking.txt.
- - if needed, walk the guest page tables to determine the guest translation
-   (gva->gpa or ngpa->gpa)
-   - if permissions are insufficient, reflect the fault back to the guest
- - determine the host page
-   - if this is an mmio request, there is no host page; cache the info to
-     vcpu->arch.mmio_gva, vcpu->arch.access and vcpu->arch.mmio_gfn
- - walk the shadow page table to find the spte for the translation,
-   instantiating missing intermediate page tables as necessary
-   - If this is an mmio request, cache the mmio info to the spte and set some
-     reserved bit on the spte (see callers of kvm_mmu_set_mmio_spte_mask)
- - try to unsynchronize the page
-   - if successful, we can let the guest continue and modify the gpte
- - emulate the instruction
-   - if failed, unshadow the page and let the guest continue
- - update any translations that were modified by the instruction
-
-invlpg handling:
-
-  - walk the shadow page hierarchy and drop affected translations
-  - try to reinstantiate the indicated translation in the hope that the
-    guest will use it in the near future
-
-Guest control register updates:
-
-- mov to cr3
-  - look up new shadow roots
-  - synchronize newly reachable shadow pages
-
-- mov to cr0/cr4/efer
-  - set up mmu context for new paging mode
-  - look up new shadow roots
-  - synchronize newly reachable shadow pages
-
-Host translation updates:
-
-  - mmu notifier called with updated hva
-  - look up affected sptes through reverse map
-  - drop (or update) translations
-
-Emulating cr0.wp
-================
-
-If tdp is not enabled, the host must keep cr0.wp=1 so page write protection
-works for the guest kernel, not guest userspace. When the guest
-cr0.wp=1, this does not present a problem. However when the guest cr0.wp=0,
-we cannot map the permissions for gpte.u=1, gpte.w=0 to any spte (the
-semantics require allowing any guest kernel access plus user read access).
-
-We handle this by mapping the permissions to two possible sptes, depending
-on fault type:
-
-- kernel write fault: spte.u=0, spte.w=1 (allows full kernel access,
-  disallows user access)
-- read fault: spte.u=1, spte.w=0 (allows full read access, disallows kernel
-  write access)
-
-(user write faults generate a #PF)
-
-In the first case there are two additional complications:
-- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
-  the kernel may now execute it. We handle this by also setting spte.nx.
-  If we get a user fetch or read fault, we'll change spte.u=1 and
-  spte.nx=gpte.nx back. For this to work, KVM forces EFER.NX to 1 when
-  shadow paging is in use.
-- if CR4.SMAP is disabled: since the page has been changed to a kernel
-  page, it cannot be reused when CR4.SMAP is enabled. We set
-  CR4.SMAP && !CR0.WP into the shadow page's role to avoid this case. Note
-  that we do not care about the case where CR4.SMAP is enabled, since KVM
-  will directly inject a #PF to the guest due to the failed permission check.
-
-To prevent an spte that was converted into a kernel page with cr0.wp=0
-from being written by the kernel after cr0.wp has changed to 1, we make
-the value of cr0.wp part of the page role. This means that an spte created
-with one value of cr0.wp cannot be used when cr0.wp has a different value -
-it will simply be missed by the shadow page lookup code. A similar issue
-exists when an spte created with cr0.wp=0 and cr4.smep=0 is used after
-changing cr4.smep to 1. To avoid this, the value of !cr0.wp && cr4.smep
-is also made a part of the page role.
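-
-For illustration, a minimal sketch of the two mappings above. The helper and
-its signature are made up for this document; this is not the actual KVM code:
-
-    /*
-     * Choose spte permissions for a gpte with u=1, w=0 while the guest
-     * runs with cr0.wp=0.  'write' and 'user' describe the faulting
-     * access; a user write fault is reflected back to the guest as a
-     * #PF and never reaches this point.
-     */
-    static void wp0_spte_perms(bool write, bool user,
-                               bool *spte_u, bool *spte_w)
-    {
-        if (write && !user) {           /* kernel write fault */
-            *spte_u = false;            /* hide the page from user mode */
-            *spte_w = true;             /* allow kernel writes */
-        } else {                        /* read fault */
-            *spte_u = true;             /* allow user (and kernel) reads */
-            *spte_w = false;            /* keep the page write-protected */
-        }
-    }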
-
-Large pages
-===========
-
-The mmu supports all combinations of large and small guest and host pages.
-Supported page sizes include 4k, 2M, 4M, and 1G. 4M pages are treated as
-two separate 2M pages, on both guest and host, since the mmu always uses PAE
-paging.
-
-To instantiate a large spte, four constraints must be satisfied:
-
-- the spte must point to a large host page
-- the guest pte must be a large pte of at least equivalent size (if tdp is
-  enabled, there is no guest pte and this condition is satisfied)
-- if the spte will be writeable, the large page frame may not overlap any
-  write-protected pages
-- the guest page must be wholly contained by a single memory slot
-
-To check the last two conditions, the mmu maintains a ->disallow_lpage set of
-arrays for each memory slot and large page size. Every write protected page
-causes its disallow_lpage to be incremented, thus preventing instantiation of
-a large spte. The frames at the end of an unaligned memory slot have
-artificially inflated ->disallow_lpages so they can never be instantiated.
-
-Fast invalidation of MMIO sptes
-===============================
-
-As mentioned in "Reaction to events" above, kvm will cache MMIO
-information in leaf sptes. When a new memslot is added or an existing
-memslot is changed, this information may become stale and needs to be
-invalidated. This would require holding the MMU lock while walking all
-shadow pages; instead, a generation-based technique keeps it scalable.
-
-MMIO sptes have a few spare bits, which are used to store a
-generation number. The global generation number is stored in
-kvm_memslots(kvm)->generation, and increased whenever guest memory info
-changes.
-
-When KVM finds an MMIO spte, it checks the generation number of the spte.
-If the generation number of the spte does not equal the global generation
-number, it will ignore the cached MMIO information and handle the page
-fault through the slow path.
-
-Since only 19 bits are used to store the generation number in an MMIO spte,
-all pages are zapped when the generation number overflows.
-
-Unfortunately, a single memory access might access kvm_memslots(kvm) multiple
-times, the last one happening when the generation number is retrieved and
-stored into the MMIO spte. Thus, the MMIO spte might be created based on
-out-of-date information, but with an up-to-date generation number.
-
-To avoid this, the generation number is incremented again after synchronize_srcu
-returns; thus, bit 63 of kvm_memslots(kvm)->generation is set to 1 only during
-a memslot update, while some SRCU readers might be using the old copy. We do
-not want to use an MMIO spte created while an update is in progress, and we can
-detect that without losing a bit in the MMIO spte. The "update in-progress"
-bit of the generation is not stored in the MMIO spte, and so is implicitly zero
-when the generation is extracted out of the spte. If KVM is unlucky and creates
-an MMIO spte while an update is in-progress, the next access to the spte will
-always be a cache miss. For example, a subsequent access during the update
-window will miss due to the in-progress flag diverging, while an access after
-the update window closes will have a higher generation number (as compared to
-the spte).
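-
-A minimal sketch of the generation check just described. The names and the
-exact bit layout are illustrative only; the real masks live in the KVM MMIO
-spte code:
-
-    #define GEN_UPDATE_IN_PROGRESS  (1ull << 63)
-    #define GEN_LOW_MASK            ((1ull << 19) - 1)  /* 19 spte bits */
-
-    static bool mmio_spte_is_fresh(u64 memslots_gen, u64 spte_gen)
-    {
-        /* an in-progress memslot update invalidates all MMIO sptes */
-        if (memslots_gen & GEN_UPDATE_IN_PROGRESS)
-            return false;
-
-        /* the spte stores only the low 19 generation bits */
-        return spte_gen == (memslots_gen & GEN_LOW_MASK);
-    }
-
-An spte that fails this check is ignored and the fault is handled through the
-slow path, as if no MMIO information had been cached.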
-
-
-Further reading
-===============
-
-- NPT presentation from KVM Forum 2008
-  http://www.linux-kvm.org/images/c/c8/KvmForum2008%24kdf2008_21.pdf
-
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
deleted file mode 100644
index df1f4338b3ca..000000000000
--- a/Documentation/virtual/kvm/msr.txt
+++ /dev/null
@@ -1,284 +0,0 @@
-KVM-specific MSRs.
-Glauber Costa, Red Hat Inc, 2010
-=====================================================
-
-KVM makes use of some custom MSRs to service some requests.
-
-Custom MSRs have a range reserved for them that goes from
-0x4b564d00 to 0x4b564dff. There are MSRs outside this area,
-but they are deprecated and their use is discouraged.
-
-Custom MSR list
----------------
-
-The current supported Custom MSR list is:
-
-MSR_KVM_WALL_CLOCK_NEW:   0x4b564d00
-
-    data: 4-byte aligned physical address of a memory area which must be
-    in guest RAM. This memory is expected to hold a copy of the following
-    structure:
-
-    struct pvclock_wall_clock {
-        u32   version;
-        u32   sec;
-        u32   nsec;
-    } __attribute__((__packed__));
-
-    whose data will be filled in by the hypervisor. The hypervisor is only
-    guaranteed to update this data at the moment of MSR write.
-    Users that want to reliably query this information more than once have
-    to write more than once to this MSR. Fields have the following meanings:
-
-        version: guest has to check version before and after grabbing
-        time information and check that they are both equal and even.
-        An odd version indicates an in-progress update.
-
-        sec: number of seconds for wallclock at time of boot.
-
-        nsec: number of nanoseconds for wallclock at time of boot.
-
-    In order to get the current wallclock time, the system_time from
-    MSR_KVM_SYSTEM_TIME_NEW needs to be added.
-
-    Note that although MSRs are per-CPU entities, the effect of this
-    particular MSR is global.
-
-    Availability of this MSR must be checked via bit 3 in 0x40000001 cpuid
-    leaf prior to usage.
-
-MSR_KVM_SYSTEM_TIME_NEW:  0x4b564d01
-
-    data: 4-byte aligned physical address of a memory area which must be in
-    guest RAM, plus an enable bit in bit 0. This memory is expected to hold
-    a copy of the following structure:
-
-    struct pvclock_vcpu_time_info {
-        u32   version;
-        u32   pad0;
-        u64   tsc_timestamp;
-        u64   system_time;
-        u32   tsc_to_system_mul;
-        s8    tsc_shift;
-        u8    flags;
-        u8    pad[2];
-    } __attribute__((__packed__)); /* 32 bytes */
-
-    whose data will be filled in by the hypervisor periodically. Only one
-    write, or registration, is needed for each VCPU. The interval between
-    updates of this structure is arbitrary and implementation-dependent.
-    The hypervisor may update this structure at any time it sees fit until
-    anything with bit0 == 0 is written to it.
-
-    Fields have the following meanings:
-
-        version: guest has to check version before and after grabbing
-        time information and check that they are both equal and even.
-        An odd version indicates an in-progress update.
-
-        tsc_timestamp: the tsc value at the current VCPU at the time
-        of the update of this structure. Guests can subtract this value
-        from current tsc to derive a notion of elapsed time since the
-        structure update.
-
-        system_time: a host notion of monotonic time, including sleep
-        time at the time this structure was last updated. Unit is
-        nanoseconds.
-
-        tsc_to_system_mul: multiplier to be used when converting
-        tsc-related quantity to nanoseconds
-
-        tsc_shift: shift to be used when converting tsc-related
-        quantity to nanoseconds.
-        This shift will ensure that multiplication with
-        tsc_to_system_mul does not overflow. A positive value denotes a
-        left shift, a negative value a right shift.
-
-        The conversion from tsc to nanoseconds involves an additional
-        right shift by 32 bits. With this information, guests can
-        derive per-CPU time by doing:
-
-            time = (current_tsc - tsc_timestamp)
-            if (tsc_shift >= 0)
-                time <<= tsc_shift;
-            else
-                time >>= -tsc_shift;
-            time = (time * tsc_to_system_mul) >> 32
-            time = time + system_time
-
-        flags: bits in this field indicate extended capabilities
-        coordinated between the guest and the hypervisor. Availability
-        of specific flags has to be checked in 0x40000001 cpuid leaf.
-        Current flags are:
-
-             flag bit   | cpuid bit    | meaning
-            -------------------------------------------------------------
-                        |              | time measures taken across
-                 0      |      24      | multiple cpus are guaranteed to
-                        |              | be monotonic
-            -------------------------------------------------------------
-                        |              | guest vcpu has been paused by
-                 1      |     N/A      | the host
-                        |              | See 4.70 in api.txt
-            -------------------------------------------------------------
-
-    Availability of this MSR must be checked via bit 3 in 0x40000001 cpuid
-    leaf prior to usage.
-
-
-MSR_KVM_WALL_CLOCK:  0x11
-
-    data and functioning: same as MSR_KVM_WALL_CLOCK_NEW. Use that instead.
-
-    This MSR falls outside the reserved KVM range and may be removed in the
-    future. Its usage is deprecated.
-
-    Availability of this MSR must be checked via bit 0 in 0x40000001 cpuid
-    leaf prior to usage.
-
-MSR_KVM_SYSTEM_TIME: 0x12
-
-    data and functioning: same as MSR_KVM_SYSTEM_TIME_NEW. Use that instead.
-
-    This MSR falls outside the reserved KVM range and may be removed in the
-    future. Its usage is deprecated.
-
-    Availability of this MSR must be checked via bit 0 in 0x40000001 cpuid
-    leaf prior to usage.
-
-    The suggested algorithm for detecting kvmclock presence is then:
-
-        if (!kvm_para_available())    /* refer to cpuid.txt */
-            return NON_PRESENT;
-
-        flags = cpuid_eax(0x40000001);
-        if (flags & (1 << 3)) {
-            msr_kvm_system_time = MSR_KVM_SYSTEM_TIME_NEW;
-            msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK_NEW;
-            return PRESENT;
-        } else if (flags & (1 << 0)) {
-            msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
-            msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
-            return PRESENT;
-        } else
-            return NON_PRESENT;
-
-MSR_KVM_ASYNC_PF_EN: 0x4b564d02
-    data: Bits 63-6 hold 64-byte aligned physical address of a
-    64 byte memory area which must be in guest RAM and must be
-    zeroed. Bits 5-3 are reserved and should be zero. Bit 0 is 1
-    when asynchronous page faults are enabled on the vcpu, 0 when
-    disabled. Bit 1 is 1 if asynchronous page faults can be injected
-    when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults
-    are delivered to L1 as #PF vmexits. Bit 2 can be set only if
-    KVM_FEATURE_ASYNC_PF_VMEXIT is present in CPUID.
-
-    The first 4 bytes of the 64-byte memory location will be written to by
-    the hypervisor at the time of asynchronous page fault (APF)
-    injection to indicate the type of asynchronous page fault. A value
-    of 1 means that the page referred to by the page fault is not
-    present. A value of 2 means that the page is now available. Disabling
-    interrupts inhibits APFs. The guest must not enable interrupts
-    before the reason is read, or it may be overwritten by another
-    APF. Since APF uses the same exception vector as a regular page
-    fault, the guest must reset the reason to 0 before it does
-    something that can generate a normal page fault. If the APF reason
-    is 0 during a page fault, it is a regular page fault.
-
-    During delivery of a type 1 APF, cr2 contains a token that will
-    be used to notify the guest when the missing page becomes
-    available. When the page becomes available, a type 2 APF is sent with
-    cr2 set to the token associated with the page. There is a special
-    token, 0xffffffff, which tells the vcpu that it should wake
-    up all processes waiting for APFs and that no individual type 2 APFs
-    will be sent.
-
-    If APF is disabled while there are outstanding APFs, they will
-    not be delivered.
-
-    Currently a type 2 APF will always be delivered on the same vcpu as
-    the type 1 was, but the guest should not rely on that.
-
-MSR_KVM_STEAL_TIME: 0x4b564d03
-
-    data: 64-byte aligned physical address of a memory area which must be
-    in guest RAM, plus an enable bit in bit 0. This memory is expected to
-    hold a copy of the following structure:
-
-    struct kvm_steal_time {
-        __u64 steal;
-        __u32 version;
-        __u32 flags;
-        __u8  preempted;
-        __u8  u8_pad[3];
-        __u32 pad[11];
-    }
-
-    whose data will be filled in by the hypervisor periodically. Only one
-    write, or registration, is needed for each VCPU. The interval between
-    updates of this structure is arbitrary and implementation-dependent.
-    The hypervisor may update this structure at any time it sees fit until
-    anything with bit0 == 0 is written to it. The guest is required to make
-    sure this structure is initialized to zero.
-
-    Fields have the following meanings:
-
-        version: a sequence counter. In other words, the guest has to
-        check this field before and after grabbing time information and
-        make sure they are both equal and even. An odd version indicates
-        an in-progress update.
-
-        flags: At this point, always zero. May be used to indicate
-        changes in this structure in the future.
-
-        steal: the amount of time in which this vCPU did not run, in
-        nanoseconds. Time during which the vcpu is idle will not be
-        reported as steal time.
-
-        preempted: indicates whether the vCPU that owns this struct has
-        been preempted. Non-zero values mean the vCPU has been preempted.
-        Zero means the vCPU is not preempted. NOTE, it is always zero if
-        the hypervisor doesn't support this field.
-
-MSR_KVM_EOI_EN: 0x4b564d04
-    data: Bit 0 is 1 when PV end of interrupt is enabled on the vcpu; 0
-    when disabled. Bit 1 is reserved and must be zero. When PV end of
-    interrupt is enabled (bit 0 set), bits 63-2 hold a 4-byte aligned
-    physical address of a 4 byte memory area which must be in guest RAM and
-    must be zeroed.
-
-    The first (least significant) bit of the 4-byte memory location will be
-    written to by the hypervisor, typically at the time of interrupt
-    injection. A value of 1 means that the guest can skip writing EOI to
-    the apic (using MSR or MMIO write); instead, it is sufficient to signal
-    EOI by clearing the bit in guest memory - this location will
-    later be polled by the hypervisor.
-    A value of 0 means that the EOI write is required.
-
-    It is always safe for the guest to ignore the optimization and perform
-    the APIC EOI write anyway.
-
-    The hypervisor is guaranteed to only modify this least significant bit
-    while in the current VCPU context; this means that the guest does not
-    need to use either a lock prefix or memory ordering primitives to
-    synchronise with the hypervisor.
-
-    However, the hypervisor can set and clear this memory bit at any time.
-    Therefore, to make sure the hypervisor does not interrupt the guest
-    and clear the least significant bit in the memory area in the window
-    between the guest testing it (to detect whether it can skip the EOI
-    apic write) and clearing it (to signal EOI to the hypervisor), the
-    guest must both read the least significant bit in the memory area and
-    clear it using a single CPU instruction, such as test-and-clear or
-    compare-and-exchange.
-
-MSR_KVM_POLL_CONTROL: 0x4b564d05
-    Control host-side polling.
-
-    data: Bit 0 enables (1) or disables (0) host-side HLT polling logic.
-
-    KVM guests can request the host not to poll on HLT, for example if
-    they are performing polling themselves.
-
diff --git a/Documentation/virtual/kvm/nested-vmx.txt b/Documentation/virtual/kvm/nested-vmx.txt
deleted file mode 100644
index 97eb1353e962..000000000000
--- a/Documentation/virtual/kvm/nested-vmx.txt
+++ /dev/null
@@ -1,240 +0,0 @@
-Nested VMX
-==========
-
-Overview
----------
-
-On Intel processors, KVM uses Intel's VMX (Virtual-Machine eXtensions)
-to easily and efficiently run guest operating systems. Normally, these guests
-*cannot* themselves be hypervisors running their own guests, because in VMX,
-guests cannot use VMX instructions.
-
-The "Nested VMX" feature adds this missing capability - of running guest
-hypervisors (which use VMX) with their own nested guests. It does so by
-allowing a guest to use VMX instructions, and correctly and efficiently
-emulating them using the single level of VMX available in the hardware.
-
-We describe in much greater detail the theory behind the nested VMX feature,
-its implementation and its performance characteristics, in the OSDI 2010 paper
-"The Turtles Project: Design and Implementation of Nested Virtualization",
-available at:
-
-    http://www.usenix.org/events/osdi10/tech/full_papers/Ben-Yehuda.pdf
-
-
-Terminology
------------
-
-Single-level virtualization has two levels - the host (KVM) and the guests.
-In nested virtualization, we have three levels: The host (KVM), which we call
-L0, the guest hypervisor, which we call L1, and its nested guest, which we
-call L2.
-
-
-Running nested VMX
-------------------
-
-The nested VMX feature is disabled by default. It can be enabled by giving
-the "nested=1" option to the kvm-intel module.
-
-No modifications are required to user space (qemu). However, qemu's default
-emulated CPU type (qemu64) does not list the "VMX" CPU feature, so it must be
-explicitly enabled, by giving qemu one of the following options:
-
-    -cpu host              (emulated CPU has all features of the real CPU)
-
-    -cpu qemu64,+vmx       (add just the vmx feature to a named CPU type)
-
-
-ABIs
-----
-
-Nested VMX aims to present a standard and (eventually) fully-functional VMX
-implementation for a guest hypervisor to use. As such, the official
-specification of the ABI that it provides is Intel's VMX specification,
-namely volume 3B of their "Intel 64 and IA-32 Architectures Software
-Developer's Manual". Not all of VMX's features are currently fully supported,
-but the goal is to eventually support them all, starting with the VMX features
-which are used in practice by popular hypervisors (KVM and others).
-
-As a VMX implementation, nested VMX presents a VMCS structure to L1.
-As mandated by the spec, other than the two fields revision_id and abort,
-this structure is *opaque* to its user, who is not supposed to know or care
-about its internal structure. Rather, the structure is accessed through the
-VMREAD and VMWRITE instructions.
-Still, for debugging purposes, KVM developers might be interested in the
-internals of this structure; this is struct vmcs12 from arch/x86/kvm/vmx.c.
-
-The name "vmcs12" refers to the VMCS that L1 builds for L2. In the code we
-also have "vmcs01", the VMCS that L0 built for L1, and "vmcs02" is the VMCS
-which L0 builds to actually run L2 - how this is done is explained in the
-aforementioned paper.
-
-For convenience, we repeat the content of struct vmcs12 here. If the internals
-of this structure change, this can break live migration across KVM versions.
-VMCS12_REVISION (from vmx.c) should be changed if struct vmcs12 or its inner
-struct shadow_vmcs is ever changed.
-
-    typedef u64 natural_width;
-    struct __packed vmcs12 {
-        /* According to the Intel spec, a VMCS region must start with
-         * these two user-visible fields */
-        u32 revision_id;
-        u32 abort;
-
-        u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
-        u32 padding[7]; /* room for future expansion */
-
-        u64 io_bitmap_a;
-        u64 io_bitmap_b;
-        u64 msr_bitmap;
-        u64 vm_exit_msr_store_addr;
-        u64 vm_exit_msr_load_addr;
-        u64 vm_entry_msr_load_addr;
-        u64 tsc_offset;
-        u64 virtual_apic_page_addr;
-        u64 apic_access_addr;
-        u64 ept_pointer;
-        u64 guest_physical_address;
-        u64 vmcs_link_pointer;
-        u64 guest_ia32_debugctl;
-        u64 guest_ia32_pat;
-        u64 guest_ia32_efer;
-        u64 guest_pdptr0;
-        u64 guest_pdptr1;
-        u64 guest_pdptr2;
-        u64 guest_pdptr3;
-        u64 host_ia32_pat;
-        u64 host_ia32_efer;
-        u64 padding64[8]; /* room for future expansion */
-        natural_width cr0_guest_host_mask;
-        natural_width cr4_guest_host_mask;
-        natural_width cr0_read_shadow;
-        natural_width cr4_read_shadow;
-        natural_width cr3_target_value0;
-        natural_width cr3_target_value1;
-        natural_width cr3_target_value2;
-        natural_width cr3_target_value3;
-        natural_width exit_qualification;
-        natural_width guest_linear_address;
-        natural_width guest_cr0;
-        natural_width guest_cr3;
-        natural_width guest_cr4;
-        natural_width guest_es_base;
-        natural_width guest_cs_base;
-        natural_width guest_ss_base;
-        natural_width guest_ds_base;
-        natural_width guest_fs_base;
-        natural_width guest_gs_base;
-        natural_width guest_ldtr_base;
-        natural_width guest_tr_base;
-        natural_width guest_gdtr_base;
-        natural_width guest_idtr_base;
-        natural_width guest_dr7;
-        natural_width guest_rsp;
-        natural_width guest_rip;
-        natural_width guest_rflags;
-        natural_width guest_pending_dbg_exceptions;
-        natural_width guest_sysenter_esp;
-        natural_width guest_sysenter_eip;
-        natural_width host_cr0;
-        natural_width host_cr3;
-        natural_width host_cr4;
-        natural_width host_fs_base;
-        natural_width host_gs_base;
-        natural_width host_tr_base;
-        natural_width host_gdtr_base;
-        natural_width host_idtr_base;
-        natural_width host_ia32_sysenter_esp;
-        natural_width host_ia32_sysenter_eip;
-        natural_width host_rsp;
-        natural_width host_rip;
-        natural_width paddingl[8]; /* room for future expansion */
-        u32 pin_based_vm_exec_control;
-        u32 cpu_based_vm_exec_control;
-        u32 exception_bitmap;
-        u32 page_fault_error_code_mask;
-        u32 page_fault_error_code_match;
-        u32 cr3_target_count;
-        u32 vm_exit_controls;
-        u32 vm_exit_msr_store_count;
-        u32 vm_exit_msr_load_count;
-        u32 vm_entry_controls;
-        u32 vm_entry_msr_load_count;
-        u32 vm_entry_intr_info_field;
-        u32 vm_entry_exception_error_code;
-        u32 vm_entry_instruction_len;
-        u32 tpr_threshold;
-        u32 secondary_vm_exec_control;
-        u32 vm_instruction_error;
-        u32 vm_exit_reason;
-        u32 vm_exit_intr_info;
-        u32 vm_exit_intr_error_code;
-        u32 idt_vectoring_info_field;
-        u32 idt_vectoring_error_code;
-        u32 vm_exit_instruction_len;
-        u32 vmx_instruction_info;
-        u32 guest_es_limit;
-        u32 guest_cs_limit;
-        u32 guest_ss_limit;
-        u32 guest_ds_limit;
-        u32 guest_fs_limit;
-        u32 guest_gs_limit;
-        u32 guest_ldtr_limit;
-        u32 guest_tr_limit;
-        u32 guest_gdtr_limit;
-        u32 guest_idtr_limit;
-        u32 guest_es_ar_bytes;
-        u32 guest_cs_ar_bytes;
-        u32 guest_ss_ar_bytes;
-        u32 guest_ds_ar_bytes;
-        u32 guest_fs_ar_bytes;
-        u32 guest_gs_ar_bytes;
-        u32 guest_ldtr_ar_bytes;
-        u32 guest_tr_ar_bytes;
-        u32 guest_interruptibility_info;
-        u32 guest_activity_state;
-        u32 guest_sysenter_cs;
-        u32 host_ia32_sysenter_cs;
-        u32 padding32[8]; /* room for future expansion */
-        u16 virtual_processor_id;
-        u16 guest_es_selector;
-        u16 guest_cs_selector;
-        u16 guest_ss_selector;
-        u16 guest_ds_selector;
-        u16 guest_fs_selector;
-        u16 guest_gs_selector;
-        u16 guest_ldtr_selector;
-        u16 guest_tr_selector;
-        u16 host_es_selector;
-        u16 host_cs_selector;
-        u16 host_ss_selector;
-        u16 host_ds_selector;
-        u16 host_fs_selector;
-        u16 host_gs_selector;
-        u16 host_tr_selector;
-    };
-
-
-Authors
--------
-
-These patches were written by:
-    Abel Gordon, abelg il.ibm.com
-    Nadav Har'El, nyh il.ibm.com
-    Orit Wasserman, oritw il.ibm.com
-    Ben-Ami Yassour, benami il.ibm.com
-    Muli Ben-Yehuda, muli il.ibm.com
-
-With contributions by:
-    Anthony Liguori, aliguori us.ibm.com
-    Mike Day, mdday us.ibm.com
-    Michael Factor, factor il.ibm.com
-    Zvi Dubitzky, dubi il.ibm.com
-
-And valuable reviews by:
-    Avi Kivity, avi redhat.com
-    Gleb Natapov, gleb redhat.com
-    Marcelo Tosatti, mtosatti redhat.com
-    Kevin Tian, kevin.tian intel.com
-    and others.
diff --git a/Documentation/virtual/kvm/ppc-pv.txt b/Documentation/virtual/kvm/ppc-pv.txt
deleted file mode 100644
index e26115ce4258..000000000000
--- a/Documentation/virtual/kvm/ppc-pv.txt
+++ /dev/null
@@ -1,212 +0,0 @@
-The PPC KVM paravirtual interface
-=================================
-
-The basic execution principle by which KVM on PowerPC works is to run all
-kernel-space code in PR=1, which is user space. This way we trap all
-privileged instructions and can emulate them accordingly.
-
-Unfortunately that is also the downfall. There are quite a few privileged
-instructions that needlessly return us to the hypervisor even though they
-could be handled differently.
-
-This is what the PPC PV interface helps with. It takes privileged instructions
-and transforms them into unprivileged ones with some help from the hypervisor.
-This cuts down virtualization costs by about 50% on some of my benchmarks.
-
-The code for that interface can be found in arch/powerpc/kernel/kvm*
-
-Querying for existence
-======================
-
-To find out if we're running on KVM or not, we leverage the device tree. When
-Linux is running on KVM, a node /hypervisor exists. That node contains a
-compatible property with the value "linux,kvm".
-
-Once you have determined you're running under a PV-capable KVM, you can now
-use hypercalls as described below.
-
-KVM hypercalls
-==============
-
-Inside the device tree's /hypervisor node there's a property called
-'hypercall-instructions'. This property contains at most 4 opcodes that make
-up the hypercall. To issue a hypercall, just execute these instructions.
-
-The parameters are as follows:
-
-    Register    IN                  OUT
-
-    r0          -                   volatile
-    r3          1st parameter       Return code
-    r4          2nd parameter       1st output value
-    r5          3rd parameter       2nd output value
-    r6          4th parameter       3rd output value
-    r7          5th parameter       4th output value
-    r8          6th parameter       5th output value
-    r9          7th parameter       6th output value
-    r10         8th parameter       7th output value
-    r11         hypercall number    8th output value
-    r12         -                   volatile
-
-Hypercall definitions are shared in generic code, so the same hypercall numbers
-apply for x86 and powerpc alike, with the exception that each KVM hypercall
-also needs to be ORed with the KVM vendor code, which is (42 << 16).
-
-Return codes can be as follows:
-
-    Code        Meaning
-
-    0           Success
-    12          Hypercall not implemented
-    <0          Error
-
-The magic page
-==============
-
-To enable communication between the hypervisor and guest there is a new shared
-page that contains parts of supervisor visible register state. The guest can
-map this shared page using the KVM hypercall KVM_HC_PPC_MAP_MAGIC_PAGE.
-
-With this hypercall issued, the guest always gets the magic page mapped at the
-desired location. The first parameter indicates the effective address when the
-MMU is enabled. The second parameter indicates the address in real mode, if
-applicable to the target. For now, we always map the page to -4096. This way
-we can access it using absolute load and store functions. The following
-instruction reads the first field of the magic page:
-
-    ld rX, -4096(0)
-
-The interface is designed to be extensible should there be need later to add
-additional registers to the magic page. If you add fields to the magic page,
-also define a new hypercall feature to indicate that the host can give you more
-registers. Only if the host supports the additional features, make use of them.
-
-The magic page layout is described by struct kvm_vcpu_arch_shared
-in arch/powerpc/include/asm/kvm_para.h.
-
-Magic page features
-===================
-
-When mapping the magic page using the KVM hypercall KVM_HC_PPC_MAP_MAGIC_PAGE,
-a second return value is passed to the guest. This second return value contains
-a bitmap of available features inside the magic page.
-
-The following enhancements to the magic page are currently available:
-
-    KVM_MAGIC_FEAT_SR               Maps SR registers r/w in the magic page
-    KVM_MAGIC_FEAT_MAS0_TO_SPRG7    Maps MASn, ESR, PIR and high SPRGs
-
-For enhanced features in the magic page, please check for the existence of the
-feature before using them!
-
-Magic page flags
-================
-
-In addition to features that indicate whether a host is capable of a particular
-feature, we also have a channel for a guest to tell the host whether it's
-capable of something. This is what we call "flags".
-
-Flags are passed to the host in the low 12 bits of the Effective Address.
-
-The following flags are currently available for a guest to expose:
-
-    MAGIC_PAGE_FLAG_NOT_MAPPED_NX   Guest handles NX bits correctly wrt magic page
-
-MSR bits
-========
-
-The MSR contains bits that require hypervisor intervention and bits that do
-not require direct hypervisor intervention because they only get interpreted
-when entering the guest or don't have any impact on the hypervisor's behavior.
-
-The following bits are safe to be set inside the guest:
-
-    MSR_EE
-    MSR_RI
-
-If any other bit changes in the MSR, please still use mtmsr(d).
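-
-As a hedged sketch of that rule (magic_page refers to the mapping described
-above; MSR_SAFE_BITS, guest_set_msr and the mtmsr() wrapper are made up for
-this document, not the actual Linux implementation):
-
-    #define MSR_SAFE_BITS   (MSR_EE | MSR_RI)
-
-    static void guest_set_msr(unsigned long new_msr)
-    {
-        unsigned long old = magic_page->msr;
-
-        /* only safe bits change: the shared copy is enough */
-        if (((old ^ new_msr) & ~MSR_SAFE_BITS) == 0)
-            magic_page->msr = new_msr;  /* no trap required */
-        else
-            mtmsr(new_msr);             /* may trap to the host */
-    }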
-
-Patched instructions
-====================
-
-The "ld" and "std" instructions are transformed to "lwz" and "stw" instructions
-respectively on 32 bit systems with an added offset of 4 to accommodate big
-endianness.
-
-The following is a list of the mappings the Linux kernel performs when running
-as a guest. Implementing any of those mappings is optional, as the instruction
-traps also act on the shared page. So calling privileged instructions still
-works as before.
-
-From                To
-====                ==
-
-mfmsr   rX          ld      rX, magic_page->msr
-mfsprg  rX, 0       ld      rX, magic_page->sprg0
-mfsprg  rX, 1       ld      rX, magic_page->sprg1
-mfsprg  rX, 2       ld      rX, magic_page->sprg2
-mfsprg  rX, 3       ld      rX, magic_page->sprg3
-mfsrr0  rX          ld      rX, magic_page->srr0
-mfsrr1  rX          ld      rX, magic_page->srr1
-mfdar   rX          ld      rX, magic_page->dar
-mfdsisr rX          lwz     rX, magic_page->dsisr
-
-mtmsr   rX          std     rX, magic_page->msr
-mtsprg  0, rX       std     rX, magic_page->sprg0
-mtsprg  1, rX       std     rX, magic_page->sprg1
-mtsprg  2, rX       std     rX, magic_page->sprg2
-mtsprg  3, rX       std     rX, magic_page->sprg3
-mtsrr0  rX          std     rX, magic_page->srr0
-mtsrr1  rX          std     rX, magic_page->srr1
-mtdar   rX          std     rX, magic_page->dar
-mtdsisr rX          stw     rX, magic_page->dsisr
-
-tlbsync             nop
-
-mtmsrd  rX, 0       b       <special mtmsr section>
-mtmsr   rX          b       <special mtmsr section>
-
-mtmsrd  rX, 1       b       <special mtmsrd section>
-
-[Book3S only]
-mtsrin  rX, rY      b       <special mtsrin section>
-
-[BookE only]
-wrteei  [0|1]       b       <special wrteei section>
-
-
-Some instructions require more logic to determine what's going on than a load
-or store instruction can deliver. To enable patching of those, we keep some
-RAM around where we can live translate instructions to. What happens is the
-following:
-
-    1) copy emulation code to memory
-    2) patch that code to fit the emulated instruction
-    3) patch that code to return to the original pc + 4
-    4) patch the original instruction to branch to the new code
-
-That way we can inject an arbitrary amount of code as replacement for a single
-instruction. This allows us to check for pending interrupts when setting EE=1
-for example.
-
-Hypercall ABIs in KVM on PowerPC
-=================================
-1) KVM hypercalls (ePAPR)
-
-This is an ePAPR-compliant hypercall implementation (mentioned above). Even
-generic hypercalls are implemented here, like the ePAPR idle hcall. These are
-available on all targets.
-
-2) PAPR hypercalls
-
-PAPR hypercalls are needed to run server PowerPC PAPR guests (-M pseries in
-QEMU). These are the same hypercalls that pHyp, the POWER hypervisor,
-implements. Some of them are handled in the kernel, some are handled in user
-space. This is only available on book3s_64.
-
-3) OSI hypercalls
-
-Mac-on-Linux is another user of KVM on PowerPC, which has had its own hypercall
-interface since long before KVM. This is supported to maintain compatibility.
-All these hypercalls get forwarded to user space. This is only useful on
-book3s_32, but can be used with book3s_64 as well.
diff --git a/Documentation/virtual/kvm/review-checklist.txt b/Documentation/virtual/kvm/review-checklist.txt
deleted file mode 100644
index a83b27635fdd..000000000000
--- a/Documentation/virtual/kvm/review-checklist.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-Review checklist for kvm patches
-================================
-
-1.  The patch must follow Documentation/process/coding-style.rst and
-    Documentation/process/submitting-patches.rst.
-
-2.  Patches should be against kvm.git master branch.
-
-3.  If the patch introduces or modifies a new userspace API:
-    - the API must be documented in Documentation/virtual/kvm/api.txt
-    - the API must be discoverable using KVM_CHECK_EXTENSION
-
-4.  New state must include support for save/restore.
-
-5.  New features must default to off (userspace should explicitly request
-    them). Performance improvements can and should default to on.
-
-6.  New cpu features should be exposed via KVM_GET_SUPPORTED_CPUID2
-
-7.  Emulator changes should be accompanied by unit tests in the qemu-kvm.git
-    kvm/test directory.
-
-8.  Changes should be vendor neutral when possible. Changes to common code
-    are better than duplicating changes to vendor code.
-
-9.  Similarly, prefer changes to arch-independent code over changes to
-    arch-dependent code.
-
-10. User/kernel interfaces and guest/host interfaces must be 64-bit clean
-    (all variables and sizes naturally aligned on 64-bit; use specific types
-    only - u64 rather than ulong).
-
-11. New guest visible features must either be documented in a hardware manual
-    or be accompanied by documentation.
-
-12. Features must be robust against reset and kexec - for example, shared
-    host/guest memory must be unshared to prevent the host from writing to
-    guest memory that the guest has not reserved for this purpose.
diff --git a/Documentation/virtual/kvm/s390-diag.txt b/Documentation/virtual/kvm/s390-diag.txt
deleted file mode 100644
index 7c52e5f8b210..000000000000
--- a/Documentation/virtual/kvm/s390-diag.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-The s390 DIAGNOSE call on KVM
-=============================
-
-KVM on s390 supports the DIAGNOSE call for making hypercalls, both for
-native hypercalls and for selected hypercalls found on other s390
-hypervisors.
-
-Note that bits are numbered as per the usual s390 convention (most significant
-bit on the left).
-
-
-General remarks
----------------
-
-DIAGNOSE calls by the guest cause a mandatory intercept. This implies
-all supported DIAGNOSE calls need to be handled by either KVM or its
-userspace.
-
-All DIAGNOSE calls supported by KVM use the RS-a format:
-
-    --------------------------------------
-    |  '83'  | R1 | R3 | B2 |     D2     |
-    --------------------------------------
-    0        8    12   16   20          31
-
-The second-operand address (obtained by the base/displacement calculation)
-is not used to address data. Instead, bits 48-63 of this address specify
-the function code, and bits 0-47 are ignored.
-
-The supported DIAGNOSE function codes vary by the userspace used. For
-DIAGNOSE function codes not specific to KVM, please refer to the
-documentation for the s390 hypervisors defining them.
-
-
-DIAGNOSE function code X'500' - KVM virtio functions
-----------------------------------------------------
-
-If the function code specifies 0x500, various virtio-related functions
-are performed.
-
-General register 1 contains the virtio subfunction code. Supported
-virtio subfunctions depend on KVM's userspace. Generally, userspace
-provides either s390-virtio (subcodes 0-2) or virtio-ccw (subcode 3).
-
-Upon completion of the DIAGNOSE instruction, general register 2 contains
-the function's return value, which is either a return code or a
-subcode-specific value.
-
-Subcode 0 - s390-virtio notification and early console printk
-    Handled by userspace.
-
-Subcode 1 - s390-virtio reset
-    Handled by userspace.
-
-Subcode 2 - s390-virtio set status
-    Handled by userspace.
-
-Subcode 3 - virtio-ccw notification
-    Handled by either userspace or KVM (ioeventfd case).
-
-    General register 2 contains a subchannel-identification word denoting
-    the subchannel of the virtio-ccw proxy device to be notified.
-
-    General register 3 contains the number of the virtqueue to be notified.
-
-    General register 4 contains a 64-bit identifier for KVM usage (the
-    kvm_io_bus cookie).
-    If general register 4 does not contain a valid identifier, it is
-    ignored.
-
-    After completion of the DIAGNOSE call, general register 2 may contain
-    a 64-bit identifier (in the kvm_io_bus cookie case), or a negative
-    error value, if an internal error occurred.
-
-    See also the virtio standard for a discussion of this hypercall.
-
-
-DIAGNOSE function code X'501' - KVM breakpoint
-----------------------------------------------
-
-If the function code specifies 0x501, breakpoint functions may be performed.
-This function code is handled by userspace.
-
-This diagnose function code has no subfunctions and uses no parameters.
diff --git a/Documentation/virtual/kvm/timekeeping.txt b/Documentation/virtual/kvm/timekeeping.txt
deleted file mode 100644
index 76808a17ad84..000000000000
--- a/Documentation/virtual/kvm/timekeeping.txt
+++ /dev/null
@@ -1,612 +0,0 @@
-
-    Timekeeping Virtualization for X86-Based Architectures
-
-    Zachary Amsden
-    Copyright (c) 2010, Red Hat. All rights reserved.
-
-1) Overview
-2) Timing Devices
-3) TSC Hardware
-4) Virtualization Problems
-
-=========================================================================
-
-1) Overview
-
-One of the most complicated parts of the X86 platform, and specifically of
-its virtualization, is the plethora of timing devices available and the
-complexity of emulating those devices. In addition, virtualization of time
-introduces a new set of challenges because it introduces a multiplexed
-division of time beyond the control of the guest CPU.
-
-First, we will describe the various timekeeping hardware available, then
-present some of the problems which arise and solutions available, giving
-specific recommendations for certain classes of KVM guests.
-
-The purpose of this document is to collect data and information relevant to
-timekeeping which may be difficult to find elsewhere, specifically,
-information relevant to KVM and hardware-based virtualization.
-
-=========================================================================
-
-2) Timing Devices
-
-First we discuss the basic hardware devices available. TSC and the related
-KVM clock are special enough to warrant a full exposition and are described in
-the following section.
-
-2.1) i8254 - PIT
-
-One of the first timer devices available is the programmable interrupt timer,
-or PIT. The PIT has a fixed frequency 1.193182 MHz base clock and three
-channels which can be programmed to deliver periodic or one-shot interrupts.
-These three channels can be configured in different modes and have individual
-counters. Channels 1 and 2 were not available for general use in the original
-IBM PC, and historically were connected to control RAM refresh and the PC
-speaker. Now the PIT is typically integrated as part of an emulated chipset
-and a separate physical PIT is not used.
-
-The PIT uses I/O ports 0x40 - 0x43. Access to the 16-bit counters is done
-using single or multiple byte access to the I/O ports. There are 6 modes
-available, but not all modes are available to all timers, as only timer 2
-has a connected gate input, required for modes 1 and 5. The gate line is
-controlled by port 61h, bit 0, as illustrated in the following diagram.
-
- --------------              ----------------
-|              |            |                |
-|  1.1932 MHz  |----------->| CLOCK OUT      | ---------> IRQ 0
-|    Clock     |    |       |                |
- --------------     |    +->| GATE  TIMER 0  |
-                    |    |   ----------------
-                    |    |
-                    |    |   ----------------
-                    |    |  |                |
-                    |------>| CLOCK OUT      | ---------> 66.3 KHZ DRAM
-                    |    |  |                |            (aka /dev/null)
-                    |    +->| GATE  TIMER 1  |
-                    |    |   ----------------
-                    |    |
-                    |    |   ----------------
-                    |    |  |                |
-                    |------>| CLOCK OUT      | ---------> Port 61h, bit 5
-                    |       |                |
-Port 61h, bit 0 ----------->| GATE  TIMER 2  |  \_.----   ____
-                             ----------------    _|    )--|LPF|---Speaker
-                                                 / *----   \___/
-Port 61h, bit 1 --------------------------------/
-
-The timer modes are now described.
-
-Mode 0: Single Timeout.  This is a one-shot software timeout that counts down
- when the gate is high (always true for timers 0 and 1). When the count
- reaches zero, the output goes high.
-
-Mode 1: Triggered One-shot. The output is initially set high. When the gate
- line is set high, a countdown is initiated (which does not stop if the gate is
- lowered), during which the output is set low. When the count reaches zero,
- the output goes high.
-
-Mode 2: Rate Generator. The output is initially set high. When the countdown
- reaches 1, the output goes low for one count and then returns high. The value
- is reloaded and the countdown automatically resumes. If the gate line goes
- low, the count is halted. If the output is low when the gate is lowered, the
- output automatically goes high (this only affects timer 2).
-
-Mode 3: Square Wave.  This generates a high / low square wave. The count
- determines the length of the pulse, which alternates between high and low
- when zero is reached. The count only proceeds when gate is high and is
- automatically reloaded on reaching zero. The count is decremented twice at
- each clock to generate a full high / low cycle at the full periodic rate.
- If the count is even, the clock remains high for N/2 counts and low for N/2
- counts; if the count is odd, the clock is high for (N+1)/2 counts and low
- for (N-1)/2 counts. Only even values are latched by the counter, so odd
- values are not observed when reading. This is the intended mode for timer 2,
- which generates sine-like tones by low-pass filtering the square wave output.
-
-Mode 4: Software Strobe. After programming this mode and loading the counter,
- the output remains high until the counter reaches zero. Then the output
- goes low for 1 clock cycle and returns high. The counter is not reloaded.
- Counting only occurs when gate is high.
-
-Mode 5: Hardware Strobe. After programming and loading the counter, the
- output remains high. When the gate is raised, a countdown is initiated
- (which does not stop if the gate is lowered). When the counter reaches zero,
- the output goes low for 1 clock cycle and then returns high. The counter is
- not reloaded.
-
-In addition to normal binary counting, the PIT supports BCD counting. The
-command port, 0x43 is used to set the counter and mode for each of the three
-timers.
-
-PIT commands, issued to port 0x43, use the following bit encoding:
-
-Bit 7-4: Command (See table below)
-Bit 3-1: Mode (000 = Mode 0, 101 = Mode 5, 11X = undefined)
-Bit 0  : Binary (0) / BCD (1)
-
-Command table:
-
-0000 - Latch Timer 0 count for port 0x40
-    sample and hold the count to be read in port 0x40;
-    additional commands ignored until counter is read;
-    mode bits ignored.
-
-0001 - Set Timer 0 LSB mode for port 0x40
-    set timer to read LSB only and force MSB to zero;
-    mode bits set timer mode
-
-0010 - Set Timer 0 MSB mode for port 0x40
-    set timer to read MSB only and force LSB to zero;
-    mode bits set timer mode
-
-0011 - Set Timer 0 16-bit mode for port 0x40
-    set timer to read / write LSB first, then MSB;
-    mode bits set timer mode
-
-0100 - Latch Timer 1 count for port 0x41 - as described above
-0101 - Set Timer 1 LSB mode for port 0x41 - as described above
-0110 - Set Timer 1 MSB mode for port 0x41 - as described above
-0111 - Set Timer 1 16-bit mode for port 0x41 - as described above
-
-1000 - Latch Timer 2 count for port 0x42 - as described above
-1001 - Set Timer 2 LSB mode for port 0x42 - as described above
-1010 - Set Timer 2 MSB mode for port 0x42 - as described above
-1011 - Set Timer 2 16-bit mode for port 0x42 - as described above
-
-1101 - General counter latch
-    Latch combination of counters into corresponding ports
-    Bit 3 = Counter 2
-    Bit 2 = Counter 1
-    Bit 1 = Counter 0
-    Bit 0 = Unused
-
-1110 - Latch timer status
-    Latch combination of counter mode into corresponding ports
-    Bit 3 = Counter 2
-    Bit 2 = Counter 1
-    Bit 1 = Counter 0
-
-    The output of ports 0x40-0x42 following this command will be:
-
-    Bit 7     = Output pin
-    Bit 6     = Count loaded (0 if timer has expired)
-    Bit 5-4   = Read / Write mode
-                01 = MSB only
-                10 = LSB only
-                11 = LSB / MSB (16-bit)
-    Bit 3-1   = Mode
-    Bit 0     = Binary (0) / BCD mode (1)
-
-2.2) RTC
-
-The second device which was available in the original PC was the MC146818 real
-time clock. The original device is now obsolete, and usually emulated by the
-system chipset, sometimes by an HPET and some frankenstein IRQ routing.
-
-The RTC is accessed through CMOS variables, using an index register to
-control which byte is read. Since there is only one index register, reads
-of the CMOS and reads of the RTC require lock protection (in addition, it is
-dangerous to allow userspace utilities such as hwclock to have direct RTC
-access, as they could corrupt kernel reads and writes of CMOS memory).
-
-The RTC generates an interrupt which is usually routed to IRQ 8. The interrupt
-can function as a periodic timer, an additional once-a-day alarm, and can issue
-interrupts after an update of the CMOS registers by the MC146818 is complete.
-The type of interrupt is signalled in the RTC status registers.
-
-The RTC will update the current time fields by battery power even while the
-system is off. The current time fields should not be read while an update is
-in progress, as indicated in the status register.
-
-The clock uses a 32.768kHz crystal, so bits 6-4 of register A should be
-programmed to a 32kHz divider if the RTC is to count seconds.
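-
-A short illustrative sketch of the access pattern just described (the
-constants and the x86 port I/O helpers outb()/inb() are assumed; locking
-against concurrent users of the index register is left out):
-
-    #define CMOS_INDEX  0x70
-    #define CMOS_DATA   0x71
-    #define RTC_REG_A   0x0A
-    #define RTC_UIP     0x80    /* register A: update in progress */
-
-    static unsigned char cmos_read(unsigned char reg)
-    {
-        outb(reg, CMOS_INDEX);  /* select the byte to read */
-        return inb(CMOS_DATA);
-    }
-
-    static unsigned char rtc_read_seconds(void)
-    {
-        /* do not read time fields while an update is in progress */
-        while (cmos_read(RTC_REG_A) & RTC_UIP)
-            ;
-        return cmos_read(0x00); /* current second, in BCD */
-    }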
-
-This is the RAM map originally used for the RTC/CMOS:
-
-Location    Size    Description
-------------------------------------------
-00h         byte    Current second (BCD)
-01h         byte    Seconds alarm (BCD)
-02h         byte    Current minute (BCD)
-03h         byte    Minutes alarm (BCD)
-04h         byte    Current hour (BCD)
-05h         byte    Hours alarm (BCD)
-06h         byte    Current day of week (BCD)
-07h         byte    Current day of month (BCD)
-08h         byte    Current month (BCD)
-09h         byte    Current year (BCD)
-0Ah         byte    Register A
-                       bit 7   = Update in progress
-                       bit 6-4 = Divider for clock
-                                  000 = 4.194 MHz
-                                  001 = 1.049 MHz
-                                  010 = 32 kHz
-                                  10X = test modes
-                                  110 = reset / disable
-                                  111 = reset / disable
-                       bit 3-0 = Rate selection for periodic interrupt
-                                  000 = periodic timer disabled
-                                  001 = 3.90625 uS
-                                  010 = 7.8125 uS
-                                  011 = .122070 mS
-                                  100 = .244141 mS
-                                   ...
-                                 1101 = 125 mS
-                                 1110 = 250 mS
-                                 1111 = 500 mS
-0Bh         byte    Register B
-                       bit 7   = Run (0) / Halt (1)
-                       bit 6   = Periodic interrupt enable
-                       bit 5   = Alarm interrupt enable
-                       bit 4   = Update-ended interrupt enable
-                       bit 3   = Square wave interrupt enable
-                       bit 2   = BCD calendar (0) / Binary (1)
-                       bit 1   = 12-hour mode (0) / 24-hour mode (1)
-                       bit 0   = 0 (DST off) / 1 (DST enabled)
-0Ch         byte    Register C (read only)
-                       bit 7   = interrupt request flag (IRQF)
-                       bit 6   = periodic interrupt flag (PF)
-                       bit 5   = alarm interrupt flag (AF)
-                       bit 4   = update interrupt flag (UF)
-                       bit 3-0 = reserved
-0Dh         byte    Register D (read only)
-                       bit 7   = RTC has power
-                       bit 6-0 = reserved
-32h         byte    Current century BCD (*)
-  (*) location vendor specific and now determined from ACPI global tables
-
-2.3) APIC
-
-On Pentium and later processors, an on-board timer is available to each CPU
-as part of the Advanced Programmable Interrupt Controller. The APIC is
-accessed through memory-mapped registers and provides interrupt service to each
-CPU, used for IPIs and local timer interrupts.
-
-Although in theory the APIC is a safe and stable source for local interrupts,
-in practice, many bugs and glitches have occurred due to the special nature of
-the APIC CPU-local memory-mapped hardware. Beware that CPU errata may affect
-the use of the APIC and that workarounds may be required. In addition, some of
-these workarounds pose unique constraints for virtualization - requiring either
-extra overhead incurred from extra reads of memory-mapped I/O or additional
-functionality that may be more computationally expensive to implement.
-
-Since the APIC is documented quite well in the Intel and AMD manuals, we will
-avoid repetition of the detail here. It should be pointed out that the APIC
-timer is programmed through the LVT (local vector table) register, is capable
-of one-shot or periodic operation, and is based on the bus clock divided down
-by the programmable divider register.
-
-2.4) HPET
-
-HPET is quite complex, and was originally intended to replace the PIT / RTC
-support of the X86 PC. It remains to be seen whether that will be the case, as
-the de facto standard of PC hardware is to emulate these older devices. Some
-systems designated as legacy free may support only the HPET as a hardware timer
-device.
-
-The HPET spec is rather loose and vague, requiring at least 3 hardware timers,
-but allowing implementation freedom to support many more. It also imposes no
-fixed rate on the timer frequency, but does impose some extremal values on
-frequency, error and slew.
-
-In general, the HPET is recommended as a high precision (compared to PIT/RTC)
-time source which is independent of local variation (as there is only one HPET
-in any given system).
-The HPET is also memory-mapped, and its presence is
-indicated through ACPI tables by the BIOS.
-
-Detailed specification of the HPET is beyond the current scope of this
-document, as it is also very well documented elsewhere.
-
-2.5) Offboard Timers
-
-Several cards, both proprietary (watchdog boards) and commonplace (e1000) have
-timing chips built into the cards which may have registers which are accessible
-to kernel or user drivers. To the author's knowledge, using these to generate
-a clocksource for a Linux or other kernel has not yet been attempted and is in
-general frowned upon as not playing by the agreed rules of the game. Such a
-timer device would require additional support to be virtualized properly and is
-not considered important at this time as no known operating system does this.
-
-=========================================================================
-
-3) TSC Hardware
-
-The TSC or time stamp counter is relatively simple in theory; it counts
-processor clock cycles, which can be used as a measure of time. In practice,
-due to a number of problems, it is the most complicated timekeeping device
-to use.
-
-The TSC is represented internally as a 64-bit MSR which can be read with the
-RDMSR, RDTSC, or RDTSCP (when available) instructions. In the past, hardware
-limitations made it possible to write the TSC, but generally on old hardware it
-was only possible to write the low 32-bits of the 64-bit counter, and the upper
-32-bits of the counter were cleared. Now, however, on Intel processors family
-0Fh, for models 3, 4 and 6, and family 06h, models e and f, this restriction
-has been lifted and all 64-bits are writable. On AMD systems, the ability to
-write the TSC MSR is not an architectural guarantee.
-
-The TSC is accessible from CPL 0 and, conditionally, from CPL > 0 software by
-means of the CR4.TSD bit which, when set, disables CPL > 0 TSC access.
-
-Some vendors have implemented an additional instruction, RDTSCP, which returns
-atomically not just the TSC, but an indicator which corresponds to the
-processor number. This can be used to index into an array of TSC variables to
-determine offset information in SMP systems where TSCs are not synchronized.
-The presence of this instruction must be determined by consulting CPUID feature
-bits.
-
-Both VMX and SVM provide extension fields in the virtualization hardware which
-allow the guest visible TSC to be offset by a constant. Newer implementations
-promise to allow the TSC to additionally be scaled, but this hardware is not
-yet widely available.
-
-3.1) TSC synchronization
-
-The TSC is a CPU-local clock in most implementations. This means, on SMP
-platforms, the TSCs of different CPUs may start at different times depending
-on when the CPUs are powered on. Generally, CPUs on the same die will share
-the same clock; however, this is not always the case.
-
-The BIOS may attempt to resynchronize the TSCs during the poweron process and
-the operating system or other system software may attempt to do this as well.
-Several hardware limitations make the problem worse - if it is not possible to
-write the full 64-bits of the TSC, it may be impossible to match the TSC in
-newly arriving CPUs to that of the rest of the system, resulting in
-unsynchronized TSCs.
This may be done by BIOS or system software, but in -practice, getting a perfectly synchronized TSC will not be possible unless all -values are read from the same clock, which generally only is possible on single -socket systems or those with special hardware support. - -3.2) TSC and CPU hotplug - -As touched on already, CPUs which arrive later than the boot time of the system -may not have a TSC value that is synchronized with the rest of the system. -Either system software, BIOS, or SMM code may actually try to establish the TSC -to a value matching the rest of the system, but a perfect match is usually not -a guarantee. This can have the effect of bringing a system from a state where -TSC is synchronized back to a state where TSC synchronization flaws, however -small, may be exposed to the OS and any virtualization environment. - -3.3) TSC and multi-socket / NUMA - -Multi-socket systems, especially large multi-socket systems are likely to have -individual clocksources rather than a single, universally distributed clock. -Since these clocks are driven by different crystals, they will not have -perfectly matched frequency, and temperature and electrical variations will -cause the CPU clocks, and thus the TSCs to drift over time. Depending on the -exact clock and bus design, the drift may or may not be fixed in absolute -error, and may accumulate over time. - -In addition, very large systems may deliberately slew the clocks of individual -cores. This technique, known as spread-spectrum clocking, reduces EMI at the -clock frequency and harmonics of it, which may be required to pass FCC -standards for telecommunications and computer equipment. - -It is recommended not to trust the TSCs to remain synchronized on NUMA or -multiple socket systems for these reasons. - -3.4) TSC and C-states - -C-states, or idling states of the processor, especially C1E and deeper sleep -states may be problematic for TSC as well. The TSC may stop advancing in such -a state, resulting in a TSC which is behind that of other CPUs when execution -is resumed. Such CPUs must be detected and flagged by the operating system -based on CPU and chipset identifications. - -The TSC in such a case may be corrected by catching it up to a known external -clocksource. - -3.5) TSC frequency change / P-states - -To make things slightly more interesting, some CPUs may change frequency. They -may or may not run the TSC at the same rate, and because the frequency change -may be staggered or slewed, at some points in time, the TSC rate may not be -known other than falling within a range of values. In this case, the TSC will -not be a stable time source, and must be calibrated against a known, stable, -external clock to be a usable source of time. - -Whether the TSC runs at a constant rate or scales with the P-state is model -dependent and must be determined by inspecting CPUID, chipset or vendor -specific MSR fields. - -In addition, some vendors have known bugs where the P-state is actually -compensated for properly during normal operation, but when the processor is -inactive, the P-state may be raised temporarily to service cache misses from -other processors. In such cases, the TSC on halted CPUs could advance faster -than that of non-halted processors. AMD Turion processors are known to have -this problem. - -3.6) TSC and STPCLK / T-states - -External signals given to the processor may also have the effect of stopping -the TSC. 
This is typically done for thermal emergency power control to prevent an
-overheating condition, and typically, there is no way to detect that this
-condition has happened.
-
-3.7) TSC virtualization - VMX
-
-VMX provides conditional trapping of RDTSC, RDMSR, WRMSR and RDTSCP
-instructions, which is enough for full virtualization of TSC in any manner.
-In addition, VMX allows passing through the host TSC plus an additional
-TSC_OFFSET field specified in the VMCS.  Special instructions must be used
-to read and write the VMCS field.
-
-3.8) TSC virtualization - SVM
-
-SVM provides conditional trapping of RDTSC, RDMSR, WRMSR and RDTSCP
-instructions, which is enough for full virtualization of TSC in any manner.
-In addition, SVM allows passing through the host TSC plus an additional
-offset field specified in the SVM control block.
-
-3.9) TSC feature bits in Linux
-
-In summary, there is no way to guarantee the TSC remains in perfect
-synchronization unless it is explicitly guaranteed by the architecture.
-Even if so, the TSCs in multi-socket or NUMA systems may still run
-independently despite being locally consistent.
-
-The following feature bits are used by Linux to signal various TSC
-attributes, but they can only be taken to be meaningful for UP or
-single-node systems.
-
-X86_FEATURE_TSC          : The TSC is available in hardware
-X86_FEATURE_RDTSCP       : The RDTSCP instruction is available
-X86_FEATURE_CONSTANT_TSC : The TSC rate is unchanged with P-states
-X86_FEATURE_NONSTOP_TSC  : The TSC does not stop in C-states
-X86_FEATURE_TSC_RELIABLE : TSC sync checks are skipped (VMware)
-
-4) Virtualization Problems
-
-Timekeeping is especially problematic for virtualization because a number
-of challenges arise.  The most obvious problem is that time is now shared
-between the host and, potentially, a number of virtual machines.  Thus the
-virtual operating system does not run with 100% usage of the CPU, despite
-the fact that it may very well make that assumption.  It may expect it to
-remain true to very exacting bounds when interrupt sources are disabled,
-but in reality only its virtual interrupt sources are disabled, and the
-machine may still be preempted at any time.  This causes problems as the
-passage of real time, the injection of machine interrupts and the
-associated clock sources are no longer completely synchronized with real
-time.
-
-This same problem can occur on native hardware to a degree, as SMM code
-may steal cycles from the running operating system - something that
-happens naturally on X86 systems when SMM is used by the BIOS - but not
-in such an extreme fashion.  However, the fact that SMM mode may cause
-similar problems to virtualization makes it a good justification for
-solving many of these problems on bare metal.
-
-4.1) Interrupt clocking
-
-One of the most immediate problems that occurs with legacy operating
-systems is that the system timekeeping routines are often designed to keep
-track of time by counting periodic interrupts.  These interrupts may come
-from the PIT or the RTC, but the problem is the same: the host
-virtualization engine may not be able to deliver the proper number of
-interrupts per second, and so guest time may fall behind.  This is
-especially problematic if a high interrupt rate is selected, such as
-1000 Hz, which is unfortunately the default for many Linux guests.
-
-There are three approaches to solving this problem; first, it may be
-possible to simply ignore it.
-Guests which have a separate time source for tracking 'wall clock' or
-'real time' may not need any adjustment of their interrupts to maintain
-proper time.  If this is not sufficient, it may be necessary to inject
-additional interrupts into the guest in order to increase the effective
-interrupt rate.  This approach leads to complications in extreme
-conditions, where host load or guest lag is too much to compensate for,
-and thus another solution to the problem has arisen: the guest may need to
-become aware of lost ticks and compensate for them internally.  Although
-promising in theory, the implementation of this policy in Linux has been
-extremely error-prone, and a number of buggy variants of lost tick
-compensation are distributed across commonly used Linux systems.
-
-Windows uses periodic RTC clocking as a means of keeping time internally,
-and thus requires interrupt slewing to keep proper time.  However, it uses
-a low enough rate (ed: is it 18.2 Hz?) that this has not yet been a
-problem in practice.
-
-4.2) TSC sampling and serialization
-
-As the highest precision time source available, the cycle counter of the
-CPU has aroused much interest from developers.  As explained above, this
-timer has many problems unique to its nature as a local, potentially
-unstable and potentially unsynchronized source.  One issue which is not
-unique to the TSC, but is highlighted because of its very precise nature,
-is sampling delay.  By definition, the counter, once read, is already old.
-However, it is also possible for the counter to be read ahead of the
-actual use of the result.  This is a consequence of the superscalar
-execution of the instruction stream, which may execute instructions out of
-order.  Such execution is called non-serialized.  Forcing serialized
-execution is necessary for precise measurement with the TSC, and requires
-a serializing instruction, such as CPUID or an MSR read.
-
-Since CPUID may actually be virtualized by a trap and emulate mechanism,
-this serialization can pose a performance issue for hardware
-virtualization.  An accurate time stamp counter reading may therefore not
-always be available, and it may be necessary for an implementation to
-guard against "backwards" reads of the TSC as seen from other CPUs, even
-in an otherwise perfectly synchronized system.
-
-4.3) Timespec aliasing
-
-Additionally, this lack of serialization from the TSC poses another
-challenge when using results of the TSC when measured against another time
-source.  As the TSC is of much higher precision, many possible values of
-the TSC may be read while another clock is still expressing the same
-value.
-
-That is, you may read (T, T+10) while external clock C maintains the same
-value.  Due to non-serialized reads, you may actually end up with a range
-which fluctuates - from (T-1 .. T+10).  Thus, any time calculated from a
-TSC, but calibrated against an external value, may have a range of valid
-values.  Re-calibrating this computation may actually cause time, as
-computed after the calibration, to go backwards, compared with time
-computed before the calibration.
-
-This problem is particularly pronounced with an internal time source in
-Linux, the kernel time, which is expressed in the theoretically
-high-resolution timespec - but which advances in much larger granularity
-intervals, sometimes at the rate of jiffies, and possibly in catchup
-modes, at a much larger step.
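-
-As an illustration of the serialization issue described in section 4.2, a
-minimal sketch of a serialized TSC read follows.  It assumes an x86 target
-and GCC-style inline assembly, and uses LFENCE as the serializing barrier
-rather than the CPUID or MSR reads mentioned above; it is an example
-pattern, not code taken from any kernel:
-
-    #include <stdint.h>
-
-    static inline uint64_t rdtsc_serialized(void)
-    {
-        uint32_t lo, hi;
-
-        /* Ensure earlier instructions complete before the TSC is read. */
-        asm volatile("lfence" ::: "memory");
-        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
-        return ((uint64_t)hi << 32) | lo;
-    }
-
-Without the barrier, the RDTSC may execute speculatively ahead of the work
-being measured, producing exactly the fluctuating ranges described above.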
- -This aliasing requires care in the computation and recalibration of kvmclock -and any other values derived from TSC computation (such as TSC virtualization -itself). - -4.4) Migration - -Migration of a virtual machine raises problems for timekeeping in two ways. -First, the migration itself may take time, during which interrupts cannot be -delivered, and after which, the guest time may need to be caught up. NTP may -be able to help to some degree here, as the clock correction required is -typically small enough to fall in the NTP-correctable window. - -An additional concern is that timers based off the TSC (or HPET, if the raw bus -clock is exposed) may now be running at different rates, requiring compensation -in some way in the hypervisor by virtualizing these timers. In addition, -migrating to a faster machine may preclude the use of a passthrough TSC, as a -faster clock cannot be made visible to a guest without the potential of time -advancing faster than usual. A slower clock is less of a problem, as it can -always be caught up to the original rate. KVM clock avoids these problems by -simply storing multipliers and offsets against the TSC for the guest to convert -back into nanosecond resolution values. - -4.5) Scheduling - -Since scheduling may be based on precise timing and firing of interrupts, the -scheduling algorithms of an operating system may be adversely affected by -virtualization. In theory, the effect is random and should be universally -distributed, but in contrived as well as real scenarios (guest device access, -causes of virtualization exits, possible context switch), this may not always -be the case. The effect of this has not been well studied. - -In an attempt to work around this, several implementations have provided a -paravirtualized scheduler clock, which reveals the true amount of CPU time for -which a virtual machine has been running. - -4.6) Watchdogs - -Watchdog timers, such as the lock detector in Linux may fire accidentally when -running under hardware virtualization due to timer interrupts being delayed or -misinterpretation of the passage of real time. Usually, these warnings are -spurious and can be ignored, but in some circumstances it may be necessary to -disable such detection. - -4.7) Delays and precision timing - -Precise timing and delays may not be possible in a virtualized system. This -can happen if the system is controlling physical hardware, or issues delays to -compensate for slower I/O to and from devices. The first issue is not solvable -in general for a virtualized system; hardware control software can't be -adequately virtualized without a full real-time operating system, which would -require an RT aware virtualization platform. - -The second issue may cause performance problems, but this is unlikely to be a -significant issue. In many cases these delays may be eliminated through -configuration or paravirtualization. - -4.8) Covert channels and leaks - -In addition to the above problems, time information will inevitably leak to the -guest about the host in anything but a perfect implementation of virtualized -time. This may allow the guest to infer the presence of a hypervisor (as in a -red-pill type detection), and it may allow information to leak between guests -by using CPU utilization itself as a signalling channel. Preventing such -problems would require completely isolated virtual time which may not track -real time any longer. 
This may be useful in certain security or QA contexts, -but in general isn't recommended for real-world deployment scenarios. diff --git a/Documentation/virtual/kvm/vcpu-requests.rst b/Documentation/virtual/kvm/vcpu-requests.rst deleted file mode 100644 index 5feb3706a7ae..000000000000 --- a/Documentation/virtual/kvm/vcpu-requests.rst +++ /dev/null @@ -1,307 +0,0 @@ -================= -KVM VCPU Requests -================= - -Overview -======== - -KVM supports an internal API enabling threads to request a VCPU thread to -perform some activity. For example, a thread may request a VCPU to flush -its TLB with a VCPU request. The API consists of the following functions:: - - /* Check if any requests are pending for VCPU @vcpu. */ - bool kvm_request_pending(struct kvm_vcpu *vcpu); - - /* Check if VCPU @vcpu has request @req pending. */ - bool kvm_test_request(int req, struct kvm_vcpu *vcpu); - - /* Clear request @req for VCPU @vcpu. */ - void kvm_clear_request(int req, struct kvm_vcpu *vcpu); - - /* - * Check if VCPU @vcpu has request @req pending. When the request is - * pending it will be cleared and a memory barrier, which pairs with - * another in kvm_make_request(), will be issued. - */ - bool kvm_check_request(int req, struct kvm_vcpu *vcpu); - - /* - * Make request @req of VCPU @vcpu. Issues a memory barrier, which pairs - * with another in kvm_check_request(), prior to setting the request. - */ - void kvm_make_request(int req, struct kvm_vcpu *vcpu); - - /* Make request @req of all VCPUs of the VM with struct kvm @kvm. */ - bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); - -Typically a requester wants the VCPU to perform the activity as soon -as possible after making the request. This means most requests -(kvm_make_request() calls) are followed by a call to kvm_vcpu_kick(), -and kvm_make_all_cpus_request() has the kicking of all VCPUs built -into it. - -VCPU Kicks ----------- - -The goal of a VCPU kick is to bring a VCPU thread out of guest mode in -order to perform some KVM maintenance. To do so, an IPI is sent, forcing -a guest mode exit. However, a VCPU thread may not be in guest mode at the -time of the kick. Therefore, depending on the mode and state of the VCPU -thread, there are two other actions a kick may take. All three actions -are listed below: - -1) Send an IPI. This forces a guest mode exit. -2) Waking a sleeping VCPU. Sleeping VCPUs are VCPU threads outside guest - mode that wait on waitqueues. Waking them removes the threads from - the waitqueues, allowing the threads to run again. This behavior - may be suppressed, see KVM_REQUEST_NO_WAKEUP below. -3) Nothing. When the VCPU is not in guest mode and the VCPU thread is not - sleeping, then there is nothing to do. - -VCPU Mode ---------- - -VCPUs have a mode state, ``vcpu->mode``, that is used to track whether the -guest is running in guest mode or not, as well as some specific -outside guest mode states. The architecture may use ``vcpu->mode`` to -ensure VCPU requests are seen by VCPUs (see "Ensuring Requests Are Seen"), -as well as to avoid sending unnecessary IPIs (see "IPI Reduction"), and -even to ensure IPI acknowledgements are waited upon (see "Waiting for -Acknowledgements"). The following modes are defined: - -OUTSIDE_GUEST_MODE - - The VCPU thread is outside guest mode. - -IN_GUEST_MODE - - The VCPU thread is in guest mode. - -EXITING_GUEST_MODE - - The VCPU thread is transitioning from IN_GUEST_MODE to - OUTSIDE_GUEST_MODE. 
-
-READING_SHADOW_PAGE_TABLES
-
-  The VCPU thread is outside guest mode, but it wants the sender of
-  certain VCPU requests, namely KVM_REQ_TLB_FLUSH, to wait until the VCPU
-  thread is done reading the page tables.
-
-VCPU Request Internals
-======================
-
-VCPU requests are simply bit indices of the ``vcpu->requests`` bitmap.
-This means general bitops, like those documented in [atomic-ops]_, could
-also be used, e.g. ::
-
-  clear_bit(KVM_REQ_UNHALT & KVM_REQUEST_MASK, &vcpu->requests);
-
-However, VCPU request users should refrain from doing so, as it would
-break the abstraction.  The first 8 bits are reserved for architecture
-independent requests; all additional bits are available for architecture
-dependent requests.
-
-Architecture Independent Requests
----------------------------------
-
-KVM_REQ_TLB_FLUSH
-
-  KVM's common MMU notifier may need to flush all of a guest's TLB
-  entries, calling kvm_flush_remote_tlbs() to do so.  Architectures that
-  choose to use the common kvm_flush_remote_tlbs() implementation will
-  need to handle this VCPU request.
-
-KVM_REQ_MMU_RELOAD
-
-  When shadow page tables are used and memory slots are removed it's
-  necessary to inform each VCPU to completely refresh the tables.  This
-  request is used for that.
-
-KVM_REQ_PENDING_TIMER
-
-  This request may be made from a timer handler run on the host on behalf
-  of a VCPU.  It informs the VCPU thread to inject a timer interrupt.
-
-KVM_REQ_UNHALT
-
-  This request may be made from the KVM common function kvm_vcpu_block(),
-  which is used to emulate an instruction that causes a CPU to halt until
-  one of an architecture-specific set of events and/or interrupts is
-  received (determined by checking kvm_arch_vcpu_runnable()).  When that
-  event or interrupt arrives, kvm_vcpu_block() makes the request.  This is
-  in contrast to when kvm_vcpu_block() returns for any other reason, such
-  as a pending signal, which does not indicate the VCPU's halt emulation
-  should stop, and therefore does not make the request.
-
-KVM_REQUEST_MASK
-----------------
-
-VCPU requests should be masked by KVM_REQUEST_MASK before using them with
-bitops.  This is because only the lower 8 bits are used to represent the
-request's number.  The upper bits are used as flags.  Currently only two
-flags are defined.
-
-VCPU Request Flags
-------------------
-
-KVM_REQUEST_NO_WAKEUP
-
-  This flag is applied to requests that only need immediate attention
-  from VCPUs running in guest mode.  That is, sleeping VCPUs do not need
-  to be awakened for these requests.  Sleeping VCPUs will handle the
-  requests when they are awakened later for some other reason.
-
-KVM_REQUEST_WAIT
-
-  When requests with this flag are made with kvm_make_all_cpus_request(),
-  then the caller will wait for each VCPU to acknowledge its IPI before
-  proceeding.  This flag only applies to VCPUs that would receive IPIs.
-  If, for example, the VCPU is sleeping, so no IPI is necessary, then
-  the requesting thread does not wait.  This means that this flag may be
-  safely combined with KVM_REQUEST_NO_WAKEUP.  See "Waiting for
-  Acknowledgements" for more information about requests with
-  KVM_REQUEST_WAIT.
-
-VCPU Requests with Associated State
-===================================
-
-Requesters that want the receiving VCPU to handle new state need to ensure
-the newly written state is observable to the receiving VCPU thread's CPU
-by the time it observes the request.
-This means a write memory barrier must be inserted after writing the new
-state and before setting the VCPU request bit.  Additionally, on the
-receiving VCPU thread's side, a corresponding read barrier must be
-inserted after reading the request bit and before proceeding to read the
-new state associated with it.  See scenario 3, Message and Flag, of
-[lwn-mb]_ and the kernel documentation [memory-barriers]_.
-
-The pair of functions, kvm_check_request() and kvm_make_request(), provide
-the memory barriers, allowing this requirement to be handled internally by
-the API.
-
-Ensuring Requests Are Seen
-==========================
-
-When making requests to VCPUs, we want to avoid the receiving VCPU
-executing in guest mode for an arbitrarily long time without handling the
-request.  We can be sure this won't happen as long as we ensure the VCPU
-thread checks kvm_request_pending() before entering guest mode and that a
-kick will send an IPI to force an exit from guest mode when necessary.
-Extra care must be taken to cover the period after the VCPU thread's last
-kvm_request_pending() check and before it has entered guest mode, as kick
-IPIs will only trigger guest mode exits for VCPU threads that are in guest
-mode or at least have already disabled interrupts in order to prepare to
-enter guest mode.  This means that an optimized implementation (see "IPI
-Reduction") must be certain when it's safe to not send the IPI.  One
-solution, which all architectures except s390 apply, is to:
-
-- set ``vcpu->mode`` to IN_GUEST_MODE between disabling the interrupts and
-  the last kvm_request_pending() check;
-- enable interrupts atomically when entering the guest.
-
-This solution also requires memory barriers to be placed carefully in both
-the requesting thread and the receiving VCPU.  With the memory barriers we
-can exclude the possibility of a VCPU thread observing
-!kvm_request_pending() on its last check and then not receiving an IPI for
-the next request made of it, even if the request is made immediately after
-the check.  This is done by way of the Dekker memory barrier pattern
-(scenario 10 of [lwn-mb]_).  As the Dekker pattern requires two variables,
-this solution pairs ``vcpu->mode`` with ``vcpu->requests``.  Substituting
-them into the pattern gives::
-
-  CPU1                                    CPU2
-  ======================================  ======================================
-  local_irq_disable();
-  WRITE_ONCE(vcpu->mode, IN_GUEST_MODE);  kvm_make_request(REQ, vcpu);
-  smp_mb();                               smp_mb();
-  if (kvm_request_pending(vcpu)) {        if (READ_ONCE(vcpu->mode) ==
-                                              IN_GUEST_MODE) {
-      ...abort guest entry...                 ...send IPI...
-  }                                       }
-
-As stated above, the IPI is only useful for VCPU threads in guest mode or
-that have already disabled interrupts.  This is why this specific case of
-the Dekker pattern has been extended to disable interrupts before setting
-``vcpu->mode`` to IN_GUEST_MODE.  WRITE_ONCE() and READ_ONCE() are used to
-pedantically implement the memory barrier pattern, guaranteeing the
-compiler doesn't interfere with ``vcpu->mode``'s carefully planned
-accesses.
-
-IPI Reduction
--------------
-
-As only one IPI is needed to get a VCPU to check for any/all requests,
-IPIs for multiple requests may be coalesced.  This is easily done by
-having the first IPI-sending kick also change the VCPU mode to something
-!IN_GUEST_MODE.  The transitional state, EXITING_GUEST_MODE, is used for
-this purpose.
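-
-To make the entry-side ordering concrete, here is a simplified sketch of a
-VCPU entry path built from the pattern above.  This is illustrative only,
-not actual KVM code; vcpu_enter_guest_hw() is a hypothetical stand-in for
-the architecture's guest-entry primitive, assumed to enable interrupts
-atomically on entry::
-
-  static int vcpu_entry_sketch(struct kvm_vcpu *vcpu)
-  {
-          local_irq_disable();
-          WRITE_ONCE(vcpu->mode, IN_GUEST_MODE);
-
-          /* Pairs with the smp_mb() in kvm_make_request(). */
-          smp_mb();
-
-          if (kvm_request_pending(vcpu)) {
-                  /* Abort entry; handle the requests before retrying. */
-                  WRITE_ONCE(vcpu->mode, OUTSIDE_GUEST_MODE);
-                  local_irq_enable();
-                  return -EAGAIN;
-          }
-
-          vcpu_enter_guest_hw(vcpu);
-
-          WRITE_ONCE(vcpu->mode, OUTSIDE_GUEST_MODE);
-          return 0;
-  }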
- -Waiting for Acknowledgements ----------------------------- - -Some requests, those with the KVM_REQUEST_WAIT flag set, require IPIs to -be sent, and the acknowledgements to be waited upon, even when the target -VCPU threads are in modes other than IN_GUEST_MODE. For example, one case -is when a target VCPU thread is in READING_SHADOW_PAGE_TABLES mode, which -is set after disabling interrupts. To support these cases, the -KVM_REQUEST_WAIT flag changes the condition for sending an IPI from -checking that the VCPU is IN_GUEST_MODE to checking that it is not -OUTSIDE_GUEST_MODE. - -Request-less VCPU Kicks ------------------------ - -As the determination of whether or not to send an IPI depends on the -two-variable Dekker memory barrier pattern, then it's clear that -request-less VCPU kicks are almost never correct. Without the assurance -that a non-IPI generating kick will still result in an action by the -receiving VCPU, as the final kvm_request_pending() check does for -request-accompanying kicks, then the kick may not do anything useful at -all. If, for instance, a request-less kick was made to a VCPU that was -just about to set its mode to IN_GUEST_MODE, meaning no IPI is sent, then -the VCPU thread may continue its entry without actually having done -whatever it was the kick was meant to initiate. - -One exception is x86's posted interrupt mechanism. In this case, however, -even the request-less VCPU kick is coupled with the same -local_irq_disable() + smp_mb() pattern described above; the ON bit -(Outstanding Notification) in the posted interrupt descriptor takes the -role of ``vcpu->requests``. When sending a posted interrupt, PIR.ON is -set before reading ``vcpu->mode``; dually, in the VCPU thread, -vmx_sync_pir_to_irr() reads PIR after setting ``vcpu->mode`` to -IN_GUEST_MODE. - -Additional Considerations -========================= - -Sleeping VCPUs --------------- - -VCPU threads may need to consider requests before and/or after calling -functions that may put them to sleep, e.g. kvm_vcpu_block(). Whether they -do or not, and, if they do, which requests need consideration, is -architecture dependent. kvm_vcpu_block() calls kvm_arch_vcpu_runnable() -to check if it should awaken. One reason to do so is to provide -architectures a function where requests may be checked if necessary. - -Clearing Requests ------------------ - -Generally it only makes sense for the receiving VCPU thread to clear a -request. However, in some circumstances, such as when the requesting -thread and the receiving VCPU thread are executed serially, such as when -they are the same thread, or when they are using some form of concurrency -control to temporarily execute synchronously, then it's possible to know -that the request may be cleared immediately, rather than waiting for the -receiving VCPU thread to handle the request in VCPU RUN. The only current -examples of this are kvm_vcpu_block() calls made by VCPUs to block -themselves. A possible side-effect of that call is to make the -KVM_REQ_UNHALT request, which may then be cleared immediately when the -VCPU returns from the call. - -References -========== - -.. [atomic-ops] Documentation/core-api/atomic_ops.rst -.. [memory-barriers] Documentation/memory-barriers.txt -.. [lwn-mb] https://lwn.net/Articles/573436/ diff --git a/Documentation/virtual/paravirt_ops.rst b/Documentation/virtual/paravirt_ops.rst deleted file mode 100644 index 6b789d27cead..000000000000 --- a/Documentation/virtual/paravirt_ops.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. 
SPDX-License-Identifier: GPL-2.0 - -============ -Paravirt_ops -============ - -Linux provides support for different hypervisor virtualization technologies. -Historically different binary kernels would be required in order to support -different hypervisors, this restriction was removed with pv_ops. -Linux pv_ops is a virtualization API which enables support for different -hypervisors. It allows each hypervisor to override critical operations and -allows a single kernel binary to run on all supported execution environments -including native machine -- without any hypervisors. - -pv_ops provides a set of function pointers which represent operations -corresponding to low level critical instructions and high level -functionalities in various areas. pv-ops allows for optimizations at run -time by enabling binary patching of the low-ops critical operations -at boot time. - -pv_ops operations are classified into three categories: - -- simple indirect call - These operations correspond to high level functionality where it is - known that the overhead of indirect call isn't very important. - -- indirect call which allows optimization with binary patch - Usually these operations correspond to low level critical instructions. They - are called frequently and are performance critical. The overhead is - very important. - -- a set of macros for hand written assembly code - Hand written assembly codes (.S files) also need paravirtualization - because they include sensitive instructions or some of code paths in - them are very performance critical. diff --git a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt b/Documentation/virtual/uml/UserModeLinux-HOWTO.txt deleted file mode 100644 index 87b80f589e1c..000000000000 --- a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt +++ /dev/null @@ -1,4589 +0,0 @@ - User Mode Linux HOWTO - User Mode Linux Core Team - Mon Nov 18 14:16:16 EST 2002 - - This document describes the use and abuse of Jeff Dike's User Mode - Linux: a port of the Linux kernel as a normal Intel Linux process. - ______________________________________________________________________ - - Table of Contents - - 1. Introduction - - 1.1 How is User Mode Linux Different? - 1.2 Why Would I Want User Mode Linux? - - 2. Compiling the kernel and modules - - 2.1 Compiling the kernel - 2.2 Compiling and installing kernel modules - 2.3 Compiling and installing uml_utilities - - 3. Running UML and logging in - - 3.1 Running UML - 3.2 Logging in - 3.3 Examples - - 4. UML on 2G/2G hosts - - 4.1 Introduction - 4.2 The problem - 4.3 The solution - - 5. Setting up serial lines and consoles - - 5.1 Specifying the device - 5.2 Specifying the channel - 5.3 Examples - - 6. Setting up the network - - 6.1 General setup - 6.2 Userspace daemons - 6.3 Specifying ethernet addresses - 6.4 UML interface setup - 6.5 Multicast - 6.6 TUN/TAP with the uml_net helper - 6.7 TUN/TAP with a preconfigured tap device - 6.8 Ethertap - 6.9 The switch daemon - 6.10 Slip - 6.11 Slirp - 6.12 pcap - 6.13 Setting up the host yourself - - 7. Sharing Filesystems between Virtual Machines - - 7.1 A warning - 7.2 Using layered block devices - 7.3 Note! - 7.4 Another warning - 7.5 uml_moo : Merging a COW file with its backing file - - 8. Creating filesystems - - 8.1 Create the filesystem file - 8.2 Assign the file to a UML device - 8.3 Creating and mounting the filesystem - - 9. Host file access - - 9.1 Using hostfs - 9.2 hostfs as the root filesystem - 9.3 Building hostfs - - 10. 
The Management Console - 10.1 version - 10.2 halt and reboot - 10.3 config - 10.4 remove - 10.5 sysrq - 10.6 help - 10.7 cad - 10.8 stop - 10.9 go - - 11. Kernel debugging - - 11.1 Starting the kernel under gdb - 11.2 Examining sleeping processes - 11.3 Running ddd on UML - 11.4 Debugging modules - 11.5 Attaching gdb to the kernel - 11.6 Using alternate debuggers - - 12. Kernel debugging examples - - 12.1 The case of the hung fsck - 12.2 Episode 2: The case of the hung fsck - - 13. What to do when UML doesn't work - - 13.1 Strange compilation errors when you build from source - 13.2 (obsolete) - 13.3 A variety of panics and hangs with /tmp on a reiserfs filesystem - 13.4 The compile fails with errors about conflicting types for 'open', 'dup', and 'waitpid' - 13.5 UML doesn't work when /tmp is an NFS filesystem - 13.6 UML hangs on boot when compiled with gprof support - 13.7 syslogd dies with a SIGTERM on startup - 13.8 TUN/TAP networking doesn't work on a 2.4 host - 13.9 You can network to the host but not to other machines on the net - 13.10 I have no root and I want to scream - 13.11 UML build conflict between ptrace.h and ucontext.h - 13.12 The UML BogoMips is exactly half the host's BogoMips - 13.13 When you run UML, it immediately segfaults - 13.14 xterms appear, then immediately disappear - 13.15 Any other panic, hang, or strange behavior - - 14. Diagnosing Problems - - 14.1 Case 1 : Normal kernel panics - 14.2 Case 2 : Tracing thread panics - 14.3 Case 3 : Tracing thread panics caused by other threads - 14.4 Case 4 : Hangs - - 15. Thanks - - 15.1 Code and Documentation - 15.2 Flushing out bugs - 15.3 Buglets and clean-ups - 15.4 Case Studies - 15.5 Other contributions - - - ______________________________________________________________________ - - 1. Introduction - - Welcome to User Mode Linux. It's going to be fun. - - - - 1.1. How is User Mode Linux Different? - - Normally, the Linux Kernel talks straight to your hardware (video - card, keyboard, hard drives, etc), and any programs which run ask the - kernel to operate the hardware, like so: - - - - +-----------+-----------+----+ - | Process 1 | Process 2 | ...| - +-----------+-----------+----+ - | Linux Kernel | - +----------------------------+ - | Hardware | - +----------------------------+ - - - - - The User Mode Linux Kernel is different; instead of talking to the - hardware, it talks to a `real' Linux kernel (called the `host kernel' - from now on), like any other program. Programs can then run inside - User-Mode Linux as if they were running under a normal kernel, like - so: - - - - +----------------+ - | Process 2 | ...| - +-----------+----------------+ - | Process 1 | User-Mode Linux| - +----------------------------+ - | Linux Kernel | - +----------------------------+ - | Hardware | - +----------------------------+ - - - - - - 1.2. Why Would I Want User Mode Linux? - - - 1. If User Mode Linux crashes, your host kernel is still fine. - - 2. You can run a usermode kernel as a non-root user. - - 3. You can debug the User Mode Linux like any normal process. - - 4. You can run gprof (profiling) and gcov (coverage testing). - - 5. You can play with your kernel without breaking things. - - 6. You can use it as a sandbox for testing new apps. - - 7. You can try new development kernels safely. - - 8. You can run different distributions simultaneously. - - 9. It's extremely fun. - - - - - - 2. Compiling the kernel and modules - - - - - 2.1. 
Compiling the kernel - - - Compiling the user mode kernel is just like compiling any other - kernel. Let's go through the steps, using 2.4.0-prerelease (current - as of this writing) as an example: - - - 1. Download the latest UML patch from - - the download page - . - - - 3. Make a directory and unpack the kernel into it. - - - - host% - mkdir ~/uml - - - - - - - host% - cd ~/uml - - - - - - - host% - tar -xzvf linux-2.4.0-prerelease.tar.bz2 - - - - - - - 4. Apply the patch using - - - - host% - cd ~/uml/linux - - - - host% - bzcat uml-patch-2.4.0-prerelease.bz2 | patch -p1 - - - - - - - 5. Run your favorite config; `make xconfig ARCH=um' is the most - convenient. `make config ARCH=um' and 'make menuconfig ARCH=um' - will work as well. The defaults will give you a useful kernel. If - you want to change something, go ahead, it probably won't hurt - anything. - - - Note: If the host is configured with a 2G/2G address space split - rather than the usual 3G/1G split, then the packaged UML binaries - will not run. They will immediately segfault. See ``UML on 2G/2G - hosts'' for the scoop on running UML on your system. - - - - 6. Finish with `make linux ARCH=um': the result is a file called - `linux' in the top directory of your source tree. - - Make sure that you don't build this kernel in /usr/src/linux. On some - distributions, /usr/include/asm is a link into this pool. The user- - mode build changes the other end of that link, and things that include - stop compiling. - - The sources are also available from cvs at the project's cvs page, - which has directions on getting the sources. You can also browse the - CVS pool from there. - - If you get the CVS sources, you will have to check them out into an - empty directory. You will then have to copy each file into the - corresponding directory in the appropriate kernel pool. - - If you don't have the latest kernel pool, you can get the - corresponding user-mode sources with - - - host% cvs co -r v_2_3_x linux - - - - - where 'x' is the version in your pool. Note that you will not get the - bug fixes and enhancements that have gone into subsequent releases. - - - 2.2. Compiling and installing kernel modules - - UML modules are built in the same way as the native kernel (with the - exception of the 'ARCH=um' that you always need for UML): - - - host% make modules ARCH=um - - - - - Any modules that you want to load into this kernel need to be built in - the user-mode pool. Modules from the native kernel won't work. - - You can install them by using ftp or something to copy them into the - virtual machine and dropping them into /lib/modules/`uname -r`. - - You can also get the kernel build process to install them as follows: - - 1. with the kernel not booted, mount the root filesystem in the top - level of the kernel pool: - - - host% mount root_fs mnt -o loop - - - - - - - 2. run - - - host% - make modules_install INSTALL_MOD_PATH=`pwd`/mnt ARCH=um - - - - - - - 3. unmount the filesystem - - - host% umount mnt - - - - - - - 4. boot the kernel on it - - - When the system is booted, you can use insmod as usual to get the - modules into the kernel. A number of things have been loaded into UML - as modules, especially filesystems and network protocols and filters, - so most symbols which need to be exported probably already are. - However, if you do find symbols that need exporting, let us - know, and - they'll be "taken care of". - - - - 2.3. 
Compiling and installing uml_utilities - - Many features of the UML kernel require a user-space helper program, - so a uml_utilities package is distributed separately from the kernel - patch which provides these helpers. Included within this is: - - o port-helper - Used by consoles which connect to xterms or ports - - o tunctl - Configuration tool to create and delete tap devices - - o uml_net - Setuid binary for automatic tap device configuration - - o uml_switch - User-space virtual switch required for daemon - transport - - The uml_utilities tree is compiled with: - - - host# - make && make install - - - - - Note that UML kernel patches may require a specific version of the - uml_utilities distribution. If you don't keep up with the mailing - lists, ensure that you have the latest release of uml_utilities if you - are experiencing problems with your UML kernel, particularly when - dealing with consoles or command-line switches to the helper programs - - - - - - - - - 3. Running UML and logging in - - - - 3.1. Running UML - - It runs on 2.2.15 or later, and all 2.4 kernels. - - - Booting UML is straightforward. Simply run 'linux': it will try to - mount the file `root_fs' in the current directory. You do not need to - run it as root. If your root filesystem is not named `root_fs', then - you need to put a `ubd0=root_fs_whatever' switch on the linux command - line. - - - You will need a filesystem to boot UML from. There are a number - available for download from here . There are also several tools - which can be - used to generate UML-compatible filesystem images from media. - The kernel will boot up and present you with a login prompt. - - - Note: If the host is configured with a 2G/2G address space split - rather than the usual 3G/1G split, then the packaged UML binaries will - not run. They will immediately segfault. See ``UML on 2G/2G hosts'' - for the scoop on running UML on your system. - - - - 3.2. Logging in - - - - The prepackaged filesystems have a root account with password 'root' - and a user account with password 'user'. The login banner will - generally tell you how to log in. So, you log in and you will find - yourself inside a little virtual machine. Our filesystems have a - variety of commands and utilities installed (and it is fairly easy to - add more), so you will have a lot of tools with which to poke around - the system. - - There are a couple of other ways to log in: - - o On a virtual console - - - - Each virtual console that is configured (i.e. the device exists in - /dev and /etc/inittab runs a getty on it) will come up in its own - xterm. If you get tired of the xterms, read ``Setting up serial - lines and consoles'' to see how to attach the consoles to - something else, like host ptys. - - - - o Over the serial line - - - In the boot output, find a line that looks like: - - - - serial line 0 assigned pty /dev/ptyp1 - - - - - Attach your favorite terminal program to the corresponding tty. I.e. - for minicom, the command would be - - - host% minicom -o -p /dev/ttyp1 - - - - - - - o Over the net - - - If the network is running, then you can telnet to the virtual - machine and log in to it. See ``Setting up the network'' to learn - about setting up a virtual network. - - When you're done using it, run halt, and the kernel will bring itself - down and the process will exit. - - - 3.3. Examples - - Here are some examples of UML in action: - - o A login session - - o A virtual network - - - - - - - - 4. UML on 2G/2G hosts - - - - - 4.1. 
Introduction - - - Most Linux machines are configured so that the kernel occupies the - upper 1G (0xc0000000 - 0xffffffff) of the 4G address space and - processes use the lower 3G (0x00000000 - 0xbfffffff). However, some - machine are configured with a 2G/2G split, with the kernel occupying - the upper 2G (0x80000000 - 0xffffffff) and processes using the lower - 2G (0x00000000 - 0x7fffffff). - - - - - 4.2. The problem - - - The prebuilt UML binaries on this site will not run on 2G/2G hosts - because UML occupies the upper .5G of the 3G process address space - (0xa0000000 - 0xbfffffff). Obviously, on 2G/2G hosts, this is right - in the middle of the kernel address space, so UML won't even load - it - will immediately segfault. - - - - - 4.3. The solution - - - The fix for this is to rebuild UML from source after enabling - CONFIG_HOST_2G_2G (under 'General Setup'). This will cause UML to - load itself in the top .5G of that smaller process address space, - where it will run fine. See ``Compiling the kernel and modules'' if - you need help building UML from source. - - - - - - - - - - - 5. Setting up serial lines and consoles - - - It is possible to attach UML serial lines and consoles to many types - of host I/O channels by specifying them on the command line. - - - You can attach them to host ptys, ttys, file descriptors, and ports. - This allows you to do things like - - o have a UML console appear on an unused host console, - - o hook two virtual machines together by having one attach to a pty - and having the other attach to the corresponding tty - - o make a virtual machine accessible from the net by attaching a - console to a port on the host. - - - The general format of the command line option is device=channel. - - - - 5.1. Specifying the device - - Devices are specified with "con" or "ssl" (console or serial line, - respectively), optionally with a device number if you are talking - about a specific device. - - - Using just "con" or "ssl" describes all of the consoles or serial - lines. If you want to talk about console #3 or serial line #10, they - would be "con3" and "ssl10", respectively. - - - A specific device name will override a less general "con=" or "ssl=". - So, for example, you can assign a pty to each of the serial lines - except for the first two like this: - - - ssl=pty ssl0=tty:/dev/tty0 ssl1=tty:/dev/tty1 - - - - - The specificity of the device name is all that matters; order on the - command line is irrelevant. - - - - 5.2. Specifying the channel - - There are a number of different types of channels to attach a UML - device to, each with a different way of specifying exactly what to - attach to. - - o pseudo-terminals - device=pty pts terminals - device=pts - - - This will cause UML to allocate a free host pseudo-terminal for the - device. The terminal that it got will be announced in the boot - log. You access it by attaching a terminal program to the - corresponding tty: - - o screen /dev/pts/n - - o screen /dev/ttyxx - - o minicom -o -p /dev/ttyxx - minicom seems not able to handle pts - devices - - o kermit - start it up, 'open' the device, then 'connect' - - - - - - o terminals - device=tty:tty device file - - - This will make UML attach the device to the specified tty (i.e - - - con1=tty:/dev/tty3 - - - - - will attach UML's console 1 to the host's /dev/tty3). If the tty that - you specify is the slave end of a tty/pty pair, something else must - have already opened the corresponding pty in order for this to work. 
- - - - - - o xterms - device=xterm - - - UML will run an xterm and the device will be attached to it. - - - - - - o Port - device=port:port number - - - This will attach the UML devices to the specified host port. - Attaching console 1 to the host's port 9000 would be done like - this: - - - con1=port:9000 - - - - - Attaching all the serial lines to that port would be done similarly: - - - ssl=port:9000 - - - - - You access these devices by telnetting to that port. Each active tel- - net session gets a different device. If there are more telnets to a - port than UML devices attached to it, then the extra telnet sessions - will block until an existing telnet detaches, or until another device - becomes active (i.e. by being activated in /etc/inittab). - - This channel has the advantage that you can both attach multiple UML - devices to it and know how to access them without reading the UML boot - log. It is also unique in allowing access to a UML from remote - machines without requiring that the UML be networked. This could be - useful in allowing public access to UMLs because they would be - accessible from the net, but wouldn't need any kind of network - filtering or access control because they would have no network access. - - - If you attach the main console to a portal, then the UML boot will - appear to hang. In reality, it's waiting for a telnet to connect, at - which point the boot will proceed. - - - - - - o already-existing file descriptors - device=file descriptor - - - If you set up a file descriptor on the UML command line, you can - attach a UML device to it. This is most commonly used to put the - main console back on stdin and stdout after assigning all the other - consoles to something else: - - - con0=fd:0,fd:1 con=pts - - - - - - - - - o Nothing - device=null - - - This allows the device to be opened, in contrast to 'none', but - reads will block, and writes will succeed and the data will be - thrown out. - - - - - - o None - device=none - - - This causes the device to disappear. - - - - You can also specify different input and output channels for a device - by putting a comma between them: - - - ssl3=tty:/dev/tty2,xterm - - - - - will cause serial line 3 to accept input on the host's /dev/tty2 and - display output on an xterm. That's a silly example - the most common - use of this syntax is to reattach the main console to stdin and stdout - as shown above. - - - If you decide to move the main console away from stdin/stdout, the - initial boot output will appear in the terminal that you're running - UML in. However, once the console driver has been officially - initialized, then the boot output will start appearing wherever you - specified that console 0 should be. That device will receive all - subsequent output. - - - - 5.3. Examples - - There are a number of interesting things you can do with this - capability. - - - First, this is how you get rid of those bleeding console xterms by - attaching them to host ptys: - - - con=pty con0=fd:0,fd:1 - - - - - This will make a UML console take over an unused host virtual console, - so that when you switch to it, you will see the UML login prompt - rather than the host login prompt: - - - con1=tty:/dev/tty6 - - - - - You can attach two virtual machines together with what amounts to a - serial line as follows: - - Run one UML with a serial line attached to a pty - - - - ssl1=pty - - - - - Look at the boot log to see what pty it got (this example will assume - that it got /dev/ptyp1). 
-
-  Boot the other UML with a serial line attached to the corresponding tty:
-
-       ssl1=tty:/dev/ttyp1
-
-  Log in, make sure that it has no getty on that serial line, attach a
-  terminal program like minicom to it, and you should see the login
-  prompt of the other virtual machine.
-
-  6.  Setting up the network
-
-  This page describes how to set up the various transports and to
-  provide a UML instance with network access to the host, other machines
-  on the local net, and the rest of the net.
-
-  As of 2.4.5, UML networking has been completely redone to make it much
-  easier to set up, fix bugs, and add new features.
-
-  There is a new helper, uml_net, which does the host setup that
-  requires root privileges.
-
-  There are currently seven transport types available for a UML virtual
-  machine to exchange packets with other hosts:
-
-  o  ethertap
-
-  o  TUN/TAP
-
-  o  Multicast
-
-  o  a switch daemon
-
-  o  slip
-
-  o  slirp
-
-  o  pcap
-
-  The TUN/TAP, ethertap, slip, and slirp transports allow a UML
-  instance to exchange packets with the host.  They may be directed
-  to the host or the host may just act as a router to provide access
-  to other physical or virtual machines.
-
-  The pcap transport is a synthetic read-only interface, using the
-  libpcap binary to collect packets from interfaces on the host and
-  filter them.  This is useful for building preconfigured traffic
-  monitors or sniffers.
-
-  The daemon and multicast transports provide a completely virtual
-  network to other virtual machines.  This network is completely
-  disconnected from the physical network unless one of the virtual
-  machines on it is acting as a gateway.
-
-  With so many host transports, which one should you use?  Here's when
-  you should use each one:
-
-  o  ethertap - if you want access to the host networking and it is
-     running 2.2
-
-  o  TUN/TAP - if you want access to the host networking and it is
-     running 2.4.  Also, the TUN/TAP transport is able to use a
-     preconfigured device, allowing it to avoid using the setuid uml_net
-     helper, which is a security advantage.
-
-  o  Multicast - if you want a purely virtual network and you don't want
-     to set up anything but the UML
-
-  o  a switch daemon - if you want a purely virtual network and you
-     don't mind running the daemon in order to get somewhat better
-     performance
-
-  o  slip - there is no particular reason to run the slip backend unless
-     ethertap and TUN/TAP are just not available for some reason
-
-  o  slirp - if you don't have root access on the host to set up
-     networking, or if you don't want to allocate an IP to your UML
-
-  o  pcap - not much use for actual network connectivity, but great for
-     monitoring traffic on the host
-
-  Ethertap is available on 2.4 and works fine.  TUN/TAP is preferred
-  to it because it has better performance and ethertap is officially
-  considered obsolete in 2.4.  Also, the root helper only needs to
-  run occasionally for TUN/TAP, rather than handling every packet, as
-  it does with ethertap.  This is a slight security advantage since
-  it provides fewer opportunities for a nasty UML user to somehow
-  exploit the helper's root privileges.
-
-  6.1.  General setup
-
-  First, you must have the virtual network enabled in your UML.  If you
-  are running a prebuilt kernel from this site, everything is already
-  enabled.  If you build the kernel yourself, under the "Network device
-  support" menu, enable "Network device support", and then the three
-  transports.
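-
-  As an aside, the host side of the TUN/TAP transport described below
-  ultimately comes down to opening /dev/net/tun and binding it to a tap
-  interface.  The following sketch is purely illustrative - it is not
-  UML's code, just the standard way a tap device is opened on a host
-  with TUN/TAP support:
-
-       #include <fcntl.h>
-       #include <string.h>
-       #include <unistd.h>
-       #include <sys/ioctl.h>
-       #include <linux/if.h>
-       #include <linux/if_tun.h>
-
-       /* Open the given tap device; the fd carries raw ethernet frames. */
-       int tap_open(const char *name)
-       {
-               struct ifreq ifr;
-               int fd = open("/dev/net/tun", O_RDWR);
-
-               if (fd < 0)
-                       return -1;
-               memset(&ifr, 0, sizeof(ifr));
-               ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
-               strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
-               if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
-                       close(fd);
-                       return -1;
-               }
-               return fd;
-       }
-
-  Helpers like tunctl essentially build on this same ioctl interface.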
-
-  The next step is to provide a network device to the virtual machine.
-  This is done by describing it on the kernel command line.  The general
-  format is
-
-       ethn=transport,transport args
-
-  For example, a virtual ethernet device may be attached to a host
-  ethertap device as follows:
-
-       eth0=ethertap,tap0,fe:fd:0:0:0:1,192.168.0.254
-
-  This sets up eth0 inside the virtual machine to attach itself to the
-  host /dev/tap0, assigns it an ethernet address, and assigns the host
-  tap0 interface an IP address.
-
-  Note that the IP address you assign to the host end of the tap device
-  must be different from the IP you assign to the eth device inside UML.
-  If you are short on IPs and don't want to consume two per UML, then
-  you can reuse the host's eth IP address for the host ends of the tap
-  devices.  Internally, the UMLs must still get unique IPs for their eth
-  devices.  You can also give the UMLs non-routable IPs (192.168.x.x or
-  10.x.x.x) and have the host masquerade them.  This will let outgoing
-  connections work, but incoming connections won't without more work,
-  such as port forwarding from the host.
-
-  Also note that when you configure the host side of an interface, it is
-  only acting as a gateway.  It will respond to pings sent to it
-  locally, but there is little point in doing that, since it's a host
-  interface - you are not talking to the UML when you ping that
-  interface and get a response.
-
-  You can also add devices to a UML and remove them at runtime.  See
-  ``The Management Console''  for details.
-
-  The sections below describe this in more detail.
-
-  Once you've decided how you're going to set up the devices, you boot
-  UML, log in, configure the UML side of the devices, and set up routes
-  to the outside world.  At that point, you will be able to talk to any
-  other machines, physical or virtual, on the net.
-
-  If ifconfig inside UML fails and the network refuses to come up, check
-  the kernel log on the host - it will usually tell you what went wrong.
-
-  6.2.  Userspace daemons
-
-  You will likely need the setuid helper, or the switch daemon, or both.
-  They are both installed with the RPM and deb, so if you've installed
-  either, you can skip the rest of this section.
-
-  If not, then you need to check them out of CVS, build them, and
-  install them.  The helper is uml_net, in CVS /tools/uml_net, and the
-  daemon is uml_switch, in CVS /tools/uml_router.  They are both built
-  with a plain 'make'.  Both need to be installed in a directory that's
-  in your path - /usr/bin is recommended.  On top of that, uml_net needs
-  to be setuid root.
-
-  6.3.  Specifying ethernet addresses
-
-  Below, you will see that the TUN/TAP, ethertap, and daemon interfaces
-  allow you to specify hardware addresses for the virtual ethernet
-  devices.  This is generally not necessary.  If you don't have a
-  specific reason to do it, you probably shouldn't.  If one is not
-  specified on the command line, the driver will assign one based on the
-  device IP address.  It will provide the address fe:fd:nn:nn:nn:nn
-  where nn.nn.nn.nn is the device IP address.  This is nearly always
-  sufficient to guarantee a unique hardware address for the device.
A - couple of exceptions are: - - o Another set of virtual ethernet devices are on the same network and - they are assigned hardware addresses using a different scheme which - may conflict with the UML IP address-based scheme - - o You aren't going to use the device for IP networking, so you don't - assign the device an IP address - - If you let the driver provide the hardware address, you should make - sure that the device IP address is known before the interface is - brought up. So, inside UML, this will guarantee that: - - - - UML# - ifconfig eth0 192.168.0.250 up - - - - - If you decide to assign the hardware address yourself, make sure that - the first byte of the address is even. Addresses with an odd first - byte are broadcast addresses, which you don't want assigned to a - device. - - - - 6.4. UML interface setup - - Once the network devices have been described on the command line, you - should boot UML and log in. - - - The first thing to do is bring the interface up: - - - UML# ifconfig ethn ip-address up - - - - - You should be able to ping the host at this point. - - - To reach the rest of the world, you should set a default route to the - host: - - - UML# route add default gw host ip - - - - - Again, with host ip of 192.168.0.4: - - - UML# route add default gw 192.168.0.4 - - - - - This page used to recommend setting a network route to your local net. - This is wrong, because it will cause UML to try to figure out hardware - addresses of the local machines by arping on the interface to the - host. Since that interface is basically a single strand of ethernet - with two nodes on it (UML and the host) and arp requests don't cross - networks, they will fail to elicit any responses. So, what you want - is for UML to just blindly throw all packets at the host and let it - figure out what to do with them, which is what leaving out the network - route and adding the default route does. - - - Note: If you can't communicate with other hosts on your physical - ethernet, it's probably because of a network route that's - automatically set up. If you run 'route -n' and see a route that - looks like this: - - - - - Destination Gateway Genmask Flags Metric Ref Use Iface - 192.168.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0 - - - - - with a mask that's not 255.255.255.255, then replace it with a route - to your host: - - - UML# - route del -net 192.168.0.0 dev eth0 netmask 255.255.255.0 - - - - - - - UML# - route add -host 192.168.0.4 dev eth0 - - - - - This, plus the default route to the host, will allow UML to exchange - packets with any machine on your ethernet. - - - - 6.5. Multicast - - The simplest way to set up a virtual network between multiple UMLs is - to use the mcast transport. This was written by Harald Welte and is - present in UML version 2.4.5-5um and later. Your system must have - multicast enabled in the kernel and there must be a multicast-capable - network device on the host. Normally, this is eth0, but if there is - no ethernet card on the host, then you will likely get strange error - messages when you bring the device up inside UML. - - - To use it, run two UMLs with - - - eth0=mcast - - - - - on their command lines. Log in, configure the ethernet device in each - machine with different IP addresses: - - - UML1# ifconfig eth0 192.168.0.254 - - - - - - - UML2# ifconfig eth0 192.168.0.253 - - - - - and they should be able to talk to each other. 
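-
-  Under the hood, the mcast transport essentially has every UML on the
-  virtual network join the same multicast group and exchange ethernet
-  frames as UDP datagrams.  The sketch below illustrates that mechanism
-  only - it is not UML's actual code, and error checking is omitted:
-
-       #include <netinet/in.h>
-       #include <arpa/inet.h>
-       #include <string.h>
-       #include <sys/socket.h>
-
-       /* Join a multicast group; each datagram carries one frame. */
-       int mcast_open(const char *group, int port)
-       {
-               struct sockaddr_in sin;
-               struct ip_mreq mreq;
-               int fd = socket(AF_INET, SOCK_DGRAM, 0);
-
-               memset(&sin, 0, sizeof(sin));
-               sin.sin_family = AF_INET;
-               sin.sin_addr.s_addr = htonl(INADDR_ANY);
-               sin.sin_port = htons(port);
-               bind(fd, (struct sockaddr *)&sin, sizeof(sin));
-
-               mreq.imr_multiaddr.s_addr = inet_addr(group);
-               mreq.imr_interface.s_addr = htonl(INADDR_ANY);
-               setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
-                          &mreq, sizeof(mreq));
-               return fd;
-       }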
-
-
- The full set of command line options for this transport are
-
-
-      ethn=mcast,ethernet address,multicast address,multicast port,ttl
-
-
- Harald's original README is here and explains these in detail, as
- well as some other issues.
-
- There is also a related point-to-point only "ucast" transport.  This
- is useful when your network does not support multicast, and all
- network connections are simple point to point links.
-
- The full set of command line options for this transport are
-
-
-      ethn=ucast,ethernet address,remote address,listen port,remote port
-
-
-
- 6.6.  TUN/TAP with the uml_net helper
-
- TUN/TAP is the preferred mechanism on 2.4 to exchange packets with
- the host.  The TUN/TAP backend has been in UML since 2.4.9-3um.
-
-
- The easiest way to get up and running is to let the setuid uml_net
- helper do the host setup for you.  This involves insmod-ing the tun.o
- module if necessary, configuring the device, and setting up IP
- forwarding, routing, and proxy arp.  If you are new to UML
- networking, do this first.  If you're concerned about the security
- implications of the setuid helper, use it to get up and running, then
- read the next section to see how to have UML use a preconfigured tap
- device, which avoids the use of uml_net.
-
-
- If you specify an IP address for the host side of the device, the
- uml_net helper will do all necessary setup on the host - the only
- requirement is that TUN/TAP be available, either built in to the host
- kernel or as the tun.o module.
-
- The format of the command line switch to attach a device to a TUN/TAP
- device is
-
-
-      eth <n> =tuntap, <device> , <ethernet address> , <IP address>
-
-
- For example, this argument will attach the UML's eth0 to the next
- available tap device and assign an ethernet address to it based on
- its IP address
-
-
-      eth0=tuntap,,,192.168.0.254
-
-
- Note that the IP address that must be used for the eth device inside
- UML is fixed by the routing and proxy arp that is set up on the
- TUN/TAP device on the host.  You can use a different one, but it
- won't work because reply packets won't reach the UML.  This is a
- feature.  It prevents a nasty UML user from doing things like setting
- the UML IP to the same as the network's nameserver or mail server.
-
-
- There are a couple of potential problems with running the TUN/TAP
- transport on a 2.4 host kernel:
-
- o  TUN/TAP seems not to work on 2.4.3 and earlier.  Upgrade the host
-    kernel or use the ethertap transport.
-
- o  With an upgraded kernel, TUN/TAP may fail with
-
-
-      File descriptor in bad state
-
-
- This is due to a header mismatch between the upgraded kernel and the
- kernel that was originally installed on the machine.  The fix is to
- make sure that /usr/src/linux points to the headers for the running
- kernel.
-
- These were pointed out by Tim Robinson in a post to the uml-user
- mailing list.
-
-
-
- 6.7.  TUN/TAP with a preconfigured tap device
-
- If you prefer not to have UML use uml_net (which is somewhat
- insecure), with UML 2.4.17-11, you can set up a TUN/TAP device
- beforehand.  The setup needs to be done as root, but once that's
- done, there is no need for root assistance.  Setting up the device is
- done as follows:
-
- o  Create the device with tunctl (available from the UML utilities
-    tarball)
-
-
-      host# tunctl -u uid
-
-
- where uid is the user id or username that UML will be run as.  This
- will tell you what device was created.
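-
- For example, the exchange might look like this (the exact wording of
- the output can vary between versions of the UML utilities, so treat
- this as a sketch):
-
-
-      host# tunctl -u 1000
-      Set 'tap0' persistent and owned by uid 1000
-
-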
-
-
- o  Configure the device IP (change IP addresses and device name to
-    suit)
-
-
-      host# ifconfig tap0 192.168.0.254 up
-
-
- o  Set up routing and arping if desired - this is my recipe, there
-    are other ways of doing the same thing
-
-
-      host# bash -c 'echo 1 > /proc/sys/net/ipv4/ip_forward'
-
-      host# route add -host 192.168.0.253 dev tap0
-
-      host# bash -c 'echo 1 > /proc/sys/net/ipv4/conf/tap0/proxy_arp'
-
-      host# arp -Ds 192.168.0.253 eth0 pub
-
-
- Note that this must be done every time the host boots - this
- configuration is not stored across host reboots.  So, it's probably a
- good idea to stick it in an rc file.  An even better idea would be a
- little utility which reads the information from a config file and
- sets up devices at boot time.
-
- o  Rather than using up two IPs and ARPing for one of them, you can
-    also provide direct access to your LAN by the UML by using a
-    bridge.
-
-
-      host# brctl addbr br0
-
-      host# ifconfig eth0 0.0.0.0 promisc up
-
-      host# ifconfig tap0 0.0.0.0 promisc up
-
-      host# ifconfig br0 192.168.0.1 netmask 255.255.255.0 up
-
-      host# brctl stp br0 off
-
-      host# brctl setfd br0 1
-
-      host# brctl sethello br0 1
-
-      host# brctl addif br0 eth0
-
-      host# brctl addif br0 tap0
-
-
- Note that 'br0' should be set up using ifconfig with the existing IP
- address of eth0, as eth0 no longer has its own IP.
-
- o  Also, the /dev/net/tun device must be writable by the user running
-    UML in order for the UML to use the device that's been configured
-    for it.  The simplest thing to do is
-
-
-      host# chmod 666 /dev/net/tun
-
-
- Making it world-writable looks bad, but it seems not to be
- exploitable as a security hole.  However, it does allow anyone to
- create useless tap devices (useless because they can't configure
- them), which is a DoS attack.  A somewhat more secure alternative
- would be to create a group containing all the users who have
- preconfigured tap devices and chgrp /dev/net/tun to that group with
- mode 664 or 660.
-
- o  Once the device is set up, run UML with 'eth0=tuntap,device name'
-    (e.g. 'eth0=tuntap,tap0') on the command line (or do it with the
-    mconsole config command).
-
- o  Bring the eth device up in UML and you're in business.
-
- If you don't want that tap device any more, you can make it non-
- persistent with
-
-
-      host# tunctl -d tap device
-
-
- Finally, tunctl has a -b (for brief mode) switch which causes it to
- output only the name of the tap device it created.  This makes it
- suitable for capture by a script:
-
-
-      host# TAP=`tunctl -u 1000 -b`
-
-
-
- 6.8.  Ethertap
-
- Ethertap is the general mechanism on 2.2 for userspace processes to
- exchange packets with the kernel.
-
-
- To use this transport, you need to describe the virtual network
- device on the UML command line.  The general format for this is
-
-
-      eth <n> =ethertap, <device> , <ethernet address> , <tap IP>
-
-
- So, the previous example
-
-
-      eth0=ethertap,tap0,fe:fd:0:0:0:1,192.168.0.254
-
-
- attaches the UML eth0 device to the host /dev/tap0, assigns it the
- ethernet address fe:fd:0:0:0:1, and assigns the IP address
- 192.168.0.254 to the tap device.
-
-
- The tap device is mandatory, but the others are optional.  If the
- ethernet address is omitted, one will be assigned to it.
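-
- For example, to let the driver pick the ethernet address itself while
- still having the host side of tap0 configured, leave the address
- field empty, in the same style as the tuntap example above (a sketch;
- adjust the device and IP to suit your network):
-
-
-      eth0=ethertap,tap0,,192.168.0.254
-
-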
-
-
- The presence of the tap IP address will cause the helper to run and
- do whatever host setup is needed to allow the virtual machine to
- communicate with the outside world.  If you're not sure you know what
- you're doing, this is the way to go.
-
-
- If it is absent, then you must configure the tap device and whatever
- arping and routing you will need on the host.  However, even in this
- case, the uml_net helper still needs to be in your path and it must
- be setuid root if you're not running UML as root.  This is because
- the tap device doesn't support SIGIO, which UML needs in order to use
- something as a source of input.  So, the helper is used as a
- convenient asynchronous IO thread.
-
- If you're using the uml_net helper, you can ignore the following host
- setup - uml_net will do it for you.  You just need to make sure you
- have ethertap available, either built in to the host kernel or
- available as a module.
-
-
- If you want to set things up yourself, you need to make sure that the
- appropriate /dev entry exists.  If it doesn't, become root and create
- it as follows:
-
-
-      mknod /dev/tap<minor> c 36 <minor> + 16
-
-
- For example, this is how to create /dev/tap0 (its minor number is
- 0 + 16 = 16):
-
-
-      mknod /dev/tap0 c 36 16
-
-
- You also need to make sure that the host kernel has ethertap
- support.  If ethertap is enabled as a module, you apparently need to
- insmod ethertap once for each ethertap device you want to enable.
- So,
-
-
-      host# insmod ethertap
-
-
- will give you the tap0 interface.  To get the tap1 interface, you
- need to run
-
-
-      host# insmod ethertap unit=1 -o ethertap1
-
-
-
- 6.9.  The switch daemon
-
- Note: This is the daemon formerly known as uml_router, but which was
- renamed so the network weenies of the world would stop growling at
- me.
-
-
- The switch daemon, uml_switch, provides a mechanism for creating a
- totally virtual network.  By default, it provides no connection to
- the host network (but see -tap, below).
-
-
- The first thing you need to do is run the daemon.  Running it with no
- arguments will make it listen on a default pair of unix domain
- sockets.
-
-
- If you want it to listen on a different pair of sockets, use
-
-
-      -unix <control socket> <data socket>
-
-
- If you want it to act as a hub rather than a switch, use
-
-
-      -hub
-
-
- If you want the switch to be connected to host networking (allowing
- the UMLs to get access to the outside world through the host), use
-
-
-      -tap tap0
-
-
- Note that the tap device must be preconfigured (see "TUN/TAP with a
- preconfigured tap device", above).  If you're using a different tap
- device than tap0, specify that instead of tap0.
-
-
- uml_switch can be backgrounded as follows
-
-
-      host% uml_switch [ options ] < /dev/null > /dev/null
-
-
- The reason it doesn't background by default is that it listens to
- stdin for EOF.  When it sees that, it exits.
-
-
- The general format of the kernel command line switch is
-
-
-      ethn=daemon,ethernet address,socket type,control socket,data socket
-
-
- You can leave off everything except the 'daemon'.  You only need to
- specify the ethernet address if the one that will be assigned to it
- isn't acceptable for some reason.  The rest of the arguments describe
- how to communicate with the daemon.  You should only specify them if
- you told the daemon to use different sockets than the default.
- So, if you ran the daemon with no arguments, running the UML on the
- same machine with
-
-
-      eth0=daemon
-
-
- will cause the eth0 driver to attach itself to the daemon correctly.
-
-
-
- 6.10.  Slip
-
- Slip is another, less general, mechanism for a process to communicate
- with the host networking.  In contrast to the ethertap interface,
- which exchanges ethernet frames with the host and can be used to
- transport any higher-level protocol, it can only be used to transport
- IP.
-
-
- The general format of the command line switch is
-
-
-      ethn=slip,slip IP
-
-
- The slip IP argument is the IP address that will be assigned to the
- host end of the slip device.  If it is specified, the helper will run
- and will set up the host so that the virtual machine can reach it and
- the rest of the network.
-
-
- There are some oddities with this interface that you should be aware
- of.  You should only specify one slip device on a given virtual
- machine, and its name inside UML will be 'umn', not 'eth0' or
- whatever you specified on the command line.  These problems will be
- fixed at some point.
-
-
-
- 6.11.  Slirp
-
- slirp uses an external program, usually /usr/bin/slirp, to provide
- IP-only networking connectivity through the host.  This is similar to
- IP masquerading with a firewall, although the translation is
- performed in user-space, rather than by the kernel.  As slirp does
- not set up any interfaces on the host, or change routing, slirp does
- not require root access or setuid binaries on the host.
-
-
- The general format of the command line switch for slirp is:
-
-
-      ethn=slirp,ethernet address,slirp path
-
-
- The ethernet address is optional, as UML will set up the interface
- with an ethernet address based upon the initial IP address of the
- interface.  The slirp path is generally /usr/bin/slirp, although it
- will depend on distribution.
-
-
- The slirp program can have a number of options passed to the command
- line and we can't add them to the UML command line, as they will be
- parsed incorrectly.  Instead, a wrapper shell script can be written
- or the options inserted into the ~/.slirprc file.  More information
- on all of the slirp options can be found in its man pages.
-
-
- The eth0 interface on UML should be set up with the IP 10.0.2.15,
- although you can use anything as long as it is not used by a network
- you will be connecting to.  The default route on UML should be set to
- use
-
-
-      UML# route add default dev eth0
-
-
- slirp provides a number of useful IP addresses which can be used by
- UML, such as 10.0.2.3, which is an alias for the DNS server specified
- in /etc/resolv.conf on the host or the IP given in the 'dns' option
- for slirp.
-
-
- Even with a baudrate setting higher than 115200, the slirp connection
- is limited to 115200.  If you need it to go faster, the slirp binary
- needs to be compiled with FULL_BOLT defined in config.h.
-
-
-
- 6.12.  pcap
-
- The pcap transport is attached to a UML ethernet device on the
- command line or with uml_mconsole with the following syntax:
-
-
-      ethn=pcap,host interface,filter expression,option1,option2
-
-
- The expression and options are optional.
-
-
- The interface is whatever network device on the host you want to
- sniff.  The expression is a pcap filter expression, which is also
- what tcpdump uses, so if you know how to specify tcpdump filters, you
- will use the same expressions here.
- The options are up to two of 'promisc' and 'nopromisc', which
- control whether pcap puts the host interface into promiscuous mode,
- and 'optimize' and 'nooptimize', which control whether the pcap
- expression optimizer is used.
-
-
- Example:
-
-
-      eth0=pcap,eth0,tcp
-
-      eth1=pcap,eth0,!tcp
-
-
- will cause the UML eth0 to receive all tcp packets seen on the host
- eth0 and the UML eth1 to receive all non-tcp packets on the host
- eth0.
-
-
-
- 6.13.  Setting up the host yourself
-
- If you don't specify an address for the host side of the ethertap or
- slip device, UML won't do any setup on the host.  So this is what is
- needed to get things working (the examples use a host-side IP of
- 192.168.0.251 and a UML-side IP of 192.168.0.250 - adjust to suit
- your own network):
-
- o  The device needs to be configured with its IP address.  Tap
-    devices are also configured with an mtu of 1484.  Slip devices are
-    configured with a point-to-point address pointing at the UML IP
-    address.
-
-
-      host# ifconfig tap0 arp mtu 1484 192.168.0.251 up
-
-      host# ifconfig sl0 192.168.0.251 pointopoint 192.168.0.250 up
-
-
- o  If a tap device is being set up, a route is set to the UML IP.
-
-
-      UML# route add -host 192.168.0.250 gw 192.168.0.251
-
-
- o  To allow other hosts on your network to see the virtual machine,
-    proxy arp is set up for it.
-
-
-      host# arp -Ds 192.168.0.250 eth0 pub
-
-
- o  Finally, the host is set up to route packets.
-
-
-      host# echo 1 > /proc/sys/net/ipv4/ip_forward
-
-
-
-
- 7.  Sharing Filesystems between Virtual Machines
-
-
-
- 7.1.  A warning
-
- Don't attempt to share filesystems simply by booting two UMLs from
- the same file.  That's the same thing as booting two physical
- machines from a shared disk.  It will result in filesystem
- corruption.
-
-
-
- 7.2.  Using layered block devices
-
- The way to share a filesystem between two virtual machines is to use
- the copy-on-write (COW) layering capability of the ubd block driver.
- As of 2.4.6-2um, the driver supports layering a read-write private
- device over a read-only shared device.  A machine's writes are stored
- in the private device, while reads come from either device - the
- private one if the requested block is valid in it, the shared one if
- not.  Using this scheme, the majority of data which is unchanged is
- shared between an arbitrary number of virtual machines, each of which
- has a much smaller file containing the changes that it has made.
- With a large number of UMLs booting from a large root filesystem,
- this leads to a huge disk space saving.  It will also help
- performance, since the host will be able to cache the shared data
- using a much smaller amount of memory, so UML disk requests will be
- served from the host's memory rather than its disks.
-
-
- To add a copy-on-write layer to an existing block device file, simply
- add the name of the COW file to the appropriate ubd switch:
-
-
-      ubd0=root_fs_cow,root_fs_debian_22
-
-
- where 'root_fs_cow' is the private COW file and 'root_fs_debian_22'
- is the existing shared filesystem.  The COW file need not exist.  If
- it doesn't, the driver will create and initialize it.  Once the COW
- file has been initialized, it can be used on its own on the command
- line:
-
-
-      ubd0=root_fs_cow
-
-
- The name of the backing file is stored in the COW file header, so it
- would be redundant to continue specifying it on the command line.
-
-
-
- 7.3.  Note!
- - When checking the size of the COW file in order to see the gobs of - space that you're saving, make sure you use 'ls -ls' to see the actual - disk consumption rather than the length of the file. The COW file is - sparse, so the length will be very different from the disk usage. - Here is a 'ls -l' of a COW file and backing file from one boot and - shutdown: - host% ls -l cow.debian debian2.2 - -rw-r--r-- 1 jdike jdike 492504064 Aug 6 21:16 cow.debian - -rwxrw-rw- 1 jdike jdike 537919488 Aug 6 20:42 debian2.2 - - - - - Doesn't look like much saved space, does it? Well, here's 'ls -ls': - - - host% ls -ls cow.debian debian2.2 - 880 -rw-r--r-- 1 jdike jdike 492504064 Aug 6 21:16 cow.debian - 525832 -rwxrw-rw- 1 jdike jdike 537919488 Aug 6 20:42 debian2.2 - - - - - Now, you can see that the COW file has less than a meg of disk, rather - than 492 meg. - - - - 7.4. Another warning - - Once a filesystem is being used as a readonly backing file for a COW - file, do not boot directly from it or modify it in any way. Doing so - will invalidate any COW files that are using it. The mtime and size - of the backing file are stored in the COW file header at its creation, - and they must continue to match. If they don't, the driver will - refuse to use the COW file. - - - - - If you attempt to evade this restriction by changing either the - backing file or the COW header by hand, you will get a corrupted - filesystem. - - - - - Among other things, this means that upgrading the distribution in a - backing file and expecting that all of the COW files using it will see - the upgrade will not work. - - - - - 7.5. uml_moo : Merging a COW file with its backing file - - Depending on how you use UML and COW devices, it may be advisable to - merge the changes in the COW file into the backing file every once in - a while. - - - - - The utility that does this is uml_moo. Its usage is - - - host% uml_moo COW file new backing file - - - - - There's no need to specify the backing file since that information is - already in the COW file header. If you're paranoid, boot the new - merged file, and if you're happy with it, move it over the old backing - file. - - - - - uml_moo creates a new backing file by default as a safety measure. It - also has a destructive merge option which will merge the COW file - directly into its current backing file. This is really only usable - when the backing file only has one COW file associated with it. If - there are multiple COWs associated with a backing file, a -d merge of - one of them will invalidate all of the others. However, it is - convenient if you're short of disk space, and it should also be - noticeably faster than a non-destructive merge. - - - - - uml_moo is installed with the UML deb and RPM. If you didn't install - UML from one of those packages, you can also get it from the UML - utilities tar file in tools/moo. - - - - - - - - - 8. Creating filesystems - - - You may want to create and mount new UML filesystems, either because - your root filesystem isn't large enough or because you want to use a - filesystem other than ext2. - - - This was written on the occasion of reiserfs being included in the - 2.4.1 kernel pool, and therefore the 2.4.1 UML, so the examples will - talk about reiserfs. This information is generic, and the examples - should be easy to translate to the filesystem of your choice. - - - 8.1. Create the filesystem file - - dd is your friend. All you need to do is tell dd to create an empty - file of the appropriate size. 
- I usually make it sparse to save time and to avoid allocating disk
- space until it's actually used.  For example, the following command
- will create a sparse 100 meg file full of zeroes.
-
-
-      host% dd if=/dev/zero of=new_filesystem seek=100 count=1 bs=1M
-
-
-
- 8.2.  Assign the file to a UML device
-
- Add an argument like the following to the UML command line:
-
-      ubd4=new_filesystem
-
-
- making sure that you use an unassigned ubd device number.
-
-
-
- 8.3.  Creating and mounting the filesystem
-
- Make sure that the filesystem is available, either by being built
- into the kernel, or available as a module, then boot up UML and log
- in.  If the root filesystem doesn't have the filesystem utilities
- (mkfs, fsck, etc), then get them into UML by way of the net or
- hostfs.
-
-
- Make the new filesystem on the device assigned to the new file:
-
-
-      UML# mkreiserfs /dev/ubd/4
-
-
-      <----------- MKREISERFSv2 ----------->
-
-      ReiserFS version 3.6.25
-      Block size 4096 bytes
-      Block count 25856
-      Used blocks 8212
-              Journal - 8192 blocks (18-8209), journal header is in block 8210
-              Bitmaps: 17
-              Root block 8211
-      Hash function "r5"
-      ATTENTION: ALL DATA WILL BE LOST ON '/dev/ubd/4'! (y/n)y
-      journal size 8192 (from 18)
-      Initializing journal - 0%....20%....40%....60%....80%....100%
-      Syncing..done.
-
-
- Now, mount it:
-
-
-      UML# mount /dev/ubd/4 /mnt
-
-
- and you're in business.
-
-
-
-
- 9.  Host file access
-
-
- If you want to access files on the host machine from inside UML, you
- can treat it as a separate machine and either nfs mount directories
- from the host or copy files into the virtual machine with scp or rcp.
- However, since UML is running on the host, it can access those files
- just like any other process and make them available inside the
- virtual machine without needing to use the network.
-
-
- This is now possible with the hostfs virtual filesystem.  With it,
- you can mount a host directory into the UML filesystem and access the
- files contained in it just as you would on the host.
-
-
- 9.1.  Using hostfs
-
- To begin with, make sure that hostfs is available inside the virtual
- machine with
-
-
-      UML# cat /proc/filesystems
-
-
- hostfs should be listed.  If it's not, either rebuild the kernel with
- hostfs configured into it or make sure that hostfs is built as a
- module and available inside the virtual machine, and insmod it.
-
-
- Now all you need to do is run mount:
-
-
-      UML# mount none /mnt/host -t hostfs
-
-
- will mount the host's / on the virtual machine's /mnt/host.
-
-
- If you don't want to mount the host root directory, then you can
- specify a subdirectory to mount with the -o switch to mount:
-
-
-      UML# mount none /mnt/home -t hostfs -o /home
-
-
- will mount the host's /home on the virtual machine's /mnt/home.
-
-
-
- 9.2.  hostfs as the root filesystem
-
- It's possible to boot from a directory hierarchy on the host using
- hostfs rather than using the standard filesystem in a file.
-
- To start, you need that hierarchy.  The easiest way is to loop mount
- an existing root_fs file:
-
-
-      host# mount root_fs uml_root_dir -o loop
-
-
- You need to change the filesystem type of / in etc/fstab to be
- 'hostfs', so that line looks like this:
-
-      /dev/ubd/0       /        hostfs     defaults     1   1
-
-
- Then you need to chown to yourself all the files in that directory
- that are owned by root.  This worked for me:
-
-
-      host# find . -uid 0 -exec chown jdike {} \;
-
-
- Next, make sure that your UML kernel has hostfs compiled in, not as a
- module.  Then run UML with the boot device pointing at that
- directory:
-
-
-      ubd0=/path/to/uml/root/directory
-
-
- UML should then boot as it does normally.
-
-
- 9.3.  Building hostfs
-
- If you need to build hostfs because it's not in your kernel, you have
- two choices:
-
-
- o  Compiling hostfs into the kernel:
-
-
-    Reconfigure the kernel and set the 'Host filesystem' option to
-    'Y'.
-
-
- o  Compiling hostfs as a module:
-
-
-    Reconfigure the kernel and set the 'Host filesystem' option to
-    'M'.  The module will be in arch/um/fs/hostfs/hostfs.o.  Install
-    that in /lib/modules/`uname -r`/fs in the virtual machine, boot it
-    up, and
-
-
-      UML# insmod hostfs
-
-
-
-
-
- 10.  The Management Console
-
-
- The UML management console is a low-level interface to the kernel,
- somewhat like the i386 SysRq interface.  Since there is a full-blown
- operating system under UML, there is much greater flexibility
- possible than with the SysRq mechanism.
-
-
- There are a number of things you can do with the mconsole interface:
-
- o  get the kernel version
-
- o  add and remove devices
-
- o  halt or reboot the machine
-
- o  send SysRq commands
-
- o  pause and resume the UML
-
-
- You need the mconsole client (uml_mconsole) which is present in CVS
- (/tools/mconsole) in 2.4.5-9um and later, and will be in the RPM in
- 2.4.6.
-
-
- You also need CONFIG_MCONSOLE (under 'General Setup') enabled in UML.
- When you boot UML, you'll see a line like:
-
-
-      mconsole initialized on /home/jdike/.uml/umlNJ32yL/mconsole
-
-
- If you specify a unique machine id on the UML command line, e.g.
-
-
-      umid=debian
-
-
- you'll see this:
-
-
-      mconsole initialized on /home/jdike/.uml/debian/mconsole
-
-
- That file is the socket that uml_mconsole will use to communicate
- with UML.  Run it with either the umid or the full path as its
- argument:
-
-
-      host% uml_mconsole debian
-
-
- or
-
-
-      host% uml_mconsole /home/jdike/.uml/debian/mconsole
-
-
- You'll get a prompt, at which you can run one of these commands:
-
- o  version
-
- o  halt
-
- o  reboot
-
- o  config
-
- o  remove
-
- o  sysrq
-
- o  help
-
- o  cad
-
- o  stop
-
- o  go
-
-
- 10.1.  version
-
- This takes no arguments.  It prints the UML version.
-
-
-      (mconsole)  version
-      OK Linux usermode 2.4.5-9um #1 Wed Jun 20 22:47:08 EDT 2001 i686
-
-
- There are a couple of actual uses for this.  It's a simple no-op
- which can be used to check that a UML is running.  It's also a way of
- sending an interrupt to the UML.  This is sometimes useful on SMP
- hosts, where there's a bug which causes signals to UML to be lost,
- often causing it to appear to hang.  Sending such a UML the mconsole
- version command is a good way to 'wake it up' before networking has
- been enabled, as it does not do anything to the function of the UML.
-
-
-
- 10.2.  halt and reboot
-
- These take no arguments.  They shut the machine down immediately,
- with no syncing of disks and no clean shutdown of userspace.  So,
- they are pretty close to crashing the machine.
-
-
-      (mconsole)  halt
-      OK
-
-
-
- 10.3.  config
-
- "config" adds a new device to the virtual machine.  Currently the ubd
- and network drivers support this.  It takes one argument, which is
- the device to add, with the same syntax as the kernel command line.
-
-
-      (mconsole)  config ubd3=/home/jdike/incoming/roots/root_fs_debian22
-      OK
-      (mconsole)  config eth1=mcast
-      OK
-
-
-
- 10.4.
remove - - "remove" deletes a device from the system. Its argument is just the - name of the device to be removed. The device must be idle in whatever - sense the driver considers necessary. In the case of the ubd driver, - the removed block device must not be mounted, swapped on, or otherwise - open, and in the case of the network driver, the device must be down. - - - (mconsole) remove ubd3 - OK - (mconsole) remove eth1 - OK - - - - - - - 10.5. sysrq - - This takes one argument, which is a single letter. It calls the - generic kernel's SysRq driver, which does whatever is called for by - that argument. See the SysRq documentation in - Documentation/admin-guide/sysrq.rst in your favorite kernel tree to - see what letters are valid and what they do. - - - - 10.6. help - - "help" returns a string listing the valid commands and what each one - does. - - - - 10.7. cad - - This invokes the Ctl-Alt-Del action on init. What exactly this ends - up doing is up to /etc/inittab. Normally, it reboots the machine. - With UML, this is usually not desired, so if a halt would be better, - then find the section of inittab that looks like this - - - # What to do when CTRL-ALT-DEL is pressed. - ca:12345:ctrlaltdel:/sbin/shutdown -t1 -a -r now - - - - - and change the command to halt. - - - - 10.8. stop - - This puts the UML in a loop reading mconsole requests until a 'go' - mconsole command is received. This is very useful for making backups - of UML filesystems, as the UML can be stopped, then synced via 'sysrq - s', so that everything is written to the filesystem. You can then copy - the filesystem and then send the UML 'go' via mconsole. - - - Note that a UML running with more than one CPU will have problems - after you send the 'stop' command, as only one CPU will be held in a - mconsole loop and all others will continue as normal. This is a bug, - and will be fixed. - - - - 10.9. go - - This resumes a UML after being paused by a 'stop' command. Note that - when the UML has resumed, TCP connections may have timed out and if - the UML is paused for a long period of time, crond might go a little - crazy, running all the jobs it didn't do earlier. - - - - - - - - - 11. Kernel debugging - - - Note: The interface that makes debugging, as described here, possible - is present in 2.4.0-test6 kernels and later. - - - Since the user-mode kernel runs as a normal Linux process, it is - possible to debug it with gdb almost like any other process. It is - slightly different because the kernel's threads are already being - ptraced for system call interception, so gdb can't ptrace them. - However, a mechanism has been added to work around that problem. - - - In order to debug the kernel, you need build it from source. See - ``Compiling the kernel and modules'' for information on doing that. - Make sure that you enable CONFIG_DEBUGSYM and CONFIG_PT_PROXY during - the config. These will compile the kernel with -g, and enable the - ptrace proxy so that gdb works with UML, respectively. - - - - - 11.1. Starting the kernel under gdb - - You can have the kernel running under the control of gdb from the - beginning by putting 'debug' on the command line. You will get an - xterm with gdb running inside it. The kernel will send some commands - to gdb which will leave it stopped at the beginning of start_kernel. - At this point, you can get things going with 'next', 'step', or - 'cont'. - - - There is a transcript of a debugging session here , with breakpoints being set in the scheduler and in an - interrupt handler. - 11.2. 
Examining sleeping processes - - Not every bug is evident in the currently running process. Sometimes, - processes hang in the kernel when they shouldn't because they've - deadlocked on a semaphore or something similar. In this case, when - you ^C gdb and get a backtrace, you will see the idle thread, which - isn't very relevant. - - - What you want is the stack of whatever process is sleeping when it - shouldn't be. You need to figure out which process that is, which is - generally fairly easy. Then you need to get its host process id, - which you can do either by looking at ps on the host or at - task.thread.extern_pid in gdb. - - - Now what you do is this: - - o detach from the current thread - - - (UML gdb) det - - - - - - o attach to the thread you are interested in - - - (UML gdb) att - - - - - - o look at its stack and anything else of interest - - - (UML gdb) bt - - - - - Note that you can't do anything at this point that requires that a - process execute, e.g. calling a function - - o when you're done looking at that process, reattach to the current - thread and continue it - - - (UML gdb) - att 1 - - - - - - - (UML gdb) - c - - - - - Here, specifying any pid which is not the process id of a UML thread - will cause gdb to reattach to the current thread. I commonly use 1, - but any other invalid pid would work. - - - - 11.3. Running ddd on UML - - ddd works on UML, but requires a special kludge. The process goes - like this: - - o Start ddd - - - host% ddd linux - - - - - - o With ps, get the pid of the gdb that ddd started. You can ask the - gdb to tell you, but for some reason that confuses things and - causes a hang. - - o run UML with 'debug=parent gdb-pid=' added to the command line - - it will just sit there after you hit return - - o type 'att 1' to the ddd gdb and you will see something like - - - 0xa013dc51 in __kill () - - - (gdb) - - - - - - o At this point, type 'c', UML will boot up, and you can use ddd just - as you do on any other process. - - - - 11.4. Debugging modules - - gdb has support for debugging code which is dynamically loaded into - the process. This support is what is needed to debug kernel modules - under UML. - - - Using that support is somewhat complicated. You have to tell gdb what - object file you just loaded into UML and where in memory it is. Then, - it can read the symbol table, and figure out where all the symbols are - from the load address that you provided. It gets more interesting - when you load the module again (i.e. after an rmmod). You have to - tell gdb to forget about all its symbols, including the main UML ones - for some reason, then load then all back in again. - - - There's an easy way and a hard way to do this. The easy way is to use - the umlgdb expect script written by Chandan Kudige. It basically - automates the process for you. - - - First, you must tell it where your modules are. There is a list in - the script that looks like this: - set MODULE_PATHS { - "fat" "/usr/src/uml/linux-2.4.18/fs/fat/fat.o" - "isofs" "/usr/src/uml/linux-2.4.18/fs/isofs/isofs.o" - "minix" "/usr/src/uml/linux-2.4.18/fs/minix/minix.o" - } - - - - - You change that to list the names and paths of the modules that you - are going to debug. Then you run it from the toplevel directory of - your UML pool and it basically tells you what to do: - - - - - ******** GDB pid is 21903 ******** - Start UML as: ./linux debug gdb-pid=21903 - - - - GNU gdb 5.0rh-5 Red Hat Linux 7.1 - Copyright 2001 Free Software Foundation, Inc. 
- GDB is free software, covered by the GNU General Public License, and you are - welcome to change it and/or distribute copies of it under certain conditions. - Type "show copying" to see the conditions. - There is absolutely no warranty for GDB. Type "show warranty" for details. - This GDB was configured as "i386-redhat-linux"... - (gdb) b sys_init_module - Breakpoint 1 at 0xa0011923: file module.c, line 349. - (gdb) att 1 - - - - - After you run UML and it sits there doing nothing, you hit return at - the 'att 1' and continue it: - - - Attaching to program: /home/jdike/linux/2.4/um/./linux, process 1 - 0xa00f4221 in __kill () - (UML gdb) c - Continuing. - - - - - At this point, you debug normally. When you insmod something, the - expect magic will kick in and you'll see something like: - - - - - - - - - - - - - - - - - - *** Module hostfs loaded *** - Breakpoint 1, sys_init_module (name_user=0x805abb0 "hostfs", - mod_user=0x8070e00) at module.c:349 - 349 char *name, *n_name, *name_tmp = NULL; - (UML gdb) finish - Run till exit from #0 sys_init_module (name_user=0x805abb0 "hostfs", - mod_user=0x8070e00) at module.c:349 - 0xa00e2e23 in execute_syscall (r=0xa8140284) at syscall_kern.c:411 - 411 else res = EXECUTE_SYSCALL(syscall, regs); - Value returned is $1 = 0 - (UML gdb) - p/x (int)module_list + module_list->size_of_struct - - $2 = 0xa9021054 - (UML gdb) symbol-file ./linux - Load new symbol table from "./linux"? (y or n) y - Reading symbols from ./linux... - done. - (UML gdb) - add-symbol-file /home/jdike/linux/2.4/um/arch/um/fs/hostfs/hostfs.o 0xa9021054 - - add symbol table from file "/home/jdike/linux/2.4/um/arch/um/fs/hostfs/hostfs.o" at - .text_addr = 0xa9021054 - (y or n) y - - Reading symbols from /home/jdike/linux/2.4/um/arch/um/fs/hostfs/hostfs.o... - done. - (UML gdb) p *module_list - $1 = {size_of_struct = 84, next = 0xa0178720, name = 0xa9022de0 "hostfs", - size = 9016, uc = {usecount = {counter = 0}, pad = 0}, flags = 1, - nsyms = 57, ndeps = 0, syms = 0xa9023170, deps = 0x0, refs = 0x0, - init = 0xa90221f0 , cleanup = 0xa902222c , - ex_table_start = 0x0, ex_table_end = 0x0, persist_start = 0x0, - persist_end = 0x0, can_unload = 0, runsize = 0, kallsyms_start = 0x0, - kallsyms_end = 0x0, - archdata_start = 0x1b855
, archdata_end = 0xe5890000 <Address 0xe5890000 out of bounds>,
- kernel_data = 0xf689c35d <Address 0xf689c35d out of bounds>}
- >> Finished loading symbols for hostfs ...
-
-
- That's the easy way.  It's highly recommended.  The hard way is
- described below in case you're interested in what's going on.
-
-
- Boot the kernel under the debugger and load the module with insmod or
- modprobe.  With gdb, do:
-
-
-      (UML gdb)  p module_list
-
-
- This is a list of modules that have been loaded into the kernel, with
- the most recently loaded module first.  Normally, the module you want
- is at module_list.  If it's not, walk down the next links, looking at
- the name fields until you find the module you want to debug.  Take
- the address of that structure, and add module.size_of_struct (which
- in 2.4.10 kernels is 96 (0x60)) to it.  Gdb can make this hard
- addition for you :-):
-
-
-      (UML gdb)
-      printf "%#x\n", (int)module_list + module_list->size_of_struct
-
-
- The offset from the module start occasionally changes (before 2.4.0,
- it was module.size_of_struct + 4), so it's a good idea to check the
- init and cleanup addresses once in a while, as described below.  Now
- do:
-
-
-      (UML gdb)
-      add-symbol-file /path/to/module/on/host that_address
-
-
- Tell gdb you really want to do it, and you're in business.
-
-
- If there's any doubt that you got the offset right, like breakpoints
- appear not to work, or they're appearing in the wrong place, you can
- check it by looking at the module structure.  The init and cleanup
- fields should look like:
-
-
-      init = 0x588066b0 <init_module>, cleanup = 0x588066c0 <cleanup_module>
-
-
- with no offsets on the symbol names.  If the names are right, but
- they are offset, then the offset tells you how much you need to add
- to the address you gave to add-symbol-file.
-
-
- When you want to load in a new version of the module, you need to get
- gdb to forget about the old one.  The only way I've found to do that
- is to tell gdb to forget about all symbols that it knows about:
-
-
-      (UML gdb)  symbol-file
-
-
- Then reload the symbols from the kernel binary:
-
-
-      (UML gdb)  symbol-file /path/to/kernel
-
-
- and repeat the process above.  You'll also need to re-enable break-
- points.  They were disabled when you dumped all the symbols because
- gdb couldn't figure out where they should go.
-
-
-
- 11.5.  Attaching gdb to the kernel
-
- If you don't have the kernel running under gdb, you can attach gdb to
- it later by sending the tracing thread a SIGUSR1.  The first line of
- the console output identifies its pid:
-
-      tracing thread pid = 20093
-
-
- When you send it the signal:
-
-
-      host% kill -USR1 20093
-
-
- you will get an xterm with gdb running in it.
-
-
- If you have the mconsole compiled into UML, then the mconsole client
- can be used to start gdb:
-
-
-      (mconsole)  config gdb=xterm
-
-
- will fire up an xterm with gdb running in it.
-
-
-
- 11.6.  Using alternate debuggers
-
- UML has support for attaching to an already running debugger rather
- than starting gdb itself.  This is present in CVS as of 17 Apr 2001.
- I sent it to Alan for inclusion in the ac tree, and it will be in my
- 2.4.4 release.
-
-
- This is useful when gdb is a subprocess of some UI, such as emacs or
- ddd.  It can also be used to run debuggers other than gdb on UML.
- Below is an example of using strace as an alternate debugger.
-
-
- To do this, you need to get the pid of the debugger and pass it in
- with the 'gdb-pid=<pid>' switch on the UML command line.
-
-
- If you are using gdb under some UI, then tell it to 'att 1', and
- you'll find yourself attached to UML.
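-
- For example, if the debugger's pid is 12345, the UML command line
- would include something like the following (the pid value here is
- just illustrative; use the one for your debugger):
-
-
-      host% ./linux debug=parent gdb-pid=12345
-
-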
- - - If you are using something other than gdb as your debugger, then - you'll need to get it to do the equivalent of 'att 1' if it doesn't do - it automatically. - - - An example of an alternate debugger is strace. You can strace the - actual kernel as follows: - - o Run the following in a shell - - - host% - sh -c 'echo pid=$$; echo -n hit return; read x; exec strace -p 1 -o strace.out' - - - - o Run UML with 'debug' and 'gdb-pid=' with the pid printed out - by the previous command - - o Hit return in the shell, and UML will start running, and strace - output will start accumulating in the output file. - - Note that this is different from running - - - host% strace ./linux - - - - - That will strace only the main UML thread, the tracing thread, which - doesn't do any of the actual kernel work. It just oversees the vir- - tual machine. In contrast, using strace as described above will show - you the low-level activity of the virtual machine. - - - - - - 12. Kernel debugging examples - - 12.1. The case of the hung fsck - - When booting up the kernel, fsck failed, and dropped me into a shell - to fix things up. I ran fsck -y, which hung: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - Setting hostname uml [ OK ] - Checking root filesystem - /dev/fhd0 was not cleanly unmounted, check forced. - Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780. - - /dev/fhd0: UNEXPECTED INCONSISTENCY; RUN fsck MANUALLY. - (i.e., without -a or -p options) - [ FAILED ] - - *** An error occurred during the file system check. - *** Dropping you to a shell; the system will reboot - *** when you leave the shell. - Give root password for maintenance - (or type Control-D for normal startup): - - [root@uml /root]# fsck -y /dev/fhd0 - fsck -y /dev/fhd0 - Parallelizing fsck version 1.14 (9-Jan-1999) - e2fsck 1.14, 9-Jan-1999 for EXT2 FS 0.5b, 95/08/09 - /dev/fhd0 contains a file system with errors, check forced. - Pass 1: Checking inodes, blocks, and sizes - Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780. Ignore error? yes - - Inode 19780, i_blocks is 1548, should be 540. Fix? yes - - Pass 2: Checking directory structure - Error reading block 49405 (Attempt to read block from filesystem resulted in short read). Ignore error? yes - - Directory inode 11858, block 0, offset 0: directory corrupted - Salvage? yes - - Missing '.' in directory inode 11858. - Fix? yes - - Missing '..' in directory inode 11858. - Fix? yes - - - - - - The standard drill in this sort of situation is to fire up gdb on the - signal thread, which, in this case, was pid 1935. In another window, - I run gdb and attach pid 1935. - - - - - ~/linux/2.3.26/um 1016: gdb linux - GNU gdb 4.17.0.11 with Linux support - Copyright 1998 Free Software Foundation, Inc. - GDB is free software, covered by the GNU General Public License, and you are - welcome to change it and/or distribute copies of it under certain conditions. - Type "show copying" to see the conditions. - There is absolutely no warranty for GDB. Type "show warranty" for details. - This GDB was configured as "i386-redhat-linux"... 
- - (gdb) att 1935 - Attaching to program `/home/dike/linux/2.3.26/um/linux', Pid 1935 - 0x100756d9 in __wait4 () - - - - - - - Let's see what's currently running: - - - - (gdb) p current_task.pid - $1 = 0 - - - - - - It's the idle thread, which means that fsck went to sleep for some - reason and never woke up. - - - Let's guess that the last process in the process list is fsck: - - - - (gdb) p current_task.prev_task.comm - $13 = "fsck.ext2\000\000\000\000\000\000" - - - - - - It is, so let's see what it thinks it's up to: - - - - (gdb) p current_task.prev_task.thread - $14 = {extern_pid = 1980, tracing = 0, want_tracing = 0, forking = 0, - kernel_stack_page = 0, signal_stack = 1342627840, syscall = {id = 4, args = { - 3, 134973440, 1024, 0, 1024}, have_result = 0, result = 50590720}, - request = {op = 2, u = {exec = {ip = 1350467584, sp = 2952789424}, fork = { - regs = {1350467584, 2952789424, 0 }, sigstack = 0, - pid = 0}, switch_to = 0x507e8000, thread = {proc = 0x507e8000, - arg = 0xaffffdb0, flags = 0, new_pid = 0}, input_request = { - op = 1350467584, fd = -1342177872, proc = 0, pid = 0}}}} - - - - - - The interesting things here are the fact that its .thread.syscall.id - is __NR_write (see the big switch in arch/um/kernel/syscall_kern.c or - the defines in include/asm-um/arch/unistd.h), and that it never - returned. Also, its .request.op is OP_SWITCH (see - arch/um/include/user_util.h). These mean that it went into a write, - and, for some reason, called schedule(). - - - The fact that it never returned from write means that its stack should - be fairly interesting. Its pid is 1980 (.thread.extern_pid). That - process is being ptraced by the signal thread, so it must be detached - before gdb can attach it: - - - - - - - - - - - (gdb) call detach(1980) - - Program received signal SIGSEGV, Segmentation fault. - - The program being debugged stopped while in a function called from GDB. - When the function (detach) is done executing, GDB will silently - stop (instead of continuing to evaluate the expression containing - the function call). - (gdb) call detach(1980) - $15 = 0 - - - - - - The first detach segfaults for some reason, and the second one - succeeds. - - - Now I detach from the signal thread, attach to the fsck thread, and - look at its stack: - - - (gdb) det - Detaching from program: /home/dike/linux/2.3.26/um/linux Pid 1935 - (gdb) att 1980 - Attaching to program `/home/dike/linux/2.3.26/um/linux', Pid 1980 - 0x10070451 in __kill () - (gdb) bt - #0 0x10070451 in __kill () - #1 0x10068ccd in usr1_pid (pid=1980) at process.c:30 - #2 0x1006a03f in _switch_to (prev=0x50072000, next=0x507e8000) - at process_kern.c:156 - #3 0x1006a052 in switch_to (prev=0x50072000, next=0x507e8000, last=0x50072000) - at process_kern.c:161 - #4 0x10001d12 in schedule () at core.c:777 - #5 0x1006a744 in __down (sem=0x507d241c) at semaphore.c:71 - #6 0x1006aa10 in __down_failed () at semaphore.c:157 - #7 0x1006c5d8 in segv_handler (sc=0x5006e940) at trap_user.c:174 - #8 0x1006c5ec in kern_segv_handler (sig=11) at trap_user.c:182 - #9 - #10 0x10155404 in errno () - #11 0x1006c0aa in segv (address=1342179328, is_write=2) at trap_kern.c:50 - #12 0x1006c5d8 in segv_handler (sc=0x5006eaf8) at trap_user.c:174 - #13 0x1006c5ec in kern_segv_handler (sig=11) at trap_user.c:182 - #14 - #15 0xc0fd in ?? () - #16 0x10016647 in sys_write (fd=3, - buf=0x80b8800
, count=1024) - at read_write.c:159 - #17 0x1006d5b3 in execute_syscall (syscall=4, args=0x5006ef08) - at syscall_kern.c:254 - #18 0x1006af87 in really_do_syscall (sig=12) at syscall_user.c:35 - #19 - #20 0x400dc8b0 in ?? () - - - - - - The interesting things here are : - - o There are two segfaults on this stack (frames 9 and 14) - - o The first faulting address (frame 11) is 0x50000800 - - (gdb) p (void *)1342179328 - $16 = (void *) 0x50000800 - - - - - - The initial faulting address is interesting because it is on the idle - thread's stack. I had been seeing the idle thread segfault for no - apparent reason, and the cause looked like stack corruption. In hopes - of catching the culprit in the act, I had turned off all protections - to that stack while the idle thread wasn't running. This apparently - tripped that trap. - - - However, the more immediate problem is that second segfault and I'm - going to concentrate on that. First, I want to see where the fault - happened, so I have to go look at the sigcontent struct in frame 8: - - - - (gdb) up - #1 0x10068ccd in usr1_pid (pid=1980) at process.c:30 - 30 kill(pid, SIGUSR1); - (gdb) - #2 0x1006a03f in _switch_to (prev=0x50072000, next=0x507e8000) - at process_kern.c:156 - 156 usr1_pid(getpid()); - (gdb) - #3 0x1006a052 in switch_to (prev=0x50072000, next=0x507e8000, last=0x50072000) - at process_kern.c:161 - 161 _switch_to(prev, next); - (gdb) - #4 0x10001d12 in schedule () at core.c:777 - 777 switch_to(prev, next, prev); - (gdb) - #5 0x1006a744 in __down (sem=0x507d241c) at semaphore.c:71 - 71 schedule(); - (gdb) - #6 0x1006aa10 in __down_failed () at semaphore.c:157 - 157 } - (gdb) - #7 0x1006c5d8 in segv_handler (sc=0x5006e940) at trap_user.c:174 - 174 segv(sc->cr2, sc->err & 2); - (gdb) - #8 0x1006c5ec in kern_segv_handler (sig=11) at trap_user.c:182 - 182 segv_handler(sc); - (gdb) p *sc - Cannot access memory at address 0x0. - - - - - That's not very useful, so I'll try a more manual method: - - - (gdb) p *((struct sigcontext *) (&sig + 1)) - $19 = {gs = 0, __gsh = 0, fs = 0, __fsh = 0, es = 43, __esh = 0, ds = 43, - __dsh = 0, edi = 1342179328, esi = 1350378548, ebp = 1342630440, - esp = 1342630420, ebx = 1348150624, edx = 1280, ecx = 0, eax = 0, - trapno = 14, err = 4, eip = 268480945, cs = 35, __csh = 0, eflags = 66118, - esp_at_signal = 1342630420, ss = 43, __ssh = 0, fpstate = 0x0, oldmask = 0, - cr2 = 1280} - - - - The ip is in handle_mm_fault: - - - (gdb) p (void *)268480945 - $20 = (void *) 0x1000b1b1 - (gdb) i sym $20 - handle_mm_fault + 57 in section .text - - - - - - Specifically, it's in pte_alloc: - - - (gdb) i line *$20 - Line 124 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h" - starts at address 0x1000b1b1 - and ends at 0x1000b1b7 . - - - - - - To find where in handle_mm_fault this is, I'll jump forward in the - code until I see an address in that procedure: - - - - (gdb) i line *0x1000b1c0 - Line 126 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h" - starts at address 0x1000b1b7 - and ends at 0x1000b1c3 . - (gdb) i line *0x1000b1d0 - Line 131 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h" - starts at address 0x1000b1d0 - and ends at 0x1000b1da . - (gdb) i line *0x1000b1e0 - Line 61 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h" - starts at address 0x1000b1da - and ends at 0x1000b1e1 . - (gdb) i line *0x1000b1f0 - Line 134 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h" - starts at address 0x1000b1f0 - and ends at 0x1000b200 . 
- (gdb) i line *0x1000b200
- Line 135 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h"
-    starts at address 0x1000b200
-    and ends at 0x1000b208 .
- (gdb) i line *0x1000b210
- Line 139 of "/home/dike/linux/2.3.26/um/include/asm/pgalloc.h"
-    starts at address 0x1000b210
-    and ends at 0x1000b219 .
- (gdb) i line *0x1000b220
- Line 1168 of "memory.c" starts at address 0x1000b21e
-    and ends at 0x1000b222 .
-
-
-
-
- Something is apparently wrong with the page tables or vma_structs, so
- let's go back to frame 11 and have a look at them:
-
-
-
- #11 0x1006c0aa in segv (address=1342179328, is_write=2) at trap_kern.c:50
- 50        handle_mm_fault(current, vma, address, is_write);
- (gdb) call pgd_offset_proc(vma->vm_mm, address)
- $22 = (pgd_t *) 0x80a548c
-
-
-
-
- That's pretty bogus.  Page tables aren't supposed to be in process
- text or data areas.  Let's see what's in the vma:
-
-
- (gdb) p *vma
- $23 = {vm_mm = 0x507d2434, vm_start = 0, vm_end = 134512640,
-   vm_next = 0x80a4f8c, vm_page_prot = {pgprot = 0}, vm_flags = 31200,
-   vm_avl_height = 2058, vm_avl_left = 0x80a8c94, vm_avl_right = 0x80d1000,
-   vm_next_share = 0xaffffdb0, vm_pprev_share = 0xaffffe63,
-   vm_ops = 0xaffffe7a, vm_pgoff = 2952789626, vm_file = 0xafffffec,
-   vm_private_data = 0x62}
- (gdb) p *vma.vm_mm
- $24 = {mmap = 0x507d2434, mmap_avl = 0x0, mmap_cache = 0x8048000,
-   pgd = 0x80a4f8c, mm_users = {counter = 0}, mm_count = {counter = 134904288},
-   map_count = 134909076, mmap_sem = {count = {counter = 135073792},
-     sleepers = -1342177872, wait = {lock = ,
-       task_list = {next = 0xaffffe63, prev = 0xaffffe7a},
-       __magic = -1342177670, __creator = -1342177300}, __magic = 98},
-   page_table_lock = {}, context = 138, start_code = 0, end_code = 0,
-   start_data = 0, end_data = 0, start_brk = 0, brk = 0, start_stack = 0,
-   arg_start = 0, arg_end = 0, env_start = 0, env_end = 0, rss = 1350381536,
-   total_vm = 0, locked_vm = 0, def_flags = 0, cpu_vm_mask = 0, swap_cnt = 0,
-   swap_address = 0, segments = 0x0}
-
-
-
-
- This is also pretty bogus.  With all of the 0x80xxxxx and 0xaffffxxx
- addresses, this is looking like a stack was plonked down on top of
- these structures.
Maybe it's a stack overflow from the next page: - - - - (gdb) p vma - $25 = (struct vm_area_struct *) 0x507d2434 - - - - - - That's towards the lower quarter of the page, so that would have to - have been pretty heavy stack overflow: - - - - - - - - - - - - - - - (gdb) x/100x $25 - 0x507d2434: 0x507d2434 0x00000000 0x08048000 0x080a4f8c - 0x507d2444: 0x00000000 0x080a79e0 0x080a8c94 0x080d1000 - 0x507d2454: 0xaffffdb0 0xaffffe63 0xaffffe7a 0xaffffe7a - 0x507d2464: 0xafffffec 0x00000062 0x0000008a 0x00000000 - 0x507d2474: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d2484: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d2494: 0x00000000 0x00000000 0x507d2fe0 0x00000000 - 0x507d24a4: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d24b4: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d24c4: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d24d4: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d24e4: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d24f4: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d2504: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d2514: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d2524: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d2534: 0x00000000 0x00000000 0x507d25dc 0x00000000 - 0x507d2544: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d2554: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d2564: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d2574: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d2584: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d2594: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d25a4: 0x00000000 0x00000000 0x00000000 0x00000000 - 0x507d25b4: 0x00000000 0x00000000 0x00000000 0x00000000 - - - - - - It's not stack overflow. The only "stack-like" piece of this data is - the vma_struct itself. - - - At this point, I don't see any avenues to pursue, so I just have to - admit that I have no idea what's going on. What I will do, though, is - stick a trap on the segfault handler which will stop if it sees any - writes to the idle thread's stack. That was the thing that happened - first, and it may be that if I can catch it immediately, what's going - on will be somewhat clearer. - - - 12.2. Episode 2: The case of the hung fsck - - After setting a trap in the SEGV handler for accesses to the signal - thread's stack, I reran the kernel. - - - fsck hung again, this time by hitting the trap: - - - - - - - - - - - - - - - - - Setting hostname uml [ OK ] - Checking root filesystem - /dev/fhd0 contains a file system with errors, check forced. - Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780. - - /dev/fhd0: UNEXPECTED INCONSISTENCY; RUN fsck MANUALLY. - (i.e., without -a or -p options) - [ FAILED ] - - *** An error occurred during the file system check. - *** Dropping you to a shell; the system will reboot - *** when you leave the shell. - Give root password for maintenance - (or type Control-D for normal startup): - - [root@uml /root]# fsck -y /dev/fhd0 - fsck -y /dev/fhd0 - Parallelizing fsck version 1.14 (9-Jan-1999) - e2fsck 1.14, 9-Jan-1999 for EXT2 FS 0.5b, 95/08/09 - /dev/fhd0 contains a file system with errors, check forced. - Pass 1: Checking inodes, blocks, and sizes - Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780. Ignore error? 
yes - - Pass 2: Checking directory structure - Error reading block 49405 (Attempt to read block from filesystem resulted in short read). Ignore error? yes - - Directory inode 11858, block 0, offset 0: directory corrupted - Salvage? yes - - Missing '.' in directory inode 11858. - Fix? yes - - Missing '..' in directory inode 11858. - Fix? yes - - Untested (4127) [100fe44c]: trap_kern.c line 31 - - - - - - I need to get the signal thread to detach from pid 4127 so that I can - attach to it with gdb. This is done by sending it a SIGUSR1, which is - caught by the signal thread, which detaches the process: - - - kill -USR1 4127 - - - - - - Now I can run gdb on it: - - - - - - - - - - - - - - ~/linux/2.3.26/um 1034: gdb linux - GNU gdb 4.17.0.11 with Linux support - Copyright 1998 Free Software Foundation, Inc. - GDB is free software, covered by the GNU General Public License, and you are - welcome to change it and/or distribute copies of it under certain conditions. - Type "show copying" to see the conditions. - There is absolutely no warranty for GDB. Type "show warranty" for details. - This GDB was configured as "i386-redhat-linux"... - (gdb) att 4127 - Attaching to program `/home/dike/linux/2.3.26/um/linux', Pid 4127 - 0x10075891 in __libc_nanosleep () - - - - - - The backtrace shows that it was in a write and that the fault address - (address in frame 3) is 0x50000800, which is right in the middle of - the signal thread's stack page: - - - (gdb) bt - #0 0x10075891 in __libc_nanosleep () - #1 0x1007584d in __sleep (seconds=1000000) - at ../sysdeps/unix/sysv/linux/sleep.c:78 - #2 0x1006ce9a in stop () at user_util.c:191 - #3 0x1006bf88 in segv (address=1342179328, is_write=2) at trap_kern.c:31 - #4 0x1006c628 in segv_handler (sc=0x5006eaf8) at trap_user.c:174 - #5 0x1006c63c in kern_segv_handler (sig=11) at trap_user.c:182 - #6 - #7 0xc0fd in ?? () - #8 0x10016647 in sys_write (fd=3, buf=0x80b8800 "R.", count=1024) - at read_write.c:159 - #9 0x1006d603 in execute_syscall (syscall=4, args=0x5006ef08) - at syscall_kern.c:254 - #10 0x1006af87 in really_do_syscall (sig=12) at syscall_user.c:35 - #11 - #12 0x400dc8b0 in ?? () - #13 - #14 0x400dc8b0 in ?? () - #15 0x80545fd in ?? () - #16 0x804daae in ?? () - #17 0x8054334 in ?? () - #18 0x804d23e in ?? () - #19 0x8049632 in ?? () - #20 0x80491d2 in ?? () - #21 0x80596b5 in ?? () - (gdb) p (void *)1342179328 - $3 = (void *) 0x50000800 - - - - - - Going up the stack to the segv_handler frame and looking at where in - the code the access happened shows that it happened near line 110 of - block_dev.c: - - - - - - - - - - (gdb) up - #1 0x1007584d in __sleep (seconds=1000000) - at ../sysdeps/unix/sysv/linux/sleep.c:78 - ../sysdeps/unix/sysv/linux/sleep.c:78: No such file or directory. 
- (gdb) - #2 0x1006ce9a in stop () at user_util.c:191 - 191 while(1) sleep(1000000); - (gdb) - #3 0x1006bf88 in segv (address=1342179328, is_write=2) at trap_kern.c:31 - 31 KERN_UNTESTED(); - (gdb) - #4 0x1006c628 in segv_handler (sc=0x5006eaf8) at trap_user.c:174 - 174 segv(sc->cr2, sc->err & 2); - (gdb) p *sc - $1 = {gs = 0, __gsh = 0, fs = 0, __fsh = 0, es = 43, __esh = 0, ds = 43, - __dsh = 0, edi = 1342179328, esi = 134973440, ebp = 1342631484, - esp = 1342630864, ebx = 256, edx = 0, ecx = 256, eax = 1024, trapno = 14, - err = 6, eip = 268550834, cs = 35, __csh = 0, eflags = 66070, - esp_at_signal = 1342630864, ss = 43, __ssh = 0, fpstate = 0x0, oldmask = 0, - cr2 = 1342179328} - (gdb) p (void *)268550834 - $2 = (void *) 0x1001c2b2 - (gdb) i sym $2 - block_write + 1090 in section .text - (gdb) i line *$2 - Line 209 of "/home/dike/linux/2.3.26/um/include/asm/arch/string.h" - starts at address 0x1001c2a1 <block_write+1073> - and ends at 0x1001c2bf <block_write+1103>. - (gdb) i line *0x1001c2c0 - Line 110 of "block_dev.c" starts at address 0x1001c2bf <block_write+1103> - and ends at 0x1001c2e3 <block_write+1139>. - - - - - - Looking at the source shows that the fault happened during a call to - copy_from_user to copy the data into the kernel: - - - 107 count -= chars; - 108 copy_from_user(p,buf,chars); - 109 p += chars; - 110 buf += chars; - - - - - - p is the pointer which must contain 0x50000800, since buf contains - 0x80b8800 (frame 8 above). It is defined as: - - - p = offset + bh->b_data; - - - - - - I need to figure out what bh is, and it just so happens that bh is - passed as an argument to mark_buffer_uptodate and mark_buffer_dirty a - few lines later, so I do a little disassembly: - - - - - (gdb) disas 0x1001c2bf 0x1001c2e0 - Dump of assembler code from 0x1001c2bf to 0x1001c2d0: - 0x1001c2bf <block_write+1103>: addl %eax,0xc(%ebp) - 0x1001c2c2 <block_write+1106>: movl 0xfffffdd4(%ebp),%edx - 0x1001c2c8 <block_write+1112>: btsl $0x0,0x18(%edx) - 0x1001c2cd <block_write+1117>: btsl $0x1,0x18(%edx) - 0x1001c2d2 <block_write+1122>: sbbl %ecx,%ecx - 0x1001c2d4 <block_write+1124>: testl %ecx,%ecx - 0x1001c2d6 <block_write+1126>: jne 0x1001c2e3 <block_write+1139> - 0x1001c2d8 <block_write+1128>: pushl $0x0 - 0x1001c2da <block_write+1130>: pushl %edx - 0x1001c2db <block_write+1131>: call 0x1001819c <__mark_buffer_dirty> - End of assembler dump. - - - - - - At that point, bh is in %edx (address 0x1001c2da), which is calculated - at 0x1001c2c2 as %ebp + 0xfffffdd4 (0xfffffdd4 is -556 as a signed 32-bit - value, so bh is a local variable 556 bytes below the frame pointer), so I - figure exactly what that is, - taking %ebp from the sigcontext_struct above: - - - (gdb) p (void *)1342631484 - $5 = (void *) 0x5006ee3c - (gdb) p 0x5006ee3c+0xfffffdd4 - $6 = 1342630928 - (gdb) p (void *)$6 - $7 = (void *) 0x5006ec10 - (gdb) p *((void **)$7) - $8 = (void *) 0x50100200 - - - - - - Now, I look at the structure to see what's in it, and particularly, - what its b_data field contains: - - - (gdb) p *((struct buffer_head *)0x50100200) - $13 = {b_next = 0x50289380, b_blocknr = 49405, b_size = 1024, b_list = 0, - b_dev = 15872, b_count = {counter = 1}, b_rdev = 15872, b_state = 24, - b_flushtime = 0, b_next_free = 0x501001a0, b_prev_free = 0x50100260, - b_this_page = 0x501001a0, b_reqnext = 0x0, b_pprev = 0x507fcf58, - b_data = 0x50000800 "", b_page = 0x50004000, - b_end_io = 0x10017f60 <end_buffer_io_sync>, b_dev_id = 0x0, - b_rsector = 98810, b_wait = {lock = , - task_list = {next = 0x50100248, prev = 0x50100248}, __magic = 1343226448, - __creator = 0}, b_kiobuf = 0x0} - - - - - - The b_data field is indeed 0x50000800, so the question becomes how - that happened. The rest of the structure looks fine, so this probably - is not a case of data corruption. It happened on purpose somehow. - - - The b_page field is a pointer to the page_struct representing the - 0x50000000 page.
Looking at it shows the kernel's idea of the state - of that page: - - - - (gdb) p *$13.b_page - $17 = {list = {next = 0x50004a5c, prev = 0x100c5174}, mapping = 0x0, - index = 0, next_hash = 0x0, count = {counter = 1}, flags = 132, lru = { - next = 0x50008460, prev = 0x50019350}, wait = { - lock = , task_list = {next = 0x50004024, - prev = 0x50004024}, __magic = 1342193708, __creator = 0}, - pprev_hash = 0x0, buffers = 0x501002c0, virtual = 1342177280, - zone = 0x100c5160} - - - - - - Some sanity-checking: the virtual field shows the "virtual" address of - this page, which in this kernel is the same as its "physical" address, - and the page_struct itself should be mem_map[0], since it represents - the first page of memory: - - - - (gdb) p (void *)1342177280 - $18 = (void *) 0x50000000 - (gdb) p mem_map - $19 = (mem_map_t *) 0x50004000 - - - - - - These check out fine. - - - Now to check out the page_struct itself. In particular, the flags - field shows whether the page is considered free or not: - - - (gdb) p (void *)132 - $21 = (void *) 0x84 - - - - - - The "reserved" bit is the high bit, which is definitely not set, so - the kernel considers the signal stack page to be free and available to - be used. - - - At this point, I jump to conclusions and start looking at my early - boot code, because that's where that page is supposed to be reserved. - - - In my setup_arch procedure, I have the following code which looks just - fine: - - - - bootmap_size = init_bootmem(start_pfn, end_pfn - start_pfn); - free_bootmem(__pa(low_physmem) + bootmap_size, high_physmem - low_physmem); - - - - - - Two stack pages have already been allocated, and low_physmem points to - the third page, which is the beginning of free memory. - The init_bootmem call declares the entire memory to the boot memory - manager, which marks it all reserved. The free_bootmem call frees up - all of it, except for the first two pages. This looks correct to me. - - - So, I decide to see init_bootmem run and make sure that it is marking - those first two pages as reserved. I never get that far. - - - Stepping into init_bootmem, and looking at bootmem_map before looking - at what it contains shows the following: - - - - (gdb) p bootmem_map - $3 = (void *) 0x50000000 - - - - - - Aha! The light dawns. That first page is doing double duty as a - stack and as the boot memory map. The last thing that the boot memory - manager does is to free the pages used by its memory map, so this page - is getting freed even though it's marked as reserved. - - - The fix was to initialize the boot memory manager before allocating - those two stack pages, and then allocate them through the boot memory - manager. After doing this, and fixing a couple of subsequent buglets, - the stack corruption problem disappeared.
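- - To make the fix concrete, here is a minimal sketch of the corrected - setup_arch ordering. This is an illustration of the idea rather than the - actual UML code; sig_stack and alloc_bootmem_pages stand in for however - the stack pages are really obtained: - - - /* Bring up the boot memory manager first... */ - bootmap_size = init_bootmem(start_pfn, end_pfn - start_pfn); - free_bootmem(__pa(low_physmem) + bootmap_size, high_physmem - low_physmem); - - /* ...and only then carve out the two stack pages, so the boot - * memory manager knows they are taken and will neither place its - * map on top of them nor free them along with that map later. */ - sig_stack = alloc_bootmem_pages(2 * PAGE_SIZE); - -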
- - - - 13. What to do when UML doesn't work - - - - - 13.1. Strange compilation errors when you build from source - - As of test11, it is necessary to have "ARCH=um" in the environment or - on the make command line for all steps in building UML, including - clean, distclean, or mrproper, config, menuconfig, or xconfig, dep, - and linux. If you forget for any of them, the i386 build seems to - contaminate the UML build. If this happens, start from scratch with - - - host% - make mrproper ARCH=um - - - - - and repeat the build process with ARCH=um on all the steps. - - - See ``Compiling the kernel and modules'' for more details. - - - Another cause of strange compilation errors is building UML in - /usr/src/linux. If you do this, the first thing you need to do is - clean up the mess you made. The /usr/src/linux/asm link will now - point to /usr/src/linux/asm-um. Make it point back to - /usr/src/linux/asm-i386. Then, move your UML pool someplace else and - build it there. Also see below, where a more specific set of symptoms - is described. - - - - 13.3. A variety of panics and hangs with /tmp on a reiserfs filesystem - - I saw this on reiserfs 3.5.21 and it seems to be fixed in 3.5.27. - Panics preceded by - - - Detaching pid nnnn - - - - are diagnostic of this problem. This is a reiserfs bug which causes a - thread to occasionally read stale data from a mmapped page shared with - another thread. The fix is to upgrade the filesystem or to have /tmp - be an ext2 filesystem. - - - - 13.4. The compile fails with errors about conflicting types for - 'open', 'dup', and 'waitpid' - - This happens when you build in /usr/src/linux. The UML build makes - the include/asm link point to include/asm-um. /usr/include/asm points - to /usr/src/linux/include/asm, so when that link gets moved, files - which need to include the asm-i386 versions of headers get the - incompatible asm-um versions. The fix is to move the include/asm link - back to include/asm-i386 and to do UML builds someplace else. - - - - 13.5. UML doesn't work when /tmp is an NFS filesystem - - This seems to be a situation similar to the ReiserFS problem above. - Some versions of NFS seem not to handle mmap correctly, which UML - depends on. The workaround is to have /tmp be a non-NFS directory. - - - 13.6. UML hangs on boot when compiled with gprof support - - If you build UML with gprof support and, early in the boot, it does - this - - - kernel BUG at page_alloc.c:100! - - - - - you have a buggy gcc. You can work around the problem by removing - UM_FASTCALL from CFLAGS in arch/um/Makefile-i386. This will open up - another bug, but that one is fairly hard to reproduce. - - - - 13.7. syslogd dies with a SIGTERM on startup - - The exact boot error depends on the distribution that you're booting, - but Debian produces this: - - - /etc/rc2.d/S10sysklogd: line 49: 93 Terminated - start-stop-daemon --start --quiet --exec /sbin/syslogd -- $SYSLOGD - - - - - This is a syslogd bug. There's a race between a parent process - installing a signal handler and its child sending the signal. See - this uml-devel post for the details. - - - - 13.8. TUN/TAP networking doesn't work on a 2.4 host - - There are a couple of problems which were pointed out by Tim Robinson - - o It doesn't work on hosts running 2.4.7 (or thereabouts) or earlier. - The fix is to upgrade to something more recent and then read the - next item. - - o If you see - - - File descriptor in bad state - - - - when you bring up the device inside UML, you have a header mismatch - between the original kernel and the upgraded one. Make /usr/src/linux - point at the new headers. This will only be a problem if you build - uml_net yourself. - - - - 13.9. You can network to the host but not to other machines on the - net - - If you can connect to the host, and the host can connect to UML, but - you cannot connect to any other machines, then you may need to enable - IP Masquerading on the host. Usually this is only experienced when - using private IP addresses (192.168.x.x or 10.x.x.x) for host/UML - networking, rather than the public address space that your host is - connected to. UML does not enable IP Masquerading, so you will need - to create a static rule to enable it: - - - host% - iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE - - - - - Replace eth0 with the interface that you use to talk to the rest of - the world.
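- - One addition worth making here: the masquerade rule only does its job - if the host is forwarding packets in the first place, so if the rule - alone does not help, check that IP forwarding is enabled on the host: - - - host% - echo 1 > /proc/sys/net/ipv4/ip_forward - -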
- - Documentation on IP Masquerading, and SNAT, can be found at - www.netfilter.org. - - - If you can reach the local net, but not the outside Internet, then - that is usually a routing problem. The UML needs a default route: - - - UML# - route add default gw gateway IP - - - - - The gateway IP can be any machine on the local net that knows how to - reach the outside world. Usually, this is the host or the local - network's gateway. - - - Occasionally, we hear from someone who can reach some machines, but - not others on the same net, or who can reach some ports on other - machines, but not others. These are usually caused by strange - firewalling somewhere between the UML and the other box. You track - this down by running tcpdump on every interface the packets travel - over and seeing where they disappear. When you find a machine that - takes the packets in, but does not send them onward, that's the culprit. - - - - 13.10. I have no root and I want to scream - - Thanks to Birgit Wahlich for telling me about this strange one. It - turns out that there's a limit of six environment variables on the - kernel command line. When that limit is reached or exceeded, argument - processing stops, which means that the 'root=' argument that UML - usually adds is not seen. So, the filesystem has no idea what the - root device is, so it panics. - - - The fix is to put less stuff on the command line. Glomming all your - setup variables into one is probably the best way to go.
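- - For example (the variable names here are made up), a command line like - - - host% - linux v1=a v2=b v3=c v4=d v5=e v6=f v7=g - - - - - runs into the limit before UML's own root= is processed, while - collapsing the settings into one variable stays well under it: - - - host% - linux vars="v1=a v2=b v3=c v4=d v5=e v6=f v7=g" - -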
- - - 13.11. UML build conflict between ptrace.h and ucontext.h - - On some older systems, /usr/include/asm/ptrace.h and - /usr/include/sys/ucontext.h define the same names. So, when they're - included together, the defines from one completely mess up the parsing - of the other, producing errors like: - /usr/include/sys/ucontext.h:47: parse error before - `10' - - - - - plus a pile of warnings. - - - This is a libc botch, which has since been fixed, and I don't see any - way around it besides upgrading. - - - - 13.12. The UML BogoMips is exactly half the host's BogoMips - - On i386 kernels, there are two ways of running the loop that is used - to calculate the BogoMips rating, using the TSC if it's there or using - a one-instruction loop. The TSC produces twice the BogoMips as the - loop. UML uses the loop, since it has nothing resembling a TSC, and - will get almost exactly the same BogoMips as a host using the loop. - However, on a host with a TSC, its BogoMips will be double the loop - BogoMips, and therefore double the UML BogoMips. - - - - 13.13. When you run UML, it immediately segfaults - - If the host is configured with the 2G/2G address space split, that's - why. See ``UML on 2G/2G hosts'' for the details on getting UML to - run on your host. - - - - 13.14. xterms appear, then immediately disappear - - If you're running an up-to-date kernel with an old release of - uml_utilities, the port-helper program will not work properly, so - xterms will exit straight after they appear. The solution is to - upgrade to the latest release of uml_utilities. Usually this problem - occurs when you have installed a packaged release of UML then compiled - your own development kernel without upgrading the uml_utilities from - the source distribution. - - - - 13.15. Any other panic, hang, or strange behavior - - If you're seeing truly strange behavior, such as hangs or panics that - happen in random places, or you try running the debugger to see what's - happening and it acts strangely, then it could be a problem in the - host kernel. If you're not running a stock Linus or -ac kernel, then - try that. An early version of the preemption patch and a 2.4.10 SuSE - kernel have caused very strange problems in UML. - - - Otherwise, let me know about it. Send a message to one of the UML - mailing lists - either the developer list - user-mode-linux-devel at - lists dot sourceforge dot net (subscription info) or the user list - - user-mode-linux-user at lists dot sourceforge dot net (subscription - info), whichever you prefer. Don't assume that everyone knows about - it and that a fix is imminent. - - - If you want to be super-helpful, read ``Diagnosing Problems'' and - follow the instructions contained therein. - 14. Diagnosing Problems - - - If you get UML to crash, hang, or otherwise misbehave, you should - report this on one of the project mailing lists, either the developer - list - user-mode-linux-devel at lists dot sourceforge dot net - (subscription info) or the user list - user-mode-linux-user at lists - dot sourceforge dot net (subscription info). When you do, it is - likely that I will want more information. So, it would be helpful to - read the stuff below, do whatever is applicable in your case, and - report the results to the list. - - - For any diagnosis, you're going to need to build a debugging kernel. - The binaries from this site aren't debuggable. If you haven't done - this before, read about ``Compiling the kernel and modules'' and - ``Kernel debugging'' first. - - - 14.1. Case 1 : Normal kernel panics - - The most common case is for a normal thread to panic. To debug this, - you will need to run it under the debugger (add 'debug' to the command - line). An xterm will start up with gdb running inside it. Continue - it when it stops in start_kernel and make it crash. Now ^C gdb and - get a backtrace. - - - If the panic was a "Kernel mode fault", then there will be a segv - frame on the stack and I'm going to want some more information. The - stack might look something like this: - - - (UML gdb) backtrace - #0 0x1009bf76 in __sigprocmask (how=1, set=0x5f347940, oset=0x0) - at ../sysdeps/unix/sysv/linux/sigprocmask.c:49 - #1 0x10091411 in change_sig (signal=10, on=1) at process.c:218 - #2 0x10094785 in timer_handler (sig=26) at time_kern.c:32 - #3 0x1009bf38 in __restore () - at ../sysdeps/unix/sysv/linux/i386/sigaction.c:125 - #4 0x1009534c in segv (address=8, ip=268849158, is_write=2, is_user=0) - at trap_kern.c:66 - #5 0x10095c04 in segv_handler (sig=11) at trap_user.c:285 - #6 0x1009bf38 in __restore () - - - - - I'm going to want to see the symbol and line information for the value - of ip in the segv frame. In this case, you would do the following: - - - (UML gdb) i sym 268849158 - - - - - and - - - (UML gdb) i line *268849158 - - - - - The reason for this is that the __restore frame right above the - segv_handler frame is hiding the frame that actually segfaulted. So, I - have to get that information from the faulting ip.
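- - To show the shape of the output those two commands produce, here is a - made-up example - the function name, offset, file, and line below are - invented for illustration, and the real output will name whatever - kernel code contains the faulting ip: - - - (UML gdb) i sym 268849158 - some_kernel_function + 278 in section .text - (UML gdb) i line *268849158 - Line 42 of "some_file.c" starts at address 0x10064f00 - and ends at 0x10064f1e. - - - - - It is this output, verbatim, that should go into the problem report. - -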
- - 14.2. Case 2 : Tracing thread panics - - The less common and more painful case is when the tracing thread - panics. In this case, the kernel debugger will be useless because it - needs a healthy tracing thread in order to work. The first thing to - do is get a backtrace from the tracing thread. This is done by - figuring out what its pid is, firing up gdb, and attaching it to that - pid. You can figure out the tracing thread pid by looking at the - first line of the console output, which will look like this: - - - tracing thread pid = 15851 - - - - - or by running ps on the host and finding the line that looks like - this: - - - jdike 15851 4.5 0.4 132568 1104 pts/0 S 21:34 0:05 ./linux [(tracing thread)] - - - - - If the panic was 'segfault in signals', then follow the instructions - above for collecting information about the location of the seg fault. - - - If the tracing thread flaked out all by itself, then send that - backtrace in and wait for our crack debugging team to fix the problem. - - - 14.3. Case 3 : Tracing thread panics caused by other threads - - However, there are cases where the misbehavior of another thread - caused the problem. The most common panic of this type is: - - - wait_for_stop failed to wait for <pid> to stop with <signal number> - - - - - In this case, you'll need to get a backtrace from the process - mentioned in the panic, which is complicated by the fact that the - kernel debugger is defunct and, without some fancy footwork, another - gdb can't attach to it. So, this is how the fancy footwork goes: - - In a shell: - - - host% kill -STOP pid - - - - - Run gdb on the tracing thread as described in case 2 and do: - - - (host gdb) call detach(pid) - - - If you get a segfault, do it again. It always works the second time. - - Detach from the tracing thread and attach to that other thread: - - - (host gdb) detach - - - - - - - (host gdb) attach pid - - - - - If gdb hangs when attaching to that process, go back to a shell and - do: - - - host% - kill -CONT pid - - - - - And then get the backtrace: - - - (host gdb) backtrace - - - - - - 14.4. Case 4 : Hangs - - Hangs seem to be fairly rare, but they sometimes happen. When a hang - happens, we need a backtrace from the offending process. Run the - kernel debugger as described in case 1 and get a backtrace. If the - current process is not the idle thread, then send in the backtrace. - You can tell that it's the idle thread if the stack looks like this: - - - #0 0x100b1401 in __libc_nanosleep () - #1 0x100a2885 in idle_sleep (secs=10) at time.c:122 - #2 0x100a546f in do_idle () at process_kern.c:445 - #3 0x100a5508 in cpu_idle () at process_kern.c:471 - #4 0x100ec18f in start_kernel () at init/main.c:592 - #5 0x100a3e10 in start_kernel_proc (unused=0x0) at um_arch.c:71 - #6 0x100a383f in signal_tramp (arg=0x100a3dd8) at trap_user.c:50 - - - - - If this is the case, then some other process is at fault, and went to - sleep when it shouldn't have. Run ps on the host and figure out which - process should not have gone to sleep and stayed asleep. Then attach - to it with gdb and get a backtrace as described in case 3. - - - - - - - 15. Thanks - - - A number of people have helped this project in various ways, and this - page gives recognition where recognition is due. - - - If you're listed here and you would prefer a real link on your name, - or no link at all, instead of the despammed email address pseudo-link, - let me know. - - - If you're not listed here and you think maybe you should be, please - let me know that as well. I try to get everyone, but sometimes my - bookkeeping lapses and I forget about contributions. - - - 15.1.
Code and Documentation - - Rusty Russell - - - o wrote the HOWTO - - o prodded me into making this project official and putting it on - SourceForge - - o came up with the way cool UML logo - - o redid the config process - - - Peter Moulder - Fixed my config and build - processes, and added some useful code to the block driver - - - Bill Stearns - - - o HOWTO updates - - o lots of bug reports - - o lots of testing - - o dedicated a box (uml.ists.dartmouth.edu) to support UML development - - o wrote the mkrootfs script, which allows bootable filesystems of - RPM-based distributions to be cranked out - - o cranked out a large number of filesystems with said script - - - Jim Leu - Wrote the virtual ethernet driver - and associated usermode tools - - Lars Brinkhoff - Contributed the ptrace - proxy from his own project to allow easier - kernel debugging - - - Andrea Arcangeli - Redid some of the early boot - code so that it would work on machines with Large File Support - - - Chris Emerson - Did - the first UML port to Linux/ppc - - - Harald Welte - Wrote the multicast - transport for the network driver - - - Jorgen Cederlof - Added special file support to hostfs - - - Greg Lonnon - Changed the ubd driver - to allow it to layer a COW file on a shared read-only filesystem and - wrote the iomem emulation support - - - Henrik Nordstrom - Provided a variety - of patches, fixes, and clues - - - Lennert Buytenhek - Contributed various patches, a rewrite of the - network driver, the first implementation of the mconsole driver, and - did the bulk of the work needed to get SMP working again. - - - Yon Uriarte - Fixed the TUN/TAP network backend while I slept. - - - Adam Heath - Made a bunch of nice cleanups to the initialization code, - plus various other small patches. - - - Matt Zimmerman - Matt volunteered to be the UML Debian maintainer and - is doing a real nice job of it. He also noticed and fixed a number of - actually and potentially exploitable security holes in uml_net. Plus - the occasional patch. I like patches. - - - James McMechan - James seems to have taken over maintenance of the ubd - driver and is doing a nice job of it. - - - Chandan Kudige - wrote the umlgdb script which automates the reloading - of module symbols. - - - Steve Schmidtke - wrote the UML slirp transport and hostaudio drivers, - enabling UML processes to access audio devices on the host. He also - submitted patches for the slip transport and lots of other things. - - - David Coulson - - - o Set up the usermodelinux.org site, - which is a great way of keeping the UML user community on top of - UML goings-on. - - o Site documentation and updates - - o Nifty little UML management daemon UMLd - - - o Lots of testing and bug reports - - - - - 15.2. Flushing out bugs - - - - o Yuri Pudgorodsky - - o Gerald Britton - - o Ian Wehrman - - o Gord Lamb - - o Eugene Koontz - - o John H. Hartman - - o Anders Karlsson - - o Daniel Phillips - - o John Fremlin - - o Rainer Burgstaller - - o James Stevenson - - o Matt Clay - - o Cliff Jefferies - - o Geoff Hoff - - o Lennert Buytenhek - - o Al Viro - - o Frank Klingenhoefer - - o Livio Baldini Soares - - o Jon Burgess - - o Petru Paler - - o Paul - - o Chris Reahard - - o Sverker Nilsson - - o Gong Su - - o johan verrept - - o Bjorn Eriksson - - o Lorenzo Allegrucci - - o Muli Ben-Yehuda - - o David Mansfield - - o Howard Goff - - o Mike Anderson - - o John Byrne - - o Sapan J. Batia - - o Iris Huang - - o Jan Hudec - - o Voluspa - - - - - 15.3. 
Buglets and clean-ups - - - - o Dave Zarzycki - - o Adam Lazur - - o Boria Feigin - - o Brian J. Murrell - - o JS - - o Roman Zippel - - o Wil Cooley - - o Ayelet Shemesh - - o Will Dyson - - o Sverker Nilsson - - o dvorak - - o v.naga srinivas - - o Shlomi Fish - - o Roger Binns - - o johan verrept - - o MrChuoi - - o Peter Cleve - - o Vincent Guffens - - o Nathan Scott - - o Patrick Caulfield - - o jbearce - - o Catalin Marinas - - o Shane Spencer - - o Zou Min - - - o Ryan Boder - - o Lorenzo Colitti - - o Gwendal Grignou - - o Andre' Breiler - - o Tsutomu Yasuda - - - - 15.4. Case Studies - - - o Jon Wright - - o William McEwan - - o Michael Richardson - - - - 15.5. Other contributions - - - Bill Carr made the Red Hat mkrootfs script - work with RH 6.2. - - Michael Jennings sent in some material which - is now gracing the top of the index page of this site. - - SGI (and more specifically Ralf Baechle ) gave me an account on oss.sgi.com - . The bandwidth there made it possible to - produce most of the filesystems available on the project download - page. - - Laurent Bonnaud took the old grotty - Debian filesystem that I've been distributing and updated it to 2.2. - It is now available by itself here. - - Rik van Riel gave me some ftp space on ftp.nl.linux.org so I can make - releases even when Sourceforge is broken. - - Rodrigo de Castro looked at my broken pte code and told me what was - wrong with it, letting me fix a long-standing (several weeks) and - serious set of bugs. - - Chris Reahard built a specialized root filesystem for running a DNS - server jailed inside UML. It's available from the download - page in the Jail - Filesystems section. - - - - - - - - - - - - diff --git a/MAINTAINERS b/MAINTAINERS index debbb7b97c98..1aec93695040 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8727,7 +8727,7 @@ L: kvm@vger.kernel.org W: http://www.linux-kvm.org T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git S: Supported -F: Documentation/virtual/kvm/ +F: Documentation/virt/kvm/ F: include/trace/events/kvm.h F: include/uapi/asm-generic/kvm* F: include/uapi/linux/kvm* @@ -12054,7 +12054,7 @@ M: Juergen Gross M: Alok Kataria L: virtualization@lists.linux-foundation.org S: Supported -F: Documentation/virtual/paravirt_ops.txt +F: Documentation/virt/paravirt_ops.txt F: arch/*/kernel/paravirt* F: arch/*/include/asm/paravirt*.h F: include/linux/hypervisor.h @@ -16745,7 +16745,7 @@ W: http://user-mode-linux.sourceforge.net Q: https://patchwork.ozlabs.org/project/linux-um/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git S: Maintained -F: Documentation/virtual/uml/ +F: Documentation/virt/uml/ F: arch/um/ F: arch/x86/um/ F: fs/hostfs/ diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h index 01555c6ae0f5..be48c2215fa2 100644 --- a/arch/powerpc/include/uapi/asm/kvm_para.h +++ b/arch/powerpc/include/uapi/asm/kvm_para.h @@ -31,7 +31,7 @@ * Struct fields are always 32 or 64 bit aligned, depending on them being 32 * or 64 bit wide respectively. * - * See Documentation/virtual/kvm/ppc-pv.txt + * See Documentation/virt/kvm/ppc-pv.txt */ struct kvm_vcpu_arch_shared { __u64 scratch1; diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 8f72526e2f68..24843cf49579 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3466,7 +3466,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level, /* * Currently, fast page fault only works for direct mapping * since the gfn is not stable for indirect shadow page. 
See - * Documentation/virtual/kvm/locking.txt to get more detail. + * Documentation/virt/kvm/locking.txt to get more detail. */ fault_handled = fast_pf_fix_direct_spte(vcpu, sp, iterator.sptep, spte, diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index a7c19540ce21..5e3f12d5359e 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -116,7 +116,7 @@ struct kvm_irq_level { * ACPI gsi notion of irq. * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47.. * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23.. - * For ARM: See Documentation/virtual/kvm/api.txt + * For ARM: See Documentation/virt/kvm/api.txt */ union { __u32 irq; @@ -1086,7 +1086,7 @@ struct kvm_xen_hvm_config { * * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies * the irqfd to operate in resampling mode for level triggered interrupt - * emulation. See Documentation/virtual/kvm/api.txt. + * emulation. See Documentation/virt/kvm/api.txt. */ #define KVM_IRQFD_FLAG_RESAMPLE (1 << 1) diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index c2152f3dd02d..e7c67be7c15f 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -116,7 +116,7 @@ struct kvm_irq_level { * ACPI gsi notion of irq. * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47.. * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23.. - * For ARM: See Documentation/virtual/kvm/api.txt + * For ARM: See Documentation/virt/kvm/api.txt */ union { __u32 irq; @@ -1085,7 +1085,7 @@ struct kvm_xen_hvm_config { * * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies * the irqfd to operate in resampling mode for level triggered interrupt - * emulation. See Documentation/virtual/kvm/api.txt. + * emulation. See Documentation/virt/kvm/api.txt. */ #define KVM_IRQFD_FLAG_RESAMPLE (1 << 1) diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index f645c0fbf7ec..acc43242a310 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -727,7 +727,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) * Ensure we set mode to IN_GUEST_MODE after we disable * interrupts and before the final VCPU requests check. * See the comment in kvm_vcpu_exiting_guest_mode() and - * Documentation/virtual/kvm/vcpu-requests.rst + * Documentation/virt/kvm/vcpu-requests.rst */ smp_store_mb(vcpu->mode, IN_GUEST_MODE); diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 936962abc38d..c45e2d7e942f 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -250,7 +250,7 @@ static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu, * pending state of interrupt is latched in pending_latch variable. * Userspace will save and restore pending state and line_level * separately. - * Refer to Documentation/virtual/kvm/devices/arm-vgic-v3.txt + * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.txt * for handling of ISPENDR and ICPENDR. */ for (i = 0; i < len * 8; i++) { diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index 57205beaa981..3b7525deec80 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -42,7 +42,7 @@ VGIC_AFFINITY_LEVEL(val, 3)) /* - * As per Documentation/virtual/kvm/devices/arm-vgic-v3.txt, + * As per Documentation/virt/kvm/devices/arm-vgic-v3.txt, * below macros are defined for CPUREG encoding. 
*/ #define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK 0x000000000000c000 @@ -63,7 +63,7 @@ KVM_REG_ARM_VGIC_SYSREG_OP2_MASK) /* - * As per Documentation/virtual/kvm/devices/arm-vgic-its.txt, + * As per Documentation/virt/kvm/devices/arm-vgic-its.txt, * below macros are defined for ITS table entry encoding. */ #define KVM_ITS_CTE_VALID_SHIFT 63 -- cgit v1.2.3 From d9c5252295218df4cfe64353aa860d7b5c8700ef Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 25 Jul 2019 16:58:31 +0900 Subject: treewide: add "WITH Linux-syscall-note" to SPDX tag of uapi headers UAPI headers licensed under GPL are supposed to have exception "WITH Linux-syscall-note" so that they can be included into non-GPL user space application code. The exception note is missing in some UAPI headers. Some of them slipped in by the treewide conversion commit b24413180f56 ("License cleanup: add SPDX GPL-2.0 license identifier to files with no license"). Just run: $ git show --oneline b24413180f56 -- arch/x86/include/uapi/asm/ I believe they are not intentional, and should be fixed too. This patch was generated by the following script: git grep -l --not -e Linux-syscall-note --and -e SPDX-License-Identifier \ -- :arch/*/include/uapi/asm/*.h :include/uapi/ :^*/Kbuild | while read file do sed -i -e '/[[:space:]]OR[[:space:]]/s/\(GPL-[^[:space:]]*\)/(\1 WITH Linux-syscall-note)/g' \ -e '/[[:space:]]or[[:space:]]/s/\(GPL-[^[:space:]]*\)/(\1 WITH Linux-syscall-note)/g' \ -e '/[[:space:]]OR[[:space:]]/!{/[[:space:]]or[[:space:]]/!s/\(GPL-[^[:space:]]*\)/\1 WITH Linux-syscall-note/g}' $file done After this patch is applied, there are 5 UAPI headers that do not contain "WITH Linux-syscall-note". They are kept untouched since this exception applies only to GPL variants. $ git grep --not -e Linux-syscall-note --and -e SPDX-License-Identifier \ -- :arch/*/include/uapi/asm/*.h :include/uapi/ :^*/Kbuild include/uapi/drm/panfrost_drm.h:/* SPDX-License-Identifier: MIT */ include/uapi/linux/batman_adv.h:/* SPDX-License-Identifier: MIT */ include/uapi/linux/qemu_fw_cfg.h:/* SPDX-License-Identifier: BSD-3-Clause */ include/uapi/linux/vbox_err.h:/* SPDX-License-Identifier: MIT */ include/uapi/linux/virtio_iommu.h:/* SPDX-License-Identifier: BSD-3-Clause */ Signed-off-by: Masahiro Yamada Reviewed-by: Thomas Gleixner Signed-off-by: Greg Kroah-Hartman --- arch/arm64/include/uapi/asm/bpf_perf_event.h | 2 +- arch/csky/include/uapi/asm/byteorder.h | 2 +- arch/csky/include/uapi/asm/cachectl.h | 2 +- arch/csky/include/uapi/asm/perf_regs.h | 2 +- arch/csky/include/uapi/asm/ptrace.h | 2 +- arch/csky/include/uapi/asm/sigcontext.h | 2 +- arch/csky/include/uapi/asm/unistd.h | 2 +- arch/nds32/include/uapi/asm/auxvec.h | 2 +- arch/nds32/include/uapi/asm/byteorder.h | 2 +- arch/nds32/include/uapi/asm/cachectl.h | 2 +- arch/nds32/include/uapi/asm/fp_udfiex_crtl.h | 2 +- arch/nds32/include/uapi/asm/param.h | 2 +- arch/nds32/include/uapi/asm/ptrace.h | 2 +- arch/nds32/include/uapi/asm/sigcontext.h | 2 +- arch/nds32/include/uapi/asm/unistd.h | 2 +- arch/powerpc/include/uapi/asm/bpf_perf_event.h | 2 +- arch/riscv/include/uapi/asm/auxvec.h | 2 +- arch/riscv/include/uapi/asm/bitsperlong.h | 2 +- arch/riscv/include/uapi/asm/byteorder.h | 2 +- arch/riscv/include/uapi/asm/hwcap.h | 2 +- arch/riscv/include/uapi/asm/ptrace.h | 2 +- arch/riscv/include/uapi/asm/sigcontext.h | 2 +- arch/riscv/include/uapi/asm/ucontext.h | 2 +- arch/s390/include/uapi/asm/bpf_perf_event.h | 2 +- arch/s390/include/uapi/asm/ipl.h | 2 +- arch/sh/include/uapi/asm/setup.h | 2 +- 
arch/sh/include/uapi/asm/types.h | 2 +- arch/sparc/include/uapi/asm/oradax.h | 2 +- arch/x86/include/uapi/asm/byteorder.h | 2 +- arch/x86/include/uapi/asm/hwcap2.h | 2 +- arch/x86/include/uapi/asm/sigcontext32.h | 2 +- arch/x86/include/uapi/asm/types.h | 2 +- include/uapi/linux/bpfilter.h | 2 +- include/uapi/linux/ipmi_bmc.h | 2 +- include/uapi/linux/isst_if.h | 2 +- include/uapi/linux/netfilter/nf_synproxy.h | 2 +- include/uapi/linux/psp-sev.h | 2 +- include/uapi/linux/rxrpc.h | 2 +- include/uapi/linux/usb/g_uvc.h | 2 +- include/uapi/linux/vbox_vmmdev_types.h | 2 +- include/uapi/linux/vboxguest.h | 2 +- include/uapi/linux/virtio_pmem.h | 2 +- include/uapi/linux/vmcore.h | 2 +- include/uapi/linux/wmi.h | 2 +- include/uapi/misc/fastrpc.h | 2 +- include/uapi/rdma/rvt-abi.h | 2 +- include/uapi/rdma/siw-abi.h | 2 +- include/uapi/scsi/scsi_bsg_ufs.h | 2 +- include/uapi/sound/skl-tplg-interface.h | 2 +- 49 files changed, 49 insertions(+), 49 deletions(-) (limited to 'include/uapi') diff --git a/arch/arm64/include/uapi/asm/bpf_perf_event.h b/arch/arm64/include/uapi/asm/bpf_perf_event.h index b551b741653d..5e1e648aeec4 100644 --- a/arch/arm64/include/uapi/asm/bpf_perf_event.h +++ b/arch/arm64/include/uapi/asm/bpf_perf_event.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ #define _UAPI__ASM_BPF_PERF_EVENT_H__ diff --git a/arch/csky/include/uapi/asm/byteorder.h b/arch/csky/include/uapi/asm/byteorder.h index b079ec715cdf..d150cd664873 100644 --- a/arch/csky/include/uapi/asm/byteorder.h +++ b/arch/csky/include/uapi/asm/byteorder.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef __ASM_CSKY_BYTEORDER_H diff --git a/arch/csky/include/uapi/asm/cachectl.h b/arch/csky/include/uapi/asm/cachectl.h index ddf2f39aa925..ed7fad1ea20d 100644 --- a/arch/csky/include/uapi/asm/cachectl.h +++ b/arch/csky/include/uapi/asm/cachectl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef __ASM_CSKY_CACHECTL_H #define __ASM_CSKY_CACHECTL_H diff --git a/arch/csky/include/uapi/asm/perf_regs.h b/arch/csky/include/uapi/asm/perf_regs.h index ee323d818592..49d4e147a559 100644 --- a/arch/csky/include/uapi/asm/perf_regs.h +++ b/arch/csky/include/uapi/asm/perf_regs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. #ifndef _ASM_CSKY_PERF_REGS_H diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h index 4e248d5b86ef..66b2268e324e 100644 --- a/arch/csky/include/uapi/asm/ptrace.h +++ b/arch/csky/include/uapi/asm/ptrace.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #ifndef _CSKY_PTRACE_H diff --git a/arch/csky/include/uapi/asm/sigcontext.h b/arch/csky/include/uapi/asm/sigcontext.h index e81e7ff11e36..670c020f2cb8 100644 --- a/arch/csky/include/uapi/asm/sigcontext.h +++ b/arch/csky/include/uapi/asm/sigcontext.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. 
#ifndef __ASM_CSKY_SIGCONTEXT_H diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h index ec60e49cea66..211c983c7282 100644 --- a/arch/csky/include/uapi/asm/unistd.h +++ b/arch/csky/include/uapi/asm/unistd.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. #define __ARCH_WANT_SYS_CLONE diff --git a/arch/nds32/include/uapi/asm/auxvec.h b/arch/nds32/include/uapi/asm/auxvec.h index b5d58ea8decb..bc0b92ab8c15 100644 --- a/arch/nds32/include/uapi/asm/auxvec.h +++ b/arch/nds32/include/uapi/asm/auxvec.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2005-2017 Andes Technology Corporation #ifndef __ASM_AUXVEC_H diff --git a/arch/nds32/include/uapi/asm/byteorder.h b/arch/nds32/include/uapi/asm/byteorder.h index 511e653c709d..c264ef12c49c 100644 --- a/arch/nds32/include/uapi/asm/byteorder.h +++ b/arch/nds32/include/uapi/asm/byteorder.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2005-2017 Andes Technology Corporation #ifndef __NDS32_BYTEORDER_H__ diff --git a/arch/nds32/include/uapi/asm/cachectl.h b/arch/nds32/include/uapi/asm/cachectl.h index 73793662815c..31b9b439d819 100644 --- a/arch/nds32/include/uapi/asm/cachectl.h +++ b/arch/nds32/include/uapi/asm/cachectl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 1994, 1995, 1996 by Ralf Baechle // Copyright (C) 2005-2017 Andes Technology Corporation #ifndef _ASM_CACHECTL diff --git a/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h index d54a5d6c6538..f17396db16ec 100644 --- a/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h +++ b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* Copyright (C) 2005-2019 Andes Technology Corporation */ #ifndef _FP_UDF_IEX_CRTL_H #define _FP_UDF_IEX_CRTL_H diff --git a/arch/nds32/include/uapi/asm/param.h b/arch/nds32/include/uapi/asm/param.h index 2977534a6bd3..48d00328d328 100644 --- a/arch/nds32/include/uapi/asm/param.h +++ b/arch/nds32/include/uapi/asm/param.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2005-2017 Andes Technology Corporation #ifndef __ASM_NDS32_PARAM_H diff --git a/arch/nds32/include/uapi/asm/ptrace.h b/arch/nds32/include/uapi/asm/ptrace.h index 1a6e01c00e6f..d76217c7c010 100644 --- a/arch/nds32/include/uapi/asm/ptrace.h +++ b/arch/nds32/include/uapi/asm/ptrace.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2005-2017 Andes Technology Corporation #ifndef __UAPI_ASM_NDS32_PTRACE_H diff --git a/arch/nds32/include/uapi/asm/sigcontext.h b/arch/nds32/include/uapi/asm/sigcontext.h index dc89af7ddcc3..6c1e6648878f 100644 --- a/arch/nds32/include/uapi/asm/sigcontext.h +++ b/arch/nds32/include/uapi/asm/sigcontext.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2005-2017 Andes Technology Corporation #ifndef _ASMNDS32_SIGCONTEXT_H diff --git 
a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h index a0b2f7b9c0f2..410795e280fe 100644 --- a/arch/nds32/include/uapi/asm/unistd.h +++ b/arch/nds32/include/uapi/asm/unistd.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ // Copyright (C) 2005-2017 Andes Technology Corporation #define __ARCH_WANT_STAT64 diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h index b551b741653d..5e1e648aeec4 100644 --- a/arch/powerpc/include/uapi/asm/bpf_perf_event.h +++ b/arch/powerpc/include/uapi/asm/bpf_perf_event.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ #define _UAPI__ASM_BPF_PERF_EVENT_H__ diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h index 62716653554b..d86cb17bbabe 100644 --- a/arch/riscv/include/uapi/asm/auxvec.h +++ b/arch/riscv/include/uapi/asm/auxvec.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (C) 2012 ARM Ltd. * Copyright (C) 2015 Regents of the University of California diff --git a/arch/riscv/include/uapi/asm/bitsperlong.h b/arch/riscv/include/uapi/asm/bitsperlong.h index 0b9b58b57ff6..7d0b32e3b701 100644 --- a/arch/riscv/include/uapi/asm/bitsperlong.h +++ b/arch/riscv/include/uapi/asm/bitsperlong.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (C) 2012 ARM Ltd. * Copyright (C) 2015 Regents of the University of California diff --git a/arch/riscv/include/uapi/asm/byteorder.h b/arch/riscv/include/uapi/asm/byteorder.h index 1920debc09c0..f671e16bf6af 100644 --- a/arch/riscv/include/uapi/asm/byteorder.h +++ b/arch/riscv/include/uapi/asm/byteorder.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (C) 2012 ARM Ltd. 
* Copyright (C) 2015 Regents of the University of California diff --git a/arch/riscv/include/uapi/asm/hwcap.h b/arch/riscv/include/uapi/asm/hwcap.h index 7d786145183b..4e7646077056 100644 --- a/arch/riscv/include/uapi/asm/hwcap.h +++ b/arch/riscv/include/uapi/asm/hwcap.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copied from arch/arm64/include/asm/hwcap.h * diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h index 92d8f7cd8f84..882547f6bd5c 100644 --- a/arch/riscv/include/uapi/asm/ptrace.h +++ b/arch/riscv/include/uapi/asm/ptrace.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (C) 2012 Regents of the University of California */ diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h index 053f809e52ce..84f2dfcfdbce 100644 --- a/arch/riscv/include/uapi/asm/sigcontext.h +++ b/arch/riscv/include/uapi/asm/sigcontext.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (C) 2012 Regents of the University of California */ diff --git a/arch/riscv/include/uapi/asm/ucontext.h b/arch/riscv/include/uapi/asm/ucontext.h index b58e00cee2ec..411dd7b52ed6 100644 --- a/arch/riscv/include/uapi/asm/ucontext.h +++ b/arch/riscv/include/uapi/asm/ucontext.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Copyright (C) 2012 ARM Ltd. * Copyright (C) 2017 SiFive, Inc. diff --git a/arch/s390/include/uapi/asm/bpf_perf_event.h b/arch/s390/include/uapi/asm/bpf_perf_event.h index cefe7c7cd4f6..3ed42ff6da94 100644 --- a/arch/s390/include/uapi/asm/bpf_perf_event.h +++ b/arch/s390/include/uapi/asm/bpf_perf_event.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ #define _UAPI__ASM_BPF_PERF_EVENT_H__ diff --git a/arch/s390/include/uapi/asm/ipl.h b/arch/s390/include/uapi/asm/ipl.h index fd32b1cd80d2..451ba7d08905 100644 --- a/arch/s390/include/uapi/asm/ipl.h +++ b/arch/s390/include/uapi/asm/ipl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _ASM_S390_UAPI_IPL_H #define _ASM_S390_UAPI_IPL_H diff --git a/arch/sh/include/uapi/asm/setup.h b/arch/sh/include/uapi/asm/setup.h index 1170dd2fb998..4bd19f80f9b0 100644 --- a/arch/sh/include/uapi/asm/setup.h +++ b/arch/sh/include/uapi/asm/setup.h @@ -1,2 +1,2 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #include diff --git a/arch/sh/include/uapi/asm/types.h b/arch/sh/include/uapi/asm/types.h index f83795fdc0da..68100e108ea6 100644 --- a/arch/sh/include/uapi/asm/types.h +++ b/arch/sh/include/uapi/asm/types.h @@ -1,2 +1,2 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #include diff --git a/arch/sparc/include/uapi/asm/oradax.h b/arch/sparc/include/uapi/asm/oradax.h index 64c67f2ea33f..0dace69058ab 100644 --- a/arch/sparc/include/uapi/asm/oradax.h +++ b/arch/sparc/include/uapi/asm/oradax.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */ /* * Copyright 
(c) 2017, Oracle and/or its affiliates. All rights reserved. */ diff --git a/arch/x86/include/uapi/asm/byteorder.h b/arch/x86/include/uapi/asm/byteorder.h index 484e3cfd7ef2..149143cab9ff 100644 --- a/arch/x86/include/uapi/asm/byteorder.h +++ b/arch/x86/include/uapi/asm/byteorder.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _ASM_X86_BYTEORDER_H #define _ASM_X86_BYTEORDER_H diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h index 6ebaae90e207..8b2effe6efb8 100644 --- a/arch/x86/include/uapi/asm/hwcap2.h +++ b/arch/x86/include/uapi/asm/hwcap2.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _ASM_X86_HWCAP2_H #define _ASM_X86_HWCAP2_H diff --git a/arch/x86/include/uapi/asm/sigcontext32.h b/arch/x86/include/uapi/asm/sigcontext32.h index 6b18e88de8a6..7114801d0499 100644 --- a/arch/x86/include/uapi/asm/sigcontext32.h +++ b/arch/x86/include/uapi/asm/sigcontext32.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _ASM_X86_SIGCONTEXT32_H #define _ASM_X86_SIGCONTEXT32_H diff --git a/arch/x86/include/uapi/asm/types.h b/arch/x86/include/uapi/asm/types.h index df55e1ddb0c9..9d5c11a24279 100644 --- a/arch/x86/include/uapi/asm/types.h +++ b/arch/x86/include/uapi/asm/types.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _ASM_X86_TYPES_H #define _ASM_X86_TYPES_H diff --git a/include/uapi/linux/bpfilter.h b/include/uapi/linux/bpfilter.h index 2ec3cc99ea4c..cbc1f5813f50 100644 --- a/include/uapi/linux/bpfilter.h +++ b/include/uapi/linux/bpfilter.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_LINUX_BPFILTER_H #define _UAPI_LINUX_BPFILTER_H diff --git a/include/uapi/linux/ipmi_bmc.h b/include/uapi/linux/ipmi_bmc.h index 1670f0944227..782a03eb1086 100644 --- a/include/uapi/linux/ipmi_bmc.h +++ b/include/uapi/linux/ipmi_bmc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Copyright (c) 2015-2018, Intel Corporation. */ diff --git a/include/uapi/linux/isst_if.h b/include/uapi/linux/isst_if.h index d10b832c58c5..0a52b7b093d3 100644 --- a/include/uapi/linux/isst_if.h +++ b/include/uapi/linux/isst_if.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * Intel Speed Select Interface: OS to hardware Interface * Copyright (c) 2019, Intel Corporation. 
diff --git a/include/uapi/linux/netfilter/nf_synproxy.h b/include/uapi/linux/netfilter/nf_synproxy.h index 6f3791c8946f..00d787f0260e 100644 --- a/include/uapi/linux/netfilter/nf_synproxy.h +++ b/include/uapi/linux/netfilter/nf_synproxy.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _NF_SYNPROXY_H #define _NF_SYNPROXY_H diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 8654b2442f6a..592a0c1b77c9 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * Userspace interface for AMD Secure Encrypted Virtualization (SEV) * platform management commands. diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h index 782069dcf607..4accfa7e266d 100644 --- a/include/uapi/linux/rxrpc.h +++ b/include/uapi/linux/rxrpc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */ /* Types and definitions for AF_RXRPC. * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. diff --git a/include/uapi/linux/usb/g_uvc.h b/include/uapi/linux/usb/g_uvc.h index 3c9ee3020cbb..652f169a019e 100644 --- a/include/uapi/linux/usb/g_uvc.h +++ b/include/uapi/linux/usb/g_uvc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ /* * g_uvc.h -- USB Video Class Gadget driver API * diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h index 26f39816af14..c27289fd619a 100644 --- a/include/uapi/linux/vbox_vmmdev_types.h +++ b/include/uapi/linux/vbox_vmmdev_types.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR CDDL-1.0) */ /* * Virtual Device for Guest <-> VMM/Host communication, type definitions * which are also used for the vboxguest ioctl interface / by vboxsf diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h index 612f0c7d3558..9cec58a6a5ea 100644 --- a/include/uapi/linux/vboxguest.h +++ b/include/uapi/linux/vboxguest.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR CDDL-1.0) */ /* * VBoxGuest - VirtualBox Guest Additions Driver Interface. * diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h index 9a63ed6d062f..b022787ffb94 100644 --- a/include/uapi/linux/virtio_pmem.h +++ b/include/uapi/linux/virtio_pmem.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */ /* * Definitions for virtio-pmem devices. 
* diff --git a/include/uapi/linux/vmcore.h b/include/uapi/linux/vmcore.h index 022619668e0e..3e9da91866ff 100644 --- a/include/uapi/linux/vmcore.h +++ b/include/uapi/linux/vmcore.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef _UAPI_VMCORE_H #define _UAPI_VMCORE_H diff --git a/include/uapi/linux/wmi.h b/include/uapi/linux/wmi.h index c36f2d7675a4..7085c5dca9fa 100644 --- a/include/uapi/linux/wmi.h +++ b/include/uapi/linux/wmi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ /* * User API methods for ACPI-WMI mapping driver * diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h index 6d701af9fc42..fb792e882cef 100644 --- a/include/uapi/misc/fastrpc.h +++ b/include/uapi/misc/fastrpc.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ #ifndef __QCOM_FASTRPC_H__ #define __QCOM_FASTRPC_H__ diff --git a/include/uapi/rdma/rvt-abi.h b/include/uapi/rdma/rvt-abi.h index 7328293c715c..7c05a02d2be5 100644 --- a/include/uapi/rdma/rvt-abi.h +++ b/include/uapi/rdma/rvt-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ /* * This file contains defines, structures, etc. that are used diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h index 3dd8071ace7b..7de68f1dc707 100644 --- a/include/uapi/rdma/siw-abi.h +++ b/include/uapi/rdma/siw-abi.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */ +/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) or BSD-3-Clause */ /* Authors: Bernard Metzler */ /* Copyright (c) 2008-2019, IBM Corporation */ diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h index 17c7abd0803a..9988db6ad244 100644 --- a/include/uapi/scsi/scsi_bsg_ufs.h +++ b/include/uapi/scsi/scsi_bsg_ufs.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * UFS Transport SGIO v4 BSG Message Support * diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h index f39352cef382..9eee32f5e407 100644 --- a/include/uapi/sound/skl-tplg-interface.h +++ b/include/uapi/sound/skl-tplg-interface.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * skl-tplg-interface.h - Intel DSP FW private data interface * -- cgit v1.2.3 From 4a2b8560e3dff8637ccb09524650864f60ebab7f Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 22 Jul 2019 08:51:46 +0200 Subject: tty: serial: netx: Delete driver The Netx ARM machine was deleted from the kernel. This driver had no users and has to go. 
Cc: Robert Schwebel Cc: Sascha Hauer Signed-off-by: Linus Walleij Signed-off-by: Arnd Bergmann Link: https://lore.kernel.org/r/20190722065146.4844-1-linus.walleij@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/Kconfig | 19 - drivers/tty/serial/Makefile | 1 - drivers/tty/serial/netx-serial.c | 733 --------------------------------------- include/uapi/linux/serial_core.h | 3 - 4 files changed, 756 deletions(-) delete mode 100644 drivers/tty/serial/netx-serial.c (limited to 'include/uapi') diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index fd385c8c53a5..3083dbae35f7 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1035,25 +1035,6 @@ config SERIAL_VT8500_CONSOLE depends on SERIAL_VT8500=y select SERIAL_CORE_CONSOLE -config SERIAL_NETX - tristate "NetX serial port support" - depends on ARCH_NETX - select SERIAL_CORE - help - If you have a machine based on a Hilscher NetX SoC you - can enable its onboard serial port by enabling this option. - - To compile this driver as a module, choose M here: the - module will be called netx-serial. - -config SERIAL_NETX_CONSOLE - bool "Console on NetX serial port" - depends on SERIAL_NETX=y - select SERIAL_CORE_CONSOLE - help - If you have enabled the serial port on the Hilscher NetX SoC - you can make it the console by answering Y to this option. - config SERIAL_OMAP tristate "OMAP serial port support" depends on ARCH_OMAP2PLUS diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index 7cd7cabfa6c4..15a0fccadf7e 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -59,7 +59,6 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o obj-$(CONFIG_SERIAL_MSM) += msm_serial.o obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o -obj-$(CONFIG_SERIAL_NETX) += netx-serial.o obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c deleted file mode 100644 index b3556863491f..000000000000 --- a/drivers/tty/serial/netx-serial.c +++ /dev/null @@ -1,733 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (c) 2005 Sascha Hauer , Pengutronix - */ - -#if defined(CONFIG_SERIAL_NETX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) -#define SUPPORT_SYSRQ -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -/* We've been assigned a range on the "Low-density serial ports" major */ -#define SERIAL_NX_MAJOR 204 -#define MINOR_START 170 - -enum uart_regs { - UART_DR = 0x00, - UART_SR = 0x04, - UART_LINE_CR = 0x08, - UART_BAUDDIV_MSB = 0x0c, - UART_BAUDDIV_LSB = 0x10, - UART_CR = 0x14, - UART_FR = 0x18, - UART_IIR = 0x1c, - UART_ILPR = 0x20, - UART_RTS_CR = 0x24, - UART_RTS_LEAD = 0x28, - UART_RTS_TRAIL = 0x2c, - UART_DRV_ENABLE = 0x30, - UART_BRM_CR = 0x34, - UART_RXFIFO_IRQLEVEL = 0x38, - UART_TXFIFO_IRQLEVEL = 0x3c, -}; - -#define SR_FE (1<<0) -#define SR_PE (1<<1) -#define SR_BE (1<<2) -#define SR_OE (1<<3) - -#define LINE_CR_BRK (1<<0) -#define LINE_CR_PEN (1<<1) -#define LINE_CR_EPS (1<<2) -#define LINE_CR_STP2 (1<<3) -#define LINE_CR_FEN (1<<4) -#define LINE_CR_5BIT (0<<5) -#define LINE_CR_6BIT (1<<5) -#define LINE_CR_7BIT (2<<5) -#define LINE_CR_8BIT (3<<5) -#define LINE_CR_BITS_MASK (3<<5) - -#define CR_UART_EN (1<<0) -#define CR_SIREN (1<<1) -#define 
CR_SIRLP (1<<2) -#define CR_MSIE (1<<3) -#define CR_RIE (1<<4) -#define CR_TIE (1<<5) -#define CR_RTIE (1<<6) -#define CR_LBE (1<<7) - -#define FR_CTS (1<<0) -#define FR_DSR (1<<1) -#define FR_DCD (1<<2) -#define FR_BUSY (1<<3) -#define FR_RXFE (1<<4) -#define FR_TXFF (1<<5) -#define FR_RXFF (1<<6) -#define FR_TXFE (1<<7) - -#define IIR_MIS (1<<0) -#define IIR_RIS (1<<1) -#define IIR_TIS (1<<2) -#define IIR_RTIS (1<<3) -#define IIR_MASK 0xf - -#define RTS_CR_AUTO (1<<0) -#define RTS_CR_RTS (1<<1) -#define RTS_CR_COUNT (1<<2) -#define RTS_CR_MOD2 (1<<3) -#define RTS_CR_RTS_POL (1<<4) -#define RTS_CR_CTS_CTR (1<<5) -#define RTS_CR_CTS_POL (1<<6) -#define RTS_CR_STICK (1<<7) - -#define UART_PORT_SIZE 0x40 -#define DRIVER_NAME "netx-uart" - -struct netx_port { - struct uart_port port; -}; - -static void netx_stop_tx(struct uart_port *port) -{ - unsigned int val; - val = readl(port->membase + UART_CR); - writel(val & ~CR_TIE, port->membase + UART_CR); -} - -static void netx_stop_rx(struct uart_port *port) -{ - unsigned int val; - val = readl(port->membase + UART_CR); - writel(val & ~CR_RIE, port->membase + UART_CR); -} - -static void netx_enable_ms(struct uart_port *port) -{ - unsigned int val; - val = readl(port->membase + UART_CR); - writel(val | CR_MSIE, port->membase + UART_CR); -} - -static inline void netx_transmit_buffer(struct uart_port *port) -{ - struct circ_buf *xmit = &port->state->xmit; - - if (port->x_char) { - writel(port->x_char, port->membase + UART_DR); - port->icount.tx++; - port->x_char = 0; - return; - } - - if (uart_tx_stopped(port) || uart_circ_empty(xmit)) { - netx_stop_tx(port); - return; - } - - do { - /* send xmit->buf[xmit->tail] - * out the port here */ - writel(xmit->buf[xmit->tail], port->membase + UART_DR); - xmit->tail = (xmit->tail + 1) & - (UART_XMIT_SIZE - 1); - port->icount.tx++; - if (uart_circ_empty(xmit)) - break; - } while (!(readl(port->membase + UART_FR) & FR_TXFF)); - - if (uart_circ_empty(xmit)) - netx_stop_tx(port); -} - -static void netx_start_tx(struct uart_port *port) -{ - writel( - readl(port->membase + UART_CR) | CR_TIE, port->membase + UART_CR); - - if (!(readl(port->membase + UART_FR) & FR_TXFF)) - netx_transmit_buffer(port); -} - -static unsigned int netx_tx_empty(struct uart_port *port) -{ - return readl(port->membase + UART_FR) & FR_BUSY ? 
0 : TIOCSER_TEMT; -} - -static void netx_txint(struct uart_port *port) -{ - struct circ_buf *xmit = &port->state->xmit; - - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { - netx_stop_tx(port); - return; - } - - netx_transmit_buffer(port); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); -} - -static void netx_rxint(struct uart_port *port, unsigned long *flags) -{ - unsigned char rx, flg, status; - - while (!(readl(port->membase + UART_FR) & FR_RXFE)) { - rx = readl(port->membase + UART_DR); - flg = TTY_NORMAL; - port->icount.rx++; - status = readl(port->membase + UART_SR); - if (status & SR_BE) { - writel(0, port->membase + UART_SR); - if (uart_handle_break(port)) - continue; - } - - if (unlikely(status & (SR_FE | SR_PE | SR_OE))) { - - if (status & SR_PE) - port->icount.parity++; - else if (status & SR_FE) - port->icount.frame++; - if (status & SR_OE) - port->icount.overrun++; - - status &= port->read_status_mask; - - if (status & SR_BE) - flg = TTY_BREAK; - else if (status & SR_PE) - flg = TTY_PARITY; - else if (status & SR_FE) - flg = TTY_FRAME; - } - - if (uart_handle_sysrq_char(port, rx)) - continue; - - uart_insert_char(port, status, SR_OE, rx, flg); - } - - spin_unlock_irqrestore(&port->lock, *flags); - tty_flip_buffer_push(&port->state->port); - spin_lock_irqsave(&port->lock, *flags); -} - -static irqreturn_t netx_int(int irq, void *dev_id) -{ - struct uart_port *port = dev_id; - unsigned long flags; - unsigned char status; - - spin_lock_irqsave(&port->lock,flags); - - status = readl(port->membase + UART_IIR) & IIR_MASK; - while (status) { - if (status & IIR_RIS) - netx_rxint(port, &flags); - if (status & IIR_TIS) - netx_txint(port); - if (status & IIR_MIS) { - if (readl(port->membase + UART_FR) & FR_CTS) - uart_handle_cts_change(port, 1); - else - uart_handle_cts_change(port, 0); - } - writel(0, port->membase + UART_IIR); - status = readl(port->membase + UART_IIR) & IIR_MASK; - } - - spin_unlock_irqrestore(&port->lock,flags); - return IRQ_HANDLED; -} - -static unsigned int netx_get_mctrl(struct uart_port *port) -{ - unsigned int ret = TIOCM_DSR | TIOCM_CAR; - - if (readl(port->membase + UART_FR) & FR_CTS) - ret |= TIOCM_CTS; - - return ret; -} - -static void netx_set_mctrl(struct uart_port *port, unsigned int mctrl) -{ - unsigned int val; - - /* FIXME: Locking needed ? 
*/ - if (mctrl & TIOCM_RTS) { - val = readl(port->membase + UART_RTS_CR); - writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR); - } -} - -static void netx_break_ctl(struct uart_port *port, int break_state) -{ - unsigned int line_cr; - spin_lock_irq(&port->lock); - - line_cr = readl(port->membase + UART_LINE_CR); - if (break_state != 0) - line_cr |= LINE_CR_BRK; - else - line_cr &= ~LINE_CR_BRK; - writel(line_cr, port->membase + UART_LINE_CR); - - spin_unlock_irq(&port->lock); -} - -static int netx_startup(struct uart_port *port) -{ - int ret; - - ret = request_irq(port->irq, netx_int, 0, - DRIVER_NAME, port); - if (ret) { - dev_err(port->dev, "unable to grab irq%d\n",port->irq); - goto exit; - } - - writel(readl(port->membase + UART_LINE_CR) | LINE_CR_FEN, - port->membase + UART_LINE_CR); - - writel(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE | CR_UART_EN, - port->membase + UART_CR); - -exit: - return ret; -} - -static void netx_shutdown(struct uart_port *port) -{ - writel(0, port->membase + UART_CR) ; - - free_irq(port->irq, port); -} - -static void -netx_set_termios(struct uart_port *port, struct ktermios *termios, - struct ktermios *old) -{ - unsigned int baud, quot; - unsigned char old_cr; - unsigned char line_cr = LINE_CR_FEN; - unsigned char rts_cr = 0; - - switch (termios->c_cflag & CSIZE) { - case CS5: - line_cr |= LINE_CR_5BIT; - break; - case CS6: - line_cr |= LINE_CR_6BIT; - break; - case CS7: - line_cr |= LINE_CR_7BIT; - break; - case CS8: - line_cr |= LINE_CR_8BIT; - break; - } - - if (termios->c_cflag & CSTOPB) - line_cr |= LINE_CR_STP2; - - if (termios->c_cflag & PARENB) { - line_cr |= LINE_CR_PEN; - if (!(termios->c_cflag & PARODD)) - line_cr |= LINE_CR_EPS; - } - - if (termios->c_cflag & CRTSCTS) - rts_cr = RTS_CR_AUTO | RTS_CR_CTS_CTR | RTS_CR_RTS_POL; - - baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); - quot = baud * 4096; - quot /= 1000; - quot *= 256; - quot /= 100000; - - spin_lock_irq(&port->lock); - - uart_update_timeout(port, termios->c_cflag, baud); - - old_cr = readl(port->membase + UART_CR); - - /* disable interrupts */ - writel(old_cr & ~(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE), - port->membase + UART_CR); - - /* drain transmitter */ - while (readl(port->membase + UART_FR) & FR_BUSY); - - /* disable UART */ - writel(old_cr & ~CR_UART_EN, port->membase + UART_CR); - - /* modem status interrupts */ - old_cr &= ~CR_MSIE; - if (UART_ENABLE_MS(port, termios->c_cflag)) - old_cr |= CR_MSIE; - - writel((quot>>8) & 0xff, port->membase + UART_BAUDDIV_MSB); - writel(quot & 0xff, port->membase + UART_BAUDDIV_LSB); - writel(line_cr, port->membase + UART_LINE_CR); - - writel(rts_cr, port->membase + UART_RTS_CR); - - /* - * Characters to ignore - */ - port->ignore_status_mask = 0; - if (termios->c_iflag & IGNPAR) - port->ignore_status_mask |= SR_PE; - if (termios->c_iflag & IGNBRK) { - port->ignore_status_mask |= SR_BE; - /* - * If we're ignoring parity and break indicators, - * ignore overruns too (for real raw support). - */ - if (termios->c_iflag & IGNPAR) - port->ignore_status_mask |= SR_PE; - } - - port->read_status_mask = 0; - if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) - port->read_status_mask |= SR_BE; - if (termios->c_iflag & INPCK) - port->read_status_mask |= SR_PE | SR_FE; - - writel(old_cr, port->membase + UART_CR); - - spin_unlock_irq(&port->lock); -} - -static const char *netx_type(struct uart_port *port) -{ - return port->type == PORT_NETX ? 
"NETX" : NULL; -} - -static void netx_release_port(struct uart_port *port) -{ - release_mem_region(port->mapbase, UART_PORT_SIZE); -} - -static int netx_request_port(struct uart_port *port) -{ - return request_mem_region(port->mapbase, UART_PORT_SIZE, - DRIVER_NAME) != NULL ? 0 : -EBUSY; -} - -static void netx_config_port(struct uart_port *port, int flags) -{ - if (flags & UART_CONFIG_TYPE && netx_request_port(port) == 0) - port->type = PORT_NETX; -} - -static int -netx_verify_port(struct uart_port *port, struct serial_struct *ser) -{ - int ret = 0; - - if (ser->type != PORT_UNKNOWN && ser->type != PORT_NETX) - ret = -EINVAL; - - return ret; -} - -static struct uart_ops netx_pops = { - .tx_empty = netx_tx_empty, - .set_mctrl = netx_set_mctrl, - .get_mctrl = netx_get_mctrl, - .stop_tx = netx_stop_tx, - .start_tx = netx_start_tx, - .stop_rx = netx_stop_rx, - .enable_ms = netx_enable_ms, - .break_ctl = netx_break_ctl, - .startup = netx_startup, - .shutdown = netx_shutdown, - .set_termios = netx_set_termios, - .type = netx_type, - .release_port = netx_release_port, - .request_port = netx_request_port, - .config_port = netx_config_port, - .verify_port = netx_verify_port, -}; - -static struct netx_port netx_ports[] = { - { - .port = { - .type = PORT_NETX, - .iotype = UPIO_MEM, - .membase = (char __iomem *)io_p2v(NETX_PA_UART0), - .mapbase = NETX_PA_UART0, - .irq = NETX_IRQ_UART0, - .uartclk = 100000000, - .fifosize = 16, - .flags = UPF_BOOT_AUTOCONF, - .ops = &netx_pops, - .line = 0, - }, - }, { - .port = { - .type = PORT_NETX, - .iotype = UPIO_MEM, - .membase = (char __iomem *)io_p2v(NETX_PA_UART1), - .mapbase = NETX_PA_UART1, - .irq = NETX_IRQ_UART1, - .uartclk = 100000000, - .fifosize = 16, - .flags = UPF_BOOT_AUTOCONF, - .ops = &netx_pops, - .line = 1, - }, - }, { - .port = { - .type = PORT_NETX, - .iotype = UPIO_MEM, - .membase = (char __iomem *)io_p2v(NETX_PA_UART2), - .mapbase = NETX_PA_UART2, - .irq = NETX_IRQ_UART2, - .uartclk = 100000000, - .fifosize = 16, - .flags = UPF_BOOT_AUTOCONF, - .ops = &netx_pops, - .line = 2, - }, - } -}; - -#ifdef CONFIG_SERIAL_NETX_CONSOLE - -static void netx_console_putchar(struct uart_port *port, int ch) -{ - while (readl(port->membase + UART_FR) & FR_BUSY); - writel(ch, port->membase + UART_DR); -} - -static void -netx_console_write(struct console *co, const char *s, unsigned int count) -{ - struct uart_port *port = &netx_ports[co->index].port; - unsigned char cr_save; - - cr_save = readl(port->membase + UART_CR); - writel(cr_save | CR_UART_EN, port->membase + UART_CR); - - uart_console_write(port, s, count, netx_console_putchar); - - while (readl(port->membase + UART_FR) & FR_BUSY); - writel(cr_save, port->membase + UART_CR); -} - -static void __init -netx_console_get_options(struct uart_port *port, int *baud, - int *parity, int *bits, int *flow) -{ - unsigned char line_cr; - - *baud = (readl(port->membase + UART_BAUDDIV_MSB) << 8) | - readl(port->membase + UART_BAUDDIV_LSB); - *baud *= 1000; - *baud /= 4096; - *baud *= 1000; - *baud /= 256; - *baud *= 100; - - line_cr = readl(port->membase + UART_LINE_CR); - *parity = 'n'; - if (line_cr & LINE_CR_PEN) { - if (line_cr & LINE_CR_EPS) - *parity = 'e'; - else - *parity = 'o'; - } - - switch (line_cr & LINE_CR_BITS_MASK) { - case LINE_CR_8BIT: - *bits = 8; - break; - case LINE_CR_7BIT: - *bits = 7; - break; - case LINE_CR_6BIT: - *bits = 6; - break; - case LINE_CR_5BIT: - *bits = 5; - break; - } - - if (readl(port->membase + UART_RTS_CR) & RTS_CR_AUTO) - *flow = 'r'; -} - -static int __init 
-netx_console_setup(struct console *co, char *options) -{ - struct netx_port *sport; - int baud = 9600; - int bits = 8; - int parity = 'n'; - int flow = 'n'; - - /* - * Check whether an invalid uart number has been specified, and - * if so, search for the first available port that does have - * console support. - */ - if (co->index == -1 || co->index >= ARRAY_SIZE(netx_ports)) - co->index = 0; - sport = &netx_ports[co->index]; - - if (options) { - uart_parse_options(options, &baud, &parity, &bits, &flow); - } else { - /* if the UART is enabled, assume it has been correctly setup - * by the bootloader and get the options - */ - if (readl(sport->port.membase + UART_CR) & CR_UART_EN) { - netx_console_get_options(&sport->port, &baud, - &parity, &bits, &flow); - } - - } - - return uart_set_options(&sport->port, co, baud, parity, bits, flow); -} - -static struct uart_driver netx_reg; -static struct console netx_console = { - .name = "ttyNX", - .write = netx_console_write, - .device = uart_console_device, - .setup = netx_console_setup, - .flags = CON_PRINTBUFFER, - .index = -1, - .data = &netx_reg, -}; - -static int __init netx_console_init(void) -{ - register_console(&netx_console); - return 0; -} -console_initcall(netx_console_init); - -#define NETX_CONSOLE &netx_console -#else -#define NETX_CONSOLE NULL -#endif - -static struct uart_driver netx_reg = { - .owner = THIS_MODULE, - .driver_name = DRIVER_NAME, - .dev_name = "ttyNX", - .major = SERIAL_NX_MAJOR, - .minor = MINOR_START, - .nr = ARRAY_SIZE(netx_ports), - .cons = NETX_CONSOLE, -}; - -static int serial_netx_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct netx_port *sport = platform_get_drvdata(pdev); - - if (sport) - uart_suspend_port(&netx_reg, &sport->port); - - return 0; -} - -static int serial_netx_resume(struct platform_device *pdev) -{ - struct netx_port *sport = platform_get_drvdata(pdev); - - if (sport) - uart_resume_port(&netx_reg, &sport->port); - - return 0; -} - -static int serial_netx_probe(struct platform_device *pdev) -{ - struct uart_port *port = &netx_ports[pdev->id].port; - - dev_info(&pdev->dev, "initialising\n"); - - port->dev = &pdev->dev; - - writel(1, port->membase + UART_RXFIFO_IRQLEVEL); - uart_add_one_port(&netx_reg, &netx_ports[pdev->id].port); - platform_set_drvdata(pdev, &netx_ports[pdev->id]); - - return 0; -} - -static int serial_netx_remove(struct platform_device *pdev) -{ - struct netx_port *sport = platform_get_drvdata(pdev); - - if (sport) - uart_remove_one_port(&netx_reg, &sport->port); - - return 0; -} - -static struct platform_driver serial_netx_driver = { - .probe = serial_netx_probe, - .remove = serial_netx_remove, - - .suspend = serial_netx_suspend, - .resume = serial_netx_resume, - - .driver = { - .name = DRIVER_NAME, - }, -}; - -static int __init netx_serial_init(void) -{ - int ret; - - printk(KERN_INFO "Serial: NetX driver\n"); - - ret = uart_register_driver(&netx_reg); - if (ret) - return ret; - - ret = platform_driver_register(&serial_netx_driver); - if (ret != 0) - uart_unregister_driver(&netx_reg); - - return 0; -} - -static void __exit netx_serial_exit(void) -{ - platform_driver_unregister(&serial_netx_driver); - uart_unregister_driver(&netx_reg); -} - -module_init(netx_serial_init); -module_exit(netx_serial_exit); - -MODULE_AUTHOR("Sascha Hauer"); -MODULE_DESCRIPTION("NetX serial port driver"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index 
5642c05e0da0..3cc3af1c2ee1 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -150,9 +150,6 @@ #define PORT_PNX8XXX 70 -/* Hilscher netx */ -#define PORT_NETX 71 - /* SUN4V Hypervisor Console */ #define PORT_SUNHV 72 -- cgit v1.2.3 From 086f95682114fd2d1790bd3226e76cbae9a2d192 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 25 Jul 2019 15:52:25 -0700 Subject: bpf/flow_dissector: pass input flags to BPF flow dissector program C flow dissector supports input flags that tell it to customize parsing by either stopping early or trying to parse as deep as possible. Pass those flags to the BPF flow dissector so it can make the same decisions. In the next commits I'll add support for those flags to our reference bpf_flow.c v3: * Export copy of flow dissector flags instead of moving (Alexei Starovoitov) Acked-by: Petar Penkov Acked-by: Willem de Bruijn Acked-by: Song Liu Cc: Song Liu Cc: Willem de Bruijn Cc: Petar Penkov Signed-off-by: Stanislav Fomichev Signed-off-by: Alexei Starovoitov --- include/linux/skbuff.h | 2 +- include/uapi/linux/bpf.h | 5 +++++ net/bpf/test_run.c | 2 +- net/core/flow_dissector.c | 12 ++++++++++-- 4 files changed, 17 insertions(+), 4 deletions(-) (limited to 'include/uapi') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 718742b1c505..9b7a8038beec 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1271,7 +1271,7 @@ static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) struct bpf_flow_dissector; bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, - __be16 proto, int nhoff, int hlen); + __be16 proto, int nhoff, int hlen, unsigned int flags); bool __skb_flow_dissect(const struct net *net, const struct sk_buff *skb, diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index fa1c753dcdbc..88b9d743036f 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3507,6 +3507,10 @@ enum bpf_task_fd_type { BPF_FD_TYPE_URETPROBE, /* filename + offset */ }; +#define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG (1U << 0) +#define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL (1U << 1) +#define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP (1U << 2) + struct bpf_flow_keys { __u16 nhoff; __u16 thoff; @@ -3528,6 +3532,7 @@ struct bpf_flow_keys { __u32 ipv6_dst[4]; /* in6_addr; network order */ }; }; + __u32 flags; }; struct bpf_func_info { diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 80e6f3a6864d..4e41d15a1098 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -419,7 +419,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, time_start = ktime_get_ns(); for (i = 0; i < repeat; i++) { retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN, - size); + size, 0); if (signal_pending(current)) { preempt_enable(); diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 3e6fedb57bc1..50ed1a688709 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -784,7 +784,7 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys, } bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, - __be16 proto, int nhoff, int hlen) + __be16 proto, int nhoff, int hlen, unsigned int flags) { struct bpf_flow_keys *flow_keys = ctx->flow_keys; u32 result; @@ -795,6 +795,14 @@ bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, flow_keys->nhoff = nhoff; flow_keys->thoff = flow_keys->nhoff; + 
BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG != + (int)FLOW_DISSECTOR_F_PARSE_1ST_FRAG); + BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL != + (int)FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); + BUILD_BUG_ON((int)BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP != + (int)FLOW_DISSECTOR_F_STOP_AT_ENCAP); + flow_keys->flags = flags; + preempt_disable(); result = BPF_PROG_RUN(prog, ctx); preempt_enable(); @@ -914,7 +922,7 @@ bool __skb_flow_dissect(const struct net *net, } ret = bpf_flow_dissect(attached, &ctx, n_proto, nhoff, - hlen); + hlen, flags); __skb_flow_bpf_to_target(&flow_keys, flow_dissector, target_container); rcu_read_unlock(); -- cgit v1.2.3 From 71c99e32b926159ea628352751f66383d7d04d17 Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Thu, 25 Jul 2019 15:52:30 -0700 Subject: bpf/flow_dissector: support ipv6 flow_label and BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL Add support for exporting ipv6 flow label via bpf_flow_keys. Export flow label from bpf_flow.c and also return early when BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL is passed. Acked-by: Petar Penkov Acked-by: Willem de Bruijn Acked-by: Song Liu Cc: Song Liu Cc: Willem de Bruijn Cc: Petar Penkov Signed-off-by: Stanislav Fomichev Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 1 + net/core/flow_dissector.c | 9 +++++ tools/include/uapi/linux/bpf.h | 1 + .../selftests/bpf/prog_tests/flow_dissector.c | 46 ++++++++++++++++++++++ tools/testing/selftests/bpf/progs/bpf_flow.c | 10 +++++ 5 files changed, 67 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 88b9d743036f..e985f07a98ed 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -3533,6 +3533,7 @@ struct bpf_flow_keys { }; }; __u32 flags; + __be32 flow_label; }; struct bpf_func_info { diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 50ed1a688709..9741b593ea53 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -737,6 +737,7 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys, struct flow_dissector_key_basic *key_basic; struct flow_dissector_key_addrs *key_addrs; struct flow_dissector_key_ports *key_ports; + struct flow_dissector_key_tags *key_tags; key_control = skb_flow_dissector_target(flow_dissector, FLOW_DISSECTOR_KEY_CONTROL, @@ -781,6 +782,14 @@ static void __skb_flow_bpf_to_target(const struct bpf_flow_keys *flow_keys, key_ports->src = flow_keys->sport; key_ports->dst = flow_keys->dport; } + + if (dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_FLOW_LABEL)) { + key_tags = skb_flow_dissector_target(flow_dissector, + FLOW_DISSECTOR_KEY_FLOW_LABEL, + target_container); + key_tags->flow_label = ntohl(flow_keys->flow_label); + } } bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx, diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 2e4b0848d795..3d7fc67ec1b8 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -3530,6 +3530,7 @@ struct bpf_flow_keys { }; }; __u32 flags; + __be32 flow_label; }; struct bpf_func_info { diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c index 8e8c18aced9b..ef83f145a6f1 100644 --- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c +++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c @@ -20,6 +20,7 @@ "is_encap=%u/%u " \ "ip_proto=0x%x/0x%x " \ "n_proto=0x%x/0x%x " \ + "flow_label=0x%x/0x%x 
" \ "sport=%u/%u " \ "dport=%u/%u\n", \ got.nhoff, expected.nhoff, \ @@ -30,6 +31,7 @@ got.is_encap, expected.is_encap, \ got.ip_proto, expected.ip_proto, \ got.n_proto, expected.n_proto, \ + got.flow_label, expected.flow_label, \ got.sport, expected.sport, \ got.dport, expected.dport) @@ -257,6 +259,50 @@ struct test tests[] = { .is_first_frag = true, }, }, + { + .name = "ipv6-flow-label", + .pkt.ipv6 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_TCP, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .iph.flow_lbl = { 0xb, 0xee, 0xef }, + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct ipv6hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .sport = 80, + .dport = 8080, + .flow_label = __bpf_constant_htonl(0xbeeef), + }, + }, + { + .name = "ipv6-no-flow-label", + .pkt.ipv6 = { + .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6), + .iph.nexthdr = IPPROTO_TCP, + .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES), + .iph.flow_lbl = { 0xb, 0xee, 0xef }, + .tcp.doff = 5, + .tcp.source = 80, + .tcp.dest = 8080, + }, + .keys = { + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL, + .nhoff = ETH_HLEN, + .thoff = ETH_HLEN + sizeof(struct ipv6hdr), + .addr_proto = ETH_P_IPV6, + .ip_proto = IPPROTO_TCP, + .n_proto = __bpf_constant_htons(ETH_P_IPV6), + .flow_label = __bpf_constant_htonl(0xbeeef), + }, + .flags = BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL, + }, }; static int create_tap(const char *ifname) diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c index f931cd3db9d4..7fbfa22f33df 100644 --- a/tools/testing/selftests/bpf/progs/bpf_flow.c +++ b/tools/testing/selftests/bpf/progs/bpf_flow.c @@ -83,6 +83,12 @@ static __always_inline int export_flow_keys(struct bpf_flow_keys *keys, return ret; } +#define IPV6_FLOWLABEL_MASK __bpf_constant_htonl(0x000FFFFF) +static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr) +{ + return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK; +} + static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb, __u16 hdr_size, void *buffer) @@ -308,6 +314,10 @@ PROG(IPV6)(struct __sk_buff *skb) keys->thoff += sizeof(struct ipv6hdr); keys->ip_proto = ip6h->nexthdr; + keys->flow_label = ip6_flowlabel(ip6h); + + if (keys->flags & BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL) + return export_flow_keys(keys, BPF_OK); return parse_ipv6_proto(skb, ip6h->nexthdr); } -- cgit v1.2.3 From 7a113110fc8cdda14023c0bffc7bd8b5f3da1edf Mon Sep 17 00:00:00 2001 From: Denis Kenzior Date: Mon, 22 Jul 2019 06:33:10 -0500 Subject: nl80211: document uapi for CMD_FRAME_WAIT_CANCEL Commit 1c38c7f22068 ("nl80211: send event when CMD_FRAME duration expires") added the possibility of NL80211_CMD_FRAME_WAIT_CANCEL being sent whenever the off-channel wait time associated with a CMD_FRAME completes. Document this in the uapi/linux/nl80211.h file. Signed-off-by: Denis Kenzior Link: https://lore.kernel.org/r/20190722113312.14031-1-denkenz@gmail.com Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index beb9a9d0c00a..c45587c2cf44 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -657,7 +657,9 @@ * is used during CSA period. 
* @NL80211_CMD_FRAME_WAIT_CANCEL: When an off-channel TX was requested, this * command may be used with the corresponding cookie to cancel the wait - * time if it is known that it is no longer necessary. + * time if it is known that it is no longer necessary. This command is + * also sent as an event whenever the driver has completed the off-channel + * wait time. * @NL80211_CMD_ACTION: Alias for @NL80211_CMD_FRAME for backward compatibility. * @NL80211_CMD_FRAME_TX_STATUS: Report TX status of a management frame * transmitted with %NL80211_CMD_FRAME. %NL80211_ATTR_COOKIE identifies -- cgit v1.2.3 From 085771ec14b9bdb843fe9283d4703ced395d1b0b Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 22 Jul 2019 09:26:20 -0700 Subject: fs-verity: add UAPI header Add the UAPI header for fs-verity, including two ioctls: - FS_IOC_ENABLE_VERITY - FS_IOC_MEASURE_VERITY These ioctls are documented in the "User API" section of Documentation/filesystems/fsverity.rst. Examples of using these ioctls can be found in fsverity-utils (https://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/fsverity-utils.git). I've also written xfstests that test these ioctls (https://git.kernel.org/pub/scm/linux/kernel/git/ebiggers/xfstests-dev.git/log/?h=fsverity). Reviewed-by: Theodore Ts'o Reviewed-by: Jaegeuk Kim Signed-off-by: Eric Biggers --- Documentation/ioctl/ioctl-number.rst | 1 + include/uapi/linux/fsverity.h | 39 ++++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) create mode 100644 include/uapi/linux/fsverity.h (limited to 'include/uapi') diff --git a/Documentation/ioctl/ioctl-number.rst b/Documentation/ioctl/ioctl-number.rst index 7f8dcae7a230..bef79cd4c6b4 100644 --- a/Documentation/ioctl/ioctl-number.rst +++ b/Documentation/ioctl/ioctl-number.rst @@ -233,6 +233,7 @@ Code Seq# Include File Comments 'f' 00-0F fs/ext4/ext4.h conflict! 'f' 00-0F linux/fs.h conflict! 'f' 00-0F fs/ocfs2/ocfs2_fs.h conflict! +'f' 81-8F linux/fsverity.h 'g' 00-0F linux/usb/gadgetfs.h 'g' 20-2F linux/usb/g_printer.h 'h' 00-7F conflict! Charon filesystem diff --git a/include/uapi/linux/fsverity.h b/include/uapi/linux/fsverity.h new file mode 100644 index 000000000000..57d1d7fc0c34 --- /dev/null +++ b/include/uapi/linux/fsverity.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * fs-verity user API + * + * These ioctls can be used on filesystems that support fs-verity. See the + * "User API" section of Documentation/filesystems/fsverity.rst. 
+ * + * Copyright 2019 Google LLC + */ +#ifndef _UAPI_LINUX_FSVERITY_H +#define _UAPI_LINUX_FSVERITY_H + +#include +#include + +#define FS_VERITY_HASH_ALG_SHA256 1 + +struct fsverity_enable_arg { + __u32 version; + __u32 hash_algorithm; + __u32 block_size; + __u32 salt_size; + __u64 salt_ptr; + __u32 sig_size; + __u32 __reserved1; + __u64 sig_ptr; + __u64 __reserved2[11]; +}; + +struct fsverity_digest { + __u16 digest_algorithm; + __u16 digest_size; /* input/output */ + __u8 digest[]; +}; + +#define FS_IOC_ENABLE_VERITY _IOW('f', 133, struct fsverity_enable_arg) +#define FS_IOC_MEASURE_VERITY _IOWR('f', 134, struct fsverity_digest) + +#endif /* _UAPI_LINUX_FSVERITY_H */ -- cgit v1.2.3 From fe9918d3b228b3e8c726849d1486933f46b9069e Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 22 Jul 2019 09:26:21 -0700 Subject: fs: uapi: define verity bit for FS_IOC_GETFLAGS Add FS_VERITY_FL to the flags for FS_IOC_GETFLAGS, so that applications can easily determine whether a file is a verity file at the same time as they're checking other file flags. This flag will be gettable only; FS_IOC_SETFLAGS won't allow setting it, since an ioctl must be used instead to provide more parameters. This flag matches the on-disk bit that was already allocated for ext4. Reviewed-by: Theodore Ts'o Reviewed-by: Jaegeuk Kim Signed-off-by: Eric Biggers --- include/uapi/linux/fs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 59c71fa8c553..df261b7e0587 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -306,6 +306,7 @@ struct fscrypt_key { #define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/ #define FS_HUGE_FILE_FL 0x00040000 /* Reserved for ext4 */ #define FS_EXTENT_FL 0x00080000 /* Extents */ +#define FS_VERITY_FL 0x00100000 /* Verity protected inode */ #define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */ #define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */ #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ -- cgit v1.2.3 From 91826ba13855f73e252fef68369b3b0e1ed25253 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 29 Jul 2019 00:51:38 +0900 Subject: netfilter: add include guard to xt_connlabel.h Add a header include guard just in case. Signed-off-by: Masahiro Yamada Acked-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/xt_connlabel.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/netfilter/xt_connlabel.h b/include/uapi/linux/netfilter/xt_connlabel.h index 2312f0ec07b2..323f0dfc2a4e 100644 --- a/include/uapi/linux/netfilter/xt_connlabel.h +++ b/include/uapi/linux/netfilter/xt_connlabel.h @@ -1,4 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + +#ifndef _UAPI_XT_CONNLABEL_H +#define _UAPI_XT_CONNLABEL_H + #include #define XT_CONNLABEL_MAXBIT 127 @@ -11,3 +15,5 @@ struct xt_connlabel_mtinfo { __u16 bit; __u16 options; }; + +#endif /* _UAPI_XT_CONNLABEL_H */ -- cgit v1.2.3 From 6f9d451ab1a33728adb72d7ff66a7b374d665176 Mon Sep 17 00:00:00 2001 From: Toke Høiland-Jørgensen Date: Fri, 26 Jul 2019 18:06:55 +0200 Subject: xdp: Add devmap_hash map type for looking up devices by hashed index MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A common pattern when using xdp_redirect_map() is to create a device map where the lookup key is simply ifindex. 
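For instance, a minimal sketch of that pattern from the BPF side might look like the following (the map name, its size, the section conventions, and the hardcoded egress ifindex are illustrative assumptions, not taken from this patch):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 64);	/* must cover the largest ifindex in use */
	__type(key, __u32);		/* the ifindex doubles as the array index */
	__type(value, __u32);		/* ifindex of the egress device */
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_prog(struct xdp_md *ctx)
{
	__u32 egress_ifindex = 5;	/* hypothetical egress device */

	/* key the devmap directly by ifindex */
	return bpf_redirect_map(&tx_ports, egress_ifindex, 0);
}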
Because device maps are arrays, this leaves holes in the map, and the map has to be sized to fit the largest ifindex, regardless of how many devices are actually in the map. This patch adds a second type of device map where the key is looked up using a hashmap, instead of being used as an array index. This allows maps to be densely packed, so they can be smaller. Signed-off-by: Toke Høiland-Jørgensen Acked-by: Yonghong Song Acked-by: Jesper Dangaard Brouer Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 7 ++ include/linux/bpf_types.h | 1 + include/trace/events/xdp.h | 3 +- include/uapi/linux/bpf.h | 1 + kernel/bpf/devmap.c | 200 +++++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/verifier.c | 2 + net/core/filter.c | 9 +- 7 files changed, 220 insertions(+), 3 deletions(-) (limited to 'include/uapi') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index bfdb54dd2ad1..f9a506147c8a 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -713,6 +713,7 @@ struct xdp_buff; struct sk_buff; struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); +struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key); void __dev_map_flush(struct bpf_map *map); int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, struct net_device *dev_rx); @@ -799,6 +800,12 @@ static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, return NULL; } +static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map, + u32 key) +{ + return NULL; +} + static inline void __dev_map_flush(struct bpf_map *map) { } diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index eec5aeeeaf92..36a9c2325176 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -62,6 +62,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) #ifdef CONFIG_NET BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) +BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops) BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops) #if defined(CONFIG_BPF_STREAM_PARSER) BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h index 68899fdc985b..8c8420230a10 100644 --- a/include/trace/events/xdp.h +++ b/include/trace/events/xdp.h @@ -175,7 +175,8 @@ struct _bpf_dtab_netdev { #endif /* __DEVMAP_OBJ_TYPE */ #define devmap_ifindex(fwd, map) \ - ((map->map_type == BPF_MAP_TYPE_DEVMAP) ? \ + ((map->map_type == BPF_MAP_TYPE_DEVMAP || \ + map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) ? \ ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0) #define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e985f07a98ed..6bbef0c7f585 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -134,6 +134,7 @@ enum bpf_map_type { BPF_MAP_TYPE_QUEUE, BPF_MAP_TYPE_STACK, BPF_MAP_TYPE_SK_STORAGE, + BPF_MAP_TYPE_DEVMAP_HASH, }; /* Note that tracing related programs such as diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index a0501266bdb8..9af048a932b5 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -37,6 +37,12 @@ * notifier hook walks the map we know that new dev references can not be * added by the user because core infrastructure ensures dev_get_by_index() * calls will fail at this point. 
+ * + * The devmap_hash type is a map type which interprets keys as ifindexes and + * indexes these using a hashmap. This allows maps that use ifindex as key to be + * densely packed instead of having holes in the lookup array for unused + * ifindexes. The setup and packet enqueue/send code is shared between the two + * types of devmap; only the lookup and insertion is different. */ #include #include @@ -59,6 +65,7 @@ struct xdp_bulk_queue { struct bpf_dtab_netdev { struct net_device *dev; /* must be first member, due to tracepoint */ + struct hlist_node index_hlist; struct bpf_dtab *dtab; struct xdp_bulk_queue __percpu *bulkq; struct rcu_head rcu; @@ -70,11 +77,30 @@ struct bpf_dtab { struct bpf_dtab_netdev **netdev_map; struct list_head __percpu *flush_list; struct list_head list; + + /* these are only used for DEVMAP_HASH type maps */ + struct hlist_head *dev_index_head; + spinlock_t index_lock; + unsigned int items; + u32 n_buckets; }; static DEFINE_SPINLOCK(dev_map_lock); static LIST_HEAD(dev_map_list); +static struct hlist_head *dev_map_create_hash(unsigned int entries) +{ + int i; + struct hlist_head *hash; + + hash = kmalloc_array(entries, sizeof(*hash), GFP_KERNEL); + if (hash != NULL) + for (i = 0; i < entries; i++) + INIT_HLIST_HEAD(&hash[i]); + + return hash; +} + static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) { int err, cpu; @@ -97,6 +123,14 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *); cost += sizeof(struct list_head) * num_possible_cpus(); + if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { + dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries); + + if (!dtab->n_buckets) /* Overflow check */ + return -EINVAL; + cost += sizeof(struct hlist_head) * dtab->n_buckets; + } + /* if map size is larger than memlock limit, reject it */ err = bpf_map_charge_init(&dtab->map.memory, cost); if (err) @@ -115,8 +149,18 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) if (!dtab->netdev_map) goto free_percpu; + if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { + dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets); + if (!dtab->dev_index_head) + goto free_map_area; + + spin_lock_init(&dtab->index_lock); + } + return 0; +free_map_area: + bpf_map_area_free(dtab->netdev_map); free_percpu: free_percpu(dtab->flush_list); free_charge: @@ -198,6 +242,7 @@ static void dev_map_free(struct bpf_map *map) free_percpu(dtab->flush_list); bpf_map_area_free(dtab->netdev_map); + kfree(dtab->dev_index_head); kfree(dtab); } @@ -218,6 +263,70 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key) return 0; } +static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab, + int idx) +{ + return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)]; +} + +struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key) +{ + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + struct hlist_head *head = dev_map_index_hash(dtab, key); + struct bpf_dtab_netdev *dev; + + hlist_for_each_entry_rcu(dev, head, index_hlist) + if (dev->idx == key) + return dev; + + return NULL; +} + +static int dev_map_hash_get_next_key(struct bpf_map *map, void *key, + void *next_key) +{ + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + u32 idx, *next = next_key; + struct bpf_dtab_netdev *dev, *next_dev; + struct hlist_head *head; + int i = 0; + + if (!key) + goto find_first; + + idx = *(u32 
*)key; + + dev = __dev_map_hash_lookup_elem(map, idx); + if (!dev) + goto find_first; + + next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)), + struct bpf_dtab_netdev, index_hlist); + + if (next_dev) { + *next = next_dev->idx; + return 0; + } + + i = idx & (dtab->n_buckets - 1); + i++; + + find_first: + for (; i < dtab->n_buckets; i++) { + head = dev_map_index_hash(dtab, i); + + next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), + struct bpf_dtab_netdev, + index_hlist); + if (next_dev) { + *next = next_dev->idx; + return 0; + } + } + + return -ENOENT; +} + static int bq_xmit_all(struct xdp_bulk_queue *bq, u32 flags, bool in_napi_ctx) { @@ -373,6 +482,15 @@ static void *dev_map_lookup_elem(struct bpf_map *map, void *key) return dev ? &dev->ifindex : NULL; } +static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key) +{ + struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map, + *(u32 *)key); + struct net_device *dev = obj ? obj->dev : NULL; + + return dev ? &dev->ifindex : NULL; +} + static void dev_map_flush_old(struct bpf_dtab_netdev *dev) { if (dev->dev->netdev_ops->ndo_xdp_xmit) { @@ -422,6 +540,28 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key) return 0; } +static int dev_map_hash_delete_elem(struct bpf_map *map, void *key) +{ + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + struct bpf_dtab_netdev *old_dev; + int k = *(u32 *)key; + unsigned long flags; + int ret = -ENOENT; + + spin_lock_irqsave(&dtab->index_lock, flags); + + old_dev = __dev_map_hash_lookup_elem(map, k); + if (old_dev) { + dtab->items--; + hlist_del_init_rcu(&old_dev->index_hlist); + call_rcu(&old_dev->rcu, __dev_map_entry_free); + ret = 0; + } + spin_unlock_irqrestore(&dtab->index_lock, flags); + + return ret; +} + static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net, struct bpf_dtab *dtab, u32 ifindex, @@ -502,6 +642,56 @@ static int dev_map_update_elem(struct bpf_map *map, void *key, void *value, map, key, value, map_flags); } +static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map, + void *key, void *value, u64 map_flags) +{ + struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); + struct bpf_dtab_netdev *dev, *old_dev; + u32 ifindex = *(u32 *)value; + u32 idx = *(u32 *)key; + unsigned long flags; + + if (unlikely(map_flags > BPF_EXIST || !ifindex)) + return -EINVAL; + + old_dev = __dev_map_hash_lookup_elem(map, idx); + if (old_dev && (map_flags & BPF_NOEXIST)) + return -EEXIST; + + dev = __dev_map_alloc_node(net, dtab, ifindex, idx); + if (IS_ERR(dev)) + return PTR_ERR(dev); + + spin_lock_irqsave(&dtab->index_lock, flags); + + if (old_dev) { + hlist_del_rcu(&old_dev->index_hlist); + } else { + if (dtab->items >= dtab->map.max_entries) { + spin_unlock_irqrestore(&dtab->index_lock, flags); + call_rcu(&dev->rcu, __dev_map_entry_free); + return -E2BIG; + } + dtab->items++; + } + + hlist_add_head_rcu(&dev->index_hlist, + dev_map_index_hash(dtab, idx)); + spin_unlock_irqrestore(&dtab->index_lock, flags); + + if (old_dev) + call_rcu(&old_dev->rcu, __dev_map_entry_free); + + return 0; +} + +static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value, + u64 map_flags) +{ + return __dev_map_hash_update_elem(current->nsproxy->net_ns, + map, key, value, map_flags); +} + const struct bpf_map_ops dev_map_ops = { .map_alloc = dev_map_alloc, .map_free = dev_map_free, @@ -512,6 +702,16 @@ const struct bpf_map_ops dev_map_ops = { .map_check_btf = 
map_check_no_btf, }; +const struct bpf_map_ops dev_map_hash_ops = { + .map_alloc = dev_map_alloc, + .map_free = dev_map_free, + .map_get_next_key = dev_map_hash_get_next_key, + .map_lookup_elem = dev_map_hash_lookup_elem, + .map_update_elem = dev_map_hash_update_elem, + .map_delete_elem = dev_map_hash_delete_elem, + .map_check_btf = map_check_no_btf, +}; + static int dev_map_notification(struct notifier_block *notifier, ulong event, void *ptr) { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5900cbb966b1..cef851cd5c36 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3457,6 +3457,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, goto error; break; case BPF_MAP_TYPE_DEVMAP: + case BPF_MAP_TYPE_DEVMAP_HASH: if (func_id != BPF_FUNC_redirect_map && func_id != BPF_FUNC_map_lookup_elem) goto error; @@ -3539,6 +3540,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env, break; case BPF_FUNC_redirect_map: if (map->map_type != BPF_MAP_TYPE_DEVMAP && + map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && map->map_type != BPF_MAP_TYPE_CPUMAP && map->map_type != BPF_MAP_TYPE_XSKMAP) goto error; diff --git a/net/core/filter.c b/net/core/filter.c index 3961437ccc50..1eee70f80fba 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3517,7 +3517,8 @@ static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, int err; switch (map->map_type) { - case BPF_MAP_TYPE_DEVMAP: { + case BPF_MAP_TYPE_DEVMAP: + case BPF_MAP_TYPE_DEVMAP_HASH: { struct bpf_dtab_netdev *dst = fwd; err = dev_map_enqueue(dst, xdp, dev_rx); @@ -3554,6 +3555,7 @@ void xdp_do_flush_map(void) if (map) { switch (map->map_type) { case BPF_MAP_TYPE_DEVMAP: + case BPF_MAP_TYPE_DEVMAP_HASH: __dev_map_flush(map); break; case BPF_MAP_TYPE_CPUMAP: @@ -3574,6 +3576,8 @@ static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index) switch (map->map_type) { case BPF_MAP_TYPE_DEVMAP: return __dev_map_lookup_elem(map, index); + case BPF_MAP_TYPE_DEVMAP_HASH: + return __dev_map_hash_lookup_elem(map, index); case BPF_MAP_TYPE_CPUMAP: return __cpu_map_lookup_elem(map, index); case BPF_MAP_TYPE_XSKMAP: @@ -3655,7 +3659,8 @@ static int xdp_do_generic_redirect_map(struct net_device *dev, ri->tgt_value = NULL; WRITE_ONCE(ri->map, NULL); - if (map->map_type == BPF_MAP_TYPE_DEVMAP) { + if (map->map_type == BPF_MAP_TYPE_DEVMAP || + map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { struct bpf_dtab_netdev *dst = fwd; err = dev_map_generic_redirect(dst, skb, xdp_prog); -- cgit v1.2.3 From 509ce4c85bd055ee1013bc853b5d543428b0f017 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 29 Jul 2019 00:27:39 +0900 Subject: ppdev: add header include guard Add a header include guard just in case. Signed-off-by: Masahiro Yamada Link: https://lore.kernel.org/r/20190728152739.9249-1-yamada.masahiro@socionext.com Signed-off-by: Greg Kroah-Hartman --- include/uapi/linux/ppdev.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/ppdev.h b/include/uapi/linux/ppdev.h index 8fe3c64d149e..eb895b83f2bd 100644 --- a/include/uapi/linux/ppdev.h +++ b/include/uapi/linux/ppdev.h @@ -15,6 +15,9 @@ * Added PPGETMODES/PPGETMODE/PPGETPHASE, Fred Barnes , 03/01/2001 */ +#ifndef _UAPI_LINUX_PPDEV_H +#define _UAPI_LINUX_PPDEV_H + #define PP_IOCTL 'p' /* Set mode for read/write (e.g. 
IEEE1284_MODE_EPP) */ @@ -97,4 +100,4 @@ struct ppdev_frob_struct { /* only masks user-visible flags */ #define PP_FLAGMASK (PP_FASTWRITE | PP_FASTREAD | PP_W91284PIC) - +#endif /* _UAPI_LINUX_PPDEV_H */ -- cgit v1.2.3 From 70d66244317e958092e9c971b08dd5b7fd29d9cb Mon Sep 17 00:00:00 2001 From: Petar Penkov Date: Mon, 29 Jul 2019 09:59:15 -0700 Subject: bpf: add bpf_tcp_gen_syncookie helper This helper function allows BPF programs to try to generate SYN cookies, given a reference to a listener socket. The function works from XDP and with an skb context, since bpf_skc_lookup_tcp can look up a socket in both cases. Signed-off-by: Petar Penkov Suggested-by: Eric Dumazet Reviewed-by: Lorenz Bauer Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 30 +++++++++++++++++++- net/core/filter.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 6bbef0c7f585..4393bd4b2419 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -2714,6 +2714,33 @@ union bpf_attr { * **-EPERM** if no permission to send the *sig*. * * **-EAGAIN** if bpf program can try again. + * + * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) + * Description + * Try to issue a SYN cookie for the packet with corresponding + * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. + * + * *iph* points to the start of the IPv4 or IPv6 header, while + * *iph_len* contains **sizeof**\ (**struct iphdr**) or + * **sizeof**\ (**struct ipv6hdr**). + * + * *th* points to the start of the TCP header, while *th_len* + * contains the length of the TCP header. + * + * Return + * On success, the lower 32 bits hold the generated SYN cookie, + * followed by 16 bits which hold the MSS value for that cookie, + * and the top 16 bits are unused. 
+ * + * On failure, the returned value is one of the following: + * + * **-EINVAL** SYN cookie cannot be issued due to error + * + * **-ENOENT** SYN cookie should not be issued (no SYN flood) + * + * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies + * + * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2825,7 +2852,8 @@ union bpf_attr { FN(strtoul), \ FN(sk_storage_get), \ FN(sk_storage_delete), \ - FN(send_signal), + FN(send_signal), \ + FN(tcp_gen_syncookie), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/net/core/filter.c b/net/core/filter.c index 1eee70f80fba..5a2707918629 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -5855,6 +5855,75 @@ static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = { .arg5_type = ARG_CONST_SIZE, }; +BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len, + struct tcphdr *, th, u32, th_len) +{ +#ifdef CONFIG_SYN_COOKIES + u32 cookie; + u16 mss; + + if (unlikely(th_len < sizeof(*th) || th_len != th->doff * 4)) + return -EINVAL; + + if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) + return -EINVAL; + + if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies) + return -ENOENT; + + if (!th->syn || th->ack || th->fin || th->rst) + return -EINVAL; + + if (unlikely(iph_len < sizeof(struct iphdr))) + return -EINVAL; + + /* Both struct iphdr and struct ipv6hdr have the version field at the + * same offset so we can cast to the shorter header (struct iphdr). + */ + switch (((struct iphdr *)iph)->version) { + case 4: + if (sk->sk_family == AF_INET6 && sk->sk_ipv6only) + return -EINVAL; + + mss = tcp_v4_get_syncookie(sk, iph, th, &cookie); + break; + +#if IS_BUILTIN(CONFIG_IPV6) + case 6: + if (unlikely(iph_len < sizeof(struct ipv6hdr))) + return -EINVAL; + + if (sk->sk_family != AF_INET6) + return -EINVAL; + + mss = tcp_v6_get_syncookie(sk, iph, th, &cookie); + break; +#endif /* CONFIG_IPV6 */ + + default: + return -EPROTONOSUPPORT; + } + if (mss <= 0) + return -ENOENT; + + return cookie | ((u64)mss << 32); +#else + return -EOPNOTSUPP; +#endif /* CONFIG_SYN_COOKIES */ +} + +static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = { + .func = bpf_tcp_gen_syncookie, + .gpl_only = true, /* __cookie_v*_init_sequence() is GPL */ + .pkt_access = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_SOCK_COMMON, + .arg2_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_CONST_SIZE, + .arg4_type = ARG_PTR_TO_MEM, + .arg5_type = ARG_CONST_SIZE, +}; + #endif /* CONFIG_INET */ bool bpf_helper_changes_pkt_data(void *func) @@ -6144,6 +6213,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_tcp_check_syncookie_proto; case BPF_FUNC_skb_ecn_set_ce: return &bpf_skb_ecn_set_ce_proto; + case BPF_FUNC_tcp_gen_syncookie: + return &bpf_tcp_gen_syncookie_proto; #endif default: return bpf_base_func_proto(func_id); @@ -6183,6 +6254,8 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_xdp_skc_lookup_tcp_proto; case BPF_FUNC_tcp_check_syncookie: return &bpf_tcp_check_syncookie_proto; + case BPF_FUNC_tcp_gen_syncookie: + return &bpf_tcp_gen_syncookie_proto; #endif default: return bpf_base_func_proto(func_id); -- cgit v1.2.3 From 796e90f42b7e52cf1c88e978e1d5ee69c102d85d Mon Sep 17 00:00:00 2001 From: John Crispin Date: Tue, 30 Jul 2019 18:37:00 +0200 Subject: cfg80211: add support for parsing OBSS PD attributes 
Add the data structure, policy and parsing code allowing userland to send the OBSS PD information into the kernel. Signed-off-by: John Crispin Link: https://lore.kernel.org/r/20190730163701.18836-2-john@phrozen.org Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 15 +++++++++++++++ include/uapi/linux/nl80211.h | 27 ++++++++++++++++++++++++++ net/wireless/nl80211.c | 45 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 87 insertions(+) (limited to 'include/uapi') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 45850a8391d9..35ec1f0a2bf9 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -246,6 +246,19 @@ struct ieee80211_rate { u16 hw_value, hw_value_short; }; +/** + * struct ieee80211_he_obss_pd - AP settings for spatial reuse + * + * @enable: is the feature enabled. + * @min_offset: minimal tx power offset an associated station shall use + * @max_offset: maximum tx power offset an associated station shall use + */ +struct ieee80211_he_obss_pd { + bool enable; + u8 min_offset; + u8 max_offset; +}; + /** * struct ieee80211_sta_ht_cap - STA's HT capabilities * @@ -896,6 +909,7 @@ enum cfg80211_ap_settings_flags { * @vht_required: stations must support VHT * @twt_responder: Enable Target Wait Time * @flags: flags, as defined in enum cfg80211_ap_settings_flags + * @he_obss_pd: OBSS Packet Detection settings */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; @@ -923,6 +937,7 @@ struct cfg80211_ap_settings { bool ht_required, vht_required; bool twt_responder; u32 flags; + struct ieee80211_he_obss_pd he_obss_pd; }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index c45587c2cf44..822851d369ab 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2358,6 +2358,9 @@ enum nl80211_commands { * * @NL80211_ATTR_TWT_RESPONDER: Enable target wait time responder support. * + * @NL80211_ATTR_HE_OBSS_PD: nested attribute for OBSS Packet Detection + * functionality. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2815,6 +2818,8 @@ enum nl80211_attrs { NL80211_ATTR_TWT_RESPONDER, + NL80211_ATTR_HE_OBSS_PD, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -6490,4 +6495,26 @@ enum nl80211_peer_measurement_ftm_resp { NL80211_PMSR_FTM_RESP_ATTR_MAX = NUM_NL80211_PMSR_FTM_RESP_ATTR - 1 }; +/** + * enum nl80211_obss_pd_attributes - OBSS packet detection attributes + * @__NL80211_HE_OBSS_PD_ATTR_INVALID: Invalid + * + * @NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET: the OBSS PD minimum tx power offset. + * @NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET: the OBSS PD maximum tx power offset. + * + * @__NL80211_HE_OBSS_PD_ATTR_LAST: Internal + * @NL80211_HE_OBSS_PD_ATTR_MAX: highest OBSS PD attribute. 
+ */ +enum nl80211_obss_pd_attributes { + __NL80211_HE_OBSS_PD_ATTR_INVALID, + + NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET, + NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET, + + /* keep last */ + __NL80211_HE_OBSS_PD_ATTR_LAST, + NL80211_HE_OBSS_PD_ATTR_MAX = __NL80211_HE_OBSS_PD_ATTR_LAST - 1, +}; + + #endif /* __LINUX_NL80211_H */ diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 1e78ed45a759..3006cfce7158 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -281,6 +281,14 @@ nl80211_pmsr_attr_policy[NL80211_PMSR_ATTR_MAX + 1] = { NLA_POLICY_NESTED_ARRAY(nl80211_psmr_peer_attr_policy), }; +static const struct nla_policy +he_obss_pd_policy[NL80211_HE_OBSS_PD_ATTR_MAX + 1] = { + [NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] = + NLA_POLICY_RANGE(NLA_U8, 1, 20), + [NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET] = + NLA_POLICY_RANGE(NLA_U8, 1, 20), +}; + const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, @@ -574,6 +582,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_SAE_PASSWORD] = { .type = NLA_BINARY, .len = SAE_PASSWORD_MAX_LEN }, [NL80211_ATTR_TWT_RESPONDER] = { .type = NLA_FLAG }, + [NL80211_ATTR_HE_OBSS_PD] = NLA_POLICY_NESTED(he_obss_pd_policy), }; /* policy for the key attributes */ @@ -4405,6 +4414,34 @@ static int nl80211_parse_beacon(struct cfg80211_registered_device *rdev, return 0; } +static int nl80211_parse_he_obss_pd(struct nlattr *attrs, + struct ieee80211_he_obss_pd *he_obss_pd) +{ + struct nlattr *tb[NL80211_HE_OBSS_PD_ATTR_MAX + 1]; + int err; + + err = nla_parse_nested(tb, NL80211_HE_OBSS_PD_ATTR_MAX, attrs, + he_obss_pd_policy, NULL); + if (err) + return err; + + if (!tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] || + !tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]) + return -EINVAL; + + he_obss_pd->min_offset = + nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]); + he_obss_pd->max_offset = + nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]); + + if (he_obss_pd->min_offset >= he_obss_pd->max_offset) + return -EINVAL; + + he_obss_pd->enable = true; + + return 0; +} + static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params, const u8 *rates) { @@ -4689,6 +4726,14 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) params.twt_responder = nla_get_flag(info->attrs[NL80211_ATTR_TWT_RESPONDER]); + if (info->attrs[NL80211_ATTR_HE_OBSS_PD]) { + err = nl80211_parse_he_obss_pd( + info->attrs[NL80211_ATTR_HE_OBSS_PD], + ¶ms.he_obss_pd); + if (err) + return err; + } + nl80211_calculate_ap_params(¶ms); if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT]) -- cgit v1.2.3 From 3247b272048ffefc12c7dcfa3169bd03047a49bc Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Tue, 30 Jul 2019 15:20:41 +0300 Subject: net: bridge: mcast: add delete due to fast-leave mdb flag In user-space there's no way to distinguish why an mdb entry was deleted and that is a problem for daemons which would like to keep the mdb in sync with remote ends (e.g. mlag) but would also like to converge faster. In almost all cases we'd like to age-out the remote entry for performance and convergence reasons except when fast-leave is enabled. In that case we want explicit immediate remote delete, thus add mdb flag which is set only when the entry is being deleted due to fast-leave. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. 
Miller --- include/uapi/linux/if_bridge.h | 1 + net/bridge/br_mdb.c | 2 ++ net/bridge/br_multicast.c | 2 +- net/bridge/br_private.h | 1 + 4 files changed, 5 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index 773e476a8e54..1b3c2b643a02 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -237,6 +237,7 @@ struct br_mdb_entry { #define MDB_PERMANENT 1 __u8 state; #define MDB_FLAGS_OFFLOAD (1 << 0) +#define MDB_FLAGS_FAST_LEAVE (1 << 1) __u8 flags; __u16 vid; struct { diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index bf6acd34234d..428af1abf8cc 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -60,6 +60,8 @@ static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags) e->flags = 0; if (flags & MDB_PG_FLAGS_OFFLOAD) e->flags |= MDB_FLAGS_OFFLOAD; + if (flags & MDB_PG_FLAGS_FAST_LEAVE) + e->flags |= MDB_FLAGS_FAST_LEAVE; } static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip) diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 3d8deac2353d..3d4b2817687f 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1393,7 +1393,7 @@ br_multicast_leave_group(struct net_bridge *br, del_timer(&p->timer); kfree_rcu(p, rcu); br_mdb_notify(br->dev, port, group, RTM_DELMDB, - p->flags); + p->flags | MDB_PG_FLAGS_FAST_LEAVE); if (!mp->ports && !mp->host_joined && netif_running(br->dev)) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index e8cf03b43b7d..c4fd307fbfdc 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -199,6 +199,7 @@ struct net_bridge_fdb_entry { #define MDB_PG_FLAGS_PERMANENT BIT(0) #define MDB_PG_FLAGS_OFFLOAD BIT(1) +#define MDB_PG_FLAGS_FAST_LEAVE BIT(2) struct net_bridge_port_group { struct net_bridge_port *port; -- cgit v1.2.3 From 3695eae5fee0605f316fbaad0b9e3de791d7dfaf Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Sun, 28 Jul 2019 00:22:29 +0200 Subject: pidfd: add P_PIDFD to waitid() This adds the P_PIDFD type to waitid(). One of the last remaining bits for the pidfd api is to make it possible to wait on pidfds. With P_PIDFD added to waitid() the parts of userspace that want to use the pidfd api to exclusively manage processes can do so now. One of the things this will unblock in the future is the ability to make it possible to retrieve the exit status via waitid(P_PIDFD) for non-parent processes if handed a _suitable_ pidfd that has this feature set. This is similar to what you can do on FreeBSD with kqueue(). It might even end up being possible to wait on a process as a non-parent if an appropriate property is enabled on the pidfd. With P_PIDFD no scoping of the process identified by the pidfd is possible, i.e. it explicitly blocks things such as wait4(-1), wait4(0), waitid(P_ALL), waitid(P_PGID) etc. It only allows for semantics equivalent to wait4(pid), waitid(P_PID). Users that need scoping should rely on pid-based wait*() syscalls for now. Signed-off-by: Christian Brauner Reviewed-by: Kees Cook Reviewed-by: Oleg Nesterov Cc: Arnd Bergmann Cc: "Eric W. 
Biederman" Cc: Joel Fernandes (Google) Cc: Thomas Gleixner Cc: David Howells Cc: Jann Horn Cc: Andy Lutomirsky Cc: Andrew Morton Cc: Aleksa Sarai Cc: Linus Torvalds Cc: Al Viro Link: https://lore.kernel.org/r/20190727222229.6516-2-christian@brauner.io --- include/linux/pid.h | 4 ++++ include/uapi/linux/wait.h | 1 + kernel/exit.c | 33 ++++++++++++++++++++++++++++++--- kernel/fork.c | 8 ++++++++ kernel/signal.c | 7 +++++-- 5 files changed, 48 insertions(+), 5 deletions(-) (limited to 'include/uapi') diff --git a/include/linux/pid.h b/include/linux/pid.h index 2a83e434db9d..9645b1194c98 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -72,6 +72,10 @@ extern struct pid init_struct_pid; extern const struct file_operations pidfd_fops; +struct file; + +extern struct pid *pidfd_pid(const struct file *file); + static inline struct pid *get_pid(struct pid *pid) { if (pid) diff --git a/include/uapi/linux/wait.h b/include/uapi/linux/wait.h index ac49a220cf2a..85b809fc9f11 100644 --- a/include/uapi/linux/wait.h +++ b/include/uapi/linux/wait.h @@ -17,6 +17,7 @@ #define P_ALL 0 #define P_PID 1 #define P_PGID 2 +#define P_PIDFD 3 #endif /* _UAPI_LINUX_WAIT_H */ diff --git a/kernel/exit.c b/kernel/exit.c index a75b6a7f458a..64bb6893a37d 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1552,6 +1552,23 @@ end: return retval; } +static struct pid *pidfd_get_pid(unsigned int fd) +{ + struct fd f; + struct pid *pid; + + f = fdget(fd); + if (!f.file) + return ERR_PTR(-EBADF); + + pid = pidfd_pid(f.file); + if (!IS_ERR(pid)) + get_pid(pid); + + fdput(f); + return pid; +} + static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop, int options, struct rusage *ru) { @@ -1574,19 +1591,29 @@ static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop, type = PIDTYPE_PID; if (upid <= 0) return -EINVAL; + + pid = find_get_pid(upid); break; case P_PGID: type = PIDTYPE_PGID; if (upid <= 0) return -EINVAL; + + pid = find_get_pid(upid); + break; + case P_PIDFD: + type = PIDTYPE_PID; + if (upid < 0) + return -EINVAL; + + pid = pidfd_get_pid(upid); + if (IS_ERR(pid)) + return PTR_ERR(pid); break; default: return -EINVAL; } - if (type < PIDTYPE_MAX) - pid = find_get_pid(upid); - wo.wo_type = type; wo.wo_pid = pid; wo.wo_flags = options; diff --git a/kernel/fork.c b/kernel/fork.c index d8ae0f1b4148..b169e2ca2d84 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1690,6 +1690,14 @@ static inline void rcu_copy_process(struct task_struct *p) #endif /* #ifdef CONFIG_TASKS_RCU */ } +struct pid *pidfd_pid(const struct file *file) +{ + if (file->f_op == &pidfd_fops) + return file->private_data; + + return ERR_PTR(-EBADF); +} + static int pidfd_release(struct inode *inode, struct file *file) { struct pid *pid = file->private_data; diff --git a/kernel/signal.c b/kernel/signal.c index 91b789dd6e72..2e567f64812f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3672,8 +3672,11 @@ static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info) static struct pid *pidfd_to_pid(const struct file *file) { - if (file->f_op == &pidfd_fops) - return file->private_data; + struct pid *pid; + + pid = pidfd_pid(file); + if (!IS_ERR(pid)) + return pid; return tgid_pidfd_to_pid(file); } -- cgit v1.2.3 From 68289c634344431d6f97480eb5384516f37e39b8 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 23 Jul 2019 20:43:43 +0900 Subject: crypto: add header include guards Add header include guards in case they are included multiple times. 
Signed-off-by: Masahiro Yamada Signed-off-by: Herbert Xu --- include/crypto/sha1_base.h | 5 +++++ include/crypto/sha256_base.h | 5 +++++ include/crypto/sha512_base.h | 5 +++++ include/crypto/sm3_base.h | 5 +++++ include/uapi/linux/cryptouser.h | 5 +++++ 5 files changed, 25 insertions(+) (limited to 'include/uapi') diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h index 63c14f2dc7bd..20fd1f7468af 100644 --- a/include/crypto/sha1_base.h +++ b/include/crypto/sha1_base.h @@ -5,6 +5,9 @@ * Copyright (C) 2015 Linaro Ltd */ +#ifndef _CRYPTO_SHA1_BASE_H +#define _CRYPTO_SHA1_BASE_H + #include #include #include @@ -101,3 +104,5 @@ static inline int sha1_base_finish(struct shash_desc *desc, u8 *out) *sctx = (struct sha1_state){}; return 0; } + +#endif /* _CRYPTO_SHA1_BASE_H */ diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h index 59159bc944f5..b50a035a2bc7 100644 --- a/include/crypto/sha256_base.h +++ b/include/crypto/sha256_base.h @@ -5,6 +5,9 @@ * Copyright (C) 2015 Linaro Ltd */ +#ifndef _CRYPTO_SHA256_BASE_H +#define _CRYPTO_SHA256_BASE_H + #include #include #include @@ -123,3 +126,5 @@ static inline int sha256_base_finish(struct shash_desc *desc, u8 *out) *sctx = (struct sha256_state){}; return 0; } + +#endif /* _CRYPTO_SHA256_BASE_H */ diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h index 099be8027f3f..fb19c77494dc 100644 --- a/include/crypto/sha512_base.h +++ b/include/crypto/sha512_base.h @@ -5,6 +5,9 @@ * Copyright (C) 2015 Linaro Ltd */ +#ifndef _CRYPTO_SHA512_BASE_H +#define _CRYPTO_SHA512_BASE_H + #include #include #include @@ -126,3 +129,5 @@ static inline int sha512_base_finish(struct shash_desc *desc, u8 *out) *sctx = (struct sha512_state){}; return 0; } + +#endif /* _CRYPTO_SHA512_BASE_H */ diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h index 31891b0dc7e3..1cbf9aa1fe52 100644 --- a/include/crypto/sm3_base.h +++ b/include/crypto/sm3_base.h @@ -6,6 +6,9 @@ * Written by Gilad Ben-Yossef */ +#ifndef _CRYPTO_SM3_BASE_H +#define _CRYPTO_SM3_BASE_H + #include #include #include @@ -104,3 +107,5 @@ static inline int sm3_base_finish(struct shash_desc *desc, u8 *out) *sctx = (struct sm3_state){}; return 0; } + +#endif /* _CRYPTO_SM3_BASE_H */ diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index 4dc1603919ce..5730c67f0617 100644 --- a/include/uapi/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h @@ -19,6 +19,9 @@ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ +#ifndef _UAPI_LINUX_CRYPTOUSER_H +#define _UAPI_LINUX_CRYPTOUSER_H + #include /* Netlink configuration messages. */ @@ -198,3 +201,5 @@ struct crypto_report_acomp { #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ sizeof(struct crypto_report_blkcipher)) + +#endif /* _UAPI_LINUX_CRYPTOUSER_H */ -- cgit v1.2.3 From d8f4981e2e8a968411105db568f3d48256b2ebbc Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Mon, 8 Jul 2019 20:09:21 -0400 Subject: drm/amdgpu: Add flag to wipe VRAM on release MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This memory allocation flag will be used to indicate BOs containing sensitive data that should not be leaked to other processes. 
Signed-off-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- include/uapi/drm/amdgpu_drm.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 11cc57322962..c99b4f2482c6 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -128,6 +128,10 @@ extern "C" { * for the second page onward should be set to NC. */ #define AMDGPU_GEM_CREATE_MQD_GFX9 (1 << 8) +/* Flag that BO may contain sensitive data that must be wiped before + * releasing the memory + */ +#define AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE (1 << 9) struct drm_amdgpu_gem_create_in { /** the requested memory size */ -- cgit v1.2.3 From 5e5412c365a32e452daa762eac36121cb8a370bb Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Tue, 30 Jul 2019 11:30:33 -0400 Subject: net/socket: fix GCC8+ Wpacked-not-aligned warnings There are a lot of those warnings with GCC8+ 64-bit, In file included from ./include/linux/sctp.h:42, from net/core/skbuff.c:47: ./include/uapi/linux/sctp.h:395:1: warning: alignment 4 of 'struct sctp_paddr_change' is less than 8 [-Wpacked-not-aligned] } __attribute__((packed, aligned(4))); ^ ./include/uapi/linux/sctp.h:728:1: warning: alignment 4 of 'struct sctp_setpeerprim' is less than 8 [-Wpacked-not-aligned] } __attribute__((packed, aligned(4))); ^ ./include/uapi/linux/sctp.h:727:26: warning: 'sspp_addr' offset 4 in 'struct sctp_setpeerprim' isn't aligned to 8 [-Wpacked-not-aligned] struct sockaddr_storage sspp_addr; ^~~~~~~~~ ./include/uapi/linux/sctp.h:741:1: warning: alignment 4 of 'struct sctp_prim' is less than 8 [-Wpacked-not-aligned] } __attribute__((packed, aligned(4))); ^ ./include/uapi/linux/sctp.h:740:26: warning: 'ssp_addr' offset 4 in 'struct sctp_prim' isn't aligned to 8 [-Wpacked-not-aligned] struct sockaddr_storage ssp_addr; ^~~~~~~~ ./include/uapi/linux/sctp.h:792:1: warning: alignment 4 of 'struct sctp_paddrparams' is less than 8 [-Wpacked-not-aligned] } __attribute__((packed, aligned(4))); ^ ./include/uapi/linux/sctp.h:784:26: warning: 'spp_address' offset 4 in 'struct sctp_paddrparams' isn't aligned to 8 [-Wpacked-not-aligned] struct sockaddr_storage spp_address; ^~~~~~~~~~~ ./include/uapi/linux/sctp.h:905:1: warning: alignment 4 of 'struct sctp_paddrinfo' is less than 8 [-Wpacked-not-aligned] } __attribute__((packed, aligned(4))); ^ ./include/uapi/linux/sctp.h:899:26: warning: 'spinfo_address' offset 4 in 'struct sctp_paddrinfo' isn't aligned to 8 [-Wpacked-not-aligned] struct sockaddr_storage spinfo_address; ^~~~~~~~~~~~~~ This is because the commit 20c9c825b12f ("[SCTP] Fix SCTP socket options to work with 32-bit apps on 64-bit kernels.") added "packed, aligned(4)" GCC attributes to some structures but one of the members, i.e, "struct sockaddr_storage" in those structures has the attribute, "aligned(__alignof__ (struct sockaddr *)" which is 8-byte on 64-bit systems, so the commit overwrites the designed alignments for "sockaddr_storage". To fix this, "struct sockaddr_storage" needs to be aligned to 4-byte as it is only used in those packed sctp structure which is part of UAPI, and "struct __kernel_sockaddr_storage" is used in some other places of UAPI that need not to change alignments in order to not breaking userspace. Use an implicit alignment for "struct __kernel_sockaddr_storage" so it can keep the same alignments as a member in both packed and un-packed structures without breaking UAPI. 
Suggested-by: David Laight Signed-off-by: Qian Cai Signed-off-by: David S. Miller --- include/uapi/linux/socket.h | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h index 8eb96021709c..c3409c8ec0dd 100644 --- a/include/uapi/linux/socket.h +++ b/include/uapi/linux/socket.h @@ -6,17 +6,24 @@ * Desired design of maximum size and alignment (see RFC2553) */ #define _K_SS_MAXSIZE 128 /* Implementation specific max size */ -#define _K_SS_ALIGNSIZE (__alignof__ (struct sockaddr *)) - /* Implementation specific desired alignment */ typedef unsigned short __kernel_sa_family_t; +/* + * The definition uses anonymous union and struct in order to control the + * default alignment. + */ struct __kernel_sockaddr_storage { - __kernel_sa_family_t ss_family; /* address family */ - /* Following field(s) are implementation specific */ - char __data[_K_SS_MAXSIZE - sizeof(unsigned short)]; + union { + struct { + __kernel_sa_family_t ss_family; /* address family */ + /* Following field(s) are implementation specific */ + char __data[_K_SS_MAXSIZE - sizeof(unsigned short)]; /* space to achieve desired size, */ /* _SS_MAXSIZE value minus size of ss_family */ -} __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */ + }; + void *__align; /* implementation specific desired alignment */ + }; +}; #endif /* _UAPI_LINUX_SOCKET_H */ -- cgit v1.2.3 From 69bb18ddfc4331ba1dea9db811caf93e95726408 Mon Sep 17 00:00:00 2001 From: Wu Hao Date: Sun, 4 Aug 2019 18:20:11 +0800 Subject: fpga: dfl: fme: add DFL_FPGA_FME_PORT_RELEASE/ASSIGN ioctl support. To support virtualization via PCIe SRIOV, this patch adds two ioctls under the FPGA Management Engine (FME) to release a port device and assign it back. To safely turn a port from PF into VF and enable PCIe SRIOV, the user must first invoke the PORT_RELEASE ioctl to release the port and remove its userspace interfaces, and then configure the PF/VF access register in the FME. After disabling SRIOV, the user invokes the PORT_ASSIGN ioctl to attach the port back to the PF. Ioctl interfaces: * DFL_FPGA_FME_PORT_RELEASE Release the platform device of the given port; this deletes the port platform device and removes the related userspace interfaces on the PF. Afterwards it is safe to configure PF/VF access mode to VF and enable VFs via SRIOV. * DFL_FPGA_FME_PORT_ASSIGN Assign the platform device of the given port back to the PF. After configuring PF/VF access mode back to PF, this ioctl re-adds the port platform device to re-enable the related userspace interfaces on the PF.
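A hedged sketch of this flow from userspace (the FME character-device path is an assumption for illustration; a real tool would discover the node, e.g. via sysfs):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fpga-dfl.h>

/* release = 1 before enabling SRIOV, release = 0 after disabling it */
static int toggle_port(int port_id, int release)
{
        int fd = open("/dev/dfl-fme.0", O_RDWR);        /* assumed node name */
        int ret;

        if (fd < 0)
                return -1;

        ret = ioctl(fd, release ? DFL_FPGA_FME_PORT_RELEASE :
                                  DFL_FPGA_FME_PORT_ASSIGN, &port_id);
        close(fd);
        return ret;
}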
Signed-off-by: Zhang Yi Z Signed-off-by: Xu Yilun Signed-off-by: Wu Hao Acked-by: Alan Tull Acked-by: Moritz Fischer Signed-off-by: Moritz Fischer Link: https://lore.kernel.org/r/1564914022-3710-2-git-send-email-hao.wu@intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/dfl-fme-main.c | 42 ++++++++++++++++ drivers/fpga/dfl.c | 113 +++++++++++++++++++++++++++++++++++++----- drivers/fpga/dfl.h | 10 ++++ include/uapi/linux/fpga-dfl.h | 18 +++++++ 4 files changed, 171 insertions(+), 12 deletions(-) (limited to 'include/uapi') diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c index 0be4635583d5..dfea2dee78c3 100644 --- a/drivers/fpga/dfl-fme-main.c +++ b/drivers/fpga/dfl-fme-main.c @@ -16,6 +16,7 @@ #include #include +#include #include #include "dfl.h" @@ -104,9 +105,50 @@ static void fme_hdr_uinit(struct platform_device *pdev, device_remove_groups(&pdev->dev, fme_hdr_groups); } +static long fme_hdr_ioctl_release_port(struct dfl_feature_platform_data *pdata, + unsigned long arg) +{ + struct dfl_fpga_cdev *cdev = pdata->dfl_cdev; + int port_id; + + if (get_user(port_id, (int __user *)arg)) + return -EFAULT; + + return dfl_fpga_cdev_release_port(cdev, port_id); +} + +static long fme_hdr_ioctl_assign_port(struct dfl_feature_platform_data *pdata, + unsigned long arg) +{ + struct dfl_fpga_cdev *cdev = pdata->dfl_cdev; + int port_id; + + if (get_user(port_id, (int __user *)arg)) + return -EFAULT; + + return dfl_fpga_cdev_assign_port(cdev, port_id); +} + +static long fme_hdr_ioctl(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned int cmd, unsigned long arg) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + + switch (cmd) { + case DFL_FPGA_FME_PORT_RELEASE: + return fme_hdr_ioctl_release_port(pdata, arg); + case DFL_FPGA_FME_PORT_ASSIGN: + return fme_hdr_ioctl_assign_port(pdata, arg); + } + + return -ENODEV; +} + static const struct dfl_feature_ops fme_hdr_ops = { .init = fme_hdr_init, .uinit = fme_hdr_uinit, + .ioctl = fme_hdr_ioctl, }; static struct dfl_feature_driver fme_feature_drvs[] = { diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index 4b66aaa32b5a..70ffe8b4c157 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -231,16 +231,20 @@ EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del); */ int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id) { - struct dfl_fpga_port_ops *port_ops = dfl_fpga_port_ops_get(pdev); - int port_id; + struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_fpga_port_ops *port_ops; + + if (pdata->id != FEATURE_DEV_ID_UNUSED) + return pdata->id == *(int *)pport_id; + port_ops = dfl_fpga_port_ops_get(pdev); if (!port_ops || !port_ops->get_id) return 0; - port_id = port_ops->get_id(pdev); + pdata->id = port_ops->get_id(pdev); dfl_fpga_port_ops_put(port_ops); - return port_id == *(int *)pport_id; + return pdata->id == *(int *)pport_id; } EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id); @@ -474,6 +478,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) pdata->dev = fdev; pdata->num = binfo->feature_num; pdata->dfl_cdev = binfo->cdev; + pdata->id = FEATURE_DEV_ID_UNUSED; mutex_init(&pdata->lock); lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type], dfl_pdata_key_strings[type]); @@ -973,25 +978,27 @@ void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev) { struct dfl_feature_platform_data *pdata, *ptmp; - remove_feature_devs(cdev); - mutex_lock(&cdev->lock); - if (cdev->fme_dev) { - /* the fme should be 
unregistered. */ - WARN_ON(device_is_registered(cdev->fme_dev)); + if (cdev->fme_dev) put_device(cdev->fme_dev); - } list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) { struct platform_device *port_dev = pdata->dev; - /* the port should be unregistered. */ - WARN_ON(device_is_registered(&port_dev->dev)); + /* remove released ports */ + if (!device_is_registered(&port_dev->dev)) { + dfl_id_free(feature_dev_id_type(port_dev), + port_dev->id); + platform_device_put(port_dev); + } + list_del(&pdata->node); put_device(&port_dev->dev); } mutex_unlock(&cdev->lock); + remove_feature_devs(cdev); + fpga_region_unregister(cdev->region); devm_kfree(cdev->parent, cdev); } @@ -1042,6 +1049,88 @@ static int __init dfl_fpga_init(void) return ret; } +/** + * dfl_fpga_cdev_release_port - release a port platform device + * + * @cdev: parent container device. + * @port_id: id of the port platform device. + * + * This function allows user to release a port platform device. This is a + * mandatory step before turn a port from PF into VF for SRIOV support. + * + * Return: 0 on success, negative error code otherwise. + */ +int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id) +{ + struct platform_device *port_pdev; + int ret = -ENODEV; + + mutex_lock(&cdev->lock); + port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id, + dfl_fpga_check_port_id); + if (!port_pdev) + goto unlock_exit; + + if (!device_is_registered(&port_pdev->dev)) { + ret = -EBUSY; + goto put_dev_exit; + } + + ret = dfl_feature_dev_use_begin(dev_get_platdata(&port_pdev->dev)); + if (ret) + goto put_dev_exit; + + platform_device_del(port_pdev); + cdev->released_port_num++; +put_dev_exit: + put_device(&port_pdev->dev); +unlock_exit: + mutex_unlock(&cdev->lock); + return ret; +} +EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port); + +/** + * dfl_fpga_cdev_assign_port - assign a port platform device back + * + * @cdev: parent container device. + * @port_id: id of the port platform device. + * + * This function allows user to assign a port platform device back. This is + * a mandatory step after disable SRIOV support. + * + * Return: 0 on success, negative error code otherwise. + */ +int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id) +{ + struct platform_device *port_pdev; + int ret = -ENODEV; + + mutex_lock(&cdev->lock); + port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id, + dfl_fpga_check_port_id); + if (!port_pdev) + goto unlock_exit; + + if (device_is_registered(&port_pdev->dev)) { + ret = -EBUSY; + goto put_dev_exit; + } + + ret = platform_device_add(port_pdev); + if (ret) + goto put_dev_exit; + + dfl_feature_dev_use_end(dev_get_platdata(&port_pdev->dev)); + cdev->released_port_num--; +put_dev_exit: + put_device(&port_pdev->dev); +unlock_exit: + mutex_unlock(&cdev->lock); + return ret; +} +EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port); + static void __exit dfl_fpga_exit(void) { dfl_chardev_uinit(); diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h index a8b869e9e5b7..6f7855e57869 100644 --- a/drivers/fpga/dfl.h +++ b/drivers/fpga/dfl.h @@ -183,6 +183,8 @@ struct dfl_feature { #define DEV_STATUS_IN_USE 0 +#define FEATURE_DEV_ID_UNUSED (-1) + /** * struct dfl_feature_platform_data - platform data for feature devices * @@ -191,6 +193,7 @@ struct dfl_feature { * @cdev: cdev of feature dev. * @dev: ptr to platform device linked with this platform data. * @dfl_cdev: ptr to container device. + * @id: id used for this feature device. * @disable_count: count for port disable. 
* @num: number for sub features. * @dev_status: dev status (e.g. DEV_STATUS_IN_USE). @@ -203,6 +206,7 @@ struct dfl_feature_platform_data { struct cdev cdev; struct platform_device *dev; struct dfl_fpga_cdev *dfl_cdev; + int id; unsigned int disable_count; unsigned long dev_status; void *private; @@ -373,6 +377,7 @@ void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info); * @fme_dev: FME feature device under this container device. * @lock: mutex lock to protect the port device list. * @port_dev_list: list of all port feature devices under this container device. + * @released_port_num: released port number under this container device. */ struct dfl_fpga_cdev { struct device *parent; @@ -380,6 +385,7 @@ struct dfl_fpga_cdev { struct device *fme_dev; struct mutex lock; struct list_head port_dev_list; + int released_port_num; }; struct dfl_fpga_cdev * @@ -407,4 +413,8 @@ dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data, return pdev; } + +int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id); +int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id); + #endif /* __FPGA_DFL_H */ diff --git a/include/uapi/linux/fpga-dfl.h b/include/uapi/linux/fpga-dfl.h index 2e324e515c41..ec70a0746e59 100644 --- a/include/uapi/linux/fpga-dfl.h +++ b/include/uapi/linux/fpga-dfl.h @@ -176,4 +176,22 @@ struct dfl_fpga_fme_port_pr { #define DFL_FPGA_FME_PORT_PR _IO(DFL_FPGA_MAGIC, DFL_FME_BASE + 0) +/** + * DFL_FPGA_FME_PORT_RELEASE - _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 1, + * int port_id) + * + * Driver releases the port per Port ID provided by caller. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_FME_PORT_RELEASE _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 1, int) + +/** + * DFL_FPGA_FME_PORT_ASSIGN - _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 2, + * int port_id) + * + * Driver assigns the port back per Port ID provided by caller. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_FME_PORT_ASSIGN _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 2, int) + #endif /* _UAPI_LINUX_FPGA_DFL_H */ -- cgit v1.2.3 From 63f0c60379650d82250f22e4cf4137ef3dc4f43d Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 23 Jul 2019 19:58:39 +0200 Subject: arm64: Introduce prctl() options to control the tagged user addresses ABI It is not desirable to relax the ABI to allow tagged user addresses into the kernel indiscriminately. This patch introduces a prctl() interface for enabling or disabling the tagged ABI with a global sysctl control for preventing applications from enabling the relaxed ABI (meant for testing user-space prctl() return error checking without reconfiguring the kernel). The ABI properties are inherited by threads of the same application and fork()'ed children but cleared on execve(). A Kconfig option allows the overall disabling of the relaxed ABI. The PR_SET_TAGGED_ADDR_CTRL will be expanded in the future to handle MTE-specific settings like imprecise vs precise exceptions. 
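A hedged sketch of the opt-in from userspace (the constants are spelled out in case the installed headers predate this patch):

#include <sys/prctl.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
#endif

static int enable_tagged_addr_abi(void)
{
        /* Opt in; inherited across fork(), cleared again on execve() */
        if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
                return -1;      /* fails with EINVAL if abi.tagged_addr is 0 */

        /* Read the setting back to confirm */
        return prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0) ==
                     PR_TAGGED_ADDR_ENABLE ? 0 : -1;
}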
Reviewed-by: Kees Cook Signed-off-by: Catalin Marinas Signed-off-by: Andrey Konovalov Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 9 +++++ arch/arm64/include/asm/processor.h | 8 ++++ arch/arm64/include/asm/thread_info.h | 1 + arch/arm64/include/asm/uaccess.h | 4 +- arch/arm64/kernel/process.c | 73 ++++++++++++++++++++++++++++++++++++ include/uapi/linux/prctl.h | 5 +++ kernel/sys.c | 12 ++++++ 7 files changed, 111 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3adcec05b1f6..5d254178b9ca 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1110,6 +1110,15 @@ config ARM64_SW_TTBR0_PAN zeroed area and reserved ASID. The user access routines restore the valid TTBR0_EL1 temporarily. +config ARM64_TAGGED_ADDR_ABI + bool "Enable the tagged user addresses syscall ABI" + default y + help + When this option is enabled, user applications can opt in to a + relaxed ABI via prctl() allowing tagged addresses to be passed + to system calls as pointer arguments. For details, see + Documentation/arm64/tagged-address-abi.txt. + menuconfig COMPAT bool "Kernel support for 32-bit EL0" depends on ARM64_4K_PAGES || EXPERT diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 844e2964b0f5..28eed40ffc12 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -306,6 +306,14 @@ extern void __init minsigstksz_setup(void); /* PR_PAC_RESET_KEYS prctl */ #define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg) +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI +/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */ +long set_tagged_addr_ctrl(unsigned long arg); +long get_tagged_addr_ctrl(void); +#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(arg) +#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl() +#endif + /* * For CONFIG_GCC_PLUGIN_STACKLEAK * diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 180b34ec5965..012238d8e58d 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -90,6 +90,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define TIF_SVE 23 /* Scalable Vector Extension in use */ #define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */ #define TIF_SSBD 25 /* Wants SSB mitigation */ +#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index a138e3b4f717..097d6bfac0b7 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -62,7 +62,9 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si { unsigned long ret, limit = current_thread_info()->addr_limit; - addr = untagged_addr(addr); + if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) && + test_thread_flag(TIF_TAGGED_ADDR)) + addr = untagged_addr(addr); __chk_user_ptr(addr); asm volatile( diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index f674f28df663..76b7c55026aa 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -38,6 +39,7 @@ #include #include #include +#include #include #include @@ -307,11 +309,18 @@ static void tls_thread_flush(void) } } +static void flush_tagged_addr_state(void) +{ + if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI)) + 
clear_thread_flag(TIF_TAGGED_ADDR); +} + void flush_thread(void) { fpsimd_flush_thread(); tls_thread_flush(); flush_ptrace_hw_breakpoint(current); + flush_tagged_addr_state(); } void release_thread(struct task_struct *dead_task) @@ -565,3 +574,67 @@ void arch_setup_new_exec(void) ptrauth_thread_init_user(current); } + +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI +/* + * Control the relaxed ABI allowing tagged user addresses into the kernel. + */ +static unsigned int tagged_addr_prctl_allowed = 1; + +long set_tagged_addr_ctrl(unsigned long arg) +{ + if (!tagged_addr_prctl_allowed) + return -EINVAL; + if (is_compat_task()) + return -EINVAL; + if (arg & ~PR_TAGGED_ADDR_ENABLE) + return -EINVAL; + + update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE); + + return 0; +} + +long get_tagged_addr_ctrl(void) +{ + if (!tagged_addr_prctl_allowed) + return -EINVAL; + if (is_compat_task()) + return -EINVAL; + + if (test_thread_flag(TIF_TAGGED_ADDR)) + return PR_TAGGED_ADDR_ENABLE; + + return 0; +} + +/* + * Global sysctl to disable the tagged user addresses support. This control + * only prevents the tagged address ABI enabling via prctl() and does not + * disable it for tasks that already opted in to the relaxed ABI. + */ +static int zero; +static int one = 1; + +static struct ctl_table tagged_addr_sysctl_table[] = { + { + .procname = "tagged_addr", + .mode = 0644, + .data = &tagged_addr_prctl_allowed, + .maxlen = sizeof(int), + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, + { } +}; + +static int __init tagged_addr_init(void) +{ + if (!register_sysctl("abi", tagged_addr_sysctl_table)) + return -EINVAL; + return 0; +} + +core_initcall(tagged_addr_init); +#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 094bb03b9cc2..2e927b3e9d6c 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -229,4 +229,9 @@ struct prctl_mm_map { # define PR_PAC_APDBKEY (1UL << 3) # define PR_PAC_APGAKEY (1UL << 4) +/* Tagged user address controls for arm64 */ +#define PR_SET_TAGGED_ADDR_CTRL 55 +#define PR_GET_TAGGED_ADDR_CTRL 56 +# define PR_TAGGED_ADDR_ENABLE (1UL << 0) + #endif /* _LINUX_PRCTL_H */ diff --git a/kernel/sys.c b/kernel/sys.c index 2969304c29fe..c6c4d5358bd3 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -124,6 +124,12 @@ #ifndef PAC_RESET_KEYS # define PAC_RESET_KEYS(a, b) (-EINVAL) #endif +#ifndef SET_TAGGED_ADDR_CTRL +# define SET_TAGGED_ADDR_CTRL(a) (-EINVAL) +#endif +#ifndef GET_TAGGED_ADDR_CTRL +# define GET_TAGGED_ADDR_CTRL() (-EINVAL) +#endif /* * this is where the system-wide overflow UID and GID are defined, for @@ -2492,6 +2498,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, return -EINVAL; error = PAC_RESET_KEYS(me, arg2); break; + case PR_SET_TAGGED_ADDR_CTRL: + error = SET_TAGGED_ADDR_CTRL(arg2); + break; + case PR_GET_TAGGED_ADDR_CTRL: + error = GET_TAGGED_ADDR_CTRL(); + break; default: error = -EINVAL; break; -- cgit v1.2.3 From 4b3e30ed3ec7864e798403a63ff2e96bd0c19ab0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 7 Aug 2019 00:23:07 -0500 Subject: Revert "drm/amdkfd: New IOCTL to allocate queue GWS" This reverts commit 1a058c3376765ee31d65e28cbbb9d4ff15120056. This interface is still in too much flux. Revert until it's sorted out. 
Acked-by: Oak Zeng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 28 ---------------------------- include/uapi/linux/kfd_ioctl.h | 20 +------------------- 2 files changed, 1 insertion(+), 47 deletions(-) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 26b15cc56c31..1d3cd5c50d5f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1567,32 +1567,6 @@ copy_from_user_failed: return err; } -static int kfd_ioctl_alloc_queue_gws(struct file *filep, - struct kfd_process *p, void *data) -{ - int retval; - struct kfd_ioctl_alloc_queue_gws_args *args = data; - struct kfd_dev *dev; - - if (!hws_gws_support) - return -ENODEV; - - dev = kfd_device_by_id(args->gpu_id); - if (!dev) { - pr_debug("Could not find gpu id 0x%x\n", args->gpu_id); - return -ENODEV; - } - if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) - return -ENODEV; - - mutex_lock(&p->mutex); - retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL); - mutex_unlock(&p->mutex); - - args->first_gws = 0; - return retval; -} - static int kfd_ioctl_get_dmabuf_info(struct file *filep, struct kfd_process *p, void *data) { @@ -1795,8 +1769,6 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF, kfd_ioctl_import_dmabuf, 0), - AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS, - kfd_ioctl_alloc_queue_gws, 0), }; #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 070d1bc7e725..20917c59f39c 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -410,21 +410,6 @@ struct kfd_ioctl_unmap_memory_from_gpu_args { __u32 n_success; /* to/from KFD */ }; -/* Allocate GWS for specific queue - * - * @gpu_id: device identifier - * @queue_id: queue's id that GWS is allocated for - * @num_gws: how many GWS to allocate - * @first_gws: index of the first GWS allocated. - * only support contiguous GWS allocation - */ -struct kfd_ioctl_alloc_queue_gws_args { - __u32 gpu_id; /* to KFD */ - __u32 queue_id; /* to KFD */ - __u32 num_gws; /* to KFD */ - __u32 first_gws; /* from KFD */ -}; - struct kfd_ioctl_get_dmabuf_info_args { __u64 size; /* from KFD */ __u64 metadata_ptr; /* to KFD */ @@ -544,10 +529,7 @@ enum kfd_mmio_remap { #define AMDKFD_IOC_IMPORT_DMABUF \ AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args) -#define AMDKFD_IOC_ALLOC_QUEUE_GWS \ - AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args) - #define AMDKFD_COMMAND_START 0x01 -#define AMDKFD_COMMAND_END 0x1F +#define AMDKFD_COMMAND_END 0x1E #endif -- cgit v1.2.3 From 7794f486ed0b1fa8022dd0a27b9babf86a46d1cf Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Wed, 7 Aug 2019 10:29:50 -0400 Subject: usbfs: Add ioctls for runtime power management It has been requested that usbfs should implement runtime power management, instead of forcing the device to remain at full power as long as the device file is open. This patch introduces that new feature. It does so by adding three new usbfs ioctls: USBDEVFS_FORBID_SUSPEND: Prevents the device from going into runtime suspend (and causes a resume if the device is already suspended). USBDEVFS_ALLOW_SUSPEND: Allows the device to go into runtime suspend. Some time may elapse before the device actually is suspended, depending on things like the autosuspend delay. 
USBDEVFS_WAIT_FOR_RESUME: Blocks until the call is interrupted by a signal or at least one runtime resume has occurred since the most recent ALLOW_SUSPEND ioctl call (which may mean immediately, even if the device is currently suspended). In the latter case, the device is prevented from suspending again just as if FORBID_SUSPEND was called before the ioctl returns. For backward compatibility, when the device file is first opened runtime suspends are forbidden. The userspace program can then allow suspends whenever it wants, and either resume the device directly (by forbidding suspends again) or wait for a resume from some other source (such as a remote wakeup). URBs submitted to a suspended device will fail or will complete with an appropriate error code. This combination of ioctls is sufficient for user programs to have nearly the same degree of control over a device's runtime power behavior as kernel drivers do. Still lacking is documentation for the new ioctls. I intend to add it later, after the existing documentation for the usbfs userspace API is straightened out into a reasonable form. Suggested-by: Mayuresh Kulkarni Signed-off-by: Alan Stern Link: https://lore.kernel.org/r/Pine.LNX.4.44L0.1908071013220.1514-100000@iolanthe.rowland.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/devio.c | 96 +++++++++++++++++++++++++++++++++++++-- drivers/usb/core/generic.c | 5 ++ drivers/usb/core/usb.h | 3 ++ include/uapi/linux/usbdevice_fs.h | 3 ++ 4 files changed, 102 insertions(+), 5 deletions(-) (limited to 'include/uapi') diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index b265ab5405f9..cdd34dcb2395 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -48,6 +48,9 @@ #define USB_DEVICE_MAX (USB_MAXBUS * 128) #define USB_SG_SIZE 16384 /* split-size for large txs */ +/* Mutual exclusion for ps->list in resume vs. release and remove */ +static DEFINE_MUTEX(usbfs_mutex); + struct usb_dev_state { struct list_head list; /* state list */ struct usb_device *dev; @@ -57,14 +60,17 @@ struct usb_dev_state { struct list_head async_completed; struct list_head memory_list; wait_queue_head_t wait; /* wake up if a request completed */ + wait_queue_head_t wait_for_resume; /* wake up upon runtime resume */ unsigned int discsignr; struct pid *disc_pid; const struct cred *cred; sigval_t disccontext; unsigned long ifclaimed; u32 disabled_bulk_eps; - bool privileges_dropped; unsigned long interface_allowed_mask; + int not_yet_resumed; + bool suspend_allowed; + bool privileges_dropped; }; struct usb_memory { @@ -694,9 +700,7 @@ static void driver_disconnect(struct usb_interface *intf) destroy_async_on_interface(ps, ifnum); } -/* The following routines are merely placeholders. There is no way - * to inform a user task about suspend or resumes. 
- */ +/* We don't care about suspend/resume of claimed interfaces */ static int driver_suspend(struct usb_interface *intf, pm_message_t msg) { return 0; @@ -707,12 +711,32 @@ static int driver_resume(struct usb_interface *intf) return 0; } +/* The following routines apply to the entire device, not interfaces */ +void usbfs_notify_suspend(struct usb_device *udev) +{ + /* We don't need to handle this */ +} + +void usbfs_notify_resume(struct usb_device *udev) +{ + struct usb_dev_state *ps; + + /* Protect against simultaneous remove or release */ + mutex_lock(&usbfs_mutex); + list_for_each_entry(ps, &udev->filelist, list) { + WRITE_ONCE(ps->not_yet_resumed, 0); + wake_up_all(&ps->wait_for_resume); + } + mutex_unlock(&usbfs_mutex); +} + struct usb_driver usbfs_driver = { .name = "usbfs", .probe = driver_probe, .disconnect = driver_disconnect, .suspend = driver_suspend, .resume = driver_resume, + .supports_autosuspend = 1, }; static int claimintf(struct usb_dev_state *ps, unsigned int ifnum) @@ -997,9 +1021,12 @@ static int usbdev_open(struct inode *inode, struct file *file) INIT_LIST_HEAD(&ps->async_completed); INIT_LIST_HEAD(&ps->memory_list); init_waitqueue_head(&ps->wait); + init_waitqueue_head(&ps->wait_for_resume); ps->disc_pid = get_pid(task_pid(current)); ps->cred = get_current_cred(); smp_wmb(); + + /* Can't race with resume; the device is already active */ list_add_tail(&ps->list, &dev->filelist); file->private_data = ps; usb_unlock_device(dev); @@ -1025,7 +1052,10 @@ static int usbdev_release(struct inode *inode, struct file *file) usb_lock_device(dev); usb_hub_release_all_ports(dev, ps); + /* Protect against simultaneous resume */ + mutex_lock(&usbfs_mutex); list_del_init(&ps->list); + mutex_unlock(&usbfs_mutex); for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed); ifnum++) { @@ -1033,7 +1063,8 @@ static int usbdev_release(struct inode *inode, struct file *file) releaseintf(ps, ifnum); } destroy_all_async(ps); - usb_autosuspend_device(dev); + if (!ps->suspend_allowed) + usb_autosuspend_device(dev); usb_unlock_device(dev); usb_put_dev(dev); put_pid(ps->disc_pid); @@ -2384,6 +2415,47 @@ static int proc_drop_privileges(struct usb_dev_state *ps, void __user *arg) return 0; } +static int proc_forbid_suspend(struct usb_dev_state *ps) +{ + int ret = 0; + + if (ps->suspend_allowed) { + ret = usb_autoresume_device(ps->dev); + if (ret == 0) + ps->suspend_allowed = false; + else if (ret != -ENODEV) + ret = -EIO; + } + return ret; +} + +static int proc_allow_suspend(struct usb_dev_state *ps) +{ + if (!connected(ps)) + return -ENODEV; + + WRITE_ONCE(ps->not_yet_resumed, 1); + if (!ps->suspend_allowed) { + usb_autosuspend_device(ps->dev); + ps->suspend_allowed = true; + } + return 0; +} + +static int proc_wait_for_resume(struct usb_dev_state *ps) +{ + int ret; + + usb_unlock_device(ps->dev); + ret = wait_event_interruptible(ps->wait_for_resume, + READ_ONCE(ps->not_yet_resumed) == 0); + usb_lock_device(ps->dev); + + if (ret != 0) + return -EINTR; + return proc_forbid_suspend(ps); +} + /* * NOTE: All requests here that have interface numbers as parameters * are assuming that somehow the configuration has been prevented from @@ -2578,6 +2650,15 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, case USBDEVFS_GET_SPEED: ret = ps->dev->speed; break; + case USBDEVFS_FORBID_SUSPEND: + ret = proc_forbid_suspend(ps); + break; + case USBDEVFS_ALLOW_SUSPEND: + ret = proc_allow_suspend(ps); + break; + case USBDEVFS_WAIT_FOR_RESUME: + ret = proc_wait_for_resume(ps); + break; } /* 
Handle variable-length commands */ @@ -2651,15 +2732,20 @@ static void usbdev_remove(struct usb_device *udev) { struct usb_dev_state *ps; + /* Protect against simultaneous resume */ + mutex_lock(&usbfs_mutex); while (!list_empty(&udev->filelist)) { ps = list_entry(udev->filelist.next, struct usb_dev_state, list); destroy_all_async(ps); wake_up_all(&ps->wait); + WRITE_ONCE(ps->not_yet_resumed, 0); + wake_up_all(&ps->wait_for_resume); list_del_init(&ps->list); if (ps->discsignr) kill_pid_usb_asyncio(ps->discsignr, EPIPE, ps->disccontext, ps->disc_pid, ps->cred); } + mutex_unlock(&usbfs_mutex); } static int usbdev_notify(struct notifier_block *self, diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c index 1ac9c1e5f773..38f8b3e31762 100644 --- a/drivers/usb/core/generic.c +++ b/drivers/usb/core/generic.c @@ -257,6 +257,8 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg) else rc = usb_port_suspend(udev, msg); + if (rc == 0) + usbfs_notify_suspend(udev); return rc; } @@ -273,6 +275,9 @@ static int generic_resume(struct usb_device *udev, pm_message_t msg) rc = hcd_bus_resume(udev, msg); else rc = usb_port_resume(udev, msg); + + if (rc == 0) + usbfs_notify_resume(udev); return rc; } diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index bd8d01f85a13..2932d1ec5def 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -95,6 +95,9 @@ extern int usb_runtime_idle(struct device *dev); extern int usb_enable_usb2_hardware_lpm(struct usb_device *udev); extern int usb_disable_usb2_hardware_lpm(struct usb_device *udev); +extern void usbfs_notify_suspend(struct usb_device *udev); +extern void usbfs_notify_resume(struct usb_device *udev); + #else static inline int usb_port_suspend(struct usb_device *udev, pm_message_t msg) diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h index 78efe870c2b7..d24bbb6d3ca1 100644 --- a/include/uapi/linux/usbdevice_fs.h +++ b/include/uapi/linux/usbdevice_fs.h @@ -223,5 +223,8 @@ struct usbdevfs_streams { * extending size of the data returned. */ #define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len) +#define USBDEVFS_FORBID_SUSPEND _IO('U', 33) +#define USBDEVFS_ALLOW_SUSPEND _IO('U', 34) +#define USBDEVFS_WAIT_FOR_RESUME _IO('U', 35) #endif /* _UAPI_LINUX_USBDEVICE_FS_H */ -- cgit v1.2.3 From cd48bdda4fb82c2fe569d97af4217c530168c99c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 8 Aug 2019 13:57:25 +0200 Subject: sock: make cookie generation global instead of per netns Generating and retrieving socket cookies is a useful feature that is exposed to BPF for various program types through the bpf_get_socket_cookie() helper. The fact that the cookie counter is per netns is quite a limitation for BPF in practice, in particular for programs in the host namespace that use socket cookies as part of a map lookup key, since they will cause socket cookie collisions, e.g. when attached to BPF cgroup hooks or cls_bpf on tc egress in the host namespace handling container traffic from veth or ipvlan devices with the peer in a different netns. Change the counter to be global instead. Socket cookie consumers must treat the value as opaque in any case. Not every socket must have a cookie generated, and knowledge of the counter value itself does not provide much value either way, hence conversion to global is fine. Signed-off-by: Daniel Borkmann Cc: Eric Dumazet Cc: Alexei Starovoitov Cc: Willem de Bruijn Cc: Martynas Pumputis Signed-off-by: David S.
Miller --- include/net/net_namespace.h | 1 - include/uapi/linux/bpf.h | 4 ++-- net/core/sock_diag.c | 3 ++- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include/uapi') diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index 4a9da951a794..cb668bc2692d 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -61,7 +61,6 @@ struct net { spinlock_t rules_mod_lock; u32 hash_mix; - atomic64_t cookie_gen; struct list_head list; /* list of network namespaces */ struct list_head exit_list; /* To linked to call pernet exit diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index fa1c753dcdbc..a5aa7d3ac6a1 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1466,8 +1466,8 @@ union bpf_attr { * If no cookie has been set yet, generate a new cookie. Once * generated, the socket cookie remains stable for the life of the * socket. This helper can be useful for monitoring per socket - * networking traffic statistics as it provides a unique socket - * identifier per namespace. + * networking traffic statistics as it provides a global socket + * identifier that can be assumed unique. * Return * A 8-byte long non-decreasing number on success, or 0 if the * socket field is missing inside *skb*. diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index 3312a5849a97..c13ffbd33d8d 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -19,6 +19,7 @@ static const struct sock_diag_handler *sock_diag_handlers[AF_MAX]; static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); static DEFINE_MUTEX(sock_diag_table_mutex); static struct workqueue_struct *broadcast_wq; +static atomic64_t cookie_gen; u64 sock_gen_cookie(struct sock *sk) { @@ -27,7 +28,7 @@ u64 sock_gen_cookie(struct sock *sk) if (res) return res; - res = atomic64_inc_return(&sock_net(sk)->cookie_gen); + res = atomic64_inc_return(&cookie_gen); atomic64_cmpxchg(&sk->sk_cookie, 0, res); } } -- cgit v1.2.3 From 28315f7999870bb56da236f6b4ffce63efcc7897 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 11 Aug 2019 10:35:50 +0300 Subject: drop_monitor: Add alert mode operations The next patch is going to add another alert mode in which the dropped packet is notified to user space, instead of only a summary of recent drops. Abstract the differences between the modes by adding alert mode operations. The operations are selected based on the currently configured mode and associated with the probes and the work item just before tracing starts. Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- include/uapi/linux/net_dropmon.h | 9 +++++++++ net/core/drop_monitor.c | 38 ++++++++++++++++++++++++++++++++------ 2 files changed, 41 insertions(+), 6 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h index 5edbd0a675fd..0fecdedeb6ca 100644 --- a/include/uapi/linux/net_dropmon.h +++ b/include/uapi/linux/net_dropmon.h @@ -62,4 +62,13 @@ enum { * Our group identifiers */ #define NET_DM_GRP_ALERT 1 + +/** + * enum net_dm_alert_mode - Alert mode. + * @NET_DM_ALERT_MODE_SUMMARY: A summary of recent drops is sent to user space. 
+ */ +enum net_dm_alert_mode { + NET_DM_ALERT_MODE_SUMMARY, +}; + #endif diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index cd2f3069f34e..9cd2f662cb9e 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -75,6 +75,16 @@ static int dm_delay = 1; static unsigned long dm_hw_check_delta = 2*HZ; static LIST_HEAD(hw_stats_list); +static enum net_dm_alert_mode net_dm_alert_mode = NET_DM_ALERT_MODE_SUMMARY; + +struct net_dm_alert_ops { + void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb, + void *location); + void (*napi_poll_probe)(void *ignore, struct napi_struct *napi, + int work, int budget); + void (*work_item_func)(struct work_struct *work); +}; + static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data) { size_t al; @@ -241,10 +251,23 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi, rcu_read_unlock(); } +static const struct net_dm_alert_ops net_dm_alert_summary_ops = { + .kfree_skb_probe = trace_kfree_skb_hit, + .napi_poll_probe = trace_napi_poll_hit, + .work_item_func = send_dm_alert, +}; + +static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = { + [NET_DM_ALERT_MODE_SUMMARY] = &net_dm_alert_summary_ops, +}; + static int net_dm_trace_on_set(struct netlink_ext_ack *extack) { + const struct net_dm_alert_ops *ops; int cpu, rc; + ops = net_dm_alert_ops_arr[net_dm_alert_mode]; + if (!try_module_get(THIS_MODULE)) { NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module"); return -ENODEV; @@ -254,7 +277,7 @@ static int net_dm_trace_on_set(struct netlink_ext_ack *extack) struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu); struct sk_buff *skb; - INIT_WORK(&data->dm_alert_work, send_dm_alert); + INIT_WORK(&data->dm_alert_work, ops->work_item_func); timer_setup(&data->send_timer, sched_send_work, 0); /* Allocate a new per-CPU skb for the summary alert message and * free the old one which might contain stale data from @@ -264,13 +287,13 @@ static int net_dm_trace_on_set(struct netlink_ext_ack *extack) consume_skb(skb); } - rc = register_trace_kfree_skb(trace_kfree_skb_hit, NULL); + rc = register_trace_kfree_skb(ops->kfree_skb_probe, NULL); if (rc) { NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to kfree_skb() tracepoint"); goto err_module_put; } - rc = register_trace_napi_poll(trace_napi_poll_hit, NULL); + rc = register_trace_napi_poll(ops->napi_poll_probe, NULL); if (rc) { NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to napi_poll() tracepoint"); goto err_unregister_trace; @@ -279,7 +302,7 @@ static int net_dm_trace_on_set(struct netlink_ext_ack *extack) return 0; err_unregister_trace: - unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL); + unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL); err_module_put: module_put(THIS_MODULE); return rc; @@ -288,10 +311,13 @@ err_module_put: static void net_dm_trace_off_set(void) { struct dm_hw_stat_delta *new_stat, *temp; + const struct net_dm_alert_ops *ops; int cpu; - unregister_trace_napi_poll(trace_napi_poll_hit, NULL); - unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL); + ops = net_dm_alert_ops_arr[net_dm_alert_mode]; + + unregister_trace_napi_poll(ops->napi_poll_probe, NULL); + unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL); tracepoint_synchronize_unregister(); -- cgit v1.2.3 From ca30707dee2bc8bc81cfd8b4277fe90f7ca6df1f Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sun, 11 Aug 2019 10:35:51 +0300 Subject: drop_monitor: Add packet alert mode So far drop monitor supported only one alert mode in which a summary of 
locations in which packets were recently dropped was sent to user space. This alert mode is sufficient in order to understand that packets were dropped, but lacks information to perform a more detailed analysis. Add a new alert mode in which the dropped packet itself is passed to user space along with metadata: The drop location (as program counter and resolved symbol), ingress netdevice and drop timestamp. More metadata can be added in the future. To avoid performing expensive operations in the context in which kfree_skb() is invoked (can be hard IRQ), the dropped skb is cloned and queued on per-CPU skb drop list. Then, in process context the netlink message is allocated, prepared and finally sent to user space. The per-CPU skb drop list is limited to 1000 skbs to prevent exhausting the system's memory. Subsequent patches will make this limit configurable and also add a counter that indicates how many skbs were tail dropped. Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- include/uapi/linux/net_dropmon.h | 27 ++++ net/core/drop_monitor.c | 280 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 305 insertions(+), 2 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h index 0fecdedeb6ca..cfaaf75371b8 100644 --- a/include/uapi/linux/net_dropmon.h +++ b/include/uapi/linux/net_dropmon.h @@ -53,6 +53,7 @@ enum { NET_DM_CMD_CONFIG, NET_DM_CMD_START, NET_DM_CMD_STOP, + NET_DM_CMD_PACKET_ALERT, _NET_DM_CMD_MAX, }; @@ -63,12 +64,38 @@ enum { */ #define NET_DM_GRP_ALERT 1 +enum net_dm_attr { + NET_DM_ATTR_UNSPEC, + + NET_DM_ATTR_ALERT_MODE, /* u8 */ + NET_DM_ATTR_PC, /* u64 */ + NET_DM_ATTR_SYMBOL, /* string */ + NET_DM_ATTR_IN_PORT, /* nested */ + NET_DM_ATTR_TIMESTAMP, /* struct timespec */ + NET_DM_ATTR_PROTO, /* u16 */ + NET_DM_ATTR_PAYLOAD, /* binary */ + NET_DM_ATTR_PAD, + + __NET_DM_ATTR_MAX, + NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1 +}; + /** * enum net_dm_alert_mode - Alert mode. * @NET_DM_ALERT_MODE_SUMMARY: A summary of recent drops is sent to user space. + * @NET_DM_ALERT_MODE_PACKET: Each dropped packet is sent to user space along + * with metadata. 
  */
 enum net_dm_alert_mode {
 	NET_DM_ALERT_MODE_SUMMARY,
+	NET_DM_ALERT_MODE_PACKET,
 };
 
+enum {
+	NET_DM_ATTR_PORT_NETDEV_IFINDEX, /* u32 */
+
+	__NET_DM_ATTR_PORT_MAX,
+	NET_DM_ATTR_PORT_MAX = __NET_DM_ATTR_PORT_MAX - 1
+};
+
 #endif

diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 9cd2f662cb9e..ba765832413b 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -54,6 +54,7 @@ static DEFINE_MUTEX(net_dm_mutex);
 struct per_cpu_dm_data {
 	spinlock_t		lock;	/* Protects 'skb' and 'send_timer' */
 	struct sk_buff		*skb;
+	struct sk_buff_head	drop_queue;
 	struct work_struct	dm_alert_work;
 	struct timer_list	send_timer;
 };
@@ -85,6 +86,14 @@ struct net_dm_alert_ops {
 	void (*work_item_func)(struct work_struct *work);
 };
 
+struct net_dm_skb_cb {
+	void *pc;
+};
+
+#define NET_DM_SKB_CB(__skb) ((struct net_dm_skb_cb *)&((__skb)->cb[0]))
+
+#define NET_DM_QUEUE_LEN 1000
+
 static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 {
 	size_t al;
@@ -257,8 +266,214 @@ static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
 	.work_item_func = send_dm_alert,
 };
 
+static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
+					      struct sk_buff *skb,
+					      void *location)
+{
+	ktime_t tstamp = ktime_get_real();
+	struct per_cpu_dm_data *data;
+	struct sk_buff *nskb;
+	unsigned long flags;
+
+	nskb = skb_clone(skb, GFP_ATOMIC);
+	if (!nskb)
+		return;
+
+	NET_DM_SKB_CB(nskb)->pc = location;
+	/* Override the timestamp because we care about the time when the
+	 * packet was dropped.
+	 */
+	nskb->tstamp = tstamp;
+
+	data = this_cpu_ptr(&dm_cpu_data);
+
+	spin_lock_irqsave(&data->drop_queue.lock, flags);
+	if (skb_queue_len(&data->drop_queue) < NET_DM_QUEUE_LEN)
+		__skb_queue_tail(&data->drop_queue, nskb);
+	else
+		goto unlock_free;
+	spin_unlock_irqrestore(&data->drop_queue.lock, flags);
+
+	schedule_work(&data->dm_alert_work);
+
+	return;
+
+unlock_free:
+	spin_unlock_irqrestore(&data->drop_queue.lock, flags);
+	consume_skb(nskb);
+}
+
+static void net_dm_packet_trace_napi_poll_hit(void *ignore,
+					      struct napi_struct *napi,
+					      int work, int budget)
+{
+}
+
+static size_t net_dm_in_port_size(void)
+{
+	/* NET_DM_ATTR_IN_PORT nest */
+	return nla_total_size(0) +
+	       /* NET_DM_ATTR_PORT_NETDEV_IFINDEX */
+	       nla_total_size(sizeof(u32));
+}
+
+#define NET_DM_MAX_SYMBOL_LEN 40
+
+static size_t net_dm_packet_report_size(size_t payload_len)
+{
+	size_t size;
+
+	size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize);
+
+	return NLMSG_ALIGN(size) +
+	       /* NET_DM_ATTR_PC */
+	       nla_total_size(sizeof(u64)) +
+	       /* NET_DM_ATTR_SYMBOL */
+	       nla_total_size(NET_DM_MAX_SYMBOL_LEN + 1) +
+	       /* NET_DM_ATTR_IN_PORT */
+	       net_dm_in_port_size() +
+	       /* NET_DM_ATTR_TIMESTAMP */
+	       nla_total_size(sizeof(struct timespec)) +
+	       /* NET_DM_ATTR_PROTO */
+	       nla_total_size(sizeof(u16)) +
+	       /* NET_DM_ATTR_PAYLOAD */
+	       nla_total_size(payload_len);
+}
+
+static int net_dm_packet_report_in_port_put(struct sk_buff *msg, int ifindex)
+{
+	struct nlattr *attr;
+
+	attr = nla_nest_start(msg, NET_DM_ATTR_IN_PORT);
+	if (!attr)
+		return -EMSGSIZE;
+
+	if (ifindex &&
+	    nla_put_u32(msg, NET_DM_ATTR_PORT_NETDEV_IFINDEX, ifindex))
+		goto nla_put_failure;
+
+	nla_nest_end(msg, attr);
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(msg, attr);
+	return -EMSGSIZE;
+}
+
+static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb,
+				     size_t payload_len)
+{
+	u64 pc = (u64)(uintptr_t) NET_DM_SKB_CB(skb)->pc;
+	char buf[NET_DM_MAX_SYMBOL_LEN];
+	struct nlattr *attr;
+	struct timespec ts;
+	void *hdr;
+	int rc;
+
+	hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0,
+			  NET_DM_CMD_PACKET_ALERT);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (nla_put_u64_64bit(msg, NET_DM_ATTR_PC, pc, NET_DM_ATTR_PAD))
+		goto nla_put_failure;
+
+	snprintf(buf, sizeof(buf), "%pS", NET_DM_SKB_CB(skb)->pc);
+	if (nla_put_string(msg, NET_DM_ATTR_SYMBOL, buf))
+		goto nla_put_failure;
+
+	rc = net_dm_packet_report_in_port_put(msg, skb->skb_iif);
+	if (rc)
+		goto nla_put_failure;
+
+	if (ktime_to_timespec_cond(skb->tstamp, &ts) &&
+	    nla_put(msg, NET_DM_ATTR_TIMESTAMP, sizeof(ts), &ts))
+		goto nla_put_failure;
+
+	if (!payload_len)
+		goto out;
+
+	if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol)))
+		goto nla_put_failure;
+
+	attr = skb_put(msg, nla_total_size(payload_len));
+	attr->nla_type = NET_DM_ATTR_PAYLOAD;
+	attr->nla_len = nla_attr_size(payload_len);
+	if (skb_copy_bits(skb, 0, nla_data(attr), payload_len))
+		goto nla_put_failure;
+
+out:
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+#define NET_DM_MAX_PACKET_SIZE (0xffff - NLA_HDRLEN - NLA_ALIGNTO)
+
+static void net_dm_packet_report(struct sk_buff *skb)
+{
+	struct sk_buff *msg;
+	size_t payload_len;
+	int rc;
+
+	/* Make sure we start copying the packet from the MAC header */
+	if (skb->data > skb_mac_header(skb))
+		skb_push(skb, skb->data - skb_mac_header(skb));
+	else
+		skb_pull(skb, skb_mac_header(skb) - skb->data);
+
+	/* Ensure packet fits inside a single netlink attribute */
+	payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
+
+	msg = nlmsg_new(net_dm_packet_report_size(payload_len), GFP_KERNEL);
+	if (!msg)
+		goto out;
+
+	rc = net_dm_packet_report_fill(msg, skb, payload_len);
+	if (rc) {
+		nlmsg_free(msg);
+		goto out;
+	}
+
+	genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL);
+
+out:
+	consume_skb(skb);
+}
+
+static void net_dm_packet_work(struct work_struct *work)
+{
+	struct per_cpu_dm_data *data;
+	struct sk_buff_head list;
+	struct sk_buff *skb;
+	unsigned long flags;
+
+	data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
+
+	__skb_queue_head_init(&list);
+
+	spin_lock_irqsave(&data->drop_queue.lock, flags);
+	skb_queue_splice_tail_init(&data->drop_queue, &list);
+	spin_unlock_irqrestore(&data->drop_queue.lock, flags);
+
+	while ((skb = __skb_dequeue(&list)))
+		net_dm_packet_report(skb);
+}
+
+static const struct net_dm_alert_ops net_dm_alert_packet_ops = {
+	.kfree_skb_probe	= net_dm_packet_trace_kfree_skb_hit,
+	.napi_poll_probe	= net_dm_packet_trace_napi_poll_hit,
+	.work_item_func		= net_dm_packet_work,
+};
+
 static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = {
 	[NET_DM_ALERT_MODE_SUMMARY]	= &net_dm_alert_summary_ops,
+	[NET_DM_ALERT_MODE_PACKET]	= &net_dm_alert_packet_ops,
 };
 
 static int net_dm_trace_on_set(struct netlink_ext_ack *extack)
@@ -326,9 +541,12 @@ static void net_dm_trace_off_set(void)
 	 */
 	for_each_possible_cpu(cpu) {
 		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
+		struct sk_buff *skb;
 
 		del_timer_sync(&data->send_timer);
 		cancel_work_sync(&data->dm_alert_work);
+		while ((skb = __skb_dequeue(&data->drop_queue)))
+			consume_skb(skb);
 	}
 
 	list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) {
@@ -370,12 +588,61 @@ static int set_all_monitor_traces(int state, struct netlink_ext_ack *extack)
 	return rc;
 }
 
+static int net_dm_alert_mode_get_from_info(struct genl_info *info,
+					   enum net_dm_alert_mode *p_alert_mode)
+{
+	u8 val;
+
+	val = nla_get_u8(info->attrs[NET_DM_ATTR_ALERT_MODE]);
+
+	switch (val) {
+	case NET_DM_ALERT_MODE_SUMMARY: /* fall-through */
+	case NET_DM_ALERT_MODE_PACKET:
+		*p_alert_mode = val;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int net_dm_alert_mode_set(struct genl_info *info)
+{
+	struct netlink_ext_ack *extack = info->extack;
+	enum net_dm_alert_mode alert_mode;
+	int rc;
+
+	if (!info->attrs[NET_DM_ATTR_ALERT_MODE])
+		return 0;
+
+	rc = net_dm_alert_mode_get_from_info(info, &alert_mode);
+	if (rc) {
+		NL_SET_ERR_MSG_MOD(extack, "Invalid alert mode");
+		return -EINVAL;
+	}
+
+	net_dm_alert_mode = alert_mode;
+
+	return 0;
+}
+
 static int net_dm_cmd_config(struct sk_buff *skb,
 			struct genl_info *info)
 {
-	NL_SET_ERR_MSG_MOD(info->extack, "Command not supported");
+	struct netlink_ext_ack *extack = info->extack;
+	int rc;
 
-	return -EOPNOTSUPP;
+	if (trace_state == TRACE_ON) {
+		NL_SET_ERR_MSG_MOD(extack, "Cannot configure drop monitor while tracing is on");
+		return -EBUSY;
+	}
+
+	rc = net_dm_alert_mode_set(info);
+	if (rc)
+		return rc;
+
+	return 0;
 }
 
 static int net_dm_cmd_trace(struct sk_buff *skb,
@@ -430,6 +697,11 @@ out:
 	return NOTIFY_DONE;
 }
 
+static const struct nla_policy net_dm_nl_policy[NET_DM_ATTR_MAX + 1] = {
+	[NET_DM_ATTR_UNSPEC] = { .strict_start_type = NET_DM_ATTR_UNSPEC + 1 },
+	[NET_DM_ATTR_ALERT_MODE] = { .type = NLA_U8 },
+};
+
 static const struct genl_ops dropmon_ops[] = {
 	{
 		.cmd = NET_DM_CMD_CONFIG,
@@ -467,6 +739,8 @@ static struct genl_family net_drop_monitor_family __ro_after_init = {
 	.hdrsize	= 0,
 	.name		= "NET_DM",
 	.version	= 2,
+	.maxattr	= NET_DM_ATTR_MAX,
+	.policy		= net_dm_nl_policy,
 	.pre_doit	= net_dm_nl_pre_doit,
 	.post_doit	= net_dm_nl_post_doit,
 	.module		= THIS_MODULE,
@@ -510,6 +784,7 @@ static int __init init_net_drop_monitor(void)
 	for_each_possible_cpu(cpu) {
 		data = &per_cpu(dm_cpu_data, cpu);
 		spin_lock_init(&data->lock);
+		skb_queue_head_init(&data->drop_queue);
 	}
 
 	goto out;
@@ -539,6 +814,7 @@ static void exit_net_drop_monitor(void)
 		 * to this struct and can free the skb inside it
 		 */
 		kfree_skb(data->skb);
+		WARN_ON(!skb_queue_empty(&data->drop_queue));
 	}
 
 	BUG_ON(genl_unregister_family(&net_drop_monitor_family));
--
cgit v1.2.3


From 57986617a736aec2980c1c78a9dd8dcdf477ee6e Mon Sep 17 00:00:00 2001
From: Ido Schimmel
Date: Sun, 11 Aug 2019 10:35:52 +0300
Subject: drop_monitor: Allow truncation of dropped packets

When sending dropped packets to user space it is not always necessary
to copy the entire packet as usually only the headers are of interest.

Allow the user to specify the truncation length and add the original
length of the packet as additional metadata to the netlink message.

By default no truncation is performed.

Signed-off-by: Ido Schimmel
Signed-off-by: David S. Miller
---
 include/uapi/linux/net_dropmon.h |  2 ++
 net/core/drop_monitor.c          | 19 +++++++++++++++++++
 2 files changed, 21 insertions(+)

(limited to 'include/uapi')

diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h
index cfaaf75371b8..5cd7eb1f66ba 100644
--- a/include/uapi/linux/net_dropmon.h
+++ b/include/uapi/linux/net_dropmon.h
@@ -75,6 +75,8 @@ enum net_dm_attr {
 	NET_DM_ATTR_PROTO,		/* u16 */
 	NET_DM_ATTR_PAYLOAD,		/* binary */
 	NET_DM_ATTR_PAD,
+	NET_DM_ATTR_TRUNC_LEN,		/* u32 */
+	NET_DM_ATTR_ORIG_LEN,		/* u32 */
 
 	__NET_DM_ATTR_MAX,
 	NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1

diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index ba765832413b..9f884adaa85f 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -77,6 +77,7 @@ static unsigned long dm_hw_check_delta = 2*HZ;
 static LIST_HEAD(hw_stats_list);
 
 static enum net_dm_alert_mode net_dm_alert_mode = NET_DM_ALERT_MODE_SUMMARY;
+static u32 net_dm_trunc_len;
 
 struct net_dm_alert_ops {
 	void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb,
@@ -334,6 +335,8 @@ static size_t net_dm_packet_report_size(size_t payload_len)
 	       net_dm_in_port_size() +
 	       /* NET_DM_ATTR_TIMESTAMP */
 	       nla_total_size(sizeof(struct timespec)) +
+	       /* NET_DM_ATTR_ORIG_LEN */
+	       nla_total_size(sizeof(u32)) +
 	       /* NET_DM_ATTR_PROTO */
 	       nla_total_size(sizeof(u16)) +
 	       /* NET_DM_ATTR_PAYLOAD */
@@ -391,6 +394,9 @@ static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb,
 	    nla_put(msg, NET_DM_ATTR_TIMESTAMP, sizeof(ts), &ts))
 		goto nla_put_failure;
 
+	if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len))
+		goto nla_put_failure;
+
 	if (!payload_len)
 		goto out;
 
@@ -429,6 +435,8 @@ static void net_dm_packet_report(struct sk_buff *skb)
 	/* Ensure packet fits inside a single netlink attribute */
 	payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE);
+	if (net_dm_trunc_len)
+		payload_len = min_t(size_t, net_dm_trunc_len, payload_len);
 
 	msg = nlmsg_new(net_dm_packet_report_size(payload_len), GFP_KERNEL);
 	if (!msg)
@@ -627,6 +635,14 @@ static int net_dm_alert_mode_set(struct genl_info *info)
 	return 0;
 }
 
+static void net_dm_trunc_len_set(struct genl_info *info)
+{
+	if (!info->attrs[NET_DM_ATTR_TRUNC_LEN])
+		return;
+
+	net_dm_trunc_len = nla_get_u32(info->attrs[NET_DM_ATTR_TRUNC_LEN]);
+}
+
 static int net_dm_cmd_config(struct sk_buff *skb,
 			struct genl_info *info)
 {
@@ -642,6 +658,8 @@ static int net_dm_cmd_config(struct sk_buff *skb,
 	if (rc)
 		return rc;
 
+	net_dm_trunc_len_set(info);
+
 	return 0;
 }
 
@@ -700,6 +718,7 @@ out:
 static const struct nla_policy net_dm_nl_policy[NET_DM_ATTR_MAX + 1] = {
 	[NET_DM_ATTR_UNSPEC] = { .strict_start_type = NET_DM_ATTR_UNSPEC + 1 },
 	[NET_DM_ATTR_ALERT_MODE] = { .type = NLA_U8 },
+	[NET_DM_ATTR_TRUNC_LEN] = { .type = NLA_U32 },
 };
 
 static const struct genl_ops dropmon_ops[] = {
--
cgit v1.2.3


From 444be061d012f1a8ebf95292a648a4e0e2afa83f Mon Sep 17 00:00:00 2001
From: Ido Schimmel
Date: Sun, 11 Aug 2019 10:35:53 +0300
Subject: drop_monitor: Add a command to query current configuration

Users should be able to query the current configuration of the drop
monitor before they start using it. Add a command to query the existing
configuration, which currently consists of the alert mode and packet
truncation length.

Signed-off-by: Ido Schimmel
Signed-off-by: David S. Miller
---
 include/uapi/linux/net_dropmon.h |  2 ++
 net/core/drop_monitor.c          | 48 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+)

(limited to 'include/uapi')

diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h
index 5cd7eb1f66ba..3b765a8428b5 100644
--- a/include/uapi/linux/net_dropmon.h
+++ b/include/uapi/linux/net_dropmon.h
@@ -54,6 +54,8 @@ enum {
 	NET_DM_CMD_START,
 	NET_DM_CMD_STOP,
 	NET_DM_CMD_PACKET_ALERT,
+	NET_DM_CMD_CONFIG_GET,
+	NET_DM_CMD_CONFIG_NEW,
 	_NET_DM_CMD_MAX,
 };
 
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 9f884adaa85f..135638474ab8 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -676,6 +676,50 @@ static int net_dm_cmd_trace(struct sk_buff *skb,
 	return -EOPNOTSUPP;
 }
 
+static int net_dm_config_fill(struct sk_buff *msg, struct genl_info *info)
+{
+	void *hdr;
+
+	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
+			  &net_drop_monitor_family, 0, NET_DM_CMD_CONFIG_NEW);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	if (nla_put_u8(msg, NET_DM_ATTR_ALERT_MODE, net_dm_alert_mode))
+		goto nla_put_failure;
+
+	if (nla_put_u32(msg, NET_DM_ATTR_TRUNC_LEN, net_dm_trunc_len))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int net_dm_cmd_config_get(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *msg;
+	int rc;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	rc = net_dm_config_fill(msg, info);
+	if (rc)
+		goto free_msg;
+
+	return genlmsg_reply(msg, info);
+
+free_msg:
+	nlmsg_free(msg);
+	return rc;
+}
+
 static int dropmon_net_event(struct notifier_block *ev_block,
 			     unsigned long event, void *ptr)
 {
@@ -738,6 +782,10 @@ static const struct genl_ops dropmon_ops[] = {
 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
 		.doit = net_dm_cmd_trace,
 	},
+	{
+		.cmd = NET_DM_CMD_CONFIG_GET,
+		.doit = net_dm_cmd_config_get,
+	},
 };
 
 static int net_dm_nl_pre_doit(const struct genl_ops *ops,
--
cgit v1.2.3


From 30328d46af593dcf24582f2a431d84ea0cf4bdef Mon Sep 17 00:00:00 2001
From: Ido Schimmel
Date: Sun, 11 Aug 2019 10:35:54 +0300
Subject: drop_monitor: Make drop queue length configurable

In packet alert mode, each CPU holds a list of dropped skbs that need
to be processed in process context and sent to user space. To avoid
exhausting the system's memory, the maximum length of this queue is
currently set to 1000.

Allow users to tune the length of this queue according to their needs.
The configured length is reported to user space when the drop monitor
configuration is queried.

Signed-off-by: Ido Schimmel
Signed-off-by: David S. Miller
---
 include/uapi/linux/net_dropmon.h |  1 +
 net/core/drop_monitor.c          | 19 ++++++++++++++++---
 2 files changed, 17 insertions(+), 3 deletions(-)

(limited to 'include/uapi')

diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h
index 3b765a8428b5..1d0bdb1ba954 100644
--- a/include/uapi/linux/net_dropmon.h
+++ b/include/uapi/linux/net_dropmon.h
@@ -79,6 +79,7 @@ enum net_dm_attr {
 	NET_DM_ATTR_PAD,
 	NET_DM_ATTR_TRUNC_LEN,		/* u32 */
 	NET_DM_ATTR_ORIG_LEN,		/* u32 */
+	NET_DM_ATTR_QUEUE_LEN,		/* u32 */
 
 	__NET_DM_ATTR_MAX,
 	NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1

diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 135638474ab8..eb3c34d69ea9 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -78,6 +78,7 @@ static LIST_HEAD(hw_stats_list);
 
 static enum net_dm_alert_mode net_dm_alert_mode = NET_DM_ALERT_MODE_SUMMARY;
 static u32 net_dm_trunc_len;
+static u32 net_dm_queue_len = 1000;
 
 struct net_dm_alert_ops {
 	void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb,
@@ -93,8 +94,6 @@ struct net_dm_skb_cb {
 
 #define NET_DM_SKB_CB(__skb) ((struct net_dm_skb_cb *)&((__skb)->cb[0]))
 
-#define NET_DM_QUEUE_LEN 1000
-
 static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 {
 	size_t al;
@@ -289,7 +288,7 @@ static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
 	data = this_cpu_ptr(&dm_cpu_data);
 
 	spin_lock_irqsave(&data->drop_queue.lock, flags);
-	if (skb_queue_len(&data->drop_queue) < NET_DM_QUEUE_LEN)
+	if (skb_queue_len(&data->drop_queue) < net_dm_queue_len)
 		__skb_queue_tail(&data->drop_queue, nskb);
 	else
 		goto unlock_free;
@@ -643,6 +642,14 @@ static void net_dm_trunc_len_set(struct genl_info *info)
 	net_dm_trunc_len = nla_get_u32(info->attrs[NET_DM_ATTR_TRUNC_LEN]);
 }
 
+static void net_dm_queue_len_set(struct genl_info *info)
+{
+	if (!info->attrs[NET_DM_ATTR_QUEUE_LEN])
+		return;
+
+	net_dm_queue_len = nla_get_u32(info->attrs[NET_DM_ATTR_QUEUE_LEN]);
+}
+
 static int net_dm_cmd_config(struct sk_buff *skb,
 			struct genl_info *info)
 {
@@ -660,6 +667,8 @@ static int net_dm_cmd_config(struct sk_buff *skb,
 
 	net_dm_trunc_len_set(info);
 
+	net_dm_queue_len_set(info);
+
 	return 0;
 }
 
@@ -691,6 +700,9 @@ static int net_dm_config_fill(struct sk_buff *msg, struct genl_info *info)
 	if (nla_put_u32(msg, NET_DM_ATTR_TRUNC_LEN, net_dm_trunc_len))
 		goto nla_put_failure;
 
+	if (nla_put_u32(msg, NET_DM_ATTR_QUEUE_LEN, net_dm_queue_len))
+		goto nla_put_failure;
+
 	genlmsg_end(msg, hdr);
 
 	return 0;
@@ -763,6 +775,7 @@ static const struct nla_policy net_dm_nl_policy[NET_DM_ATTR_MAX + 1] = {
 	[NET_DM_ATTR_UNSPEC] = { .strict_start_type = NET_DM_ATTR_UNSPEC + 1 },
 	[NET_DM_ATTR_ALERT_MODE] = { .type = NLA_U8 },
 	[NET_DM_ATTR_TRUNC_LEN] = { .type = NLA_U32 },
+	[NET_DM_ATTR_QUEUE_LEN] = { .type = NLA_U32 },
 };
 
 static const struct genl_ops dropmon_ops[] = {
--
cgit v1.2.3


From e9feb58020f952f7d9de785ede9a7d54ab1eda5c Mon Sep 17 00:00:00 2001
From: Ido Schimmel
Date: Sun, 11 Aug 2019 10:35:55 +0300
Subject: drop_monitor: Expose tail drop counter

The previous patch made the length of the per-CPU skb drop list
configurable. Expose a counter that shows how many packets could not be
enqueued to this list. This allows users to determine the desired queue
length.

Signed-off-by: Ido Schimmel
Signed-off-by: David S. Miller
---
 include/uapi/linux/net_dropmon.h |  10 ++++
 net/core/drop_monitor.c          | 101 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 111 insertions(+)

(limited to 'include/uapi')

diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h
index 1d0bdb1ba954..405b31cbf723 100644
--- a/include/uapi/linux/net_dropmon.h
+++ b/include/uapi/linux/net_dropmon.h
@@ -56,6 +56,8 @@ enum {
 	NET_DM_CMD_PACKET_ALERT,
 	NET_DM_CMD_CONFIG_GET,
 	NET_DM_CMD_CONFIG_NEW,
+	NET_DM_CMD_STATS_GET,
+	NET_DM_CMD_STATS_NEW,
 	_NET_DM_CMD_MAX,
 };
 
@@ -80,6 +82,7 @@ enum net_dm_attr {
 	NET_DM_ATTR_TRUNC_LEN,		/* u32 */
 	NET_DM_ATTR_ORIG_LEN,		/* u32 */
 	NET_DM_ATTR_QUEUE_LEN,		/* u32 */
+	NET_DM_ATTR_STATS,		/* nested */
 
 	__NET_DM_ATTR_MAX,
 	NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1
@@ -103,4 +106,11 @@ enum {
 	NET_DM_ATTR_PORT_MAX = __NET_DM_ATTR_PORT_MAX - 1
 };
 
+enum {
+	NET_DM_ATTR_STATS_DROPPED, /* u64 */
+
+	__NET_DM_ATTR_STATS_MAX,
+	NET_DM_ATTR_STATS_MAX = __NET_DM_ATTR_STATS_MAX - 1
+};
+
 #endif

diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index eb3c34d69ea9..39e094907391 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -51,12 +51,18 @@ static int trace_state = TRACE_OFF;
  */
 static DEFINE_MUTEX(net_dm_mutex);
 
+struct net_dm_stats {
+	u64 dropped;
+	struct u64_stats_sync syncp;
+};
+
 struct per_cpu_dm_data {
 	spinlock_t		lock;	/* Protects 'skb' and 'send_timer' */
 	struct sk_buff		*skb;
 	struct sk_buff_head	drop_queue;
 	struct work_struct	dm_alert_work;
 	struct timer_list	send_timer;
+	struct net_dm_stats	stats;
 };
 
 struct dm_hw_stat_delta {
@@ -300,6 +306,9 @@ static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
 
 unlock_free:
 	spin_unlock_irqrestore(&data->drop_queue.lock, flags);
+	u64_stats_update_begin(&data->stats.syncp);
+	data->stats.dropped++;
+	u64_stats_update_end(&data->stats.syncp);
 	consume_skb(nskb);
 }
 
@@ -732,6 +741,93 @@ free_msg:
 	return rc;
 }
 
+static void net_dm_stats_read(struct net_dm_stats *stats)
+{
+	int cpu;
+
+	memset(stats, 0, sizeof(*stats));
+	for_each_possible_cpu(cpu) {
+		struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
+		struct net_dm_stats *cpu_stats = &data->stats;
+		unsigned int start;
+		u64 dropped;
+
+		do {
+			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+			dropped = cpu_stats->dropped;
+		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+		stats->dropped += dropped;
+	}
+}
+
+static int net_dm_stats_put(struct sk_buff *msg)
+{
+	struct net_dm_stats stats;
+	struct nlattr *attr;
+
+	net_dm_stats_read(&stats);
+
+	attr = nla_nest_start(msg, NET_DM_ATTR_STATS);
+	if (!attr)
+		return -EMSGSIZE;
+
+	if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED,
+			      stats.dropped, NET_DM_ATTR_PAD))
+		goto nla_put_failure;
+
+	nla_nest_end(msg, attr);
+
+	return 0;
+
+nla_put_failure:
+	nla_nest_cancel(msg, attr);
+	return -EMSGSIZE;
+}
+
+static int net_dm_stats_fill(struct sk_buff *msg, struct genl_info *info)
+{
+	void *hdr;
+	int rc;
+
+	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
+			  &net_drop_monitor_family, 0, NET_DM_CMD_STATS_NEW);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	rc = net_dm_stats_put(msg);
+	if (rc)
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+static int net_dm_cmd_stats_get(struct sk_buff *skb, struct genl_info *info)
+{
+	struct sk_buff *msg;
+	int rc;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	rc = net_dm_stats_fill(msg, info);
+	if (rc)
+		goto free_msg;
+
+	return genlmsg_reply(msg, info);
+
+free_msg:
+	nlmsg_free(msg);
+	return rc;
+}
+
 static int dropmon_net_event(struct notifier_block *ev_block,
 			     unsigned long event, void *ptr)
 {
@@ -799,6 +895,10 @@ static const struct genl_ops dropmon_ops[] = {
 		.cmd = NET_DM_CMD_CONFIG_GET,
 		.doit = net_dm_cmd_config_get,
 	},
+	{
+		.cmd = NET_DM_CMD_STATS_GET,
+		.doit = net_dm_cmd_stats_get,
+	},
 };
 
 static int net_dm_nl_pre_doit(const struct genl_ops *ops,
@@ -865,6 +965,7 @@ static int __init init_net_drop_monitor(void)
 		data = &per_cpu(dm_cpu_data, cpu);
 		spin_lock_init(&data->lock);
 		skb_queue_head_init(&data->drop_queue);
+		u64_stats_init(&data->stats.syncp);
 	}
 
 	goto out;
--
cgit v1.2.3


From 7af0ab0d3aab951518b0d520f95e9f6b1995ec69 Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Sun, 4 Aug 2019 19:35:43 -0700
Subject: fs, fscrypt: move uapi definitions to new header <linux/fscrypt.h>

More fscrypt definitions are being added, and we shouldn't use a
disproportionate amount of space in <linux/fs.h> for fscrypt stuff. So
move the fscrypt definitions to a new header <linux/fscrypt.h>.

For source compatibility with existing userspace programs,
<linux/fs.h> still includes the new header.

Reviewed-by: Theodore Ts'o
Signed-off-by: Eric Biggers
---
 MAINTAINERS                  |  1 +
 include/linux/fscrypt.h      |  1 +
 include/uapi/linux/fs.h      | 54 +++------------------------------------
 include/uapi/linux/fscrypt.h | 61 ++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 66 insertions(+), 51 deletions(-)
 create mode 100644 include/uapi/linux/fscrypt.h

(limited to 'include/uapi')

diff --git a/MAINTAINERS b/MAINTAINERS
index a2c343ee3b2c..714ffdaaa920 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6603,6 +6603,7 @@ T:	git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git
 S:	Supported
 F:	fs/crypto/
 F:	include/linux/fscrypt*.h
+F:	include/uapi/linux/fscrypt.h
 F:	Documentation/filesystems/fscrypt.rst
 
 FSI SUBSYSTEM

diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index bd8f207a2fb6..81c0c754f8b2 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -16,6 +16,7 @@
 #include <linux/fs.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <uapi/linux/fscrypt.h>
 
 #define FS_CRYPTO_BLOCK_SIZE		16

diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 59c71fa8c553..41bd84d25a98 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -13,6 +13,9 @@
 #include <linux/limits.h>
 #include <linux/ioctl.h>
 #include <linux/types.h>
+#ifndef __KERNEL__
+#include <linux/fscrypt.h>
+#endif
 
 /* Use of MS_* flags within the kernel is restricted to core mount(2) code. */
 #if !defined(__KERNEL__)
@@ -212,57 +215,6 @@ struct fsxattr {
 #define FS_IOC_GETFSLABEL		_IOR(0x94, 49, char[FSLABEL_MAX])
 #define FS_IOC_SETFSLABEL		_IOW(0x94, 50, char[FSLABEL_MAX])
 
-/*
- * File system encryption support
- */
-/* Policy provided via an ioctl on the topmost directory */
-#define FS_KEY_DESCRIPTOR_SIZE	8
-
-#define FS_POLICY_FLAGS_PAD_4		0x00
-#define FS_POLICY_FLAGS_PAD_8		0x01
-#define FS_POLICY_FLAGS_PAD_16		0x02
-#define FS_POLICY_FLAGS_PAD_32		0x03
-#define FS_POLICY_FLAGS_PAD_MASK	0x03
-#define FS_POLICY_FLAG_DIRECT_KEY	0x04	/* use master key directly */
-#define FS_POLICY_FLAGS_VALID		0x07
-
-/* Encryption algorithms */
-#define FS_ENCRYPTION_MODE_INVALID		0
-#define FS_ENCRYPTION_MODE_AES_256_XTS		1
-#define FS_ENCRYPTION_MODE_AES_256_GCM		2
-#define FS_ENCRYPTION_MODE_AES_256_CBC		3
-#define FS_ENCRYPTION_MODE_AES_256_CTS		4
-#define FS_ENCRYPTION_MODE_AES_128_CBC		5
-#define FS_ENCRYPTION_MODE_AES_128_CTS		6
-#define FS_ENCRYPTION_MODE_SPECK128_256_XTS	7	/* Removed, do not use. */
-#define FS_ENCRYPTION_MODE_SPECK128_256_CTS	8	/* Removed, do not use. */
-#define FS_ENCRYPTION_MODE_ADIANTUM		9
-
-struct fscrypt_policy {
-	__u8 version;
-	__u8 contents_encryption_mode;
-	__u8 filenames_encryption_mode;
-	__u8 flags;
-	__u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
-};
-
-#define FS_IOC_SET_ENCRYPTION_POLICY	_IOR('f', 19, struct fscrypt_policy)
-#define FS_IOC_GET_ENCRYPTION_PWSALT	_IOW('f', 20, __u8[16])
-#define FS_IOC_GET_ENCRYPTION_POLICY	_IOW('f', 21, struct fscrypt_policy)
-
-/* Parameters for passing an encryption key into the kernel keyring */
-#define FS_KEY_DESC_PREFIX		"fscrypt:"
-#define FS_KEY_DESC_PREFIX_SIZE		8
-
-/* Structure that userspace passes to the kernel keyring */
-#define FS_MAX_KEY_SIZE			64
-
-struct fscrypt_key {
-	__u32 mode;
-	__u8 raw[FS_MAX_KEY_SIZE];
-	__u32 size;
-};
-
 /*
  * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
  *

diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
new file mode 100644
index 000000000000..26f6d2c19afd
--- /dev/null
+++ b/include/uapi/linux/fscrypt.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * fscrypt user API
+ *
+ * These ioctls can be used on filesystems that support fscrypt.  See the
+ * "User API" section of Documentation/filesystems/fscrypt.rst.
+ */
+#ifndef _UAPI_LINUX_FSCRYPT_H
+#define _UAPI_LINUX_FSCRYPT_H
+
+#include <linux/types.h>
+
+#define FS_KEY_DESCRIPTOR_SIZE	8
+
+/* Encryption policy flags */
+#define FS_POLICY_FLAGS_PAD_4		0x00
+#define FS_POLICY_FLAGS_PAD_8		0x01
+#define FS_POLICY_FLAGS_PAD_16		0x02
+#define FS_POLICY_FLAGS_PAD_32		0x03
+#define FS_POLICY_FLAGS_PAD_MASK	0x03
+#define FS_POLICY_FLAG_DIRECT_KEY	0x04	/* use master key directly */
+#define FS_POLICY_FLAGS_VALID		0x07
+
+/* Encryption algorithms */
+#define FS_ENCRYPTION_MODE_INVALID		0
+#define FS_ENCRYPTION_MODE_AES_256_XTS		1
+#define FS_ENCRYPTION_MODE_AES_256_GCM		2
+#define FS_ENCRYPTION_MODE_AES_256_CBC		3
+#define FS_ENCRYPTION_MODE_AES_256_CTS		4
+#define FS_ENCRYPTION_MODE_AES_128_CBC		5
+#define FS_ENCRYPTION_MODE_AES_128_CTS		6
+#define FS_ENCRYPTION_MODE_SPECK128_256_XTS	7	/* Removed, do not use. */
+#define FS_ENCRYPTION_MODE_SPECK128_256_CTS	8	/* Removed, do not use. */
+#define FS_ENCRYPTION_MODE_ADIANTUM		9
+
+struct fscrypt_policy {
+	__u8 version;
+	__u8 contents_encryption_mode;
+	__u8 filenames_encryption_mode;
+	__u8 flags;
+	__u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+};
+
+#define FS_IOC_SET_ENCRYPTION_POLICY	_IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_PWSALT	_IOW('f', 20, __u8[16])
+#define FS_IOC_GET_ENCRYPTION_POLICY	_IOW('f', 21, struct fscrypt_policy)
+
+/* Parameters for passing an encryption key into the kernel keyring */
+#define FS_KEY_DESC_PREFIX		"fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE		8
+
+/* Structure that userspace passes to the kernel keyring */
+#define FS_MAX_KEY_SIZE			64
+
+struct fscrypt_key {
+	__u32 mode;
+	__u8 raw[FS_MAX_KEY_SIZE];
+	__u32 size;
+};
+
+#endif /* _UAPI_LINUX_FSCRYPT_H */
--
cgit v1.2.3


From 2336d0deb2d4680349de59d6fbdfc338437be191 Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Sun, 4 Aug 2019 19:35:44 -0700
Subject: fscrypt: use FSCRYPT_ prefix for uapi constants

Prefix all filesystem encryption UAPI constants except the ioctl
numbers with "FSCRYPT_" rather than with "FS_".  This namespaces the
constants more appropriately and makes it clear that they are related
specifically to the filesystem encryption feature, and to the
'fscrypt_*' structures.
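As a concrete illustration of the rename (an editorial sketch, not part of
the patch itself, using only constants defined above), the same policy-flags
validation reads much more clearly with the new prefix:

    /* Before: the generic "FS_" name gives no hint this is fscrypt-related. */
    if (policy->flags & ~FS_POLICY_FLAGS_VALID)
            return -EINVAL;

    /* After: the FSCRYPT_ prefix ties the constant to the fscrypt UAPI. */
    if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID)
            return -EINVAL;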
With some of the old names like "FS_POLICY_FLAGS_VALID", it was not
immediately clear that the constant had anything to do with encryption.

This is also useful because we'll be adding more encryption-related
constants, e.g. for the policy version, and we'd otherwise have to
choose whether to use unclear names like FS_POLICY_V1 or inconsistent
names like FS_ENCRYPTION_POLICY_V1.

For source compatibility with existing userspace programs, keep the old
names defined as aliases to the new names.

Finally, as long as new names are being defined anyway, I skipped
defining new names for the fscrypt mode numbers that aren't actually
used: INVALID (0), AES_256_GCM (2), AES_256_CBC (3), SPECK128_256_XTS
(7), and SPECK128_256_CTS (8).

Reviewed-by: Theodore Ts'o
Signed-off-by: Eric Biggers
---
 Documentation/filesystems/fscrypt.rst | 40 ++++++++++-----------
 include/uapi/linux/fscrypt.h          | 65 ++++++++++++++++++++++-------------
 2 files changed, 62 insertions(+), 43 deletions(-)

(limited to 'include/uapi')

diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 82efa41b0e6c..d60b885c4024 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -225,9 +225,10 @@ a little endian number, except that:
   is encrypted with AES-256 where the AES-256 key is the SHA-256 hash
   of the file's data encryption key.
 
-- In the "direct key" configuration (FS_POLICY_FLAG_DIRECT_KEY set in
-  the fscrypt_policy), the file's nonce is also appended to the IV.
-  Currently this is only allowed with the Adiantum encryption mode.
+- In the "direct key" configuration (FSCRYPT_POLICY_FLAG_DIRECT_KEY
+  set in the fscrypt_policy), the file's nonce is also appended to the
+  IV.  Currently this is only allowed with the Adiantum encryption
+  mode.
 
 Filenames encryption
 --------------------
@@ -274,14 +275,14 @@ empty directory or verifies that a directory or regular file already
 has the specified encryption policy.  It takes in a pointer to a
 :c:type:`struct fscrypt_policy`, defined as follows::
 
-    #define FS_KEY_DESCRIPTOR_SIZE  8
+    #define FSCRYPT_KEY_DESCRIPTOR_SIZE  8
 
     struct fscrypt_policy {
             __u8 version;
             __u8 contents_encryption_mode;
             __u8 filenames_encryption_mode;
             __u8 flags;
-            __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+            __u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
     };
 
 This structure must be initialized as follows:
 
 - ``version`` must be 0.
 
 - ``contents_encryption_mode`` and ``filenames_encryption_mode`` must
-  be set to constants from ``<linux/fs.h>`` which identify the
-  encryption modes to use.  If unsure, use
-  FS_ENCRYPTION_MODE_AES_256_XTS (1) for ``contents_encryption_mode``
-  and FS_ENCRYPTION_MODE_AES_256_CTS (4) for
-  ``filenames_encryption_mode``.
+  be set to constants from ``<linux/fscrypt.h>`` which identify the
+  encryption modes to use.  If unsure, use FSCRYPT_MODE_AES_256_XTS
+  (1) for ``contents_encryption_mode`` and FSCRYPT_MODE_AES_256_CTS
+  (4) for ``filenames_encryption_mode``.
 
-- ``flags`` must contain a value from ``<linux/fs.h>`` which
+- ``flags`` must contain a value from ``<linux/fscrypt.h>`` which
   identifies the amount of NUL-padding to use when encrypting
-  filenames.  If unsure, use FS_POLICY_FLAGS_PAD_32 (0x3).
-  In addition, if the chosen encryption modes are both
-  FS_ENCRYPTION_MODE_ADIANTUM, this can contain
-  FS_POLICY_FLAG_DIRECT_KEY to specify that the master key should be
-  used directly, without key derivation.
+  filenames.  If unsure, use FSCRYPT_POLICY_FLAGS_PAD_32 (0x3).  In
+  addition, if the chosen encryption modes are both
+  FSCRYPT_MODE_ADIANTUM, this can contain
+  FSCRYPT_POLICY_FLAG_DIRECT_KEY to specify that the master key should
+  be used directly, without key derivation.
 
 - ``master_key_descriptor`` specifies how to find the master key in
   the keyring; see `Adding keys`_.  It is up to userspace to choose a
@@ -401,11 +401,11 @@ followed by the 16-character lower case hex representation of the
 ``master_key_descriptor`` that was set in the encryption policy.  The
 key payload must conform to the following structure::
 
-    #define FS_MAX_KEY_SIZE 64
+    #define FSCRYPT_MAX_KEY_SIZE  64
 
     struct fscrypt_key {
             u32 mode;
-            u8 raw[FS_MAX_KEY_SIZE];
+            u8 raw[FSCRYPT_MAX_KEY_SIZE];
             u32 size;
     };
 
@@ -574,7 +574,7 @@ much confusion if an encryption policy were to be added to or removed
 from anything other than an empty directory.)  The struct is defined
 as follows::
 
-    #define FS_KEY_DESCRIPTOR_SIZE  8
+    #define FSCRYPT_KEY_DESCRIPTOR_SIZE  8
     #define FS_KEY_DERIVATION_NONCE_SIZE 16
 
     struct fscrypt_context {
@@ -582,7 +582,7 @@ as follows::
             u8 contents_encryption_mode;
             u8 filenames_encryption_mode;
             u8 flags;
-            u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+            u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
             u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
     };

diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index 26f6d2c19afd..674b0452ef57 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -10,35 +10,30 @@
 
 #include <linux/types.h>
 
-#define FS_KEY_DESCRIPTOR_SIZE	8
+#define FSCRYPT_KEY_DESCRIPTOR_SIZE	8
 
 /* Encryption policy flags */
-#define FS_POLICY_FLAGS_PAD_4		0x00
-#define FS_POLICY_FLAGS_PAD_8		0x01
-#define FS_POLICY_FLAGS_PAD_16		0x02
-#define FS_POLICY_FLAGS_PAD_32		0x03
-#define FS_POLICY_FLAGS_PAD_MASK	0x03
-#define FS_POLICY_FLAG_DIRECT_KEY	0x04	/* use master key directly */
-#define FS_POLICY_FLAGS_VALID		0x07
+#define FSCRYPT_POLICY_FLAGS_PAD_4		0x00
+#define FSCRYPT_POLICY_FLAGS_PAD_8		0x01
+#define FSCRYPT_POLICY_FLAGS_PAD_16		0x02
+#define FSCRYPT_POLICY_FLAGS_PAD_32		0x03
+#define FSCRYPT_POLICY_FLAGS_PAD_MASK		0x03
+#define FSCRYPT_POLICY_FLAG_DIRECT_KEY		0x04	/* use master key directly */
+#define FSCRYPT_POLICY_FLAGS_VALID		0x07
 
 /* Encryption algorithms */
-#define FS_ENCRYPTION_MODE_INVALID		0
-#define FS_ENCRYPTION_MODE_AES_256_XTS		1
-#define FS_ENCRYPTION_MODE_AES_256_GCM		2
-#define FS_ENCRYPTION_MODE_AES_256_CBC		3
-#define FS_ENCRYPTION_MODE_AES_256_CTS		4
-#define FS_ENCRYPTION_MODE_AES_128_CBC		5
-#define FS_ENCRYPTION_MODE_AES_128_CTS		6
-#define FS_ENCRYPTION_MODE_SPECK128_256_XTS	7	/* Removed, do not use. */
-#define FS_ENCRYPTION_MODE_SPECK128_256_CTS	8	/* Removed, do not use. */
-#define FS_ENCRYPTION_MODE_ADIANTUM		9
+#define FSCRYPT_MODE_AES_256_XTS		1
+#define FSCRYPT_MODE_AES_256_CTS		4
+#define FSCRYPT_MODE_AES_128_CBC		5
+#define FSCRYPT_MODE_AES_128_CTS		6
+#define FSCRYPT_MODE_ADIANTUM			9
 
 struct fscrypt_policy {
 	__u8 version;
 	__u8 contents_encryption_mode;
 	__u8 filenames_encryption_mode;
 	__u8 flags;
-	__u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+	__u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
 };
 
 #define FS_IOC_SET_ENCRYPTION_POLICY	_IOR('f', 19, struct fscrypt_policy)
@@ -46,16 +41,40 @@ struct fscrypt_policy {
 #define FS_IOC_GET_ENCRYPTION_POLICY	_IOW('f', 21, struct fscrypt_policy)
 
 /* Parameters for passing an encryption key into the kernel keyring */
-#define FS_KEY_DESC_PREFIX		"fscrypt:"
-#define FS_KEY_DESC_PREFIX_SIZE		8
+#define FSCRYPT_KEY_DESC_PREFIX		"fscrypt:"
+#define FSCRYPT_KEY_DESC_PREFIX_SIZE	8
 
 /* Structure that userspace passes to the kernel keyring */
-#define FS_MAX_KEY_SIZE			64
+#define FSCRYPT_MAX_KEY_SIZE		64
 
 struct fscrypt_key {
 	__u32 mode;
-	__u8 raw[FS_MAX_KEY_SIZE];
+	__u8 raw[FSCRYPT_MAX_KEY_SIZE];
 	__u32 size;
 };
 
+/**********************************************************************/
+
+/* old names; don't add anything new here! */
+#define FS_KEY_DESCRIPTOR_SIZE		FSCRYPT_KEY_DESCRIPTOR_SIZE
+#define FS_POLICY_FLAGS_PAD_4		FSCRYPT_POLICY_FLAGS_PAD_4
+#define FS_POLICY_FLAGS_PAD_8		FSCRYPT_POLICY_FLAGS_PAD_8
+#define FS_POLICY_FLAGS_PAD_16		FSCRYPT_POLICY_FLAGS_PAD_16
+#define FS_POLICY_FLAGS_PAD_32		FSCRYPT_POLICY_FLAGS_PAD_32
+#define FS_POLICY_FLAGS_PAD_MASK	FSCRYPT_POLICY_FLAGS_PAD_MASK
+#define FS_POLICY_FLAG_DIRECT_KEY	FSCRYPT_POLICY_FLAG_DIRECT_KEY
+#define FS_POLICY_FLAGS_VALID		FSCRYPT_POLICY_FLAGS_VALID
+#define FS_ENCRYPTION_MODE_INVALID	0	/* never used */
+#define FS_ENCRYPTION_MODE_AES_256_XTS	FSCRYPT_MODE_AES_256_XTS
+#define FS_ENCRYPTION_MODE_AES_256_GCM	2	/* never used */
+#define FS_ENCRYPTION_MODE_AES_256_CBC	3	/* never used */
+#define FS_ENCRYPTION_MODE_AES_256_CTS	FSCRYPT_MODE_AES_256_CTS
+#define FS_ENCRYPTION_MODE_AES_128_CBC	FSCRYPT_MODE_AES_128_CBC
+#define FS_ENCRYPTION_MODE_AES_128_CTS	FSCRYPT_MODE_AES_128_CTS
+#define FS_ENCRYPTION_MODE_SPECK128_256_XTS	7	/* removed */
+#define FS_ENCRYPTION_MODE_SPECK128_256_CTS	8	/* removed */
+#define FS_ENCRYPTION_MODE_ADIANTUM	FSCRYPT_MODE_ADIANTUM
+#define FS_KEY_DESC_PREFIX		FSCRYPT_KEY_DESC_PREFIX
+#define FS_KEY_DESC_PREFIX_SIZE		FSCRYPT_KEY_DESC_PREFIX_SIZE
+#define FS_MAX_KEY_SIZE			FSCRYPT_MAX_KEY_SIZE
 
 #endif /* _UAPI_LINUX_FSCRYPT_H */
--
cgit v1.2.3


From 3b6df59bc4d242ac5847592de55d1ff327cd4549 Mon Sep 17 00:00:00 2001
From: Eric Biggers
Date: Sun, 4 Aug 2019 19:35:44 -0700
Subject: fscrypt: use FSCRYPT_* definitions, not FS_*

Update fs/crypto/ to use the new names for the UAPI constants rather
than the old names, then make the old definitions conditional on
!__KERNEL__.
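To make the userspace-facing effect concrete, here is a minimal editorial
sketch (not part of the patch) of setting an encryption policy with the new
names. It uses only definitions shown in the hunks above; the helper name
and the directory handling are made up for illustration:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fscrypt.h>

    /* Apply a v0 policy to an (empty) directory, using the renamed constants. */
    int set_policy(const char *dir, const __u8 *descriptor)
    {
            struct fscrypt_policy policy;
            int fd, ret;

            memset(&policy, 0, sizeof(policy));
            policy.version = 0;
            policy.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS;
            policy.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS;
            policy.flags = FSCRYPT_POLICY_FLAGS_PAD_32;
            memcpy(policy.master_key_descriptor, descriptor,
                   FSCRYPT_KEY_DESCRIPTOR_SIZE);

            fd = open(dir, O_RDONLY);
            if (fd < 0)
                    return -1;
            /* The ioctl names keep their FS_IOC_ prefix; only constants moved. */
            ret = ioctl(fd, FS_IOC_SET_ENCRYPTION_POLICY, &policy);
            close(fd);
            return ret;
    }

A program that still uses the old FS_* spellings keeps compiling, since the
header defines the old names as aliases; after this patch those aliases are
visible only outside the kernel (!__KERNEL__).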
Reviewed-by: Theodore Ts'o Signed-off-by: Eric Biggers --- fs/crypto/crypto.c | 2 +- fs/crypto/fname.c | 2 +- fs/crypto/fscrypt_private.h | 16 ++++++------- fs/crypto/keyinfo.c | 53 ++++++++++++++++++++++---------------------- fs/crypto/policy.c | 14 ++++++------ include/uapi/linux/fscrypt.h | 2 ++ 6 files changed, 46 insertions(+), 43 deletions(-) (limited to 'include/uapi') diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 3e4624cfe4b5..7502c1f0ede9 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -141,7 +141,7 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, memset(iv, 0, ci->ci_mode->ivsize); iv->lblk_num = cpu_to_le64(lblk_num); - if (ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY) + if (ci->ci_flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE); if (ci->ci_essiv_tfm != NULL) diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 5cab3bb2d1fc..f4977d44d69b 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -182,7 +182,7 @@ bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, u32 max_len, u32 *encrypted_len_ret) { int padding = 4 << (inode->i_crypt_info->ci_flags & - FS_POLICY_FLAGS_PAD_MASK); + FSCRYPT_POLICY_FLAGS_PAD_MASK); u32 encrypted_len; if (orig_len > max_len) diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 4d715708c6e1..fae411b2f78d 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -34,7 +34,7 @@ struct fscrypt_context { u8 contents_encryption_mode; u8 filenames_encryption_mode; u8 flags; - u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE]; + u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; } __packed; @@ -84,7 +84,7 @@ struct fscrypt_info { u8 ci_data_mode; u8 ci_filename_mode; u8 ci_flags; - u8 ci_master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE]; + u8 ci_master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE]; }; @@ -98,16 +98,16 @@ typedef enum { static inline bool fscrypt_valid_enc_modes(u32 contents_mode, u32 filenames_mode) { - if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC && - filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS) + if (contents_mode == FSCRYPT_MODE_AES_128_CBC && + filenames_mode == FSCRYPT_MODE_AES_128_CTS) return true; - if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS && - filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS) + if (contents_mode == FSCRYPT_MODE_AES_256_XTS && + filenames_mode == FSCRYPT_MODE_AES_256_CTS) return true; - if (contents_mode == FS_ENCRYPTION_MODE_ADIANTUM && - filenames_mode == FS_ENCRYPTION_MODE_ADIANTUM) + if (contents_mode == FSCRYPT_MODE_ADIANTUM && + filenames_mode == FSCRYPT_MODE_ADIANTUM) return true; return false; diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 212994300233..22345ddede11 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -20,7 +20,7 @@ static struct crypto_shash *essiv_hash_tfm; -/* Table of keys referenced by FS_POLICY_FLAG_DIRECT_KEY policies */ +/* Table of keys referenced by DIRECT_KEY policies */ static DEFINE_HASHTABLE(fscrypt_master_keys, 6); /* 6 bits = 64 buckets */ static DEFINE_SPINLOCK(fscrypt_master_keys_lock); @@ -77,7 +77,7 @@ out: */ static struct key * find_and_lock_process_key(const char *prefix, - const u8 descriptor[FS_KEY_DESCRIPTOR_SIZE], + const u8 descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE], unsigned int min_keysize, const struct fscrypt_key **payload_ret) { @@ -87,7 +87,7 @@ find_and_lock_process_key(const 
char *prefix, const struct fscrypt_key *payload; description = kasprintf(GFP_NOFS, "%s%*phN", prefix, - FS_KEY_DESCRIPTOR_SIZE, descriptor); + FSCRYPT_KEY_DESCRIPTOR_SIZE, descriptor); if (!description) return ERR_PTR(-ENOMEM); @@ -105,7 +105,7 @@ find_and_lock_process_key(const char *prefix, payload = (const struct fscrypt_key *)ukp->data; if (ukp->datalen != sizeof(struct fscrypt_key) || - payload->size < 1 || payload->size > FS_MAX_KEY_SIZE) { + payload->size < 1 || payload->size > FSCRYPT_MAX_KEY_SIZE) { fscrypt_warn(NULL, "key with description '%s' has invalid payload", key->description); @@ -129,32 +129,32 @@ invalid: } static struct fscrypt_mode available_modes[] = { - [FS_ENCRYPTION_MODE_AES_256_XTS] = { + [FSCRYPT_MODE_AES_256_XTS] = { .friendly_name = "AES-256-XTS", .cipher_str = "xts(aes)", .keysize = 64, .ivsize = 16, }, - [FS_ENCRYPTION_MODE_AES_256_CTS] = { + [FSCRYPT_MODE_AES_256_CTS] = { .friendly_name = "AES-256-CTS-CBC", .cipher_str = "cts(cbc(aes))", .keysize = 32, .ivsize = 16, }, - [FS_ENCRYPTION_MODE_AES_128_CBC] = { + [FSCRYPT_MODE_AES_128_CBC] = { .friendly_name = "AES-128-CBC", .cipher_str = "cbc(aes)", .keysize = 16, .ivsize = 16, .needs_essiv = true, }, - [FS_ENCRYPTION_MODE_AES_128_CTS] = { + [FSCRYPT_MODE_AES_128_CTS] = { .friendly_name = "AES-128-CTS-CBC", .cipher_str = "cts(cbc(aes))", .keysize = 16, .ivsize = 16, }, - [FS_ENCRYPTION_MODE_ADIANTUM] = { + [FSCRYPT_MODE_ADIANTUM] = { .friendly_name = "Adiantum", .cipher_str = "adiantum(xchacha12,aes)", .keysize = 32, @@ -192,7 +192,7 @@ static int find_and_derive_key(const struct inode *inode, const struct fscrypt_key *payload; int err; - key = find_and_lock_process_key(FS_KEY_DESC_PREFIX, + key = find_and_lock_process_key(FSCRYPT_KEY_DESC_PREFIX, ctx->master_key_descriptor, mode->keysize, &payload); if (key == ERR_PTR(-ENOKEY) && inode->i_sb->s_cop->key_prefix) { @@ -203,7 +203,7 @@ static int find_and_derive_key(const struct inode *inode, if (IS_ERR(key)) return PTR_ERR(key); - if (ctx->flags & FS_POLICY_FLAG_DIRECT_KEY) { + if (ctx->flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { if (mode->ivsize < offsetofend(union fscrypt_iv, nonce)) { fscrypt_warn(inode, "Direct key mode not allowed with %s", @@ -272,14 +272,14 @@ err_free_tfm: return ERR_PTR(err); } -/* Master key referenced by FS_POLICY_FLAG_DIRECT_KEY policy */ +/* Master key referenced by DIRECT_KEY policy */ struct fscrypt_master_key { struct hlist_node mk_node; refcount_t mk_refcount; const struct fscrypt_mode *mk_mode; struct crypto_skcipher *mk_ctfm; - u8 mk_descriptor[FS_KEY_DESCRIPTOR_SIZE]; - u8 mk_raw[FS_MAX_KEY_SIZE]; + u8 mk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; + u8 mk_raw[FSCRYPT_MAX_KEY_SIZE]; }; static void free_master_key(struct fscrypt_master_key *mk) @@ -320,13 +320,13 @@ find_or_insert_master_key(struct fscrypt_master_key *to_insert, * raw key, and use crypto_memneq() when comparing raw keys. 
*/ - BUILD_BUG_ON(sizeof(hash_key) > FS_KEY_DESCRIPTOR_SIZE); + BUILD_BUG_ON(sizeof(hash_key) > FSCRYPT_KEY_DESCRIPTOR_SIZE); memcpy(&hash_key, ci->ci_master_key_descriptor, sizeof(hash_key)); spin_lock(&fscrypt_master_keys_lock); hash_for_each_possible(fscrypt_master_keys, mk, mk_node, hash_key) { if (memcmp(ci->ci_master_key_descriptor, mk->mk_descriptor, - FS_KEY_DESCRIPTOR_SIZE) != 0) + FSCRYPT_KEY_DESCRIPTOR_SIZE) != 0) continue; if (mode != mk->mk_mode) continue; @@ -370,7 +370,7 @@ fscrypt_get_master_key(const struct fscrypt_info *ci, struct fscrypt_mode *mode, goto err_free_mk; } memcpy(mk->mk_descriptor, ci->ci_master_key_descriptor, - FS_KEY_DESCRIPTOR_SIZE); + FSCRYPT_KEY_DESCRIPTOR_SIZE); memcpy(mk->mk_raw, raw_key, mode->keysize); return find_or_insert_master_key(mk, raw_key, mode, ci); @@ -448,8 +448,8 @@ out: /* * Given the encryption mode and key (normally the derived key, but for - * FS_POLICY_FLAG_DIRECT_KEY mode it's the master key), set up the inode's - * symmetric cipher transform object(s). + * DIRECT_KEY mode it's the master key), set up the inode's symmetric cipher + * transform object(s). */ static int setup_crypto_transform(struct fscrypt_info *ci, struct fscrypt_mode *mode, @@ -459,7 +459,7 @@ static int setup_crypto_transform(struct fscrypt_info *ci, struct crypto_skcipher *ctfm; int err; - if (ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY) { + if (ci->ci_flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { mk = fscrypt_get_master_key(ci, mode, raw_key, inode); if (IS_ERR(mk)) return PTR_ERR(mk); @@ -476,7 +476,7 @@ static int setup_crypto_transform(struct fscrypt_info *ci, if (mode->needs_essiv) { /* ESSIV implies 16-byte IVs which implies !DIRECT_KEY */ WARN_ON(mode->ivsize != AES_BLOCK_SIZE); - WARN_ON(ci->ci_flags & FS_POLICY_FLAG_DIRECT_KEY); + WARN_ON(ci->ci_flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY); err = init_essiv_generator(ci, raw_key, mode->keysize); if (err) { @@ -530,9 +530,10 @@ int fscrypt_get_encryption_info(struct inode *inode) /* Fake up a context for an unencrypted directory */ memset(&ctx, 0, sizeof(ctx)); ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; - ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS; - ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS; - memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE); + ctx.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; + ctx.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; + memset(ctx.master_key_descriptor, 0x42, + FSCRYPT_KEY_DESCRIPTOR_SIZE); } else if (res != sizeof(ctx)) { fscrypt_warn(inode, "Unknown encryption context size (%d bytes)", res); @@ -545,7 +546,7 @@ int fscrypt_get_encryption_info(struct inode *inode) return -EINVAL; } - if (ctx.flags & ~FS_POLICY_FLAGS_VALID) { + if (ctx.flags & ~FSCRYPT_POLICY_FLAGS_VALID) { fscrypt_warn(inode, "Unknown encryption context flags (0x%02x)", ctx.flags); return -EINVAL; @@ -559,7 +560,7 @@ int fscrypt_get_encryption_info(struct inode *inode) crypt_info->ci_data_mode = ctx.contents_encryption_mode; crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; memcpy(crypt_info->ci_master_key_descriptor, ctx.master_key_descriptor, - FS_KEY_DESCRIPTOR_SIZE); + FSCRYPT_KEY_DESCRIPTOR_SIZE); memcpy(crypt_info->ci_nonce, ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE); mode = select_encryption_mode(crypt_info, inode); diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 4941fe8471ce..da7ae9c8b4ad 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -22,7 +22,7 @@ static bool is_encryption_context_consistent_with_policy( 
const struct fscrypt_policy *policy) { return memcmp(ctx->master_key_descriptor, policy->master_key_descriptor, - FS_KEY_DESCRIPTOR_SIZE) == 0 && + FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 && (ctx->flags == policy->flags) && (ctx->contents_encryption_mode == policy->contents_encryption_mode) && @@ -37,13 +37,13 @@ static int create_encryption_context_from_policy(struct inode *inode, ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, - FS_KEY_DESCRIPTOR_SIZE); + FSCRYPT_KEY_DESCRIPTOR_SIZE); if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, policy->filenames_encryption_mode)) return -EINVAL; - if (policy->flags & ~FS_POLICY_FLAGS_VALID) + if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) return -EINVAL; ctx.contents_encryption_mode = policy->contents_encryption_mode; @@ -128,7 +128,7 @@ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) policy.filenames_encryption_mode = ctx.filenames_encryption_mode; policy.flags = ctx.flags; memcpy(policy.master_key_descriptor, ctx.master_key_descriptor, - FS_KEY_DESCRIPTOR_SIZE); + FSCRYPT_KEY_DESCRIPTOR_SIZE); if (copy_to_user(arg, &policy, sizeof(policy))) return -EFAULT; @@ -202,7 +202,7 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) if (parent_ci && child_ci) { return memcmp(parent_ci->ci_master_key_descriptor, child_ci->ci_master_key_descriptor, - FS_KEY_DESCRIPTOR_SIZE) == 0 && + FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 && (parent_ci->ci_data_mode == child_ci->ci_data_mode) && (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) && @@ -219,7 +219,7 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) return memcmp(parent_ctx.master_key_descriptor, child_ctx.master_key_descriptor, - FS_KEY_DESCRIPTOR_SIZE) == 0 && + FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 && (parent_ctx.contents_encryption_mode == child_ctx.contents_encryption_mode) && (parent_ctx.filenames_encryption_mode == @@ -257,7 +257,7 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child, ctx.filenames_encryption_mode = ci->ci_filename_mode; ctx.flags = ci->ci_flags; memcpy(ctx.master_key_descriptor, ci->ci_master_key_descriptor, - FS_KEY_DESCRIPTOR_SIZE); + FSCRYPT_KEY_DESCRIPTOR_SIZE); get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE); BUILD_BUG_ON(sizeof(ctx) != FSCRYPT_SET_CONTEXT_MAX_SIZE); res = parent->i_sb->s_cop->set_context(child, &ctx, diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 674b0452ef57..29a945d165de 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -55,6 +55,7 @@ struct fscrypt_key { /**********************************************************************/ /* old names; don't add anything new here! */ +#ifndef __KERNEL__ #define FS_KEY_DESCRIPTOR_SIZE FSCRYPT_KEY_DESCRIPTOR_SIZE #define FS_POLICY_FLAGS_PAD_4 FSCRYPT_POLICY_FLAGS_PAD_4 #define FS_POLICY_FLAGS_PAD_8 FSCRYPT_POLICY_FLAGS_PAD_8 @@ -76,5 +77,6 @@ struct fscrypt_key { #define FS_KEY_DESC_PREFIX FSCRYPT_KEY_DESC_PREFIX #define FS_KEY_DESC_PREFIX_SIZE FSCRYPT_KEY_DESC_PREFIX_SIZE #define FS_MAX_KEY_SIZE FSCRYPT_MAX_KEY_SIZE +#endif /* !__KERNEL__ */ #endif /* _UAPI_LINUX_FSCRYPT_H */ -- cgit v1.2.3 From 22d94f493bfb408fdd764f7b1d0363af2122fba5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 4 Aug 2019 19:35:46 -0700 Subject: fscrypt: add FS_IOC_ADD_ENCRYPTION_KEY ioctl Add a new fscrypt ioctl, FS_IOC_ADD_ENCRYPTION_KEY. 
This ioctl adds an encryption key to the filesystem's fscrypt keyring
->s_master_keys, making any files encrypted with that key appear
"unlocked".

Why we need this
~~~~~~~~~~~~~~~~

The main problem is that the "locked/unlocked" (ciphertext/plaintext)
status of encrypted files is global, but the fscrypt keys are not.
fscrypt only looks for keys in the keyring(s) the process accessing the
filesystem is subscribed to: the thread keyring, process keyring, and
session keyring, where the session keyring may contain the user
keyring.

Therefore, userspace has to put fscrypt keys in the keyrings for
individual users or sessions.  But this means that when a process with
a different keyring tries to access encrypted files, whether they
appear "unlocked" or not is nondeterministic.  This is because it
depends on whether the files are currently present in the inode cache.

Fixing this by consistently providing each process its own view of the
filesystem depending on whether it has the key or not isn't feasible
due to how the VFS caches work.  Furthermore, while sometimes users
expect this behavior, it is misguided for two reasons.  First, it would
be an OS-level access control mechanism largely redundant with existing
access control mechanisms such as UNIX file permissions, ACLs, LSMs,
etc.  Encryption is actually for protecting the data at rest.

Second, almost all users of fscrypt actually do need the keys to be
global.  The largest users of fscrypt, Android and Chromium OS, achieve
this by having PID 1 create a "session keyring" that is inherited by
every process.  This works, but it isn't scalable because it prevents
session keyrings from being used for any other purpose.

On general-purpose Linux distros, the 'fscrypt' userspace tool [1]
can't similarly abuse the session keyring, so to make 'sudo' work on
all systems it has to link all the user keyrings into root's user
keyring [2].  This is ugly and raises security concerns.  Moreover it
can't make the keys available to system services, such as sshd trying
to access the user's '~/.ssh' directory (see [3], [4]) or
NetworkManager trying to read certificates from the user's home
directory (see [5]); or to Docker containers (see [6], [7]).

By having an API to add a key to the *filesystem* we'll be able to fix
the above bugs, remove userspace workarounds, and clearly express the
intended semantics: the locked/unlocked status of an encrypted
directory is global, and encryption is orthogonal to OS-level access
control.

Why not use the add_key() syscall
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

We use an ioctl for this API rather than the existing add_key() system
call because the ioctl gives us the flexibility needed to implement
fscrypt-specific semantics that will be introduced in later patches:

- Supporting key removal with the semantics such that the secret is
  removed immediately and any unused inodes using the key are evicted;
  also, the eviction of any in-use inodes can be retried.

- Calculating a key-dependent cryptographic identifier and returning it
  to userspace.

- Allowing keys to be added and removed by non-root users, but only
  keys for v2 encryption policies; and to prevent denial-of-service
  attacks, users can only remove keys they themselves have added, and a
  key is only really removed after all users who added it have removed
  it.

Trying to shoehorn these semantics into the keyrings syscalls would be
very difficult, whereas the ioctls make things much easier.  However,
to reuse code the implementation still uses the keyrings service
internally.
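Before the implementation details, a minimal editorial sketch of how
userspace would invoke the new ioctl. This is illustrative only: it assumes
the struct fscrypt_add_key_arg layout that this patch adds to
<linux/fscrypt.h> (a key_spec, a raw_size, reserved fields, and a flexible
raw[] payload), which is not visible in the patch text shown here, and a
made-up mount point:

    #include <fcntl.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fscrypt.h>

    /* Unlock every file on the filesystem encrypted with this master key. */
    int add_key(const __u8 *descriptor, const __u8 *raw, __u32 raw_size)
    {
            struct fscrypt_add_key_arg *arg;
            int fd, ret;

            arg = calloc(1, sizeof(*arg) + raw_size);
            if (!arg)
                    return -1;
            arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR;
            memcpy(arg->key_spec.u.descriptor, descriptor,
                   FSCRYPT_KEY_DESCRIPTOR_SIZE);
            arg->raw_size = raw_size;
            memcpy(arg->raw, raw, raw_size);

            fd = open("/mnt", O_RDONLY);  /* any file on the filesystem */
            ret = fd < 0 ? -1 : ioctl(fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);
            if (fd >= 0)
                    close(fd);

            /* Wipe the key material before freeing, mirroring the kernel side. */
            memset(arg, 0, sizeof(*arg) + raw_size);
            free(arg);
            return ret;
    }

Note that the key is added to the *filesystem*, so any process that passes
the kernel's permission checks can trigger the unlock. Internally, as the
message above notes, the implementation reuses the keyrings service.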
Thus we get lockless RCU-mode key lookups without having to re-implement it, and the keys automatically show up in /proc/keys for debugging purposes. References: [1] https://github.com/google/fscrypt [2] https://goo.gl/55cCrI#heading=h.vf09isp98isb [3] https://github.com/google/fscrypt/issues/111#issuecomment-444347939 [4] https://github.com/google/fscrypt/issues/116 [5] https://bugs.launchpad.net/ubuntu/+source/fscrypt/+bug/1770715 [6] https://github.com/google/fscrypt/issues/128 [7] https://askubuntu.com/questions/1130306/cannot-run-docker-on-an-encrypted-filesystem Reviewed-by: Theodore Ts'o Signed-off-by: Eric Biggers --- fs/crypto/Makefile | 1 + fs/crypto/crypto.c | 10 +- fs/crypto/fscrypt_private.h | 62 +++++++++- fs/crypto/keyring.c | 286 +++++++++++++++++++++++++++++++++++++++++++ fs/crypto/keysetup.c | 35 +++++- fs/super.c | 2 + include/linux/fs.h | 1 + include/linux/fscrypt.h | 14 +++ include/uapi/linux/fscrypt.h | 49 ++++++-- 9 files changed, 447 insertions(+), 13 deletions(-) create mode 100644 fs/crypto/keyring.c (limited to 'include/uapi') diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile index ad14d4c29784..6b2485b41393 100644 --- a/fs/crypto/Makefile +++ b/fs/crypto/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o fscrypto-y := crypto.o \ fname.o \ hooks.o \ + keyring.o \ keysetup.o \ keysetup_v1.o \ policy.o diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 7502c1f0ede9..65ca077e8d58 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -478,6 +478,8 @@ void fscrypt_msg(const struct inode *inode, const char *level, */ static int __init fscrypt_init(void) { + int err = -ENOMEM; + /* * Use an unbound workqueue to allow bios to be decrypted in parallel * even when they happen to complete on the same CPU. This sacrifices @@ -500,13 +502,19 @@ static int __init fscrypt_init(void) if (!fscrypt_info_cachep) goto fail_free_ctx; + err = fscrypt_init_keyring(); + if (err) + goto fail_free_info; + return 0; +fail_free_info: + kmem_cache_destroy(fscrypt_info_cachep); fail_free_ctx: kmem_cache_destroy(fscrypt_ctx_cachep); fail_free_queue: destroy_workqueue(fscrypt_read_workqueue); fail: - return -ENOMEM; + return err; } late_initcall(fscrypt_init) diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 794dcba25ca8..0d9ebfd3bf3a 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -14,9 +14,12 @@ #include #include -/* Encryption parameters */ +#define CONST_STRLEN(str) (sizeof(str) - 1) + #define FS_KEY_DERIVATION_NONCE_SIZE 16 +#define FSCRYPT_MIN_KEY_SIZE 16 + /** * Encryption context for inode * @@ -156,6 +159,63 @@ extern bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, u32 max_len, u32 *encrypted_len_ret); +/* keyring.c */ + +/* + * fscrypt_master_key_secret - secret key material of an in-use master key + */ +struct fscrypt_master_key_secret { + + /* Size of the raw key in bytes */ + u32 size; + + /* The raw key */ + u8 raw[FSCRYPT_MAX_KEY_SIZE]; + +} __randomize_layout; + +/* + * fscrypt_master_key - an in-use master key + * + * This represents a master encryption key which has been added to the + * filesystem and can be used to "unlock" the encrypted files which were + * encrypted with it. 
+ */ +struct fscrypt_master_key { + + /* The secret key material */ + struct fscrypt_master_key_secret mk_secret; + + /* Arbitrary key descriptor which was assigned by userspace */ + struct fscrypt_key_specifier mk_spec; + +} __randomize_layout; + +static inline const char *master_key_spec_type( + const struct fscrypt_key_specifier *spec) +{ + switch (spec->type) { + case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: + return "descriptor"; + } + return "[unknown]"; +} + +static inline int master_key_spec_len(const struct fscrypt_key_specifier *spec) +{ + switch (spec->type) { + case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: + return FSCRYPT_KEY_DESCRIPTOR_SIZE; + } + return 0; +} + +extern struct key * +fscrypt_find_master_key(struct super_block *sb, + const struct fscrypt_key_specifier *mk_spec); + +extern int __init fscrypt_init_keyring(void); + /* keysetup.c */ struct fscrypt_mode { diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c new file mode 100644 index 000000000000..bcd7d2836e1e --- /dev/null +++ b/fs/crypto/keyring.c @@ -0,0 +1,286 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Filesystem-level keyring for fscrypt + * + * Copyright 2019 Google LLC + */ + +/* + * This file implements management of fscrypt master keys in the + * filesystem-level keyring, including the ioctls: + * + * - FS_IOC_ADD_ENCRYPTION_KEY + * + * See the "User API" section of Documentation/filesystems/fscrypt.rst for more + * information about these ioctls. + */ + +#include +#include + +#include "fscrypt_private.h" + +static void wipe_master_key_secret(struct fscrypt_master_key_secret *secret) +{ + memzero_explicit(secret, sizeof(*secret)); +} + +static void move_master_key_secret(struct fscrypt_master_key_secret *dst, + struct fscrypt_master_key_secret *src) +{ + memcpy(dst, src, sizeof(*dst)); + memzero_explicit(src, sizeof(*src)); +} + +static void free_master_key(struct fscrypt_master_key *mk) +{ + wipe_master_key_secret(&mk->mk_secret); + kzfree(mk); +} + +static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec) +{ + if (spec->__reserved) + return false; + return master_key_spec_len(spec) != 0; +} + +static int fscrypt_key_instantiate(struct key *key, + struct key_preparsed_payload *prep) +{ + key->payload.data[0] = (struct fscrypt_master_key *)prep->data; + return 0; +} + +static void fscrypt_key_destroy(struct key *key) +{ + free_master_key(key->payload.data[0]); +} + +static void fscrypt_key_describe(const struct key *key, struct seq_file *m) +{ + seq_puts(m, key->description); +} + +/* + * Type of key in ->s_master_keys. Each key of this type represents a master + * key which has been added to the filesystem. Its payload is a + * 'struct fscrypt_master_key'. The "." prefix in the key type name prevents + * users from adding keys of this type via the keyrings syscalls rather than via + * the intended method of FS_IOC_ADD_ENCRYPTION_KEY. + */ +static struct key_type key_type_fscrypt = { + .name = "._fscrypt", + .instantiate = fscrypt_key_instantiate, + .destroy = fscrypt_key_destroy, + .describe = fscrypt_key_describe, +}; + +/* Search ->s_master_keys */ +static struct key *search_fscrypt_keyring(struct key *keyring, + struct key_type *type, + const char *description) +{ + /* + * We need to mark the keyring reference as "possessed" so that we + * acquire permission to search it, via the KEY_POS_SEARCH permission. 
+ */ + key_ref_t keyref = make_key_ref(keyring, true /* possessed */); + + keyref = keyring_search(keyref, type, description, false); + if (IS_ERR(keyref)) { + if (PTR_ERR(keyref) == -EAGAIN || /* not found */ + PTR_ERR(keyref) == -EKEYREVOKED) /* recently invalidated */ + keyref = ERR_PTR(-ENOKEY); + return ERR_CAST(keyref); + } + return key_ref_to_ptr(keyref); +} + +#define FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE \ + (CONST_STRLEN("fscrypt-") + FIELD_SIZEOF(struct super_block, s_id)) + +#define FSCRYPT_MK_DESCRIPTION_SIZE (2 * FSCRYPT_KEY_DESCRIPTOR_SIZE + 1) + +static void format_fs_keyring_description( + char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE], + const struct super_block *sb) +{ + sprintf(description, "fscrypt-%s", sb->s_id); +} + +static void format_mk_description( + char description[FSCRYPT_MK_DESCRIPTION_SIZE], + const struct fscrypt_key_specifier *mk_spec) +{ + sprintf(description, "%*phN", + master_key_spec_len(mk_spec), (u8 *)&mk_spec->u); +} + +/* Create ->s_master_keys if needed. Synchronized by fscrypt_add_key_mutex. */ +static int allocate_filesystem_keyring(struct super_block *sb) +{ + char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE]; + struct key *keyring; + + if (sb->s_master_keys) + return 0; + + format_fs_keyring_description(description, sb); + keyring = keyring_alloc(description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + current_cred(), KEY_POS_SEARCH | + KEY_USR_SEARCH | KEY_USR_READ | KEY_USR_VIEW, + KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); + if (IS_ERR(keyring)) + return PTR_ERR(keyring); + + /* Pairs with READ_ONCE() in fscrypt_find_master_key() */ + smp_store_release(&sb->s_master_keys, keyring); + return 0; +} + +void fscrypt_sb_free(struct super_block *sb) +{ + key_put(sb->s_master_keys); + sb->s_master_keys = NULL; +} + +/* + * Find the specified master key in ->s_master_keys. + * Returns ERR_PTR(-ENOKEY) if not found. + */ +struct key *fscrypt_find_master_key(struct super_block *sb, + const struct fscrypt_key_specifier *mk_spec) +{ + struct key *keyring; + char description[FSCRYPT_MK_DESCRIPTION_SIZE]; + + /* pairs with smp_store_release() in allocate_filesystem_keyring() */ + keyring = READ_ONCE(sb->s_master_keys); + if (keyring == NULL) + return ERR_PTR(-ENOKEY); /* No keyring yet, so no keys yet. */ + + format_mk_description(description, mk_spec); + return search_fscrypt_keyring(keyring, &key_type_fscrypt, description); +} + +/* + * Allocate a new fscrypt_master_key which contains the given secret, set it as + * the payload of a new 'struct key' of type fscrypt, and link the 'struct key' + * into the given keyring. Synchronized by fscrypt_add_key_mutex. 
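+ *
+ * Note that key_instantiate_and_link() makes the keyring take its own
+ * reference to the new key, so the temporary reference from key_alloc() is
+ * dropped with key_put() below either way.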
+ */ +static int add_new_master_key(struct fscrypt_master_key_secret *secret, + const struct fscrypt_key_specifier *mk_spec, + struct key *keyring) +{ + struct fscrypt_master_key *mk; + char description[FSCRYPT_MK_DESCRIPTION_SIZE]; + struct key *key; + int err; + + mk = kzalloc(sizeof(*mk), GFP_KERNEL); + if (!mk) + return -ENOMEM; + + mk->mk_spec = *mk_spec; + + move_master_key_secret(&mk->mk_secret, secret); + + format_mk_description(description, mk_spec); + key = key_alloc(&key_type_fscrypt, description, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(), + KEY_POS_SEARCH | KEY_USR_SEARCH | KEY_USR_VIEW, + KEY_ALLOC_NOT_IN_QUOTA, NULL); + if (IS_ERR(key)) { + err = PTR_ERR(key); + goto out_free_mk; + } + err = key_instantiate_and_link(key, mk, sizeof(*mk), keyring, NULL); + key_put(key); + if (err) + goto out_free_mk; + + return 0; + +out_free_mk: + free_master_key(mk); + return err; +} + +static int add_master_key(struct super_block *sb, + struct fscrypt_master_key_secret *secret, + const struct fscrypt_key_specifier *mk_spec) +{ + static DEFINE_MUTEX(fscrypt_add_key_mutex); + struct key *key; + int err; + + mutex_lock(&fscrypt_add_key_mutex); /* serialize find + link */ + key = fscrypt_find_master_key(sb, mk_spec); + if (IS_ERR(key)) { + err = PTR_ERR(key); + if (err != -ENOKEY) + goto out_unlock; + /* Didn't find the key in ->s_master_keys. Add it. */ + err = allocate_filesystem_keyring(sb); + if (err) + goto out_unlock; + err = add_new_master_key(secret, mk_spec, sb->s_master_keys); + } else { + key_put(key); + err = 0; + } +out_unlock: + mutex_unlock(&fscrypt_add_key_mutex); + return err; +} + +/* + * Add a master encryption key to the filesystem, causing all files which were + * encrypted with it to appear "unlocked" (decrypted) when accessed. + * + * For more details, see the "FS_IOC_ADD_ENCRYPTION_KEY" section of + * Documentation/filesystems/fscrypt.rst. 
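+ *
+ * A rough sketch of the userspace side, using the UAPI struct added by this
+ * patch; 'desc', 'raw_key', 'key_size', and 'fd' are placeholders, and
+ * calloc() zero-fills the allocation, which takes care of ->__reserved:
+ *
+ *	struct fscrypt_add_key_arg *arg =
+ *		calloc(1, sizeof(*arg) + key_size);
+ *
+ *	arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR;
+ *	memcpy(arg->key_spec.u.descriptor, desc, FSCRYPT_KEY_DESCRIPTOR_SIZE);
+ *	arg->raw_size = key_size;
+ *	memcpy(arg->raw, raw_key, key_size);
+ *	ioctl(fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);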
+ */ +int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) +{ + struct super_block *sb = file_inode(filp)->i_sb; + struct fscrypt_add_key_arg __user *uarg = _uarg; + struct fscrypt_add_key_arg arg; + struct fscrypt_master_key_secret secret; + int err; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (!valid_key_spec(&arg.key_spec)) + return -EINVAL; + + if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE || + arg.raw_size > FSCRYPT_MAX_KEY_SIZE) + return -EINVAL; + + if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) + return -EINVAL; + + memset(&secret, 0, sizeof(secret)); + secret.size = arg.raw_size; + err = -EFAULT; + if (copy_from_user(secret.raw, uarg->raw, secret.size)) + goto out_wipe_secret; + + err = -EACCES; + if (!capable(CAP_SYS_ADMIN)) + goto out_wipe_secret; + + err = add_master_key(sb, &secret, &arg.key_spec); +out_wipe_secret: + wipe_master_key_secret(&secret); + return err; +} +EXPORT_SYMBOL_GPL(fscrypt_ioctl_add_key); + +int __init fscrypt_init_keyring(void) +{ + return register_key_type(&key_type_fscrypt); +} diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index f4a47448e9ef..1c6d18bcdc7b 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -216,7 +216,40 @@ int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key) */ static int setup_file_encryption_key(struct fscrypt_info *ci) { - return fscrypt_setup_v1_file_key_via_subscribed_keyrings(ci); + struct key *key; + struct fscrypt_master_key *mk = NULL; + struct fscrypt_key_specifier mk_spec; + int err; + + mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; + memcpy(mk_spec.u.descriptor, ci->ci_master_key_descriptor, + FSCRYPT_KEY_DESCRIPTOR_SIZE); + + key = fscrypt_find_master_key(ci->ci_inode->i_sb, &mk_spec); + if (IS_ERR(key)) { + if (key != ERR_PTR(-ENOKEY)) + return PTR_ERR(key); + + return fscrypt_setup_v1_file_key_via_subscribed_keyrings(ci); + } + + mk = key->payload.data[0]; + + if (mk->mk_secret.size < ci->ci_mode->keysize) { + fscrypt_warn(NULL, + "key with %s %*phN is too short (got %u bytes, need %u+ bytes)", + master_key_spec_type(&mk_spec), + master_key_spec_len(&mk_spec), (u8 *)&mk_spec.u, + mk->mk_secret.size, ci->ci_mode->keysize); + err = -ENOKEY; + goto out_release_key; + } + + err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw); + +out_release_key: + key_put(key); + return err; } static void put_crypt_info(struct fscrypt_info *ci) diff --git a/fs/super.c b/fs/super.c index 5960578a4076..e486a442a61f 100644 --- a/fs/super.c +++ b/fs/super.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -290,6 +291,7 @@ static void __put_super(struct super_block *s) WARN_ON(s->s_inode_lru.node); WARN_ON(!list_empty(&s->s_mounts)); security_sb_free(s); + fscrypt_sb_free(s); put_user_ns(s->s_user_ns); kfree(s->s_subtype); call_rcu(&s->rcu, destroy_super_rcu); diff --git a/include/linux/fs.h b/include/linux/fs.h index 997a530ff4e9..5dff77326cec 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1427,6 +1427,7 @@ struct super_block { const struct xattr_handler **s_xattr; #ifdef CONFIG_FS_ENCRYPTION const struct fscrypt_operations *s_cop; + struct key *s_master_keys; /* master crypto keys in use */ #endif struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ struct list_head s_mounts; /* list of mounts; _not_ for fs use */ diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 583802cb2e35..46bf66cf76ef 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ 
-138,6 +138,10 @@ extern int fscrypt_ioctl_get_policy(struct file *, void __user *); extern int fscrypt_has_permitted_context(struct inode *, struct inode *); extern int fscrypt_inherit_context(struct inode *, struct inode *, void *, bool); +/* keyring.c */ +extern void fscrypt_sb_free(struct super_block *sb); +extern int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); + /* keysetup.c */ extern int fscrypt_get_encryption_info(struct inode *); extern void fscrypt_put_encryption_info(struct inode *); @@ -367,6 +371,16 @@ static inline int fscrypt_inherit_context(struct inode *parent, return -EOPNOTSUPP; } +/* keyring.c */ +static inline void fscrypt_sb_free(struct super_block *sb) +{ +} + +static inline int fscrypt_ioctl_add_key(struct file *filp, void __user *arg) +{ + return -EOPNOTSUPP; +} + /* keysetup.c */ static inline int fscrypt_get_encryption_info(struct inode *inode) { diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 29a945d165de..6aeca3cb0a2d 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -36,22 +36,51 @@ struct fscrypt_policy { __u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; }; -#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy) -#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) -#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy) - -/* Parameters for passing an encryption key into the kernel keyring */ +/* + * Process-subscribed "logon" key description prefix and payload format. + * Deprecated; prefer FS_IOC_ADD_ENCRYPTION_KEY instead. + */ #define FSCRYPT_KEY_DESC_PREFIX "fscrypt:" -#define FSCRYPT_KEY_DESC_PREFIX_SIZE 8 - -/* Structure that userspace passes to the kernel keyring */ -#define FSCRYPT_MAX_KEY_SIZE 64 - +#define FSCRYPT_KEY_DESC_PREFIX_SIZE 8 +#define FSCRYPT_MAX_KEY_SIZE 64 struct fscrypt_key { __u32 mode; __u8 raw[FSCRYPT_MAX_KEY_SIZE]; __u32 size; }; + +/* + * Keys are specified by an arbitrary 8-byte key "descriptor", + * matching fscrypt_policy::master_key_descriptor. + */ +#define FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR 1 + +/* + * Specifies a key. This doesn't contain the actual key itself; this is just + * the "name" of the key. + */ +struct fscrypt_key_specifier { + __u32 type; /* one of FSCRYPT_KEY_SPEC_TYPE_* */ + __u32 __reserved; + union { + __u8 __reserved[32]; /* reserve some extra space */ + __u8 descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; + } u; +}; + +/* Struct passed to FS_IOC_ADD_ENCRYPTION_KEY */ +struct fscrypt_add_key_arg { + struct fscrypt_key_specifier key_spec; + __u32 raw_size; + __u32 __reserved[9]; + __u8 raw[]; +}; + +#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy) +#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) +#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy) +#define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg) + /**********************************************************************/ /* old names; don't add anything new here! */ -- cgit v1.2.3 From b1c0ec3599f42ad372063b0235a3c33f65eb1e30 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 4 Aug 2019 19:35:46 -0700 Subject: fscrypt: add FS_IOC_REMOVE_ENCRYPTION_KEY ioctl Add a new fscrypt ioctl, FS_IOC_REMOVE_ENCRYPTION_KEY. This ioctl removes an encryption key that was added by FS_IOC_ADD_ENCRYPTION_KEY. 
It wipes the secret key itself, then "locks" the encrypted files and directories that had been unlocked using that key -- implemented by evicting the relevant dentries and inodes from the VFS caches. The problem this solves is that many fscrypt users want the ability to remove encryption keys, causing the corresponding encrypted directories to appear "locked" (presented in ciphertext form) again. Moreover, users want removing an encryption key to *really* remove it, in the sense that the removed keys cannot be recovered even if kernel memory is compromised, e.g. by the exploit of a kernel security vulnerability or by a physical attack. This is desirable after a user logs out of the system, for example. In many cases users even already assume this to be the case and are surprised to hear when it's not. It is not sufficient to simply unlink the master key from the keyring (or to revoke or invalidate it), since the actual encryption transform objects are still pinned in memory by their inodes. Therefore, to really remove a key we must also evict the relevant inodes. Currently one workaround is to run 'sync && echo 2 > /proc/sys/vm/drop_caches'. But, that evicts all unused inodes in the system rather than just the inodes associated with the key being removed, causing severe performance problems. Moreover, it requires root privileges, so regular users can't "lock" their encrypted files. Another workaround, used in Chromium OS kernels, is to add a new VFS-level ioctl FS_IOC_DROP_CACHE which is a more restricted version of drop_caches that operates on a single super_block. It does: shrink_dcache_sb(sb); invalidate_inodes(sb, false); But it's still a hack. Yet, the major users of filesystem encryption want this feature badly enough that they are actually using these hacks. To properly solve the problem, start maintaining a list of the inodes which have been "unlocked" using each master key. Originally this wasn't possible because the kernel didn't keep track of in-use master keys at all. But, with the ->s_master_keys keyring it is now possible. Then, add an ioctl FS_IOC_REMOVE_ENCRYPTION_KEY. It finds the specified master key in ->s_master_keys, then wipes the secret key itself, which prevents any additional inodes from being unlocked with the key. Then, it syncs the filesystem and evicts the inodes in the key's list. The normal inode eviction code will free and wipe the per-file keys (in ->i_crypt_info). Note that freeing ->i_crypt_info without evicting the inodes was also considered, but would have been racy. Some inodes may still be in use when a master key is removed, and we can't simply revoke random file descriptors, mmap's, etc. Thus, the ioctl simply skips in-use inodes, and returns -EBUSY to indicate that some inodes weren't evicted. The master key *secret* is still removed, but the fscrypt_master_key struct remains to keep track of the remaining inodes. Userspace can then retry the ioctl to evict the remaining inodes. Alternatively, if userspace adds the key again, the refreshed secret will be associated with the existing list of inodes so they remain correctly tracked for future key removals. The ioctl doesn't wipe pagecache pages. Thus, we tolerate that after a kernel compromise some portions of plaintext file contents may still be recoverable from memory. This can be solved by enabling page poisoning system-wide, which security conscious users may choose to do. But it's very difficult to solve otherwise, e.g. 
note that plaintext file contents may have been read in other places than pagecache pages. Like FS_IOC_ADD_ENCRYPTION_KEY, FS_IOC_REMOVE_ENCRYPTION_KEY is initially restricted to privileged users only. This is sufficient for some use cases, but not all. A later patch will relax this restriction, but it will require introducing key hashes, among other changes. Reviewed-by: Theodore Ts'o Signed-off-by: Eric Biggers --- fs/crypto/fscrypt_private.h | 53 ++++++++- fs/crypto/keyring.c | 260 ++++++++++++++++++++++++++++++++++++++++++- fs/crypto/keysetup.c | 103 ++++++++++++++++- include/linux/fscrypt.h | 12 ++ include/uapi/linux/fscrypt.h | 9 ++ 5 files changed, 432 insertions(+), 5 deletions(-) (limited to 'include/uapi') diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 0d9ebfd3bf3a..fc804f4a03fc 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -78,6 +78,19 @@ struct fscrypt_info { /* Back-pointer to the inode */ struct inode *ci_inode; + /* + * The master key with which this inode was unlocked (decrypted). This + * will be NULL if the master key was found in a process-subscribed + * keyring rather than in the filesystem-level keyring. + */ + struct key *ci_master_key; + + /* + * Link in list of inodes that were unlocked with the master key. + * Only used when ->ci_master_key is set. + */ + struct list_head ci_master_key_link; + /* * If non-NULL, then encryption is done using the master key directly * and ci_ctfm will equal ci_direct_key->dk_ctfm. @@ -183,14 +196,52 @@ struct fscrypt_master_key_secret { */ struct fscrypt_master_key { - /* The secret key material */ + /* + * The secret key material. After FS_IOC_REMOVE_ENCRYPTION_KEY is + * executed, this is wiped and no new inodes can be unlocked with this + * key; however, there may still be inodes in ->mk_decrypted_inodes + * which could not be evicted. As long as some inodes still remain, + * FS_IOC_REMOVE_ENCRYPTION_KEY can be retried, or + * FS_IOC_ADD_ENCRYPTION_KEY can add the secret again. + * + * Locking: protected by key->sem. + */ struct fscrypt_master_key_secret mk_secret; /* Arbitrary key descriptor which was assigned by userspace */ struct fscrypt_key_specifier mk_spec; + /* + * Length of ->mk_decrypted_inodes, plus one if mk_secret is present. + * Once this goes to 0, the master key is removed from ->s_master_keys. + * The 'struct fscrypt_master_key' will continue to live as long as the + * 'struct key' whose payload it is, but we won't let this reference + * count rise again. + */ + refcount_t mk_refcount; + + /* + * List of inodes that were unlocked using this key. This allows the + * inodes to be evicted efficiently if the key is removed. + */ + struct list_head mk_decrypted_inodes; + spinlock_t mk_decrypted_inodes_lock; + } __randomize_layout; +static inline bool +is_master_key_secret_present(const struct fscrypt_master_key_secret *secret) +{ + /* + * The READ_ONCE() is only necessary for fscrypt_drop_inode() and + * fscrypt_key_describe(). These run in atomic context, so they can't + * take key->sem and thus 'secret' can change concurrently which would + * be a data race. But they only need to know whether the secret *was* + * present at the time of check, so READ_ONCE() suffices. 
+ */ + return READ_ONCE(secret->size) != 0; +} + static inline const char *master_key_spec_type( const struct fscrypt_key_specifier *spec) { diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index bcd7d2836e1e..c3423f0edc70 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -10,6 +10,7 @@ * filesystem-level keyring, including the ioctls: * * - FS_IOC_ADD_ENCRYPTION_KEY + * - FS_IOC_REMOVE_ENCRYPTION_KEY * * See the "User API" section of Documentation/filesystems/fscrypt.rst for more * information about these ioctls. @@ -60,6 +61,13 @@ static void fscrypt_key_destroy(struct key *key) static void fscrypt_key_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); + + if (key_is_positive(key)) { + const struct fscrypt_master_key *mk = key->payload.data[0]; + + if (!is_master_key_secret_present(&mk->mk_secret)) + seq_puts(m, ": secret removed"); + } } /* @@ -186,6 +194,10 @@ static int add_new_master_key(struct fscrypt_master_key_secret *secret, move_master_key_secret(&mk->mk_secret, secret); + refcount_set(&mk->mk_refcount, 1); /* secret is present */ + INIT_LIST_HEAD(&mk->mk_decrypted_inodes); + spin_lock_init(&mk->mk_decrypted_inodes_lock); + format_mk_description(description, mk_spec); key = key_alloc(&key_type_fscrypt, description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(), @@ -207,6 +219,21 @@ out_free_mk: return err; } +#define KEY_DEAD 1 + +static int add_existing_master_key(struct fscrypt_master_key *mk, + struct fscrypt_master_key_secret *secret) +{ + if (is_master_key_secret_present(&mk->mk_secret)) + return 0; + + if (!refcount_inc_not_zero(&mk->mk_refcount)) + return KEY_DEAD; + + move_master_key_secret(&mk->mk_secret, secret); + return 0; +} + static int add_master_key(struct super_block *sb, struct fscrypt_master_key_secret *secret, const struct fscrypt_key_specifier *mk_spec) @@ -216,6 +243,7 @@ static int add_master_key(struct super_block *sb, int err; mutex_lock(&fscrypt_add_key_mutex); /* serialize find + link */ +retry: key = fscrypt_find_master_key(sb, mk_spec); if (IS_ERR(key)) { err = PTR_ERR(key); @@ -227,8 +255,20 @@ static int add_master_key(struct super_block *sb, goto out_unlock; err = add_new_master_key(secret, mk_spec, sb->s_master_keys); } else { + /* + * Found the key in ->s_master_keys. Re-add the secret if + * needed. + */ + down_write(&key->sem); + err = add_existing_master_key(key->payload.data[0], secret); + up_write(&key->sem); + if (err == KEY_DEAD) { + /* Key being removed or needs to be removed */ + key_invalidate(key); + key_put(key); + goto retry; + } key_put(key); - err = 0; } out_unlock: mutex_unlock(&fscrypt_add_key_mutex); @@ -280,6 +320,224 @@ out_wipe_secret: } EXPORT_SYMBOL_GPL(fscrypt_ioctl_add_key); +/* + * Try to evict the inode's dentries from the dentry cache. If the inode is a + * directory, then it can have at most one dentry; however, that dentry may be + * pinned by child dentries, so first try to evict the children too. 
+ */ +static void shrink_dcache_inode(struct inode *inode) +{ + struct dentry *dentry; + + if (S_ISDIR(inode->i_mode)) { + dentry = d_find_any_alias(inode); + if (dentry) { + shrink_dcache_parent(dentry); + dput(dentry); + } + } + d_prune_aliases(inode); +} + +static void evict_dentries_for_decrypted_inodes(struct fscrypt_master_key *mk) +{ + struct fscrypt_info *ci; + struct inode *inode; + struct inode *toput_inode = NULL; + + spin_lock(&mk->mk_decrypted_inodes_lock); + + list_for_each_entry(ci, &mk->mk_decrypted_inodes, ci_master_key_link) { + inode = ci->ci_inode; + spin_lock(&inode->i_lock); + if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) { + spin_unlock(&inode->i_lock); + continue; + } + __iget(inode); + spin_unlock(&inode->i_lock); + spin_unlock(&mk->mk_decrypted_inodes_lock); + + shrink_dcache_inode(inode); + iput(toput_inode); + toput_inode = inode; + + spin_lock(&mk->mk_decrypted_inodes_lock); + } + + spin_unlock(&mk->mk_decrypted_inodes_lock); + iput(toput_inode); +} + +static int check_for_busy_inodes(struct super_block *sb, + struct fscrypt_master_key *mk) +{ + struct list_head *pos; + size_t busy_count = 0; + unsigned long ino; + struct dentry *dentry; + char _path[256]; + char *path = NULL; + + spin_lock(&mk->mk_decrypted_inodes_lock); + + list_for_each(pos, &mk->mk_decrypted_inodes) + busy_count++; + + if (busy_count == 0) { + spin_unlock(&mk->mk_decrypted_inodes_lock); + return 0; + } + + { + /* select an example file to show for debugging purposes */ + struct inode *inode = + list_first_entry(&mk->mk_decrypted_inodes, + struct fscrypt_info, + ci_master_key_link)->ci_inode; + ino = inode->i_ino; + dentry = d_find_alias(inode); + } + spin_unlock(&mk->mk_decrypted_inodes_lock); + + if (dentry) { + path = dentry_path(dentry, _path, sizeof(_path)); + dput(dentry); + } + if (IS_ERR_OR_NULL(path)) + path = "(unknown)"; + + fscrypt_warn(NULL, + "%s: %zu inode(s) still busy after removing key with %s %*phN, including ino %lu (%s)", + sb->s_id, busy_count, master_key_spec_type(&mk->mk_spec), + master_key_spec_len(&mk->mk_spec), (u8 *)&mk->mk_spec.u, + ino, path); + return -EBUSY; +} + +static int try_to_lock_encrypted_files(struct super_block *sb, + struct fscrypt_master_key *mk) +{ + int err1; + int err2; + + /* + * An inode can't be evicted while it is dirty or has dirty pages. + * Thus, we first have to clean the inodes in ->mk_decrypted_inodes. + * + * Just do it the easy way: call sync_filesystem(). It's overkill, but + * it works, and it's more important to minimize the amount of caches we + * drop than the amount of data we sync. Also, unprivileged users can + * already call sync_filesystem() via sys_syncfs() or sys_sync(). + */ + down_read(&sb->s_umount); + err1 = sync_filesystem(sb); + up_read(&sb->s_umount); + /* If a sync error occurs, still try to evict as much as possible. */ + + /* + * Inodes are pinned by their dentries, so we have to evict their + * dentries. shrink_dcache_sb() would suffice, but would be overkill + * and inappropriate for use by unprivileged users. So instead go + * through the inodes' alias lists and try to evict each dentry. + */ + evict_dentries_for_decrypted_inodes(mk); + + /* + * evict_dentries_for_decrypted_inodes() already iput() each inode in + * the list; any inodes for which that dropped the last reference will + * have been evicted due to fscrypt_drop_inode() detecting the key + * removal and telling the VFS to evict the inode. So to finish, we + * just need to check whether any inodes couldn't be evicted. 
+ */ + err2 = check_for_busy_inodes(sb, mk); + + return err1 ?: err2; +} + +/* + * Try to remove an fscrypt master encryption key. + * + * First we wipe the actual master key secret, so that no more inodes can be + * unlocked with it. Then we try to evict all cached inodes that had been + * unlocked with the key. + * + * If all inodes were evicted, then we unlink the fscrypt_master_key from the + * keyring. Otherwise it remains in the keyring in the "incompletely removed" + * state (without the actual secret key) where it tracks the list of remaining + * inodes. Userspace can execute the ioctl again later to retry eviction, or + * alternatively can re-add the secret key again. + * + * For more details, see the "Removing keys" section of + * Documentation/filesystems/fscrypt.rst. + */ +int fscrypt_ioctl_remove_key(struct file *filp, void __user *_uarg) +{ + struct super_block *sb = file_inode(filp)->i_sb; + struct fscrypt_remove_key_arg __user *uarg = _uarg; + struct fscrypt_remove_key_arg arg; + struct key *key; + struct fscrypt_master_key *mk; + u32 status_flags = 0; + int err; + bool dead; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (!valid_key_spec(&arg.key_spec)) + return -EINVAL; + + if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) + return -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + /* Find the key being removed. */ + key = fscrypt_find_master_key(sb, &arg.key_spec); + if (IS_ERR(key)) + return PTR_ERR(key); + mk = key->payload.data[0]; + + down_write(&key->sem); + + /* Wipe the secret. */ + dead = false; + if (is_master_key_secret_present(&mk->mk_secret)) { + wipe_master_key_secret(&mk->mk_secret); + dead = refcount_dec_and_test(&mk->mk_refcount); + } + up_write(&key->sem); + if (dead) { + /* + * No inodes reference the key, and we wiped the secret, so the + * key object is free to be removed from the keyring. + */ + key_invalidate(key); + err = 0; + } else { + /* Some inodes still reference this key; try to evict them. */ + err = try_to_lock_encrypted_files(sb, mk); + if (err == -EBUSY) { + status_flags |= + FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY; + err = 0; + } + } + /* + * We return 0 if we successfully did something: wiped the secret, or + * tried locking the files again. Users need to check the informational + * status flags if they care whether the key has been fully removed + * including all files locked. + */ + key_put(key); + if (err == 0) + err = put_user(status_flags, &uarg->removal_status_flags); + return err; +} +EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key); + int __init fscrypt_init_keyring(void) { return register_key_type(&key_type_fscrypt); diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 1c6d18bcdc7b..7b60a47fc73c 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -213,8 +213,16 @@ int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key) /* * Find the master key, then set up the inode's actual encryption key. + * + * If the master key is found in the filesystem-level keyring, then the + * corresponding 'struct key' is returned in *master_key_ret with + * ->sem read-locked. This is needed to ensure that only one task links the + * fscrypt_info into ->mk_decrypted_inodes (as multiple tasks may race to create + * an fscrypt_info for the same inode), and to synchronize the master key being + * removed with a new inode starting to use it. 
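+ *
+ * The caller must then drop that lock and reference itself, i.e. call
+ * up_read() on the key's semaphore followed by key_put().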
*/ -static int setup_file_encryption_key(struct fscrypt_info *ci) +static int setup_file_encryption_key(struct fscrypt_info *ci, + struct key **master_key_ret) { struct key *key; struct fscrypt_master_key *mk = NULL; @@ -234,6 +242,13 @@ static int setup_file_encryption_key(struct fscrypt_info *ci) } mk = key->payload.data[0]; + down_read(&key->sem); + + /* Has the secret been removed (via FS_IOC_REMOVE_ENCRYPTION_KEY)? */ + if (!is_master_key_secret_present(&mk->mk_secret)) { + err = -ENOKEY; + goto out_release_key; + } if (mk->mk_secret.size < ci->ci_mode->keysize) { fscrypt_warn(NULL, @@ -246,14 +261,22 @@ static int setup_file_encryption_key(struct fscrypt_info *ci) } err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw); + if (err) + goto out_release_key; + + *master_key_ret = key; + return 0; out_release_key: + up_read(&key->sem); key_put(key); return err; } static void put_crypt_info(struct fscrypt_info *ci) { + struct key *key; + if (!ci) return; @@ -263,6 +286,26 @@ static void put_crypt_info(struct fscrypt_info *ci) crypto_free_skcipher(ci->ci_ctfm); crypto_free_cipher(ci->ci_essiv_tfm); } + + key = ci->ci_master_key; + if (key) { + struct fscrypt_master_key *mk = key->payload.data[0]; + + /* + * Remove this inode from the list of inodes that were unlocked + * with the master key. + * + * In addition, if we're removing the last inode from a key that + * already had its secret removed, invalidate the key so that it + * gets removed from ->s_master_keys. + */ + spin_lock(&mk->mk_decrypted_inodes_lock); + list_del(&ci->ci_master_key_link); + spin_unlock(&mk->mk_decrypted_inodes_lock); + if (refcount_dec_and_test(&mk->mk_refcount)) + key_invalidate(key); + key_put(key); + } kmem_cache_free(fscrypt_info_cachep, ci); } @@ -271,6 +314,7 @@ int fscrypt_get_encryption_info(struct inode *inode) struct fscrypt_info *crypt_info; struct fscrypt_context ctx; struct fscrypt_mode *mode; + struct key *master_key = NULL; int res; if (fscrypt_has_encryption_key(inode)) @@ -335,13 +379,30 @@ int fscrypt_get_encryption_info(struct inode *inode) WARN_ON(mode->ivsize > FSCRYPT_MAX_IV_SIZE); crypt_info->ci_mode = mode; - res = setup_file_encryption_key(crypt_info); + res = setup_file_encryption_key(crypt_info, &master_key); if (res) goto out; - if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL) + if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL) { + if (master_key) { + struct fscrypt_master_key *mk = + master_key->payload.data[0]; + + refcount_inc(&mk->mk_refcount); + crypt_info->ci_master_key = key_get(master_key); + spin_lock(&mk->mk_decrypted_inodes_lock); + list_add(&crypt_info->ci_master_key_link, + &mk->mk_decrypted_inodes); + spin_unlock(&mk->mk_decrypted_inodes_lock); + } crypt_info = NULL; + } + res = 0; out: + if (master_key) { + up_read(&master_key->sem); + key_put(master_key); + } if (res == -ENOKEY) res = 0; put_crypt_info(crypt_info); @@ -376,3 +437,39 @@ void fscrypt_free_inode(struct inode *inode) } } EXPORT_SYMBOL(fscrypt_free_inode); + +/** + * fscrypt_drop_inode - check whether the inode's master key has been removed + * + * Filesystems supporting fscrypt must call this from their ->drop_inode() + * method so that encrypted inodes are evicted as soon as they're no longer in + * use and their master key has been removed. 
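+ *
+ * A sketch of the expected wiring in a filesystem's super_operations
+ * (foo_drop_inode() is a placeholder name; generic_drop_inode() is the
+ * usual VFS default):
+ *
+ *	static int foo_drop_inode(struct inode *inode)
+ *	{
+ *		return generic_drop_inode(inode) || fscrypt_drop_inode(inode);
+ *	}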
+ * + * Return: 1 if fscrypt wants the inode to be evicted now, otherwise 0 + */ +int fscrypt_drop_inode(struct inode *inode) +{ + const struct fscrypt_info *ci = READ_ONCE(inode->i_crypt_info); + const struct fscrypt_master_key *mk; + + /* + * If ci is NULL, then the inode doesn't have an encryption key set up + * so it's irrelevant. If ci_master_key is NULL, then the master key + * was provided via the legacy mechanism of the process-subscribed + * keyrings, so we don't know whether it's been removed or not. + */ + if (!ci || !ci->ci_master_key) + return 0; + mk = ci->ci_master_key->payload.data[0]; + + /* + * Note: since we aren't holding key->sem, the result here can + * immediately become outdated. But there's no correctness problem with + * unnecessarily evicting. Nor is there a correctness problem with not + * evicting while iput() is racing with the key being removed, since + * then the thread removing the key will either evict the inode itself + * or will correctly detect that it wasn't evicted due to the race. + */ + return !is_master_key_secret_present(&mk->mk_secret); +} +EXPORT_SYMBOL_GPL(fscrypt_drop_inode); diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 46bf66cf76ef..b494c5f9c01f 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -141,11 +141,13 @@ extern int fscrypt_inherit_context(struct inode *, struct inode *, /* keyring.c */ extern void fscrypt_sb_free(struct super_block *sb); extern int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); +extern int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); /* keysetup.c */ extern int fscrypt_get_encryption_info(struct inode *); extern void fscrypt_put_encryption_info(struct inode *); extern void fscrypt_free_inode(struct inode *); +extern int fscrypt_drop_inode(struct inode *inode); /* fname.c */ extern int fscrypt_setup_filename(struct inode *, const struct qstr *, @@ -381,6 +383,11 @@ static inline int fscrypt_ioctl_add_key(struct file *filp, void __user *arg) return -EOPNOTSUPP; } +static inline int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg) +{ + return -EOPNOTSUPP; +} + /* keysetup.c */ static inline int fscrypt_get_encryption_info(struct inode *inode) { @@ -396,6 +403,11 @@ static inline void fscrypt_free_inode(struct inode *inode) { } +static inline int fscrypt_drop_inode(struct inode *inode) +{ + return 0; +} + /* fname.c */ static inline int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 6aeca3cb0a2d..07f37a27a944 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -76,10 +76,19 @@ struct fscrypt_add_key_arg { __u8 raw[]; }; +/* Struct passed to FS_IOC_REMOVE_ENCRYPTION_KEY */ +struct fscrypt_remove_key_arg { + struct fscrypt_key_specifier key_spec; +#define FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY 0x00000001 + __u32 removal_status_flags; /* output */ + __u32 __reserved[5]; +}; + #define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy) #define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) #define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy) #define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg) +#define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg) /**********************************************************************/ -- cgit v1.2.3 From 5a7e29924dac34f0690b06906aad9d70d3c022a5 Mon Sep 17 00:00:00 2001 From: Eric 
Biggers Date: Sun, 4 Aug 2019 19:35:46 -0700 Subject: fscrypt: add FS_IOC_GET_ENCRYPTION_KEY_STATUS ioctl Add a new fscrypt ioctl, FS_IOC_GET_ENCRYPTION_KEY_STATUS. Given a key specified by 'struct fscrypt_key_specifier' (the same way a key is specified for the other fscrypt key management ioctls), it returns status information in a 'struct fscrypt_get_key_status_arg'. The main motivation for this is that applications need to be able to check whether an encrypted directory is "unlocked" or not, so that they can add the key if it is not, and avoid adding the key (which may involve prompting the user for a passphrase) if it already is. It's possible to use some workarounds such as checking whether opening a regular file fails with ENOKEY, or checking whether the filenames "look like gibberish" or not. However, no workaround is usable in all cases. Like the other key management ioctls, the keyrings syscalls may seem at first to be a good fit for this. Unfortunately, they are not. Even if we exposed the keyring ID of the ->s_master_keys keyring and gave everyone Search permission on it (note: currently the keyrings permission system would also allow everyone to "invalidate" the keyring too), the fscrypt keys have an additional state that doesn't map cleanly to the keyrings API: the secret can be removed, but we can be still tracking the files that were using the key, and the removal can be re-attempted or the secret added again. After later patches, some applications will also need a way to determine whether a key was added by the current user vs. by some other user. Reserved fields are included in fscrypt_get_key_status_arg for this and other future extensions. Reviewed-by: Theodore Ts'o Signed-off-by: Eric Biggers --- fs/crypto/keyring.c | 63 ++++++++++++++++++++++++++++++++++++++++++++ include/linux/fscrypt.h | 7 +++++ include/uapi/linux/fscrypt.h | 15 +++++++++++ 3 files changed, 85 insertions(+) (limited to 'include/uapi') diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index c3423f0edc70..8c94c2fa0c13 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -11,6 +11,7 @@ * * - FS_IOC_ADD_ENCRYPTION_KEY * - FS_IOC_REMOVE_ENCRYPTION_KEY + * - FS_IOC_GET_ENCRYPTION_KEY_STATUS * * See the "User API" section of Documentation/filesystems/fscrypt.rst for more * information about these ioctls. @@ -538,6 +539,68 @@ int fscrypt_ioctl_remove_key(struct file *filp, void __user *_uarg) } EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key); +/* + * Retrieve the status of an fscrypt master encryption key. + * + * We set ->status to indicate whether the key is absent, present, or + * incompletely removed. "Incompletely removed" means that the master key + * secret has been removed, but some files which had been unlocked with it are + * still in use. This field allows applications to easily determine the state + * of an encrypted directory without using a hack such as trying to open a + * regular file in it (which can confuse the "incompletely removed" state with + * absent or present). + * + * For more details, see the "FS_IOC_GET_ENCRYPTION_KEY_STATUS" section of + * Documentation/filesystems/fscrypt.rst. 
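+ *
+ * Userspace usage is roughly as follows (illustrative sketch; 'fd' and
+ * 'spec' are placeholders, with the key_spec filled in the same way as for
+ * the other key management ioctls):
+ *
+ *	struct fscrypt_get_key_status_arg arg = { .key_spec = spec };
+ *
+ *	if (ioctl(fd, FS_IOC_GET_ENCRYPTION_KEY_STATUS, &arg) == 0 &&
+ *	    arg.status == FSCRYPT_KEY_STATUS_PRESENT)
+ *		printf("key is fully present\n");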
+ */ +int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg) +{ + struct super_block *sb = file_inode(filp)->i_sb; + struct fscrypt_get_key_status_arg arg; + struct key *key; + struct fscrypt_master_key *mk; + int err; + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + if (!valid_key_spec(&arg.key_spec)) + return -EINVAL; + + if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) + return -EINVAL; + + memset(arg.__out_reserved, 0, sizeof(arg.__out_reserved)); + + key = fscrypt_find_master_key(sb, &arg.key_spec); + if (IS_ERR(key)) { + if (key != ERR_PTR(-ENOKEY)) + return PTR_ERR(key); + arg.status = FSCRYPT_KEY_STATUS_ABSENT; + err = 0; + goto out; + } + mk = key->payload.data[0]; + down_read(&key->sem); + + if (!is_master_key_secret_present(&mk->mk_secret)) { + arg.status = FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED; + err = 0; + goto out_release_key; + } + + arg.status = FSCRYPT_KEY_STATUS_PRESENT; + err = 0; +out_release_key: + up_read(&key->sem); + key_put(key); +out: + if (!err && copy_to_user(uarg, &arg, sizeof(arg))) + err = -EFAULT; + return err; +} +EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_key_status); + int __init fscrypt_init_keyring(void) { return register_key_type(&key_type_fscrypt); diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index b494c5f9c01f..6628d09585bd 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -142,6 +142,7 @@ extern int fscrypt_inherit_context(struct inode *, struct inode *, extern void fscrypt_sb_free(struct super_block *sb); extern int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); extern int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); +extern int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg); /* keysetup.c */ extern int fscrypt_get_encryption_info(struct inode *); @@ -388,6 +389,12 @@ static inline int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg) return -EOPNOTSUPP; } +static inline int fscrypt_ioctl_get_key_status(struct file *filp, + void __user *arg) +{ + return -EOPNOTSUPP; +} + /* keysetup.c */ static inline int fscrypt_get_encryption_info(struct inode *inode) { diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index 07f37a27a944..ed5995b15016 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -84,11 +84,26 @@ struct fscrypt_remove_key_arg { __u32 __reserved[5]; }; +/* Struct passed to FS_IOC_GET_ENCRYPTION_KEY_STATUS */ +struct fscrypt_get_key_status_arg { + /* input */ + struct fscrypt_key_specifier key_spec; + __u32 __reserved[6]; + + /* output */ +#define FSCRYPT_KEY_STATUS_ABSENT 1 +#define FSCRYPT_KEY_STATUS_PRESENT 2 +#define FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED 3 + __u32 status; + __u32 __out_reserved[15]; +}; + #define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy) #define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) #define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy) #define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg) #define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg) +#define FS_IOC_GET_ENCRYPTION_KEY_STATUS _IOWR('f', 26, struct fscrypt_get_key_status_arg) /**********************************************************************/ -- cgit v1.2.3 From 5dae460c2292dbbdac3a7a982cd566f470d957a2 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 4 Aug 2019 19:35:47 -0700 Subject: fscrypt: v2 encryption policy support Add a new fscrypt policy 
version, "v2". It has the following changes from the original policy version, which we call "v1" (*): - Master keys (the user-provided encryption keys) are only ever used as input to HKDF-SHA512. This is more flexible and less error-prone, and it avoids the quirks and limitations of the AES-128-ECB based KDF. Three classes of cryptographically isolated subkeys are defined: - Per-file keys, like used in v1 policies except for the new KDF. - Per-mode keys. These implement the semantics of the DIRECT_KEY flag, which for v1 policies made the master key be used directly. These are also planned to be used for inline encryption when support for it is added. - Key identifiers (see below). - Each master key is identified by a 16-byte master_key_identifier, which is derived from the key itself using HKDF-SHA512. This prevents users from associating the wrong key with an encrypted file or directory. This was easily possible with v1 policies, which identified the key by an arbitrary 8-byte master_key_descriptor. - The key must be provided in the filesystem-level keyring, not in a process-subscribed keyring. The following UAPI additions are made: - The existing ioctl FS_IOC_SET_ENCRYPTION_POLICY can now be passed a fscrypt_policy_v2 to set a v2 encryption policy. It's disambiguated from fscrypt_policy/fscrypt_policy_v1 by the version code prefix. - A new ioctl FS_IOC_GET_ENCRYPTION_POLICY_EX is added. It allows getting the v1 or v2 encryption policy of an encrypted file or directory. The existing FS_IOC_GET_ENCRYPTION_POLICY ioctl could not be used because it did not have a way for userspace to indicate which policy structure is expected. The new ioctl includes a size field, so it is extensible to future fscrypt policy versions. - The ioctls FS_IOC_ADD_ENCRYPTION_KEY, FS_IOC_REMOVE_ENCRYPTION_KEY, and FS_IOC_GET_ENCRYPTION_KEY_STATUS now support managing keys for v2 encryption policies. Such keys are kept logically separate from keys for v1 encryption policies, and are identified by 'identifier' rather than by 'descriptor'. The 'identifier' need not be provided when adding a key, since the kernel will calculate it anyway. This patch temporarily keeps adding/removing v2 policy keys behind the same permission check done for adding/removing v1 policy keys: capable(CAP_SYS_ADMIN). However, the next patch will carefully take advantage of the cryptographically secure master_key_identifier to allow non-root users to add/remove v2 policy keys, thus providing a full replacement for v1 policies. (*) Actually, in the API fscrypt_policy::version is 0 while on-disk fscrypt_context::format is 1. But I believe it makes the most sense to advance both to '2' to have them be in sync, and to consider the numbering to start at 1 except for the API quirk. 
Reviewed-by: Paul Crowley Reviewed-by: Theodore Ts'o Signed-off-by: Eric Biggers --- fs/crypto/crypto.c | 2 +- fs/crypto/fname.c | 3 +- fs/crypto/fscrypt_private.h | 187 ++++++++++++++++--- fs/crypto/keyring.c | 35 +++- fs/crypto/keysetup.c | 202 ++++++++++++++++----- fs/crypto/keysetup_v1.c | 18 +- fs/crypto/policy.c | 422 ++++++++++++++++++++++++++++++++----------- include/linux/fscrypt.h | 9 +- include/uapi/linux/fscrypt.h | 57 +++++- 9 files changed, 741 insertions(+), 194 deletions(-) (limited to 'include/uapi') diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 65ca077e8d58..32a7ad0098cc 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -141,7 +141,7 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, memset(iv, 0, ci->ci_mode->ivsize); iv->lblk_num = cpu_to_le64(lblk_num); - if (ci->ci_flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) + if (fscrypt_is_direct_key_policy(&ci->ci_policy)) memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE); if (ci->ci_essiv_tfm != NULL) diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index f4977d44d69b..3da3707c10e3 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -181,7 +181,8 @@ static int base64_decode(const char *src, int len, u8 *dst) bool fscrypt_fname_encrypted_size(const struct inode *inode, u32 orig_len, u32 max_len, u32 *encrypted_len_ret) { - int padding = 4 << (inode->i_crypt_info->ci_flags & + const struct fscrypt_info *ci = inode->i_crypt_info; + int padding = 4 << (fscrypt_policy_flags(&ci->ci_policy) & FSCRYPT_POLICY_FLAGS_PAD_MASK); u32 encrypted_len; diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index 9556e9499dc5..c89e37d38e42 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -20,27 +20,127 @@ #define FSCRYPT_MIN_KEY_SIZE 16 -/** - * Encryption context for inode - * - * Protector format: - * 1 byte: Protector format (1 = this version) - * 1 byte: File contents encryption mode - * 1 byte: File names encryption mode - * 1 byte: Flags - * 8 bytes: Master Key descriptor - * 16 bytes: Encryption Key derivation nonce - */ -struct fscrypt_context { - u8 format; +#define FSCRYPT_CONTEXT_V1 1 +#define FSCRYPT_CONTEXT_V2 2 + +struct fscrypt_context_v1 { + u8 version; /* FSCRYPT_CONTEXT_V1 */ u8 contents_encryption_mode; u8 filenames_encryption_mode; u8 flags; u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; -} __packed; +}; -#define FS_ENCRYPTION_CONTEXT_FORMAT_V1 1 +struct fscrypt_context_v2 { + u8 version; /* FSCRYPT_CONTEXT_V2 */ + u8 contents_encryption_mode; + u8 filenames_encryption_mode; + u8 flags; + u8 __reserved[4]; + u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]; + u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; +}; + +/** + * fscrypt_context - the encryption context of an inode + * + * This is the on-disk equivalent of an fscrypt_policy, stored alongside each + * encrypted file usually in a hidden extended attribute. It contains the + * fields from the fscrypt_policy, in order to identify the encryption algorithm + * and key with which the file is encrypted. It also contains a nonce that was + * randomly generated by fscrypt itself; this is used as KDF input or as a tweak + * to cause different files to be encrypted differently. + */ +union fscrypt_context { + u8 version; + struct fscrypt_context_v1 v1; + struct fscrypt_context_v2 v2; +}; + +/* + * Return the size expected for the given fscrypt_context based on its version + * number, or 0 if the context version is unrecognized. 
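+ *
+ * For reference: the two context versions have fixed on-disk sizes, 28 bytes
+ * for v1 and 40 bytes for v2, as enforced by the BUILD_BUG_ON()s in
+ * fscrypt_context_size() below.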
+ */ +static inline int fscrypt_context_size(const union fscrypt_context *ctx) +{ + switch (ctx->version) { + case FSCRYPT_CONTEXT_V1: + BUILD_BUG_ON(sizeof(ctx->v1) != 28); + return sizeof(ctx->v1); + case FSCRYPT_CONTEXT_V2: + BUILD_BUG_ON(sizeof(ctx->v2) != 40); + return sizeof(ctx->v2); + } + return 0; +} + +#undef fscrypt_policy +union fscrypt_policy { + u8 version; + struct fscrypt_policy_v1 v1; + struct fscrypt_policy_v2 v2; +}; + +/* + * Return the size expected for the given fscrypt_policy based on its version + * number, or 0 if the policy version is unrecognized. + */ +static inline int fscrypt_policy_size(const union fscrypt_policy *policy) +{ + switch (policy->version) { + case FSCRYPT_POLICY_V1: + return sizeof(policy->v1); + case FSCRYPT_POLICY_V2: + return sizeof(policy->v2); + } + return 0; +} + +/* Return the contents encryption mode of a valid encryption policy */ +static inline u8 +fscrypt_policy_contents_mode(const union fscrypt_policy *policy) +{ + switch (policy->version) { + case FSCRYPT_POLICY_V1: + return policy->v1.contents_encryption_mode; + case FSCRYPT_POLICY_V2: + return policy->v2.contents_encryption_mode; + } + BUG(); +} + +/* Return the filenames encryption mode of a valid encryption policy */ +static inline u8 +fscrypt_policy_fnames_mode(const union fscrypt_policy *policy) +{ + switch (policy->version) { + case FSCRYPT_POLICY_V1: + return policy->v1.filenames_encryption_mode; + case FSCRYPT_POLICY_V2: + return policy->v2.filenames_encryption_mode; + } + BUG(); +} + +/* Return the flags (FSCRYPT_POLICY_FLAG*) of a valid encryption policy */ +static inline u8 +fscrypt_policy_flags(const union fscrypt_policy *policy) +{ + switch (policy->version) { + case FSCRYPT_POLICY_V1: + return policy->v1.flags; + case FSCRYPT_POLICY_V2: + return policy->v2.flags; + } + BUG(); +} + +static inline bool +fscrypt_is_direct_key_policy(const union fscrypt_policy *policy) +{ + return fscrypt_policy_flags(policy) & FSCRYPT_POLICY_FLAG_DIRECT_KEY; +} /** * For encrypted symlinks, the ciphertext length is stored at the beginning @@ -70,8 +170,8 @@ struct fscrypt_info { struct crypto_cipher *ci_essiv_tfm; /* - * Encryption mode used for this inode. It corresponds to either - * ci_data_mode or ci_filename_mode, depending on the inode type. + * Encryption mode used for this inode. It corresponds to either the + * contents or filenames encryption mode, depending on the inode type. */ struct fscrypt_mode *ci_mode; @@ -97,11 +197,10 @@ struct fscrypt_info { */ struct fscrypt_direct_key *ci_direct_key; - /* fields from the fscrypt_context */ - u8 ci_data_mode; - u8 ci_filename_mode; - u8 ci_flags; - u8 ci_master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; + /* The encryption policy used by this inode */ + union fscrypt_policy ci_policy; + + /* This inode's nonce, copied from the fscrypt_context */ u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE]; }; @@ -181,6 +280,17 @@ struct fscrypt_hkdf { extern int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, unsigned int master_key_size); +/* + * The list of contexts in which fscrypt uses HKDF. These values are used as + * the first byte of the HKDF application-specific info string to guarantee that + * info strings are never repeated between contexts. This ensures that all HKDF + * outputs are unique and cryptographically isolated, i.e. knowledge of one + * output doesn't reveal another. 
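+ *
+ * Concretely, the info string passed to HKDF-Expand is the context byte
+ * followed by any context-specific data; e.g. per-file keys use
+ * HKDF_CONTEXT_PER_FILE_KEY with the inode's nonce appended (see
+ * fscrypt_setup_v2_file_key()).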
+ */ +#define HKDF_CONTEXT_KEY_IDENTIFIER 1 +#define HKDF_CONTEXT_PER_FILE_KEY 2 +#define HKDF_CONTEXT_PER_MODE_KEY 3 + extern int fscrypt_hkdf_expand(struct fscrypt_hkdf *hkdf, u8 context, const u8 *info, unsigned int infolen, u8 *okm, unsigned int okmlen); @@ -194,10 +304,16 @@ extern void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); */ struct fscrypt_master_key_secret { - /* Size of the raw key in bytes */ + /* + * For v2 policy keys: HKDF context keyed by this master key. + * For v1 policy keys: not set (hkdf.hmac_tfm == NULL). + */ + struct fscrypt_hkdf hkdf; + + /* Size of the raw key in bytes. Set even if ->raw isn't set. */ u32 size; - /* The raw key */ + /* For v1 policy keys: the raw key. Wiped for v2 policy keys. */ u8 raw[FSCRYPT_MAX_KEY_SIZE]; } __randomize_layout; @@ -223,7 +339,12 @@ struct fscrypt_master_key { */ struct fscrypt_master_key_secret mk_secret; - /* Arbitrary key descriptor which was assigned by userspace */ + /* + * For v1 policy keys: an arbitrary key descriptor which was assigned by + * userspace (->descriptor). + * + * For v2 policy keys: a cryptographic hash of this key (->identifier). + */ struct fscrypt_key_specifier mk_spec; /* @@ -242,6 +363,9 @@ struct fscrypt_master_key { struct list_head mk_decrypted_inodes; spinlock_t mk_decrypted_inodes_lock; + /* Per-mode tfms for DIRECT_KEY policies, allocated on-demand */ + struct crypto_skcipher *mk_mode_keys[__FSCRYPT_MODE_MAX + 1]; + } __randomize_layout; static inline bool @@ -263,6 +387,8 @@ static inline const char *master_key_spec_type( switch (spec->type) { case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: return "descriptor"; + case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: + return "identifier"; } return "[unknown]"; } @@ -272,6 +398,8 @@ static inline int master_key_spec_len(const struct fscrypt_key_specifier *spec) switch (spec->type) { case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: return FSCRYPT_KEY_DESCRIPTOR_SIZE; + case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: + return FSCRYPT_KEY_IDENTIFIER_SIZE; } return 0; } @@ -315,5 +443,14 @@ extern int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, extern int fscrypt_setup_v1_file_key_via_subscribed_keyrings( struct fscrypt_info *ci); +/* policy.c */ + +extern bool fscrypt_policies_equal(const union fscrypt_policy *policy1, + const union fscrypt_policy *policy2); +extern bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, + const struct inode *inode); +extern int fscrypt_policy_from_context(union fscrypt_policy *policy_u, + const union fscrypt_context *ctx_u, + int ctx_size); #endif /* _FSCRYPT_PRIVATE_H */ diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index 8c94c2fa0c13..fca3bdf01e7c 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -17,6 +17,7 @@ * information about these ioctls. 
*/ +#include #include #include @@ -24,6 +25,7 @@ static void wipe_master_key_secret(struct fscrypt_master_key_secret *secret) { + fscrypt_destroy_hkdf(&secret->hkdf); memzero_explicit(secret, sizeof(*secret)); } @@ -36,7 +38,13 @@ static void move_master_key_secret(struct fscrypt_master_key_secret *dst, static void free_master_key(struct fscrypt_master_key *mk) { + size_t i; + wipe_master_key_secret(&mk->mk_secret); + + for (i = 0; i < ARRAY_SIZE(mk->mk_mode_keys); i++) + crypto_free_skcipher(mk->mk_mode_keys[i]); + kzfree(mk); } @@ -109,7 +117,7 @@ static struct key *search_fscrypt_keyring(struct key *keyring, #define FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE \ (CONST_STRLEN("fscrypt-") + FIELD_SIZEOF(struct super_block, s_id)) -#define FSCRYPT_MK_DESCRIPTION_SIZE (2 * FSCRYPT_KEY_DESCRIPTOR_SIZE + 1) +#define FSCRYPT_MK_DESCRIPTION_SIZE (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + 1) static void format_fs_keyring_description( char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE], @@ -314,6 +322,31 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) if (!capable(CAP_SYS_ADMIN)) goto out_wipe_secret; + if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { + err = fscrypt_init_hkdf(&secret.hkdf, secret.raw, secret.size); + if (err) + goto out_wipe_secret; + + /* + * Now that the HKDF context is initialized, the raw key is no + * longer needed. + */ + memzero_explicit(secret.raw, secret.size); + + /* Calculate the key identifier and return it to userspace. */ + err = fscrypt_hkdf_expand(&secret.hkdf, + HKDF_CONTEXT_KEY_IDENTIFIER, + NULL, 0, arg.key_spec.u.identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE); + if (err) + goto out_wipe_secret; + err = -EFAULT; + if (copy_to_user(uarg->key_spec.u.identifier, + arg.key_spec.u.identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE)) + goto out_wipe_secret; + } + err = add_master_key(sb, &secret, &arg.key_spec); out_wipe_secret: wipe_master_key_secret(&secret); diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 7b60a47fc73c..f423d48264db 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -52,20 +52,14 @@ static struct fscrypt_mode available_modes[] = { }; static struct fscrypt_mode * -select_encryption_mode(const struct fscrypt_info *ci, const struct inode *inode) +select_encryption_mode(const union fscrypt_policy *policy, + const struct inode *inode) { - if (!fscrypt_valid_enc_modes(ci->ci_data_mode, ci->ci_filename_mode)) { - fscrypt_warn(inode, - "Unsupported encryption modes (contents mode %d, filenames mode %d)", - ci->ci_data_mode, ci->ci_filename_mode); - return ERR_PTR(-EINVAL); - } - if (S_ISREG(inode->i_mode)) - return &available_modes[ci->ci_data_mode]; + return &available_modes[fscrypt_policy_contents_mode(policy)]; if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) - return &available_modes[ci->ci_filename_mode]; + return &available_modes[fscrypt_policy_fnames_mode(policy)]; WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n", inode->i_ino, (inode->i_mode & S_IFMT)); @@ -211,6 +205,82 @@ int fscrypt_set_derived_key(struct fscrypt_info *ci, const u8 *derived_key) return 0; } +static int setup_per_mode_key(struct fscrypt_info *ci, + struct fscrypt_master_key *mk) +{ + struct fscrypt_mode *mode = ci->ci_mode; + u8 mode_num = mode - available_modes; + struct crypto_skcipher *tfm, *prev_tfm; + u8 mode_key[FSCRYPT_MAX_KEY_SIZE]; + int err; + + if (WARN_ON(mode_num >= ARRAY_SIZE(mk->mk_mode_keys))) + return -EINVAL; + + /* pairs with cmpxchg() below */ + tfm 
= READ_ONCE(mk->mk_mode_keys[mode_num]); + if (likely(tfm != NULL)) + goto done; + + BUILD_BUG_ON(sizeof(mode_num) != 1); + err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, + HKDF_CONTEXT_PER_MODE_KEY, + &mode_num, sizeof(mode_num), + mode_key, mode->keysize); + if (err) + return err; + tfm = fscrypt_allocate_skcipher(mode, mode_key, ci->ci_inode); + memzero_explicit(mode_key, mode->keysize); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + /* pairs with READ_ONCE() above */ + prev_tfm = cmpxchg(&mk->mk_mode_keys[mode_num], NULL, tfm); + if (prev_tfm != NULL) { + crypto_free_skcipher(tfm); + tfm = prev_tfm; + } +done: + ci->ci_ctfm = tfm; + return 0; +} + +static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, + struct fscrypt_master_key *mk) +{ + u8 derived_key[FSCRYPT_MAX_KEY_SIZE]; + int err; + + if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { + /* + * DIRECT_KEY: instead of deriving per-file keys, the per-file + * nonce will be included in all the IVs. But unlike v1 + * policies, for v2 policies in this case we don't encrypt with + * the master key directly but rather derive a per-mode key. + * This ensures that the master key is consistently used only + * for HKDF, avoiding key reuse issues. + */ + if (!fscrypt_mode_supports_direct_key(ci->ci_mode)) { + fscrypt_warn(ci->ci_inode, + "Direct key flag not allowed with %s", + ci->ci_mode->friendly_name); + return -EINVAL; + } + return setup_per_mode_key(ci, mk); + } + + err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, + HKDF_CONTEXT_PER_FILE_KEY, + ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE, + derived_key, ci->ci_mode->keysize); + if (err) + return err; + + err = fscrypt_set_derived_key(ci, derived_key); + memzero_explicit(derived_key, ci->ci_mode->keysize); + return err; +} + /* * Find the master key, then set up the inode's actual encryption key. * @@ -229,15 +299,36 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, struct fscrypt_key_specifier mk_spec; int err; - mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; - memcpy(mk_spec.u.descriptor, ci->ci_master_key_descriptor, - FSCRYPT_KEY_DESCRIPTOR_SIZE); + switch (ci->ci_policy.version) { + case FSCRYPT_POLICY_V1: + mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; + memcpy(mk_spec.u.descriptor, + ci->ci_policy.v1.master_key_descriptor, + FSCRYPT_KEY_DESCRIPTOR_SIZE); + break; + case FSCRYPT_POLICY_V2: + mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER; + memcpy(mk_spec.u.identifier, + ci->ci_policy.v2.master_key_identifier, + FSCRYPT_KEY_IDENTIFIER_SIZE); + break; + default: + WARN_ON(1); + return -EINVAL; + } key = fscrypt_find_master_key(ci->ci_inode->i_sb, &mk_spec); if (IS_ERR(key)) { - if (key != ERR_PTR(-ENOKEY)) + if (key != ERR_PTR(-ENOKEY) || + ci->ci_policy.version != FSCRYPT_POLICY_V1) return PTR_ERR(key); + /* + * As a legacy fallback for v1 policies, search for the key in + * the current task's subscribed keyrings too. Don't move this + * to before the search of ->s_master_keys, since users + * shouldn't be able to override filesystem-level keys. + */ return fscrypt_setup_v1_file_key_via_subscribed_keyrings(ci); } @@ -250,6 +341,12 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, goto out_release_key; } + /* + * Require that the master key be at least as long as the derived key. + * Otherwise, the derived key cannot possibly contain as much entropy as + * that required by the encryption mode it will be used for. For v1 + * policies it's also required for the KDF to work at all. 
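+ * (For example, AES-256-XTS uses a 64-byte key, so a 32-byte master key would be rejected here for any inode using that mode.)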
+ */ if (mk->mk_secret.size < ci->ci_mode->keysize) { fscrypt_warn(NULL, "key with %s %*phN is too short (got %u bytes, need %u+ bytes)", @@ -260,7 +357,18 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, goto out_release_key; } - err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw); + switch (ci->ci_policy.version) { + case FSCRYPT_POLICY_V1: + err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw); + break; + case FSCRYPT_POLICY_V2: + err = fscrypt_setup_v2_file_key(ci, mk); + break; + default: + WARN_ON(1); + err = -EINVAL; + break; + } if (err) goto out_release_key; @@ -282,7 +390,8 @@ static void put_crypt_info(struct fscrypt_info *ci) if (ci->ci_direct_key) { fscrypt_put_direct_key(ci->ci_direct_key); - } else { + } else if ((ci->ci_ctfm != NULL || ci->ci_essiv_tfm != NULL) && + !fscrypt_is_direct_key_policy(&ci->ci_policy)) { crypto_free_skcipher(ci->ci_ctfm); crypto_free_cipher(ci->ci_essiv_tfm); } @@ -312,7 +421,7 @@ static void put_crypt_info(struct fscrypt_info *ci) int fscrypt_get_encryption_info(struct inode *inode) { struct fscrypt_info *crypt_info; - struct fscrypt_context ctx; + union fscrypt_context ctx; struct fscrypt_mode *mode; struct key *master_key = NULL; int res; @@ -335,27 +444,12 @@ int fscrypt_get_encryption_info(struct inode *inode) } /* Fake up a context for an unencrypted directory */ memset(&ctx, 0, sizeof(ctx)); - ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; - ctx.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; - ctx.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; - memset(ctx.master_key_descriptor, 0x42, + ctx.version = FSCRYPT_CONTEXT_V1; + ctx.v1.contents_encryption_mode = FSCRYPT_MODE_AES_256_XTS; + ctx.v1.filenames_encryption_mode = FSCRYPT_MODE_AES_256_CTS; + memset(ctx.v1.master_key_descriptor, 0x42, FSCRYPT_KEY_DESCRIPTOR_SIZE); - } else if (res != sizeof(ctx)) { - fscrypt_warn(inode, - "Unknown encryption context size (%d bytes)", res); - return -EINVAL; - } - - if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1) { - fscrypt_warn(inode, "Unknown encryption context version (%d)", - ctx.format); - return -EINVAL; - } - - if (ctx.flags & ~FSCRYPT_POLICY_FLAGS_VALID) { - fscrypt_warn(inode, "Unknown encryption context flags (0x%02x)", - ctx.flags); - return -EINVAL; + res = sizeof(ctx.v1); } crypt_info = kmem_cache_zalloc(fscrypt_info_cachep, GFP_NOFS); @@ -364,14 +458,34 @@ int fscrypt_get_encryption_info(struct inode *inode) crypt_info->ci_inode = inode; - crypt_info->ci_flags = ctx.flags; - crypt_info->ci_data_mode = ctx.contents_encryption_mode; - crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; - memcpy(crypt_info->ci_master_key_descriptor, ctx.master_key_descriptor, - FSCRYPT_KEY_DESCRIPTOR_SIZE); - memcpy(crypt_info->ci_nonce, ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE); + res = fscrypt_policy_from_context(&crypt_info->ci_policy, &ctx, res); + if (res) { + fscrypt_warn(inode, + "Unrecognized or corrupt encryption context"); + goto out; + } + + switch (ctx.version) { + case FSCRYPT_CONTEXT_V1: + memcpy(crypt_info->ci_nonce, ctx.v1.nonce, + FS_KEY_DERIVATION_NONCE_SIZE); + break; + case FSCRYPT_CONTEXT_V2: + memcpy(crypt_info->ci_nonce, ctx.v2.nonce, + FS_KEY_DERIVATION_NONCE_SIZE); + break; + default: + WARN_ON(1); + res = -EINVAL; + goto out; + } + + if (!fscrypt_supported_policy(&crypt_info->ci_policy, inode)) { + res = -EINVAL; + goto out; + } - mode = select_encryption_mode(crypt_info, inode); + mode = select_encryption_mode(&crypt_info->ci_policy, inode); if (IS_ERR(mode)) { res = PTR_ERR(mode); goto 
out; diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index 631690bb6ed5..ad1a36c370c3 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -189,12 +189,13 @@ find_or_insert_direct_key(struct fscrypt_direct_key *to_insert, */ BUILD_BUG_ON(sizeof(hash_key) > FSCRYPT_KEY_DESCRIPTOR_SIZE); - memcpy(&hash_key, ci->ci_master_key_descriptor, sizeof(hash_key)); + memcpy(&hash_key, ci->ci_policy.v1.master_key_descriptor, + sizeof(hash_key)); spin_lock(&fscrypt_direct_keys_lock); hash_for_each_possible(fscrypt_direct_keys, dk, dk_node, hash_key) { - if (memcmp(ci->ci_master_key_descriptor, dk->dk_descriptor, - FSCRYPT_KEY_DESCRIPTOR_SIZE) != 0) + if (memcmp(ci->ci_policy.v1.master_key_descriptor, + dk->dk_descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE) != 0) continue; if (ci->ci_mode != dk->dk_mode) continue; @@ -237,7 +238,7 @@ fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key) dk->dk_ctfm = NULL; goto err_free_dk; } - memcpy(dk->dk_descriptor, ci->ci_master_key_descriptor, + memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE); memcpy(dk->dk_raw, raw_key, ci->ci_mode->keysize); @@ -262,7 +263,8 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci, return -EINVAL; } - if (ci->ci_data_mode != ci->ci_filename_mode) { + if (ci->ci_policy.v1.contents_encryption_mode != + ci->ci_policy.v1.filenames_encryption_mode) { fscrypt_warn(ci->ci_inode, "Direct key mode not allowed with different contents and filenames modes"); return -EINVAL; @@ -308,7 +310,7 @@ out: int fscrypt_setup_v1_file_key(struct fscrypt_info *ci, const u8 *raw_master_key) { - if (ci->ci_flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) + if (ci->ci_policy.v1.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) return setup_v1_file_key_direct(ci, raw_master_key); else return setup_v1_file_key_derived(ci, raw_master_key); @@ -321,11 +323,11 @@ int fscrypt_setup_v1_file_key_via_subscribed_keyrings(struct fscrypt_info *ci) int err; key = find_and_lock_process_key(FSCRYPT_KEY_DESC_PREFIX, - ci->ci_master_key_descriptor, + ci->ci_policy.v1.master_key_descriptor, ci->ci_mode->keysize, &payload); if (key == ERR_PTR(-ENOKEY) && ci->ci_inode->i_sb->s_cop->key_prefix) { key = find_and_lock_process_key(ci->ci_inode->i_sb->s_cop->key_prefix, - ci->ci_master_key_descriptor, + ci->ci_policy.v1.master_key_descriptor, ci->ci_mode->keysize, &payload); } if (IS_ERR(key)) diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index da7ae9c8b4ad..0141d338c1fd 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -5,8 +5,9 @@ * Copyright (C) 2015, Google, Inc. * Copyright (C) 2015, Motorola Mobility. * - * Written by Michael Halcrow, 2015. + * Originally written by Michael Halcrow, 2015. * Modified by Jaegeuk Kim, 2015. + * Modified by Eric Biggers, 2019 for v2 policy support. 
*/ #include @@ -14,70 +15,291 @@ #include #include "fscrypt_private.h" -/* - * check whether an encryption policy is consistent with an encryption context +/** + * fscrypt_policies_equal - check whether two encryption policies are the same + * + * Return: %true if equal, else %false */ -static bool is_encryption_context_consistent_with_policy( - const struct fscrypt_context *ctx, - const struct fscrypt_policy *policy) +bool fscrypt_policies_equal(const union fscrypt_policy *policy1, + const union fscrypt_policy *policy2) { - return memcmp(ctx->master_key_descriptor, policy->master_key_descriptor, - FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 && - (ctx->flags == policy->flags) && - (ctx->contents_encryption_mode == - policy->contents_encryption_mode) && - (ctx->filenames_encryption_mode == - policy->filenames_encryption_mode); + if (policy1->version != policy2->version) + return false; + + return !memcmp(policy1, policy2, fscrypt_policy_size(policy1)); } -static int create_encryption_context_from_policy(struct inode *inode, - const struct fscrypt_policy *policy) +/** + * fscrypt_supported_policy - check whether an encryption policy is supported + * + * Given an encryption policy, check whether all its encryption modes and other + * settings are supported by this kernel. (But we don't currently check + * for crypto API support here, so attempting to use an algorithm not configured + * into the crypto API will still fail later.) + * + * Return: %true if supported, else %false + */ +bool fscrypt_supported_policy(const union fscrypt_policy *policy_u, + const struct inode *inode) { - struct fscrypt_context ctx; + switch (policy_u->version) { + case FSCRYPT_POLICY_V1: { + const struct fscrypt_policy_v1 *policy = &policy_u->v1; + + if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, + policy->filenames_encryption_mode)) { + fscrypt_warn(inode, + "Unsupported encryption modes (contents %d, filenames %d)", + policy->contents_encryption_mode, + policy->filenames_encryption_mode); + return false; + } + + if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) { + fscrypt_warn(inode, + "Unsupported encryption flags (0x%02x)", + policy->flags); + return false; + } + + return true; + } + case FSCRYPT_POLICY_V2: { + const struct fscrypt_policy_v2 *policy = &policy_u->v2; + + if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, + policy->filenames_encryption_mode)) { + fscrypt_warn(inode, + "Unsupported encryption modes (contents %d, filenames %d)", + policy->contents_encryption_mode, + policy->filenames_encryption_mode); + return false; + } + + if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) { + fscrypt_warn(inode, + "Unsupported encryption flags (0x%02x)", + policy->flags); + return false; + } + + if (memchr_inv(policy->__reserved, 0, + sizeof(policy->__reserved))) { + fscrypt_warn(inode, + "Reserved bits set in encryption policy"); + return false; + } + + return true; + } + } + return false; +} + +/** + * fscrypt_new_context_from_policy - create a new fscrypt_context from a policy + * + * Create an fscrypt_context for an inode that is being assigned the given + * encryption policy. A new nonce is randomly generated. + * + * Return: the size of the new context in bytes.
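+ * (Concretely, this is 28 bytes for a v1 context and 40 bytes for a v2 context; FSCRYPT_SET_CONTEXT_MAX_SIZE is raised to 40 later in this patch to match.)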
+ */ +static int fscrypt_new_context_from_policy(union fscrypt_context *ctx_u, + const union fscrypt_policy *policy_u) +{ + memset(ctx_u, 0, sizeof(*ctx_u)); + + switch (policy_u->version) { + case FSCRYPT_POLICY_V1: { + const struct fscrypt_policy_v1 *policy = &policy_u->v1; + struct fscrypt_context_v1 *ctx = &ctx_u->v1; + + ctx->version = FSCRYPT_CONTEXT_V1; + ctx->contents_encryption_mode = + policy->contents_encryption_mode; + ctx->filenames_encryption_mode = + policy->filenames_encryption_mode; + ctx->flags = policy->flags; + memcpy(ctx->master_key_descriptor, + policy->master_key_descriptor, + sizeof(ctx->master_key_descriptor)); + get_random_bytes(ctx->nonce, sizeof(ctx->nonce)); + return sizeof(*ctx); + } + case FSCRYPT_POLICY_V2: { + const struct fscrypt_policy_v2 *policy = &policy_u->v2; + struct fscrypt_context_v2 *ctx = &ctx_u->v2; + + ctx->version = FSCRYPT_CONTEXT_V2; + ctx->contents_encryption_mode = + policy->contents_encryption_mode; + ctx->filenames_encryption_mode = + policy->filenames_encryption_mode; + ctx->flags = policy->flags; + memcpy(ctx->master_key_identifier, + policy->master_key_identifier, + sizeof(ctx->master_key_identifier)); + get_random_bytes(ctx->nonce, sizeof(ctx->nonce)); + return sizeof(*ctx); + } + } + BUG(); +} - ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; - memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, - FSCRYPT_KEY_DESCRIPTOR_SIZE); +/** + * fscrypt_policy_from_context - convert an fscrypt_context to an fscrypt_policy + * + * Given an fscrypt_context, build the corresponding fscrypt_policy. + * + * Return: 0 on success, or -EINVAL if the fscrypt_context has an unrecognized + * version number or size. + * + * This does *not* validate the settings within the policy itself, e.g. the + * modes, flags, and reserved bits. Use fscrypt_supported_policy() for that. 
+ */ +int fscrypt_policy_from_context(union fscrypt_policy *policy_u, + const union fscrypt_context *ctx_u, + int ctx_size) +{ + memset(policy_u, 0, sizeof(*policy_u)); - if (!fscrypt_valid_enc_modes(policy->contents_encryption_mode, - policy->filenames_encryption_mode)) + if (ctx_size <= 0 || ctx_size != fscrypt_context_size(ctx_u)) return -EINVAL; - if (policy->flags & ~FSCRYPT_POLICY_FLAGS_VALID) + switch (ctx_u->version) { + case FSCRYPT_CONTEXT_V1: { + const struct fscrypt_context_v1 *ctx = &ctx_u->v1; + struct fscrypt_policy_v1 *policy = &policy_u->v1; + + policy->version = FSCRYPT_POLICY_V1; + policy->contents_encryption_mode = + ctx->contents_encryption_mode; + policy->filenames_encryption_mode = + ctx->filenames_encryption_mode; + policy->flags = ctx->flags; + memcpy(policy->master_key_descriptor, + ctx->master_key_descriptor, + sizeof(policy->master_key_descriptor)); + return 0; + } + case FSCRYPT_CONTEXT_V2: { + const struct fscrypt_context_v2 *ctx = &ctx_u->v2; + struct fscrypt_policy_v2 *policy = &policy_u->v2; + + policy->version = FSCRYPT_POLICY_V2; + policy->contents_encryption_mode = + ctx->contents_encryption_mode; + policy->filenames_encryption_mode = + ctx->filenames_encryption_mode; + policy->flags = ctx->flags; + memcpy(policy->__reserved, ctx->__reserved, + sizeof(policy->__reserved)); + memcpy(policy->master_key_identifier, + ctx->master_key_identifier, + sizeof(policy->master_key_identifier)); + return 0; + } + } + /* unreachable */ + return -EINVAL; +} + +/* Retrieve an inode's encryption policy */ +static int fscrypt_get_policy(struct inode *inode, union fscrypt_policy *policy) +{ + const struct fscrypt_info *ci; + union fscrypt_context ctx; + int ret; + + ci = READ_ONCE(inode->i_crypt_info); + if (ci) { + /* key available, use the cached policy */ + *policy = ci->ci_policy; + return 0; + } + + if (!IS_ENCRYPTED(inode)) + return -ENODATA; + + ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); + if (ret < 0) + return (ret == -ERANGE) ? -EINVAL : ret; + + return fscrypt_policy_from_context(policy, &ctx, ret); +} + +static int set_encryption_policy(struct inode *inode, + const union fscrypt_policy *policy) +{ + union fscrypt_context ctx; + int ctxsize; + + if (!fscrypt_supported_policy(policy, inode)) return -EINVAL; - ctx.contents_encryption_mode = policy->contents_encryption_mode; - ctx.filenames_encryption_mode = policy->filenames_encryption_mode; - ctx.flags = policy->flags; - BUILD_BUG_ON(sizeof(ctx.nonce) != FS_KEY_DERIVATION_NONCE_SIZE); - get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE); + if (policy->version == FSCRYPT_POLICY_V1) { + /* + * The original encryption policy version provided no way of + * verifying that the correct master key was supplied, which was + * insecure in scenarios where multiple users have access to the + * same encrypted files (even just read-only access). The new + * encryption policy version fixes this and also implies use of + * an improved key derivation function and allows non-root users + * to securely remove keys. So as long as compatibility with + * old kernels isn't required, it is recommended to use the new + * policy version for all new encrypted directories. 
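+ * (Userspace opts in to v2 simply by passing a struct fscrypt_policy_v2 with .version == FSCRYPT_POLICY_V2 to FS_IOC_SET_ENCRYPTION_POLICY; see the reworked ioctl handler below.)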
+ */ + pr_warn_once("%s (pid %d) is setting deprecated v1 encryption policy; recommend upgrading to v2.\n", + current->comm, current->pid); + } - return inode->i_sb->s_cop->set_context(inode, &ctx, sizeof(ctx), NULL); + ctxsize = fscrypt_new_context_from_policy(&ctx, policy); + + return inode->i_sb->s_cop->set_context(inode, &ctx, ctxsize, NULL); } int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) { - struct fscrypt_policy policy; + union fscrypt_policy policy; + union fscrypt_policy existing_policy; struct inode *inode = file_inode(filp); + u8 version; + int size; int ret; - struct fscrypt_context ctx; - if (copy_from_user(&policy, arg, sizeof(policy))) + if (get_user(policy.version, (const u8 __user *)arg)) return -EFAULT; + size = fscrypt_policy_size(&policy); + if (size <= 0) + return -EINVAL; + + /* + * We should just copy the remaining 'size - 1' bytes here, but a + * bizarre bug in gcc 7 and earlier (fixed by gcc r255731) causes gcc to + * think that size can be 0 here (despite the check above!) *and* that + * it's a compile-time constant. Thus it would think copy_from_user() + * is passed compile-time constant ULONG_MAX, causing the compile-time + * buffer overflow check to fail, breaking the build. This only occurred + * when building an i386 kernel with -Os and branch profiling enabled. + * + * Work around it by just copying the first byte again... + */ + version = policy.version; + if (copy_from_user(&policy, arg, size)) + return -EFAULT; + policy.version = version; + if (!inode_owner_or_capable(inode)) return -EACCES; - if (policy.version != 0) - return -EINVAL; - ret = mnt_want_write_file(filp); if (ret) return ret; inode_lock(inode); - ret = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); + ret = fscrypt_get_policy(inode, &existing_policy); if (ret == -ENODATA) { if (!S_ISDIR(inode->i_mode)) ret = -ENOTDIR; @@ -86,14 +308,10 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) else if (!inode->i_sb->s_cop->empty_dir(inode)) ret = -ENOTEMPTY; else - ret = create_encryption_context_from_policy(inode, - &policy); - } else if (ret == sizeof(ctx) && - is_encryption_context_consistent_with_policy(&ctx, - &policy)) { - /* The file already uses the same encryption policy. */ - ret = 0; - } else if (ret >= 0 || ret == -ERANGE) { + ret = set_encryption_policy(inode, &policy); + } else if (ret == -EINVAL || + (ret == 0 && !fscrypt_policies_equal(&policy, + &existing_policy))) { /* The file already uses a different encryption policy. 
*/ ret = -EEXIST; } @@ -105,37 +323,57 @@ int fscrypt_ioctl_set_policy(struct file *filp, const void __user *arg) } EXPORT_SYMBOL(fscrypt_ioctl_set_policy); +/* Original ioctl version; can only get the original policy version */ int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) { - struct inode *inode = file_inode(filp); - struct fscrypt_context ctx; - struct fscrypt_policy policy; - int res; + union fscrypt_policy policy; + int err; - if (!IS_ENCRYPTED(inode)) - return -ENODATA; + err = fscrypt_get_policy(file_inode(filp), &policy); + if (err) + return err; - res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); - if (res < 0 && res != -ERANGE) - return res; - if (res != sizeof(ctx)) - return -EINVAL; - if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1) + if (policy.version != FSCRYPT_POLICY_V1) return -EINVAL; - policy.version = 0; - policy.contents_encryption_mode = ctx.contents_encryption_mode; - policy.filenames_encryption_mode = ctx.filenames_encryption_mode; - policy.flags = ctx.flags; - memcpy(policy.master_key_descriptor, ctx.master_key_descriptor, - FSCRYPT_KEY_DESCRIPTOR_SIZE); - - if (copy_to_user(arg, &policy, sizeof(policy))) + if (copy_to_user(arg, &policy, sizeof(policy.v1))) return -EFAULT; return 0; } EXPORT_SYMBOL(fscrypt_ioctl_get_policy); +/* Extended ioctl version; can get policies of any version */ +int fscrypt_ioctl_get_policy_ex(struct file *filp, void __user *uarg) +{ + struct fscrypt_get_policy_ex_arg arg; + union fscrypt_policy *policy = (union fscrypt_policy *)&arg.policy; + size_t policy_size; + int err; + + /* arg is policy_size, then policy */ + BUILD_BUG_ON(offsetof(typeof(arg), policy_size) != 0); + BUILD_BUG_ON(offsetofend(typeof(arg), policy_size) != + offsetof(typeof(arg), policy)); + BUILD_BUG_ON(sizeof(arg.policy) != sizeof(*policy)); + + err = fscrypt_get_policy(file_inode(filp), policy); + if (err) + return err; + policy_size = fscrypt_policy_size(policy); + + if (copy_from_user(&arg, uarg, sizeof(arg.policy_size))) + return -EFAULT; + + if (policy_size > arg.policy_size) + return -EOVERFLOW; + arg.policy_size = policy_size; + + if (copy_to_user(uarg, &arg, sizeof(arg.policy_size) + policy_size)) + return -EFAULT; + return 0; +} +EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_policy_ex); + /** * fscrypt_has_permitted_context() - is a file's encryption policy permitted * within its directory? @@ -157,10 +395,8 @@ EXPORT_SYMBOL(fscrypt_ioctl_get_policy); */ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) { - const struct fscrypt_operations *cops = parent->i_sb->s_cop; - const struct fscrypt_info *parent_ci, *child_ci; - struct fscrypt_context parent_ctx, child_ctx; - int res; + union fscrypt_policy parent_policy, child_policy; + int err; /* No restrictions on file types which are never encrypted */ if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) && @@ -190,41 +426,22 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) * In any case, if an unexpected error occurs, fall back to "forbidden". 
*/ - res = fscrypt_get_encryption_info(parent); - if (res) + err = fscrypt_get_encryption_info(parent); + if (err) return 0; - res = fscrypt_get_encryption_info(child); - if (res) + err = fscrypt_get_encryption_info(child); + if (err) return 0; - parent_ci = READ_ONCE(parent->i_crypt_info); - child_ci = READ_ONCE(child->i_crypt_info); - - if (parent_ci && child_ci) { - return memcmp(parent_ci->ci_master_key_descriptor, - child_ci->ci_master_key_descriptor, - FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 && - (parent_ci->ci_data_mode == child_ci->ci_data_mode) && - (parent_ci->ci_filename_mode == - child_ci->ci_filename_mode) && - (parent_ci->ci_flags == child_ci->ci_flags); - } - res = cops->get_context(parent, &parent_ctx, sizeof(parent_ctx)); - if (res != sizeof(parent_ctx)) + err = fscrypt_get_policy(parent, &parent_policy); + if (err) return 0; - res = cops->get_context(child, &child_ctx, sizeof(child_ctx)); - if (res != sizeof(child_ctx)) + err = fscrypt_get_policy(child, &child_policy); + if (err) return 0; - return memcmp(parent_ctx.master_key_descriptor, - child_ctx.master_key_descriptor, - FSCRYPT_KEY_DESCRIPTOR_SIZE) == 0 && - (parent_ctx.contents_encryption_mode == - child_ctx.contents_encryption_mode) && - (parent_ctx.filenames_encryption_mode == - child_ctx.filenames_encryption_mode) && - (parent_ctx.flags == child_ctx.flags); + return fscrypt_policies_equal(&parent_policy, &child_policy); } EXPORT_SYMBOL(fscrypt_has_permitted_context); @@ -240,7 +457,8 @@ EXPORT_SYMBOL(fscrypt_has_permitted_context); int fscrypt_inherit_context(struct inode *parent, struct inode *child, void *fs_data, bool preload) { - struct fscrypt_context ctx; + union fscrypt_context ctx; + int ctxsize; struct fscrypt_info *ci; int res; @@ -252,16 +470,10 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child, if (ci == NULL) return -ENOKEY; - ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; - ctx.contents_encryption_mode = ci->ci_data_mode; - ctx.filenames_encryption_mode = ci->ci_filename_mode; - ctx.flags = ci->ci_flags; - memcpy(ctx.master_key_descriptor, ci->ci_master_key_descriptor, - FSCRYPT_KEY_DESCRIPTOR_SIZE); - get_random_bytes(ctx.nonce, FS_KEY_DERIVATION_NONCE_SIZE); + ctxsize = fscrypt_new_context_from_policy(&ctx, &ci->ci_policy); + BUILD_BUG_ON(sizeof(ctx) != FSCRYPT_SET_CONTEXT_MAX_SIZE); - res = parent->i_sb->s_cop->set_context(child, &ctx, - sizeof(ctx), fs_data); + res = parent->i_sb->s_cop->set_context(child, &ctx, ctxsize, fs_data); if (res) return res; return preload ? fscrypt_get_encryption_info(child): 0; diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 6628d09585bd..8b8ff0484042 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -43,7 +43,7 @@ struct fscrypt_name { #define fname_len(p) ((p)->disk_name.len) /* Maximum value for the third parameter of fscrypt_operations.set_context(). 
*/ -#define FSCRYPT_SET_CONTEXT_MAX_SIZE 28 +#define FSCRYPT_SET_CONTEXT_MAX_SIZE 40 #ifdef CONFIG_FS_ENCRYPTION /* @@ -135,6 +135,7 @@ extern void fscrypt_free_bounce_page(struct page *bounce_page); /* policy.c */ extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); extern int fscrypt_ioctl_get_policy(struct file *, void __user *); +extern int fscrypt_ioctl_get_policy_ex(struct file *, void __user *); extern int fscrypt_has_permitted_context(struct inode *, struct inode *); extern int fscrypt_inherit_context(struct inode *, struct inode *, void *, bool); @@ -361,6 +362,12 @@ static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) return -EOPNOTSUPP; } +static inline int fscrypt_ioctl_get_policy_ex(struct file *filp, + void __user *arg) +{ + return -EOPNOTSUPP; +} + static inline int fscrypt_has_permitted_context(struct inode *parent, struct inode *child) { diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index ed5995b15016..f961ebf83e98 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -10,15 +10,13 @@ #include -#define FSCRYPT_KEY_DESCRIPTOR_SIZE 8 - /* Encryption policy flags */ #define FSCRYPT_POLICY_FLAGS_PAD_4 0x00 #define FSCRYPT_POLICY_FLAGS_PAD_8 0x01 #define FSCRYPT_POLICY_FLAGS_PAD_16 0x02 #define FSCRYPT_POLICY_FLAGS_PAD_32 0x03 #define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03 -#define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04 /* use master key directly */ +#define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04 #define FSCRYPT_POLICY_FLAGS_VALID 0x07 /* Encryption algorithms */ @@ -27,14 +25,24 @@ #define FSCRYPT_MODE_AES_128_CBC 5 #define FSCRYPT_MODE_AES_128_CTS 6 #define FSCRYPT_MODE_ADIANTUM 9 +#define __FSCRYPT_MODE_MAX 9 -struct fscrypt_policy { +/* + * Legacy policy version; ad-hoc KDF and no key verification. + * For new encrypted directories, use fscrypt_policy_v2 instead. + * + * Careful: the .version field for this is actually 0, not 1. + */ +#define FSCRYPT_POLICY_V1 0 +#define FSCRYPT_KEY_DESCRIPTOR_SIZE 8 +struct fscrypt_policy_v1 { __u8 version; __u8 contents_encryption_mode; __u8 filenames_encryption_mode; __u8 flags; __u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; }; +#define fscrypt_policy fscrypt_policy_v1 /* * Process-subscribed "logon" key description prefix and payload format. @@ -50,14 +58,45 @@ struct fscrypt_key { }; /* - * Keys are specified by an arbitrary 8-byte key "descriptor", - * matching fscrypt_policy::master_key_descriptor. + * New policy version with HKDF and key verification (recommended). + */ +#define FSCRYPT_POLICY_V2 2 +#define FSCRYPT_KEY_IDENTIFIER_SIZE 16 +struct fscrypt_policy_v2 { + __u8 version; + __u8 contents_encryption_mode; + __u8 filenames_encryption_mode; + __u8 flags; + __u8 __reserved[4]; + __u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]; +}; + +/* Struct passed to FS_IOC_GET_ENCRYPTION_POLICY_EX */ +struct fscrypt_get_policy_ex_arg { + __u64 policy_size; /* input/output */ + union { + __u8 version; + struct fscrypt_policy_v1 v1; + struct fscrypt_policy_v2 v2; + } policy; /* output */ +}; + +/* + * v1 policy keys are specified by an arbitrary 8-byte key "descriptor", + * matching fscrypt_policy_v1::master_key_descriptor. */ #define FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR 1 /* - * Specifies a key. This doesn't contain the actual key itself; this is just - * the "name" of the key. 
+ * v2 policy keys are specified by a 16-byte key "identifier" which the kernel + * calculates as a cryptographic hash of the key itself, + * matching fscrypt_policy_v2::master_key_identifier. + */ +#define FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER 2 + +/* + * Specifies a key, either for v1 or v2 policies. This doesn't contain the + * actual key itself; this is just the "name" of the key. */ struct fscrypt_key_specifier { __u32 type; /* one of FSCRYPT_KEY_SPEC_TYPE_* */ @@ -65,6 +104,7 @@ struct fscrypt_key_specifier { union { __u8 __reserved[32]; /* reserve some extra space */ __u8 descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; + __u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]; } u; }; @@ -101,6 +141,7 @@ struct fscrypt_get_key_status_arg { #define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy) #define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) #define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy) +#define FS_IOC_GET_ENCRYPTION_POLICY_EX _IOWR('f', 22, __u8[9]) /* size + version */ #define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg) #define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg) #define FS_IOC_GET_ENCRYPTION_KEY_STATUS _IOWR('f', 26, struct fscrypt_get_key_status_arg) -- cgit v1.2.3 From 23c688b54016eed15d39f4387ca9da241e165922 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 4 Aug 2019 19:35:47 -0700 Subject: fscrypt: allow unprivileged users to add/remove keys for v2 policies Allow the FS_IOC_ADD_ENCRYPTION_KEY and FS_IOC_REMOVE_ENCRYPTION_KEY ioctls to be used by non-root users to add and remove encryption keys from the filesystem-level crypto keyrings, subject to limitations. Motivation: while privileged fscrypt key management is sufficient for some users (e.g. Android and Chromium OS, where a privileged process manages all keys), the old API by design also allows non-root users to set up and use encrypted directories, and we don't want to regress on that. In particular, we don't want to force users to continue using the old API, running into the visibility mismatch between files and keyrings and being unable to "lock" encrypted directories. Intuitively, the ioctls have to be privileged since they manipulate filesystem-level state. However, it's actually safe to make them unprivileged if we very carefully enforce some specific limitations. First, each key must be identified by a cryptographic hash so that a user can't add the wrong key for another user's files. For v2 encryption policies, we use the key_identifier for this. v1 policies don't have this, so managing keys for them remains privileged. Second, each key a user adds is charged to their quota for the keyrings service. Thus, a user can't exhaust memory by adding a huge number of keys. By default each non-root user is allowed up to 200 keys; this can be changed using the existing sysctl 'kernel.keys.maxkeys'. Third, if multiple users add the same key, we keep track of those users of the key (of which there remains a single copy), and won't really remove the key, i.e. "lock" the encrypted files, until all those users have removed it. This prevents denial-of-service attacks that would be possible under simpler schemes, such as allowing the first user who added a key to remove it -- since that could be a malicious user who has compromised the key. Of course, encryption keys should be kept secret, but the idea is that using encryption should never be *less* secure than not using encryption, even if your key was compromised.
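As an illustration of the intended flow, here is a minimal userspace sketch (not part of the patch) of a non-root user adding a v2 policy key and reading back the kernel-computed identifier. Error handling is trimmed, the mount point is hypothetical, and real key material would come from a KDF or hardware source rather than a constant; field names follow the fscrypt uapi header introduced earlier in this series:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fscrypt.h>

	int main(void)
	{
		__u8 raw[64];			/* raw master key */
		struct fscrypt_add_key_arg *arg;
		int fd, i;

		memset(raw, 0x41, sizeof(raw));	/* placeholder key material only */

		arg = calloc(1, sizeof(*arg) + sizeof(raw));
		arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
		arg->raw_size = sizeof(raw);
		memcpy(arg->raw, raw, sizeof(raw));

		/* any file or directory on the target filesystem will do */
		fd = open("/mnt/crypted", O_RDONLY);
		if (fd < 0 || ioctl(fd, FS_IOC_ADD_ENCRYPTION_KEY, arg) != 0) {
			perror("FS_IOC_ADD_ENCRYPTION_KEY");
			return 1;
		}

		/* the kernel filled in the HKDF-derived key identifier */
		printf("key identifier: ");
		for (i = 0; i < FSCRYPT_KEY_IDENTIFIER_SIZE; i++)
			printf("%02x", arg->key_spec.u.identifier[i]);
		printf("\n");
		return 0;
	}

The printed identifier is what userspace later places in fscrypt_policy_v2::master_key_identifier when setting a directory's policy, and what it passes to FS_IOC_REMOVE_ENCRYPTION_KEY to drop its claim to the key.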
We tolerate that a user will be unable to really remove a key, i.e. unable to "lock" their encrypted files, if another user has added the same key. But in a sense, this is actually a good thing because it will avoid providing a false notion of security where a key appears to have been removed when actually it's still in memory, available to any attacker who compromises the operating system kernel. Reviewed-by: Theodore Ts'o Signed-off-by: Eric Biggers --- fs/crypto/fscrypt_private.h | 31 ++++- fs/crypto/keyring.c | 320 ++++++++++++++++++++++++++++++++++++++++--- fs/crypto/keysetup.c | 18 +-- include/uapi/linux/fscrypt.h | 6 +- 4 files changed, 341 insertions(+), 34 deletions(-) (limited to 'include/uapi') diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index c89e37d38e42..d0e238234234 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -335,9 +335,16 @@ struct fscrypt_master_key { * FS_IOC_REMOVE_ENCRYPTION_KEY can be retried, or * FS_IOC_ADD_ENCRYPTION_KEY can add the secret again. * - * Locking: protected by key->sem. + * Locking: protected by key->sem (outer) and mk_secret_sem (inner). + * The reason for two locks is that key->sem also protects modifying + * mk_users, which ranks it above the semaphore for the keyring key + * type, which is in turn above page faults (via keyring_read). But + * sometimes filesystems call fscrypt_get_encryption_info() from within + * a transaction, which ranks it below page faults. So we need a + * separate lock which protects mk_secret but not also mk_users. */ struct fscrypt_master_key_secret mk_secret; + struct rw_semaphore mk_secret_sem; /* * For v1 policy keys: an arbitrary key descriptor which was assigned by @@ -347,6 +354,22 @@ struct fscrypt_master_key { */ struct fscrypt_key_specifier mk_spec; + /* + * Keyring which contains a key of type 'key_type_fscrypt_user' for each + * user who has added this key. Normally each key will be added by just + * one user, but it's possible that multiple users share a key, and in + * that case we need to keep track of those users so that one user can't + * remove the key before the others want it removed too. + * + * This is NULL for v1 policy keys; those can only be added by root. + * + * Locking: in addition to this keyring's own semaphore, this is + * protected by the master key's key->sem, so we can do atomic + * search+insert. It can also be searched without taking any locks, but + * in that case the returned key may have already been removed. + */ + struct key *mk_users; + /* * Length of ->mk_decrypted_inodes, plus one if mk_secret is present. * Once this goes to 0, the master key is removed from ->s_master_keys. @@ -374,9 +397,9 @@ is_master_key_secret_present(const struct fscrypt_master_key_secret *secret) /* * The READ_ONCE() is only necessary for fscrypt_drop_inode() and * fscrypt_key_describe(). These run in atomic context, so they can't - * take key->sem and thus 'secret' can change concurrently which would - * be a data race. But they only need to know whether the secret *was* - * present at the time of check, so READ_ONCE() suffices. + * take ->mk_secret_sem and thus 'secret' can change concurrently which + * would be a data race. But they only need to know whether the secret + * *was* present at the time of check, so READ_ONCE() suffices.
*/ return READ_ONCE(secret->size) != 0; } diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index fca3bdf01e7c..c654effb83f5 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -45,6 +45,7 @@ static void free_master_key(struct fscrypt_master_key *mk) for (i = 0; i < ARRAY_SIZE(mk->mk_mode_keys); i++) crypto_free_skcipher(mk->mk_mode_keys[i]); + key_put(mk->mk_users); kzfree(mk); } @@ -93,7 +94,39 @@ static struct key_type key_type_fscrypt = { .describe = fscrypt_key_describe, }; -/* Search ->s_master_keys */ +static int fscrypt_user_key_instantiate(struct key *key, + struct key_preparsed_payload *prep) +{ + /* + * We just charge FSCRYPT_MAX_KEY_SIZE bytes to the user's key quota for + * each key, regardless of the exact key size. The amount of memory + * actually used is greater than the size of the raw key anyway. + */ + return key_payload_reserve(key, FSCRYPT_MAX_KEY_SIZE); +} + +static void fscrypt_user_key_describe(const struct key *key, struct seq_file *m) +{ + seq_puts(m, key->description); +} + +/* + * Type of key in ->mk_users. Each key of this type represents a particular + * user who has added a particular master key. + * + * Note that the name of this key type really should be something like + * ".fscrypt-user" instead of simply ".fscrypt". But the shorter name is chosen + * mainly for simplicity of presentation in /proc/keys when read by a non-root + * user. And it is expected to be rare that a key is actually added by multiple + * users, since users should keep their encryption keys confidential. + */ +static struct key_type key_type_fscrypt_user = { + .name = ".fscrypt", + .instantiate = fscrypt_user_key_instantiate, + .describe = fscrypt_user_key_describe, +}; + +/* Search ->s_master_keys or ->mk_users */ static struct key *search_fscrypt_keyring(struct key *keyring, struct key_type *type, const char *description) @@ -119,6 +152,13 @@ static struct key *search_fscrypt_keyring(struct key *keyring, #define FSCRYPT_MK_DESCRIPTION_SIZE (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + 1) +#define FSCRYPT_MK_USERS_DESCRIPTION_SIZE \ + (CONST_STRLEN("fscrypt-") + 2 * FSCRYPT_KEY_IDENTIFIER_SIZE + \ + CONST_STRLEN("-users") + 1) + +#define FSCRYPT_MK_USER_DESCRIPTION_SIZE \ + (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + CONST_STRLEN(".uid.") + 10 + 1) + static void format_fs_keyring_description( char description[FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE], const struct super_block *sb) @@ -134,6 +174,23 @@ static void format_mk_description( master_key_spec_len(mk_spec), (u8 *)&mk_spec->u); } +static void format_mk_users_keyring_description( + char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE], + const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) +{ + sprintf(description, "fscrypt-%*phN-users", + FSCRYPT_KEY_IDENTIFIER_SIZE, mk_identifier); +} + +static void format_mk_user_description( + char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE], + const u8 mk_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]) +{ + + sprintf(description, "%*phN.uid.%u", FSCRYPT_KEY_IDENTIFIER_SIZE, + mk_identifier, __kuid_val(current_fsuid())); +} + /* Create ->s_master_keys if needed. Synchronized by fscrypt_add_key_mutex. 
*/ static int allocate_filesystem_keyring(struct super_block *sb) { @@ -181,6 +238,80 @@ struct key *fscrypt_find_master_key(struct super_block *sb, return search_fscrypt_keyring(keyring, &key_type_fscrypt, description); } +static int allocate_master_key_users_keyring(struct fscrypt_master_key *mk) +{ + char description[FSCRYPT_MK_USERS_DESCRIPTION_SIZE]; + struct key *keyring; + + format_mk_users_keyring_description(description, + mk->mk_spec.u.identifier); + keyring = keyring_alloc(description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + current_cred(), KEY_POS_SEARCH | + KEY_USR_SEARCH | KEY_USR_READ | KEY_USR_VIEW, + KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); + if (IS_ERR(keyring)) + return PTR_ERR(keyring); + + mk->mk_users = keyring; + return 0; +} + +/* + * Find the current user's "key" in the master key's ->mk_users. + * Returns ERR_PTR(-ENOKEY) if not found. + */ +static struct key *find_master_key_user(struct fscrypt_master_key *mk) +{ + char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE]; + + format_mk_user_description(description, mk->mk_spec.u.identifier); + return search_fscrypt_keyring(mk->mk_users, &key_type_fscrypt_user, + description); +} + +/* + * Give the current user a "key" in ->mk_users. This charges the user's quota + * and marks the master key as added by the current user, so that it cannot be + * removed by another user with the key. Either the master key's key->sem must + * be held for write, or the master key must be still undergoing initialization. + */ +static int add_master_key_user(struct fscrypt_master_key *mk) +{ + char description[FSCRYPT_MK_USER_DESCRIPTION_SIZE]; + struct key *mk_user; + int err; + + format_mk_user_description(description, mk->mk_spec.u.identifier); + mk_user = key_alloc(&key_type_fscrypt_user, description, + current_fsuid(), current_gid(), current_cred(), + KEY_POS_SEARCH | KEY_USR_VIEW, 0, NULL); + if (IS_ERR(mk_user)) + return PTR_ERR(mk_user); + + err = key_instantiate_and_link(mk_user, NULL, 0, mk->mk_users, NULL); + key_put(mk_user); + return err; +} + +/* + * Remove the current user's "key" from ->mk_users. + * The master key's key->sem must be held for write. + * + * Returns 0 if removed, -ENOKEY if not found, or another -errno code. + */ +static int remove_master_key_user(struct fscrypt_master_key *mk) +{ + struct key *mk_user; + int err; + + mk_user = find_master_key_user(mk); + if (IS_ERR(mk_user)) + return PTR_ERR(mk_user); + err = key_unlink(mk->mk_users, mk_user); + key_put(mk_user); + return err; +} + /* * Allocate a new fscrypt_master_key which contains the given secret, set it as * the payload of a new 'struct key' of type fscrypt, and link the 'struct key' @@ -202,11 +333,26 @@ static int add_new_master_key(struct fscrypt_master_key_secret *secret, mk->mk_spec = *mk_spec; move_master_key_secret(&mk->mk_secret, secret); + init_rwsem(&mk->mk_secret_sem); refcount_set(&mk->mk_refcount, 1); /* secret is present */ INIT_LIST_HEAD(&mk->mk_decrypted_inodes); spin_lock_init(&mk->mk_decrypted_inodes_lock); + if (mk_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { + err = allocate_master_key_users_keyring(mk); + if (err) + goto out_free_mk; + err = add_master_key_user(mk); + if (err) + goto out_free_mk; + } + + /* + * Note that we don't charge this key to anyone's quota, since when + * ->mk_users is in use those keys are charged instead, and otherwise + * (when ->mk_users isn't in use) only root can add these keys. 
+ */ format_mk_description(description, mk_spec); key = key_alloc(&key_type_fscrypt, description, GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(), @@ -233,13 +379,45 @@ out_free_mk: static int add_existing_master_key(struct fscrypt_master_key *mk, struct fscrypt_master_key_secret *secret) { - if (is_master_key_secret_present(&mk->mk_secret)) - return 0; + struct key *mk_user; + bool rekey; + int err; - if (!refcount_inc_not_zero(&mk->mk_refcount)) + /* + * If the current user is already in ->mk_users, then there's nothing to + * do. (Not applicable for v1 policy keys, which have NULL ->mk_users.) + */ + if (mk->mk_users) { + mk_user = find_master_key_user(mk); + if (mk_user != ERR_PTR(-ENOKEY)) { + if (IS_ERR(mk_user)) + return PTR_ERR(mk_user); + key_put(mk_user); + return 0; + } + } + + /* If we'll be re-adding ->mk_secret, try to take the reference. */ + rekey = !is_master_key_secret_present(&mk->mk_secret); + if (rekey && !refcount_inc_not_zero(&mk->mk_refcount)) return KEY_DEAD; - move_master_key_secret(&mk->mk_secret, secret); + /* Add the current user to ->mk_users, if applicable. */ + if (mk->mk_users) { + err = add_master_key_user(mk); + if (err) { + if (rekey && refcount_dec_and_test(&mk->mk_refcount)) + return KEY_DEAD; + return err; + } + } + + /* Re-add the secret if needed. */ + if (rekey) { + down_write(&mk->mk_secret_sem); + move_master_key_secret(&mk->mk_secret, secret); + up_write(&mk->mk_secret_sem); + } return 0; } @@ -266,7 +444,7 @@ retry: } else { /* * Found the key in ->s_master_keys. Re-add the secret if - * needed. + * needed, and add the user to ->mk_users if needed. */ down_write(&key->sem); err = add_existing_master_key(key->payload.data[0], secret); @@ -288,6 +466,23 @@ out_unlock: * Add a master encryption key to the filesystem, causing all files which were * encrypted with it to appear "unlocked" (decrypted) when accessed. * + * When adding a key for use by v1 encryption policies, this ioctl is + * privileged, and userspace must provide the 'key_descriptor'. + * + * When adding a key for use by v2+ encryption policies, this ioctl is + * unprivileged. This is needed, in general, to allow non-root users to use + * encryption without encountering the visibility problems of process-subscribed + * keyrings and the inability to properly remove keys. This works by having + * each key identified by its cryptographically secure hash --- the + * 'key_identifier'. The cryptographic hash ensures that a malicious user + * cannot add the wrong key for a given identifier. Furthermore, each added key + * is charged to the appropriate user's quota for the keyrings service, which + * prevents a malicious user from adding too many keys. Finally, we forbid a + * user from removing a key while other users have added it too, which prevents + * a user who knows another user's key from causing a denial-of-service by + * removing it at an inopportune time. (We tolerate that a user who knows a key + * can prevent other users from removing it.) + * * For more details, see the "FS_IOC_ADD_ENCRYPTION_KEY" section of * Documentation/filesystems/fscrypt.rst. 
*/ @@ -318,11 +513,18 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) if (copy_from_user(secret.raw, uarg->raw, secret.size)) goto out_wipe_secret; - err = -EACCES; - if (!capable(CAP_SYS_ADMIN)) - goto out_wipe_secret; - - if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) { + switch (arg.key_spec.type) { + case FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR: + /* + * Only root can add keys that are identified by an arbitrary + * descriptor rather than by a cryptographic hash --- since + * otherwise a malicious user could add the wrong key. + */ + err = -EACCES; + if (!capable(CAP_SYS_ADMIN)) + goto out_wipe_secret; + break; + case FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER: err = fscrypt_init_hkdf(&secret.hkdf, secret.raw, secret.size); if (err) goto out_wipe_secret; @@ -345,6 +547,11 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg) arg.key_spec.u.identifier, FSCRYPT_KEY_IDENTIFIER_SIZE)) goto out_wipe_secret; + break; + default: + WARN_ON(1); + err = -EINVAL; + goto out_wipe_secret; } err = add_master_key(sb, &secret, &arg.key_spec); @@ -492,9 +699,12 @@ static int try_to_lock_encrypted_files(struct super_block *sb, /* * Try to remove an fscrypt master encryption key. * - * First we wipe the actual master key secret, so that no more inodes can be - * unlocked with it. Then we try to evict all cached inodes that had been - * unlocked with the key. + * This removes the current user's claim to the key, then removes the key itself + * if no other users have claims. + * + * To "remove the key itself", first we wipe the actual master key secret, so + * that no more inodes can be unlocked with it. Then we try to evict all cached + * inodes that had been unlocked with the key. * * If all inodes were evicted, then we unlink the fscrypt_master_key from the * keyring. Otherwise it remains in the keyring in the "incompletely removed" @@ -525,7 +735,12 @@ int fscrypt_ioctl_remove_key(struct file *filp, void __user *_uarg) if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; - if (!capable(CAP_SYS_ADMIN)) + /* + * Only root can add and remove keys that are identified by an arbitrary + * descriptor rather than by a cryptographic hash. + */ + if (arg.key_spec.type == FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR && + !capable(CAP_SYS_ADMIN)) return -EACCES; /* Find the key being removed. */ @@ -536,11 +751,34 @@ int fscrypt_ioctl_remove_key(struct file *filp, void __user *_uarg) down_write(&key->sem); - /* Wipe the secret. */ + /* If relevant, remove current user's claim to the key */ + if (mk->mk_users && mk->mk_users->keys.nr_leaves_on_tree != 0) { + err = remove_master_key_user(mk); + if (err) { + up_write(&key->sem); + goto out_put_key; + } + if (mk->mk_users->keys.nr_leaves_on_tree != 0) { + /* + * Other users have still added the key too. We removed + * the current user's claim to the key, but we still + * can't remove the key itself. + */ + status_flags |= + FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS; + err = 0; + up_write(&key->sem); + goto out_put_key; + } + } + + /* No user claims remaining. Go ahead and wipe the secret. 
*/ dead = false; if (is_master_key_secret_present(&mk->mk_secret)) { + down_write(&mk->mk_secret_sem); wipe_master_key_secret(&mk->mk_secret); dead = refcount_dec_and_test(&mk->mk_refcount); + up_write(&mk->mk_secret_sem); } up_write(&key->sem); if (dead) { @@ -560,11 +798,12 @@ int fscrypt_ioctl_remove_key(struct file *filp, void __user *_uarg) } } /* - * We return 0 if we successfully did something: wiped the secret, or - * tried locking the files again. Users need to check the informational - * status flags if they care whether the key has been fully removed - * including all files locked. + * We return 0 if we successfully did something: removed a claim to the + * key, wiped the secret, or tried locking the files again. Users need + * to check the informational status flags if they care whether the key + * has been fully removed including all files locked. */ +out_put_key: key_put(key); if (err == 0) err = put_user(status_flags, &uarg->removal_status_flags); @@ -583,6 +822,15 @@ EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key); * regular file in it (which can confuse the "incompletely removed" state with * absent or present). * + * In addition, for v2 policy keys we allow applications to determine, via + * ->status_flags and ->user_count, whether the key has been added by the + * current user, by other users, or by both. Most applications should not need + * this, since ordinarily only one user should know a given key. However, if a + * secret key is shared by multiple users, applications may wish to add an + * already-present key to prevent other users from removing it. This ioctl can + * be used to check whether that really is the case before the work is done to + * add the key --- which might e.g. require prompting the user for a passphrase. + * * For more details, see the "FS_IOC_GET_ENCRYPTION_KEY_STATUS" section of * Documentation/filesystems/fscrypt.rst. 
*/ @@ -603,6 +851,8 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg) if (memchr_inv(arg.__reserved, 0, sizeof(arg.__reserved))) return -EINVAL; + arg.status_flags = 0; + arg.user_count = 0; memset(arg.__out_reserved, 0, sizeof(arg.__out_reserved)); key = fscrypt_find_master_key(sb, &arg.key_spec); @@ -623,6 +873,20 @@ int fscrypt_ioctl_get_key_status(struct file *filp, void __user *uarg) } arg.status = FSCRYPT_KEY_STATUS_PRESENT; + if (mk->mk_users) { + struct key *mk_user; + + arg.user_count = mk->mk_users->keys.nr_leaves_on_tree; + mk_user = find_master_key_user(mk); + if (!IS_ERR(mk_user)) { + arg.status_flags |= + FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF; + key_put(mk_user); + } else if (mk_user != ERR_PTR(-ENOKEY)) { + err = PTR_ERR(mk_user); + goto out_release_key; + } + } err = 0; out_release_key: up_read(&key->sem); @@ -636,5 +900,19 @@ EXPORT_SYMBOL_GPL(fscrypt_ioctl_get_key_status); int __init fscrypt_init_keyring(void) { - return register_key_type(&key_type_fscrypt); + int err; + + err = register_key_type(&key_type_fscrypt); + if (err) + return err; + + err = register_key_type(&key_type_fscrypt_user); + if (err) + goto err_unregister_fscrypt; + + return 0; + +err_unregister_fscrypt: + unregister_key_type(&key_type_fscrypt); + return err; } diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index f423d48264db..d71c2d6dd162 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -286,10 +286,10 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, * * If the master key is found in the filesystem-level keyring, then the * corresponding 'struct key' is returned in *master_key_ret with - * ->sem read-locked. This is needed to ensure that only one task links the - * fscrypt_info into ->mk_decrypted_inodes (as multiple tasks may race to create - * an fscrypt_info for the same inode), and to synchronize the master key being - * removed with a new inode starting to use it. + * ->mk_secret_sem read-locked. This is needed to ensure that only one task + * links the fscrypt_info into ->mk_decrypted_inodes (as multiple tasks may race + * to create an fscrypt_info for the same inode), and to synchronize the master + * key being removed with a new inode starting to use it. */ static int setup_file_encryption_key(struct fscrypt_info *ci, struct key **master_key_ret) @@ -333,7 +333,7 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, } mk = key->payload.data[0]; - down_read(&key->sem); + down_read(&mk->mk_secret_sem); /* Has the secret been removed (via FS_IOC_REMOVE_ENCRYPTION_KEY)? */ if (!is_master_key_secret_present(&mk->mk_secret)) { @@ -376,7 +376,7 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, return 0; out_release_key: - up_read(&key->sem); + up_read(&mk->mk_secret_sem); key_put(key); return err; } @@ -514,7 +514,9 @@ int fscrypt_get_encryption_info(struct inode *inode) res = 0; out: if (master_key) { - up_read(&master_key->sem); + struct fscrypt_master_key *mk = master_key->payload.data[0]; + + up_read(&mk->mk_secret_sem); key_put(master_key); } if (res == -ENOKEY) @@ -577,7 +579,7 @@ int fscrypt_drop_inode(struct inode *inode) mk = ci->ci_master_key->payload.data[0]; /* - * Note: since we aren't holding key->sem, the result here can + * Note: since we aren't holding ->mk_secret_sem, the result here can * immediately become outdated. But there's no correctness problem with * unnecessarily evicting. 
Nor is there a correctness problem with not * evicting while iput() is racing with the key being removed, since diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index f961ebf83e98..b9fb775e3db8 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -120,6 +120,7 @@ struct fscrypt_add_key_arg { struct fscrypt_remove_key_arg { struct fscrypt_key_specifier key_spec; #define FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY 0x00000001 +#define FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS 0x00000002 __u32 removal_status_flags; /* output */ __u32 __reserved[5]; }; @@ -135,7 +136,10 @@ struct fscrypt_get_key_status_arg { #define FSCRYPT_KEY_STATUS_PRESENT 2 #define FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED 3 __u32 status; - __u32 __out_reserved[15]; +#define FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF 0x00000001 + __u32 status_flags; + __u32 user_count; + __u32 __out_reserved[13]; }; #define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy) -- cgit v1.2.3 From 78a1b96bcf7a0721c7852bb1475218c3cbef884a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Sun, 4 Aug 2019 19:35:47 -0700 Subject: fscrypt: add FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS ioctl Add a root-only variant of the FS_IOC_REMOVE_ENCRYPTION_KEY ioctl which removes all users' claims of the key, not just the current user's claim. I.e., it always removes the key itself, no matter how many users have added it. This is useful for forcing a directory to be locked, without having to figure out which user ID(s) the key was added under. This is planned to be used by a command like 'sudo fscrypt lock DIR --all-users' in the fscrypt userspace tool (http://github.com/google/fscrypt). Reviewed-by: Theodore Ts'o Signed-off-by: Eric Biggers --- fs/crypto/keyring.c | 29 ++++++++++++++++++++++++----- include/linux/fscrypt.h | 8 ++++++++ include/uapi/linux/fscrypt.h | 1 + 3 files changed, 33 insertions(+), 5 deletions(-) (limited to 'include/uapi') diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index c654effb83f5..8c600ead0e2e 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -11,6 +11,7 @@ * * - FS_IOC_ADD_ENCRYPTION_KEY * - FS_IOC_REMOVE_ENCRYPTION_KEY + * - FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS * - FS_IOC_GET_ENCRYPTION_KEY_STATUS * * See the "User API" section of Documentation/filesystems/fscrypt.rst for more @@ -699,8 +700,10 @@ static int try_to_lock_encrypted_files(struct super_block *sb, /* * Try to remove an fscrypt master encryption key. * - * This removes the current user's claim to the key, then removes the key itself - * if no other users have claims. + * FS_IOC_REMOVE_ENCRYPTION_KEY (all_users=false) removes the current user's + * claim to the key, then removes the key itself if no other users have claims. + * FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS (all_users=true) always removes the + * key itself. * * To "remove the key itself", first we wipe the actual master key secret, so * that no more inodes can be unlocked with it. Then we try to evict all cached @@ -715,7 +718,7 @@ static int try_to_lock_encrypted_files(struct super_block *sb, * For more details, see the "Removing keys" section of * Documentation/filesystems/fscrypt.rst. 
*/ -int fscrypt_ioctl_remove_key(struct file *filp, void __user *_uarg) +static int do_remove_key(struct file *filp, void __user *_uarg, bool all_users) { struct super_block *sb = file_inode(filp)->i_sb; struct fscrypt_remove_key_arg __user *uarg = _uarg; @@ -751,9 +754,12 @@ int fscrypt_ioctl_remove_key(struct file *filp, void __user *_uarg) down_write(&key->sem); - /* If relevant, remove current user's claim to the key */ + /* If relevant, remove current user's (or all users) claim to the key */ if (mk->mk_users && mk->mk_users->keys.nr_leaves_on_tree != 0) { - err = remove_master_key_user(mk); + if (all_users) + err = keyring_clear(mk->mk_users); + else + err = remove_master_key_user(mk); if (err) { up_write(&key->sem); goto out_put_key; @@ -809,8 +815,21 @@ out_put_key: err = put_user(status_flags, &uarg->removal_status_flags); return err; } + +int fscrypt_ioctl_remove_key(struct file *filp, void __user *uarg) +{ + return do_remove_key(filp, uarg, false); +} EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key); +int fscrypt_ioctl_remove_key_all_users(struct file *filp, void __user *uarg) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + return do_remove_key(filp, uarg, true); +} +EXPORT_SYMBOL_GPL(fscrypt_ioctl_remove_key_all_users); + /* * Retrieve the status of an fscrypt master encryption key. * diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 8b8ff0484042..f622f7460ed8 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -143,6 +143,8 @@ extern int fscrypt_inherit_context(struct inode *, struct inode *, extern void fscrypt_sb_free(struct super_block *sb); extern int fscrypt_ioctl_add_key(struct file *filp, void __user *arg); extern int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg); +extern int fscrypt_ioctl_remove_key_all_users(struct file *filp, + void __user *arg); extern int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg); /* keysetup.c */ @@ -396,6 +398,12 @@ static inline int fscrypt_ioctl_remove_key(struct file *filp, void __user *arg) return -EOPNOTSUPP; } +static inline int fscrypt_ioctl_remove_key_all_users(struct file *filp, + void __user *arg) +{ + return -EOPNOTSUPP; +} + static inline int fscrypt_ioctl_get_key_status(struct file *filp, void __user *arg) { diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h index b9fb775e3db8..39ccfe9311c3 100644 --- a/include/uapi/linux/fscrypt.h +++ b/include/uapi/linux/fscrypt.h @@ -148,6 +148,7 @@ struct fscrypt_get_key_status_arg { #define FS_IOC_GET_ENCRYPTION_POLICY_EX _IOWR('f', 22, __u8[9]) /* size + version */ #define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg) #define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg) +#define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg) #define FS_IOC_GET_ENCRYPTION_KEY_STATUS _IOWR('f', 26, struct fscrypt_get_key_status_arg) /**********************************************************************/ -- cgit v1.2.3 From 075c2b6bf654bdba977795e95bba130113156a35 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sun, 21 Jul 2019 23:25:02 +0900 Subject: scsi: use __u{8,16,32,64} instead of uint{8,16,32,64}_t in uapi headers When CONFIG_UAPI_HEADER_TEST=y, exported headers are compile-tested to make sure they can be included from user-space. Currently, scsi_bsg_fc.h, scsi_netlink.h, and scsi_netlink_fc.h are excluded from the test coverage. To make them join the compile-test, we need to fix the build errors attached below. 
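The essence of the conversion, sketched with a made-up header name (this file is illustrative and is not one of the affected headers): exported headers may only rely on the kernel's own exported types, so the stdint.h names are replaced by the __u* equivalents from linux/types.h.

/* hypothetical include/uapi/linux/foo_example.h */
#ifndef _UAPI_FOO_EXAMPLE_H
#define _UAPI_FOO_EXAMPLE_H

#include <linux/types.h>	/* __u8/__u16/__u32/__u64, available in user space */

struct foo_example_hdr {
	__u8  version;		/* was: uint8_t  version;  -- fails without <stdint.h> */
	__u16 msglen;		/* was: uint16_t msglen; */
	__u64 vendor_id;	/* was: uint64_t vendor_id; */
} __attribute__((aligned(sizeof(__u64))));

#endif /* _UAPI_FOO_EXAMPLE_H */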
For a case like this, we decided to use __u{8,16,32,64} variable types in this discussion: https://lkml.org/lkml/2019/6/5/18 Build log: CC usr/include/scsi/scsi_netlink_fc.h.s CC usr/include/scsi/scsi_netlink.h.s CC usr/include/scsi/scsi_bsg_fc.h.s In file included from ./usr/include/scsi/scsi_netlink_fc.h:10:0, from :32: ./usr/include/scsi/scsi_netlink.h:29:2: error: unknown type name uint8_t uint8_t version; ^~~~~~~ ./usr/include/scsi/scsi_netlink.h:30:2: error: unknown type name uint8_t uint8_t transport; ^~~~~~~ ./usr/include/scsi/scsi_netlink.h:31:2: error: unknown type name uint16_t uint16_t magic; ^~~~~~~~ ./usr/include/scsi/scsi_netlink.h:32:2: error: unknown type name uint16_t uint16_t msgtype; ^~~~~~~~ CC usr/include/rdma/vmw_pvrdma-abi.h.s ./usr/include/scsi/scsi_netlink.h:33:2: error: unknown type name uint16_t uint16_t msglen; ^~~~~~~~ ./usr/include/scsi/scsi_netlink.h:34:33: error: uint64_t undeclared here (not in a function); did you mean __uint128_t ? } __attribute__((aligned(sizeof(uint64_t)))); ^~~~~~~~ __uint128_t ./usr/include/scsi/scsi_netlink.h:78:2: error: expected specifier-qualifier-list before uint64_t uint64_t vendor_id; ^~~~~~~~ In file included from :32:0: ./usr/include/scsi/scsi_netlink_fc.h:46:2: error: expected specifier-qualifier-list before uint64_t uint64_t seconds; ^~~~~~~~ make[2]: *** [scripts/Makefile.build;302: usr/include/scsi/scsi_netlink_fc.h.s] Error 1 make[2]: *** Waiting for unfinished jobs.... In file included from :32:0: ./usr/include/scsi/scsi_netlink.h:29:2: error: unknown type name uint8_t uint8_t version; ^~~~~~~ ./usr/include/scsi/scsi_netlink.h:30:2: error: unknown type name uint8_t uint8_t transport; ^~~~~~~ ./usr/include/scsi/scsi_netlink.h:31:2: error: unknown type name uint16_t uint16_t magic; ^~~~~~~~ ./usr/include/scsi/scsi_netlink.h:32:2: error: unknown type name uint16_t uint16_t msgtype; ^~~~~~~~ ./usr/include/scsi/scsi_netlink.h:33:2: error: unknown type name uint16_t uint16_t msglen; ^~~~~~~~ ./usr/include/scsi/scsi_netlink.h:34:33: error: uint64_t undeclared here (not in a function); did you mean __uint128_t ? 
} __attribute__((aligned(sizeof(uint64_t)))); ^~~~~~~~ __uint128_t ./usr/include/scsi/scsi_netlink.h:78:2: error: expected specifier-qualifier-list before uint64_t uint64_t vendor_id; ^~~~~~~~ make[2]: *** [scripts/Makefile.build;302: usr/include/scsi/scsi_netlink.h.s] Error 1 In file included from :32:0: ./usr/include/scsi/scsi_bsg_fc.h:69:2: error: unknown type name uint8_t uint8_t reserved; ^~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:72:2: error: unknown type name uint8_t uint8_t port_id[3]; ^~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:90:2: error: unknown type name uint8_t uint8_t reserved; ^~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:93:2: error: unknown type name uint8_t uint8_t port_id[3]; ^~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:114:2: error: unknown type name uint8_t uint8_t command_code; ^~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:117:2: error: unknown type name uint8_t uint8_t port_id[3]; ^~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:154:2: error: unknown type name uint32_t uint32_t status; /* See FC_CTELS_STATUS_xxx */ ^~~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:158:3: error: unknown type name uint8_t uint8_t action; /* fragment_id for CT REJECT */ ^~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:159:3: error: unknown type name uint8_t uint8_t reason_code; ^~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:160:3: error: unknown type name uint8_t uint8_t reason_explanation; ^~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:161:3: error: unknown type name uint8_t uint8_t vendor_unique; ^~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:177:2: error: unknown type name uint8_t uint8_t reserved; ^~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:180:2: error: unknown type name uint8_t uint8_t port_id[3]; ^~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:185:2: error: unknown type name uint32_t uint32_t preamble_word0; /* revision & IN_ID */ ^~~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:186:2: error: unknown type name uint32_t uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */ ^~~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:187:2: error: unknown type name uint32_t uint32_t preamble_word2; /* Cmd Code, Max Size */ ^~~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:207:2: error: unknown type name uint64_t uint64_t vendor_id; ^~~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:210:2: error: unknown type name uint32_t uint32_t vendor_cmd[0]; ^~~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:217:2: error: unknown type name uint32_t uint32_t vendor_rsp[0]; ^~~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:236:2: error: unknown type name uint8_t uint8_t els_code; ^~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:254:2: error: unknown type name uint32_t uint32_t preamble_word0; /* revision & IN_ID */ ^~~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:255:2: error: unknown type name uint32_t uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */ ^~~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:256:2: error: unknown type name uint32_t uint32_t preamble_word2; /* Cmd Code, Max Size */ ^~~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:268:2: error: unknown type name uint32_t uint32_t msgcode; ^~~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:292:2: error: unknown type name uint32_t uint32_t result; ^~~~~~~~ ./usr/include/scsi/scsi_bsg_fc.h:295:2: error: unknown type name uint32_t uint32_t reply_payload_rcv_len; ^~~~~~~~ Signed-off-by: Masahiro Yamada Signed-off-by: Martin K. 
Petersen --- include/uapi/scsi/scsi_bsg_fc.h | 54 +++++++++++++++++++------------------ include/uapi/scsi/scsi_netlink.h | 20 +++++++------- include/uapi/scsi/scsi_netlink_fc.h | 17 ++++++------ 3 files changed, 47 insertions(+), 44 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h index 52f32a60d056..3ae65e93235c 100644 --- a/include/uapi/scsi/scsi_bsg_fc.h +++ b/include/uapi/scsi/scsi_bsg_fc.h @@ -8,6 +8,8 @@ #ifndef SCSI_BSG_FC_H #define SCSI_BSG_FC_H +#include + /* * This file intended to be included by both kernel and user space */ @@ -66,10 +68,10 @@ * with the transport upon completion of the login. */ struct fc_bsg_host_add_rport { - uint8_t reserved; + __u8 reserved; /* FC Address Identier of the remote port to login to */ - uint8_t port_id[3]; + __u8 port_id[3]; }; /* Response: @@ -87,10 +89,10 @@ struct fc_bsg_host_add_rport { * remain logged in with the remote port. */ struct fc_bsg_host_del_rport { - uint8_t reserved; + __u8 reserved; /* FC Address Identier of the remote port to logout of */ - uint8_t port_id[3]; + __u8 port_id[3]; }; /* Response: @@ -111,10 +113,10 @@ struct fc_bsg_host_els { * ELS Command Code being sent (must be the same as byte 0 * of the payload) */ - uint8_t command_code; + __u8 command_code; /* FC Address Identier of the remote port to send the ELS to */ - uint8_t port_id[3]; + __u8 port_id[3]; }; /* Response: @@ -151,14 +153,14 @@ struct fc_bsg_ctels_reply { * Note: x_RJT/BSY status will indicae that the rjt_data field * is valid and contains the reason/explanation values. */ - uint32_t status; /* See FC_CTELS_STATUS_xxx */ + __u32 status; /* See FC_CTELS_STATUS_xxx */ /* valid if status is not FC_CTELS_STATUS_OK */ struct { - uint8_t action; /* fragment_id for CT REJECT */ - uint8_t reason_code; - uint8_t reason_explanation; - uint8_t vendor_unique; + __u8 action; /* fragment_id for CT REJECT */ + __u8 reason_code; + __u8 reason_explanation; + __u8 vendor_unique; } rjt_data; }; @@ -174,17 +176,17 @@ struct fc_bsg_ctels_reply { * and whether to tear it down after the request. */ struct fc_bsg_host_ct { - uint8_t reserved; + __u8 reserved; /* FC Address Identier of the remote port to send the ELS to */ - uint8_t port_id[3]; + __u8 port_id[3]; /* * We need words 0-2 of the generic preamble for the LLD's */ - uint32_t preamble_word0; /* revision & IN_ID */ - uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */ - uint32_t preamble_word2; /* Cmd Code, Max Size */ + __u32 preamble_word0; /* revision & IN_ID */ + __u32 preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */ + __u32 preamble_word2; /* Cmd Code, Max Size */ }; /* Response: @@ -204,17 +206,17 @@ struct fc_bsg_host_vendor { * Identifies the vendor that the message is formatted for. This * should be the recipient of the message. 
*/ - uint64_t vendor_id; + __u64 vendor_id; /* start of vendor command area */ - uint32_t vendor_cmd[0]; + __u32 vendor_cmd[0]; }; /* Response: */ struct fc_bsg_host_vendor_reply { /* start of vendor response area */ - uint32_t vendor_rsp[0]; + __u32 vendor_rsp[0]; }; @@ -233,7 +235,7 @@ struct fc_bsg_rport_els { * ELS Command Code being sent (must be the same as * byte 0 of the payload) */ - uint8_t els_code; + __u8 els_code; }; /* Response: @@ -251,9 +253,9 @@ struct fc_bsg_rport_ct { /* * We need words 0-2 of the generic preamble for the LLD's */ - uint32_t preamble_word0; /* revision & IN_ID */ - uint32_t preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */ - uint32_t preamble_word2; /* Cmd Code, Max Size */ + __u32 preamble_word0; /* revision & IN_ID */ + __u32 preamble_word1; /* GS_Type, GS_SubType, Options, Rsvd */ + __u32 preamble_word2; /* Cmd Code, Max Size */ }; /* Response: * @@ -265,7 +267,7 @@ struct fc_bsg_rport_ct { /* request (CDB) structure of the sg_io_v4 */ struct fc_bsg_request { - uint32_t msgcode; + __u32 msgcode; union { struct fc_bsg_host_add_rport h_addrport; struct fc_bsg_host_del_rport h_delrport; @@ -289,10 +291,10 @@ struct fc_bsg_reply { * msg and status fields. The per-msgcode reply structure * will contain valid data. */ - uint32_t result; + __u32 result; /* If there was reply_payload, how much was recevied ? */ - uint32_t reply_payload_rcv_len; + __u32 reply_payload_rcv_len; union { struct fc_bsg_host_vendor_reply vendor_reply; diff --git a/include/uapi/scsi/scsi_netlink.h b/include/uapi/scsi/scsi_netlink.h index 5dd382054e45..1b1737c3c9d8 100644 --- a/include/uapi/scsi/scsi_netlink.h +++ b/include/uapi/scsi/scsi_netlink.h @@ -26,12 +26,12 @@ /* SCSI_TRANSPORT_MSG event message header */ struct scsi_nl_hdr { - uint8_t version; - uint8_t transport; - uint16_t magic; - uint16_t msgtype; - uint16_t msglen; -} __attribute__((aligned(sizeof(uint64_t)))); + __u8 version; + __u8 transport; + __u16 magic; + __u16 msgtype; + __u16 msglen; +} __attribute__((aligned(sizeof(__u64)))); /* scsi_nl_hdr->version value */ #define SCSI_NL_VERSION 1 @@ -75,10 +75,10 @@ struct scsi_nl_hdr { */ struct scsi_nl_host_vendor_msg { struct scsi_nl_hdr snlh; /* must be 1st element ! */ - uint64_t vendor_id; - uint16_t host_no; - uint16_t vmsg_datalen; -} __attribute__((aligned(sizeof(uint64_t)))); + __u64 vendor_id; + __u16 host_no; + __u16 vmsg_datalen; +} __attribute__((aligned(sizeof(__u64)))); /* diff --git a/include/uapi/scsi/scsi_netlink_fc.h b/include/uapi/scsi/scsi_netlink_fc.h index a39023579051..7535253f1a96 100644 --- a/include/uapi/scsi/scsi_netlink_fc.h +++ b/include/uapi/scsi/scsi_netlink_fc.h @@ -7,6 +7,7 @@ #ifndef SCSI_NETLINK_FC_H #define SCSI_NETLINK_FC_H +#include #include /* @@ -43,14 +44,14 @@ */ struct fc_nl_event { struct scsi_nl_hdr snlh; /* must be 1st element ! */ - uint64_t seconds; - uint64_t vendor_id; - uint16_t host_no; - uint16_t event_datalen; - uint32_t event_num; - uint32_t event_code; - uint32_t event_data; -} __attribute__((aligned(sizeof(uint64_t)))); + __u64 seconds; + __u64 vendor_id; + __u16 host_no; + __u16 event_datalen; + __u32 event_num; + __u32 event_code; + __u32 event_data; +} __attribute__((aligned(sizeof(__u64)))); #endif /* SCSI_NETLINK_FC_H */ -- cgit v1.2.3 From add890c9f9d2d1d79184ded72f23b37b164fc673 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Mon, 22 Jul 2019 09:26:23 -0700 Subject: fs-verity: add SHA-512 support Add SHA-512 support to fs-verity. 
This is primarily a demonstration of the trivial changes needed to support a new hash algorithm in fs-verity; most users will still use SHA-256, due to the smaller space required to store the hashes. But some users may prefer SHA-512. Reviewed-by: Theodore Ts'o Reviewed-by: Jaegeuk Kim Signed-off-by: Eric Biggers --- fs/verity/fsverity_private.h | 2 +- fs/verity/hash_algs.c | 5 +++++ include/uapi/linux/fsverity.h | 1 + 3 files changed, 7 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/fs/verity/fsverity_private.h b/fs/verity/fsverity_private.h index eaa2b3b93bbf..02a547f0667c 100644 --- a/fs/verity/fsverity_private.h +++ b/fs/verity/fsverity_private.h @@ -29,7 +29,7 @@ struct ahash_request; * Largest digest size among all hash algorithms supported by fs-verity. * Currently assumed to be <= size of fsverity_descriptor::root_hash. */ -#define FS_VERITY_MAX_DIGEST_SIZE SHA256_DIGEST_SIZE +#define FS_VERITY_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE /* A hash algorithm supported by fs-verity */ struct fsverity_hash_alg { diff --git a/fs/verity/hash_algs.c b/fs/verity/hash_algs.c index 7df1d67742b8..31e6d7d2389a 100644 --- a/fs/verity/hash_algs.c +++ b/fs/verity/hash_algs.c @@ -17,6 +17,11 @@ struct fsverity_hash_alg fsverity_hash_algs[] = { .digest_size = SHA256_DIGEST_SIZE, .block_size = SHA256_BLOCK_SIZE, }, + [FS_VERITY_HASH_ALG_SHA512] = { + .name = "sha512", + .digest_size = SHA512_DIGEST_SIZE, + .block_size = SHA512_BLOCK_SIZE, + }, }; /** diff --git a/include/uapi/linux/fsverity.h b/include/uapi/linux/fsverity.h index 57d1d7fc0c34..da0daf6c193b 100644 --- a/include/uapi/linux/fsverity.h +++ b/include/uapi/linux/fsverity.h @@ -14,6 +14,7 @@ #include #define FS_VERITY_HASH_ALG_SHA256 1 +#define FS_VERITY_HASH_ALG_SHA512 2 struct fsverity_enable_arg { __u32 version; -- cgit v1.2.3 From a1b2f04ea527397fcacacd09e0d690927feef429 Mon Sep 17 00:00:00 2001 From: Jeremy Sowden Date: Wed, 7 Aug 2019 15:16:59 +0100 Subject: netfilter: add missing includes to a number of header-files. A number of netfilter header-files used declarations and definitions from other headers without including them. Added include directives to make those declarations and definitions available. 
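A minimal sketch of the failure mode being fixed (a hypothetical header, modelled on the shape of ip_set_getport.h below): the declaration compiles only if every includer happens to pull in struct sk_buff and __be16 first, so adding the include directives makes the header self-contained.

/* hypothetical self-contained helper header */
#ifndef _EXAMPLE_GETPORT_H
#define _EXAMPLE_GETPORT_H

#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/types.h>	/* __be16, u8, bool */

/* Without the two includes above, a .c file including only this
 * header would fail with "unknown type name" errors. */
extern bool example_get_port(const struct sk_buff *skb, bool src,
			     __be16 *port, u8 *proto);

#endif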
Signed-off-by: Jeremy Sowden Signed-off-by: Pablo Neira Ayuso --- include/linux/netfilter/ipset/ip_set_getport.h | 4 ++++ include/linux/netfilter/nf_conntrack_amanda.h | 4 ++++ include/linux/netfilter/nf_conntrack_ftp.h | 8 +++++--- include/linux/netfilter/nf_conntrack_h323.h | 7 +++++-- include/linux/netfilter/nf_conntrack_h323_asn1.h | 2 ++ include/linux/netfilter/nf_conntrack_irc.h | 4 ++++ include/linux/netfilter/nf_conntrack_pptp.h | 9 +++++---- include/linux/netfilter/nf_conntrack_sip.h | 4 ++-- include/linux/netfilter/nf_conntrack_snmp.h | 3 +++ include/linux/netfilter/nf_conntrack_tftp.h | 5 +++++ include/net/netfilter/br_netfilter.h | 2 ++ include/net/netfilter/ipv4/nf_dup_ipv4.h | 3 +++ include/net/netfilter/ipv6/nf_defrag_ipv6.h | 4 +++- include/net/netfilter/ipv6/nf_dup_ipv6.h | 2 ++ include/net/netfilter/nf_conntrack_bridge.h | 4 ++++ include/net/netfilter/nf_conntrack_count.h | 3 +++ include/net/netfilter/nf_dup_netdev.h | 2 ++ include/net/netfilter/nf_flow_table.h | 1 + include/net/netfilter/nf_nat_helper.h | 4 ++-- include/net/netfilter/nf_nat_redirect.h | 3 +++ include/net/netfilter/nf_queue.h | 2 ++ include/net/netfilter/nf_reject.h | 3 +++ include/net/netfilter/nf_tables_ipv6.h | 1 + include/net/netfilter/nft_fib.h | 2 ++ include/net/netfilter/nft_meta.h | 2 ++ include/net/netfilter/nft_reject.h | 5 +++++ include/uapi/linux/netfilter/xt_policy.h | 1 + 27 files changed, 80 insertions(+), 14 deletions(-) (limited to 'include/uapi') diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h index ac6a11d38a19..a906df06948b 100644 --- a/include/linux/netfilter/ipset/ip_set_getport.h +++ b/include/linux/netfilter/ipset/ip_set_getport.h @@ -2,6 +2,10 @@ #ifndef _IP_SET_GETPORT_H #define _IP_SET_GETPORT_H +#include +#include +#include + extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src, __be16 *port, u8 *proto); diff --git a/include/linux/netfilter/nf_conntrack_amanda.h b/include/linux/netfilter/nf_conntrack_amanda.h index 34345e543ba2..6f0ac896fcc9 100644 --- a/include/linux/netfilter/nf_conntrack_amanda.h +++ b/include/linux/netfilter/nf_conntrack_amanda.h @@ -3,6 +3,10 @@ #define _NF_CONNTRACK_AMANDA_H /* AMANDA tracking. */ +#include +#include +#include + extern unsigned int (*nf_nat_amanda_hook)(struct sk_buff *skb, enum ip_conntrack_info ctinfo, unsigned int protoff, diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h index 73a296dfd019..0e38302820b9 100644 --- a/include/linux/netfilter/nf_conntrack_ftp.h +++ b/include/linux/netfilter/nf_conntrack_ftp.h @@ -2,8 +2,12 @@ #ifndef _NF_CONNTRACK_FTP_H #define _NF_CONNTRACK_FTP_H +#include +#include +#include +#include #include - +#include #define FTP_PORT 21 @@ -20,8 +24,6 @@ struct nf_ct_ftp_master { u_int16_t flags[IP_CT_DIR_MAX]; }; -struct nf_conntrack_expect; - /* For NAT to hook in when we find a packet which describes what other * connection we should expect. 
*/ extern unsigned int (*nf_nat_ftp_hook)(struct sk_buff *skb, diff --git a/include/linux/netfilter/nf_conntrack_h323.h b/include/linux/netfilter/nf_conntrack_h323.h index f76ed373a2a5..96dfa886f8c0 100644 --- a/include/linux/netfilter/nf_conntrack_h323.h +++ b/include/linux/netfilter/nf_conntrack_h323.h @@ -4,7 +4,12 @@ #ifdef __KERNEL__ +#include +#include +#include #include +#include +#include #define RAS_PORT 1719 #define Q931_PORT 1720 @@ -28,8 +33,6 @@ struct nf_ct_h323_master { }; }; -struct nf_conn; - int get_h225_addr(struct nf_conn *ct, unsigned char *data, TransportAddress *taddr, union nf_inet_addr *addr, __be16 *port); diff --git a/include/linux/netfilter/nf_conntrack_h323_asn1.h b/include/linux/netfilter/nf_conntrack_h323_asn1.h index 19df78341fb3..bd6797f823b2 100644 --- a/include/linux/netfilter/nf_conntrack_h323_asn1.h +++ b/include/linux/netfilter/nf_conntrack_h323_asn1.h @@ -37,6 +37,8 @@ /***************************************************************************** * H.323 Types ****************************************************************************/ + +#include #include typedef struct { diff --git a/include/linux/netfilter/nf_conntrack_irc.h b/include/linux/netfilter/nf_conntrack_irc.h index 00c2b74206e1..f75e005db969 100644 --- a/include/linux/netfilter/nf_conntrack_irc.h +++ b/include/linux/netfilter/nf_conntrack_irc.h @@ -4,6 +4,10 @@ #ifdef __KERNEL__ +#include +#include +#include + #define IRC_PORT 6667 extern unsigned int (*nf_nat_irc_hook)(struct sk_buff *skb, diff --git a/include/linux/netfilter/nf_conntrack_pptp.h b/include/linux/netfilter/nf_conntrack_pptp.h index 833a5b2255ea..3f10e806f0dc 100644 --- a/include/linux/netfilter/nf_conntrack_pptp.h +++ b/include/linux/netfilter/nf_conntrack_pptp.h @@ -3,7 +3,12 @@ #ifndef _NF_CONNTRACK_PPTP_H #define _NF_CONNTRACK_PPTP_H +#include +#include +#include #include +#include +#include extern const char *const pptp_msg_name[]; @@ -297,10 +302,6 @@ union pptp_ctrl_union { struct PptpSetLinkInfo setlink; }; -/* crap needed for nf_conntrack_compat.h */ -struct nf_conn; -struct nf_conntrack_expect; - extern int (*nf_nat_pptp_hook_outbound)(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo, diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h index c7fc38807a33..f6437f7841af 100644 --- a/include/linux/netfilter/nf_conntrack_sip.h +++ b/include/linux/netfilter/nf_conntrack_sip.h @@ -3,9 +3,9 @@ #define __NF_CONNTRACK_SIP_H__ #ifdef __KERNEL__ -#include - +#include #include +#include #define SIP_PORT 5060 #define SIP_TIMEOUT 3600 diff --git a/include/linux/netfilter/nf_conntrack_snmp.h b/include/linux/netfilter/nf_conntrack_snmp.h index 818088c47475..87e4f33eb55f 100644 --- a/include/linux/netfilter/nf_conntrack_snmp.h +++ b/include/linux/netfilter/nf_conntrack_snmp.h @@ -2,6 +2,9 @@ #ifndef _NF_CONNTRACK_SNMP_H #define _NF_CONNTRACK_SNMP_H +#include +#include + extern int (*nf_nat_snmp_hook)(struct sk_buff *skb, unsigned int protoff, struct nf_conn *ct, diff --git a/include/linux/netfilter/nf_conntrack_tftp.h b/include/linux/netfilter/nf_conntrack_tftp.h index 5769e12dd0a2..dc4c1b9beac0 100644 --- a/include/linux/netfilter/nf_conntrack_tftp.h +++ b/include/linux/netfilter/nf_conntrack_tftp.h @@ -4,6 +4,11 @@ #define TFTP_PORT 69 +#include +#include +#include +#include + struct tftphdr { __be16 opcode; }; diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h index 302fcd3aade2..ca121ed906df 100644 --- 
a/include/net/netfilter/br_netfilter.h +++ b/include/net/netfilter/br_netfilter.h @@ -2,6 +2,8 @@ #ifndef _BR_NETFILTER_H_ #define _BR_NETFILTER_H_ +#include + #include "../../../net/bridge/br_private.h" static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) diff --git a/include/net/netfilter/ipv4/nf_dup_ipv4.h b/include/net/netfilter/ipv4/nf_dup_ipv4.h index c962e0be3549..a2bc16cdbcd3 100644 --- a/include/net/netfilter/ipv4/nf_dup_ipv4.h +++ b/include/net/netfilter/ipv4/nf_dup_ipv4.h @@ -2,6 +2,9 @@ #ifndef _NF_DUP_IPV4_H_ #define _NF_DUP_IPV4_H_ +#include +#include + void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum, const struct in_addr *gw, int oif); diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h index 9d7e28736da9..6d31cd041143 100644 --- a/include/net/netfilter/ipv6/nf_defrag_ipv6.h +++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h @@ -2,7 +2,9 @@ #ifndef _NF_DEFRAG_IPV6_H #define _NF_DEFRAG_IPV6_H -struct net; +#include +#include + int nf_defrag_ipv6_enable(struct net *); int nf_ct_frag6_init(void); diff --git a/include/net/netfilter/ipv6/nf_dup_ipv6.h b/include/net/netfilter/ipv6/nf_dup_ipv6.h index caf0c2dd8ee7..f6312bb04a13 100644 --- a/include/net/netfilter/ipv6/nf_dup_ipv6.h +++ b/include/net/netfilter/ipv6/nf_dup_ipv6.h @@ -2,6 +2,8 @@ #ifndef _NF_DUP_IPV6_H_ #define _NF_DUP_IPV6_H_ +#include + void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum, const struct in6_addr *gw, int oif); diff --git a/include/net/netfilter/nf_conntrack_bridge.h b/include/net/netfilter/nf_conntrack_bridge.h index 9a5514d5bc51..8f2e5b2ab523 100644 --- a/include/net/netfilter/nf_conntrack_bridge.h +++ b/include/net/netfilter/nf_conntrack_bridge.h @@ -1,6 +1,10 @@ #ifndef NF_CONNTRACK_BRIDGE_ #define NF_CONNTRACK_BRIDGE_ +#include +#include +#include + struct nf_ct_bridge_info { struct nf_hook_ops *ops; unsigned int ops_size; diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h index f32fc8289473..9645b47fa7e4 100644 --- a/include/net/netfilter/nf_conntrack_count.h +++ b/include/net/netfilter/nf_conntrack_count.h @@ -2,6 +2,9 @@ #define _NF_CONNTRACK_COUNT_H #include +#include +#include +#include struct nf_conncount_data; diff --git a/include/net/netfilter/nf_dup_netdev.h b/include/net/netfilter/nf_dup_netdev.h index 2a6f6dcad3d9..181672672160 100644 --- a/include/net/netfilter/nf_dup_netdev.h +++ b/include/net/netfilter/nf_dup_netdev.h @@ -2,6 +2,8 @@ #ifndef _NF_DUP_NETDEV_H_ #define _NF_DUP_NETDEV_H_ +#include + void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif); void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif); diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index d8c187936bec..7249e331bd0b 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -6,6 +6,7 @@ #include #include #include +#include #include #include diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h index 97d7033e93a4..efae84646353 100644 --- a/include/net/netfilter/nf_nat_helper.h +++ b/include/net/netfilter/nf_nat_helper.h @@ -3,9 +3,9 @@ #define _NF_NAT_HELPER_H /* NAT protocol helper routines. */ +#include #include - -struct sk_buff; +#include /* These return true or false. 
*/ bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct, diff --git a/include/net/netfilter/nf_nat_redirect.h b/include/net/netfilter/nf_nat_redirect.h index c129aacc8ae8..2418653a66db 100644 --- a/include/net/netfilter/nf_nat_redirect.h +++ b/include/net/netfilter/nf_nat_redirect.h @@ -2,6 +2,9 @@ #ifndef _NF_NAT_REDIRECT_H_ #define _NF_NAT_REDIRECT_H_ +#include +#include + unsigned int nf_nat_redirect_ipv4(struct sk_buff *skb, const struct nf_nat_ipv4_multi_range_compat *mr, diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index 3cb6dcf53a4e..359b80b43169 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h @@ -5,6 +5,8 @@ #include #include #include +#include +#include /* Each queued (to userspace) skbuff has one of these. */ struct nf_queue_entry { diff --git a/include/net/netfilter/nf_reject.h b/include/net/netfilter/nf_reject.h index 221f877f29d1..9051c3a0c8e7 100644 --- a/include/net/netfilter/nf_reject.h +++ b/include/net/netfilter/nf_reject.h @@ -2,6 +2,9 @@ #ifndef _NF_REJECT_H #define _NF_REJECT_H +#include +#include + static inline bool nf_reject_verify_csum(__u8 proto) { /* Skip protocols that don't use 16-bit one's complement checksum diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h index dabe6fdb553a..d0f1c537b017 100644 --- a/include/net/netfilter/nf_tables_ipv6.h +++ b/include/net/netfilter/nf_tables_ipv6.h @@ -4,6 +4,7 @@ #include #include +#include static inline void nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, struct sk_buff *skb) diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h index e4c4d8eaca8c..628b6fa579cd 100644 --- a/include/net/netfilter/nft_fib.h +++ b/include/net/netfilter/nft_fib.h @@ -2,6 +2,8 @@ #ifndef _NFT_FIB_H_ #define _NFT_FIB_H_ +#include + struct nft_fib { enum nft_registers dreg:8; u8 result; diff --git a/include/net/netfilter/nft_meta.h b/include/net/netfilter/nft_meta.h index 5c69e9b09388..07e2fd507963 100644 --- a/include/net/netfilter/nft_meta.h +++ b/include/net/netfilter/nft_meta.h @@ -2,6 +2,8 @@ #ifndef _NFT_META_H_ #define _NFT_META_H_ +#include + struct nft_meta { enum nft_meta_keys key:8; union { diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h index de80c50761f0..56b123a42220 100644 --- a/include/net/netfilter/nft_reject.h +++ b/include/net/netfilter/nft_reject.h @@ -2,6 +2,11 @@ #ifndef _NFT_REJECT_H_ #define _NFT_REJECT_H_ +#include +#include +#include +#include + struct nft_reject { enum nft_reject_types type:8; u8 icmp_code; diff --git a/include/uapi/linux/netfilter/xt_policy.h b/include/uapi/linux/netfilter/xt_policy.h index 323bfa3074c5..4cf2ce2a8a44 100644 --- a/include/uapi/linux/netfilter/xt_policy.h +++ b/include/uapi/linux/netfilter/xt_policy.h @@ -2,6 +2,7 @@ #ifndef _XT_POLICY_H #define _XT_POLICY_H +#include #include #include #include -- cgit v1.2.3 From 448d5a55759a2e60208b75ceed4bd88cd4f5a963 Mon Sep 17 00:00:00 2001 From: Vidya Sagar Date: Tue, 13 Aug 2019 17:06:15 +0530 Subject: PCI: Add #defines for some of PCIe spec r4.0 features Add #defines only for the Data Link Feature and Physical Layer 16.0 GT/s features as defined in PCIe spec r4.0, sec 7.7.4 for Data Link Feature and sec 7.7.5 for Physical Layer 16.0 GT/s. 
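For illustration only (the patch adds just the #defines; the helpers below are a hedged sketch, not part of it, and assume the standard PCI config accessors):

#include <linux/pci.h>

/* Check whether a device exposes the Data Link Feature capability
 * and advertises Data Link Feature Exchange. */
static bool example_dlf_exchange_supported(struct pci_dev *pdev)
{
	u32 cap;
	u16 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DLF);

	if (!pos)
		return false;	/* capability not present */

	pci_read_config_dword(pdev, pos + PCI_DLF_CAP, &cap);
	return !!(cap & PCI_DLF_EXCHANGE_ENABLE);
}

/* Read the upstream-port TX preset for one lane from the 16.0 GT/s
 * Lane Equalization Control registers (one byte per lane: downstream
 * port preset in the low nibble, upstream port preset in the high
 * nibble). */
static u8 example_pl16gt_usp_tx_preset(struct pci_dev *pdev, int lane)
{
	u8 ctrl;
	u16 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PL_16GT);

	if (!pos)
		return 0;

	pci_read_config_byte(pdev, pos + PCI_PL_16GT_LE_CTRL + lane, &ctrl);
	return (ctrl & PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK) >>
	       PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT;
}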
Signed-off-by: Vidya Sagar Signed-off-by: Lorenzo Pieralisi Reviewed-by: Thierry Reding Acked-by: Bjorn Helgaas --- include/uapi/linux/pci_regs.h | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index f28e562d7ca8..d28d0319d932 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -713,7 +713,9 @@ #define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */ #define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */ #define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */ -#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM +#define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */ +#define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */ +#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PL_16GT #define PCI_EXT_CAP_DSN_SIZEOF 12 #define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40 @@ -1053,4 +1055,14 @@ #define PCI_L1SS_CTL1_LTR_L12_TH_SCALE 0xe0000000 /* LTR_L1.2_THRESHOLD_Scale */ #define PCI_L1SS_CTL2 0x0c /* Control 2 Register */ +/* Data Link Feature */ +#define PCI_DLF_CAP 0x04 /* Capabilities Register */ +#define PCI_DLF_EXCHANGE_ENABLE 0x80000000 /* Data Link Feature Exchange Enable */ + +/* Physical Layer 16.0 GT/s */ +#define PCI_PL_16GT_LE_CTRL 0x20 /* Lane Equalization Control Register */ +#define PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK 0x0000000F +#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK 0x000000F0 +#define PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT 4 + #endif /* LINUX_PCI_REGS_H */ -- cgit v1.2.3 From e9dc7c60507c822992e793bd3845f0556ae0ff98 Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Sat, 10 Aug 2019 21:18:09 +0200 Subject: can: gw: use struct canfd_frame as internal data structure To prepare for CAN FD support, this patch implements the first adaptations in data structures for CAN FD without changing the current functionality. Additionally, some code at the end of this patch is moved or indented to simplify the review of the next implementation step. Signed-off-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- include/uapi/linux/can/gw.h | 5 +- net/can/gw.c | 222 +++++++++++++++++++++++--------------- 2 files changed, 120 insertions(+), 107 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h index 7bee7a0b9800..ed811bc463b5 100644 --- a/include/uapi/linux/can/gw.h +++ b/include/uapi/linux/can/gw.h @@ -93,10 +93,11 @@ enum { /* CAN frame elements that are affected by curr.
3 CAN frame modifications */ #define CGW_MOD_ID 0x01 -#define CGW_MOD_DLC 0x02 +#define CGW_MOD_DLC 0x02 /* contains the data length in bytes */ +#define CGW_MOD_LEN CGW_MOD_DLC /* CAN FD length representation */ #define CGW_MOD_DATA 0x04 -#define CGW_FRAME_MODS 3 /* ID DLC DATA */ +#define CGW_FRAME_MODS 3 /* ID DLC/LEN DATA */ #define MAX_MODFUNCTIONS (CGW_MOD_FUNCS * CGW_FRAME_MODS) diff --git a/net/can/gw.c b/net/can/gw.c index 4c6ad0054a4c..3a1ad206fbef 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -85,10 +85,10 @@ static struct kmem_cache *cgw_cache __read_mostly; /* structure that contains the (on-the-fly) CAN frame modifications */ struct cf_mod { struct { - struct can_frame and; - struct can_frame or; - struct can_frame xor; - struct can_frame set; + struct canfd_frame and; + struct canfd_frame or; + struct canfd_frame xor; + struct canfd_frame set; } modframe; struct { u8 and; @@ -96,7 +96,7 @@ struct cf_mod { u8 xor; u8 set; } modtype; - void (*modfunc[MAX_MODFUNCTIONS])(struct can_frame *cf, + void (*modfunc[MAX_MODFUNCTIONS])(struct canfd_frame *cf, struct cf_mod *mod); /* CAN frame checksum calculation after CAN frame modifications */ @@ -105,8 +105,10 @@ struct cf_mod { struct cgw_csum_crc8 crc8; } csum; struct { - void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor); - void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8); + void (*xor)(struct canfd_frame *cf, + struct cgw_csum_xor *xor); + void (*crc8)(struct canfd_frame *cf, + struct cgw_csum_crc8 *crc8); } csumfunc; u32 uid; }; @@ -149,23 +151,23 @@ struct cgw_job { /* modification functions that are invoked in the hot path in can_can_gw_rcv */ -#define MODFUNC(func, op) static void func(struct can_frame *cf, \ +#define MODFUNC(func, op) static void func(struct canfd_frame *cf, \ struct cf_mod *mod) { op ; } MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id) -MODFUNC(mod_and_dlc, cf->can_dlc &= mod->modframe.and.can_dlc) +MODFUNC(mod_and_len, cf->len &= mod->modframe.and.len) MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data) MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id) -MODFUNC(mod_or_dlc, cf->can_dlc |= mod->modframe.or.can_dlc) +MODFUNC(mod_or_len, cf->len |= mod->modframe.or.len) MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data) MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id) -MODFUNC(mod_xor_dlc, cf->can_dlc ^= mod->modframe.xor.can_dlc) +MODFUNC(mod_xor_len, cf->len ^= mod->modframe.xor.len) MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data) MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id) -MODFUNC(mod_set_dlc, cf->can_dlc = mod->modframe.set.can_dlc) +MODFUNC(mod_set_len, cf->len = mod->modframe.set.len) MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data) -static inline void canframecpy(struct can_frame *dst, struct can_frame *src) +static void canframecpy(struct canfd_frame *dst, struct can_frame *src) { /* Copy the struct members separately to ensure that no uninitialized * data are copied in the 3 bytes hole of the struct. This is needed @@ -173,12 +175,14 @@ static inline void canframecpy(struct can_frame *dst, struct can_frame *src) */ dst->can_id = src->can_id; - dst->can_dlc = src->can_dlc; + dst->len = src->can_dlc; *(u64 *)dst->data = *(u64 *)src->data; } static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re) { + s8 dlen = CAN_MAX_DLEN; + /* absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0] * relative to received dlc -1 .. -8 : * e.g. 
for received dlc = 8 @@ -187,27 +191,27 @@ static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re) * -8 => index = 0 (data[0]) */ - if (fr > -9 && fr < 8 && - to > -9 && to < 8 && - re > -9 && re < 8) + if (fr >= -dlen && fr < dlen && + to >= -dlen && to < dlen && + re >= -dlen && re < dlen) return 0; else return -EINVAL; } -static inline int calc_idx(int idx, int rx_dlc) +static inline int calc_idx(int idx, int rx_len) { if (idx < 0) - return rx_dlc + idx; + return rx_len + idx; else return idx; } -static void cgw_csum_xor_rel(struct can_frame *cf, struct cgw_csum_xor *xor) +static void cgw_csum_xor_rel(struct canfd_frame *cf, struct cgw_csum_xor *xor) { - int from = calc_idx(xor->from_idx, cf->can_dlc); - int to = calc_idx(xor->to_idx, cf->can_dlc); - int res = calc_idx(xor->result_idx, cf->can_dlc); + int from = calc_idx(xor->from_idx, cf->len); + int to = calc_idx(xor->to_idx, cf->len); + int res = calc_idx(xor->result_idx, cf->len); u8 val = xor->init_xor_val; int i; @@ -225,7 +229,7 @@ static void cgw_csum_xor_rel(struct can_frame *cf, struct cgw_csum_xor *xor) cf->data[res] = val; } -static void cgw_csum_xor_pos(struct can_frame *cf, struct cgw_csum_xor *xor) +static void cgw_csum_xor_pos(struct canfd_frame *cf, struct cgw_csum_xor *xor) { u8 val = xor->init_xor_val; int i; @@ -236,7 +240,7 @@ static void cgw_csum_xor_pos(struct can_frame *cf, struct cgw_csum_xor *xor) cf->data[xor->result_idx] = val; } -static void cgw_csum_xor_neg(struct can_frame *cf, struct cgw_csum_xor *xor) +static void cgw_csum_xor_neg(struct canfd_frame *cf, struct cgw_csum_xor *xor) { u8 val = xor->init_xor_val; int i; @@ -247,11 +251,12 @@ static void cgw_csum_xor_neg(struct can_frame *cf, struct cgw_csum_xor *xor) cf->data[xor->result_idx] = val; } -static void cgw_csum_crc8_rel(struct can_frame *cf, struct cgw_csum_crc8 *crc8) +static void cgw_csum_crc8_rel(struct canfd_frame *cf, + struct cgw_csum_crc8 *crc8) { - int from = calc_idx(crc8->from_idx, cf->can_dlc); - int to = calc_idx(crc8->to_idx, cf->can_dlc); - int res = calc_idx(crc8->result_idx, cf->can_dlc); + int from = calc_idx(crc8->from_idx, cf->len); + int to = calc_idx(crc8->to_idx, cf->len); + int res = calc_idx(crc8->result_idx, cf->len); u8 crc = crc8->init_crc_val; int i; @@ -284,7 +289,8 @@ static void cgw_csum_crc8_rel(struct can_frame *cf, struct cgw_csum_crc8 *crc8) cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val; } -static void cgw_csum_crc8_pos(struct can_frame *cf, struct cgw_csum_crc8 *crc8) +static void cgw_csum_crc8_pos(struct canfd_frame *cf, + struct cgw_csum_crc8 *crc8) { u8 crc = crc8->init_crc_val; int i; @@ -310,7 +316,8 @@ static void cgw_csum_crc8_pos(struct can_frame *cf, struct cgw_csum_crc8 *crc8) cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val; } -static void cgw_csum_crc8_neg(struct can_frame *cf, struct cgw_csum_crc8 *crc8) +static void cgw_csum_crc8_neg(struct canfd_frame *cf, + struct cgw_csum_crc8 *crc8) { u8 crc = crc8->init_crc_val; int i; @@ -340,7 +347,7 @@ static void cgw_csum_crc8_neg(struct can_frame *cf, struct cgw_csum_crc8 *crc8) static void can_can_gw_rcv(struct sk_buff *skb, void *data) { struct cgw_job *gwj = (struct cgw_job *)data; - struct can_frame *cf; + struct canfd_frame *cf; struct sk_buff *nskb; int modidx = 0; @@ -400,7 +407,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) nskb->dev = gwj->dst.dev; /* pointer to modifiable CAN frame */ - cf = (struct can_frame *)nskb->data; + cf = (struct canfd_frame *)nskb->data; /* perform preprocessed modification functions if 
there are any */ while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) @@ -409,22 +416,22 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) /* Has the CAN frame been modified? */ if (modidx) { /* get available space for the processed CAN frame type */ - int max_len = nskb->len - offsetof(struct can_frame, data); + int max_len = nskb->len - offsetof(struct canfd_frame, data); /* dlc may have changed, make sure it fits to the CAN frame */ - if (cf->can_dlc > max_len) + if (cf->len > max_len) goto out_delete; /* check for checksum updates in classic CAN length only */ if (gwj->mod.csumfunc.crc8) { - if (cf->can_dlc > 8) + if (cf->len > 8) goto out_delete; (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); } if (gwj->mod.csumfunc.xor) { - if (cf->can_dlc > 8) + if (cf->len > 8) goto out_delete; (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); @@ -492,7 +499,6 @@ static int cgw_notifier(struct notifier_block *nb, static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, u32 pid, u32 seq, int flags) { - struct cgw_frame_mod mb; struct rtcanmsg *rtcan; struct nlmsghdr *nlh; @@ -529,32 +535,36 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, goto cancel; } - if (gwj->mod.modtype.and) { - memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.and; - if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0) - goto cancel; - } + if (1) { + struct cgw_frame_mod mb; - if (gwj->mod.modtype.or) { - memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.or; - if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0) - goto cancel; - } + if (gwj->mod.modtype.and) { + memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); + mb.modtype = gwj->mod.modtype.and; + if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0) + goto cancel; + } - if (gwj->mod.modtype.xor) { - memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.xor; - if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0) - goto cancel; - } + if (gwj->mod.modtype.or) { + memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); + mb.modtype = gwj->mod.modtype.or; + if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0) + goto cancel; + } - if (gwj->mod.modtype.set) { - memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); - mb.modtype = gwj->mod.modtype.set; - if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0) - goto cancel; + if (gwj->mod.modtype.xor) { + memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); + mb.modtype = gwj->mod.modtype.xor; + if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0) + goto cancel; + } + + if (gwj->mod.modtype.set) { + memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); + mb.modtype = gwj->mod.modtype.set; + if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0) + goto cancel; + } } if (gwj->mod.uid) { @@ -642,7 +652,6 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, u8 gwtype, void *gwtypeattr, u8 *limhops) { struct nlattr *tb[CGW_MAX + 1]; - struct cgw_frame_mod mb; int modidx = 0; int err = 0; @@ -662,69 +671,72 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, } /* check for AND/OR/XOR/SET modifications */ + if (1) { + struct cgw_frame_mod mb; - if (tb[CGW_MOD_AND]) { - nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN); + if (tb[CGW_MOD_AND]) { + nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN); - canframecpy(&mod->modframe.and, &mb.cf); - mod->modtype.and = mb.modtype; + canframecpy(&mod->modframe.and, &mb.cf); + mod->modtype.and = 
mb.modtype; - if (mb.modtype & CGW_MOD_ID) - mod->modfunc[modidx++] = mod_and_id; + if (mb.modtype & CGW_MOD_ID) + mod->modfunc[modidx++] = mod_and_id; - if (mb.modtype & CGW_MOD_DLC) - mod->modfunc[modidx++] = mod_and_dlc; + if (mb.modtype & CGW_MOD_LEN) + mod->modfunc[modidx++] = mod_and_len; - if (mb.modtype & CGW_MOD_DATA) - mod->modfunc[modidx++] = mod_and_data; - } + if (mb.modtype & CGW_MOD_DATA) + mod->modfunc[modidx++] = mod_and_data; + } - if (tb[CGW_MOD_OR]) { - nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN); + if (tb[CGW_MOD_OR]) { + nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN); - canframecpy(&mod->modframe.or, &mb.cf); - mod->modtype.or = mb.modtype; + canframecpy(&mod->modframe.or, &mb.cf); + mod->modtype.or = mb.modtype; - if (mb.modtype & CGW_MOD_ID) - mod->modfunc[modidx++] = mod_or_id; + if (mb.modtype & CGW_MOD_ID) + mod->modfunc[modidx++] = mod_or_id; - if (mb.modtype & CGW_MOD_DLC) - mod->modfunc[modidx++] = mod_or_dlc; + if (mb.modtype & CGW_MOD_LEN) + mod->modfunc[modidx++] = mod_or_len; - if (mb.modtype & CGW_MOD_DATA) - mod->modfunc[modidx++] = mod_or_data; - } + if (mb.modtype & CGW_MOD_DATA) + mod->modfunc[modidx++] = mod_or_data; + } - if (tb[CGW_MOD_XOR]) { - nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN); + if (tb[CGW_MOD_XOR]) { + nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN); - canframecpy(&mod->modframe.xor, &mb.cf); - mod->modtype.xor = mb.modtype; + canframecpy(&mod->modframe.xor, &mb.cf); + mod->modtype.xor = mb.modtype; - if (mb.modtype & CGW_MOD_ID) - mod->modfunc[modidx++] = mod_xor_id; + if (mb.modtype & CGW_MOD_ID) + mod->modfunc[modidx++] = mod_xor_id; - if (mb.modtype & CGW_MOD_DLC) - mod->modfunc[modidx++] = mod_xor_dlc; + if (mb.modtype & CGW_MOD_LEN) + mod->modfunc[modidx++] = mod_xor_len; - if (mb.modtype & CGW_MOD_DATA) - mod->modfunc[modidx++] = mod_xor_data; - } + if (mb.modtype & CGW_MOD_DATA) + mod->modfunc[modidx++] = mod_xor_data; + } - if (tb[CGW_MOD_SET]) { - nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN); + if (tb[CGW_MOD_SET]) { + nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN); - canframecpy(&mod->modframe.set, &mb.cf); - mod->modtype.set = mb.modtype; + canframecpy(&mod->modframe.set, &mb.cf); + mod->modtype.set = mb.modtype; - if (mb.modtype & CGW_MOD_ID) - mod->modfunc[modidx++] = mod_set_id; + if (mb.modtype & CGW_MOD_ID) + mod->modfunc[modidx++] = mod_set_id; - if (mb.modtype & CGW_MOD_DLC) - mod->modfunc[modidx++] = mod_set_dlc; + if (mb.modtype & CGW_MOD_LEN) + mod->modfunc[modidx++] = mod_set_len; - if (mb.modtype & CGW_MOD_DATA) - mod->modfunc[modidx++] = mod_set_data; + if (mb.modtype & CGW_MOD_DATA) + mod->modfunc[modidx++] = mod_set_data; + } } /* check for checksum operations after CAN frame modifications */ -- cgit v1.2.3 From 456a8a646b2563438c16a9b27decf9aa717f1ebb Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Sat, 10 Aug 2019 21:18:10 +0200 Subject: can: gw: add support for CAN FD frames Introduce CAN FD support which needs an extension of the netlink API to pass CAN FD type content to the kernel which has a different size to Classic CAN. Additionally the struct canfd_frame has a new 'flags' element that can now be modified with can-gw. The new CGW_FLAGS_CAN_FD option flag defines whether the routing job handles Classic CAN or CAN FD frames. This setting is very strict at reception time and enables the new possibilities, e.g. CGW_FDMOD_* and modifying the flags element of struct canfd_frame, only when CGW_FLAGS_CAN_FD is set. 
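As a hedged userspace sketch of the new interface (the rtnetlink message assembly around it is assumed and omitted): fill a struct cgw_fdframe_mod and attach it as a CGW_FDMOD_SET attribute to the RTM_NEWROUTE can-gw message that creates the routing job.

#include <linux/can.h>
#include <linux/can/gw.h>
#include <string.h>

/* Replace CAN ID, flags, length and payload of routed CAN FD frames. */
static void fill_fdmod_set(struct cgw_fdframe_mod *mod)
{
	memset(mod, 0, sizeof(*mod));

	mod->cf.can_id = 0x123;			/* replacement CAN ID (example value) */
	mod->cf.flags  = CANFD_BRS;		/* replacement flags: bit-rate switch */
	mod->cf.len    = 16;			/* replacement length in bytes */
	memset(mod->cf.data, 0xCA, 16);		/* replacement payload (example) */

	/* select which elements of cf are actually applied */
	mod->modtype = CGW_MOD_ID | CGW_MOD_FLAGS | CGW_MOD_LEN | CGW_MOD_DATA;
}

Per the parsing code below, CGW_FDMOD_* attributes are only honoured when the job was created with CGW_FLAGS_CAN_FD in rtcanmsg.flags; without that flag the kernel expects the Classic CAN CGW_MOD_* attributes instead.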
Signed-off-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- include/uapi/linux/can/gw.h | 14 ++- net/can/gw.c | 214 ++++++++++++++++++++++++++++++++++++++------ 2 files changed, 200 insertions(+), 28 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h index ed811bc463b5..3aea5388c8e4 100644 --- a/include/uapi/linux/can/gw.h +++ b/include/uapi/linux/can/gw.h @@ -80,6 +80,10 @@ enum { CGW_DELETED, /* number of deleted CAN frames (see max_hops param) */ CGW_LIM_HOPS, /* limit the number of hops of this specific rule */ CGW_MOD_UID, /* user defined identifier for modification updates */ + CGW_FDMOD_AND, /* CAN FD frame modification binary AND */ + CGW_FDMOD_OR, /* CAN FD frame modification binary OR */ + CGW_FDMOD_XOR, /* CAN FD frame modification binary XOR */ + CGW_FDMOD_SET, /* CAN FD frame modification set alternate values */ __CGW_MAX }; @@ -88,6 +92,7 @@ enum { #define CGW_FLAGS_CAN_ECHO 0x01 #define CGW_FLAGS_CAN_SRC_TSTAMP 0x02 #define CGW_FLAGS_CAN_IIF_TX_OK 0x04 +#define CGW_FLAGS_CAN_FD 0x08 #define CGW_MOD_FUNCS 4 /* AND OR XOR SET */ @@ -96,8 +101,9 @@ enum { #define CGW_MOD_DLC 0x02 /* contains the data length in bytes */ #define CGW_MOD_LEN CGW_MOD_DLC /* CAN FD length representation */ #define CGW_MOD_DATA 0x04 +#define CGW_MOD_FLAGS 0x08 /* CAN FD flags */ -#define CGW_FRAME_MODS 3 /* ID DLC/LEN DATA */ +#define CGW_FRAME_MODS 4 /* ID DLC/LEN DATA FLAGS */ #define MAX_MODFUNCTIONS (CGW_MOD_FUNCS * CGW_FRAME_MODS) @@ -106,7 +112,13 @@ struct cgw_frame_mod { __u8 modtype; } __attribute__((packed)); +struct cgw_fdframe_mod { + struct canfd_frame cf; + __u8 modtype; +} __attribute__((packed)); + #define CGW_MODATTR_LEN sizeof(struct cgw_frame_mod) +#define CGW_FDMODATTR_LEN sizeof(struct cgw_fdframe_mod) struct cgw_csum_xor { __s8 from_idx; diff --git a/net/can/gw.c b/net/can/gw.c index 3a1ad206fbef..65d60c93af29 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) /* gw.c - CAN frame Gateway/Router/Bridge with netlink interface * - * Copyright (c) 2017 Volkswagen Group Electronic Research + * Copyright (c) 2019 Volkswagen Group Electronic Research * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -59,7 +59,7 @@ #include #include -#define CAN_GW_VERSION "20170425" +#define CAN_GW_VERSION "20190810" #define CAN_GW_NAME "can-gw" MODULE_DESCRIPTION("PF_CAN netlink gateway"); @@ -156,17 +156,50 @@ struct cgw_job { MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id) MODFUNC(mod_and_len, cf->len &= mod->modframe.and.len) +MODFUNC(mod_and_flags, cf->flags &= mod->modframe.and.flags) MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data) MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id) MODFUNC(mod_or_len, cf->len |= mod->modframe.or.len) +MODFUNC(mod_or_flags, cf->flags |= mod->modframe.or.flags) MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data) MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id) MODFUNC(mod_xor_len, cf->len ^= mod->modframe.xor.len) +MODFUNC(mod_xor_flags, cf->flags ^= mod->modframe.xor.flags) MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data) MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id) MODFUNC(mod_set_len, cf->len = mod->modframe.set.len) +MODFUNC(mod_set_flags, cf->flags = mod->modframe.set.flags) MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data) +static void mod_and_fddata(struct canfd_frame *cf, struct cf_mod *mod) +{ + int i; + + for (i = 0; i < CANFD_MAX_DLEN; i += 8) + *(u64 *)(cf->data + i) &= *(u64 *)(mod->modframe.and.data + i); +} + +static void mod_or_fddata(struct canfd_frame *cf, struct cf_mod *mod) +{ + int i; + + for (i = 0; i < CANFD_MAX_DLEN; i += 8) + *(u64 *)(cf->data + i) |= *(u64 *)(mod->modframe.or.data + i); +} + +static void mod_xor_fddata(struct canfd_frame *cf, struct cf_mod *mod) +{ + int i; + + for (i = 0; i < CANFD_MAX_DLEN; i += 8) + *(u64 *)(cf->data + i) ^= *(u64 *)(mod->modframe.xor.data + i); +} + +static void mod_set_fddata(struct canfd_frame *cf, struct cf_mod *mod) +{ + memcpy(cf->data, mod->modframe.set.data, CANFD_MAX_DLEN); +} + static void canframecpy(struct canfd_frame *dst, struct can_frame *src) { /* Copy the struct members separately to ensure that no uninitialized @@ -179,10 +212,26 @@ static void canframecpy(struct canfd_frame *dst, struct can_frame *src) *(u64 *)dst->data = *(u64 *)src->data; } -static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re) +static void canfdframecpy(struct canfd_frame *dst, struct canfd_frame *src) +{ + /* Copy the struct members separately to ensure that no uninitialized + * data are copied in the 2 bytes hole of the struct. This is needed + * to make easy compares of the data in the struct cf_mod. + */ + + dst->can_id = src->can_id; + dst->flags = src->flags; + dst->len = src->len; + memcpy(dst->data, src->data, CANFD_MAX_DLEN); +} + +static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re, struct rtcanmsg *r) { s8 dlen = CAN_MAX_DLEN; + if (r->flags & CGW_FLAGS_CAN_FD) + dlen = CANFD_MAX_DLEN; + /* absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0] * relative to received dlc -1 .. -8 : * e.g. for received dlc = 8 @@ -351,6 +400,15 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) struct sk_buff *nskb; int modidx = 0; + /* process strictly Classic CAN or CAN FD frames */ + if (gwj->flags & CGW_FLAGS_CAN_FD) { + if (skb->len != CANFD_MTU) + return; + } else { + if (skb->len != CAN_MTU) + return; + } + /* Do not handle CAN frames routed more than 'max_hops' times. * In general we should never catch this delimiter which is intended * to cover a misconfiguration protection (e.g. 
circular CAN routes). @@ -419,23 +477,19 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) int max_len = nskb->len - offsetof(struct canfd_frame, data); /* dlc may have changed, make sure it fits to the CAN frame */ - if (cf->len > max_len) - goto out_delete; - - /* check for checksum updates in classic CAN length only */ - if (gwj->mod.csumfunc.crc8) { - if (cf->len > 8) - goto out_delete; - - (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); + if (cf->len > max_len) { + /* delete frame due to misconfiguration */ + gwj->deleted_frames++; + kfree_skb(nskb); + return; } - if (gwj->mod.csumfunc.xor) { - if (cf->len > 8) - goto out_delete; + /* check for checksum updates */ + if (gwj->mod.csumfunc.crc8) + (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); + if (gwj->mod.csumfunc.xor) (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); - } } /* clear the skb timestamp if not configured the other way */ @@ -447,13 +501,6 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) gwj->dropped_frames++; else gwj->handled_frames++; - - return; - - out_delete: - /* delete frame due to misconfiguration */ - gwj->deleted_frames++; - kfree_skb(nskb); } static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj) @@ -535,7 +582,37 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, goto cancel; } - if (1) { + if (gwj->flags & CGW_FLAGS_CAN_FD) { + struct cgw_fdframe_mod mb; + + if (gwj->mod.modtype.and) { + memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); + mb.modtype = gwj->mod.modtype.and; + if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0) + goto cancel; + } + + if (gwj->mod.modtype.or) { + memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); + mb.modtype = gwj->mod.modtype.or; + if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0) + goto cancel; + } + + if (gwj->mod.modtype.xor) { + memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); + mb.modtype = gwj->mod.modtype.xor; + if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0) + goto cancel; + } + + if (gwj->mod.modtype.set) { + memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); + mb.modtype = gwj->mod.modtype.set; + if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0) + goto cancel; + } + } else { struct cgw_frame_mod mb; if (gwj->mod.modtype.and) { @@ -645,6 +722,10 @@ static const struct nla_policy cgw_policy[CGW_MAX + 1] = { [CGW_FILTER] = { .len = sizeof(struct can_filter) }, [CGW_LIM_HOPS] = { .type = NLA_U8 }, [CGW_MOD_UID] = { .type = NLA_U32 }, + [CGW_FDMOD_AND] = { .len = sizeof(struct cgw_fdframe_mod) }, + [CGW_FDMOD_OR] = { .len = sizeof(struct cgw_fdframe_mod) }, + [CGW_FDMOD_XOR] = { .len = sizeof(struct cgw_fdframe_mod) }, + [CGW_FDMOD_SET] = { .len = sizeof(struct cgw_fdframe_mod) }, }; /* check for common and gwtype specific attributes */ @@ -652,6 +733,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, u8 gwtype, void *gwtypeattr, u8 *limhops) { struct nlattr *tb[CGW_MAX + 1]; + struct rtcanmsg *r = nlmsg_data(nlh); int modidx = 0; int err = 0; @@ -671,7 +753,85 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, } /* check for AND/OR/XOR/SET modifications */ - if (1) { + if (r->flags & CGW_FLAGS_CAN_FD) { + struct cgw_fdframe_mod mb; + + if (tb[CGW_FDMOD_AND]) { + nla_memcpy(&mb, tb[CGW_FDMOD_AND], CGW_FDMODATTR_LEN); + + canfdframecpy(&mod->modframe.and, &mb.cf); + mod->modtype.and = mb.modtype; + + if (mb.modtype & CGW_MOD_ID) + mod->modfunc[modidx++] = mod_and_id; + + if (mb.modtype & CGW_MOD_LEN) + 
mod->modfunc[modidx++] = mod_and_len; + + if (mb.modtype & CGW_MOD_FLAGS) + mod->modfunc[modidx++] = mod_and_flags; + + if (mb.modtype & CGW_MOD_DATA) + mod->modfunc[modidx++] = mod_and_fddata; + } + + if (tb[CGW_FDMOD_OR]) { + nla_memcpy(&mb, tb[CGW_FDMOD_OR], CGW_FDMODATTR_LEN); + + canfdframecpy(&mod->modframe.or, &mb.cf); + mod->modtype.or = mb.modtype; + + if (mb.modtype & CGW_MOD_ID) + mod->modfunc[modidx++] = mod_or_id; + + if (mb.modtype & CGW_MOD_LEN) + mod->modfunc[modidx++] = mod_or_len; + + if (mb.modtype & CGW_MOD_FLAGS) + mod->modfunc[modidx++] = mod_or_flags; + + if (mb.modtype & CGW_MOD_DATA) + mod->modfunc[modidx++] = mod_or_fddata; + } + + if (tb[CGW_FDMOD_XOR]) { + nla_memcpy(&mb, tb[CGW_FDMOD_XOR], CGW_FDMODATTR_LEN); + + canfdframecpy(&mod->modframe.xor, &mb.cf); + mod->modtype.xor = mb.modtype; + + if (mb.modtype & CGW_MOD_ID) + mod->modfunc[modidx++] = mod_xor_id; + + if (mb.modtype & CGW_MOD_LEN) + mod->modfunc[modidx++] = mod_xor_len; + + if (mb.modtype & CGW_MOD_FLAGS) + mod->modfunc[modidx++] = mod_xor_flags; + + if (mb.modtype & CGW_MOD_DATA) + mod->modfunc[modidx++] = mod_xor_fddata; + } + + if (tb[CGW_FDMOD_SET]) { + nla_memcpy(&mb, tb[CGW_FDMOD_SET], CGW_FDMODATTR_LEN); + + canfdframecpy(&mod->modframe.set, &mb.cf); + mod->modtype.set = mb.modtype; + + if (mb.modtype & CGW_MOD_ID) + mod->modfunc[modidx++] = mod_set_id; + + if (mb.modtype & CGW_MOD_LEN) + mod->modfunc[modidx++] = mod_set_len; + + if (mb.modtype & CGW_MOD_FLAGS) + mod->modfunc[modidx++] = mod_set_flags; + + if (mb.modtype & CGW_MOD_DATA) + mod->modfunc[modidx++] = mod_set_fddata; + } + } else { struct cgw_frame_mod mb; if (tb[CGW_MOD_AND]) { @@ -745,7 +905,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]); err = cgw_chk_csum_parms(c->from_idx, c->to_idx, - c->result_idx); + c->result_idx, r); if (err) return err; @@ -768,7 +928,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]); err = cgw_chk_csum_parms(c->from_idx, c->to_idx, - c->result_idx); + c->result_idx, r); if (err) return err; -- cgit v1.2.3 From 3ca3c4aad2efa2931b663acc4ece7a38b31071d1 Mon Sep 17 00:00:00 2001 From: Andre Hartmann Date: Sat, 23 Mar 2019 16:04:19 +0100 Subject: can: netlink: fix documentation typos This patch fixes some documentation typos in struct can_bittiming_const. 
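For context, the quantities named in the corrected comments combine as follows (a standard CAN bit-timing relation, not something introduced by this patch): one bit time = (1 + prop_seg + phase_seg1 + phase_seg2) time quanta, where the leading 1 is the fixed sync segment. For example, assuming a 125 ns time quantum with prop_seg = 6, phase_seg1 = 7 and phase_seg2 = 2, the bit time is (1 + 6 + 7 + 2) * 125 ns = 2 us, i.e. a 500 kbit/s bit rate.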
Signed-off-by: Andre Hartmann Signed-off-by: Marc Kleine-Budde --- include/uapi/linux/can/netlink.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h index 9f56fad4785b..1bc70d3a4d39 100644 --- a/include/uapi/linux/can/netlink.h +++ b/include/uapi/linux/can/netlink.h @@ -40,15 +40,15 @@ struct can_bittiming { }; /* - * CAN harware-dependent bit-timing constant + * CAN hardware-dependent bit-timing constant * * Used for calculating and checking bit-timing parameters */ struct can_bittiming_const { char name[16]; /* Name of the CAN controller hardware */ - __u32 tseg1_min; /* Time segement 1 = prop_seg + phase_seg1 */ + __u32 tseg1_min; /* Time segment 1 = prop_seg + phase_seg1 */ __u32 tseg1_max; - __u32 tseg2_min; /* Time segement 2 = phase_seg2 */ + __u32 tseg2_min; /* Time segment 2 = phase_seg2 */ __u32 tseg2_max; __u32 sjw_max; /* Synchronisation jump width */ __u32 brp_min; /* Bit-rate prescaler */ -- cgit v1.2.3 From 2c8ccb37b08fe364f02a9914daca474d43151453 Mon Sep 17 00:00:00 2001 From: Bernard Metzler Date: Fri, 9 Aug 2019 17:18:16 +0200 Subject: RDMA/siw: Change CQ flags from 64->32 bits This patch changes the driver/user shared (mmapped) CQ notification flags field from an unsigned 64-bit to an unsigned 32-bit quantity. This enables building siw on 32-bit architectures. This patch changes the siw-abi, but as siw was only just merged in this merge window cycle, there are no released kernels with the prior abi. We make no attempt to be binary compatible with siw user space libraries from before the merge of siw into the upstream kernel; compatibility is only guaranteed going forward, between upstream kernels and the siw libraries provided by upstream rdma-core.
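For illustration only (not part of the patch): a stand-alone sketch comparing the old and new shared control layouts. The struct names here are invented for the comparison; the real definition is struct siw_cq_ctrl in the siw-abi diff below. The point is that the control area keeps its 8-byte size, so the layout of the shared CQ mapping does not move, while the flags word narrows to a width that 32-bit architectures can store atomically.

#include <stdint.h>

struct siw_cq_ctrl_old {        /* prior ABI: one 64-bit notify word */
        uint64_t notify;
};

struct siw_cq_ctrl_new {        /* this patch: 32-bit flags plus explicit pad */
        uint32_t flags;
        uint32_t pad;
};

/* same total size, so the CQE array and control word keep their offsets */
_Static_assert(sizeof(struct siw_cq_ctrl_old) == sizeof(struct siw_cq_ctrl_new),
               "control area size unchanged");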
Signed-off-by: Bernard Metzler Link: https://lore.kernel.org/r/20190809151816.13018-1-bmt@zurich.ibm.com Signed-off-by: Doug Ledford --- drivers/infiniband/sw/siw/Kconfig | 2 +- drivers/infiniband/sw/siw/siw.h | 2 +- drivers/infiniband/sw/siw/siw_qp.c | 14 ++++++++++---- drivers/infiniband/sw/siw/siw_verbs.c | 16 +++++++++++----- include/uapi/rdma/siw-abi.h | 3 ++- 5 files changed, 25 insertions(+), 12 deletions(-) (limited to 'include/uapi') diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig index dace276aea14..b622fc62f2cd 100644 --- a/drivers/infiniband/sw/siw/Kconfig +++ b/drivers/infiniband/sw/siw/Kconfig @@ -1,6 +1,6 @@ config RDMA_SIW tristate "Software RDMA over TCP/IP (iWARP) driver" - depends on INET && INFINIBAND && LIBCRC32C && 64BIT + depends on INET && INFINIBAND && LIBCRC32C select DMA_VIRT_OPS help This driver implements the iWARP RDMA transport over diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index 03fd7b2f595f..77b1aabf6ff3 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -214,7 +214,7 @@ struct siw_wqe { struct siw_cq { struct ib_cq base_cq; spinlock_t lock; - u64 *notify; + struct siw_cq_ctrl *notify; struct siw_cqe *queue; u32 cq_put; u32 cq_get; diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c index e27bd5b35b96..0990307c5d2c 100644 --- a/drivers/infiniband/sw/siw/siw_qp.c +++ b/drivers/infiniband/sw/siw/siw_qp.c @@ -1013,18 +1013,24 @@ out: */ static bool siw_cq_notify_now(struct siw_cq *cq, u32 flags) { - u64 cq_notify; + u32 cq_notify; if (!cq->base_cq.comp_handler) return false; - cq_notify = READ_ONCE(*cq->notify); + /* Read application shared notification state */ + cq_notify = READ_ONCE(cq->notify->flags); if ((cq_notify & SIW_NOTIFY_NEXT_COMPLETION) || ((cq_notify & SIW_NOTIFY_SOLICITED) && (flags & SIW_WQE_SOLICITED))) { - /* dis-arm CQ */ - smp_store_mb(*cq->notify, SIW_NOTIFY_NOT); + /* + * CQ notification is one-shot: Since the + * current CQE causes user notification, + * the CQ gets dis-armed and must be re-armed + * by the user for a new notification. + */ + WRITE_ONCE(cq->notify->flags, SIW_NOTIFY_NOT); return true; } diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 32dc79d0e898..e7f3a2379d9d 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -1049,7 +1049,7 @@ int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr, spin_lock_init(&cq->lock); - cq->notify = &((struct siw_cq_ctrl *)&cq->queue[size])->notify; + cq->notify = (struct siw_cq_ctrl *)&cq->queue[size]; if (udata) { struct siw_uresp_create_cq uresp = {}; @@ -1141,11 +1141,17 @@ int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags) siw_dbg_cq(cq, "flags: 0x%02x\n", flags); if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED) - /* CQ event for next solicited completion */ - smp_store_mb(*cq->notify, SIW_NOTIFY_SOLICITED); + /* + * Enable CQ event for next solicited completion + * and make it visible to all associated producers. + */ + smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED); else - /* CQ event for any signalled completion */ - smp_store_mb(*cq->notify, SIW_NOTIFY_ALL); + /* + * Enable CQ event for any signalled completion + * and make it visible to all associated producers.
+ */ + smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL); if (flags & IB_CQ_REPORT_MISSED_EVENTS) return cq->cq_put - cq->cq_get; diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h index 7de68f1dc707..af735f55b291 100644 --- a/include/uapi/rdma/siw-abi.h +++ b/include/uapi/rdma/siw-abi.h @@ -180,6 +180,7 @@ struct siw_cqe { * to control CQ arming. */ struct siw_cq_ctrl { - __aligned_u64 notify; + __u32 flags; + __u32 pad; }; #endif -- cgit v1.2.3 From 4ed3350539aa931f58c939fcd803c7510584e143 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Tue, 13 Aug 2019 16:15:38 -0400 Subject: USB: usbfs: Add a capability flag for runtime suspend The recent commit 7794f486ed0b ("usbfs: Add ioctls for runtime power management") neglected to add a corresponding capability flag. This patch rectifies the omission. Signed-off-by: Alan Stern CC: Mayuresh Kulkarni Link: https://lore.kernel.org/r/Pine.LNX.4.44L0.1908131613490.1941-100000@iolanthe.rowland.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/devio.c | 9 ++++++++- include/uapi/linux/usbdevice_fs.h | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 5db7b6dbcb44..24d4a801ca64 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -44,6 +44,12 @@ #include "usb.h" +#ifdef CONFIG_PM +#define MAYBE_CAP_SUSPEND USBDEVFS_CAP_SUSPEND +#else +#define MAYBE_CAP_SUSPEND 0 +#endif + #define USB_MAXBUS 64 #define USB_DEVICE_MAX (USB_MAXBUS * 128) #define USB_SG_SIZE 16384 /* split-size for large txs */ @@ -2310,7 +2316,8 @@ static int proc_get_capabilities(struct usb_dev_state *ps, void __user *arg) caps = USBDEVFS_CAP_ZERO_PACKET | USBDEVFS_CAP_NO_PACKET_SIZE_LIM | USBDEVFS_CAP_REAP_AFTER_DISCONNECT | USBDEVFS_CAP_MMAP | - USBDEVFS_CAP_DROP_PRIVILEGES | USBDEVFS_CAP_CONNINFO_EX; + USBDEVFS_CAP_DROP_PRIVILEGES | + USBDEVFS_CAP_CONNINFO_EX | MAYBE_CAP_SUSPEND; if (!ps->dev->bus->no_stop_on_short) caps |= USBDEVFS_CAP_BULK_CONTINUATION; if (ps->dev->bus->sg_tablesize) diff --git a/include/uapi/linux/usbdevice_fs.h b/include/uapi/linux/usbdevice_fs.h index d24bbb6d3ca1..cf525cddeb94 100644 --- a/include/uapi/linux/usbdevice_fs.h +++ b/include/uapi/linux/usbdevice_fs.h @@ -158,6 +158,7 @@ struct usbdevfs_hub_portinfo { #define USBDEVFS_CAP_MMAP 0x20 #define USBDEVFS_CAP_DROP_PRIVILEGES 0x40 #define USBDEVFS_CAP_CONNINFO_EX 0x80 +#define USBDEVFS_CAP_SUSPEND 0x100 /* USBDEVFS_DISCONNECT_CLAIM flags & struct */ -- cgit v1.2.3 From 707816c8b0508ba7afcee9a13b02bb4261e4bd8c Mon Sep 17 00:00:00 2001 From: Jeremy Sowden Date: Wed, 14 Aug 2019 09:01:28 +0100 Subject: netfilter: remove deprecation warnings from uapi headers. There are two netfilter userspace headers which contain deprecation warnings. While these headers are not used within the kernel, they are compiled stand-alone for header-testing. Pablo informs me that userspace iptables still refer to these headers, and the intention was to use xt_LOG.h instead and remove these, but userspace was never updated. Remove the warnings. 
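For context, a hypothetical stand-alone compile test (not from the patch) showing why a #warning in an installed uapi header defeats header-testing: any file including it fails to build once warnings are treated as errors.

/* hypothetical header test: gcc -Werror -c probe.c
 * with the #warning still present in ipt_LOG.h this fails to build */
#include <linux/netfilter_ipv4/ipt_LOG.h>

int probe_flags(void)
{
        return IPT_LOG_TCPSEQ | IPT_LOG_TCPOPT;
}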
Fixes: 2a475c409fe8 ("kbuild: remove all netfilter headers from header-test blacklist.") Reported-by: kbuild test robot Signed-off-by: Jeremy Sowden Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter_ipv4/ipt_LOG.h | 2 -- include/uapi/linux/netfilter_ipv6/ip6t_LOG.h | 2 -- 2 files changed, 4 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/netfilter_ipv4/ipt_LOG.h b/include/uapi/linux/netfilter_ipv4/ipt_LOG.h index 6dec14ba851b..b7cf2c669f40 100644 --- a/include/uapi/linux/netfilter_ipv4/ipt_LOG.h +++ b/include/uapi/linux/netfilter_ipv4/ipt_LOG.h @@ -2,8 +2,6 @@ #ifndef _IPT_LOG_H #define _IPT_LOG_H -#warning "Please update iptables, this file will be removed soon!" - /* make sure not to change this without changing netfilter.h:NF_LOG_* (!) */ #define IPT_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */ #define IPT_LOG_TCPOPT 0x02 /* Log TCP options */ diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h b/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h index 7553a434e4da..23e91a9c2583 100644 --- a/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h +++ b/include/uapi/linux/netfilter_ipv6/ip6t_LOG.h @@ -2,8 +2,6 @@ #ifndef _IP6T_LOG_H #define _IP6T_LOG_H -#warning "Please update iptables, this file will be removed soon!" - /* make sure not to change this without changing netfilter.h:NF_LOG_* (!) */ #define IP6T_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */ #define IP6T_LOG_TCPOPT 0x02 /* Log TCP options */ -- cgit v1.2.3 From 088880ddc0b20086b71bb87b805fb63ff07c35f2 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Fri, 2 Aug 2019 14:27:33 +0200 Subject: drm/etnaviv: implement softpin With softpin we allow the userspace to take control over the GPU virtual address space. The new capability is reflected by a bump of the minor DRM version. There are a few restrictions for userspace to take into account: 1. The kernel reserves a bit of the address space to implement zero page faulting and mapping of the kernel internal ring buffer. Userspace can query the kernel for the first usable GPU VM address via ETNAVIV_PARAM_SOFTPIN_START_ADDR. 2. We only allow softpin on GPUs which implement proper process separation via PPAS. If softpin is not available the softpin start address will be set to ~0. 3. Softpin is all or nothing. A submit using softpin must not use any address fixups via relocs (a usage sketch follows below).
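A minimal userspace sketch of the query described in point 1, assuming libdrm's drmCommandWriteRead() and the uapi header changed below; the pipe choice and error handling are illustrative only:

#include <stdint.h>
#include <xf86drm.h>            /* drmCommandWriteRead() from libdrm */
#include "etnaviv_drm.h"        /* uapi header extended by this patch */

static uint64_t softpin_base(int fd)
{
        struct drm_etnaviv_param req = {
                .pipe  = ETNA_PIPE_3D,
                .param = ETNAVIV_PARAM_SOFTPIN_START_ADDR,
        };

        if (drmCommandWriteRead(fd, DRM_ETNAVIV_GET_PARAM, &req, sizeof(req)))
                return ~0ULL;
        return req.value;       /* ~0ULL means no PPAS, softpin unavailable */
}

A softpin submit then sets ETNA_SUBMIT_SOFTPIN in the submit flags, places each buffer's fixed GPU address (at or above the queried base) in the bo's 'presumed' field, and passes nr_relocs == 0, since relocs and softpin are mutually exclusive.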
Signed-off-by: Lucas Stach Reviewed-by: Philipp Zabel Reviewed-by: Guido Günther --- drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c | 1 + drivers/gpu/drm/etnaviv/etnaviv_drv.c | 2 +- drivers/gpu/drm/etnaviv/etnaviv_drv.h | 2 ++ drivers/gpu/drm/etnaviv/etnaviv_gem.c | 5 +++-- drivers/gpu/drm/etnaviv/etnaviv_gem.h | 1 + drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 26 +++++++++++++++++++++++++- drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 9 +++++++++ include/uapi/drm/etnaviv_drm.h | 10 +++++++++- 8 files changed, 51 insertions(+), 5 deletions(-) (limited to 'include/uapi') diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c index f39430ba3593..9dc20d892c15 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c @@ -44,6 +44,7 @@ etnaviv_cmdbuf_suballoc_new(struct device *dev) mutex_init(&suballoc->lock); init_waitqueue_head(&suballoc->free_event); + BUILD_BUG_ON(ETNAVIV_SOFTPIN_START_ADDRESS < SUBALLOC_SIZE); suballoc->vaddr = dma_alloc_wc(dev, SUBALLOC_SIZE, &suballoc->paddr, GFP_KERNEL); if (!suballoc->vaddr) { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 80f1edcbbea0..45851d510f62 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -529,7 +529,7 @@ static struct drm_driver etnaviv_drm_driver = { .desc = "etnaviv DRM", .date = "20151214", .major = 1, - .minor = 2, + .minor = 3, }; /* diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index a488cfdb6bbf..32cfa5a48d42 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -24,6 +24,8 @@ struct etnaviv_gem_object; struct etnaviv_gem_submit; struct etnaviv_iommu_global; +#define ETNAVIV_SOFTPIN_START_ADDRESS SZ_4M /* must be >= SUBALLOC_SIZE */ + struct etnaviv_file_private { struct etnaviv_iommu_context *mmu; struct drm_sched_entity sched_entity[ETNA_MAX_PIPES]; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index 8c62c5e0a494..401ce4512b8d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -248,7 +248,8 @@ void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping) } struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( - struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context) + struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context, + u64 va) { struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); struct etnaviv_vram_mapping *mapping; @@ -309,7 +310,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get( ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj, mmu_context->global->memory_base, - mapping, 0); + mapping, va); if (ret < 0) { etnaviv_iommu_context_put(mmu_context); kfree(mapping); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h index 2d01ee1f65c1..f1164934ecdf 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h @@ -77,6 +77,7 @@ static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj) struct etnaviv_gem_submit_bo { u32 flags; + u64 va; struct etnaviv_gem_object *obj; struct etnaviv_vram_mapping *mapping; struct dma_fence *excl; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c index 63a1206492d2..75afce42921b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c +++ 
b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c @@ -72,6 +72,14 @@ static int submit_lookup_objects(struct etnaviv_gem_submit *submit, } submit->bos[i].flags = bo->flags; + if (submit->flags & ETNA_SUBMIT_SOFTPIN) { + if (bo->presumed < ETNAVIV_SOFTPIN_START_ADDRESS) { + DRM_ERROR("invalid softpin address\n"); + ret = -EINVAL; + goto out_unlock; + } + submit->bos[i].va = bo->presumed; + } /* normally use drm_gem_object_lookup(), but for bulk lookup * all under single table_lock just hit object_idr directly: @@ -224,11 +232,17 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit) struct etnaviv_vram_mapping *mapping; mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base, - submit->mmu_context); + submit->mmu_context, + submit->bos[i].va); if (IS_ERR(mapping)) { ret = PTR_ERR(mapping); break; } + + if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && + submit->bos[i].va != mapping->iova) + return -EINVAL; + atomic_inc(&etnaviv_obj->gpu_active); submit->bos[i].flags |= BO_PINNED; @@ -261,6 +275,10 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream, u32 *ptr = stream; int ret; + /* Submits using softpin don't blend with relocs */ + if ((submit->flags & ETNA_SUBMIT_SOFTPIN) && nr_relocs != 0) + return -EINVAL; + for (i = 0; i < nr_relocs; i++) { const struct drm_etnaviv_gem_submit_reloc *r = relocs + i; struct etnaviv_gem_submit_bo *bo; @@ -445,6 +463,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data, return -EINVAL; } + if ((args->flags & ETNA_SUBMIT_SOFTPIN) && + priv->mmu_global->version != ETNAVIV_IOMMU_V2) { + DRM_ERROR("softpin requested on incompatible MMU\n"); + return -EINVAL; + } + /* * Copy the command submission and bo array to kernel space in * one go, and do this outside of any locks. diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index d8a83ebfce47..d47d1a8e0219 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -42,6 +42,8 @@ static const struct platform_device_id gpu_ids[] = { int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) { + struct etnaviv_drm_private *priv = gpu->drm->dev_private; + switch (param) { case ETNAVIV_PARAM_GPU_MODEL: *value = gpu->identity.model; @@ -147,6 +149,13 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value) *value = gpu->identity.varyings_count; break; + case ETNAVIV_PARAM_SOFTPIN_START_ADDR: + if (priv->mmu_global->version == ETNAVIV_IOMMU_V2) + *value = ETNAVIV_SOFTPIN_START_ADDRESS; + else + *value = ~0ULL; + break; + default: DBG("%s: invalid param: %u", dev_name(gpu->dev), param); return -EINVAL; diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h index 0d5c49dc478c..09d0df8b71c5 100644 --- a/include/uapi/drm/etnaviv_drm.h +++ b/include/uapi/drm/etnaviv_drm.h @@ -73,6 +73,7 @@ struct drm_etnaviv_timespec { #define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT 0x18 #define ETNAVIV_PARAM_GPU_NUM_CONSTANTS 0x19 #define ETNAVIV_PARAM_GPU_NUM_VARYINGS 0x1a +#define ETNAVIV_PARAM_SOFTPIN_START_ADDR 0x1b #define ETNA_MAX_PIPES 4 @@ -148,6 +149,11 @@ struct drm_etnaviv_gem_submit_reloc { * then patching the cmdstream for this entry is skipped. This can * avoid kernel needing to map/access the cmdstream bo in the common * case. + * If the submit is a softpin submit (ETNA_SUBMIT_SOFTPIN) the 'presumed' + * field is interpreted as the fixed location to map the bo into the gpu + * virtual address space. 
If the kernel is unable to map the buffer at + this location the submit will fail. This means userspace is responsible + for the whole gpu virtual address management. */ #define ETNA_SUBMIT_BO_READ 0x0001 #define ETNA_SUBMIT_BO_WRITE 0x0002 @@ -177,9 +183,11 @@ struct drm_etnaviv_gem_submit_pmr { #define ETNA_SUBMIT_NO_IMPLICIT 0x0001 #define ETNA_SUBMIT_FENCE_FD_IN 0x0002 #define ETNA_SUBMIT_FENCE_FD_OUT 0x0004 +#define ETNA_SUBMIT_SOFTPIN 0x0008 #define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \ ETNA_SUBMIT_FENCE_FD_IN | \ - ETNA_SUBMIT_FENCE_FD_OUT) + ETNA_SUBMIT_FENCE_FD_OUT| \ + ETNA_SUBMIT_SOFTPIN) #define ETNA_PIPE_3D 0x00 #define ETNA_PIPE_2D 0x01 #define ETNA_PIPE_VG 0x02 -- cgit v1.2.3 From 6d54e455689edcf1f9ef30761dd4fdfdc1cba33a Mon Sep 17 00:00:00 2001 From: Dragan Cvetic Date: Sat, 27 Jul 2019 09:33:51 +0100 Subject: misc: xilinx_sdfec: Store driver config and state Store configuration based on parameters from the DT node and values from the SD-FEC core, and read the default state from the SD-FEC core. To obtain values from the core, register read and write capabilities have been added, plus the related register map details. Tested-by: Dragan Cvetic Signed-off-by: Derek Kiernan Signed-off-by: Dragan Cvetic Link: https://lore.kernel.org/r/1564216438-322406-2-git-send-email-dragan.cvetic@xilinx.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/xilinx_sdfec.c | 305 ++++++++++++++++++++++++++++++++++++++- include/uapi/misc/xilinx_sdfec.h | 138 ++++++++++++++++++ 2 files changed, 437 insertions(+), 6 deletions(-) create mode 100644 include/uapi/misc/xilinx_sdfec.h (limited to 'include/uapi') diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c index f257d3812110..24d9f79fe073 100644 --- a/drivers/misc/xilinx_sdfec.c +++ b/drivers/misc/xilinx_sdfec.c @@ -20,11 +20,92 @@ #include #include +#include + #define DEV_NAME_LEN 12 static struct idr dev_idr; static struct mutex dev_idr_lock; +/* Xilinx SDFEC Register Map */ +/* CODE_WRI_PROTECT Register */ +#define XSDFEC_CODE_WR_PROTECT_ADDR (0x4) + +/* ACTIVE Register */ +#define XSDFEC_ACTIVE_ADDR (0x8) +#define XSDFEC_IS_ACTIVITY_SET (0x1) + +/* AXIS_WIDTH Register */ +#define XSDFEC_AXIS_WIDTH_ADDR (0xC) +#define XSDFEC_AXIS_DOUT_WORDS_LSB (5) +#define XSDFEC_AXIS_DOUT_WIDTH_LSB (3) +#define XSDFEC_AXIS_DIN_WORDS_LSB (2) +#define XSDFEC_AXIS_DIN_WIDTH_LSB (0) + +/* AXIS_ENABLE Register */ +#define XSDFEC_AXIS_ENABLE_ADDR (0x10) +#define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38) +#define XSDFEC_AXIS_IN_ENABLE_MASK (0x7) +#define XSDFEC_AXIS_ENABLE_MASK \ + (XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK) + +/* FEC_CODE Register */ +#define XSDFEC_FEC_CODE_ADDR (0x14) + +/* ORDER Register Map */ +#define XSDFEC_ORDER_ADDR (0x18) + +/* Interrupt Status Register */ +#define XSDFEC_ISR_ADDR (0x1C) +/* Interrupt Status Register Bit Mask */ +#define XSDFEC_ISR_MASK (0x3F) + +/* Write Only - Interrupt Enable Register */ +#define XSDFEC_IER_ADDR (0x20) +/* Write Only - Interrupt Disable Register */ +#define XSDFEC_IDR_ADDR (0x24) +/* Read Only - Interrupt Mask Register */ +#define XSDFEC_IMR_ADDR (0x28) + +/* ECC Interrupt Status Register */ +#define XSDFEC_ECC_ISR_ADDR (0x2C) +/* Single Bit Errors */ +#define XSDFEC_ECC_ISR_SBE_MASK (0x7FF) +/* PL Initialize Single Bit Errors */ +#define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000) +/* Multi Bit Errors */ +#define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800) +/* PL Initialize Multi Bit Errors */ +#define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000) +/* Multi Bit Error to Event
Shift */ +#define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11) +/* PL Initialize Multi Bit Error to Event Shift */ +#define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4) +/* ECC Interrupt Status Bit Mask */ +#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK) +/* ECC Interrupt Status PL Initialize Bit Mask */ +#define XSDFEC_PL_INIT_ECC_ISR_MASK \ + (XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK) +/* ECC Interrupt Status All Bit Mask */ +#define XSDFEC_ALL_ECC_ISR_MASK \ + (XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK) +/* ECC Interrupt Status Single Bit Errors Mask */ +#define XSDFEC_ALL_ECC_ISR_SBE_MASK \ + (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK) +/* ECC Interrupt Status Multi Bit Errors Mask */ +#define XSDFEC_ALL_ECC_ISR_MBE_MASK \ + (XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK) + +/* Write Only - ECC Interrupt Enable Register */ +#define XSDFEC_ECC_IER_ADDR (0x30) +/* Write Only - ECC Interrupt Disable Register */ +#define XSDFEC_ECC_IDR_ADDR (0x34) +/* Read Only - ECC Interrupt Mask Register */ +#define XSDFEC_ECC_IMR_ADDR (0x38) + +/* BYPASS Register */ +#define XSDFEC_BYPASS_ADDR (0x3C) + /** * struct xsdfec_clks - For managing SD-FEC clocks * @core_clk: Main processing clock for core @@ -49,31 +130,237 @@ struct xsdfec_clks { /** * struct xsdfec_dev - Driver data for SDFEC - * @regs: device physical base address - * @dev: pointer to device struct * @miscdev: Misc device handle - * @error_data_lock: Error counter and states spinlock * @clks: Clocks managed by the SDFEC driver + * @regs: device physical base address + * @dev: pointer to device struct + * @config: Configuration of the SDFEC device * @dev_name: Device name + * @state: State of the SDFEC device + * @error_data_lock: Error counter and states spinlock * @dev_id: Device ID * * This structure contains necessary state for SDFEC driver to operate */ struct xsdfec_dev { + struct miscdevice miscdev; + struct xsdfec_clks clks; void __iomem *regs; struct device *dev; - struct miscdevice miscdev; + struct xsdfec_config config; + char dev_name[DEV_NAME_LEN]; + enum xsdfec_state state; /* Spinlock to protect state_updated and stats_updated */ spinlock_t error_data_lock; - struct xsdfec_clks clks; - char dev_name[DEV_NAME_LEN]; int dev_id; }; +static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr, + u32 value) +{ + dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr); + iowrite32(value, xsdfec->regs + addr); +} + +static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr) +{ + u32 rval; + + rval = ioread32(xsdfec->regs + addr); + dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr); + return rval; +} + +static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec, + u32 reg_offset, u32 bit_num, + char *config_value) +{ + u32 reg_val; + u32 bit_mask = 1 << bit_num; + + reg_val = xsdfec_regread(xsdfec, reg_offset); + *config_value = (reg_val & bit_mask) > 0; +} + +static void update_config_from_hw(struct xsdfec_dev *xsdfec) +{ + u32 reg_value; + bool sdfec_started; + + /* Update the Order */ + reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR); + xsdfec->config.order = reg_value; + + update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR, + 0, /* Bit Number, maybe change to mask */ + &xsdfec->config.bypass); + + update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR, + 0, /* Bit Number */ + &xsdfec->config.code_wr_protect); + + reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); + 
xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0; + + reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); + xsdfec->config.irq.enable_ecc_isr = + (reg_value & XSDFEC_ECC_ISR_MASK) > 0; + + reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR); + sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0; + if (sdfec_started) + xsdfec->state = XSDFEC_STARTED; + else + xsdfec->state = XSDFEC_STOPPED; +} + +static u32 +xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg) +{ + u32 axis_width_field = 0; + + switch (axis_width_cfg) { + case XSDFEC_1x128b: + axis_width_field = 0; + break; + case XSDFEC_2x128b: + axis_width_field = 1; + break; + case XSDFEC_4x128b: + axis_width_field = 2; + break; + } + + return axis_width_field; +} + +static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include + axis_word_inc_cfg) +{ + u32 axis_words_field = 0; + + if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE || + axis_word_inc_cfg == XSDFEC_IN_BLOCK) + axis_words_field = 0; + else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION) + axis_words_field = 1; + + return axis_words_field; +} + +static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec) +{ + u32 reg_value; + u32 dout_words_field; + u32 dout_width_field; + u32 din_words_field; + u32 din_width_field; + struct xsdfec_config *config = &xsdfec->config; + + /* translate config info to register values */ + dout_words_field = + xsdfec_translate_axis_words_cfg_val(config->dout_word_include); + dout_width_field = + xsdfec_translate_axis_width_cfg_val(config->dout_width); + din_words_field = + xsdfec_translate_axis_words_cfg_val(config->din_word_include); + din_width_field = + xsdfec_translate_axis_width_cfg_val(config->din_width); + + reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB; + reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB; + reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB; + reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB; + + xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value); + + return 0; +} + static const struct file_operations xsdfec_fops = { .owner = THIS_MODULE, }; +static int xsdfec_parse_of(struct xsdfec_dev *xsdfec) +{ + struct device *dev = xsdfec->dev; + struct device_node *node = dev->of_node; + int rval; + const char *fec_code; + u32 din_width; + u32 din_word_include; + u32 dout_width; + u32 dout_word_include; + + rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code); + if (rval < 0) + return rval; + + if (!strcasecmp(fec_code, "ldpc")) + xsdfec->config.code = XSDFEC_LDPC_CODE; + else if (!strcasecmp(fec_code, "turbo")) + xsdfec->config.code = XSDFEC_TURBO_CODE; + else + return -EINVAL; + + rval = of_property_read_u32(node, "xlnx,sdfec-din-words", + &din_word_include); + if (rval < 0) + return rval; + + if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX) + xsdfec->config.din_word_include = din_word_include; + else + return -EINVAL; + + rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width); + if (rval < 0) + return rval; + + switch (din_width) { + /* Fall through and set for valid values */ + case XSDFEC_1x128b: + case XSDFEC_2x128b: + case XSDFEC_4x128b: + xsdfec->config.din_width = din_width; + break; + default: + return -EINVAL; + } + + rval = of_property_read_u32(node, "xlnx,sdfec-dout-words", + &dout_word_include); + if (rval < 0) + return rval; + + if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX) + xsdfec->config.dout_word_include = dout_word_include; + else + return -EINVAL; 
+ + rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width); + if (rval < 0) + return rval; + + switch (dout_width) { + /* Fall through and set for valid values */ + case XSDFEC_1x128b: + case XSDFEC_2x128b: + case XSDFEC_4x128b: + xsdfec->config.dout_width = dout_width; + break; + default: + return -EINVAL; + } + + /* Write LDPC to CODE Register */ + xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code); + + xsdfec_cfg_axi_streams(xsdfec); + + return 0; +} + static int xsdfec_clk_init(struct platform_device *pdev, struct xsdfec_clks *clks) { @@ -260,6 +547,12 @@ static int xsdfec_probe(struct platform_device *pdev) goto err_xsdfec_dev; } + err = xsdfec_parse_of(xsdfec); + if (err < 0) + goto err_xsdfec_dev; + + update_config_from_hw(xsdfec); + /* Save driver private data */ platform_set_drvdata(pdev, xsdfec); diff --git a/include/uapi/misc/xilinx_sdfec.h b/include/uapi/misc/xilinx_sdfec.h new file mode 100644 index 000000000000..330ea25ddfa1 --- /dev/null +++ b/include/uapi/misc/xilinx_sdfec.h @@ -0,0 +1,138 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * Xilinx SD-FEC + * + * Copyright (C) 2019 Xilinx, Inc. + * + * Description: + * This driver is developed for SDFEC16 IP. It provides a char device + * in sysfs and supports file operations like open(), close() and ioctl(). + */ +#ifndef __XILINX_SDFEC_H__ +#define __XILINX_SDFEC_H__ + +#include + +/** + * enum xsdfec_code - Code Type. + * @XSDFEC_TURBO_CODE: Driver is configured for Turbo mode. + * @XSDFEC_LDPC_CODE: Driver is configured for LDPC mode. + * + * This enum is used to indicate the mode of the driver. The mode is determined + * by checking which codes are set in the driver. Note that the mode cannot be + * changed by the driver. + */ +enum xsdfec_code { + XSDFEC_TURBO_CODE = 0, + XSDFEC_LDPC_CODE, +}; + +/** + * enum xsdfec_order - Order + * @XSDFEC_MAINTAIN_ORDER: Maintain order execution of blocks. + * @XSDFEC_OUT_OF_ORDER: Out-of-order execution of blocks. + * + * This enum is used to indicate whether the order of blocks can change from + * input to output. + */ +enum xsdfec_order { + XSDFEC_MAINTAIN_ORDER = 0, + XSDFEC_OUT_OF_ORDER, +}; + +/** + * enum xsdfec_state - State. + * @XSDFEC_INIT: Driver is initialized. + * @XSDFEC_STARTED: Driver is started. + * @XSDFEC_STOPPED: Driver is stopped. + * @XSDFEC_NEEDS_RESET: Driver needs to be reset. + * @XSDFEC_PL_RECONFIGURE: Programmable Logic needs to be reconfigured. + * + * This enum is used to indicate the state of the driver. + */ +enum xsdfec_state { + XSDFEC_INIT = 0, + XSDFEC_STARTED, + XSDFEC_STOPPED, + XSDFEC_NEEDS_RESET, + XSDFEC_PL_RECONFIGURE, +}; + +/** + * enum xsdfec_axis_width - AXIS_WIDTH.DIN Setting for 128-bit width. + * @XSDFEC_1x128b: DIN data input stream consists of a 128-bit lane + * @XSDFEC_2x128b: DIN data input stream consists of two 128-bit lanes + * @XSDFEC_4x128b: DIN data input stream consists of four 128-bit lanes + * + * This enum is used to indicate the AXIS_WIDTH.DIN setting for 128-bit width. + * The number of lanes of the DIN data input stream depends upon the + * AXIS_WIDTH.DIN parameter. + */ +enum xsdfec_axis_width { + XSDFEC_1x128b = 1, + XSDFEC_2x128b = 2, + XSDFEC_4x128b = 4, +}; + +/** + * enum xsdfec_axis_word_include - Words Configuration. + * @XSDFEC_FIXED_VALUE: Fixed, the DIN_WORDS AXI4-Stream interface is removed + * from the IP instance and is driven with the specified + * number of words.
+ * @XSDFEC_IN_BLOCK: In Block, configures the IP instance to expect a single + * DIN_WORDS value per input code block. The DIN_WORDS + * interface is present. + * @XSDFEC_PER_AXI_TRANSACTION: Per Transaction, configures the IP instance to + * expect one DIN_WORDS value per input transaction on the DIN interface. The + * DIN_WORDS interface is present. + * @XSDFEC_AXIS_WORDS_INCLUDE_MAX: Used to indicate out of bound Words + * Configurations. + * + * This enum is used to specify the DIN_WORDS configuration. + */ +enum xsdfec_axis_word_include { + XSDFEC_FIXED_VALUE = 0, + XSDFEC_IN_BLOCK, + XSDFEC_PER_AXI_TRANSACTION, + XSDFEC_AXIS_WORDS_INCLUDE_MAX, +}; + +/** + * struct xsdfec_irq - Enabling or Disabling Interrupts. + * @enable_isr: If true enables the ISR + * @enable_ecc_isr: If true enables the ECC ISR + */ +struct xsdfec_irq { + __s8 enable_isr; + __s8 enable_ecc_isr; +}; + +/** + * struct xsdfec_config - Configuration of SD-FEC core. + * @code: The codes being used by the SD-FEC instance + * @order: Order of Operation + * @din_width: Width of the DIN AXI4-Stream + * @din_word_include: How DIN_WORDS are inputted + * @dout_width: Width of the DOUT AXI4-Stream + * @dout_word_include: How DOUT_WORDS are outputted + * @irq: Enabling or disabling interrupts + * @bypass: Is the core being bypassed + * @code_wr_protect: Is write protection of LDPC codes enabled + */ +struct xsdfec_config { + __u32 code; + __u32 order; + __u32 din_width; + __u32 din_word_include; + __u32 dout_width; + __u32 dout_word_include; + struct xsdfec_irq irq; + __s8 bypass; + __s8 code_wr_protect; +}; + +/* + * XSDFEC IOCTL List + */ +#define XSDFEC_MAGIC 'f' +#endif /* __XILINX_SDFEC_H__ */ -- cgit v1.2.3 From 6f86ed820178ba35f87712548e6cd43b91608a6c Mon Sep 17 00:00:00 2001 From: Dragan Cvetic Date: Sat, 27 Jul 2019 09:33:52 +0100 Subject: misc: xilinx_sdfec: Add ability to configure turbo Add the capability to configure and retrieve turbo mode via the ioctls XSDFEC_SET_TURBO and XSDFEC_GET_TURBO. Add a char device interface per DT node present and support these file operations: - open(), - close(), - unlocked_ioctl(), - compat_ioctl(). A usage example follows below.
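A hypothetical userspace round-trip over the two new ioctls; the device node name, header install path and parameter values are assumptions for illustration, not taken from the patch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <misc/xilinx_sdfec.h>  /* assumed install path of the uapi header */

int main(void)
{
        struct xsdfec_turbo turbo = {
                .alg   = XSDFEC_MAX_SCALE,
                .scale = 8,                     /* placeholder, max is 15 */
        };
        int fd = open("/dev/xsdfec0", O_RDWR);  /* assumed node name */

        if (fd < 0)
                return 1;
        /* per the DOC comments below, only accepted while the core is stopped */
        if (ioctl(fd, XSDFEC_SET_TURBO, &turbo))
                perror("XSDFEC_SET_TURBO");
        if (!ioctl(fd, XSDFEC_GET_TURBO, &turbo))
                printf("alg=%u scale=%u\n", turbo.alg, turbo.scale);
        return 0;
}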
Tested-by: Dragan Cvetic Signed-off-by: Derek Kiernan Signed-off-by: Dragan Cvetic Link: https://lore.kernel.org/r/1564216438-322406-3-git-send-email-dragan.cvetic@xilinx.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/xilinx_sdfec.c | 110 +++++++++++++++++++++++++++++++++++++++ include/uapi/misc/xilinx_sdfec.h | 67 ++++++++++++++++++++++++ 2 files changed, 177 insertions(+) (limited to 'include/uapi') diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c index 24d9f79fe073..d3dba7e6f896 100644 --- a/drivers/misc/xilinx_sdfec.c +++ b/drivers/misc/xilinx_sdfec.c @@ -19,6 +19,7 @@ #include #include #include +#include #include @@ -106,6 +107,12 @@ static struct mutex dev_idr_lock; /* BYPASS Register */ #define XSDFEC_BYPASS_ADDR (0x3C) +/* Turbo Code Register */ +#define XSDFEC_TURBO_ADDR (0x100) +#define XSDFEC_TURBO_SCALE_MASK (0xFFF) +#define XSDFEC_TURBO_SCALE_BIT_POS (8) +#define XSDFEC_TURBO_SCALE_MAX (15) + /** * struct xsdfec_clks - For managing SD-FEC clocks * @core_clk: Main processing clock for core @@ -214,6 +221,55 @@ static void update_config_from_hw(struct xsdfec_dev *xsdfec) xsdfec->state = XSDFEC_STOPPED; } +static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg) +{ + struct xsdfec_turbo turbo; + int err; + u32 turbo_write; + + err = copy_from_user(&turbo, arg, sizeof(turbo)); + if (err) + return -EFAULT; + + if (turbo.alg >= XSDFEC_TURBO_ALG_MAX) + return -EINVAL; + + if (turbo.scale > XSDFEC_TURBO_SCALE_MAX) + return -EINVAL; + + /* Check to see what device tree says about the FEC codes */ + if (xsdfec->config.code == XSDFEC_LDPC_CODE) + return -EIO; + + turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK) + << XSDFEC_TURBO_SCALE_BIT_POS) | + turbo.alg; + xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write); + return err; +} + +static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg) +{ + u32 reg_value; + struct xsdfec_turbo turbo_params; + int err; + + if (xsdfec->config.code == XSDFEC_LDPC_CODE) + return -EIO; + + reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR); + + turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >> + XSDFEC_TURBO_SCALE_BIT_POS; + turbo_params.alg = reg_value & 0x1; + + err = copy_to_user(arg, &turbo_params, sizeof(turbo_params)); + if (err) + err = -EFAULT; + + return err; +} + static u32 xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg) { @@ -277,8 +333,62 @@ static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec) return 0; } +static int xsdfec_dev_open(struct inode *iptr, struct file *fptr) +{ + return 0; +} + +static int xsdfec_dev_release(struct inode *iptr, struct file *fptr) +{ + return 0; +} + +static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, + unsigned long data) +{ + struct xsdfec_dev *xsdfec; + void __user *arg = NULL; + int rval = -EINVAL; + + xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev); + + /* check if ioctl argument is present and valid */ + if (_IOC_DIR(cmd) != _IOC_NONE) { + arg = (void __user *)data; + if (!arg) + return rval; + } + + switch (cmd) { + case XSDFEC_SET_TURBO: + rval = xsdfec_set_turbo(xsdfec, arg); + break; + case XSDFEC_GET_TURBO: + rval = xsdfec_get_turbo(xsdfec, arg); + break; + default: + /* Should not get here */ + break; + } + return rval; +} + +#ifdef CONFIG_COMPAT +static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long data) +{ + return xsdfec_dev_ioctl(file, cmd, (unsigned long)compat_ptr(data)); +} +#endif + static const struct 
file_operations xsdfec_fops = { .owner = THIS_MODULE, + .open = xsdfec_dev_open, + .release = xsdfec_dev_release, + .unlocked_ioctl = xsdfec_dev_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = xsdfec_dev_compat_ioctl, +#endif }; static int xsdfec_parse_of(struct xsdfec_dev *xsdfec) diff --git a/include/uapi/misc/xilinx_sdfec.h b/include/uapi/misc/xilinx_sdfec.h index 330ea25ddfa1..34f32c37261d 100644 --- a/include/uapi/misc/xilinx_sdfec.h +++ b/include/uapi/misc/xilinx_sdfec.h @@ -40,6 +40,22 @@ enum xsdfec_order { XSDFEC_OUT_OF_ORDER, }; +/** + * enum xsdfec_turbo_alg - Turbo Algorithm Type. + * @XSDFEC_MAX_SCALE: Max Log-Map algorithm with extrinsic scaling. When + * the scaling factor is one this is equivalent to the Max Log-Map + * algorithm. + * @XSDFEC_MAX_STAR: Log-Map algorithm. + * @XSDFEC_TURBO_ALG_MAX: Used to indicate out of bound Turbo algorithms. + * + * This enum specifies which Turbo Decode algorithm is in use. + */ +enum xsdfec_turbo_alg { + XSDFEC_MAX_SCALE = 0, + XSDFEC_MAX_STAR, + XSDFEC_TURBO_ALG_MAX, +}; + /** * enum xsdfec_state - State. @@ -97,6 +113,29 @@ enum xsdfec_axis_word_include { XSDFEC_AXIS_WORDS_INCLUDE_MAX, }; +/** + * struct xsdfec_turbo - User data for Turbo codes. + * @alg: Specifies which Turbo decode algorithm to use + * @scale: Specifies the extrinsic scaling to apply when the Max Scale algorithm + * has been selected + * + * Turbo code structure to communicate parameters to XSDFEC driver. + */ +struct xsdfec_turbo { + __u32 alg; + __u8 scale; +}; + +/** + * struct xsdfec_status - Status of SD-FEC core. + * @state: State of the SD-FEC core + * @activity: Describes if the SD-FEC instance is Active + */ +struct xsdfec_status { + __u32 state; + __s8 activity; +}; + /** * struct xsdfec_irq - Enabling or Disabling Interrupts. * @enable_isr: If true enables the ISR @@ -135,4 +174,32 @@ struct xsdfec_config { * XSDFEC IOCTL List */ #define XSDFEC_MAGIC 'f' +/** + * DOC: XSDFEC_SET_TURBO + * @Parameters + * + * @struct xsdfec_turbo * + * Pointer to the &struct xsdfec_turbo that contains the Turbo decode + * settings for the SD-FEC core + * + * @Description + * + * ioctl that sets the SD-FEC Turbo parameter values + * + * This can only be used when the driver is in the XSDFEC_STOPPED state + */ +#define XSDFEC_SET_TURBO _IOW(XSDFEC_MAGIC, 4, struct xsdfec_turbo) +/** + * DOC: XSDFEC_GET_TURBO + * @Parameters + * + * @struct xsdfec_turbo * + * Pointer to the &struct xsdfec_turbo that contains the current Turbo + * decode settings of the SD-FEC Block + * + * @Description + * + * ioctl that returns SD-FEC turbo param values + */ +#define XSDFEC_GET_TURBO _IOR(XSDFEC_MAGIC, 7, struct xsdfec_turbo) #endif /* __XILINX_SDFEC_H__ */ -- cgit v1.2.3 From 20ec628e8007ec75c2f884e00004f39eab6289b5 Mon Sep 17 00:00:00 2001 From: Dragan Cvetic Date: Sat, 27 Jul 2019 09:33:53 +0100 Subject: misc: xilinx_sdfec: Add ability to configure LDPC Add the capability to configure LDPC mode via the ioctl XSDFEC_ADD_LDPC_CODE_PARAMS.
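A sketch of calling the new ioctl (all code parameters are placeholders, not a real LDPC code definition). The table buffers are made page aligned because, as the driver code below shows, they are pinned with get_user_pages_fast():

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/xilinx_sdfec.h>  /* assumed install path of the uapi header */

static int add_ldpc_code(int fd)
{
        long page = sysconf(_SC_PAGESIZE);
        struct xsdfec_ldpc_params p;

        memset(&p, 0, sizeof(p));
        p.n = 1280;             /* placeholder code word length */
        p.k = 1024;             /* placeholder information bits */
        p.psize = 128;          /* placeholder sub-matrix size */
        p.nlayers = 4;
        p.nqc = 8;
        p.code_id = 0;

        /* page-aligned table buffers; contents and freeing elided */
        if (posix_memalign((void **)&p.sc_table, page, page) ||
            posix_memalign((void **)&p.la_table, page, page) ||
            posix_memalign((void **)&p.qc_table, page, page))
                return -1;

        return ioctl(fd, XSDFEC_ADD_LDPC_CODE_PARAMS, &p);
}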
Tested-by: Dragan Cvetic Signed-off-by: Derek Kiernan Signed-off-by: Dragan Cvetic Link: https://lore.kernel.org/r/1564216438-322406-4-git-send-email-dragan.cvetic@xilinx.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/xilinx_sdfec.c | 324 +++++++++++++++++++++++++++++++++++++++ include/uapi/misc/xilinx_sdfec.h | 98 ++++++++++++ 2 files changed, 422 insertions(+) (limited to 'include/uapi') diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c index d3dba7e6f896..9be4de07eee2 100644 --- a/drivers/misc/xilinx_sdfec.c +++ b/drivers/misc/xilinx_sdfec.c @@ -20,6 +20,7 @@ #include #include #include +#include #include @@ -113,6 +114,57 @@ static struct mutex dev_idr_lock; #define XSDFEC_TURBO_SCALE_BIT_POS (8) #define XSDFEC_TURBO_SCALE_MAX (15) +/* REG0 Register */ +#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000) +#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0) +#define XSDFEC_REG0_N_MIN (4) +#define XSDFEC_REG0_N_MAX (32768) +#define XSDFEC_REG0_N_MUL_P (256) +#define XSDFEC_REG0_N_LSB (0) +#define XSDFEC_REG0_K_MIN (2) +#define XSDFEC_REG0_K_MAX (32766) +#define XSDFEC_REG0_K_MUL_P (256) +#define XSDFEC_REG0_K_LSB (16) + +/* REG1 Register */ +#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004) +#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4) +#define XSDFEC_REG1_PSIZE_MIN (2) +#define XSDFEC_REG1_PSIZE_MAX (512) +#define XSDFEC_REG1_NO_PACKING_MASK (0x400) +#define XSDFEC_REG1_NO_PACKING_LSB (10) +#define XSDFEC_REG1_NM_MASK (0xFF800) +#define XSDFEC_REG1_NM_LSB (11) +#define XSDFEC_REG1_BYPASS_MASK (0x100000) + +/* REG2 Register */ +#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008) +#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8) +#define XSDFEC_REG2_NLAYERS_MIN (1) +#define XSDFEC_REG2_NLAYERS_MAX (256) +#define XSDFEC_REG2_NNMQC_MASK (0xFFE00) +#define XSDFEC_REG2_NMQC_LSB (9) +#define XSDFEC_REG2_NORM_TYPE_MASK (0x100000) +#define XSDFEC_REG2_NORM_TYPE_LSB (20) +#define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000) +#define XSDFEC_REG2_SPEICAL_QC_LSB (21) +#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000) +#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22) +#define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000) +#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23) + +/* REG3 Register */ +#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C) +#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC) +#define XSDFEC_REG3_LA_OFF_LSB (8) +#define XSDFEC_REG3_QC_OFF_LSB (16) + +#define XSDFEC_LDPC_REG_JUMP (0x10) +#define XSDFEC_REG_WIDTH_JUMP (4) + +/* The maximum number of pinned pages */ +#define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1) + /** * struct xsdfec_clks - For managing SD-FEC clocks * @core_clk: Main processing clock for core @@ -270,6 +322,275 @@ static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg) return err; } +static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize, + u32 offset) +{ + u32 wdata; + + if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || + (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) { + dev_dbg(xsdfec->dev, "N value is not in range"); + return -EINVAL; + } + n <<= XSDFEC_REG0_N_LSB; + + if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX || + (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) { + dev_dbg(xsdfec->dev, "K value is not in range"); + return -EINVAL; + } + k = k << XSDFEC_REG0_K_LSB; + wdata = k | n; + + if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) > + XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) { + dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x", + 
XSDFEC_LDPC_CODE_REG0_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP)); + return -EINVAL; + } + xsdfec_regwrite(xsdfec, + XSDFEC_LDPC_CODE_REG0_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP), + wdata); + return 0; +} + +static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize, + u32 no_packing, u32 nm, u32 offset) +{ + u32 wdata; + + if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) { + dev_dbg(xsdfec->dev, "Psize is not in range"); + return -EINVAL; + } + + if (no_packing != 0 && no_packing != 1) + dev_dbg(xsdfec->dev, "No-packing bit register invalid"); + no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) & + XSDFEC_REG1_NO_PACKING_MASK); + + if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB)) + dev_dbg(xsdfec->dev, "NM is beyond 10 bits"); + nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK; + + wdata = nm | no_packing | psize; + if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) > + XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) { + dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x", + XSDFEC_LDPC_CODE_REG1_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP)); + return -EINVAL; + } + xsdfec_regwrite(xsdfec, + XSDFEC_LDPC_CODE_REG1_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP), + wdata); + return 0; +} + +static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc, + u32 norm_type, u32 special_qc, u32 no_final_parity, + u32 max_schedule, u32 offset) +{ + u32 wdata; + + if (nlayers < XSDFEC_REG2_NLAYERS_MIN || + nlayers > XSDFEC_REG2_NLAYERS_MAX) { + dev_dbg(xsdfec->dev, "Nlayers is not in range"); + return -EINVAL; + } + + if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB)) + dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits"); + nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK; + + if (norm_type > 1) + dev_dbg(xsdfec->dev, "Norm type is invalid"); + norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) & + XSDFEC_REG2_NORM_TYPE_MASK); + if (special_qc > 1) + dev_dbg(xsdfec->dev, "Special QC is invalid"); + special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) & + XSDFEC_REG2_SPECIAL_QC_MASK); + + if (no_final_parity > 1) + dev_dbg(xsdfec->dev, "No final parity check invalid"); + no_final_parity = + ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) & + XSDFEC_REG2_NO_FINAL_PARITY_MASK); + if (max_schedule & + ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB)) + dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits"); + max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) & + XSDFEC_REG2_MAX_SCHEDULE_MASK); + + wdata = (max_schedule | no_final_parity | special_qc | norm_type | + nmqc | nlayers); + + if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) > + XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) { + dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x", + XSDFEC_LDPC_CODE_REG2_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP)); + return -EINVAL; + } + xsdfec_regwrite(xsdfec, + XSDFEC_LDPC_CODE_REG2_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP), + wdata); + return 0; +} + +static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off, + u16 qc_off, u32 offset) +{ + u32 wdata; + + wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) | + (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off); + if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) > + XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) { + dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x", + XSDFEC_LDPC_CODE_REG3_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP)); + return -EINVAL; + } +
xsdfec_regwrite(xsdfec, + XSDFEC_LDPC_CODE_REG3_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP), + wdata); + return 0; +} + +static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset, + u32 *src_ptr, u32 len, const u32 base_addr, + const u32 depth) +{ + u32 reg = 0; + u32 res; + u32 n, i; + u32 *addr = NULL; + struct page *page[MAX_NUM_PAGES]; + + /* + * Writes that go beyond the length of + * Shared Scale(SC) table should fail + */ + if ((XSDFEC_REG_WIDTH_JUMP * (offset + len)) > depth) { + dev_dbg(xsdfec->dev, "Write exceeds SC table length"); + return -EINVAL; + } + + n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE; + if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE) + n += 1; + + res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page); + if (res < n) { + for (i = 0; i < res; i++) + put_page(page[i]); + return -EINVAL; + } + + for (i = 0; i < n; i++) { + addr = kmap(page[i]); + do { + xsdfec_regwrite(xsdfec, + base_addr + ((offset + reg) * + XSDFEC_REG_WIDTH_JUMP), + addr[reg]); + reg++; + } while ((reg < len) && + ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)); + put_page(page[i]); + } + return reg; +} + +static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg) +{ + struct xsdfec_ldpc_params *ldpc; + int ret, n; + + ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL); + if (!ldpc) + return -ENOMEM; + + ret = copy_from_user(ldpc, arg, sizeof(*ldpc)); + if (ret) + goto err_out; + + if (xsdfec->config.code == XSDFEC_TURBO_CODE) { + ret = -EIO; + goto err_out; + } + + /* Verify Device has not started */ + if (xsdfec->state == XSDFEC_STARTED) { + ret = -EIO; + goto err_out; + } + + if (xsdfec->config.code_wr_protect) { + ret = -EIO; + goto err_out; + } + + /* Write Reg 0 */ + ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize, + ldpc->code_id); + if (ret) + goto err_out; + + /* Write Reg 1 */ + ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm, + ldpc->code_id); + if (ret) + goto err_out; + + /* Write Reg 2 */ + ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc, + ldpc->norm_type, ldpc->special_qc, + ldpc->no_final_parity, ldpc->max_schedule, + ldpc->code_id); + if (ret) + goto err_out; + + /* Write Reg 3 */ + ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off, + ldpc->qc_off, ldpc->code_id); + if (ret) + goto err_out; + + /* Write Shared Codes */ + n = ldpc->nlayers / 4; + if (ldpc->nlayers % 4) + n++; + + ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n, + XSDFEC_LDPC_SC_TABLE_ADDR_BASE, + XSDFEC_SC_TABLE_DEPTH); + if (ret < 0) + goto err_out; + + ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table, + ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE, + XSDFEC_LA_TABLE_DEPTH); + if (ret < 0) + goto err_out; + + ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table, + ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE, + XSDFEC_QC_TABLE_DEPTH); + if (ret > 0) + ret = 0; +err_out: + kfree(ldpc); + return ret; +} + static u32 xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg) { @@ -366,6 +687,9 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, case XSDFEC_GET_TURBO: rval = xsdfec_get_turbo(xsdfec, arg); break; + case XSDFEC_ADD_LDPC_CODE_PARAMS: + rval = xsdfec_add_ldpc(xsdfec, arg); + break; default: /* Should not get here */ break; diff --git a/include/uapi/misc/xilinx_sdfec.h b/include/uapi/misc/xilinx_sdfec.h index 34f32c37261d..ce532a5e3e81 100644 --- a/include/uapi/misc/xilinx_sdfec.h +++ b/include/uapi/misc/xilinx_sdfec.h @@ -13,6 +13,22 @@ #include +/* 
Shared LDPC Tables */ +#define XSDFEC_LDPC_SC_TABLE_ADDR_BASE (0x10000) +#define XSDFEC_LDPC_SC_TABLE_ADDR_HIGH (0x10400) +#define XSDFEC_LDPC_LA_TABLE_ADDR_BASE (0x18000) +#define XSDFEC_LDPC_LA_TABLE_ADDR_HIGH (0x19000) +#define XSDFEC_LDPC_QC_TABLE_ADDR_BASE (0x20000) +#define XSDFEC_LDPC_QC_TABLE_ADDR_HIGH (0x28000) + +/* LDPC tables depth */ +#define XSDFEC_SC_TABLE_DEPTH \ + (XSDFEC_LDPC_SC_TABLE_ADDR_HIGH - XSDFEC_LDPC_SC_TABLE_ADDR_BASE) +#define XSDFEC_LA_TABLE_DEPTH \ + (XSDFEC_LDPC_LA_TABLE_ADDR_HIGH - XSDFEC_LDPC_LA_TABLE_ADDR_BASE) +#define XSDFEC_QC_TABLE_DEPTH \ + (XSDFEC_LDPC_QC_TABLE_ADDR_HIGH - XSDFEC_LDPC_QC_TABLE_ADDR_BASE) + /** * enum xsdfec_code - Code Type. * @XSDFEC_TURBO_CODE: Driver is configured for Turbo mode. @@ -126,6 +142,53 @@ struct xsdfec_turbo { __u8 scale; }; +/** + * struct xsdfec_ldpc_params - User data for LDPC codes. + * @n: Number of code word bits + * @k: Number of information bits + * @psize: Size of sub-matrix + * @nlayers: Number of layers in code + * @nqc: Quasi Cyclic Number + * @nmqc: Number of M-sized QC operations in parity check matrix + * @nm: Number of M-size vectors in N + * @norm_type: Normalization required or not + * @no_packing: Determines if multiple QC ops should be performed + * @special_qc: Sub-Matrix property for Circulant weight > 0 + * @no_final_parity: Decide if final parity check needs to be performed + * @max_schedule: Experimental code word scheduling limit + * @sc_off: SC offset + * @la_off: LA offset + * @qc_off: QC offset + * @sc_table: Pointer to SC Table which must be page aligned + * @la_table: Pointer to LA Table which must be page aligned + * @qc_table: Pointer to QC Table which must be page aligned + * @code_id: LDPC Code + * + * This structure describes the LDPC code that is passed to the driver by the + * application. + */ +struct xsdfec_ldpc_params { + __u32 n; + __u32 k; + __u32 psize; + __u32 nlayers; + __u32 nqc; + __u32 nmqc; + __u32 nm; + __u32 norm_type; + __u32 no_packing; + __u32 special_qc; + __u32 no_final_parity; + __u32 max_schedule; + __u32 sc_off; + __u32 la_off; + __u32 qc_off; + __u32 *sc_table; + __u32 *la_table; + __u32 *qc_table; + __u16 code_id; +}; + /** * struct xsdfec_status - Status of SD-FEC core. * @state: State of the SD-FEC core @@ -170,6 +233,20 @@ struct xsdfec_config { __s8 code_wr_protect; }; +/** + * struct xsdfec_ldpc_param_table_sizes - Used to store sizes of SD-FEC table + * entries for an individual LDPC code + * parameter.
+ * @sc_size: Size of SC table used + * @la_size: Size of LA table used + * @qc_size: Size of QC table used + */ +struct xsdfec_ldpc_param_table_sizes { + __u32 sc_size; + __u32 la_size; + __u32 qc_size; +}; + /* * XSDFEC IOCTL List */ @@ -189,6 +266,27 @@ struct xsdfec_config { * This can only be used when the driver is in the XSDFEC_STOPPED state */ #define XSDFEC_SET_TURBO _IOW(XSDFEC_MAGIC, 4, struct xsdfec_turbo) +/** + * DOC: XSDFEC_ADD_LDPC_CODE_PARAMS + * @Parameters + * + * @struct xsdfec_ldpc_params * + * Pointer to the &struct xsdfec_ldpc_params that contains the LDPC code + * parameters to be added to the SD-FEC Block + * + * @Description + * ioctl to add an LDPC code to the SD-FEC LDPC codes + * + * This can only be used when: + * + * - Driver is in the XSDFEC_STOPPED state + * + * - SD-FEC core is configured as LDPC + * + * - SD-FEC Code Write Protection is disabled + */ +#define XSDFEC_ADD_LDPC_CODE_PARAMS \ + _IOW(XSDFEC_MAGIC, 5, struct xsdfec_ldpc_params) /** * DOC: XSDFEC_GET_TURBO * @Parameters -- cgit v1.2.3 From 77dd39d924e650cd20696d790f861dfe26e0cb64 Mon Sep 17 00:00:00 2001 From: Dragan Cvetic Date: Sat, 27 Jul 2019 09:33:54 +0100 Subject: misc: xilinx_sdfec: Add ability to get/set config - Add capability to get SD-FEC config data using ioctl XSDFEC_GET_CONFIG. - Add capability to set SD-FEC data order using ioctl XSDFEC_SET_ORDER. - Add capability to set SD-FEC bypass option using ioctl XSDFEC_SET_BYPASS. - Add capability to get SD-FEC active state using ioctl XSDFEC_IS_ACTIVE. Tested-by: Dragan Cvetic Signed-off-by: Derek Kiernan Signed-off-by: Dragan Cvetic Link: https://lore.kernel.org/r/1564216438-322406-5-git-send-email-dragan.cvetic@xilinx.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/xilinx_sdfec.c | 88 ++++++++++++++++++++++++++++++++++++++++ include/uapi/misc/xilinx_sdfec.h | 57 ++++++++++++++++++++++++++ 2 files changed, 145 insertions(+) (limited to 'include/uapi') diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c index 9be4de07eee2..579f23617dd7 100644 --- a/drivers/misc/xilinx_sdfec.c +++ b/drivers/misc/xilinx_sdfec.c @@ -273,6 +273,17 @@ static void update_config_from_hw(struct xsdfec_dev *xsdfec) xsdfec->state = XSDFEC_STOPPED; } +static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg) +{ + int err; + + err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config)); + if (err) + err = -EFAULT; + + return err; +} + static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg) { struct xsdfec_turbo turbo; @@ -591,6 +602,71 @@ err_out: return ret; } +static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg) +{ + bool order_invalid; + enum xsdfec_order order; + int err; + + err = get_user(order, (enum xsdfec_order *)arg); + if (err) + return -EFAULT; + + order_invalid = (order != XSDFEC_MAINTAIN_ORDER) && + (order != XSDFEC_OUT_OF_ORDER); + if (order_invalid) + return -EINVAL; + + /* Verify Device has not started */ + if (xsdfec->state == XSDFEC_STARTED) + return -EIO; + + xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order); + + xsdfec->config.order = order; + + return 0; +} + +static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg) +{ + bool bypass; + int err; + + err = get_user(bypass, arg); + if (err) + return -EFAULT; + + /* Verify Device has not started */ + if (xsdfec->state == XSDFEC_STARTED) + return -EIO; + + if (bypass) + xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1); + else + xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0); + + xsdfec->config.bypass =
+
+	return 0;
+}
+
+static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
+{
+	u32 reg_value;
+	bool is_active;
+	int err;
+
+	reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
+	/* using a double ! operator instead of casting */
+	is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
+	err = put_user(is_active, arg);
+	if (err)
+		return -EFAULT;
+
+	return err;
+}
+
 static u32
 xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
 {
@@ -681,6 +757,9 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
 	}
 
 	switch (cmd) {
+	case XSDFEC_GET_CONFIG:
+		rval = xsdfec_get_config(xsdfec, arg);
+		break;
 	case XSDFEC_SET_TURBO:
 		rval = xsdfec_set_turbo(xsdfec, arg);
 		break;
@@ -690,6 +769,15 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
 	case XSDFEC_ADD_LDPC_CODE_PARAMS:
 		rval = xsdfec_add_ldpc(xsdfec, arg);
 		break;
+	case XSDFEC_SET_ORDER:
+		rval = xsdfec_set_order(xsdfec, arg);
+		break;
+	case XSDFEC_SET_BYPASS:
+		rval = xsdfec_set_bypass(xsdfec, arg);
+		break;
+	case XSDFEC_IS_ACTIVE:
+		rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
+		break;
 	default:
 		/* Should not get here */
 		break;
diff --git a/include/uapi/misc/xilinx_sdfec.h b/include/uapi/misc/xilinx_sdfec.h
index ce532a5e3e81..b8897ce41821 100644
--- a/include/uapi/misc/xilinx_sdfec.h
+++ b/include/uapi/misc/xilinx_sdfec.h
@@ -287,6 +287,19 @@ struct xsdfec_ldpc_param_table_sizes {
  */
 #define XSDFEC_ADD_LDPC_CODE_PARAMS \
 	_IOW(XSDFEC_MAGIC, 5, struct xsdfec_ldpc_params)
+/**
+ * DOC: XSDFEC_GET_CONFIG
+ * @Parameters
+ *
+ * @struct xsdfec_config *
+ *	Pointer to the &struct xsdfec_config that contains the current
+ *	configuration settings of the SD-FEC Block
+ *
+ * @Description
+ *
+ * ioctl that returns SD-FEC core configuration
+ */
+#define XSDFEC_GET_CONFIG _IOR(XSDFEC_MAGIC, 6, struct xsdfec_config)
 /**
  * DOC: XSDFEC_GET_TURBO
  * @Parameters
@@ -300,4 +313,48 @@ struct xsdfec_ldpc_param_table_sizes {
  * ioctl that returns SD-FEC turbo param values
  */
 #define XSDFEC_GET_TURBO _IOR(XSDFEC_MAGIC, 7, struct xsdfec_turbo)
+/**
+ * DOC: XSDFEC_SET_ORDER
+ * @Parameters
+ *
+ * @struct unsigned long *
+ *	Pointer to the unsigned long that contains a value from the
+ *	@enum xsdfec_order
+ *
+ * @Description
+ *
+ * ioctl that sets order, if order of blocks can change from input to output
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_SET_ORDER _IOW(XSDFEC_MAGIC, 8, unsigned long)
+/**
+ * DOC: XSDFEC_SET_BYPASS
+ * @Parameters
+ *
+ * @struct bool *
+ *	Pointer to bool that sets the bypass value, where false results in
+ *	normal operation and true results in the SD-FEC performing the
+ *	configured operations (same number of cycles) but output data matches
+ *	the input data
+ *
+ * @Description
+ *
+ * ioctl that sets bypass.
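+ *
+ * An illustrative usage sketch (assumption: fd is an open SD-FEC device
+ * node; the command takes a pointer to a bool):
+ *
+ *	bool bypass = true;
+ *	ioctl(fd, XSDFEC_SET_BYPASS, &bypass);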
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_SET_BYPASS _IOW(XSDFEC_MAGIC, 9, bool)
+/**
+ * DOC: XSDFEC_IS_ACTIVE
+ * @Parameters
+ *
+ * @struct bool *
+ *	Pointer to bool that returns true if the SD-FEC is processing data
+ *
+ * @Description
+ *
+ * ioctl that determines if SD-FEC is processing data
+ */
+#define XSDFEC_IS_ACTIVE _IOR(XSDFEC_MAGIC, 10, bool)
 #endif /* __XILINX_SDFEC_H__ */
-- cgit v1.2.3

From cc538f609dee49b73545569c49e3abd891fdd8b3 Mon Sep 17 00:00:00 2001
From: Dragan Cvetic
Date: Sat, 27 Jul 2019 09:33:55 +0100
Subject: misc: xilinx_sdfec: Support poll file operation

Support monitoring and detecting the SD-FEC error events through IRQ and
the poll file operation.

The SD-FEC device can detect one-error or multi-error events. An error
triggers an interrupt which creates and runs the ONE_SHOT IRQ thread.
The ONE_SHOT IRQ thread detects the type of error and passes that
information to the poll function. The file_operation callback poll()
collects the events and updates the statistics accordingly. The poll()
function blocks on a wait queue which can be unblocked by the ONE_SHOT
IRQ handling thread.

Support the SD-FEC interrupt set ioctl callback. The SD-FEC can detect
two types of errors: coding errors (ECC) and data interface errors
(TLAST). The errors are events which can trigger an IRQ if enabled. The
driver can monitor and detect these errors through IRQ. The driver also
updates the statistical data.

Tested-by: Dragan Cvetic
Signed-off-by: Derek Kiernan
Signed-off-by: Dragan Cvetic
Link: https://lore.kernel.org/r/1564216438-322406-6-git-send-email-dragan.cvetic@xilinx.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/xilinx_sdfec.c      | 240 ++++++++++++++++++++++++++++++++++++++-
 include/uapi/misc/xilinx_sdfec.h |  13 +++
 2 files changed, 249 insertions(+), 4 deletions(-)

(limited to 'include/uapi')

diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 579f23617dd7..61775c5626ed 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -191,27 +191,43 @@ struct xsdfec_clks {
  * struct xsdfec_dev - Driver data for SDFEC
  * @miscdev: Misc device handle
  * @clks: Clocks managed by the SDFEC driver
- * @regs: device physical base address
- * @dev: pointer to device struct
+ * @waitq: Driver wait queue
  * @config: Configuration of the SDFEC device
  * @dev_name: Device name
+ * @flags: spinlock flags
+ * @regs: device physical base address
+ * @dev: pointer to device struct
  * @state: State of the SDFEC device
  * @error_data_lock: Error counter and states spinlock
  * @dev_id: Device ID
+ * @isr_err_count: Count of ISR errors
+ * @cecc_count: Count of Correctable ECC errors (SBE)
+ * @uecc_count: Count of Uncorrectable ECC errors (MBE)
+ * @irq: IRQ number
+ * @state_updated: indicates State updated by interrupt handler
+ * @stats_updated: indicates Stats updated by interrupt handler
  *
  * This structure contains necessary state for SDFEC driver to operate
  */
 struct xsdfec_dev {
 	struct miscdevice miscdev;
 	struct xsdfec_clks clks;
-	void __iomem *regs;
-	struct device *dev;
+	wait_queue_head_t waitq;
 	struct xsdfec_config config;
 	char dev_name[DEV_NAME_LEN];
+	unsigned long flags;
+	void __iomem *regs;
+	struct device *dev;
 	enum xsdfec_state state;
 	/* Spinlock to protect state_updated and stats_updated */
 	spinlock_t error_data_lock;
 	int dev_id;
+	u32 isr_err_count;
+	u32 cecc_count;
+	u32 uecc_count;
+	int irq;
+	bool state_updated;
+	bool stats_updated;
 };
 
 static inline void xsdfec_regwrite(struct
xsdfec_dev *xsdfec, u32 addr, @@ -284,6 +300,90 @@ static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg) return err; } +static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable) +{ + u32 mask_read; + + if (enable) { + /* Enable */ + xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK); + mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); + if (mask_read & XSDFEC_ISR_MASK) { + dev_dbg(xsdfec->dev, + "SDFEC enabling irq with IER failed"); + return -EIO; + } + } else { + /* Disable */ + xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK); + mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); + if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) { + dev_dbg(xsdfec->dev, + "SDFEC disabling irq with IDR failed"); + return -EIO; + } + } + return 0; +} + +static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable) +{ + u32 mask_read; + + if (enable) { + /* Enable */ + xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR, + XSDFEC_ALL_ECC_ISR_MASK); + mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); + if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) { + dev_dbg(xsdfec->dev, + "SDFEC enabling ECC irq with ECC IER failed"); + return -EIO; + } + } else { + /* Disable */ + xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR, + XSDFEC_ALL_ECC_ISR_MASK); + mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); + if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) == + XSDFEC_ECC_ISR_MASK) || + ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) == + XSDFEC_PL_INIT_ECC_ISR_MASK))) { + dev_dbg(xsdfec->dev, + "SDFEC disable ECC irq with ECC IDR failed"); + return -EIO; + } + } + return 0; +} + +static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg) +{ + struct xsdfec_irq irq; + int err; + int isr_err; + int ecc_err; + + err = copy_from_user(&irq, arg, sizeof(irq)); + if (err) + return -EFAULT; + + /* Setup tlast related IRQ */ + isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr); + if (!isr_err) + xsdfec->config.irq.enable_isr = irq.enable_isr; + + /* Setup ECC related IRQ */ + ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr); + if (!ecc_err) + xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr; + + if (isr_err < 0 || ecc_err < 0) + err = -EIO; + + return err; +} + static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg) { struct xsdfec_turbo turbo; @@ -760,6 +860,9 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, case XSDFEC_GET_CONFIG: rval = xsdfec_get_config(xsdfec, arg); break; + case XSDFEC_SET_IRQ: + rval = xsdfec_set_irq(xsdfec, arg); + break; case XSDFEC_SET_TURBO: rval = xsdfec_set_turbo(xsdfec, arg); break; @@ -793,11 +896,36 @@ static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd, } #endif +static unsigned int xsdfec_poll(struct file *file, poll_table *wait) +{ + unsigned int mask = 0; + struct xsdfec_dev *xsdfec; + + xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev); + + if (!xsdfec) + return POLLNVAL | POLLHUP; + + poll_wait(file, &xsdfec->waitq, wait); + + /* XSDFEC ISR detected an error */ + spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); + if (xsdfec->state_updated) + mask |= POLLIN | POLLPRI; + + if (xsdfec->stats_updated) + mask |= POLLIN | POLLRDNORM; + spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); + + return mask; +} + static const struct file_operations xsdfec_fops = { .owner = THIS_MODULE, .open = xsdfec_dev_open, .release = xsdfec_dev_release, .unlocked_ioctl = xsdfec_dev_ioctl, + .poll = xsdfec_poll, #ifdef CONFIG_COMPAT 
 	.compat_ioctl = xsdfec_dev_compat_ioctl,
 #endif
@@ -883,6 +1011,91 @@ static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
 	return 0;
 }
 
+static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
+{
+	struct xsdfec_dev *xsdfec = dev_id;
+	irqreturn_t ret = IRQ_HANDLED;
+	u32 ecc_err;
+	u32 isr_err;
+	u32 uecc_count;
+	u32 cecc_count;
+	u32 isr_err_count;
+	u32 aecc_count;
+	u32 tmp;
+
+	WARN_ON(xsdfec->irq != irq);
+
+	/* Mask Interrupts */
+	xsdfec_isr_enable(xsdfec, false);
+	xsdfec_ecc_isr_enable(xsdfec, false);
+	/* Read ISR */
+	ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
+	isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
+	/* Clear the interrupts */
+	xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
+	xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);
+
+	tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
+	/* Count uncorrectable 2-bit errors */
+	uecc_count = hweight32(tmp);
+	/* Count all ECC errors */
+	aecc_count = hweight32(ecc_err);
+	/* Number of correctable 1-bit ECC errors */
+	cecc_count = aecc_count - 2 * uecc_count;
+	/* Count ISR errors */
+	isr_err_count = hweight32(isr_err);
+	dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
+		uecc_count, aecc_count, cecc_count, isr_err_count);
+	dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
+		xsdfec->cecc_count, xsdfec->isr_err_count);
+
+	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+	/* Add new errors to the 2-bit (uncorrectable) error counter */
+	if (uecc_count)
+		xsdfec->uecc_count += uecc_count;
+	/* Add new errors to the 1-bit (correctable) error counter */
+	if (cecc_count)
+		xsdfec->cecc_count += cecc_count;
+	/* Add new errors to the ISR error counter */
+	if (isr_err_count)
+		xsdfec->isr_err_count += isr_err_count;
+
+	/* Update state/stats flag */
+	if (uecc_count) {
+		if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
+			xsdfec->state = XSDFEC_NEEDS_RESET;
+		else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+			xsdfec->state = XSDFEC_PL_RECONFIGURE;
+		xsdfec->stats_updated = true;
+		xsdfec->state_updated = true;
+	}
+
+	if (cecc_count)
+		xsdfec->stats_updated = true;
+
+	if (isr_err_count) {
+		xsdfec->state = XSDFEC_NEEDS_RESET;
+		xsdfec->stats_updated = true;
+		xsdfec->state_updated = true;
+	}
+
+	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+	dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
+		xsdfec->stats_updated);
+
+	/* Wake up the poll queue if there is an update to report */
+	if (xsdfec->state_updated || xsdfec->stats_updated)
+		wake_up_interruptible(&xsdfec->waitq);
+	else
+		ret = IRQ_NONE;
+
+	/* Unmask Interrupts */
+	xsdfec_isr_enable(xsdfec, true);
+	xsdfec_ecc_isr_enable(xsdfec, true);
+
+	return ret;
+}
+
 static int xsdfec_clk_init(struct platform_device *pdev,
 			   struct xsdfec_clks *clks)
 {
@@ -1049,6 +1262,7 @@ static int xsdfec_probe(struct platform_device *pdev)
 	struct device *dev;
 	struct resource *res;
 	int err;
+	bool irq_enabled = true;
 
 	xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
 	if (!xsdfec)
@@ -1069,6 +1283,12 @@ static int xsdfec_probe(struct platform_device *pdev)
 		goto err_xsdfec_dev;
 	}
 
+	xsdfec->irq = platform_get_irq(pdev, 0);
+	if (xsdfec->irq < 0) {
+		dev_dbg(dev, "platform_get_irq failed");
+		irq_enabled = false;
+	}
+
 	err = xsdfec_parse_of(xsdfec);
 	if (err < 0)
 		goto err_xsdfec_dev;
@@ -1078,6 +1298,18 @@ static int xsdfec_probe(struct platform_device *pdev)
 	/* Save driver private data */
 	platform_set_drvdata(pdev, xsdfec);
 
+	if (irq_enabled) {
+		init_waitqueue_head(&xsdfec->waitq);
+		/* Register IRQ thread */
+		err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
+						xsdfec_irq_thread, IRQF_ONESHOT,
+						"xilinx-sdfec16", xsdfec);
+		if (err < 0) {
+			dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
+			goto err_xsdfec_dev;
+		}
+	}
+
 	mutex_lock(&dev_idr_lock);
 	err = idr_alloc(&dev_idr, xsdfec->dev_name, 0, 0, GFP_KERNEL);
 	mutex_unlock(&dev_idr_lock);
diff --git a/include/uapi/misc/xilinx_sdfec.h b/include/uapi/misc/xilinx_sdfec.h
index b8897ce41821..85ee288dbb33 100644
--- a/include/uapi/misc/xilinx_sdfec.h
+++ b/include/uapi/misc/xilinx_sdfec.h
@@ -251,6 +251,19 @@ struct xsdfec_ldpc_param_table_sizes {
  * XSDFEC IOCTL List
  */
 #define XSDFEC_MAGIC	'f'
+/**
+ * DOC: XSDFEC_SET_IRQ
+ * @Parameters
+ *
+ * @struct xsdfec_irq *
+ *	Pointer to the &struct xsdfec_irq that contains the interrupt settings
+ *	for the SD-FEC core
+ *
+ * @Description
+ *
+ * ioctl to enable or disable irq
+ */
+#define XSDFEC_SET_IRQ	_IOW(XSDFEC_MAGIC, 3, struct xsdfec_irq)
 /**
  * DOC: XSDFEC_SET_TURBO
  * @Parameters
-- cgit v1.2.3

From 6bd6a690c2e7e710aa7ccefa4edc83f14099907e Mon Sep 17 00:00:00 2001
From: Dragan Cvetic
Date: Sat, 27 Jul 2019 09:33:56 +0100
Subject: misc: xilinx_sdfec: Add stats & status ioctls

SD-FEC statistic data are:
- count of data interface errors (isr_err_count)
- count of Correctable ECC errors (cecc_count)
- count of Uncorrectable ECC errors (uecc_count)

Add support:
1. clear stats ioctl callback which clears the collected statistic data,
2. get stats ioctl callback which reads the collected statistic data,
3. set default configuration ioctl callback,
4. start ioctl callback enables SD-FEC HW,
5. stop ioctl callback disables SD-FEC HW.

In a failed state the driver enables the following ioctls:
- get status
- get statistics
- clear stats
- set default SD-FEC device configuration

Tested-by: Santhosh Dyavanapally
Tested-by: Punnaiah Choudary Kalluri
Tested-by: Derek Kiernan
Tested-by: Dragan Cvetic
Signed-off-by: Derek Kiernan
Signed-off-by: Dragan Cvetic
Link: https://lore.kernel.org/r/1564216438-322406-7-git-send-email-dragan.cvetic@xilinx.com
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/xilinx_sdfec.c      | 125 +++++++++++++++++++++++++++++++++++++++
 include/uapi/misc/xilinx_sdfec.h |  75 +++++++++++++++++++++++
 2 files changed, 200 insertions(+)

(limited to 'include/uapi')

diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 61775c5626ed..45f127ac12f6 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -206,6 +206,7 @@ struct xsdfec_clks {
  * @irq: IRQ number
  * @state_updated: indicates State updated by interrupt handler
  * @stats_updated: indicates Stats updated by interrupt handler
+ * @intr_enabled: indicates IRQ enabled
  *
  * This structure contains necessary state for SDFEC driver to operate
  */
@@ -228,6 +229,7 @@ struct xsdfec_dev {
 	int irq;
 	bool state_updated;
 	bool stats_updated;
+	bool intr_enabled;
 };
 
 static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
@@ -289,6 +291,25 @@ static void update_config_from_hw(struct xsdfec_dev *xsdfec)
 	xsdfec->state = XSDFEC_STOPPED;
 }
 
+static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+	struct xsdfec_status status;
+	int err;
+
+	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+	status.state = xsdfec->state;
+	xsdfec->state_updated = false;
+	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+	status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
+			   XSDFEC_IS_ACTIVITY_SET);
+
+	err = copy_to_user(arg, &status, sizeof(status));
+	if (err)
+		err = -EFAULT;
+
+	return err;
+}
+
 static int
xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg) { int err; @@ -840,6 +861,82 @@ static int xsdfec_dev_release(struct inode *iptr, struct file *fptr) return 0; } +static int xsdfec_start(struct xsdfec_dev *xsdfec) +{ + u32 regread; + + regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR); + regread &= 0x1; + if (regread != xsdfec->config.code) { + dev_dbg(xsdfec->dev, + "%s SDFEC HW code does not match driver code, reg %d, code %d", + __func__, regread, xsdfec->config.code); + return -EINVAL; + } + + /* Set AXIS enable */ + xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, + XSDFEC_AXIS_ENABLE_MASK); + /* Done */ + xsdfec->state = XSDFEC_STARTED; + return 0; +} + +static int xsdfec_stop(struct xsdfec_dev *xsdfec) +{ + u32 regread; + + if (xsdfec->state != XSDFEC_STARTED) + dev_dbg(xsdfec->dev, "Device not started correctly"); + /* Disable AXIS_ENABLE Input interfaces only */ + regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR); + regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK); + xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread); + /* Stop */ + xsdfec->state = XSDFEC_STOPPED; + return 0; +} + +static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec) +{ + spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); + xsdfec->isr_err_count = 0; + xsdfec->uecc_count = 0; + xsdfec->cecc_count = 0; + spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); + + return 0; +} + +static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg) +{ + int err; + struct xsdfec_stats user_stats; + + spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); + user_stats.isr_err_count = xsdfec->isr_err_count; + user_stats.cecc_count = xsdfec->cecc_count; + user_stats.uecc_count = xsdfec->uecc_count; + xsdfec->stats_updated = false; + spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); + + err = copy_to_user(arg, &user_stats, sizeof(user_stats)); + if (err) + err = -EFAULT; + + return err; +} + +static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec) +{ + /* Ensure registers are aligned with core configuration */ + xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code); + xsdfec_cfg_axi_streams(xsdfec); + update_config_from_hw(xsdfec); + + return 0; +} + static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, unsigned long data) { @@ -849,6 +946,16 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev); + /* In failed state allow only reset and get status IOCTLs */ + if (xsdfec->state == XSDFEC_NEEDS_RESET && + (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS && + cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) { + return -EPERM; + } + + if (_IOC_TYPE(cmd) != XSDFEC_MAGIC) + return -ENOTTY; + /* check if ioctl argument is present and valid */ if (_IOC_DIR(cmd) != _IOC_NONE) { arg = (void __user *)data; @@ -857,9 +964,27 @@ static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd, } switch (cmd) { + case XSDFEC_START_DEV: + rval = xsdfec_start(xsdfec); + break; + case XSDFEC_STOP_DEV: + rval = xsdfec_stop(xsdfec); + break; + case XSDFEC_CLEAR_STATS: + rval = xsdfec_clear_stats(xsdfec); + break; + case XSDFEC_GET_STATS: + rval = xsdfec_get_stats(xsdfec, arg); + break; + case XSDFEC_GET_STATUS: + rval = xsdfec_get_status(xsdfec, arg); + break; case XSDFEC_GET_CONFIG: rval = xsdfec_get_config(xsdfec, arg); break; + case XSDFEC_SET_DEFAULT_CONFIG: + rval = xsdfec_set_default_config(xsdfec); + break; case XSDFEC_SET_IRQ: 
 		rval = xsdfec_set_irq(xsdfec, arg);
 		break;
diff --git a/include/uapi/misc/xilinx_sdfec.h b/include/uapi/misc/xilinx_sdfec.h
index 85ee288dbb33..ee1a42ae6f46 100644
--- a/include/uapi/misc/xilinx_sdfec.h
+++ b/include/uapi/misc/xilinx_sdfec.h
@@ -233,6 +233,21 @@ struct xsdfec_config {
 	__s8 code_wr_protect;
 };
 
+/**
+ * struct xsdfec_stats - Stats retrieved by ioctl XSDFEC_GET_STATS. Used
+ *			 to buffer the error counters from struct
+ *			 xsdfec_dev. Counts are accumulated until
+ *			 the user clears them.
+ * @isr_err_count: Count of ISR errors
+ * @cecc_count: Count of Correctable ECC errors (SBE)
+ * @uecc_count: Count of Uncorrectable ECC errors (MBE)
+ */
+struct xsdfec_stats {
+	__u32 isr_err_count;
+	__u32 cecc_count;
+	__u32 uecc_count;
+};
+
 /**
  * struct xsdfec_ldpc_param_table_sizes - Used to store sizes of SD-FEC table
  *					  entries for an individual LDPC code
@@ -251,6 +266,32 @@ struct xsdfec_ldpc_param_table_sizes {
  * XSDFEC IOCTL List
  */
 #define XSDFEC_MAGIC	'f'
+/**
+ * DOC: XSDFEC_START_DEV
+ *
+ * @Description
+ *
+ * ioctl to start SD-FEC core
+ *
+ * This fails if the XSDFEC_SET_ORDER ioctl has not been previously called
+ */
+#define XSDFEC_START_DEV	_IO(XSDFEC_MAGIC, 0)
+/**
+ * DOC: XSDFEC_STOP_DEV
+ *
+ * @Description
+ *
+ * ioctl to stop the SD-FEC core
+ */
+#define XSDFEC_STOP_DEV		_IO(XSDFEC_MAGIC, 1)
+/**
+ * DOC: XSDFEC_GET_STATUS
+ *
+ * @Description
+ *
+ * ioctl that returns status of SD-FEC core
+ */
+#define XSDFEC_GET_STATUS	_IOR(XSDFEC_MAGIC, 2, struct xsdfec_status)
 /**
  * DOC: XSDFEC_SET_IRQ
  * @Parameters
@@ -370,4 +411,38 @@ struct xsdfec_ldpc_param_table_sizes {
  * ioctl that determines if SD-FEC is processing data
  */
 #define XSDFEC_IS_ACTIVE _IOR(XSDFEC_MAGIC, 10, bool)
+/**
+ * DOC: XSDFEC_CLEAR_STATS
+ *
+ * @Description
+ *
+ * ioctl that clears error stats collected during interrupts
+ */
+#define XSDFEC_CLEAR_STATS	_IO(XSDFEC_MAGIC, 11)
+/**
+ * DOC: XSDFEC_GET_STATS
+ * @Parameters
+ *
+ * @struct xsdfec_stats *
+ *	Pointer to the &struct xsdfec_stats that will contain the updated stats
+ *	values
+ *
+ * @Description
+ *
+ * ioctl that returns SD-FEC core stats
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_GET_STATS	_IOR(XSDFEC_MAGIC, 12, struct xsdfec_stats)
+/**
+ * DOC: XSDFEC_SET_DEFAULT_CONFIG
+ *
+ * @Description
+ *
+ * ioctl that returns SD-FEC core to default config, use after a reset
+ *
+ * This can only be used when the driver is in the XSDFEC_STOPPED state
+ */
+#define XSDFEC_SET_DEFAULT_CONFIG	_IO(XSDFEC_MAGIC, 13)
+
 #endif /* __XILINX_SDFEC_H__ */
-- cgit v1.2.3

From 3a9477a06c6acb4234407b59999835a6f12f889d Mon Sep 17 00:00:00 2001
From: Pierre-Louis Bossart
Date: Thu, 15 Aug 2019 10:50:30 -0500
Subject: ASoC: SOF: ipc: add ALH parameters

The only configuration parameter is the ALH stream ID. No range checking
is done by the driver; the firmware should check that the stream is valid
for the specific hardware.
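As a minimal illustrative sketch (not taken from the patch itself; the
header usage and the stream ID value are assumptions), a host-side IPC
payload for the new ALH DAI configuration might be filled like this:

	/* Hypothetical example: describe an ALH DAI using the new struct */
	struct sof_ipc_dai_alh_params alh = {
		.hdr.size = sizeof(alh),  /* assumes sof_ipc_hdr carries only a size */
		.stream_id = 5,           /* example value; the firmware validates it */
	};
	/* .reserved[15] stays zeroed for forward compatibility */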
Bump the ABI Minor number to keep the alignment with SOF firmware Signed-off-by: Pierre-Louis Bossart Link: https://lore.kernel.org/r/20190815155032.29181-3-pierre-louis.bossart@linux.intel.com Signed-off-by: Mark Brown --- include/sound/sof/dai-intel.h | 9 +++++++++ include/sound/sof/dai.h | 1 + include/uapi/sound/sof/abi.h | 2 +- 3 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h index a81afd3fbd41..b03e2ec08694 100644 --- a/include/sound/sof/dai-intel.h +++ b/include/sound/sof/dai-intel.h @@ -179,4 +179,13 @@ struct sof_ipc_dai_dmic_params { struct sof_ipc_dai_dmic_pdm_ctrl pdm[0]; } __packed; +/* ALH Configuration Request - SOF_IPC_DAI_ALH_CONFIG */ +struct sof_ipc_dai_alh_params { + struct sof_ipc_hdr hdr; + uint32_t stream_id; + + /* reserved for future use */ + uint32_t reserved[15]; +} __packed; + #endif diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h index 3d174e20aa53..da9825ad41d4 100644 --- a/include/sound/sof/dai.h +++ b/include/sound/sof/dai.h @@ -70,6 +70,7 @@ struct sof_ipc_dai_config { struct sof_ipc_dai_ssp_params ssp; struct sof_ipc_dai_dmic_params dmic; struct sof_ipc_dai_hda_params hda; + struct sof_ipc_dai_alh_params alh; }; } __packed; diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h index dff70a42445a..a0fe0d4c4b66 100644 --- a/include/uapi/sound/sof/abi.h +++ b/include/uapi/sound/sof/abi.h @@ -26,7 +26,7 @@ /* SOF ABI version major, minor and patch numbers */ #define SOF_ABI_MAJOR 3 -#define SOF_ABI_MINOR 9 +#define SOF_ABI_MINOR 10 #define SOF_ABI_PATCH 0 /* SOF ABI version number. Format within 32bit word is MMmmmppp */ -- cgit v1.2.3 From f59b16ef4ccea1b52f1c4e4c60ce507dc0bcc0ad Mon Sep 17 00:00:00 2001 From: Daniel Baluta Date: Thu, 15 Aug 2019 14:20:15 -0500 Subject: ASoC: SOF: topology: Add dummy support for i.MX8 DAIs Add dummy support for SAI/ESAI digital audio interface IPs found on i.MX8 boards. 
Signed-off-by: Daniel Baluta Signed-off-by: Pierre-Louis Bossart Link: https://lore.kernel.org/r/20190815192018.30570-2-pierre-louis.bossart@linux.intel.com Signed-off-by: Mark Brown --- include/sound/sof/dai.h | 2 ++ include/uapi/sound/sof/tokens.h | 8 ++++++++ sound/soc/sof/topology.c | 30 ++++++++++++++++++++++++++++++ 3 files changed, 40 insertions(+) (limited to 'include/uapi') diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h index da9825ad41d4..86494294274e 100644 --- a/include/sound/sof/dai.h +++ b/include/sound/sof/dai.h @@ -50,6 +50,8 @@ enum sof_ipc_dai_type { SOF_DAI_INTEL_DMIC, /**< Intel DMIC */ SOF_DAI_INTEL_HDA, /**< Intel HD/A */ SOF_DAI_INTEL_SOUNDWIRE, /**< Intel SoundWire */ + SOF_DAI_IMX_SAI, /**< i.MX SAI */ + SOF_DAI_IMX_ESAI, /**< i.MX ESAI */ }; /* general purpose DAI configuration */ diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h index 6435240cef13..8f996857fb24 100644 --- a/include/uapi/sound/sof/tokens.h +++ b/include/uapi/sound/sof/tokens.h @@ -106,4 +106,12 @@ /* for backward compatibility */ #define SOF_TKN_EFFECT_TYPE SOF_TKN_PROCESS_TYPE +/* SAI */ +#define SOF_TKN_IMX_SAI_FIRST_TOKEN 1000 +/* TODO: Add SAI tokens */ + +/* ESAI */ +#define SOF_TKN_IMX_ESAI_FIRST_TOKEN 1100 +/* TODO: Add ESAI tokens */ + #endif diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c index 9cffea142395..a215bf58b138 100644 --- a/sound/soc/sof/topology.c +++ b/sound/soc/sof/topology.c @@ -346,6 +346,8 @@ static const struct sof_dai_types sof_dais[] = { {"SSP", SOF_DAI_INTEL_SSP}, {"HDA", SOF_DAI_INTEL_HDA}, {"DMIC", SOF_DAI_INTEL_DMIC}, + {"SAI", SOF_DAI_IMX_SAI}, + {"ESAI", SOF_DAI_IMX_ESAI}, }; static enum sof_ipc_dai_type find_dai(const char *name) @@ -2513,6 +2515,26 @@ static int sof_link_ssp_load(struct snd_soc_component *scomp, int index, return ret; } +static int sof_link_sai_load(struct snd_soc_component *scomp, int index, + struct snd_soc_dai_link *link, + struct snd_soc_tplg_link_config *cfg, + struct snd_soc_tplg_hw_config *hw_config, + struct sof_ipc_dai_config *config) +{ + /*TODO: Add implementation */ + return 0; +} + +static int sof_link_esai_load(struct snd_soc_component *scomp, int index, + struct snd_soc_dai_link *link, + struct snd_soc_tplg_link_config *cfg, + struct snd_soc_tplg_hw_config *hw_config, + struct sof_ipc_dai_config *config) +{ + /*TODO: Add implementation */ + return 0; +} + static int sof_link_dmic_load(struct snd_soc_component *scomp, int index, struct snd_soc_dai_link *link, struct snd_soc_tplg_link_config *cfg, @@ -2837,6 +2859,14 @@ static int sof_link_load(struct snd_soc_component *scomp, int index, ret = sof_link_hda_load(scomp, index, link, cfg, hw_config, &config); break; + case SOF_DAI_IMX_SAI: + ret = sof_link_sai_load(scomp, index, link, cfg, hw_config, + &config); + break; + case SOF_DAI_IMX_ESAI: + ret = sof_link_esai_load(scomp, index, link, cfg, hw_config, + &config); + break; default: dev_err(sdev->dev, "error: invalid DAI type %d\n", config.type); ret = -EINVAL; -- cgit v1.2.3 From 5e58109b1ea454b93e455e0e8fc0bc4c226b8c0a Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sat, 17 Aug 2019 16:28:14 +0300 Subject: drop_monitor: Add support for packet alert mode for hardware drops In a similar fashion to software drops, extend drop monitor to send netlink events when packets are dropped by the underlying hardware. The main difference is that instead of encoding the program counter (PC) from which kfree_skb() was called in the netlink message, we encode the hardware trap name. 
The two are mostly equivalent since they should both help the user understand why the packet was dropped. Signed-off-by: Ido Schimmel Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- include/uapi/linux/net_dropmon.h | 10 ++ net/core/drop_monitor.c | 304 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 310 insertions(+), 4 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h index 405b31cbf723..9f8fb1bb4aa4 100644 --- a/include/uapi/linux/net_dropmon.h +++ b/include/uapi/linux/net_dropmon.h @@ -83,6 +83,10 @@ enum net_dm_attr { NET_DM_ATTR_ORIG_LEN, /* u32 */ NET_DM_ATTR_QUEUE_LEN, /* u32 */ NET_DM_ATTR_STATS, /* nested */ + NET_DM_ATTR_HW_STATS, /* nested */ + NET_DM_ATTR_ORIGIN, /* u16 */ + NET_DM_ATTR_HW_TRAP_GROUP_NAME, /* string */ + NET_DM_ATTR_HW_TRAP_NAME, /* string */ __NET_DM_ATTR_MAX, NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1 @@ -101,6 +105,7 @@ enum net_dm_alert_mode { enum { NET_DM_ATTR_PORT_NETDEV_IFINDEX, /* u32 */ + NET_DM_ATTR_PORT_NETDEV_NAME, /* string */ __NET_DM_ATTR_PORT_MAX, NET_DM_ATTR_PORT_MAX = __NET_DM_ATTR_PORT_MAX - 1 @@ -113,4 +118,9 @@ enum { NET_DM_ATTR_STATS_MAX = __NET_DM_ATTR_STATS_MAX - 1 }; +enum net_dm_origin { + NET_DM_ORIGIN_SW, + NET_DM_ORIGIN_HW, +}; + #endif diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index a2c7f9162c9d..5a950b5af8fd 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -95,12 +95,16 @@ struct net_dm_alert_ops { void (*napi_poll_probe)(void *ignore, struct napi_struct *napi, int work, int budget); void (*work_item_func)(struct work_struct *work); + void (*hw_work_item_func)(struct work_struct *work); void (*hw_probe)(struct sk_buff *skb, const struct net_dm_hw_metadata *hw_metadata); }; struct net_dm_skb_cb { - void *pc; + union { + struct net_dm_hw_metadata *hw_metadata; + void *pc; + }; }; #define NET_DM_SKB_CB(__skb) ((struct net_dm_skb_cb *)&((__skb)->cb[0])) @@ -335,7 +339,9 @@ static size_t net_dm_in_port_size(void) /* NET_DM_ATTR_IN_PORT nest */ return nla_total_size(0) + /* NET_DM_ATTR_PORT_NETDEV_IFINDEX */ - nla_total_size(sizeof(u32)); + nla_total_size(sizeof(u32)) + + /* NET_DM_ATTR_PORT_NETDEV_NAME */ + nla_total_size(IFNAMSIZ + 1); } #define NET_DM_MAX_SYMBOL_LEN 40 @@ -347,6 +353,8 @@ static size_t net_dm_packet_report_size(size_t payload_len) size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize); return NLMSG_ALIGN(size) + + /* NET_DM_ATTR_ORIGIN */ + nla_total_size(sizeof(u16)) + /* NET_DM_ATTR_PC */ nla_total_size(sizeof(u64)) + /* NET_DM_ATTR_SYMBOL */ @@ -363,7 +371,8 @@ static size_t net_dm_packet_report_size(size_t payload_len) nla_total_size(payload_len); } -static int net_dm_packet_report_in_port_put(struct sk_buff *msg, int ifindex) +static int net_dm_packet_report_in_port_put(struct sk_buff *msg, int ifindex, + const char *name) { struct nlattr *attr; @@ -375,6 +384,9 @@ static int net_dm_packet_report_in_port_put(struct sk_buff *msg, int ifindex) nla_put_u32(msg, NET_DM_ATTR_PORT_NETDEV_IFINDEX, ifindex)) goto nla_put_failure; + if (name && nla_put_string(msg, NET_DM_ATTR_PORT_NETDEV_NAME, name)) + goto nla_put_failure; + nla_nest_end(msg, attr); return 0; @@ -399,6 +411,9 @@ static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb, if (!hdr) return -EMSGSIZE; + if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_SW)) + goto nla_put_failure; + if (nla_put_u64_64bit(msg, NET_DM_ATTR_PC, pc, NET_DM_ATTR_PAD)) goto nla_put_failure; @@ -406,7 +421,7 @@ 
static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb, if (nla_put_string(msg, NET_DM_ATTR_SYMBOL, buf)) goto nla_put_failure; - rc = net_dm_packet_report_in_port_put(msg, skb->skb_iif); + rc = net_dm_packet_report_in_port_put(msg, skb->skb_iif, NULL); if (rc) goto nla_put_failure; @@ -493,16 +508,249 @@ static void net_dm_packet_work(struct work_struct *work) net_dm_packet_report(skb); } +static size_t +net_dm_hw_packet_report_size(size_t payload_len, + const struct net_dm_hw_metadata *hw_metadata) +{ + size_t size; + + size = nlmsg_msg_size(GENL_HDRLEN + net_drop_monitor_family.hdrsize); + + return NLMSG_ALIGN(size) + + /* NET_DM_ATTR_ORIGIN */ + nla_total_size(sizeof(u16)) + + /* NET_DM_ATTR_HW_TRAP_GROUP_NAME */ + nla_total_size(strlen(hw_metadata->trap_group_name) + 1) + + /* NET_DM_ATTR_HW_TRAP_NAME */ + nla_total_size(strlen(hw_metadata->trap_name) + 1) + + /* NET_DM_ATTR_IN_PORT */ + net_dm_in_port_size() + + /* NET_DM_ATTR_TIMESTAMP */ + nla_total_size(sizeof(struct timespec)) + + /* NET_DM_ATTR_ORIG_LEN */ + nla_total_size(sizeof(u32)) + + /* NET_DM_ATTR_PROTO */ + nla_total_size(sizeof(u16)) + + /* NET_DM_ATTR_PAYLOAD */ + nla_total_size(payload_len); +} + +static int net_dm_hw_packet_report_fill(struct sk_buff *msg, + struct sk_buff *skb, size_t payload_len) +{ + struct net_dm_hw_metadata *hw_metadata; + struct nlattr *attr; + struct timespec ts; + void *hdr; + + hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata; + + hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0, + NET_DM_CMD_PACKET_ALERT); + if (!hdr) + return -EMSGSIZE; + + if (nla_put_u16(msg, NET_DM_ATTR_ORIGIN, NET_DM_ORIGIN_HW)) + goto nla_put_failure; + + if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_GROUP_NAME, + hw_metadata->trap_group_name)) + goto nla_put_failure; + + if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME, + hw_metadata->trap_name)) + goto nla_put_failure; + + if (hw_metadata->input_dev) { + struct net_device *dev = hw_metadata->input_dev; + int rc; + + rc = net_dm_packet_report_in_port_put(msg, dev->ifindex, + dev->name); + if (rc) + goto nla_put_failure; + } + + if (ktime_to_timespec_cond(skb->tstamp, &ts) && + nla_put(msg, NET_DM_ATTR_TIMESTAMP, sizeof(ts), &ts)) + goto nla_put_failure; + + if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len)) + goto nla_put_failure; + + if (!payload_len) + goto out; + + if (nla_put_u16(msg, NET_DM_ATTR_PROTO, be16_to_cpu(skb->protocol))) + goto nla_put_failure; + + attr = skb_put(msg, nla_total_size(payload_len)); + attr->nla_type = NET_DM_ATTR_PAYLOAD; + attr->nla_len = nla_attr_size(payload_len); + if (skb_copy_bits(skb, 0, nla_data(attr), payload_len)) + goto nla_put_failure; + +out: + genlmsg_end(msg, hdr); + + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static struct net_dm_hw_metadata * +net_dm_hw_metadata_clone(const struct net_dm_hw_metadata *hw_metadata) +{ + struct net_dm_hw_metadata *n_hw_metadata; + const char *trap_group_name; + const char *trap_name; + + n_hw_metadata = kmalloc(sizeof(*hw_metadata), GFP_ATOMIC); + if (!n_hw_metadata) + return NULL; + + trap_group_name = kmemdup(hw_metadata->trap_group_name, + strlen(hw_metadata->trap_group_name) + 1, + GFP_ATOMIC | __GFP_ZERO); + if (!trap_group_name) + goto free_hw_metadata; + n_hw_metadata->trap_group_name = trap_group_name; + + trap_name = kmemdup(hw_metadata->trap_name, + strlen(hw_metadata->trap_name) + 1, + GFP_ATOMIC | __GFP_ZERO); + if (!trap_name) + goto free_trap_group; + n_hw_metadata->trap_name = trap_name; + + 
n_hw_metadata->input_dev = hw_metadata->input_dev; + if (n_hw_metadata->input_dev) + dev_hold(n_hw_metadata->input_dev); + + return n_hw_metadata; + +free_trap_group: + kfree(trap_group_name); +free_hw_metadata: + kfree(n_hw_metadata); + return NULL; +} + +static void +net_dm_hw_metadata_free(const struct net_dm_hw_metadata *hw_metadata) +{ + if (hw_metadata->input_dev) + dev_put(hw_metadata->input_dev); + kfree(hw_metadata->trap_name); + kfree(hw_metadata->trap_group_name); + kfree(hw_metadata); +} + +static void net_dm_hw_packet_report(struct sk_buff *skb) +{ + struct net_dm_hw_metadata *hw_metadata; + struct sk_buff *msg; + size_t payload_len; + int rc; + + if (skb->data > skb_mac_header(skb)) + skb_push(skb, skb->data - skb_mac_header(skb)); + else + skb_pull(skb, skb_mac_header(skb) - skb->data); + + payload_len = min_t(size_t, skb->len, NET_DM_MAX_PACKET_SIZE); + if (net_dm_trunc_len) + payload_len = min_t(size_t, net_dm_trunc_len, payload_len); + + hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata; + msg = nlmsg_new(net_dm_hw_packet_report_size(payload_len, hw_metadata), + GFP_KERNEL); + if (!msg) + goto out; + + rc = net_dm_hw_packet_report_fill(msg, skb, payload_len); + if (rc) { + nlmsg_free(msg); + goto out; + } + + genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL); + +out: + net_dm_hw_metadata_free(NET_DM_SKB_CB(skb)->hw_metadata); + consume_skb(skb); +} + +static void net_dm_hw_packet_work(struct work_struct *work) +{ + struct per_cpu_dm_data *hw_data; + struct sk_buff_head list; + struct sk_buff *skb; + unsigned long flags; + + hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work); + + __skb_queue_head_init(&list); + + spin_lock_irqsave(&hw_data->drop_queue.lock, flags); + skb_queue_splice_tail_init(&hw_data->drop_queue, &list); + spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags); + + while ((skb = __skb_dequeue(&list))) + net_dm_hw_packet_report(skb); +} + static void net_dm_hw_packet_probe(struct sk_buff *skb, const struct net_dm_hw_metadata *hw_metadata) { + struct net_dm_hw_metadata *n_hw_metadata; + ktime_t tstamp = ktime_get_real(); + struct per_cpu_dm_data *hw_data; + struct sk_buff *nskb; + unsigned long flags; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return; + + n_hw_metadata = net_dm_hw_metadata_clone(hw_metadata); + if (!n_hw_metadata) + goto free; + + NET_DM_SKB_CB(nskb)->hw_metadata = n_hw_metadata; + nskb->tstamp = tstamp; + + hw_data = this_cpu_ptr(&dm_hw_cpu_data); + + spin_lock_irqsave(&hw_data->drop_queue.lock, flags); + if (skb_queue_len(&hw_data->drop_queue) < net_dm_queue_len) + __skb_queue_tail(&hw_data->drop_queue, nskb); + else + goto unlock_free; + spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags); + + schedule_work(&hw_data->dm_alert_work); + + return; + +unlock_free: + spin_unlock_irqrestore(&hw_data->drop_queue.lock, flags); + u64_stats_update_begin(&hw_data->stats.syncp); + hw_data->stats.dropped++; + u64_stats_update_end(&hw_data->stats.syncp); + net_dm_hw_metadata_free(n_hw_metadata); +free: + consume_skb(nskb); } static const struct net_dm_alert_ops net_dm_alert_packet_ops = { .kfree_skb_probe = net_dm_packet_trace_kfree_skb_hit, .napi_poll_probe = net_dm_packet_trace_napi_poll_hit, .work_item_func = net_dm_packet_work, + .hw_work_item_func = net_dm_hw_packet_work, .hw_probe = net_dm_hw_packet_probe, }; @@ -819,6 +1067,50 @@ nla_put_failure: return -EMSGSIZE; } +static void net_dm_hw_stats_read(struct net_dm_stats *stats) +{ + int cpu; + + memset(stats, 0, sizeof(*stats)); + 
for_each_possible_cpu(cpu) { + struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); + struct net_dm_stats *cpu_stats = &hw_data->stats; + unsigned int start; + u64 dropped; + + do { + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + dropped = cpu_stats->dropped; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + stats->dropped += dropped; + } +} + +static int net_dm_hw_stats_put(struct sk_buff *msg) +{ + struct net_dm_stats stats; + struct nlattr *attr; + + net_dm_hw_stats_read(&stats); + + attr = nla_nest_start(msg, NET_DM_ATTR_HW_STATS); + if (!attr) + return -EMSGSIZE; + + if (nla_put_u64_64bit(msg, NET_DM_ATTR_STATS_DROPPED, + stats.dropped, NET_DM_ATTR_PAD)) + goto nla_put_failure; + + nla_nest_end(msg, attr); + + return 0; + +nla_put_failure: + nla_nest_cancel(msg, attr); + return -EMSGSIZE; +} + static int net_dm_stats_fill(struct sk_buff *msg, struct genl_info *info) { void *hdr; @@ -833,6 +1125,10 @@ static int net_dm_stats_fill(struct sk_buff *msg, struct genl_info *info) if (rc) goto nla_put_failure; + rc = net_dm_hw_stats_put(msg); + if (rc) + goto nla_put_failure; + genlmsg_end(msg, hdr); return 0; -- cgit v1.2.3 From d40e1deb930f4bd51a5214983e50aafc26db686e Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sat, 17 Aug 2019 16:28:15 +0300 Subject: drop_monitor: Add support for summary alert mode for hardware drops In summary alert mode a notification is sent with a list of recent drop reasons and a count of how many packets were dropped due to this reason. To avoid expensive operations in the context in which packets are dropped, each CPU holds an array whose number of entries is the maximum number of drop reasons that can be encoded in the netlink notification. Each entry stores the drop reason and a count. When a packet is dropped the array is traversed and a new entry is created or the count of an existing entry is incremented. Later, in process context, the array is replaced with a newly allocated copy and the old array is encoded in a netlink notification. To avoid breaking user space, the notification includes the ancillary header, which is 'struct net_dm_alert_msg' with number of entries set to '0'. Signed-off-by: Ido Schimmel Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- include/uapi/linux/net_dropmon.h | 3 + net/core/drop_monitor.c | 195 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 196 insertions(+), 2 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h index 9f8fb1bb4aa4..3bddc9ec978c 100644 --- a/include/uapi/linux/net_dropmon.h +++ b/include/uapi/linux/net_dropmon.h @@ -87,6 +87,9 @@ enum net_dm_attr { NET_DM_ATTR_ORIGIN, /* u16 */ NET_DM_ATTR_HW_TRAP_GROUP_NAME, /* string */ NET_DM_ATTR_HW_TRAP_NAME, /* string */ + NET_DM_ATTR_HW_ENTRIES, /* nested */ + NET_DM_ATTR_HW_ENTRY, /* nested */ + NET_DM_ATTR_HW_TRAP_COUNT, /* u32 */ __NET_DM_ATTR_MAX, NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1 diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 5a950b5af8fd..807c79d606aa 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -58,9 +58,26 @@ struct net_dm_stats { struct u64_stats_sync syncp; }; +#define NET_DM_MAX_HW_TRAP_NAME_LEN 40 + +struct net_dm_hw_entry { + char trap_name[NET_DM_MAX_HW_TRAP_NAME_LEN]; + u32 count; +}; + +struct net_dm_hw_entries { + u32 num_entries; + struct net_dm_hw_entry entries[0]; +}; + struct per_cpu_dm_data { - spinlock_t lock; /* Protects 'skb' and 'send_timer' */ - struct sk_buff *skb; + spinlock_t lock; /* Protects 'skb', 'hw_entries' and + * 'send_timer' + */ + union { + struct sk_buff *skb; + struct net_dm_hw_entries *hw_entries; + }; struct sk_buff_head drop_queue; struct work_struct dm_alert_work; struct timer_list send_timer; @@ -275,16 +292,189 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi, rcu_read_unlock(); } +static struct net_dm_hw_entries * +net_dm_hw_reset_per_cpu_data(struct per_cpu_dm_data *hw_data) +{ + struct net_dm_hw_entries *hw_entries; + unsigned long flags; + + hw_entries = kzalloc(struct_size(hw_entries, entries, dm_hit_limit), + GFP_KERNEL); + if (!hw_entries) { + /* If the memory allocation failed, we try to perform another + * allocation in 1/10 second. Otherwise, the probe function + * will constantly bail out. 
+ */ + mod_timer(&hw_data->send_timer, jiffies + HZ / 10); + } + + spin_lock_irqsave(&hw_data->lock, flags); + swap(hw_data->hw_entries, hw_entries); + spin_unlock_irqrestore(&hw_data->lock, flags); + + return hw_entries; +} + +static int net_dm_hw_entry_put(struct sk_buff *msg, + const struct net_dm_hw_entry *hw_entry) +{ + struct nlattr *attr; + + attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRY); + if (!attr) + return -EMSGSIZE; + + if (nla_put_string(msg, NET_DM_ATTR_HW_TRAP_NAME, hw_entry->trap_name)) + goto nla_put_failure; + + if (nla_put_u32(msg, NET_DM_ATTR_HW_TRAP_COUNT, hw_entry->count)) + goto nla_put_failure; + + nla_nest_end(msg, attr); + + return 0; + +nla_put_failure: + nla_nest_cancel(msg, attr); + return -EMSGSIZE; +} + +static int net_dm_hw_entries_put(struct sk_buff *msg, + const struct net_dm_hw_entries *hw_entries) +{ + struct nlattr *attr; + int i; + + attr = nla_nest_start(msg, NET_DM_ATTR_HW_ENTRIES); + if (!attr) + return -EMSGSIZE; + + for (i = 0; i < hw_entries->num_entries; i++) { + int rc; + + rc = net_dm_hw_entry_put(msg, &hw_entries->entries[i]); + if (rc) + goto nla_put_failure; + } + + nla_nest_end(msg, attr); + + return 0; + +nla_put_failure: + nla_nest_cancel(msg, attr); + return -EMSGSIZE; +} + +static int +net_dm_hw_summary_report_fill(struct sk_buff *msg, + const struct net_dm_hw_entries *hw_entries) +{ + struct net_dm_alert_msg anc_hdr = { 0 }; + void *hdr; + int rc; + + hdr = genlmsg_put(msg, 0, 0, &net_drop_monitor_family, 0, + NET_DM_CMD_ALERT); + if (!hdr) + return -EMSGSIZE; + + /* We need to put the ancillary header in order not to break user + * space. + */ + if (nla_put(msg, NLA_UNSPEC, sizeof(anc_hdr), &anc_hdr)) + goto nla_put_failure; + + rc = net_dm_hw_entries_put(msg, hw_entries); + if (rc) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static void net_dm_hw_summary_work(struct work_struct *work) +{ + struct net_dm_hw_entries *hw_entries; + struct per_cpu_dm_data *hw_data; + struct sk_buff *msg; + int rc; + + hw_data = container_of(work, struct per_cpu_dm_data, dm_alert_work); + + hw_entries = net_dm_hw_reset_per_cpu_data(hw_data); + if (!hw_entries) + return; + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + goto out; + + rc = net_dm_hw_summary_report_fill(msg, hw_entries); + if (rc) { + nlmsg_free(msg); + goto out; + } + + genlmsg_multicast(&net_drop_monitor_family, msg, 0, 0, GFP_KERNEL); + +out: + kfree(hw_entries); +} + static void net_dm_hw_summary_probe(struct sk_buff *skb, const struct net_dm_hw_metadata *hw_metadata) { + struct net_dm_hw_entries *hw_entries; + struct net_dm_hw_entry *hw_entry; + struct per_cpu_dm_data *hw_data; + unsigned long flags; + int i; + + hw_data = this_cpu_ptr(&dm_hw_cpu_data); + spin_lock_irqsave(&hw_data->lock, flags); + hw_entries = hw_data->hw_entries; + + if (!hw_entries) + goto out; + + for (i = 0; i < hw_entries->num_entries; i++) { + hw_entry = &hw_entries->entries[i]; + if (!strncmp(hw_entry->trap_name, hw_metadata->trap_name, + NET_DM_MAX_HW_TRAP_NAME_LEN - 1)) { + hw_entry->count++; + goto out; + } + } + if (WARN_ON_ONCE(hw_entries->num_entries == dm_hit_limit)) + goto out; + + hw_entry = &hw_entries->entries[hw_entries->num_entries]; + strlcpy(hw_entry->trap_name, hw_metadata->trap_name, + NET_DM_MAX_HW_TRAP_NAME_LEN - 1); + hw_entry->count = 1; + hw_entries->num_entries++; + + if (!timer_pending(&hw_data->send_timer)) { + hw_data->send_timer.expires = jiffies + dm_delay * HZ; + 
add_timer(&hw_data->send_timer); + } + +out: + spin_unlock_irqrestore(&hw_data->lock, flags); } static const struct net_dm_alert_ops net_dm_alert_summary_ops = { .kfree_skb_probe = trace_kfree_skb_hit, .napi_poll_probe = trace_napi_poll_hit, .work_item_func = send_dm_alert, + .hw_work_item_func = net_dm_hw_summary_work, .hw_probe = net_dm_hw_summary_probe, }; @@ -1309,6 +1499,7 @@ static void net_dm_hw_cpu_data_fini(int cpu) struct per_cpu_dm_data *hw_data; hw_data = &per_cpu(dm_hw_cpu_data, cpu); + kfree(hw_data->hw_entries); __net_dm_cpu_data_fini(hw_data); } -- cgit v1.2.3 From 8e94c3bc922e70225bd35891c8e6002bddd984eb Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sat, 17 Aug 2019 16:28:16 +0300 Subject: drop_monitor: Allow user to start monitoring hardware drops Drop monitor has start and stop commands, but so far these were only used to start and stop monitoring of software drops. Now that drop monitor can also monitor hardware drops, we should allow the user to control these as well. Do that by adding SW and HW flags to these commands. If no flag is specified, then only start / stop monitoring software drops. This is done in order to maintain backward-compatibility with existing user space applications. Signed-off-by: Ido Schimmel Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- include/uapi/linux/net_dropmon.h | 2 + net/core/drop_monitor.c | 124 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 123 insertions(+), 3 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h index 3bddc9ec978c..75a35dccb675 100644 --- a/include/uapi/linux/net_dropmon.h +++ b/include/uapi/linux/net_dropmon.h @@ -90,6 +90,8 @@ enum net_dm_attr { NET_DM_ATTR_HW_ENTRIES, /* nested */ NET_DM_ATTR_HW_ENTRY, /* nested */ NET_DM_ATTR_HW_TRAP_COUNT, /* u32 */ + NET_DM_ATTR_SW_DROPS, /* flag */ + NET_DM_ATTR_HW_DROPS, /* flag */ __NET_DM_ATTR_MAX, NET_DM_ATTR_MAX = __NET_DM_ATTR_MAX - 1 diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 807c79d606aa..bfc024024aa3 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -952,13 +952,82 @@ static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = { void net_dm_hw_report(struct sk_buff *skb, const struct net_dm_hw_metadata *hw_metadata) { + rcu_read_lock(); + if (!monitor_hw) - return; + goto out; net_dm_alert_ops_arr[net_dm_alert_mode]->hw_probe(skb, hw_metadata); + +out: + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(net_dm_hw_report); +static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack) +{ + const struct net_dm_alert_ops *ops; + int cpu; + + if (monitor_hw) { + NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already enabled"); + return -EAGAIN; + } + + ops = net_dm_alert_ops_arr[net_dm_alert_mode]; + + if (!try_module_get(THIS_MODULE)) { + NL_SET_ERR_MSG_MOD(extack, "Failed to take reference on module"); + return -ENODEV; + } + + for_each_possible_cpu(cpu) { + struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); + struct net_dm_hw_entries *hw_entries; + + INIT_WORK(&hw_data->dm_alert_work, ops->hw_work_item_func); + timer_setup(&hw_data->send_timer, sched_send_work, 0); + hw_entries = net_dm_hw_reset_per_cpu_data(hw_data); + kfree(hw_entries); + } + + monitor_hw = true; + + return 0; +} + +static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack) +{ + int cpu; + + if (!monitor_hw) + NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled"); + + monitor_hw = false; + + /* After this call returns we are 
guaranteed that no CPU is processing + * any hardware drops. + */ + synchronize_rcu(); + + for_each_possible_cpu(cpu) { + struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu); + struct sk_buff *skb; + + del_timer_sync(&hw_data->send_timer); + cancel_work_sync(&hw_data->dm_alert_work); + while ((skb = __skb_dequeue(&hw_data->drop_queue))) { + struct net_dm_hw_metadata *hw_metadata; + + hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata; + net_dm_hw_metadata_free(hw_metadata); + consume_skb(skb); + } + } + + module_put(THIS_MODULE); +} + static int net_dm_trace_on_set(struct netlink_ext_ack *extack) { const struct net_dm_alert_ops *ops; @@ -1153,14 +1222,61 @@ static int net_dm_cmd_config(struct sk_buff *skb, return 0; } +static int net_dm_monitor_start(bool set_sw, bool set_hw, + struct netlink_ext_ack *extack) +{ + bool sw_set = false; + int rc; + + if (set_sw) { + rc = set_all_monitor_traces(TRACE_ON, extack); + if (rc) + return rc; + sw_set = true; + } + + if (set_hw) { + rc = net_dm_hw_monitor_start(extack); + if (rc) + goto err_monitor_hw; + } + + return 0; + +err_monitor_hw: + if (sw_set) + set_all_monitor_traces(TRACE_OFF, extack); + return rc; +} + +static void net_dm_monitor_stop(bool set_sw, bool set_hw, + struct netlink_ext_ack *extack) +{ + if (set_hw) + net_dm_hw_monitor_stop(extack); + if (set_sw) + set_all_monitor_traces(TRACE_OFF, extack); +} + static int net_dm_cmd_trace(struct sk_buff *skb, struct genl_info *info) { + bool set_sw = !!info->attrs[NET_DM_ATTR_SW_DROPS]; + bool set_hw = !!info->attrs[NET_DM_ATTR_HW_DROPS]; + struct netlink_ext_ack *extack = info->extack; + + /* To maintain backward compatibility, we start / stop monitoring of + * software drops if no flag is specified. + */ + if (!set_sw && !set_hw) + set_sw = true; + switch (info->genlhdr->cmd) { case NET_DM_CMD_START: - return set_all_monitor_traces(TRACE_ON, info->extack); + return net_dm_monitor_start(set_sw, set_hw, extack); case NET_DM_CMD_STOP: - return set_all_monitor_traces(TRACE_OFF, info->extack); + net_dm_monitor_stop(set_sw, set_hw, extack); + return 0; } return -EOPNOTSUPP; @@ -1392,6 +1508,8 @@ static const struct nla_policy net_dm_nl_policy[NET_DM_ATTR_MAX + 1] = { [NET_DM_ATTR_ALERT_MODE] = { .type = NLA_U8 }, [NET_DM_ATTR_TRUNC_LEN] = { .type = NLA_U32 }, [NET_DM_ATTR_QUEUE_LEN] = { .type = NLA_U32 }, + [NET_DM_ATTR_SW_DROPS] = {. type = NLA_FLAG }, + [NET_DM_ATTR_HW_DROPS] = {. type = NLA_FLAG }, }; static const struct genl_ops dropmon_ops[] = { -- cgit v1.2.3 From 0f420b6c52e9799f664429e739a421fb4c5527a3 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Sat, 17 Aug 2019 16:28:17 +0300 Subject: devlink: Add packet trap infrastructure Add the basic packet trap infrastructure that allows device drivers to register their supported packet traps and trap groups with devlink. Each driver is expected to provide basic information about each supported trap, such as name and ID, but also the supported metadata types that will accompany each packet trapped via the trap. The currently supported metadata type is just the input port, but more will be added in the future. For example, output port and traffic class. Trap groups allow users to set the action of all member traps. In addition, users can retrieve per-group statistics in case per-trap statistics are too narrow. In the future, the trap group object can be extended with more attributes, such as policer settings which will limit the amount of traffic generated by member traps towards the CPU. 
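As a minimal illustrative sketch (the group, trap ID, trap name and
metadata choice below are hypothetical; only the macros and functions come
from this patch), a driver might describe and register one driver-specific
trap like so:

	/* Sketch: one driver-specific drop trap in a driver-defined group */
	#define MY_TRAP_GROUP { .name = "my_drops", .id = 1, .generic = false }

	static const struct devlink_trap my_traps[] = {
		DEVLINK_TRAP_DRIVER(DROP, DROP, 1, "my_tail_drop", MY_TRAP_GROUP,
				    DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT),
	};

	/* At probe time, after the devlink instance is registered; 'priv' is
	 * driver context later retrievable via devlink_trap_ctx_priv():
	 */
	err = devlink_traps_register(devlink, my_traps, ARRAY_SIZE(my_traps), priv);

	/* When the device delivers a packet for a trap set to the 'trap' action: */
	devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);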
Beside registering their packet traps with devlink, drivers are also expected to report trapped packets to devlink along with relevant metadata. devlink will maintain packets and bytes statistics for each packet trap and will potentially report the trapped packet with its metadata to user space via drop monitor netlink channel. The interface towards the drivers is simple and allows devlink to set the action of the trap. Currently, only two actions are supported: 'trap' and 'drop'. When set to 'trap', the device is expected to provide the sole copy of the packet to the driver which will pass it to devlink. When set to 'drop', the device is expected to drop the packet and not send a copy to the driver. In the future, more actions can be added, such as 'mirror'. Signed-off-by: Ido Schimmel Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- include/net/devlink.h | 129 +++++ include/uapi/linux/devlink.h | 62 +++ net/Kconfig | 1 + net/core/devlink.c | 1068 +++++++++++++++++++++++++++++++++++++++++- 4 files changed, 1255 insertions(+), 5 deletions(-) (limited to 'include/uapi') diff --git a/include/net/devlink.h b/include/net/devlink.h index 451268f64880..03b32e33e93e 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -31,6 +32,8 @@ struct devlink { struct list_head reporter_list; struct mutex reporters_lock; /* protects reporter_list */ struct devlink_dpipe_headers *dpipe_headers; + struct list_head trap_list; + struct list_head trap_group_list; const struct devlink_ops *ops; struct device *dev; possible_net_t _net; @@ -497,6 +500,89 @@ struct devlink_health_reporter_ops { struct devlink_fmsg *fmsg); }; +/** + * struct devlink_trap_group - Immutable packet trap group attributes. + * @name: Trap group name. + * @id: Trap group identifier. + * @generic: Whether the trap group is generic or not. + * + * Describes immutable attributes of packet trap groups that drivers register + * with devlink. + */ +struct devlink_trap_group { + const char *name; + u16 id; + bool generic; +}; + +#define DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT BIT(0) + +/** + * struct devlink_trap - Immutable packet trap attributes. + * @type: Trap type. + * @init_action: Initial trap action. + * @generic: Whether the trap is generic or not. + * @id: Trap identifier. + * @name: Trap name. + * @group: Immutable packet trap group attributes. + * @metadata_cap: Metadata types that can be provided by the trap. + * + * Describes immutable attributes of packet traps that drivers register with + * devlink. 
+ */ +struct devlink_trap { + enum devlink_trap_type type; + enum devlink_trap_action init_action; + bool generic; + u16 id; + const char *name; + struct devlink_trap_group group; + u32 metadata_cap; +}; + +enum devlink_trap_generic_id { + /* Add new generic trap IDs above */ + __DEVLINK_TRAP_GENERIC_ID_MAX, + DEVLINK_TRAP_GENERIC_ID_MAX = __DEVLINK_TRAP_GENERIC_ID_MAX - 1, +}; + +enum devlink_trap_group_generic_id { + /* Add new generic trap group IDs above */ + __DEVLINK_TRAP_GROUP_GENERIC_ID_MAX, + DEVLINK_TRAP_GROUP_GENERIC_ID_MAX = + __DEVLINK_TRAP_GROUP_GENERIC_ID_MAX - 1, +}; + +#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group, _metadata_cap) \ + { \ + .type = DEVLINK_TRAP_TYPE_##_type, \ + .init_action = DEVLINK_TRAP_ACTION_##_init_action, \ + .generic = true, \ + .id = DEVLINK_TRAP_GENERIC_ID_##_id, \ + .name = DEVLINK_TRAP_GENERIC_NAME_##_id, \ + .group = _group, \ + .metadata_cap = _metadata_cap, \ + } + +#define DEVLINK_TRAP_DRIVER(_type, _init_action, _id, _name, _group, \ + _metadata_cap) \ + { \ + .type = DEVLINK_TRAP_TYPE_##_type, \ + .init_action = DEVLINK_TRAP_ACTION_##_init_action, \ + .generic = false, \ + .id = _id, \ + .name = _name, \ + .group = _group, \ + .metadata_cap = _metadata_cap, \ + } + +#define DEVLINK_TRAP_GROUP_GENERIC(_id) \ + { \ + .name = DEVLINK_TRAP_GROUP_GENERIC_NAME_##_id, \ + .id = DEVLINK_TRAP_GROUP_GENERIC_ID_##_id, \ + .generic = true, \ + } + struct devlink_ops { int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack); int (*port_type_set)(struct devlink_port *devlink_port, @@ -558,6 +644,38 @@ struct devlink_ops { int (*flash_update)(struct devlink *devlink, const char *file_name, const char *component, struct netlink_ext_ack *extack); + /** + * @trap_init: Trap initialization function. + * + * Should be used by device drivers to initialize the trap in the + * underlying device. Drivers should also store the provided trap + * context, so that they could efficiently pass it to + * devlink_trap_report() when the trap is triggered. + */ + int (*trap_init)(struct devlink *devlink, + const struct devlink_trap *trap, void *trap_ctx); + /** + * @trap_fini: Trap de-initialization function. + * + * Should be used by device drivers to de-initialize the trap in the + * underlying device. + */ + void (*trap_fini)(struct devlink *devlink, + const struct devlink_trap *trap, void *trap_ctx); + /** + * @trap_action_set: Trap action set function. + */ + int (*trap_action_set)(struct devlink *devlink, + const struct devlink_trap *trap, + enum devlink_trap_action action); + /** + * @trap_group_init: Trap group initialization function. + * + * Should be used by device drivers to initialize the trap group in the + * underlying device. 
+ */ + int (*trap_group_init)(struct devlink *devlink, + const struct devlink_trap_group *group); }; static inline void *devlink_priv(struct devlink *devlink) @@ -774,6 +892,17 @@ void devlink_flash_update_status_notify(struct devlink *devlink, unsigned long done, unsigned long total); +int devlink_traps_register(struct devlink *devlink, + const struct devlink_trap *traps, + size_t traps_count, void *priv); +void devlink_traps_unregister(struct devlink *devlink, + const struct devlink_trap *traps, + size_t traps_count); +void devlink_trap_report(struct devlink *devlink, + struct sk_buff *skb, void *trap_ctx, + struct devlink_port *in_devlink_port); +void *devlink_trap_ctx_priv(void *trap_ctx); + #if IS_ENABLED(CONFIG_NET_DEVLINK) void devlink_compat_running_version(struct net_device *dev, diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index ffc993256527..546e75dd74ac 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -107,6 +107,16 @@ enum devlink_command { DEVLINK_CMD_FLASH_UPDATE_END, /* notification only */ DEVLINK_CMD_FLASH_UPDATE_STATUS, /* notification only */ + DEVLINK_CMD_TRAP_GET, /* can dump */ + DEVLINK_CMD_TRAP_SET, + DEVLINK_CMD_TRAP_NEW, + DEVLINK_CMD_TRAP_DEL, + + DEVLINK_CMD_TRAP_GROUP_GET, /* can dump */ + DEVLINK_CMD_TRAP_GROUP_SET, + DEVLINK_CMD_TRAP_GROUP_NEW, + DEVLINK_CMD_TRAP_GROUP_DEL, + /* add new commands above here */ __DEVLINK_CMD_MAX, DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1 @@ -194,6 +204,47 @@ enum devlink_param_fw_load_policy_value { DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH, }; +enum { + DEVLINK_ATTR_STATS_RX_PACKETS, /* u64 */ + DEVLINK_ATTR_STATS_RX_BYTES, /* u64 */ + + __DEVLINK_ATTR_STATS_MAX, + DEVLINK_ATTR_STATS_MAX = __DEVLINK_ATTR_STATS_MAX - 1 +}; + +/** + * enum devlink_trap_action - Packet trap action. + * @DEVLINK_TRAP_ACTION_DROP: Packet is dropped by the device and a copy is not + * sent to the CPU. + * @DEVLINK_TRAP_ACTION_TRAP: The sole copy of the packet is sent to the CPU. + */ +enum devlink_trap_action { + DEVLINK_TRAP_ACTION_DROP, + DEVLINK_TRAP_ACTION_TRAP, +}; + +/** + * enum devlink_trap_type - Packet trap type. + * @DEVLINK_TRAP_TYPE_DROP: Trap reason is a drop. Trapped packets are only + * processed by devlink and not injected to the + * kernel's Rx path. + * @DEVLINK_TRAP_TYPE_EXCEPTION: Trap reason is an exception. Packet was not + * forwarded as intended due to an exception + * (e.g., missing neighbour entry) and trapped to + * control plane for resolution. Trapped packets + * are processed by devlink and injected to + * the kernel's Rx path. + */ +enum devlink_trap_type { + DEVLINK_TRAP_TYPE_DROP, + DEVLINK_TRAP_TYPE_EXCEPTION, +}; + +enum { + /* Trap can report input port as metadata */ + DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT, +}; + enum devlink_attr { /* don't change the order or add anything between, this is ABI! 
*/ DEVLINK_ATTR_UNSPEC, @@ -348,6 +399,17 @@ enum devlink_attr { DEVLINK_ATTR_PORT_PCI_PF_NUMBER, /* u16 */ DEVLINK_ATTR_PORT_PCI_VF_NUMBER, /* u16 */ + DEVLINK_ATTR_STATS, /* nested */ + + DEVLINK_ATTR_TRAP_NAME, /* string */ + /* enum devlink_trap_action */ + DEVLINK_ATTR_TRAP_ACTION, /* u8 */ + /* enum devlink_trap_type */ + DEVLINK_ATTR_TRAP_TYPE, /* u8 */ + DEVLINK_ATTR_TRAP_GENERIC, /* flag */ + DEVLINK_ATTR_TRAP_METADATA, /* nested */ + DEVLINK_ATTR_TRAP_GROUP_NAME, /* string */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/Kconfig b/net/Kconfig index 57f51a279ad6..3101bfcbdd7a 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -430,6 +430,7 @@ config NET_SOCK_MSG config NET_DEVLINK bool default n + imply NET_DROP_MONITOR config PAGE_POOL bool diff --git a/net/core/devlink.c b/net/core/devlink.c index d3dbb904bf3b..960ceaec98d6 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include #include @@ -25,6 +27,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -551,7 +554,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index)) goto nla_put_failure; - spin_lock(&devlink_port->type_lock); + spin_lock_bh(&devlink_port->type_lock); if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type)) goto nla_put_failure_type_locked; if (devlink_port->desired_type != DEVLINK_PORT_TYPE_NOTSET && @@ -576,7 +579,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, ibdev->name)) goto nla_put_failure_type_locked; } - spin_unlock(&devlink_port->type_lock); + spin_unlock_bh(&devlink_port->type_lock); if (devlink_nl_port_attrs_put(msg, devlink_port)) goto nla_put_failure; @@ -584,7 +587,7 @@ static int devlink_nl_port_fill(struct sk_buff *msg, struct devlink *devlink, return 0; nla_put_failure_type_locked: - spin_unlock(&devlink_port->type_lock); + spin_unlock_bh(&devlink_port->type_lock); nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; @@ -5154,6 +5157,571 @@ devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb, return 0; } +struct devlink_stats { + u64 rx_bytes; + u64 rx_packets; + struct u64_stats_sync syncp; +}; + +/** + * struct devlink_trap_group_item - Packet trap group attributes. + * @group: Immutable packet trap group attributes. + * @refcount: Number of trap items using the group. + * @list: trap_group_list member. + * @stats: Trap group statistics. + * + * Describes packet trap group attributes. Created by devlink during trap + * registration. + */ +struct devlink_trap_group_item { + const struct devlink_trap_group *group; + refcount_t refcount; + struct list_head list; + struct devlink_stats __percpu *stats; +}; + +/** + * struct devlink_trap_item - Packet trap attributes. + * @trap: Immutable packet trap attributes. + * @group_item: Associated group item. + * @list: trap_list member. + * @action: Trap action. + * @stats: Trap statistics. + * @priv: Driver private information. + * + * Describes both mutable and immutable packet trap attributes. Created by + * devlink during trap registration and used for all trap related operations. 
+ */ +struct devlink_trap_item { + const struct devlink_trap *trap; + struct devlink_trap_group_item *group_item; + struct list_head list; + enum devlink_trap_action action; + struct devlink_stats __percpu *stats; + void *priv; +}; + +static struct devlink_trap_item * +devlink_trap_item_lookup(struct devlink *devlink, const char *name) +{ + struct devlink_trap_item *trap_item; + + list_for_each_entry(trap_item, &devlink->trap_list, list) { + if (!strcmp(trap_item->trap->name, name)) + return trap_item; + } + + return NULL; +} + +static struct devlink_trap_item * +devlink_trap_item_get_from_info(struct devlink *devlink, + struct genl_info *info) +{ + struct nlattr *attr; + + if (!info->attrs[DEVLINK_ATTR_TRAP_NAME]) + return NULL; + attr = info->attrs[DEVLINK_ATTR_TRAP_NAME]; + + return devlink_trap_item_lookup(devlink, nla_data(attr)); +} + +static int +devlink_trap_action_get_from_info(struct genl_info *info, + enum devlink_trap_action *p_trap_action) +{ + u8 val; + + val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]); + switch (val) { + case DEVLINK_TRAP_ACTION_DROP: /* fall-through */ + case DEVLINK_TRAP_ACTION_TRAP: + *p_trap_action = val; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int devlink_trap_metadata_put(struct sk_buff *msg, + const struct devlink_trap *trap) +{ + struct nlattr *attr; + + attr = nla_nest_start(msg, DEVLINK_ATTR_TRAP_METADATA); + if (!attr) + return -EMSGSIZE; + + if ((trap->metadata_cap & DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT) && + nla_put_flag(msg, DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT)) + goto nla_put_failure; + + nla_nest_end(msg, attr); + + return 0; + +nla_put_failure: + nla_nest_cancel(msg, attr); + return -EMSGSIZE; +} + +static void devlink_trap_stats_read(struct devlink_stats __percpu *trap_stats, + struct devlink_stats *stats) +{ + int i; + + memset(stats, 0, sizeof(*stats)); + for_each_possible_cpu(i) { + struct devlink_stats *cpu_stats; + u64 rx_packets, rx_bytes; + unsigned int start; + + cpu_stats = per_cpu_ptr(trap_stats, i); + do { + start = u64_stats_fetch_begin_irq(&cpu_stats->syncp); + rx_packets = cpu_stats->rx_packets; + rx_bytes = cpu_stats->rx_bytes; + } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start)); + + stats->rx_packets += rx_packets; + stats->rx_bytes += rx_bytes; + } +} + +static int devlink_trap_stats_put(struct sk_buff *msg, + struct devlink_stats __percpu *trap_stats) +{ + struct devlink_stats stats; + struct nlattr *attr; + + devlink_trap_stats_read(trap_stats, &stats); + + attr = nla_nest_start(msg, DEVLINK_ATTR_STATS); + if (!attr) + return -EMSGSIZE; + + if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_PACKETS, + stats.rx_packets, DEVLINK_ATTR_PAD)) + goto nla_put_failure; + + if (nla_put_u64_64bit(msg, DEVLINK_ATTR_STATS_RX_BYTES, + stats.rx_bytes, DEVLINK_ATTR_PAD)) + goto nla_put_failure; + + nla_nest_end(msg, attr); + + return 0; + +nla_put_failure: + nla_nest_cancel(msg, attr); + return -EMSGSIZE; +} + +static int devlink_nl_trap_fill(struct sk_buff *msg, struct devlink *devlink, + const struct devlink_trap_item *trap_item, + enum devlink_command cmd, u32 portid, u32 seq, + int flags) +{ + struct devlink_trap_group_item *group_item = trap_item->group_item; + void *hdr; + int err; + + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); + if (!hdr) + return -EMSGSIZE; + + if (devlink_nl_put_handle(msg, devlink)) + goto nla_put_failure; + + if (nla_put_string(msg, DEVLINK_ATTR_TRAP_GROUP_NAME, + group_item->group->name)) + goto nla_put_failure; + + if 
(nla_put_string(msg, DEVLINK_ATTR_TRAP_NAME, trap_item->trap->name)) + goto nla_put_failure; + + if (nla_put_u8(msg, DEVLINK_ATTR_TRAP_TYPE, trap_item->trap->type)) + goto nla_put_failure; + + if (trap_item->trap->generic && + nla_put_flag(msg, DEVLINK_ATTR_TRAP_GENERIC)) + goto nla_put_failure; + + if (nla_put_u8(msg, DEVLINK_ATTR_TRAP_ACTION, trap_item->action)) + goto nla_put_failure; + + err = devlink_trap_metadata_put(msg, trap_item->trap); + if (err) + goto nla_put_failure; + + err = devlink_trap_stats_put(msg, trap_item->stats); + if (err) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int devlink_nl_cmd_trap_get_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct netlink_ext_ack *extack = info->extack; + struct devlink *devlink = info->user_ptr[0]; + struct devlink_trap_item *trap_item; + struct sk_buff *msg; + int err; + + if (list_empty(&devlink->trap_list)) + return -EOPNOTSUPP; + + trap_item = devlink_trap_item_get_from_info(devlink, info); + if (!trap_item) { + NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap"); + return -ENOENT; + } + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + err = devlink_nl_trap_fill(msg, devlink, trap_item, + DEVLINK_CMD_TRAP_NEW, info->snd_portid, + info->snd_seq, 0); + if (err) + goto err_trap_fill; + + return genlmsg_reply(msg, info); + +err_trap_fill: + nlmsg_free(msg); + return err; +} + +static int devlink_nl_cmd_trap_get_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) +{ + struct devlink_trap_item *trap_item; + struct devlink *devlink; + int start = cb->args[0]; + int idx = 0; + int err; + + mutex_lock(&devlink_mutex); + list_for_each_entry(devlink, &devlink_list, list) { + if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) + continue; + mutex_lock(&devlink->lock); + list_for_each_entry(trap_item, &devlink->trap_list, list) { + if (idx < start) { + idx++; + continue; + } + err = devlink_nl_trap_fill(msg, devlink, trap_item, + DEVLINK_CMD_TRAP_NEW, + NETLINK_CB(cb->skb).portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI); + if (err) { + mutex_unlock(&devlink->lock); + goto out; + } + idx++; + } + mutex_unlock(&devlink->lock); + } +out: + mutex_unlock(&devlink_mutex); + + cb->args[0] = idx; + return msg->len; +} + +static int __devlink_trap_action_set(struct devlink *devlink, + struct devlink_trap_item *trap_item, + enum devlink_trap_action trap_action, + struct netlink_ext_ack *extack) +{ + int err; + + if (trap_item->action != trap_action && + trap_item->trap->type != DEVLINK_TRAP_TYPE_DROP) { + NL_SET_ERR_MSG_MOD(extack, "Cannot change action of non-drop traps. 
Skipping"); + return 0; + } + + err = devlink->ops->trap_action_set(devlink, trap_item->trap, + trap_action); + if (err) + return err; + + trap_item->action = trap_action; + + return 0; +} + +static int devlink_trap_action_set(struct devlink *devlink, + struct devlink_trap_item *trap_item, + struct genl_info *info) +{ + enum devlink_trap_action trap_action; + int err; + + if (!info->attrs[DEVLINK_ATTR_TRAP_ACTION]) + return 0; + + err = devlink_trap_action_get_from_info(info, &trap_action); + if (err) { + NL_SET_ERR_MSG_MOD(info->extack, "Invalid trap action"); + return -EINVAL; + } + + return __devlink_trap_action_set(devlink, trap_item, trap_action, + info->extack); +} + +static int devlink_nl_cmd_trap_set_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct netlink_ext_ack *extack = info->extack; + struct devlink *devlink = info->user_ptr[0]; + struct devlink_trap_item *trap_item; + int err; + + if (list_empty(&devlink->trap_list)) + return -EOPNOTSUPP; + + trap_item = devlink_trap_item_get_from_info(devlink, info); + if (!trap_item) { + NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap"); + return -ENOENT; + } + + err = devlink_trap_action_set(devlink, trap_item, info); + if (err) + return err; + + return 0; +} + +static struct devlink_trap_group_item * +devlink_trap_group_item_lookup(struct devlink *devlink, const char *name) +{ + struct devlink_trap_group_item *group_item; + + list_for_each_entry(group_item, &devlink->trap_group_list, list) { + if (!strcmp(group_item->group->name, name)) + return group_item; + } + + return NULL; +} + +static struct devlink_trap_group_item * +devlink_trap_group_item_get_from_info(struct devlink *devlink, + struct genl_info *info) +{ + char *name; + + if (!info->attrs[DEVLINK_ATTR_TRAP_GROUP_NAME]) + return NULL; + name = nla_data(info->attrs[DEVLINK_ATTR_TRAP_GROUP_NAME]); + + return devlink_trap_group_item_lookup(devlink, name); +} + +static int +devlink_nl_trap_group_fill(struct sk_buff *msg, struct devlink *devlink, + const struct devlink_trap_group_item *group_item, + enum devlink_command cmd, u32 portid, u32 seq, + int flags) +{ + void *hdr; + int err; + + hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd); + if (!hdr) + return -EMSGSIZE; + + if (devlink_nl_put_handle(msg, devlink)) + goto nla_put_failure; + + if (nla_put_string(msg, DEVLINK_ATTR_TRAP_GROUP_NAME, + group_item->group->name)) + goto nla_put_failure; + + if (group_item->group->generic && + nla_put_flag(msg, DEVLINK_ATTR_TRAP_GENERIC)) + goto nla_put_failure; + + err = devlink_trap_stats_put(msg, group_item->stats); + if (err) + goto nla_put_failure; + + genlmsg_end(msg, hdr); + + return 0; + +nla_put_failure: + genlmsg_cancel(msg, hdr); + return -EMSGSIZE; +} + +static int devlink_nl_cmd_trap_group_get_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct netlink_ext_ack *extack = info->extack; + struct devlink *devlink = info->user_ptr[0]; + struct devlink_trap_group_item *group_item; + struct sk_buff *msg; + int err; + + if (list_empty(&devlink->trap_group_list)) + return -EOPNOTSUPP; + + group_item = devlink_trap_group_item_get_from_info(devlink, info); + if (!group_item) { + NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap group"); + return -ENOENT; + } + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + err = devlink_nl_trap_group_fill(msg, devlink, group_item, + DEVLINK_CMD_TRAP_GROUP_NEW, + info->snd_portid, info->snd_seq, 0); + if (err) + goto err_trap_group_fill; + + return 
genlmsg_reply(msg, info); + +err_trap_group_fill: + nlmsg_free(msg); + return err; +} + +static int devlink_nl_cmd_trap_group_get_dumpit(struct sk_buff *msg, + struct netlink_callback *cb) +{ + enum devlink_command cmd = DEVLINK_CMD_TRAP_GROUP_NEW; + struct devlink_trap_group_item *group_item; + u32 portid = NETLINK_CB(cb->skb).portid; + struct devlink *devlink; + int start = cb->args[0]; + int idx = 0; + int err; + + mutex_lock(&devlink_mutex); + list_for_each_entry(devlink, &devlink_list, list) { + if (!net_eq(devlink_net(devlink), sock_net(msg->sk))) + continue; + mutex_lock(&devlink->lock); + list_for_each_entry(group_item, &devlink->trap_group_list, + list) { + if (idx < start) { + idx++; + continue; + } + err = devlink_nl_trap_group_fill(msg, devlink, + group_item, cmd, + portid, + cb->nlh->nlmsg_seq, + NLM_F_MULTI); + if (err) { + mutex_unlock(&devlink->lock); + goto out; + } + idx++; + } + mutex_unlock(&devlink->lock); + } +out: + mutex_unlock(&devlink_mutex); + + cb->args[0] = idx; + return msg->len; +} + +static int +__devlink_trap_group_action_set(struct devlink *devlink, + struct devlink_trap_group_item *group_item, + enum devlink_trap_action trap_action, + struct netlink_ext_ack *extack) +{ + const char *group_name = group_item->group->name; + struct devlink_trap_item *trap_item; + int err; + + list_for_each_entry(trap_item, &devlink->trap_list, list) { + if (strcmp(trap_item->trap->group.name, group_name)) + continue; + err = __devlink_trap_action_set(devlink, trap_item, + trap_action, extack); + if (err) + return err; + } + + return 0; +} + +static int +devlink_trap_group_action_set(struct devlink *devlink, + struct devlink_trap_group_item *group_item, + struct genl_info *info) +{ + enum devlink_trap_action trap_action; + int err; + + if (!info->attrs[DEVLINK_ATTR_TRAP_ACTION]) + return 0; + + err = devlink_trap_action_get_from_info(info, &trap_action); + if (err) { + NL_SET_ERR_MSG_MOD(info->extack, "Invalid trap action"); + return -EINVAL; + } + + err = __devlink_trap_group_action_set(devlink, group_item, trap_action, + info->extack); + if (err) + return err; + + return 0; +} + +static int devlink_nl_cmd_trap_group_set_doit(struct sk_buff *skb, + struct genl_info *info) +{ + struct netlink_ext_ack *extack = info->extack; + struct devlink *devlink = info->user_ptr[0]; + struct devlink_trap_group_item *group_item; + int err; + + if (list_empty(&devlink->trap_group_list)) + return -EOPNOTSUPP; + + group_item = devlink_trap_group_item_get_from_info(devlink, info); + if (!group_item) { + NL_SET_ERR_MSG_MOD(extack, "Device did not register this trap group"); + return -ENOENT; + } + + err = devlink_trap_group_action_set(devlink, group_item, info); + if (err) + return err; + + return 0; +} + static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING }, @@ -5184,6 +5752,9 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = { [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 }, [DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME] = { .type = NLA_NUL_STRING }, [DEVLINK_ATTR_FLASH_UPDATE_COMPONENT] = { .type = NLA_NUL_STRING }, + [DEVLINK_ATTR_TRAP_NAME] = { .type = NLA_NUL_STRING }, + [DEVLINK_ATTR_TRAP_ACTION] = { .type = NLA_U8 }, + [DEVLINK_ATTR_TRAP_GROUP_NAME] = { .type = NLA_NUL_STRING }, }; static const struct genl_ops devlink_nl_ops[] = { @@ -5483,6 +6054,32 @@ static const struct genl_ops devlink_nl_ops[] = { .flags = GENL_ADMIN_PERM, 
.internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, }, + { + .cmd = DEVLINK_CMD_TRAP_GET, + .doit = devlink_nl_cmd_trap_get_doit, + .dumpit = devlink_nl_cmd_trap_get_dumpit, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = DEVLINK_CMD_TRAP_SET, + .doit = devlink_nl_cmd_trap_set_doit, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + }, + { + .cmd = DEVLINK_CMD_TRAP_GROUP_GET, + .doit = devlink_nl_cmd_trap_group_get_doit, + .dumpit = devlink_nl_cmd_trap_group_get_dumpit, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + /* can be retrieved by unprivileged users */ + }, + { + .cmd = DEVLINK_CMD_TRAP_GROUP_SET, + .doit = devlink_nl_cmd_trap_group_set_doit, + .flags = GENL_ADMIN_PERM, + .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK, + }, }; static struct genl_family devlink_nl_family __ro_after_init = { @@ -5528,6 +6125,8 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size) INIT_LIST_HEAD(&devlink->param_list); INIT_LIST_HEAD(&devlink->region_list); INIT_LIST_HEAD(&devlink->reporter_list); + INIT_LIST_HEAD(&devlink->trap_list); + INIT_LIST_HEAD(&devlink->trap_group_list); mutex_init(&devlink->lock); mutex_init(&devlink->reporters_lock); return devlink; @@ -5574,6 +6173,8 @@ void devlink_free(struct devlink *devlink) { mutex_destroy(&devlink->reporters_lock); mutex_destroy(&devlink->lock); + WARN_ON(!list_empty(&devlink->trap_group_list)); + WARN_ON(!list_empty(&devlink->trap_list)); WARN_ON(!list_empty(&devlink->reporter_list)); WARN_ON(!list_empty(&devlink->region_list)); WARN_ON(!list_empty(&devlink->param_list)); @@ -5678,10 +6279,10 @@ static void __devlink_port_type_set(struct devlink_port *devlink_port, if (WARN_ON(!devlink_port->registered)) return; devlink_port_type_warn_cancel(devlink_port); - spin_lock(&devlink_port->type_lock); + spin_lock_bh(&devlink_port->type_lock); devlink_port->type = type; devlink_port->type_dev = type_dev; - spin_unlock(&devlink_port->type_lock); + spin_unlock_bh(&devlink_port->type_lock); devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW); } @@ -6834,6 +7435,463 @@ unlock: } EXPORT_SYMBOL_GPL(devlink_region_snapshot_create); +#define DEVLINK_TRAP(_id, _type) \ + { \ + .type = DEVLINK_TRAP_TYPE_##_type, \ + .id = DEVLINK_TRAP_GENERIC_ID_##_id, \ + .name = DEVLINK_TRAP_GENERIC_NAME_##_id, \ + } + +static const struct devlink_trap devlink_trap_generic[] = { +}; + +#define DEVLINK_TRAP_GROUP(_id) \ + { \ + .id = DEVLINK_TRAP_GROUP_GENERIC_ID_##_id, \ + .name = DEVLINK_TRAP_GROUP_GENERIC_NAME_##_id, \ + } + +static const struct devlink_trap_group devlink_trap_group_generic[] = { +}; + +static int devlink_trap_generic_verify(const struct devlink_trap *trap) +{ + if (trap->id > DEVLINK_TRAP_GENERIC_ID_MAX) + return -EINVAL; + + if (strcmp(trap->name, devlink_trap_generic[trap->id].name)) + return -EINVAL; + + if (trap->type != devlink_trap_generic[trap->id].type) + return -EINVAL; + + return 0; +} + +static int devlink_trap_driver_verify(const struct devlink_trap *trap) +{ + int i; + + if (trap->id <= DEVLINK_TRAP_GENERIC_ID_MAX) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(devlink_trap_generic); i++) { + if (!strcmp(trap->name, devlink_trap_generic[i].name)) + return -EEXIST; + } + + return 0; +} + +static int devlink_trap_verify(const struct devlink_trap *trap) +{ + if (!trap || !trap->name || !trap->group.name) + return -EINVAL; + + if (trap->generic) + return devlink_trap_generic_verify(trap); + else + return 
devlink_trap_driver_verify(trap); +} + +static int +devlink_trap_group_generic_verify(const struct devlink_trap_group *group) +{ + if (group->id > DEVLINK_TRAP_GROUP_GENERIC_ID_MAX) + return -EINVAL; + + if (strcmp(group->name, devlink_trap_group_generic[group->id].name)) + return -EINVAL; + + return 0; +} + +static int +devlink_trap_group_driver_verify(const struct devlink_trap_group *group) +{ + int i; + + if (group->id <= DEVLINK_TRAP_GROUP_GENERIC_ID_MAX) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(devlink_trap_group_generic); i++) { + if (!strcmp(group->name, devlink_trap_group_generic[i].name)) + return -EEXIST; + } + + return 0; +} + +static int devlink_trap_group_verify(const struct devlink_trap_group *group) +{ + if (group->generic) + return devlink_trap_group_generic_verify(group); + else + return devlink_trap_group_driver_verify(group); +} + +static void +devlink_trap_group_notify(struct devlink *devlink, + const struct devlink_trap_group_item *group_item, + enum devlink_command cmd) +{ + struct sk_buff *msg; + int err; + + WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_GROUP_NEW && + cmd != DEVLINK_CMD_TRAP_GROUP_DEL); + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + + err = devlink_nl_trap_group_fill(msg, devlink, group_item, cmd, 0, 0, + 0); + if (err) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), + msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); +} + +static struct devlink_trap_group_item * +devlink_trap_group_item_create(struct devlink *devlink, + const struct devlink_trap_group *group) +{ + struct devlink_trap_group_item *group_item; + int err; + + err = devlink_trap_group_verify(group); + if (err) + return ERR_PTR(err); + + group_item = kzalloc(sizeof(*group_item), GFP_KERNEL); + if (!group_item) + return ERR_PTR(-ENOMEM); + + group_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats); + if (!group_item->stats) { + err = -ENOMEM; + goto err_stats_alloc; + } + + group_item->group = group; + refcount_set(&group_item->refcount, 1); + + if (devlink->ops->trap_group_init) { + err = devlink->ops->trap_group_init(devlink, group); + if (err) + goto err_group_init; + } + + list_add_tail(&group_item->list, &devlink->trap_group_list); + devlink_trap_group_notify(devlink, group_item, + DEVLINK_CMD_TRAP_GROUP_NEW); + + return group_item; + +err_group_init: + free_percpu(group_item->stats); +err_stats_alloc: + kfree(group_item); + return ERR_PTR(err); +} + +static void +devlink_trap_group_item_destroy(struct devlink *devlink, + struct devlink_trap_group_item *group_item) +{ + devlink_trap_group_notify(devlink, group_item, + DEVLINK_CMD_TRAP_GROUP_DEL); + list_del(&group_item->list); + free_percpu(group_item->stats); + kfree(group_item); +} + +static struct devlink_trap_group_item * +devlink_trap_group_item_get(struct devlink *devlink, + const struct devlink_trap_group *group) +{ + struct devlink_trap_group_item *group_item; + + group_item = devlink_trap_group_item_lookup(devlink, group->name); + if (group_item) { + refcount_inc(&group_item->refcount); + return group_item; + } + + return devlink_trap_group_item_create(devlink, group); +} + +static void +devlink_trap_group_item_put(struct devlink *devlink, + struct devlink_trap_group_item *group_item) +{ + if (!refcount_dec_and_test(&group_item->refcount)) + return; + + devlink_trap_group_item_destroy(devlink, group_item); +} + +static int +devlink_trap_item_group_link(struct devlink *devlink, + struct devlink_trap_item *trap_item) +{ + struct 
devlink_trap_group_item *group_item; + + group_item = devlink_trap_group_item_get(devlink, + &trap_item->trap->group); + if (IS_ERR(group_item)) + return PTR_ERR(group_item); + + trap_item->group_item = group_item; + + return 0; +} + +static void +devlink_trap_item_group_unlink(struct devlink *devlink, + struct devlink_trap_item *trap_item) +{ + devlink_trap_group_item_put(devlink, trap_item->group_item); +} + +static void devlink_trap_notify(struct devlink *devlink, + const struct devlink_trap_item *trap_item, + enum devlink_command cmd) +{ + struct sk_buff *msg; + int err; + + WARN_ON_ONCE(cmd != DEVLINK_CMD_TRAP_NEW && + cmd != DEVLINK_CMD_TRAP_DEL); + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return; + + err = devlink_nl_trap_fill(msg, devlink, trap_item, cmd, 0, 0, 0); + if (err) { + nlmsg_free(msg); + return; + } + + genlmsg_multicast_netns(&devlink_nl_family, devlink_net(devlink), + msg, 0, DEVLINK_MCGRP_CONFIG, GFP_KERNEL); +} + +static int +devlink_trap_register(struct devlink *devlink, + const struct devlink_trap *trap, void *priv) +{ + struct devlink_trap_item *trap_item; + int err; + + if (devlink_trap_item_lookup(devlink, trap->name)) + return -EEXIST; + + trap_item = kzalloc(sizeof(*trap_item), GFP_KERNEL); + if (!trap_item) + return -ENOMEM; + + trap_item->stats = netdev_alloc_pcpu_stats(struct devlink_stats); + if (!trap_item->stats) { + err = -ENOMEM; + goto err_stats_alloc; + } + + trap_item->trap = trap; + trap_item->action = trap->init_action; + trap_item->priv = priv; + + err = devlink_trap_item_group_link(devlink, trap_item); + if (err) + goto err_group_link; + + err = devlink->ops->trap_init(devlink, trap, trap_item); + if (err) + goto err_trap_init; + + list_add_tail(&trap_item->list, &devlink->trap_list); + devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_NEW); + + return 0; + +err_trap_init: + devlink_trap_item_group_unlink(devlink, trap_item); +err_group_link: + free_percpu(trap_item->stats); +err_stats_alloc: + kfree(trap_item); + return err; +} + +static void devlink_trap_unregister(struct devlink *devlink, + const struct devlink_trap *trap) +{ + struct devlink_trap_item *trap_item; + + trap_item = devlink_trap_item_lookup(devlink, trap->name); + if (WARN_ON_ONCE(!trap_item)) + return; + + devlink_trap_notify(devlink, trap_item, DEVLINK_CMD_TRAP_DEL); + list_del(&trap_item->list); + if (devlink->ops->trap_fini) + devlink->ops->trap_fini(devlink, trap, trap_item); + devlink_trap_item_group_unlink(devlink, trap_item); + free_percpu(trap_item->stats); + kfree(trap_item); +} + +static void devlink_trap_disable(struct devlink *devlink, + const struct devlink_trap *trap) +{ + struct devlink_trap_item *trap_item; + + trap_item = devlink_trap_item_lookup(devlink, trap->name); + if (WARN_ON_ONCE(!trap_item)) + return; + + devlink->ops->trap_action_set(devlink, trap, DEVLINK_TRAP_ACTION_DROP); + trap_item->action = DEVLINK_TRAP_ACTION_DROP; +} + +/** + * devlink_traps_register - Register packet traps with devlink. + * @devlink: devlink. + * @traps: Packet traps. + * @traps_count: Count of provided packet traps. + * @priv: Driver private information. + * + * Return: Non-zero value on failure. 
+ */ +int devlink_traps_register(struct devlink *devlink, + const struct devlink_trap *traps, + size_t traps_count, void *priv) +{ + int i, err; + + if (!devlink->ops->trap_init || !devlink->ops->trap_action_set) + return -EINVAL; + + mutex_lock(&devlink->lock); + for (i = 0; i < traps_count; i++) { + const struct devlink_trap *trap = &traps[i]; + + err = devlink_trap_verify(trap); + if (err) + goto err_trap_verify; + + err = devlink_trap_register(devlink, trap, priv); + if (err) + goto err_trap_register; + } + mutex_unlock(&devlink->lock); + + return 0; + +err_trap_register: +err_trap_verify: + for (i--; i >= 0; i--) + devlink_trap_unregister(devlink, &traps[i]); + mutex_unlock(&devlink->lock); + return err; +} +EXPORT_SYMBOL_GPL(devlink_traps_register); + +/** + * devlink_traps_unregister - Unregister packet traps from devlink. + * @devlink: devlink. + * @traps: Packet traps. + * @traps_count: Count of provided packet traps. + */ +void devlink_traps_unregister(struct devlink *devlink, + const struct devlink_trap *traps, + size_t traps_count) +{ + int i; + + mutex_lock(&devlink->lock); + /* Make sure we do not have any packets in-flight while unregistering + * traps by disabling all of them and waiting for a grace period. + */ + for (i = traps_count - 1; i >= 0; i--) + devlink_trap_disable(devlink, &traps[i]); + synchronize_rcu(); + for (i = traps_count - 1; i >= 0; i--) + devlink_trap_unregister(devlink, &traps[i]); + mutex_unlock(&devlink->lock); +} +EXPORT_SYMBOL_GPL(devlink_traps_unregister); + +static void +devlink_trap_stats_update(struct devlink_stats __percpu *trap_stats, + size_t skb_len) +{ + struct devlink_stats *stats; + + stats = this_cpu_ptr(trap_stats); + u64_stats_update_begin(&stats->syncp); + stats->rx_bytes += skb_len; + stats->rx_packets++; + u64_stats_update_end(&stats->syncp); +} + +static void +devlink_trap_report_metadata_fill(struct net_dm_hw_metadata *hw_metadata, + const struct devlink_trap_item *trap_item, + struct devlink_port *in_devlink_port) +{ + struct devlink_trap_group_item *group_item = trap_item->group_item; + + hw_metadata->trap_group_name = group_item->group->name; + hw_metadata->trap_name = trap_item->trap->name; + + spin_lock(&in_devlink_port->type_lock); + if (in_devlink_port->type == DEVLINK_PORT_TYPE_ETH) + hw_metadata->input_dev = in_devlink_port->type_dev; + spin_unlock(&in_devlink_port->type_lock); +} + +/** + * devlink_trap_report - Report trapped packet to drop monitor. + * @devlink: devlink. + * @skb: Trapped packet. + * @trap_ctx: Trap context. + * @in_devlink_port: Input devlink port. + */ +void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb, + void *trap_ctx, struct devlink_port *in_devlink_port) +{ + struct devlink_trap_item *trap_item = trap_ctx; + struct net_dm_hw_metadata hw_metadata = {}; + + devlink_trap_stats_update(trap_item->stats, skb->len); + devlink_trap_stats_update(trap_item->group_item->stats, skb->len); + + devlink_trap_report_metadata_fill(&hw_metadata, trap_item, + in_devlink_port); + net_dm_hw_report(skb, &hw_metadata); +} +EXPORT_SYMBOL_GPL(devlink_trap_report); + +/** + * devlink_trap_ctx_priv - Trap context to driver private information. + * @trap_ctx: Trap context. + * + * Return: Driver private information passed during registration. 
+ */ +void *devlink_trap_ctx_priv(void *trap_ctx) +{ + struct devlink_trap_item *trap_item = trap_ctx; + + return trap_item->priv; +} +EXPORT_SYMBOL_GPL(devlink_trap_ctx_priv); + static void __devlink_compat_running_version(struct devlink *devlink, char *buf, size_t len) { -- cgit v1.2.3 From 77cd0d7b3f257fd0e3096b4fdcff1a7d38e99e10 Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Wed, 14 Aug 2019 09:27:17 +0200 Subject: xsk: add support for need_wakeup flag in AF_XDP rings This commit adds support for a new flag called need_wakeup in the AF_XDP Tx and fill rings. When this flag is set, it means that the application has to explicitly wake up the kernel Rx (for the bit in the fill ring) or kernel Tx (for the bit in the Tx ring) processing by issuing a syscall. poll() can wake up both, depending on the flags submitted, and sendto() will wake up Tx processing only. The main reason for introducing this new flag is to be able to efficiently support the case when the application and driver are executing on the same core. Previously, the driver was just busy-spinning on the fill ring if it ran out of buffers in the HW and there were none on the fill ring. This approach works when the application is running on another core as it can replenish the fill ring while the driver is busy-spinning. However, this is a lousy approach if both of them are running on the same core as the probability of the fill ring getting more entries when the driver is busy-spinning is zero. With this new feature the driver now sets the need_wakeup flag and returns to the application. The application can then replenish the fill queue and explicitly wake up the Rx processing in the kernel using the syscall poll(). For Tx, the flag is only set to one if the driver has no outstanding Tx completion interrupts. If it has some, the flag is zero as it will be woken up by a completion interrupt anyway. As a nice side effect, this new flag also improves the performance of the case where application and driver are running on two different cores as it reduces the number of syscalls to the kernel. The kernel tells user space if it needs to be woken up by a syscall, and this eliminates many of the syscalls. This flag needs some simple driver support. If the driver does not support this, the Rx flag is always zero and the Tx flag is always one. This makes any application relying on this feature default to the old behaviour of not requiring any syscalls in the Rx path and always having to call sendto() in the Tx path. For backwards compatibility reasons, this feature has to be explicitly turned on using a new bind flag (XDP_USE_NEED_WAKEUP). I recommend that you always turn it on, as it has so far always had a positive performance impact. The name and inspiration of the flag have been taken from io_uring by Jens Axboe. Details about this feature in io_uring can be found in http://kernel.dk/io_uring.pdf, section 8.3.
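A minimal userspace sketch (error handling and ring setup omitted; the tx_flags pointer is assumed to have been derived from the new off.tx.flags mmap offset returned by XDP_MMAP_OFFSETS):

	#include <sys/socket.h>
	#include <linux/if_xdp.h>

	/* Opt in to the feature at bind time. */
	static int xsk_bind_with_wakeup(int xsk_fd, __u32 ifindex, __u32 queue_id)
	{
		struct sockaddr_xdp sxdp = {
			.sxdp_family = AF_XDP,
			.sxdp_ifindex = ifindex,
			.sxdp_queue_id = queue_id,
			.sxdp_flags = XDP_ZEROCOPY | XDP_USE_NEED_WAKEUP,
		};

		return bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	}

	/* Tx path: only enter the kernel when the driver asked for it. */
	static void xsk_kick_tx(int xsk_fd, const volatile __u32 *tx_flags)
	{
		if (*tx_flags & XDP_RING_NEED_WAKEUP)
			sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	}
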
Signed-off-by: Magnus Karlsson Acked-by: Jonathan Lemon Signed-off-by: Daniel Borkmann --- include/net/xdp_sock.h | 33 +++++++++- include/uapi/linux/if_xdp.h | 13 ++++ net/xdp/xdp_umem.c | 9 +++ net/xdp/xsk.c | 146 ++++++++++++++++++++++++++++++++++++++------ net/xdp/xsk.h | 13 ++++ net/xdp/xsk_queue.h | 1 + 6 files changed, 195 insertions(+), 20 deletions(-) (limited to 'include/uapi') diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 69796d264f06..6aebea2b18cb 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -27,6 +27,9 @@ struct xdp_umem_fq_reuse { u64 handles[]; }; +/* Flags for the umem flags field. */ +#define XDP_UMEM_USES_NEED_WAKEUP (1 << 0) + struct xdp_umem { struct xsk_queue *fq; struct xsk_queue *cq; @@ -41,10 +44,12 @@ struct xdp_umem { struct work_struct work; struct page **pgs; u32 npgs; + u16 queue_id; + u8 need_wakeup; + u8 flags; int id; struct net_device *dev; struct xdp_umem_fq_reuse *fq_reuse; - u16 queue_id; bool zc; spinlock_t xsk_list_lock; struct list_head xsk_list; @@ -95,6 +100,11 @@ struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem, struct xdp_umem_fq_reuse *newq); void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq); struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id); +void xsk_set_rx_need_wakeup(struct xdp_umem *umem); +void xsk_set_tx_need_wakeup(struct xdp_umem *umem); +void xsk_clear_rx_need_wakeup(struct xdp_umem *umem); +void xsk_clear_tx_need_wakeup(struct xdp_umem *umem); +bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem); static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) { @@ -241,6 +251,27 @@ static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) { } +static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem) +{ +} + +static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem) +{ +} + +static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem) +{ +} + +static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem) +{ +} + +static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem) +{ + return false; +} + #endif /* CONFIG_XDP_SOCKETS */ #endif /* _LINUX_XDP_SOCK_H */ diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h index faaa5ca2a117..62b80d57b72a 100644 --- a/include/uapi/linux/if_xdp.h +++ b/include/uapi/linux/if_xdp.h @@ -16,6 +16,15 @@ #define XDP_SHARED_UMEM (1 << 0) #define XDP_COPY (1 << 1) /* Force copy-mode */ #define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */ +/* If this option is set, the driver might go to sleep and in that case + * the XDP_RING_NEED_WAKEUP flag in the fill and/or Tx rings will be + * set. If it is set, the application needs to explicitly wake up the + * driver with a poll() (Rx and Tx) or sendto() (Tx only). If you are + * running the driver and the application on the same core, you should + * use this option so that the kernel will yield to the user space + * application.
+ */ +#define XDP_USE_NEED_WAKEUP (1 << 3) struct sockaddr_xdp { __u16 sxdp_family; @@ -25,10 +34,14 @@ struct sockaddr_xdp { __u32 sxdp_shared_umem_fd; }; +/* XDP_RING flags */ +#define XDP_RING_NEED_WAKEUP (1 << 0) + struct xdp_ring_offset { __u64 producer; __u64 consumer; __u64 desc; + __u64 flags; }; struct xdp_mmap_offsets { diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 6e2d4da4cdcd..cda6feb1edb0 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -106,6 +106,15 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, umem->dev = dev; umem->queue_id = queue_id; + if (flags & XDP_USE_NEED_WAKEUP) { + umem->flags |= XDP_UMEM_USES_NEED_WAKEUP; + /* Tx needs to be explicitly woken up the first time. + * Also for supporting drivers that do not implement this + * feature. They will always have to call sendto(). + */ + xsk_set_tx_need_wakeup(umem); + } + dev_hold(dev); if (force_copy) diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 1fe40a936456..9f900b56b15b 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -55,6 +55,66 @@ void xsk_umem_discard_addr(struct xdp_umem *umem) } EXPORT_SYMBOL(xsk_umem_discard_addr); +void xsk_set_rx_need_wakeup(struct xdp_umem *umem) +{ + if (umem->need_wakeup & XDP_WAKEUP_RX) + return; + + umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP; + umem->need_wakeup |= XDP_WAKEUP_RX; +} +EXPORT_SYMBOL(xsk_set_rx_need_wakeup); + +void xsk_set_tx_need_wakeup(struct xdp_umem *umem) +{ + struct xdp_sock *xs; + + if (umem->need_wakeup & XDP_WAKEUP_TX) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(xs, &umem->xsk_list, list) { + xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP; + } + rcu_read_unlock(); + + umem->need_wakeup |= XDP_WAKEUP_TX; +} +EXPORT_SYMBOL(xsk_set_tx_need_wakeup); + +void xsk_clear_rx_need_wakeup(struct xdp_umem *umem) +{ + if (!(umem->need_wakeup & XDP_WAKEUP_RX)) + return; + + umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP; + umem->need_wakeup &= ~XDP_WAKEUP_RX; +} +EXPORT_SYMBOL(xsk_clear_rx_need_wakeup); + +void xsk_clear_tx_need_wakeup(struct xdp_umem *umem) +{ + struct xdp_sock *xs; + + if (!(umem->need_wakeup & XDP_WAKEUP_TX)) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(xs, &umem->xsk_list, list) { + xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP; + } + rcu_read_unlock(); + + umem->need_wakeup &= ~XDP_WAKEUP_TX; +} +EXPORT_SYMBOL(xsk_clear_tx_need_wakeup); + +bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem) +{ + return umem->flags & XDP_UMEM_USES_NEED_WAKEUP; +} +EXPORT_SYMBOL(xsk_umem_uses_need_wakeup); + static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) { void *to_buf, *from_buf; @@ -320,6 +380,12 @@ static unsigned int xsk_poll(struct file *file, struct socket *sock, unsigned int mask = datagram_poll(file, sock, wait); struct sock *sk = sock->sk; struct xdp_sock *xs = xdp_sk(sk); + struct net_device *dev = xs->dev; + struct xdp_umem *umem = xs->umem; + + if (umem->need_wakeup) + dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, + umem->need_wakeup); if (xs->rx && !xskq_empty_desc(xs->rx)) mask |= POLLIN | POLLRDNORM; @@ -428,7 +494,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) return -EINVAL; flags = sxdp->sxdp_flags; - if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY)) + if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY | + XDP_USE_NEED_WAKEUP)) return -EINVAL; rtnl_lock(); @@ -455,7 +522,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) struct xdp_sock *umem_xs; struct socket *sock; 
- if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) { + if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) || + (flags & XDP_USE_NEED_WAKEUP)) { /* Cannot specify flags for shared sockets. */ err = -EINVAL; goto out_unlock; @@ -550,6 +618,9 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname, } q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx; err = xsk_init_queue(entries, q, false); + if (!err && optname == XDP_TX_RING) + /* Tx needs to be explicitly woken up the first time */ + xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP; mutex_unlock(&xs->mutex); return err; } @@ -611,6 +682,20 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname, return -ENOPROTOOPT; } +static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring) +{ + ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer); + ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer); + ring->desc = offsetof(struct xdp_rxtx_ring, desc); +} + +static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring) +{ + ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer); + ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer); + ring->desc = offsetof(struct xdp_umem_ring, desc); +} + static int xsk_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { @@ -650,26 +735,49 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname, case XDP_MMAP_OFFSETS: { struct xdp_mmap_offsets off; + struct xdp_mmap_offsets_v1 off_v1; + bool flags_supported = true; + void *to_copy; - if (len < sizeof(off)) + if (len < sizeof(off_v1)) return -EINVAL; + else if (len < sizeof(off)) + flags_supported = false; + + if (flags_supported) { + /* xdp_ring_offset is identical to xdp_ring_offset_v1 + * except for the flags field added to the end. 
+ */ + xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *) + &off.rx); + xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *) + &off.tx); + xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *) + &off.fr); + xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *) + &off.cr); + off.rx.flags = offsetof(struct xdp_rxtx_ring, + ptrs.flags); + off.tx.flags = offsetof(struct xdp_rxtx_ring, + ptrs.flags); + off.fr.flags = offsetof(struct xdp_umem_ring, + ptrs.flags); + off.cr.flags = offsetof(struct xdp_umem_ring, + ptrs.flags); + + len = sizeof(off); + to_copy = &off; + } else { + xsk_enter_rxtx_offsets(&off_v1.rx); + xsk_enter_rxtx_offsets(&off_v1.tx); + xsk_enter_umem_offsets(&off_v1.fr); + xsk_enter_umem_offsets(&off_v1.cr); + + len = sizeof(off_v1); + to_copy = &off_v1; + } - off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer); - off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer); - off.rx.desc = offsetof(struct xdp_rxtx_ring, desc); - off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer); - off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer); - off.tx.desc = offsetof(struct xdp_rxtx_ring, desc); - - off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer); - off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer); - off.fr.desc = offsetof(struct xdp_umem_ring, desc); - off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer); - off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer); - off.cr.desc = offsetof(struct xdp_umem_ring, desc); - - len = sizeof(off); - if (copy_to_user(optval, &off, len)) + if (copy_to_user(optval, to_copy, len)) return -EFAULT; if (put_user(len, optlen)) return -EFAULT; diff --git a/net/xdp/xsk.h b/net/xdp/xsk.h index ba8120610426..4cfd106bdb53 100644 --- a/net/xdp/xsk.h +++ b/net/xdp/xsk.h @@ -4,6 +4,19 @@ #ifndef XSK_H_ #define XSK_H_ +struct xdp_ring_offset_v1 { + __u64 producer; + __u64 consumer; + __u64 desc; +}; + +struct xdp_mmap_offsets_v1 { + struct xdp_ring_offset_v1 rx; + struct xdp_ring_offset_v1 tx; + struct xdp_ring_offset_v1 fr; + struct xdp_ring_offset_v1 cr; +}; + static inline struct xdp_sock *xdp_sk(struct sock *sk) { return (struct xdp_sock *)sk; diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index 909c5168ed0f..dd9e985c2461 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -16,6 +16,7 @@ struct xdp_ring { u32 producer ____cacheline_aligned_in_smp; u32 consumer ____cacheline_aligned_in_smp; + u32 flags; }; /* Used for the RX and TX queues for packets */ -- cgit v1.2.3 From 8f51dfc73bf181f2304e1498f55d5f452e060cbe Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Wed, 14 Aug 2019 10:37:49 -0700 Subject: bpf: support cloning sk storage on accept() Add new helper bpf_sk_storage_clone which optionally clones sk storage and calls it from sk_clone_lock.
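A sketch of the BPF-side opt-in, assuming libbpf's BTF-defined map syntax (the map name and the __u64 value layout are illustrative):

	#include <linux/types.h>
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	/* sk storage map whose values are copied from the listener to the
	 * child socket on accept(), because of BPF_F_CLONE. Note that
	 * BPF_F_NO_PREALLOC remains mandatory for sk storage maps.
	 */
	struct {
		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
		__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
		__type(key, int);
		__type(value, __u64);
	} sk_stg_map SEC(".maps");

Storage created on a listener via bpf_sk_storage_get() is then cloned into the accepted socket; maps registered without BPF_F_CLONE keep the old behaviour and are skipped.
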
Cc: Martin KaFai Lau Cc: Yonghong Song Acked-by: Martin KaFai Lau Acked-by: Yonghong Song Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann --- include/net/bpf_sk_storage.h | 10 +++++ include/uapi/linux/bpf.h | 3 ++ net/core/bpf_sk_storage.c | 104 +++++++++++++++++++++++++++++++++++++++++-- net/core/sock.c | 9 ++-- 4 files changed, 120 insertions(+), 6 deletions(-) (limited to 'include/uapi') diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h index b9dcb02e756b..8e4f831d2e52 100644 --- a/include/net/bpf_sk_storage.h +++ b/include/net/bpf_sk_storage.h @@ -10,4 +10,14 @@ void bpf_sk_storage_free(struct sock *sk); extern const struct bpf_func_proto bpf_sk_storage_get_proto; extern const struct bpf_func_proto bpf_sk_storage_delete_proto; +#ifdef CONFIG_BPF_SYSCALL +int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk); +#else +static inline int bpf_sk_storage_clone(const struct sock *sk, + struct sock *newsk) +{ + return 0; +} +#endif + #endif /* _BPF_SK_STORAGE_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4393bd4b2419..0ef594ac3899 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -337,6 +337,9 @@ enum bpf_attach_type { #define BPF_F_RDONLY_PROG (1U << 7) #define BPF_F_WRONLY_PROG (1U << 8) +/* Clone map from listener for newly accepted socket */ +#define BPF_F_CLONE (1U << 9) + /* flags for BPF_PROG_QUERY */ #define BPF_F_QUERY_EFFECTIVE (1U << 0) diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index 94c7f77ecb6b..da5639a5bd3b 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -12,6 +12,9 @@ static atomic_t cache_idx; +#define SK_STORAGE_CREATE_FLAG_MASK \ + (BPF_F_NO_PREALLOC | BPF_F_CLONE) + struct bucket { struct hlist_head list; raw_spinlock_t lock; @@ -209,7 +212,6 @@ static void selem_unlink_sk(struct bpf_sk_storage_elem *selem) kfree_rcu(sk_storage, rcu); } -/* sk_storage->lock must be held and sk_storage->list cannot be empty */ static void __selem_link_sk(struct bpf_sk_storage *sk_storage, struct bpf_sk_storage_elem *selem) { @@ -509,7 +511,7 @@ static int sk_storage_delete(struct sock *sk, struct bpf_map *map) return 0; } -/* Called by __sk_destruct() */ +/* Called by __sk_destruct() & bpf_sk_storage_clone() */ void bpf_sk_storage_free(struct sock *sk) { struct bpf_sk_storage_elem *selem; @@ -557,6 +559,11 @@ static void bpf_sk_storage_map_free(struct bpf_map *map) smap = (struct bpf_sk_storage_map *)map; + /* Note that this map might be concurrently cloned from + * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone + * RCU read section to finish before proceeding. New RCU + * read sections should be prevented via bpf_map_inc_not_zero. 
+ */ synchronize_rcu(); /* bpf prog and the userspace can no longer access this map @@ -601,7 +608,9 @@ static void bpf_sk_storage_map_free(struct bpf_map *map) static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr) { - if (attr->map_flags != BPF_F_NO_PREALLOC || attr->max_entries || + if (attr->map_flags & ~SK_STORAGE_CREATE_FLAG_MASK || + !(attr->map_flags & BPF_F_NO_PREALLOC) || + attr->max_entries || attr->key_size != sizeof(int) || !attr->value_size || /* Enforce BTF for userspace sk dumping */ !attr->btf_key_type_id || !attr->btf_value_type_id) @@ -739,6 +748,95 @@ static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key) return err; } +static struct bpf_sk_storage_elem * +bpf_sk_storage_clone_elem(struct sock *newsk, + struct bpf_sk_storage_map *smap, + struct bpf_sk_storage_elem *selem) +{ + struct bpf_sk_storage_elem *copy_selem; + + copy_selem = selem_alloc(smap, newsk, NULL, true); + if (!copy_selem) + return NULL; + + if (map_value_has_spin_lock(&smap->map)) + copy_map_value_locked(&smap->map, SDATA(copy_selem)->data, + SDATA(selem)->data, true); + else + copy_map_value(&smap->map, SDATA(copy_selem)->data, + SDATA(selem)->data); + + return copy_selem; +} + +int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk) +{ + struct bpf_sk_storage *new_sk_storage = NULL; + struct bpf_sk_storage *sk_storage; + struct bpf_sk_storage_elem *selem; + int ret = 0; + + RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL); + + rcu_read_lock(); + sk_storage = rcu_dereference(sk->sk_bpf_storage); + + if (!sk_storage || hlist_empty(&sk_storage->list)) + goto out; + + hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) { + struct bpf_sk_storage_elem *copy_selem; + struct bpf_sk_storage_map *smap; + struct bpf_map *map; + + smap = rcu_dereference(SDATA(selem)->smap); + if (!(smap->map.map_flags & BPF_F_CLONE)) + continue; + + /* Note that for lockless listeners adding new element + * here can race with cleanup in bpf_sk_storage_map_free. + * Try to grab map refcnt to make sure that it's still + * alive and prevent concurrent removal. + */ + map = bpf_map_inc_not_zero(&smap->map, false); + if (IS_ERR(map)) + continue; + + copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem); + if (!copy_selem) { + ret = -ENOMEM; + bpf_map_put(map); + goto out; + } + + if (new_sk_storage) { + selem_link_map(smap, copy_selem); + __selem_link_sk(new_sk_storage, copy_selem); + } else { + ret = sk_storage_alloc(newsk, smap, copy_selem); + if (ret) { + kfree(copy_selem); + atomic_sub(smap->elem_size, + &newsk->sk_omem_alloc); + bpf_map_put(map); + goto out; + } + + new_sk_storage = rcu_dereference(copy_selem->sk_storage); + } + bpf_map_put(map); + } + +out: + rcu_read_unlock(); + + /* In case of an error, don't free anything explicitly here, the + * caller is responsible to call bpf_sk_storage_free. 
+ */ + + return ret; +} + BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk, void *, value, u64, flags) { diff --git a/net/core/sock.c b/net/core/sock.c index d57b0cc995a0..f5e801a9cea4 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1851,9 +1851,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) goto out; } RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); -#ifdef CONFIG_BPF_SYSCALL - RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL); -#endif + + if (bpf_sk_storage_clone(sk, newsk)) { + sk_free_unlock_clone(newsk); + newsk = NULL; + goto out; + } newsk->sk_err = 0; newsk->sk_err_soft = 0; -- cgit v1.2.3 From 89a26cd4b501e9511d3cd3d22327fc76a75a38b3 Mon Sep 17 00:00:00 2001 From: Juliana Rodrigueiro Date: Fri, 16 Aug 2019 17:02:22 +0200 Subject: netfilter: xt_nfacct: Fix alignment mismatch in xt_nfacct_match_info When running a 64-bit kernel with a 32-bit iptables binary, the size of the xt_nfacct_match_info struct diverges. kernel: sizeof(struct xt_nfacct_match_info) : 40 iptables: sizeof(struct xt_nfacct_match_info) : 36 Trying to append nfacct-related rules results in an unhelpful message. Although it is suggested to look for more information in dmesg, nothing can be found there. # iptables -A -m nfacct --nfacct-name iptables: Invalid argument. Run `dmesg' for more information. This patch fixes the memory misalignment by adding a second revision of the struct in which the kernel pointer is explicitly 8-byte aligned. This solution is often used in many other uapi netfilter headers. Signed-off-by: Juliana Rodrigueiro Acked-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/xt_nfacct.h | 5 +++++ net/netfilter/xt_nfacct.c | 36 ++++++++++++++++++++++---------- 2 files changed, 30 insertions(+), 11 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h index 5c8a4d760ee3..b5123ab8d54a 100644 --- a/include/uapi/linux/netfilter/xt_nfacct.h +++ b/include/uapi/linux/netfilter/xt_nfacct.h @@ -11,4 +11,9 @@ struct xt_nfacct_match_info { struct nf_acct *nfacct; }; +struct xt_nfacct_match_info_v1 { + char name[NFACCT_NAME_MAX]; + struct nf_acct *nfacct __attribute__((aligned(8))); +}; + #endif /* _XT_NFACCT_MATCH_H */ diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c index d0ab1adf5bff..5aab6df74e0f 100644 --- a/net/netfilter/xt_nfacct.c +++ b/net/netfilter/xt_nfacct.c @@ -54,25 +54,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par) nfnl_acct_put(info->nfacct); } -static struct xt_match nfacct_mt_reg __read_mostly = { - .name = "nfacct", - .family = NFPROTO_UNSPEC, - .checkentry = nfacct_mt_checkentry, - .match = nfacct_mt, - .destroy = nfacct_mt_destroy, - .matchsize = sizeof(struct xt_nfacct_match_info), - .usersize = offsetof(struct xt_nfacct_match_info, nfacct), - .me = THIS_MODULE, +static struct xt_match nfacct_mt_reg[] __read_mostly = { + { + .name = "nfacct", + .revision = 0, + .family = NFPROTO_UNSPEC, + .checkentry = nfacct_mt_checkentry, + .match = nfacct_mt, + .destroy = nfacct_mt_destroy, + .matchsize = sizeof(struct xt_nfacct_match_info), + .usersize = offsetof(struct xt_nfacct_match_info, nfacct), + .me = THIS_MODULE, + }, + { + .name = "nfacct", + .revision = 1, + .family = NFPROTO_UNSPEC, + .checkentry = nfacct_mt_checkentry, + .match = nfacct_mt, + .destroy = nfacct_mt_destroy, + .matchsize = sizeof(struct xt_nfacct_match_info_v1), + .usersize = offsetof(struct xt_nfacct_match_info_v1, nfacct), + .me = THIS_MODULE, + }, };
static int __init nfacct_mt_init(void) { - return xt_register_match(&nfacct_mt_reg); + return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg)); } static void __exit nfacct_mt_exit(void) { - xt_unregister_match(&nfacct_mt_reg); + xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg)); } module_init(nfacct_mt_init); -- cgit v1.2.3 From 2b770bee787daec66ed86baf4fc83275f949c8ac Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Thu, 15 Aug 2019 11:44:45 -0300 Subject: media: videodev2.h: add V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM Add an enum_fmt format flag to specifically tag coded formats where full bytestream parsing is supported by the device. Some stateful decoders are capable of fully parsing a bytestream, but others require that userspace pre-parses the bytestream into frames or fields (see the corresponding pixelformat descriptions for details). If this flag is set, then this pre-parsing step is not required (but still possible, of course). Signed-off-by: Hans Verkuil Reviewed-by: Paul Kocialkowski Reviewed-by: Alexandre Courbot Signed-off-by: Mauro Carvalho Chehab --- Documentation/media/uapi/v4l/vidioc-enum-fmt.rst | 8 ++++++++ Documentation/media/videodev2.h.rst.exceptions | 1 + include/uapi/linux/videodev2.h | 5 +++-- 3 files changed, 12 insertions(+), 2 deletions(-) (limited to 'include/uapi') diff --git a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst index 822d6730e7d2..ebc05ce74bdf 100644 --- a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst +++ b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst @@ -127,6 +127,14 @@ one until ``EINVAL`` is returned. - This format is not native to the device but emulated through software (usually libv4l2), where possible try to use a native format instead for better performance. + * - ``V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM`` + - 0x0004 + - The hardware decoder for this compressed bytestream format (aka coded + format) is capable of parsing a continuous bytestream. Applications do + not need to parse the bytestream themselves to find the boundaries + between frames/fields. This flag can only be used in combination with + the ``V4L2_FMT_FLAG_COMPRESSED`` flag, since this applies to compressed + formats only. This flag is valid for stateful decoders only. 
Return Value diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions index 8e7d3492d248..a0640b6d0f68 100644 --- a/Documentation/media/videodev2.h.rst.exceptions +++ b/Documentation/media/videodev2.h.rst.exceptions @@ -180,6 +180,7 @@ replace define V4L2_PIX_FMT_FLAG_PREMUL_ALPHA reserved-formats # V4L2 format flags replace define V4L2_FMT_FLAG_COMPRESSED fmtdesc-flags replace define V4L2_FMT_FLAG_EMULATED fmtdesc-flags +replace define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM fmtdesc-flags # V4L2 timecode types replace define V4L2_TC_TYPE_24FPS timecode-type diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 2427bc4d8eba..67077d52c59d 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -774,8 +774,9 @@ struct v4l2_fmtdesc { __u32 reserved[4]; }; -#define V4L2_FMT_FLAG_COMPRESSED 0x0001 -#define V4L2_FMT_FLAG_EMULATED 0x0002 +#define V4L2_FMT_FLAG_COMPRESSED 0x0001 +#define V4L2_FMT_FLAG_EMULATED 0x0002 +#define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM 0x0004 /* Frame Size and frame rate enumeration */ /* -- cgit v1.2.3 From 60a039eb27f921abaef4778999db510dd75d5e48 Mon Sep 17 00:00:00 2001 From: Maxime Jourdan Date: Thu, 15 Aug 2019 11:44:46 -0300 Subject: media: videodev2.h: add V4L2_FMT_FLAG_DYN_RESOLUTION Add an enum_fmt format flag to specifically tag coded formats where dynamic resolution switching is supported by the device. This is useful for some codec drivers that can support dynamic resolution switching for one or more of their listed coded formats. It allows userspace to know whether it should extract the video parameters itself, or if it can rely on the device to send V4L2_EVENT_SOURCE_CHANGE when such changes are detected. Signed-off-by: Maxime Jourdan Reviewed-by: Paul Kocialkowski Reviewed-by: Alexandre Courbot Acked-by: Tomasz Figa Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab --- Documentation/media/uapi/v4l/vidioc-enum-fmt.rst | 8 ++++++++ Documentation/media/videodev2.h.rst.exceptions | 1 + include/uapi/linux/videodev2.h | 1 + 3 files changed, 10 insertions(+) (limited to 'include/uapi') diff --git a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst index ebc05ce74bdf..399ef1062bac 100644 --- a/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst +++ b/Documentation/media/uapi/v4l/vidioc-enum-fmt.rst @@ -135,6 +135,14 @@ one until ``EINVAL`` is returned. between frames/fields. This flag can only be used in combination with the ``V4L2_FMT_FLAG_COMPRESSED`` flag, since this applies to compressed formats only. This flag is valid for stateful decoders only. + * - ``V4L2_FMT_FLAG_DYN_RESOLUTION`` + - 0x0008 + - Dynamic resolution switching is supported by the device for this + compressed bytestream format (aka coded format). It will notify the user + via the event ``V4L2_EVENT_SOURCE_CHANGE`` when changes in the video + parameters are detected. This flag can only be used in combination + with the ``V4L2_FMT_FLAG_COMPRESSED`` flag, since this applies to + compressed formats only. It also applies only to stateful codecs.
Return Value diff --git a/Documentation/media/videodev2.h.rst.exceptions b/Documentation/media/videodev2.h.rst.exceptions index a0640b6d0f68..adeb6b7a15cb 100644 --- a/Documentation/media/videodev2.h.rst.exceptions +++ b/Documentation/media/videodev2.h.rst.exceptions @@ -181,6 +181,7 @@ replace define V4L2_PIX_FMT_FLAG_PREMUL_ALPHA reserved-formats replace define V4L2_FMT_FLAG_COMPRESSED fmtdesc-flags replace define V4L2_FMT_FLAG_EMULATED fmtdesc-flags replace define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM fmtdesc-flags +replace define V4L2_FMT_FLAG_DYN_RESOLUTION fmtdesc-flags # V4L2 timecode types replace define V4L2_TC_TYPE_24FPS timecode-type diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 67077d52c59d..530638dffd93 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -777,6 +777,7 @@ struct v4l2_fmtdesc { #define V4L2_FMT_FLAG_COMPRESSED 0x0001 #define V4L2_FMT_FLAG_EMULATED 0x0002 #define V4L2_FMT_FLAG_CONTINUOUS_BYTESTREAM 0x0004 +#define V4L2_FMT_FLAG_DYN_RESOLUTION 0x0008 /* Frame Size and frame rate enumeration */ /* -- cgit v1.2.3 From db2cb969e8aed2395620fe2cb0bffd194c02b4b1 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Tue, 6 Aug 2019 11:30:00 +0200 Subject: vfio: re-arrange vfio region definitions It is easy to miss already defined region types. Let's re-arrange the definitions a bit and add more comments to make it hopefully a bit clearer. No functional change. Signed-off-by: Cornelia Huck Signed-off-by: Alex Williamson --- include/uapi/linux/vfio.h | 45 ++++++++++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 19 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 8f10748dac79..e809b22f6a60 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -295,15 +295,38 @@ struct vfio_region_info_cap_type { __u32 subtype; /* type specific */ }; +/* + * List of region types, global per bus driver. + * If you introduce a new type, please add it here. + */ + +/* PCI region type containing a PCI vendor part */ #define VFIO_REGION_TYPE_PCI_VENDOR_TYPE (1 << 31) #define VFIO_REGION_TYPE_PCI_VENDOR_MASK (0xffff) +#define VFIO_REGION_TYPE_GFX (1) +#define VFIO_REGION_TYPE_CCW (2) + +/* sub-types for VFIO_REGION_TYPE_PCI_* */ -/* 8086 Vendor sub-types */ +/* 8086 vendor PCI sub-types */ #define VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION (1) #define VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG (2) #define VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG (3) -#define VFIO_REGION_TYPE_GFX (1) +/* 10de vendor PCI sub-types */ +/* + * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space. + */ +#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1) + +/* 1014 vendor PCI sub-types */ +/* + * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU + * to do TLB invalidation on a GPU. + */ +#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1) + +/* sub-types for VFIO_REGION_TYPE_GFX */ #define VFIO_REGION_SUBTYPE_GFX_EDID (1) /** @@ -353,25 +376,9 @@ struct vfio_region_gfx_edid { #define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2 }; -#define VFIO_REGION_TYPE_CCW (2) -/* ccw sub-types */ +/* sub-types for VFIO_REGION_TYPE_CCW */ #define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1) -/* - * 10de vendor sub-type - * - * NVIDIA GPU NVlink2 RAM is coherent RAM mapped onto the host address space. 
- */ -#define VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM (1) - -/* - * 1014 vendor sub-type - * - * IBM NPU NVlink2 ATSD (Address Translation Shootdown) register of NPU - * to do TLB invalidation on a GPU. - */ -#define VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD (1) - /* * The MSIX mappable capability informs that MSIX data of a BAR can be mmapped * which allows direct access to non-MSIX registers which happened to be within -- cgit v1.2.3 From a717072007e8aedd3f951726d8cf55454860b30d Mon Sep 17 00:00:00 2001 From: Shameer Kolothum Date: Tue, 23 Jul 2019 17:06:36 +0100 Subject: vfio/type1: Add IOVA range capability support This allows the user-space to retrieve the supported IOVA range(s), excluding any non-relaxable reserved regions. The implementation is based on capability chains, added to VFIO_IOMMU_GET_INFO ioctl. Signed-off-by: Shameer Kolothum Reviewed-by: Eric Auger Signed-off-by: Alex Williamson --- drivers/vfio/vfio_iommu_type1.c | 101 ++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/vfio.h | 26 ++++++++++- 2 files changed, 126 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 56cf55776d6c..d0c5e768acb7 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -2138,6 +2138,73 @@ static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu) return ret; } +static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps, + struct vfio_iommu_type1_info_cap_iova_range *cap_iovas, + size_t size) +{ + struct vfio_info_cap_header *header; + struct vfio_iommu_type1_info_cap_iova_range *iova_cap; + + header = vfio_info_cap_add(caps, size, + VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE, 1); + if (IS_ERR(header)) + return PTR_ERR(header); + + iova_cap = container_of(header, + struct vfio_iommu_type1_info_cap_iova_range, + header); + iova_cap->nr_iovas = cap_iovas->nr_iovas; + memcpy(iova_cap->iova_ranges, cap_iovas->iova_ranges, + cap_iovas->nr_iovas * sizeof(*cap_iovas->iova_ranges)); + return 0; +} + +static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu, + struct vfio_info_cap *caps) +{ + struct vfio_iommu_type1_info_cap_iova_range *cap_iovas; + struct vfio_iova *iova; + size_t size; + int iovas = 0, i = 0, ret; + + mutex_lock(&iommu->lock); + + list_for_each_entry(iova, &iommu->iova_list, list) + iovas++; + + if (!iovas) { + /* + * Return 0 as a container with a single mdev device + * will have an empty list + */ + ret = 0; + goto out_unlock; + } + + size = sizeof(*cap_iovas) + (iovas * sizeof(*cap_iovas->iova_ranges)); + + cap_iovas = kzalloc(size, GFP_KERNEL); + if (!cap_iovas) { + ret = -ENOMEM; + goto out_unlock; + } + + cap_iovas->nr_iovas = iovas; + + list_for_each_entry(iova, &iommu->iova_list, list) { + cap_iovas->iova_ranges[i].start = iova->start; + cap_iovas->iova_ranges[i].end = iova->end; + i++; + } + + ret = vfio_iommu_iova_add_cap(caps, cap_iovas, size); + + kfree(cap_iovas); +out_unlock: + mutex_unlock(&iommu->lock); + return ret; +} + static long vfio_iommu_type1_ioctl(void *iommu_data, unsigned int cmd, unsigned long arg) { @@ -2159,19 +2226,53 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, } } else if (cmd == VFIO_IOMMU_GET_INFO) { struct vfio_iommu_type1_info info; + struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; + unsigned long capsz; + int ret; minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes); + /* For backward compatibility, cannot require this */ + capsz = offsetofend(struct vfio_iommu_type1_info, 
cap_offset); + if (copy_from_user(&info, (void __user *)arg, minsz)) return -EFAULT; if (info.argsz < minsz) return -EINVAL; + if (info.argsz >= capsz) { + minsz = capsz; + info.cap_offset = 0; /* output, no-recopy necessary */ + } + info.flags = VFIO_IOMMU_INFO_PGSIZES; info.iova_pgsizes = vfio_pgsize_bitmap(iommu); + ret = vfio_iommu_iova_build_caps(iommu, &caps); + if (ret) + return ret; + + if (caps.size) { + info.flags |= VFIO_IOMMU_INFO_CAPS; + + if (info.argsz < sizeof(info) + caps.size) { + info.argsz = sizeof(info) + caps.size; + } else { + vfio_info_cap_shift(&caps, sizeof(info)); + if (copy_to_user((void __user *)arg + + sizeof(info), caps.buf, + caps.size)) { + kfree(caps.buf); + return -EFAULT; + } + info.cap_offset = sizeof(info); + } + + kfree(caps.buf); + } + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0; diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 8f10748dac79..1259dccd09d2 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -714,7 +714,31 @@ struct vfio_iommu_type1_info { __u32 argsz; __u32 flags; #define VFIO_IOMMU_INFO_PGSIZES (1 << 0) /* supported page sizes info */ - __u64 iova_pgsizes; /* Bitmap of supported page sizes */ +#define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */ + __u64 iova_pgsizes; /* Bitmap of supported page sizes */ + __u32 cap_offset; /* Offset within info struct of first cap */ +}; + +/* + * The IOVA capability allows to report the valid IOVA range(s) + * excluding any non-relaxable reserved regions exposed by + * devices attached to the container. Any DMA map attempt + * outside the valid iova range will return error. + * + * The structures below define version 1 of this capability. + */ +#define VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE 1 + +struct vfio_iova_range { + __u64 start; + __u64 end; +}; + +struct vfio_iommu_type1_info_cap_iova_range { + struct vfio_info_cap_header header; + __u32 nr_iovas; + __u32 reserved; + struct vfio_iova_range iova_ranges[]; }; #define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12) -- cgit v1.2.3 From 99b60d56a35b18af267f275559a530db372bfad7 Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Fri, 16 Aug 2019 21:56:27 +0200 Subject: net: phy: add EEE-related constants Add EEE-related constants. This includes the new MMD EEE registers for NBase-T / 802.3bz. Signed-off-by: Heiner Kallweit Signed-off-by: David S. Miller --- include/uapi/linux/mdio.h | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/mdio.h b/include/uapi/linux/mdio.h index 0a552061ff1c..4bcb41c71b8c 100644 --- a/include/uapi/linux/mdio.h +++ b/include/uapi/linux/mdio.h @@ -45,11 +45,14 @@ #define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */ #define MDIO_AN_LPA 19 /* AN LP abilities (base page) */ #define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */ +#define MDIO_PCS_EEE_ABLE2 21 /* EEE Capability register 2 */ #define MDIO_PMA_NG_EXTABLE 21 /* 2.5G/5G PMA/PMD extended ability */ #define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */ #define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */ #define MDIO_AN_EEE_ADV 60 /* EEE advertisement */ #define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */ +#define MDIO_AN_EEE_ADV2 62 /* EEE advertisement 2 */ +#define MDIO_AN_EEE_LPABLE2 63 /* EEE link partner ability 2 */ /* Media-dependent registers. 
*/ #define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */ @@ -276,6 +279,13 @@ #define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ #define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ #define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ +#define MDIO_EEE_40GR_FW 0x0100 /* 40G R fast wake */ +#define MDIO_EEE_40GR_DS 0x0200 /* 40G R deep sleep */ +#define MDIO_EEE_100GR_FW 0x1000 /* 100G R fast wake */ +#define MDIO_EEE_100GR_DS 0x2000 /* 100G R deep sleep */ + +#define MDIO_EEE_2_5GT 0x0001 /* 2.5GT EEE cap */ +#define MDIO_EEE_5GT 0x0002 /* 5GT EEE cap */ /* 2.5G/5G Extended abilities register. */ #define MDIO_PMA_NG_EXTABLE_2_5GBT 0x0001 /* 2.5GBASET ability */ -- cgit v1.2.3 From df2c71ffdfae58961981d7cbcccea93688fc4e96 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 19 Aug 2019 22:02:46 +0800 Subject: sctp: add SCTP_ASCONF_SUPPORTED sockopt SCTP_ASCONF_SUPPORTED sockopt is used to set the endpoint's asconf flag. With this feature, each endpoint will have its own flag for its future asoc's asconf_capable, instead of the netns asconf flag. Note that when both ep's asconf_enable and auth_enable are enabled, SCTP_CID_ASCONF and SCTP_CID_ASCONF_ACK should be added into auth_chunk_list. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/uapi/linux/sctp.h | 1 + net/sctp/socket.c | 82 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 83 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index b8f2c4d56532..9b9b82debc0d 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -134,6 +134,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_INTERLEAVING_SUPPORTED 125 #define SCTP_SENDMSG_CONNECT 126 #define SCTP_EVENT 127 +#define SCTP_ASCONF_SUPPORTED 128 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 559793f7f72a..b21a70708405 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4496,6 +4496,42 @@ static int sctp_setsockopt_event(struct sock *sk, char __user *optval, return retval; } +static int sctp_setsockopt_asconf_supported(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + struct sctp_endpoint *ep; + int retval = -EINVAL; + + if (optlen != sizeof(params)) + goto out; + + if (copy_from_user(&params, optval, optlen)) { + retval = -EFAULT; + goto out; + } + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) + goto out; + + ep = sctp_sk(sk)->ep; + ep->asconf_enable = !!params.assoc_value; + + if (ep->asconf_enable && ep->auth_enable) { + sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF); + sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK); + } + + retval = 0; + +out: + return retval; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -4696,6 +4732,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_EVENT: retval = sctp_setsockopt_event(sk, optval, optlen); break; + case SCTP_ASCONF_SUPPORTED: + retval = sctp_setsockopt_asconf_supported(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -7675,6 +7714,45 @@ static int sctp_getsockopt_event(struct sock *sk, int len, char __user *optval, return 0; } +static int sctp_getsockopt_asconf_supported(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_assoc_value params; + struct
sctp_association *asoc; + int retval = -EFAULT; + + if (len < sizeof(params)) { + retval = -EINVAL; + goto out; + } + + len = sizeof(params); + if (copy_from_user(&params, optval, len)) + goto out; + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) { + retval = -EINVAL; + goto out; + } + + params.assoc_value = asoc ? asoc->peer.asconf_capable + : sctp_sk(sk)->ep->asconf_enable; + + if (put_user(len, optlen)) + goto out; + + if (copy_to_user(optval, &params, len)) + goto out; + + retval = 0; + +out: + return retval; +} + static int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -7876,6 +7954,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname, case SCTP_EVENT: retval = sctp_getsockopt_event(sk, len, optval, optlen); break; + case SCTP_ASCONF_SUPPORTED: + retval = sctp_getsockopt_asconf_supported(sk, len, optval, + optlen); + break; default: retval = -ENOPROTOOPT; break; -- cgit v1.2.3 From 56dd525abd56f7acd7b44a52935726e3ada4916c Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 19 Aug 2019 22:02:49 +0800 Subject: sctp: add SCTP_AUTH_SUPPORTED sockopt SCTP_AUTH_SUPPORTED sockopt is used to set the endpoint's auth flag. With this feature, each endpoint will have its own flag for its future asoc's auth_capable, instead of the netns auth flag. Note that when the ep's auth_enable is enabled, endpoint auth related data should be initialized. If asconf_enable is also set, SCTP_CID_ASCONF/SCTP_CID_ASCONF_ACK should be added into auth_chunk_list. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/uapi/linux/sctp.h | 1 + net/sctp/socket.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 87 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index 9b9b82debc0d..62527aca8477 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -135,6 +135,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_SENDMSG_CONNECT 126 #define SCTP_EVENT 127 #define SCTP_ASCONF_SUPPORTED 128 +#define SCTP_AUTH_SUPPORTED 129 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 diff --git a/net/sctp/socket.c b/net/sctp/socket.c index dcde8d92c568..82bc25223cfe 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4520,6 +4520,46 @@ out: return retval; } +static int sctp_setsockopt_auth_supported(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + struct sctp_endpoint *ep; + int retval = -EINVAL; + + if (optlen != sizeof(params)) + goto out; + + if (copy_from_user(&params, optval, optlen)) { + retval = -EFAULT; + goto out; + } + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) + goto out; + + ep = sctp_sk(sk)->ep; + if (params.assoc_value) { + retval = sctp_auth_init(ep, GFP_KERNEL); + if (retval) + goto out; + if (ep->asconf_enable) { + sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF); + sctp_auth_ep_add_chunkid(ep, SCTP_CID_ASCONF_ACK); + } + } + + ep->auth_enable = !!params.assoc_value; + retval = 0; + +out: + return retval; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -4723,6 +4763,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_ASCONF_SUPPORTED: retval = sctp_setsockopt_asconf_supported(sk, optval, optlen); break; + case
SCTP_AUTH_SUPPORTED: + retval = sctp_setsockopt_auth_supported(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -7746,6 +7789,45 @@ out: return retval; } +static int sctp_getsockopt_auth_supported(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + int retval = -EFAULT; + + if (len < sizeof(params)) { + retval = -EINVAL; + goto out; + } + + len = sizeof(params); + if (copy_from_user(&params, optval, len)) + goto out; + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) { + retval = -EINVAL; + goto out; + } + + params.assoc_value = asoc ? asoc->peer.auth_capable + : sctp_sk(sk)->ep->auth_enable; + + if (put_user(len, optlen)) + goto out; + + if (copy_to_user(optval, &params, len)) + goto out; + + retval = 0; + +out: + return retval; +} + static int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -7951,6 +8033,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname, retval = sctp_getsockopt_asconf_supported(sk, len, optval, optlen); break; + case SCTP_AUTH_SUPPORTED: + retval = sctp_getsockopt_auth_supported(sk, len, optval, + optlen); + break; default: retval = -ENOPROTOOPT; break; -- cgit v1.2.3 From 1b9ed84ecf268904d89edf2908426a8eb3b5a4ba Mon Sep 17 00:00:00 2001 From: Quentin Monnet Date: Tue, 20 Aug 2019 10:31:50 +0100 Subject: bpf: add new BPF_BTF_GET_NEXT_ID syscall command Add a new command for the bpf() system call: BPF_BTF_GET_NEXT_ID is used to cycle through all BTF objects loaded on the system. The motivation is to be able to inspect (list) all BTF objects present on the system. Signed-off-by: Quentin Monnet Reviewed-by: Jakub Kicinski Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 3 +++ include/uapi/linux/bpf.h | 1 + kernel/bpf/btf.c | 4 ++-- kernel/bpf/syscall.c | 4 ++++ 4 files changed, 10 insertions(+), 2 deletions(-) (limited to 'include/uapi') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 15ae49862b82..5b9d22338606 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -24,6 +24,9 @@ struct seq_file; struct btf; struct btf_type; +extern struct idr btf_idr; +extern spinlock_t btf_idr_lock; + /* map is generic key/value storage optionally accesible by eBPF programs */ struct bpf_map_ops { /* funcs callable from userspace (via syscall) */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 0ef594ac3899..8aa6126f0b6e 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -106,6 +106,7 @@ enum bpf_cmd { BPF_TASK_FD_QUERY, BPF_MAP_LOOKUP_AND_DELETE_ELEM, BPF_MAP_FREEZE, + BPF_BTF_GET_NEXT_ID, }; enum bpf_map_type { diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 6b403dc18486..adb3adcebe3c 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -195,8 +195,8 @@ i < btf_type_vlen(struct_type); \ i++, member++) -static DEFINE_IDR(btf_idr); -static DEFINE_SPINLOCK(btf_idr_lock); +DEFINE_IDR(btf_idr); +DEFINE_SPINLOCK(btf_idr_lock); struct btf { void *data; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index cf8052b016e7..c0f62fd67c6b 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -2884,6 +2884,10 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz err = bpf_obj_get_next_id(&attr, uattr, &map_idr, &map_idr_lock); break; + case BPF_BTF_GET_NEXT_ID: + err = bpf_obj_get_next_id(&attr, uattr, + &btf_idr,
&btf_idr_lock); + break; case BPF_PROG_GET_FD_BY_ID: err = bpf_prog_get_fd_by_id(&attr); break; -- cgit v1.2.3 From c5b9a7f826735228a38fab4a7b2707f032468c88 Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Fri, 2 Aug 2019 13:30:58 +0200 Subject: nl80211: add 6GHz band definition to enum nl80211_band In the 802.11ax specification a new band is introduced, which is also proposed by the FCC for unlicensed use. This band is referred to as 6GHz, spanning the frequency range from 5925 to 7125 MHz. Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Leon Zegers Signed-off-by: Arend van Spriel Link: https://lore.kernel.org/r/1564745465-21234-2-git-send-email-arend.vanspriel@broadcom.com Signed-off-by: Johannes Berg --- include/uapi/linux/nl80211.h | 2 ++ net/mac80211/tx.c | 1 + 2 files changed, 3 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 822851d369ab..4d5988f47118 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -4543,6 +4543,7 @@ enum nl80211_txrate_gi { * @NL80211_BAND_2GHZ: 2.4 GHz ISM band * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz) * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 69.12 GHz) + * @NL80211_BAND_6GHZ: around 6 GHz band (5.9 - 7.2 GHz) * @NUM_NL80211_BANDS: number of bands, avoid using this in userspace * since newer kernel versions may support more bands */ @@ -4550,6 +4551,7 @@ enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ, NL80211_BAND_60GHZ, + NL80211_BAND_6GHZ, NUM_NL80211_BANDS, }; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 235c6377a203..1fa422782905 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -162,6 +162,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, break; } case NL80211_BAND_5GHZ: + case NL80211_BAND_6GHZ: if (r->flags & IEEE80211_RATE_MANDATORY_A) mrate = r->bitrate; break; -- cgit v1.2.3 From 6c7a00339e2a64b068c986301f37bd31eb83d7e9 Mon Sep 17 00:00:00 2001 From: Ben Greear Date: Fri, 9 Aug 2019 11:00:00 -0700 Subject: cfg80211: Support assoc-at timer in sta-info Report the timestamp of when the sta became associated. This is the boottime clock; units are nanoseconds.
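As a hedged aside (not part of this patch): a driver that tracks association time would capture a CLOCK_BOOTTIME timestamp when the station associates and report it while filling struct station_info. The helper below is a minimal sketch; its name is hypothetical, and ktime_get_boottime_ns() is assumed to be the available accessor for CLOCK_BOOTTIME in this kernel version.

/* Hypothetical driver-side sketch, not from this patch. */
static void example_report_assoc_at(struct station_info *sinfo, u64 assoc_time_ns)
{
	/* assoc_time_ns was captured at association time, e.g. with
	 * ktime_get_boottime_ns(); nl80211 only emits the attribute when
	 * the corresponding bit is set in sinfo->filled. */
	sinfo->assoc_at = assoc_time_ns;
	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ASSOC_AT_BOOTTIME);
}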
Signed-off-by: Ben Greear Link: https://lore.kernel.org/r/20190809180001.26393-1-greearb@candelatech.com Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 2 ++ include/uapi/linux/nl80211.h | 3 +++ net/wireless/nl80211.c | 1 + 3 files changed, 6 insertions(+) (limited to 'include/uapi') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 8140c4837122..1e32b11e1730 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1330,6 +1330,7 @@ struct cfg80211_tid_stats { * indicate the relevant values in this struct for them * @connected_time: time(in secs) since a station is last connected * @inactive_time: time since last station activity (tx/rx) in milliseconds + * @assoc_at: boottime (ns) of the last association * @rx_bytes: bytes (size of MPDUs) received from this station * @tx_bytes: bytes (size of MPDUs) transmitted to this station * @llid: mesh local link id @@ -1390,6 +1391,7 @@ struct station_info { u64 filled; u32 connected_time; u32 inactive_time; + u64 assoc_at; u64 rx_bytes; u64 tx_bytes; u16 llid; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 4d5988f47118..04b58295c8d9 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3201,6 +3201,8 @@ enum nl80211_sta_bss_param { * sent to the station (u64, usec) * @NL80211_STA_INFO_AIRTIME_WEIGHT: current airtime weight for station (u16) * @NL80211_STA_INFO_AIRTIME_LINK_METRIC: airtime link metric for mesh station + * @NL80211_STA_INFO_ASSOC_AT_BOOTTIME: Timestamp (CLOCK_BOOTTIME, nanoseconds) + * of STA's association * @__NL80211_STA_INFO_AFTER_LAST: internal * @NL80211_STA_INFO_MAX: highest possible station info attribute */ @@ -3247,6 +3249,7 @@ enum nl80211_sta_info { NL80211_STA_INFO_TX_DURATION, NL80211_STA_INFO_AIRTIME_WEIGHT, NL80211_STA_INFO_AIRTIME_LINK_METRIC, + NL80211_STA_INFO_ASSOC_AT_BOOTTIME, /* keep last */ __NL80211_STA_INFO_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 9a642219a8c7..cacd96704647 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -5032,6 +5032,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 cmd, u32 portid, PUT_SINFO(CONNECTED_TIME, connected_time, u32); PUT_SINFO(INACTIVE_TIME, inactive_time, u32); + PUT_SINFO_U64(ASSOC_AT_BOOTTIME, assoc_at); if (sinfo->filled & (BIT_ULL(NL80211_STA_INFO_RX_BYTES) | BIT_ULL(NL80211_STA_INFO_RX_BYTES64)) && -- cgit v1.2.3 From 2a38075cd0beefa4da326380cf54c7b365ddc035 Mon Sep 17 00:00:00 2001 From: Alexei Avshalom Lazar Date: Sun, 18 Aug 2019 17:35:17 +0300 Subject: nl80211: Add support for EDMG channels The 802.11ay specification defines Enhanced Directional Multi-Gigabit (EDMG) STA and AP, which allow channel bonding of 2 or more channels. Introduce new NL attributes that are needed for enabling and configuring EDMG support. Two new attributes are used by the kernel to publish the driver's EDMG capabilities to userspace: NL80211_BAND_ATTR_EDMG_CHANNELS - bitmap field that indicates the 2.16 GHz channel(s) that are supported by the driver. When this attribute is not set, the driver does not support EDMG. NL80211_BAND_ATTR_EDMG_BW_CONFIG - represents the channel bandwidth configurations supported by the driver.
Two additional attributes are used by userspace for the connect command and for AP configuration: NL80211_ATTR_WIPHY_EDMG_CHANNELS NL80211_ATTR_WIPHY_EDMG_BW_CONFIG A new rate info flag, RATE_INFO_FLAGS_EDMG, can be reported by the driver and used for bitrate calculation that takes EDMG into account according to the 802.11ay specification. Signed-off-by: Alexei Avshalom Lazar Link: https://lore.kernel.org/r/1566138918-3823-2-git-send-email-ailizaro@codeaurora.org Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/wil6210/cfg80211.c | 2 +- include/net/cfg80211.h | 86 ++++++++++++++- include/uapi/linux/nl80211.h | 24 +++++ net/mac80211/mlme.c | 2 +- net/mac80211/status.c | 6 +- net/wireless/chan.c | 159 ++++++++++++++++++++++++++++ net/wireless/nl80211.c | 37 +++++++ net/wireless/util.c | 42 +++++++- 8 files changed, 349 insertions(+), 9 deletions(-) (limited to 'include/uapi') diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 2fb4258941a5..2414f574bf69 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -351,7 +351,7 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid, BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) | BIT_ULL(NL80211_STA_INFO_TX_FAILED); - sinfo->txrate.flags = RATE_INFO_FLAGS_60G; + sinfo->txrate.flags = RATE_INFO_FLAGS_DMG; sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs); sinfo->rxrate.mcs = stats->last_mcs_rx; sinfo->rx_bytes = stats->rx_bytes; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 1e32b11e1730..5253e7f667bd 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -330,6 +330,60 @@ struct ieee80211_sband_iftype_data { struct ieee80211_sta_he_cap he_cap; }; +/** + * enum ieee80211_edmg_bw_config - allowed channel bandwidth configurations + * + * @IEEE80211_EDMG_BW_CONFIG_4: 2.16GHz + * @IEEE80211_EDMG_BW_CONFIG_5: 2.16GHz and 4.32GHz + * @IEEE80211_EDMG_BW_CONFIG_6: 2.16GHz, 4.32GHz and 6.48GHz + * @IEEE80211_EDMG_BW_CONFIG_7: 2.16GHz, 4.32GHz, 6.48GHz and 8.64GHz + * @IEEE80211_EDMG_BW_CONFIG_8: 2.16GHz and 2.16GHz + 2.16GHz + * @IEEE80211_EDMG_BW_CONFIG_9: 2.16GHz, 4.32GHz and 2.16GHz + 2.16GHz + * @IEEE80211_EDMG_BW_CONFIG_10: 2.16GHz, 4.32GHz, 6.48GHz and 2.16GHz+2.16GHz + * @IEEE80211_EDMG_BW_CONFIG_11: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz and + * 2.16GHz+2.16GHz + * @IEEE80211_EDMG_BW_CONFIG_12: 2.16GHz, 2.16GHz + 2.16GHz and + * 4.32GHz + 4.32GHz + * @IEEE80211_EDMG_BW_CONFIG_13: 2.16GHz, 4.32GHz, 2.16GHz + 2.16GHz and + * 4.32GHz + 4.32GHz + * @IEEE80211_EDMG_BW_CONFIG_14: 2.16GHz, 4.32GHz, 6.48GHz, 2.16GHz + 2.16GHz + * and 4.32GHz + 4.32GHz + * @IEEE80211_EDMG_BW_CONFIG_15: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz, + * 2.16GHz + 2.16GHz and 4.32GHz + 4.32GHz + */ +enum ieee80211_edmg_bw_config { + IEEE80211_EDMG_BW_CONFIG_4 = 4, + IEEE80211_EDMG_BW_CONFIG_5 = 5, + IEEE80211_EDMG_BW_CONFIG_6 = 6, + IEEE80211_EDMG_BW_CONFIG_7 = 7, + IEEE80211_EDMG_BW_CONFIG_8 = 8, + IEEE80211_EDMG_BW_CONFIG_9 = 9, + IEEE80211_EDMG_BW_CONFIG_10 = 10, + IEEE80211_EDMG_BW_CONFIG_11 = 11, + IEEE80211_EDMG_BW_CONFIG_12 = 12, + IEEE80211_EDMG_BW_CONFIG_13 = 13, + IEEE80211_EDMG_BW_CONFIG_14 = 14, + IEEE80211_EDMG_BW_CONFIG_15 = 15, +}; + +/** + * struct ieee80211_edmg - EDMG configuration + * + * This structure describes most essential parameters needed + * to describe 802.11ay EDMG configuration + * + * @channels: bitmap that indicates the 2.16 GHz channel(s) + * that are allowed to be used for transmissions.
+ * Bit 0 indicates channel 1, bit 1 indicates channel 2, etc. + * Set to 0 indicate EDMG not supported. + * @bw_config: Channel BW Configuration subfield encodes + * the allowed channel bandwidth configurations + */ +struct ieee80211_edmg { + u8 channels; + enum ieee80211_edmg_bw_config bw_config; +}; + /** * struct ieee80211_supported_band - frequency band definition * @@ -346,6 +400,7 @@ struct ieee80211_sband_iftype_data { * @n_bitrates: Number of bitrates in @bitrates * @ht_cap: HT capabilities in this band * @vht_cap: VHT capabilities in this band + * @edmg_cap: EDMG capabilities in this band * @n_iftype_data: number of iftype data entries * @iftype_data: interface type data entries. Note that the bits in * @types_mask inside this structure cannot overlap (i.e. only @@ -360,6 +415,7 @@ struct ieee80211_supported_band { int n_bitrates; struct ieee80211_sta_ht_cap ht_cap; struct ieee80211_sta_vht_cap vht_cap; + struct ieee80211_edmg edmg_cap; u16 n_iftype_data; const struct ieee80211_sband_iftype_data *iftype_data; }; @@ -527,12 +583,17 @@ struct key_params { * @center_freq1: center frequency of first segment * @center_freq2: center frequency of second segment * (only with 80+80 MHz) + * @edmg: define the EDMG channels configuration. + * If edmg is requested (i.e. the .channels member is non-zero), + * chan will define the primary channel and all other + * parameters are ignored. */ struct cfg80211_chan_def { struct ieee80211_channel *chan; enum nl80211_chan_width width; u32 center_freq1; u32 center_freq2; + struct ieee80211_edmg edmg; }; /** @@ -590,6 +651,19 @@ cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1, chandef1->center_freq2 == chandef2->center_freq2); } +/** + * cfg80211_chandef_is_edmg - check if chandef represents an EDMG channel + * + * @chandef: the channel definition + * + * Return: %true if EDMG defined, %false otherwise. + */ +static inline bool +cfg80211_chandef_is_edmg(const struct cfg80211_chan_def *chandef) +{ + return chandef->edmg.channels || chandef->edmg.bw_config; +} + /** * cfg80211_chandef_compatible - check if two channel definitions are compatible * @chandef1: first channel definition @@ -1177,15 +1251,17 @@ int cfg80211_check_station_change(struct wiphy *wiphy, * @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS * @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS * @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval - * @RATE_INFO_FLAGS_60G: 60GHz MCS + * @RATE_INFO_FLAGS_DMG: 60GHz MCS * @RATE_INFO_FLAGS_HE_MCS: HE MCS information + * @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode */ enum rate_info_flags { RATE_INFO_FLAGS_MCS = BIT(0), RATE_INFO_FLAGS_VHT_MCS = BIT(1), RATE_INFO_FLAGS_SHORT_GI = BIT(2), - RATE_INFO_FLAGS_60G = BIT(3), + RATE_INFO_FLAGS_DMG = BIT(3), RATE_INFO_FLAGS_HE_MCS = BIT(4), + RATE_INFO_FLAGS_EDMG = BIT(5), }; /** @@ -1225,6 +1301,7 @@ enum rate_info_bw { * @he_dcm: HE DCM value * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc, * only valid if bw is %RATE_INFO_BW_HE_RU) + * @n_bonded_ch: In case of EDMG the number of bonded channels (1-4) */ struct rate_info { u8 flags; @@ -1235,6 +1312,7 @@ struct rate_info { u8 he_gi; u8 he_dcm; u8 he_ru_alloc; + u8 n_bonded_ch; }; /** @@ -2438,6 +2516,9 @@ struct cfg80211_bss_selection { * @fils_erp_rrk_len: Length of @fils_erp_rrk in octets. * @want_1x: indicates user-space supports and wants to use 802.1X driver * offload of 4-way handshake. + * @edmg: define the EDMG channels. 
+ * This may specify multiple channels and bonding options for the driver + * to choose from, based on BSS configuration. */ struct cfg80211_connect_params { struct ieee80211_channel *channel; @@ -2471,6 +2552,7 @@ struct cfg80211_connect_params { const u8 *fils_erp_rrk; size_t fils_erp_rrk_len; bool want_1x; + struct ieee80211_edmg edmg; }; /** diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 04b58295c8d9..bf7c4222f512 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -52,6 +52,11 @@ #define NL80211_MULTICAST_GROUP_NAN "nan" #define NL80211_MULTICAST_GROUP_TESTMODE "testmode" +#define NL80211_EDMG_BW_CONFIG_MIN 4 +#define NL80211_EDMG_BW_CONFIG_MAX 15 +#define NL80211_EDMG_CHANNELS_MIN 1 +#define NL80211_EDMG_CHANNELS_MAX 0x3c /* 0b00111100 */ + /** * DOC: Station handling * @@ -2361,6 +2366,13 @@ enum nl80211_commands { * @NL80211_ATTR_HE_OBSS_PD: nested attribute for OBSS Packet Detection * functionality. * + * @NL80211_ATTR_WIPHY_EDMG_CHANNELS: bitmap that indicates the 2.16 GHz + * channel(s) that are allowed to be used for EDMG transmissions. + * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251. (u8 attribute) + * @NL80211_ATTR_WIPHY_EDMG_BW_CONFIG: Channel BW Configuration subfield encodes + * the allowed channel bandwidth configurations. (u8 attribute) + * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -2820,6 +2832,9 @@ enum nl80211_attrs { NL80211_ATTR_HE_OBSS_PD, + NL80211_ATTR_WIPHY_EDMG_CHANNELS, + NL80211_ATTR_WIPHY_EDMG_BW_CONFIG, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -3431,6 +3446,12 @@ enum nl80211_band_iftype_attr { * @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE * @NL80211_BAND_ATTR_IFTYPE_DATA: nested array attribute, with each entry using * attributes from &enum nl80211_band_iftype_attr + * @NL80211_BAND_ATTR_EDMG_CHANNELS: bitmap that indicates the 2.16 GHz + * channel(s) that are allowed to be used for EDMG transmissions. + * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251. + * @NL80211_BAND_ATTR_EDMG_BW_CONFIG: Channel BW Configuration subfield encodes + * the allowed channel bandwidth configurations. + * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13. 
* @NL80211_BAND_ATTR_MAX: highest band attribute currently defined * @__NL80211_BAND_ATTR_AFTER_LAST: internal use */ @@ -3448,6 +3469,9 @@ enum nl80211_band_attr { NL80211_BAND_ATTR_VHT_CAPA, NL80211_BAND_ATTR_IFTYPE_DATA, + NL80211_BAND_ATTR_EDMG_CHANNELS, + NL80211_BAND_ATTR_EDMG_BW_CONFIG, + /* keep last */ __NL80211_BAND_ATTR_AFTER_LAST, NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 641876982ab9..6471f552a942 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -158,10 +158,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap)); ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap); + memset(chandef, 0, sizeof(struct cfg80211_chan_def)); chandef->chan = channel; chandef->width = NL80211_CHAN_WIDTH_20_NOHT; chandef->center_freq1 = channel->center_freq; - chandef->center_freq2 = 0; if (!ht_oper || !sta_ht_cap.ht_supported) { ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; diff --git a/net/mac80211/status.c b/net/mac80211/status.c index f88f94d1f177..ab8ba5835ca0 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -262,7 +262,8 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info, /* IEEE80211_RADIOTAP_RATE rate */ if (status && status->rate && !(status->rate->flags & (RATE_INFO_FLAGS_MCS | - RATE_INFO_FLAGS_60G | + RATE_INFO_FLAGS_DMG | + RATE_INFO_FLAGS_EDMG | RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_HE_MCS))) len += 2; @@ -329,7 +330,8 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local, if (status && status->rate) { if (!(status->rate->flags & (RATE_INFO_FLAGS_MCS | - RATE_INFO_FLAGS_60G | + RATE_INFO_FLAGS_DMG | + RATE_INFO_FLAGS_EDMG | RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_HE_MCS))) legacy_rate = status->rate->legacy; diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 7c9d204838d4..e851cafd8e2f 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -14,6 +14,11 @@ #include "core.h" #include "rdev-ops.h" +static bool cfg80211_valid_60g_freq(u32 freq) +{ + return freq >= 58320 && freq <= 70200; +} + void cfg80211_chandef_create(struct cfg80211_chan_def *chandef, struct ieee80211_channel *chan, enum nl80211_channel_type chan_type) @@ -23,6 +28,8 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef, chandef->chan = chan; chandef->center_freq2 = 0; + chandef->edmg.bw_config = 0; + chandef->edmg.channels = 0; switch (chan_type) { case NL80211_CHAN_NO_HT: @@ -47,6 +54,91 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef, } EXPORT_SYMBOL(cfg80211_chandef_create); +static bool cfg80211_edmg_chandef_valid(const struct cfg80211_chan_def *chandef) +{ + int max_contiguous = 0; + int num_of_enabled = 0; + int contiguous = 0; + int i; + + if (!chandef->edmg.channels || !chandef->edmg.bw_config) + return false; + + if (!cfg80211_valid_60g_freq(chandef->chan->center_freq)) + return false; + + for (i = 0; i < 6; i++) { + if (chandef->edmg.channels & BIT(i)) { + contiguous++; + num_of_enabled++; + } else { + contiguous = 0; + } + + max_contiguous = max(contiguous, max_contiguous); + } + /* basic verification of edmg configuration according to + * IEEE P802.11ay/D4.0 section 9.4.2.251 + */ + /* check bw_config against contiguous edmg channels */ + switch (chandef->edmg.bw_config) { + case IEEE80211_EDMG_BW_CONFIG_4: + case IEEE80211_EDMG_BW_CONFIG_8: + case IEEE80211_EDMG_BW_CONFIG_12: + if (max_contiguous < 1) + return false; + break; + case 
IEEE80211_EDMG_BW_CONFIG_5: + case IEEE80211_EDMG_BW_CONFIG_9: + case IEEE80211_EDMG_BW_CONFIG_13: + if (max_contiguous < 2) + return false; + break; + case IEEE80211_EDMG_BW_CONFIG_6: + case IEEE80211_EDMG_BW_CONFIG_10: + case IEEE80211_EDMG_BW_CONFIG_14: + if (max_contiguous < 3) + return false; + break; + case IEEE80211_EDMG_BW_CONFIG_7: + case IEEE80211_EDMG_BW_CONFIG_11: + case IEEE80211_EDMG_BW_CONFIG_15: + if (max_contiguous < 4) + return false; + break; + + default: + return false; + } + + /* check bw_config against aggregated (non contiguous) edmg channels */ + switch (chandef->edmg.bw_config) { + case IEEE80211_EDMG_BW_CONFIG_4: + case IEEE80211_EDMG_BW_CONFIG_5: + case IEEE80211_EDMG_BW_CONFIG_6: + case IEEE80211_EDMG_BW_CONFIG_7: + break; + case IEEE80211_EDMG_BW_CONFIG_8: + case IEEE80211_EDMG_BW_CONFIG_9: + case IEEE80211_EDMG_BW_CONFIG_10: + case IEEE80211_EDMG_BW_CONFIG_11: + if (num_of_enabled < 2) + return false; + break; + case IEEE80211_EDMG_BW_CONFIG_12: + case IEEE80211_EDMG_BW_CONFIG_13: + case IEEE80211_EDMG_BW_CONFIG_14: + case IEEE80211_EDMG_BW_CONFIG_15: + if (num_of_enabled < 4 || max_contiguous < 2) + return false; + break; + default: + return false; + } + + return true; +} + bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) { u32 control_freq; @@ -112,6 +204,10 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) return false; } + if (cfg80211_chandef_is_edmg(chandef) && + !cfg80211_edmg_chandef_valid(chandef)) + return false; + return true; } EXPORT_SYMBOL(cfg80211_chandef_valid); @@ -721,12 +817,66 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy, return true; } +/* check if the operating channels are valid and supported */ +static bool cfg80211_edmg_usable(struct wiphy *wiphy, u8 edmg_channels, + enum ieee80211_edmg_bw_config edmg_bw_config, + int primary_channel, + struct ieee80211_edmg *edmg_cap) +{ + struct ieee80211_channel *chan; + int i, freq; + int channels_counter = 0; + + if (!edmg_channels && !edmg_bw_config) + return true; + + if ((!edmg_channels && edmg_bw_config) || + (edmg_channels && !edmg_bw_config)) + return false; + + if (!(edmg_channels & BIT(primary_channel - 1))) + return false; + + /* 60GHz channels 1..6 */ + for (i = 0; i < 6; i++) { + if (!(edmg_channels & BIT(i))) + continue; + + if (!(edmg_cap->channels & BIT(i))) + return false; + + channels_counter++; + + freq = ieee80211_channel_to_frequency(i + 1, + NL80211_BAND_60GHZ); + chan = ieee80211_get_channel(wiphy, freq); + if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) + return false; + } + + /* IEEE802.11 allows max 4 channels */ + if (channels_counter > 4) + return false; + + /* check bw_config is a subset of what driver supports + * (see IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13) + */ + if ((edmg_bw_config % 4) > (edmg_cap->bw_config % 4)) + return false; + + if (edmg_bw_config > edmg_cap->bw_config) + return false; + + return true; +} + bool cfg80211_chandef_usable(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, u32 prohibited_flags) { struct ieee80211_sta_ht_cap *ht_cap; struct ieee80211_sta_vht_cap *vht_cap; + struct ieee80211_edmg *edmg_cap; u32 width, control_freq, cap; if (WARN_ON(!cfg80211_chandef_valid(chandef))) @@ -734,6 +884,15 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy, ht_cap = &wiphy->bands[chandef->chan->band]->ht_cap; vht_cap = &wiphy->bands[chandef->chan->band]->vht_cap; + edmg_cap = &wiphy->bands[chandef->chan->band]->edmg_cap; + + if (edmg_cap->channels && + 
!cfg80211_edmg_usable(wiphy, + chandef->edmg.channels, + chandef->edmg.bw_config, + chandef->chan->hw_value, + edmg_cap)) + return false; control_freq = chandef->chan->center_freq; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index cacd96704647..4565d7385884 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -298,6 +298,13 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 }, + [NL80211_ATTR_WIPHY_EDMG_CHANNELS] = NLA_POLICY_RANGE(NLA_U8, + NL80211_EDMG_CHANNELS_MIN, + NL80211_EDMG_CHANNELS_MAX), + [NL80211_ATTR_WIPHY_EDMG_BW_CONFIG] = NLA_POLICY_RANGE(NLA_U8, + NL80211_EDMG_BW_CONFIG_MIN, + NL80211_EDMG_BW_CONFIG_MAX), + [NL80211_ATTR_CHANNEL_WIDTH] = { .type = NLA_U32 }, [NL80211_ATTR_CENTER_FREQ1] = { .type = NLA_U32 }, [NL80211_ATTR_CENTER_FREQ2] = { .type = NLA_U32 }, @@ -1574,6 +1581,15 @@ static int nl80211_send_band_rateinfo(struct sk_buff *msg, nla_nest_end(msg, nl_iftype_data); } + /* add EDMG info */ + if (sband->edmg_cap.channels && + (nla_put_u8(msg, NL80211_BAND_ATTR_EDMG_CHANNELS, + sband->edmg_cap.channels) || + nla_put_u8(msg, NL80211_BAND_ATTR_EDMG_BW_CONFIG, + sband->edmg_cap.bw_config))) + + return -ENOBUFS; + /* add bitrates */ nl_rates = nla_nest_start_noflag(msg, NL80211_BAND_ATTR_RATES); if (!nl_rates) @@ -2677,6 +2693,18 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev, nla_get_u32(attrs[NL80211_ATTR_CENTER_FREQ2]); } + if (info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]) { + chandef->edmg.channels = + nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]); + + if (info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]) + chandef->edmg.bw_config = + nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]); + } else { + chandef->edmg.bw_config = 0; + chandef->edmg.channels = 0; + } + if (!cfg80211_chandef_valid(chandef)) { NL_SET_ERR_MSG(extack, "invalid channel definition"); return -EINVAL; @@ -9894,6 +9922,15 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) return -EINVAL; } + if (info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]) { + connect.edmg.channels = + nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]); + + if (info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]) + connect.edmg.bw_config = + nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]); + } + if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) { connkeys = nl80211_parse_connkeys(rdev, info, NULL); if (IS_ERR(connkeys)) diff --git a/net/wireless/util.c b/net/wireless/util.c index 59ed0808c13e..c99939067bb0 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1043,7 +1043,7 @@ static u32 cfg80211_calculate_bitrate_ht(struct rate_info *rate) return (bitrate + 50000) / 100000; } -static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate) +static u32 cfg80211_calculate_bitrate_dmg(struct rate_info *rate) { static const u32 __mcs2bitrate[] = { /* control PHY */ @@ -1090,6 +1090,40 @@ static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate) return __mcs2bitrate[rate->mcs]; } +static u32 cfg80211_calculate_bitrate_edmg(struct rate_info *rate) +{ + static const u32 __mcs2bitrate[] = { + /* control PHY */ + [0] = 275, + /* SC PHY */ + [1] = 3850, + [2] = 7700, + [3] = 9625, + [4] = 11550, + [5] = 12512, /* 1251.25 mbps */ + [6] = 13475, + [7] = 15400, + [8] = 19250, + [9] = 23100, + [10] = 25025, + [11] = 26950, + [12] = 30800, + [13] = 38500, + [14] = 46200, + [15] = 50050, + [16] = 
53900, + [17] = 57750, + [18] = 69300, + [19] = 75075, + [20] = 80850, + }; + + if (WARN_ON_ONCE(rate->mcs >= ARRAY_SIZE(__mcs2bitrate))) + return 0; + + return __mcs2bitrate[rate->mcs] * rate->n_bonded_ch; +} + static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate) { static const u32 base[4][10] = { @@ -1262,8 +1296,10 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate) { if (rate->flags & RATE_INFO_FLAGS_MCS) return cfg80211_calculate_bitrate_ht(rate); - if (rate->flags & RATE_INFO_FLAGS_60G) - return cfg80211_calculate_bitrate_60g(rate); + if (rate->flags & RATE_INFO_FLAGS_DMG) + return cfg80211_calculate_bitrate_dmg(rate); + if (rate->flags & RATE_INFO_FLAGS_EDMG) + return cfg80211_calculate_bitrate_edmg(rate); if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) return cfg80211_calculate_bitrate_vht(rate); if (rate->flags & RATE_INFO_FLAGS_HE_MCS) -- cgit v1.2.3 From 8050a395112db5bedb7caa6cb673e711a3c04cd9 Mon Sep 17 00:00:00 2001 From: Peter Wu Date: Wed, 21 Aug 2019 00:08:58 +0100 Subject: bpf: fix 'struct pt_reg' typo in documentation There is no 'struct pt_reg'. Signed-off-by: Peter Wu Reviewed-by: Quentin Monnet Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 8aa6126f0b6e..267544e140be 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1018,7 +1018,7 @@ union bpf_attr { * The realm of the route for the packet associated to *skb*, or 0 * if none was found. * - * int bpf_perf_event_output(struct pt_reg *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) + * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) * Description * Write raw *data* blob into a special BPF perf event held by * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf @@ -1080,7 +1080,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_get_stackid(struct pt_reg *ctx, struct bpf_map *map, u64 flags) + * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags) * Description * Walk a user or a kernel stack and return its id. To achieve * this, the helper needs *ctx*, which is a pointer to the context @@ -1729,7 +1729,7 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_override_return(struct pt_reg *regs, u64 rc) + * int bpf_override_return(struct pt_regs *regs, u64 rc) * Description * Used for error injection, this helper uses kprobes to override * the return value of the probed function, and to set it to *rc*. -- cgit v1.2.3 From 55c33dfbeb831eb3ab7cc1a3e295b0d4d57f23a3 Mon Sep 17 00:00:00 2001 From: Peter Wu Date: Wed, 21 Aug 2019 00:08:59 +0100 Subject: bpf: clarify when bpf_trace_printk discards lines I opened /sys/kernel/tracing/trace once and kept reading from it. bpf_trace_printk somehow did not seem to work: no entries were appended to that trace file. It turns out that tracing is disabled when that file is open. Save the next person some time and document this. The trace file is described in Documentation/trace/ftrace.rst, however the implication "tracing is disabled" did not immediately translate to "bpf_trace_printk silently discards entries".
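As a hedged illustration of the documented pitfall (this program is not part of the patch; it assumes libbpf's SEC() annotation from bpf_helpers.h and a kernel exposing the execve tracepoint):

/* Minimal sketch: output goes to the ftrace ring buffer. Read it via
 * trace_pipe; while the trace file is held open, new lines are discarded. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_execve")
int log_execve(void *ctx)
{
	char fmt[] = "execve entered\n";

	bpf_trace_printk(fmt, sizeof(fmt));
	return 0;
}

char _license[] SEC("license") = "GPL";

After loading the program, reading /sys/kernel/debug/tracing/trace_pipe (rather than trace) shows the lines as they are emitted.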
Signed-off-by: Peter Wu Signed-off-by: Alexei Starovoitov --- include/uapi/linux/bpf.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 267544e140be..b5889257cc33 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -580,6 +580,8 @@ union bpf_attr { * limited to five). * * Each time the helper is called, it appends a line to the trace. + * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is + * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. * The format of the trace is customizable, and the exact output * one will get depends on the options set in * *\/sys/kernel/debug/tracing/trace_options* (see also the -- cgit v1.2.3 From 7542c6dedbc1caa284ca4cbd6b64f99023ff1b97 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 18 Jun 2019 12:09:26 +0900 Subject: jffs2: Remove C++ style comments from uapi header The Linux kernel tolerates C++ style comments these days. Actually, the SPDX License tags for .c files start with //. On the other hand, uapi headers are written in stricter C, where the C++ comment style is forbidden. I simply dropped these lines instead of fixing the comment style. This code has always been commented out since it was added around Linux 2.4.9 (i.e. commented out for more than 17 years). 'Maybe later...' will never happen. Signed-off-by: Masahiro Yamada Acked-by: Richard Weinberger Signed-off-by: Richard Weinberger --- include/uapi/linux/jffs2.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/jffs2.h b/include/uapi/linux/jffs2.h index a18b719f49d4..784ba0b9690a 100644 --- a/include/uapi/linux/jffs2.h +++ b/include/uapi/linux/jffs2.h @@ -77,11 +77,6 @@ #define JFFS2_ACL_VERSION 0x0001 -// Maybe later... -//#define JFFS2_NODETYPE_CHECKPOINT (JFFS2_FEATURE_RWCOMPAT_DELETE | JFFS2_NODE_ACCURATE | 3) -//#define JFFS2_NODETYPE_OPTIONS (JFFS2_FEATURE_RWCOMPAT_COPY | JFFS2_NODE_ACCURATE | 4) - - #define JFFS2_INO_FLAG_PREREAD 1 /* Do read_inode() for this one at mount time, don't wait for it to happen later */ -- cgit v1.2.3 From 71e90b4654a9298f9e2375cc733d57b8bf92ce73 Mon Sep 17 00:00:00 2001 From: Daniel Rosenberg Date: Tue, 23 Jul 2019 16:05:27 -0700 Subject: fs: Reserve flag for casefolding In preparation for including the casefold feature within f2fs, elevate the EXT4_CASEFOLD_FL flag to FS_CASEFOLD_FL.
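A hedged userspace sketch (not from this patch; it assumes a filesystem created with the casefold feature, on which the kernel accepts the flag for empty directories): the flag is toggled per directory through the generic FS_IOC_GETFLAGS/FS_IOC_SETFLAGS attribute ioctls.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

/* Mark an (empty) directory as case-insensitive; returns 0 on success. */
int make_dir_casefolded(const char *path)
{
	int attr, ret, fd = open(path, O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return -1;
	ret = ioctl(fd, FS_IOC_GETFLAGS, &attr);
	if (!ret) {
		attr |= FS_CASEFOLD_FL;
		ret = ioctl(fd, FS_IOC_SETFLAGS, &attr);
	}
	close(fd);
	return ret;
}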
Signed-off-by: Daniel Rosenberg Reviewed-by: Chao Yu Signed-off-by: Jaegeuk Kim --- include/uapi/linux/fs.h | 1 + tools/include/uapi/linux/fs.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 59c71fa8c553..2a616aa3f686 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -311,6 +311,7 @@ struct fscrypt_key { #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ #define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */ #define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ +#define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */ #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h index 59c71fa8c553..2a616aa3f686 100644 --- a/tools/include/uapi/linux/fs.h +++ b/tools/include/uapi/linux/fs.h @@ -311,6 +311,7 @@ struct fscrypt_key { #define FS_NOCOW_FL 0x00800000 /* Do not cow file */ #define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */ #define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ +#define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */ #define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */ #define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ -- cgit v1.2.3 From bd1200b79510a68554890af2f48d92be6eb1daf8 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Fri, 23 Aug 2019 18:47:21 +0300 Subject: drop_monitor: Make timestamps y2038 safe Timestamps are currently communicated to user space as 'struct timespec', which is not considered y2038 safe since it uses a 32-bit signed value for seconds. Fix this while the API is not yet part of any official kernel release by using 64-bit nanosecond timestamps instead. Fixes: ca30707dee2b ("drop_monitor: Add packet alert mode") Fixes: 5e58109b1ea4 ("drop_monitor: Add support for packet alert mode for hardware drops") Signed-off-by: Ido Schimmel Acked-by: Neil Horman Signed-off-by: David S. Miller
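For a sense of the userspace side, a monitor might decode the new attribute as in the sketch below (an assumption-laden example using libmnl, with a 'tb' attribute table presumed already filled by mnl_attr_parse(); the function name is made up):

    #include <stdio.h>
    #include <stdint.h>
    #include <libmnl/libmnl.h>
    #include <linux/net_dropmon.h>

    static void print_drop_time(const struct nlattr *tb[])
    {
    	uint64_t ns;

    	if (!tb[NET_DM_ATTR_TIMESTAMP])
    		return;

    	/* A single u64 of nanoseconds: the 64-bit seconds part cannot
    	 * overflow in 2038 the way a 32-bit time_t does. */
    	ns = mnl_attr_get_u64(tb[NET_DM_ATTR_TIMESTAMP]);
    	printf("dropped at %llu.%09llu\n",
    	       (unsigned long long)(ns / 1000000000ULL),
    	       (unsigned long long)(ns % 1000000000ULL));
    }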
--- include/uapi/linux/net_dropmon.h | 2 +- net/core/drop_monitor.c | 14 ++++++-------- 2 files changed, 7 insertions(+), 9 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/net_dropmon.h b/include/uapi/linux/net_dropmon.h index 75a35dccb675..8bf79a9eb234 100644 --- a/include/uapi/linux/net_dropmon.h +++ b/include/uapi/linux/net_dropmon.h @@ -75,7 +75,7 @@ enum net_dm_attr { NET_DM_ATTR_PC, /* u64 */ NET_DM_ATTR_SYMBOL, /* string */ NET_DM_ATTR_IN_PORT, /* nested */ - NET_DM_ATTR_TIMESTAMP, /* struct timespec */ + NET_DM_ATTR_TIMESTAMP, /* u64 */ NET_DM_ATTR_PROTO, /* u16 */ NET_DM_ATTR_PAYLOAD, /* binary */ NET_DM_ATTR_PAD, diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index bfc024024aa3..cc60cc22e2db 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -552,7 +552,7 @@ static size_t net_dm_packet_report_size(size_t payload_len) /* NET_DM_ATTR_IN_PORT */ net_dm_in_port_size() + /* NET_DM_ATTR_TIMESTAMP */ - nla_total_size(sizeof(struct timespec)) + + nla_total_size(sizeof(u64)) + /* NET_DM_ATTR_ORIG_LEN */ nla_total_size(sizeof(u32)) + /* NET_DM_ATTR_PROTO */ @@ -592,7 +592,6 @@ static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb, u64 pc = (u64)(uintptr_t) NET_DM_SKB_CB(skb)->pc; char buf[NET_DM_MAX_SYMBOL_LEN]; struct nlattr *attr; - struct timespec ts; void *hdr; int rc; @@ -615,8 +614,8 @@ static int net_dm_packet_report_fill(struct sk_buff *msg, struct sk_buff *skb, if (rc) goto nla_put_failure; - if (ktime_to_timespec_cond(skb->tstamp, &ts) && - nla_put(msg, NET_DM_ATTR_TIMESTAMP, sizeof(ts), &ts)) + if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP, + ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD)) goto nla_put_failure; if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len)) @@ -716,7 +715,7 @@ net_dm_hw_packet_report_size(size_t payload_len, /* NET_DM_ATTR_IN_PORT */ net_dm_in_port_size() + /* NET_DM_ATTR_TIMESTAMP */ - nla_total_size(sizeof(struct timespec)) + + nla_total_size(sizeof(u64)) + /* NET_DM_ATTR_ORIG_LEN */ nla_total_size(sizeof(u32)) + /* NET_DM_ATTR_PROTO */ @@ -730,7 +729,6 @@ static int net_dm_hw_packet_report_fill(struct sk_buff *msg, { struct net_dm_hw_metadata *hw_metadata; struct nlattr *attr; - struct timespec ts; void *hdr; hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata; @@ -761,8 +759,8 @@ static int net_dm_hw_packet_report_fill(struct sk_buff *msg, goto nla_put_failure; } - if (ktime_to_timespec_cond(skb->tstamp, &ts) && - nla_put(msg, NET_DM_ATTR_TIMESTAMP, sizeof(ts), &ts)) + if (nla_put_u64_64bit(msg, NET_DM_ATTR_TIMESTAMP, + ktime_to_ns(skb->tstamp), NET_DM_ATTR_PAD)) goto nla_put_failure; if (nla_put_u32(msg, NET_DM_ATTR_ORIG_LEN, skb->len)) -- cgit v1.2.3 From 47e4937a4a7ca4184fd282791dfee76c6799966a Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 23 Aug 2019 05:36:59 +0800 Subject: erofs: move erofs out of staging The EROFS filesystem has been in linux-staging for a year. EROFS is designed to be a better solution for saving extra storage space with guaranteed end-to-end performance for read-only files, with the help of reduced metadata, fixed-sized output compression and in-place decompression technologies. In the past year, EROFS was greatly improved by many people as a staging driver, self-tested, betaed by a large number of our internal users, successfully applied to almost all in-service HUAWEI smartphones as part of EMUI 9.1 and proven to be stable enough to be moved out of staging. EROFS is a self-contained filesystem driver.
Although there are still some TODOs to make it more generic, we have a dedicated team actively working on EROFS to keep improving it with the evolution of the Linux kernel, as the other in-kernel filesystems do. As Pavel suggested, it's better to do this as one commit since git can do moves, and all history will be preserved this way. Let's promote it from staging and enhance it more actively as a "real" part of the kernel for wider scenarios! Cc: Greg Kroah-Hartman Cc: Alexander Viro Cc: Andrew Morton Cc: Stephen Rothwell Cc: Theodore Ts'o Cc: Pavel Machek Cc: David Sterba Cc: Amir Goldstein Cc: Christoph Hellwig Cc: Darrick J . Wong Cc: Dave Chinner Cc: Jaegeuk Kim Cc: Jan Kara Cc: Richard Weinberger Cc: Linus Torvalds Cc: Chao Yu Cc: Miao Xie Cc: Li Guifu Cc: Fang Wei Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20190822213659.5501-1-hsiangkao@aol.com Signed-off-by: Greg Kroah-Hartman --- Documentation/filesystems/erofs.txt | 219 +++ MAINTAINERS | 14 +- drivers/staging/Kconfig | 2 - drivers/staging/Makefile | 1 - .../erofs/Documentation/filesystems/erofs.txt | 223 --- drivers/staging/erofs/Kconfig | 98 -- drivers/staging/erofs/Makefile | 13 - drivers/staging/erofs/TODO | 46 - drivers/staging/erofs/compress.h | 62 - drivers/staging/erofs/data.c | 425 ------ drivers/staging/erofs/decompressor.c | 360 ----- drivers/staging/erofs/dir.c | 141 -- drivers/staging/erofs/erofs_fs.h | 310 ----- drivers/staging/erofs/include/trace/events/erofs.h | 256 ---- drivers/staging/erofs/inode.c | 334 ----- drivers/staging/erofs/internal.h | 554 -------- drivers/staging/erofs/namei.c | 253 ---- drivers/staging/erofs/super.c | 671 --------- drivers/staging/erofs/tagptr.h | 110 -- drivers/staging/erofs/utils.c | 335 ----- drivers/staging/erofs/xattr.c | 705 ---------- drivers/staging/erofs/xattr.h | 94 -- drivers/staging/erofs/zdata.c | 1434 -------------------- drivers/staging/erofs/zdata.h | 195 --- drivers/staging/erofs/zmap.c | 468 ------- drivers/staging/erofs/zpvec.h | 159 --- fs/Kconfig | 1 + fs/Makefile | 1 + fs/erofs/Kconfig | 98 ++ fs/erofs/Makefile | 11 + fs/erofs/compress.h | 60 + fs/erofs/data.c | 423 ++++++ fs/erofs/decompressor.c | 358 +++++ fs/erofs/dir.c | 139 ++ fs/erofs/erofs_fs.h | 307 +++++ fs/erofs/inode.c | 332 +++++ fs/erofs/internal.h | 553 ++++++++ fs/erofs/namei.c | 251 ++++ fs/erofs/super.c | 669 +++++++++ fs/erofs/tagptr.h | 110 ++ fs/erofs/utils.c | 333 +++++ fs/erofs/xattr.c | 703 ++++++++++ fs/erofs/xattr.h | 92 ++ fs/erofs/zdata.c | 1432 +++++++++++++++++++ fs/erofs/zdata.h | 193 +++ fs/erofs/zmap.c | 466 +++++++ fs/erofs/zpvec.h | 157 +++ include/trace/events/erofs.h | 256 ++++ include/uapi/linux/magic.h | 1 + 49 files changed, 7172 insertions(+), 7256 deletions(-) create mode 100644 Documentation/filesystems/erofs.txt delete mode 100644 drivers/staging/erofs/Documentation/filesystems/erofs.txt delete mode 100644 drivers/staging/erofs/Kconfig delete mode 100644 drivers/staging/erofs/Makefile delete mode 100644 drivers/staging/erofs/TODO delete mode 100644 drivers/staging/erofs/compress.h delete mode 100644 drivers/staging/erofs/data.c delete mode 100644 drivers/staging/erofs/decompressor.c delete mode 100644 drivers/staging/erofs/dir.c delete mode 100644 drivers/staging/erofs/erofs_fs.h delete mode 100644 drivers/staging/erofs/include/trace/events/erofs.h delete mode 100644 drivers/staging/erofs/inode.c delete mode 100644 drivers/staging/erofs/internal.h delete mode 100644 drivers/staging/erofs/namei.c delete mode 100644 drivers/staging/erofs/super.c delete mode
100644 drivers/staging/erofs/tagptr.h delete mode 100644 drivers/staging/erofs/utils.c delete mode 100644 drivers/staging/erofs/xattr.c delete mode 100644 drivers/staging/erofs/xattr.h delete mode 100644 drivers/staging/erofs/zdata.c delete mode 100644 drivers/staging/erofs/zdata.h delete mode 100644 drivers/staging/erofs/zmap.c delete mode 100644 drivers/staging/erofs/zpvec.h create mode 100644 fs/erofs/Kconfig create mode 100644 fs/erofs/Makefile create mode 100644 fs/erofs/compress.h create mode 100644 fs/erofs/data.c create mode 100644 fs/erofs/decompressor.c create mode 100644 fs/erofs/dir.c create mode 100644 fs/erofs/erofs_fs.h create mode 100644 fs/erofs/inode.c create mode 100644 fs/erofs/internal.h create mode 100644 fs/erofs/namei.c create mode 100644 fs/erofs/super.c create mode 100644 fs/erofs/tagptr.h create mode 100644 fs/erofs/utils.c create mode 100644 fs/erofs/xattr.c create mode 100644 fs/erofs/xattr.h create mode 100644 fs/erofs/zdata.c create mode 100644 fs/erofs/zdata.h create mode 100644 fs/erofs/zmap.c create mode 100644 fs/erofs/zpvec.h create mode 100644 include/trace/events/erofs.h (limited to 'include/uapi') diff --git a/Documentation/filesystems/erofs.txt b/Documentation/filesystems/erofs.txt new file mode 100644 index 000000000000..38aa9126ec98 --- /dev/null +++ b/Documentation/filesystems/erofs.txt @@ -0,0 +1,219 @@ +Overview +======== + +EROFS file-system stands for Enhanced Read-Only File System. Different +from other read-only file systems, it aims to be designed for flexibility, +scalability, but be kept simple and high performance. + +It is designed as a better filesystem solution for the following scenarios: + - read-only storage media or + + - part of a fully trusted read-only solution, which means it needs to be + immutable and bit-for-bit identical to the official golden image for + their releases due to security and other considerations and + + - hope to save some extra storage space with guaranteed end-to-end performance + by using reduced metadata and transparent file compression, especially + for those embedded devices with limited memory (ex, smartphone); + +Here is the main features of EROFS: + - Little endian on-disk design; + + - Currently 4KB block size (nobh) and therefore maximum 16TB address space; + + - Metadata & data could be mixed by design; + + - 2 inode versions for different requirements: + v1 v2 + Inode metadata size: 32 bytes 64 bytes + Max file size: 4 GB 16 EB (also limited by max. vol size) + Max uids/gids: 65536 4294967296 + File creation time: no yes (64 + 32-bit timestamp) + Max hardlinks: 65536 4294967296 + Metadata reserved: 4 bytes 14 bytes + + - Support extended attributes (xattrs) as an option; + + - Support xattr inline and tail-end data inline for all files; + + - Support POSIX.1e ACLs by using xattrs; + + - Support transparent file compression as an option: + LZ4 algorithm with 4 KB fixed-output compression for high performance; + +The following git tree provides the file system user-space tools under +development (ex, formatting tool mkfs.erofs): +>> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git + +Bugs and patches are welcome, please kindly help us and send to the following +linux-erofs mailing list: +>> linux-erofs mailing list + +Mount options +============= + +fault_injection=%d Enable fault injection in all supported types with + specified injection rate. 
Supported injection type: + Type_Name Type_Value + FAULT_KMALLOC 0x000000001 + FAULT_READ_IO 0x000000002 +(no)user_xattr Setup Extended User Attributes. Note: xattr is enabled + by default if CONFIG_EROFS_FS_XATTR is selected. +(no)acl Setup POSIX Access Control List. Note: acl is enabled + by default if CONFIG_EROFS_FS_POSIX_ACL is selected. +cache_strategy=%s Select a strategy for cached decompression from now on: + disabled: In-place I/O decompression only; + readahead: Cache the last incomplete compressed physical + cluster for further reading. It still does + in-place I/O decompression for the rest + compressed physical clusters; + readaround: Cache the both ends of incomplete compressed + physical clusters for further reading. + It still does in-place I/O decompression + for the rest compressed physical clusters. + +Module parameters +================= +use_vmap=[0|1] Use vmap() instead of vm_map_ram() (default 0). + +On-disk details +=============== + +Summary +------- +Different from other read-only file systems, an EROFS volume is designed +to be as simple as possible: + + |-> aligned with the block size + ____________________________________________________________ + | |SB| | ... | Metadata | ... | Data | Metadata | ... | Data | + |_|__|_|_____|__________|_____|______|__________|_____|______| + 0 +1K + +All data areas should be aligned with the block size, but metadata areas +may not. All metadatas can be now observed in two different spaces (views): + 1. Inode metadata space + Each valid inode should be aligned with an inode slot, which is a fixed + value (32 bytes) and designed to be kept in line with v1 inode size. + + Each inode can be directly found with the following formula: + inode offset = meta_blkaddr * block_size + 32 * nid + + |-> aligned with 8B + |-> followed closely + + meta_blkaddr blocks |-> another slot + _____________________________________________________________________ + | ... | inode | xattrs | extents | data inline | ... | inode ... + |________|_______|(optional)|(optional)|__(optional)_|_____|__________ + |-> aligned with the inode slot size + . . + . . + . . + . . + . . + . . + .____________________________________________________|-> aligned with 4B + | xattr_ibody_header | shared xattrs | inline xattrs | + |____________________|_______________|_______________| + |-> 12 bytes <-|->x * 4 bytes<-| . + . . . + . . . + . . . + ._______________________________.______________________. + | id | id | id | id | ... | id | ent | ... | ent| ... | + |____|____|____|____|______|____|_____|_____|____|_____| + |-> aligned with 4B + |-> aligned with 4B + + Inode could be 32 or 64 bytes, which can be distinguished from a common + field which all inode versions have -- i_advise: + + __________________ __________________ + | i_advise | | i_advise | + |__________________| |__________________| + | ... | | ... | + | | | | + |__________________| 32 bytes | | + | | + |__________________| 64 bytes + + Xattrs, extents, data inline are followed by the corresponding inode with + proper alignes, and they could be optional for different data mappings, + _currently_ there are totally 3 valid data mappings supported: + + 1) flat file data without data inline (no extent); + 2) fixed-output size data compression (must have extents); + 3) flat file data with tail-end data inline (no extent); + + The size of the optional xattrs is indicated by i_xattr_count in inode + header. 
Large xattrs or xattrs shared by many different files can be + stored in shared xattrs metadata rather than inlined right after inode. + + 2. Shared xattrs metadata space + Shared xattrs space is similar to the above inode space, started with + a specific block indicated by xattr_blkaddr, organized one by one with + proper align. + + Each share xattr can also be directly found by the following formula: + xattr offset = xattr_blkaddr * block_size + 4 * xattr_id + + |-> aligned by 4 bytes + + xattr_blkaddr blocks |-> aligned with 4 bytes + _________________________________________________________________________ + | ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ... + |________|_____________|_____________|_____|______________|_______________ + +Directories +----------- +All directories are now organized in a compact on-disk format. Note that +each directory block is divided into index and name areas in order to support +random file lookup, and all directory entries are _strictly_ recorded in +alphabetical order in order to support improved prefix binary search +algorithm (could refer to the related source code). + + ___________________________ + / | + / ______________|________________ + / / | nameoff1 | nameoffN-1 + ____________.______________._______________v________________v__________ +| dirent | dirent | ... | dirent | filename | filename | ... | filename | +|___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____| + \ ^ + \ | * could have + \ | trailing '\0' + \________________________| nameoff0 + + Directory block + +Note that apart from the offset of the first filename, nameoff0 also indicates +the total number of directory entries in this block since it is no need to +introduce another on-disk field at all. + +Compression +----------- +Currently, EROFS supports 4KB fixed-output clustersize transparent file +compression, as illustrated below: + + |---- Variant-Length Extent ----|-------- VLE --------|----- VLE ----- + clusterofs clusterofs clusterofs + | | | logical data +_________v_______________________________v_____________________v_______________ +... | . | | . | | . | ... +____|____.________|_____________|________.____|_____________|__.__________|____ + |-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-| + size size size size size + . . . . + . . . . + . . . . + _______._____________._____________._____________._____________________ + ... | | | | ... physical data + _______|_____________|_____________|_____________|_____________________ + |-> cluster <-|-> cluster <-|-> cluster <-| + size size size + +Currently each on-disk physical cluster can contain 4KB (un)compressed data +at most. For each logical cluster, there is a corresponding on-disk index to +describe its cluster type, physical cluster address, etc. + +See "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details. 
+ diff --git a/MAINTAINERS b/MAINTAINERS index 6847372cfab8..0f38cba2c581 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6046,6 +6046,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git F: drivers/video/fbdev/s1d13xxxfb.c F: include/video/s1d13xxxfb.h +EROFS FILE SYSTEM +M: Gao Xiang +M: Chao Yu +L: linux-erofs@lists.ozlabs.org +S: Maintained +F: fs/erofs/ + ERRSEQ ERROR TRACKING INFRASTRUCTURE M: Jeff Layton S: Maintained @@ -15229,13 +15236,6 @@ M: H Hartley Sweeten S: Odd Fixes F: drivers/staging/comedi/ -STAGING - EROFS FILE SYSTEM -M: Gao Xiang -M: Chao Yu -L: linux-erofs@lists.ozlabs.org -S: Maintained -F: drivers/staging/erofs/ - STAGING - FIELDBUS SUBSYSTEM M: Sven Van Asbroeck S: Maintained diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 7c96a01eef6c..d972ec8e71fb 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -112,8 +112,6 @@ source "drivers/staging/gasket/Kconfig" source "drivers/staging/axis-fifo/Kconfig" -source "drivers/staging/erofs/Kconfig" - source "drivers/staging/fieldbus/Kconfig" source "drivers/staging/kpc2000/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index fcaac9693b83..6018b9a4a077 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -46,7 +46,6 @@ obj-$(CONFIG_DMA_RALINK) += ralink-gdma/ obj-$(CONFIG_SOC_MT7621) += mt7621-dts/ obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ -obj-$(CONFIG_EROFS_FS) += erofs/ obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/ obj-$(CONFIG_KPC2000) += kpc2000/ obj-$(CONFIG_ISDN_CAPI) += isdn/ diff --git a/drivers/staging/erofs/Documentation/filesystems/erofs.txt b/drivers/staging/erofs/Documentation/filesystems/erofs.txt deleted file mode 100644 index 0eab600ca7ca..000000000000 --- a/drivers/staging/erofs/Documentation/filesystems/erofs.txt +++ /dev/null @@ -1,223 +0,0 @@ -Overview -======== - -EROFS file-system stands for Enhanced Read-Only File System. Different -from other read-only file systems, it aims to be designed for flexibility, -scalability, but be kept simple and high performance. - -It is designed as a better filesystem solution for the following scenarios: - - read-only storage media or - - - part of a fully trusted read-only solution, which means it needs to be - immutable and bit-for-bit identical to the official golden image for - their releases due to security and other considerations and - - - hope to save some extra storage space with guaranteed end-to-end performance - by using reduced metadata and transparent file compression, especially - for those embedded devices with limited memory (ex, smartphone); - -Here is the main features of EROFS: - - Little endian on-disk design; - - - Currently 4KB block size (nobh) and therefore maximum 16TB address space; - - - Metadata & data could be mixed by design; - - - 2 inode versions for different requirements: - v1 v2 - Inode metadata size: 32 bytes 64 bytes - Max file size: 4 GB 16 EB (also limited by max. 
vol size) - Max uids/gids: 65536 4294967296 - File creation time: no yes (64 + 32-bit timestamp) - Max hardlinks: 65536 4294967296 - Metadata reserved: 4 bytes 14 bytes - - - Support extended attributes (xattrs) as an option; - - - Support xattr inline and tail-end data inline for all files; - - - Support POSIX.1e ACLs by using xattrs; - - - Support transparent file compression as an option: - LZ4 algorithm with 4 KB fixed-output compression for high performance; - -The following git tree provides the file system user-space tools under -development (ex, formatting tool mkfs.erofs): ->> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git - -Bugs and patches are welcome, please kindly help us and send to the following -linux-erofs mailing list: ->> linux-erofs mailing list - -Note that EROFS is still working in progress as a Linux staging driver, -Cc the staging mailing list as well is highly recommended: ->> Linux Driver Project Developer List - -Mount options -============= - -fault_injection=%d Enable fault injection in all supported types with - specified injection rate. Supported injection type: - Type_Name Type_Value - FAULT_KMALLOC 0x000000001 - FAULT_READ_IO 0x000000002 -(no)user_xattr Setup Extended User Attributes. Note: xattr is enabled - by default if CONFIG_EROFS_FS_XATTR is selected. -(no)acl Setup POSIX Access Control List. Note: acl is enabled - by default if CONFIG_EROFS_FS_POSIX_ACL is selected. -cache_strategy=%s Select a strategy for cached decompression from now on: - disabled: In-place I/O decompression only; - readahead: Cache the last incomplete compressed physical - cluster for further reading. It still does - in-place I/O decompression for the rest - compressed physical clusters; - readaround: Cache the both ends of incomplete compressed - physical clusters for further reading. - It still does in-place I/O decompression - for the rest compressed physical clusters. - -Module parameters -================= -use_vmap=[0|1] Use vmap() instead of vm_map_ram() (default 0). - -On-disk details -=============== - -Summary -------- -Different from other read-only file systems, an EROFS volume is designed -to be as simple as possible: - - |-> aligned with the block size - ____________________________________________________________ - | |SB| | ... | Metadata | ... | Data | Metadata | ... | Data | - |_|__|_|_____|__________|_____|______|__________|_____|______| - 0 +1K - -All data areas should be aligned with the block size, but metadata areas -may not. All metadatas can be now observed in two different spaces (views): - 1. Inode metadata space - Each valid inode should be aligned with an inode slot, which is a fixed - value (32 bytes) and designed to be kept in line with v1 inode size. - - Each inode can be directly found with the following formula: - inode offset = meta_blkaddr * block_size + 32 * nid - - |-> aligned with 8B - |-> followed closely - + meta_blkaddr blocks |-> another slot - _____________________________________________________________________ - | ... | inode | xattrs | extents | data inline | ... | inode ... - |________|_______|(optional)|(optional)|__(optional)_|_____|__________ - |-> aligned with the inode slot size - . . - . . - . . - . . - . . - . . - .____________________________________________________|-> aligned with 4B - | xattr_ibody_header | shared xattrs | inline xattrs | - |____________________|_______________|_______________| - |-> 12 bytes <-|->x * 4 bytes<-| . - . . . - . . . - . . . 
- ._______________________________.______________________. - | id | id | id | id | ... | id | ent | ... | ent| ... | - |____|____|____|____|______|____|_____|_____|____|_____| - |-> aligned with 4B - |-> aligned with 4B - - Inode could be 32 or 64 bytes, which can be distinguished from a common - field which all inode versions have -- i_advise: - - __________________ __________________ - | i_advise | | i_advise | - |__________________| |__________________| - | ... | | ... | - | | | | - |__________________| 32 bytes | | - | | - |__________________| 64 bytes - - Xattrs, extents, data inline are followed by the corresponding inode with - proper alignes, and they could be optional for different data mappings, - _currently_ there are totally 3 valid data mappings supported: - - 1) flat file data without data inline (no extent); - 2) fixed-output size data compression (must have extents); - 3) flat file data with tail-end data inline (no extent); - - The size of the optional xattrs is indicated by i_xattr_count in inode - header. Large xattrs or xattrs shared by many different files can be - stored in shared xattrs metadata rather than inlined right after inode. - - 2. Shared xattrs metadata space - Shared xattrs space is similar to the above inode space, started with - a specific block indicated by xattr_blkaddr, organized one by one with - proper align. - - Each share xattr can also be directly found by the following formula: - xattr offset = xattr_blkaddr * block_size + 4 * xattr_id - - |-> aligned by 4 bytes - + xattr_blkaddr blocks |-> aligned with 4 bytes - _________________________________________________________________________ - | ... | xattr_entry | xattr data | ... | xattr_entry | xattr data ... - |________|_____________|_____________|_____|______________|_______________ - -Directories ------------ -All directories are now organized in a compact on-disk format. Note that -each directory block is divided into index and name areas in order to support -random file lookup, and all directory entries are _strictly_ recorded in -alphabetical order in order to support improved prefix binary search -algorithm (could refer to the related source code). - - ___________________________ - / | - / ______________|________________ - / / | nameoff1 | nameoffN-1 - ____________.______________._______________v________________v__________ -| dirent | dirent | ... | dirent | filename | filename | ... | filename | -|___.0___|____1___|_____|___N-1__|____0_____|____1_____|_____|___N-1____| - \ ^ - \ | * could have - \ | trailing '\0' - \________________________| nameoff0 - - Directory block - -Note that apart from the offset of the first filename, nameoff0 also indicates -the total number of directory entries in this block since it is no need to -introduce another on-disk field at all. - -Compression ------------ -Currently, EROFS supports 4KB fixed-output clustersize transparent file -compression, as illustrated below: - - |---- Variant-Length Extent ----|-------- VLE --------|----- VLE ----- - clusterofs clusterofs clusterofs - | | | logical data -_________v_______________________________v_____________________v_______________ -... | . | | . | | . | ... -____|____.________|_____________|________.____|_____________|__.__________|____ - |-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-|-> cluster <-| - size size size size size - . . . . - . . . . - . . . . - _______._____________._____________._____________._____________________ - ... | | | | ... 
physical data - _______|_____________|_____________|_____________|_____________________ - |-> cluster <-|-> cluster <-|-> cluster <-| - size size size - -Currently each on-disk physical cluster can contain 4KB (un)compressed data -at most. For each logical cluster, there is a corresponding on-disk index to -describe its cluster type, physical cluster address, etc. - -See "struct z_erofs_vle_decompressed_index" in erofs_fs.h for more details. - diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig deleted file mode 100644 index 16316d1adca3..000000000000 --- a/drivers/staging/erofs/Kconfig +++ /dev/null @@ -1,98 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -config EROFS_FS - tristate "EROFS filesystem support" - depends on BLOCK - help - EROFS (Enhanced Read-Only File System) is a lightweight - read-only file system with modern designs (eg. page-sized - blocks, inline xattrs/data, etc.) for scenarios which need - high-performance read-only requirements, e.g. Android OS - for mobile phones and LIVECDs. - - It also provides fixed-sized output compression support, - which improves storage density, keeps relatively higher - compression ratios, which is more useful to achieve high - performance for embedded devices with limited memory. - - If unsure, say N. - -config EROFS_FS_DEBUG - bool "EROFS debugging feature" - depends on EROFS_FS - help - Print debugging messages and enable more BUG_ONs which check - filesystem consistency and find potential issues aggressively, - which can be used for Android eng build, for example. - - For daily use, say N. - -config EROFS_FAULT_INJECTION - bool "EROFS fault injection facility" - depends on EROFS_FS - help - Test EROFS to inject faults such as ENOMEM, EIO, and so on. - If unsure, say N. - -config EROFS_FS_XATTR - bool "EROFS extended attributes" - depends on EROFS_FS - default y - help - Extended attributes are name:value pairs associated with inodes by - the kernel or by users (see the attr(5) manual page, or visit - for details). - - If unsure, say N. - -config EROFS_FS_POSIX_ACL - bool "EROFS Access Control Lists" - depends on EROFS_FS_XATTR - select FS_POSIX_ACL - default y - help - Posix Access Control Lists (ACLs) support permissions for users and - groups beyond the owner/group/world scheme. - - To learn more about Access Control Lists, visit the POSIX ACLs for - Linux website . - - If you don't know what Access Control Lists are, say N. - -config EROFS_FS_SECURITY - bool "EROFS Security Labels" - depends on EROFS_FS_XATTR - default y - help - Security labels provide an access control facility to support Linux - Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO - Linux. This option enables an extended attribute handler for file - security labels in the erofs filesystem, so that it requires enabling - the extended attribute support in advance. - - If you are not using a security module, say N. - -config EROFS_FS_ZIP - bool "EROFS Data Compression Support" - depends on EROFS_FS - select LZ4_DECOMPRESS - default y - help - Enable fixed-sized output compression for EROFS. - - If you don't want to enable compression feature, say N. - -config EROFS_FS_CLUSTER_PAGE_LIMIT - int "EROFS Cluster Pages Hard Limit" - depends on EROFS_FS_ZIP - range 1 256 - default "1" - help - Indicates maximum # of pages of a compressed - physical cluster. - - For example, if files in a image were compressed - into 8k-unit, hard limit should not be configured - less than 2. 
Otherwise, the image will be refused - to mount on this kernel. - diff --git a/drivers/staging/erofs/Makefile b/drivers/staging/erofs/Makefile deleted file mode 100644 index 5cdae21cb5af..000000000000 --- a/drivers/staging/erofs/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -EROFS_VERSION = "1.0pre1" - -ccflags-y += -DEROFS_VERSION=\"$(EROFS_VERSION)\" - -obj-$(CONFIG_EROFS_FS) += erofs.o -# staging requirement: to be self-contained in its own directory -ccflags-y += -I $(srctree)/$(src)/include -erofs-objs := super.o inode.o data.o namei.o dir.o utils.o -erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o -erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o - diff --git a/drivers/staging/erofs/TODO b/drivers/staging/erofs/TODO deleted file mode 100644 index a8608b2f72bd..000000000000 --- a/drivers/staging/erofs/TODO +++ /dev/null @@ -1,46 +0,0 @@ - -EROFS is still working in progress, thus it is not suitable -for all productive uses. play at your own risk :) - -TODO List: - - add the missing error handling code - (mainly existed in xattr and decompression submodules); - - - finalize erofs ondisk format design (which means that - minor on-disk revisions could happen later); - - - documentation and detailed technical analysis; - - - general code review and clean up - (including confusing variable names and code snippets); - - - support larger compressed clustersizes for selection - (currently erofs only works as expected with the page-sized - compressed cluster configuration, usually 4KB); - - - support more lossless data compression algorithms - in addition to LZ4 algorithms in VLE approach; - - - data deduplication and other useful features. - -The following git tree provides the file system user-space -tools under development (ex, formatting tool mkfs.erofs): ->> git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git - -The open-source development of erofs-utils is at the early stage. -Contact the original author Li Guifu and -the co-maintainer Fang Wei for the latest news -and more details. - -Code, suggestions, etc, are welcome. Please feel free to -ask and send patches, - -To: - linux-erofs mailing list - Gao Xiang - Chao Yu - -Cc: (for linux-kernel upstream patches) - Greg Kroah-Hartman - linux-staging mailing list - diff --git a/drivers/staging/erofs/compress.h b/drivers/staging/erofs/compress.h deleted file mode 100644 index 043013f9ef1b..000000000000 --- a/drivers/staging/erofs/compress.h +++ /dev/null @@ -1,62 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/compress.h - * - * Copyright (C) 2019 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_FS_COMPRESS_H -#define __EROFS_FS_COMPRESS_H - -#include "internal.h" - -enum { - Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX, - Z_EROFS_COMPRESSION_RUNTIME_MAX -}; - -struct z_erofs_decompress_req { - struct super_block *sb; - struct page **in, **out; - - unsigned short pageofs_out; - unsigned int inputsize, outputsize; - - /* indicate the algorithm will be used for decompression */ - unsigned int alg; - bool inplace_io, partial_decoding; -}; - -/* - * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) - - * used to mark temporary allocated pages from other - * file/cached pages and NULL mapping pages. 
- */ -#define Z_EROFS_MAPPING_STAGING ((void *)0x5A110C8D) - -/* check if a page is marked as staging */ -static inline bool z_erofs_page_is_staging(struct page *page) -{ - return page->mapping == Z_EROFS_MAPPING_STAGING; -} - -static inline bool z_erofs_put_stagingpage(struct list_head *pagepool, - struct page *page) -{ - if (!z_erofs_page_is_staging(page)) - return false; - - /* staging pages should not be used by others at the same time */ - if (page_ref_count(page) > 1) - put_page(page); - else - list_add(&page->lru, pagepool); - return true; -} - -int z_erofs_decompress(struct z_erofs_decompress_req *rq, - struct list_head *pagepool); - -#endif - diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c deleted file mode 100644 index 72c4b4c5296b..000000000000 --- a/drivers/staging/erofs/data.c +++ /dev/null @@ -1,425 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/data.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "internal.h" -#include - -#include - -static inline void read_endio(struct bio *bio) -{ - struct super_block *const sb = bio->bi_private; - struct bio_vec *bvec; - blk_status_t err = bio->bi_status; - struct bvec_iter_all iter_all; - - if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) { - erofs_show_injection_info(FAULT_READ_IO); - err = BLK_STS_IOERR; - } - - bio_for_each_segment_all(bvec, bio, iter_all) { - struct page *page = bvec->bv_page; - - /* page is already locked */ - DBG_BUGON(PageUptodate(page)); - - if (unlikely(err)) - SetPageError(page); - else - SetPageUptodate(page); - - unlock_page(page); - /* page could be reclaimed now */ - } - bio_put(bio); -} - -/* prio -- true is used for dir */ -struct page *__erofs_get_meta_page(struct super_block *sb, - erofs_blk_t blkaddr, bool prio, bool nofail) -{ - struct inode *const bd_inode = sb->s_bdev->bd_inode; - struct address_space *const mapping = bd_inode->i_mapping; - /* prefer retrying in the allocator to blindly looping below */ - const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) | - (nofail ? __GFP_NOFAIL : 0); - unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0; - struct page *page; - int err; - -repeat: - page = find_or_create_page(mapping, blkaddr, gfp); - if (unlikely(!page)) { - DBG_BUGON(nofail); - return ERR_PTR(-ENOMEM); - } - DBG_BUGON(!PageLocked(page)); - - if (!PageUptodate(page)) { - struct bio *bio; - - bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail); - if (IS_ERR(bio)) { - DBG_BUGON(nofail); - err = PTR_ERR(bio); - goto err_out; - } - - err = bio_add_page(bio, page, PAGE_SIZE, 0); - if (unlikely(err != PAGE_SIZE)) { - err = -EFAULT; - goto err_out; - } - - __submit_bio(bio, REQ_OP_READ, - REQ_META | (prio ? 
REQ_PRIO : 0)); - - lock_page(page); - - /* this page has been truncated by others */ - if (unlikely(page->mapping != mapping)) { -unlock_repeat: - unlock_page(page); - put_page(page); - goto repeat; - } - - /* more likely a read error */ - if (unlikely(!PageUptodate(page))) { - if (io_retries) { - --io_retries; - goto unlock_repeat; - } - err = -EIO; - goto err_out; - } - } - return page; - -err_out: - unlock_page(page); - put_page(page); - return ERR_PTR(err); -} - -static int erofs_map_blocks_flatmode(struct inode *inode, - struct erofs_map_blocks *map, - int flags) -{ - int err = 0; - erofs_blk_t nblocks, lastblk; - u64 offset = map->m_la; - struct erofs_vnode *vi = EROFS_V(inode); - - trace_erofs_map_blocks_flatmode_enter(inode, map, flags); - - nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE); - lastblk = nblocks - is_inode_flat_inline(inode); - - if (unlikely(offset >= inode->i_size)) { - /* leave out-of-bound access unmapped */ - map->m_flags = 0; - map->m_plen = 0; - goto out; - } - - /* there is no hole in flatmode */ - map->m_flags = EROFS_MAP_MAPPED; - - if (offset < blknr_to_addr(lastblk)) { - map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la; - map->m_plen = blknr_to_addr(lastblk) - offset; - } else if (is_inode_flat_inline(inode)) { - /* 2 - inode inline B: inode, [xattrs], inline last blk... */ - struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - - map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize + - vi->xattr_isize + erofs_blkoff(map->m_la); - map->m_plen = inode->i_size - offset; - - /* inline data should be located in one meta block */ - if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) { - errln("inline data cross block boundary @ nid %llu", - vi->nid); - DBG_BUGON(1); - err = -EFSCORRUPTED; - goto err_out; - } - - map->m_flags |= EROFS_MAP_META; - } else { - errln("internal error @ nid: %llu (size %llu), m_la 0x%llx", - vi->nid, inode->i_size, map->m_la); - DBG_BUGON(1); - err = -EIO; - goto err_out; - } - -out: - map->m_llen = map->m_plen; - -err_out: - trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0); - return err; -} - -int erofs_map_blocks(struct inode *inode, - struct erofs_map_blocks *map, int flags) -{ - if (unlikely(is_inode_layout_compression(inode))) { - int err = z_erofs_map_blocks_iter(inode, map, flags); - - if (map->mpage) { - put_page(map->mpage); - map->mpage = NULL; - } - return err; - } - return erofs_map_blocks_flatmode(inode, map, flags); -} - -static inline struct bio *erofs_read_raw_page(struct bio *bio, - struct address_space *mapping, - struct page *page, - erofs_off_t *last_block, - unsigned int nblocks, - bool ra) -{ - struct inode *const inode = mapping->host; - struct super_block *const sb = inode->i_sb; - erofs_off_t current_block = (erofs_off_t)page->index; - int err; - - DBG_BUGON(!nblocks); - - if (PageUptodate(page)) { - err = 0; - goto has_updated; - } - - /* note that for readpage case, bio also equals to NULL */ - if (bio && - /* not continuous */ - *last_block + 1 != current_block) { -submit_bio_retry: - __submit_bio(bio, REQ_OP_READ, 0); - bio = NULL; - } - - if (!bio) { - struct erofs_map_blocks map = { - .m_la = blknr_to_addr(current_block), - }; - erofs_blk_t blknr; - unsigned int blkoff; - - err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); - if (unlikely(err)) - goto err_out; - - /* zero out the holed page */ - if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) { - zero_user_segment(page, 0, PAGE_SIZE); - SetPageUptodate(page); - - /* imply err = 0, see erofs_map_blocks */ - goto has_updated; - } - 
- /* for RAW access mode, m_plen must be equal to m_llen */ - DBG_BUGON(map.m_plen != map.m_llen); - - blknr = erofs_blknr(map.m_pa); - blkoff = erofs_blkoff(map.m_pa); - - /* deal with inline page */ - if (map.m_flags & EROFS_MAP_META) { - void *vsrc, *vto; - struct page *ipage; - - DBG_BUGON(map.m_plen > PAGE_SIZE); - - ipage = erofs_get_meta_page(inode->i_sb, blknr, 0); - - if (IS_ERR(ipage)) { - err = PTR_ERR(ipage); - goto err_out; - } - - vsrc = kmap_atomic(ipage); - vto = kmap_atomic(page); - memcpy(vto, vsrc + blkoff, map.m_plen); - memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen); - kunmap_atomic(vto); - kunmap_atomic(vsrc); - flush_dcache_page(page); - - SetPageUptodate(page); - /* TODO: could we unlock the page earlier? */ - unlock_page(ipage); - put_page(ipage); - - /* imply err = 0, see erofs_map_blocks */ - goto has_updated; - } - - /* pa must be block-aligned for raw reading */ - DBG_BUGON(erofs_blkoff(map.m_pa)); - - /* max # of continuous pages */ - if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE)) - nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE); - if (nblocks > BIO_MAX_PAGES) - nblocks = BIO_MAX_PAGES; - - bio = erofs_grab_bio(sb, blknr, nblocks, sb, - read_endio, false); - if (IS_ERR(bio)) { - err = PTR_ERR(bio); - bio = NULL; - goto err_out; - } - } - - err = bio_add_page(bio, page, PAGE_SIZE, 0); - /* out of the extent or bio is full */ - if (err < PAGE_SIZE) - goto submit_bio_retry; - - *last_block = current_block; - - /* shift in advance in case of it followed by too many gaps */ - if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) { - /* err should reassign to 0 after submitting */ - err = 0; - goto submit_bio_out; - } - - return bio; - -err_out: - /* for sync reading, set page error immediately */ - if (!ra) { - SetPageError(page); - ClearPageUptodate(page); - } -has_updated: - unlock_page(page); - - /* if updated manually, continuous pages has a gap */ - if (bio) -submit_bio_out: - __submit_bio(bio, REQ_OP_READ, 0); - - return unlikely(err) ? ERR_PTR(err) : NULL; -} - -/* - * since we dont have write or truncate flows, so no inode - * locking needs to be held at the moment. 
- */ -static int erofs_raw_access_readpage(struct file *file, struct page *page) -{ - erofs_off_t last_block; - struct bio *bio; - - trace_erofs_readpage(page, true); - - bio = erofs_read_raw_page(NULL, page->mapping, - page, &last_block, 1, false); - - if (IS_ERR(bio)) - return PTR_ERR(bio); - - DBG_BUGON(bio); /* since we have only one bio -- must be NULL */ - return 0; -} - -static int erofs_raw_access_readpages(struct file *filp, - struct address_space *mapping, - struct list_head *pages, - unsigned int nr_pages) -{ - erofs_off_t last_block; - struct bio *bio = NULL; - gfp_t gfp = readahead_gfp_mask(mapping); - struct page *page = list_last_entry(pages, struct page, lru); - - trace_erofs_readpages(mapping->host, page, nr_pages, true); - - for (; nr_pages; --nr_pages) { - page = list_entry(pages->prev, struct page, lru); - - prefetchw(&page->flags); - list_del(&page->lru); - - if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) { - bio = erofs_read_raw_page(bio, mapping, page, - &last_block, nr_pages, true); - - /* all the page errors are ignored when readahead */ - if (IS_ERR(bio)) { - pr_err("%s, readahead error at page %lu of nid %llu\n", - __func__, page->index, - EROFS_V(mapping->host)->nid); - - bio = NULL; - } - } - - /* pages could still be locked */ - put_page(page); - } - DBG_BUGON(!list_empty(pages)); - - /* the rare case (end in gaps) */ - if (unlikely(bio)) - __submit_bio(bio, REQ_OP_READ, 0); - return 0; -} - -static int erofs_get_block(struct inode *inode, sector_t iblock, - struct buffer_head *bh, int create) -{ - struct erofs_map_blocks map = { - .m_la = iblock << 9, - }; - int err; - - err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); - if (err) - return err; - - if (map.m_flags & EROFS_MAP_MAPPED) - bh->b_blocknr = erofs_blknr(map.m_pa); - - return err; -} - -static sector_t erofs_bmap(struct address_space *mapping, sector_t block) -{ - struct inode *inode = mapping->host; - - if (is_inode_flat_inline(inode)) { - erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE; - - if (block >> LOG_SECTORS_PER_BLOCK >= blks) - return 0; - } - - return generic_block_bmap(mapping, block, erofs_get_block); -} - -/* for uncompressed (aligned) files and raw access for other files */ -const struct address_space_operations erofs_raw_access_aops = { - .readpage = erofs_raw_access_readpage, - .readpages = erofs_raw_access_readpages, - .bmap = erofs_bmap, -}; - diff --git a/drivers/staging/erofs/decompressor.c b/drivers/staging/erofs/decompressor.c deleted file mode 100644 index 32a811ac704a..000000000000 --- a/drivers/staging/erofs/decompressor.c +++ /dev/null @@ -1,360 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/decompressor.c - * - * Copyright (C) 2019 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "compress.h" -#include -#include - -#ifndef LZ4_DISTANCE_MAX /* history window size */ -#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */ -#endif - -#define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1) -#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN -#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32) -#endif - -struct z_erofs_decompressor { - /* - * if destpages have sparsed pages, fill them with bounce pages. - * it also check whether destpages indicate continuous physical memory. 
- */ - int (*prepare_destpages)(struct z_erofs_decompress_req *rq, - struct list_head *pagepool); - int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out); - char *name; -}; - -static bool use_vmap; -module_param(use_vmap, bool, 0444); -MODULE_PARM_DESC(use_vmap, "Use vmap() instead of vm_map_ram() (default 0)"); - -static int lz4_prepare_destpages(struct z_erofs_decompress_req *rq, - struct list_head *pagepool) -{ - const unsigned int nr = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL }; - unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES, - BITS_PER_LONG)] = { 0 }; - void *kaddr = NULL; - unsigned int i, j, top; - - top = 0; - for (i = j = 0; i < nr; ++i, ++j) { - struct page *const page = rq->out[i]; - struct page *victim; - - if (j >= LZ4_MAX_DISTANCE_PAGES) - j = 0; - - /* 'valid' bounced can only be tested after a complete round */ - if (test_bit(j, bounced)) { - DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES); - DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES); - availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES]; - } - - if (page) { - __clear_bit(j, bounced); - if (kaddr) { - if (kaddr + PAGE_SIZE == page_address(page)) - kaddr += PAGE_SIZE; - else - kaddr = NULL; - } else if (!i) { - kaddr = page_address(page); - } - continue; - } - kaddr = NULL; - __set_bit(j, bounced); - - if (top) { - victim = availables[--top]; - get_page(victim); - } else { - victim = erofs_allocpage(pagepool, GFP_KERNEL, false); - if (unlikely(!victim)) - return -ENOMEM; - victim->mapping = Z_EROFS_MAPPING_STAGING; - } - rq->out[i] = victim; - } - return kaddr ? 1 : 0; -} - -static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq, - u8 *src, unsigned int pageofs_in) -{ - /* - * if in-place decompression is ongoing, those decompressed - * pages should be copied in order to avoid being overlapped. 
- */ - struct page **in = rq->in; - u8 *const tmp = erofs_get_pcpubuf(0); - u8 *tmpp = tmp; - unsigned int inlen = rq->inputsize - pageofs_in; - unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in); - - while (tmpp < tmp + inlen) { - if (!src) - src = kmap_atomic(*in); - memcpy(tmpp, src + pageofs_in, count); - kunmap_atomic(src); - src = NULL; - tmpp += count; - pageofs_in = 0; - count = PAGE_SIZE; - ++in; - } - return tmp; -} - -static int lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out) -{ - unsigned int inputmargin, inlen; - u8 *src; - bool copied, support_0padding; - int ret; - - if (rq->inputsize > PAGE_SIZE) - return -EOPNOTSUPP; - - src = kmap_atomic(*rq->in); - inputmargin = 0; - support_0padding = false; - - /* decompression inplace is only safe when 0padding is enabled */ - if (EROFS_SB(rq->sb)->requirements & EROFS_REQUIREMENT_LZ4_0PADDING) { - support_0padding = true; - - while (!src[inputmargin & ~PAGE_MASK]) - if (!(++inputmargin & ~PAGE_MASK)) - break; - - if (inputmargin >= rq->inputsize) { - kunmap_atomic(src); - return -EIO; - } - } - - copied = false; - inlen = rq->inputsize - inputmargin; - if (rq->inplace_io) { - const uint oend = (rq->pageofs_out + - rq->outputsize) & ~PAGE_MASK; - const uint nr = PAGE_ALIGN(rq->pageofs_out + - rq->outputsize) >> PAGE_SHIFT; - - if (rq->partial_decoding || !support_0padding || - rq->out[nr - 1] != rq->in[0] || - rq->inputsize - oend < - LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) { - src = generic_copy_inplace_data(rq, src, inputmargin); - inputmargin = 0; - copied = true; - } - } - - ret = LZ4_decompress_safe_partial(src + inputmargin, out, - inlen, rq->outputsize, - rq->outputsize); - if (ret < 0) { - errln("%s, failed to decompress, in[%p, %u, %u] out[%p, %u]", - __func__, src + inputmargin, inlen, inputmargin, - out, rq->outputsize); - WARN_ON(1); - print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET, - 16, 1, src + inputmargin, inlen, true); - print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET, - 16, 1, out, rq->outputsize, true); - ret = -EIO; - } - - if (copied) - erofs_put_pcpubuf(src); - else - kunmap_atomic(src); - return ret; -} - -static struct z_erofs_decompressor decompressors[] = { - [Z_EROFS_COMPRESSION_SHIFTED] = { - .name = "shifted" - }, - [Z_EROFS_COMPRESSION_LZ4] = { - .prepare_destpages = lz4_prepare_destpages, - .decompress = lz4_decompress, - .name = "lz4" - }, -}; - -static void copy_from_pcpubuf(struct page **out, const char *dst, - unsigned short pageofs_out, - unsigned int outputsize) -{ - const char *end = dst + outputsize; - const unsigned int righthalf = PAGE_SIZE - pageofs_out; - const char *cur = dst - pageofs_out; - - while (cur < end) { - struct page *const page = *out++; - - if (page) { - char *buf = kmap_atomic(page); - - if (cur >= dst) { - memcpy(buf, cur, min_t(uint, PAGE_SIZE, - end - cur)); - } else { - memcpy(buf + pageofs_out, cur + pageofs_out, - min_t(uint, righthalf, end - cur)); - } - kunmap_atomic(buf); - } - cur += PAGE_SIZE; - } -} - -static void *erofs_vmap(struct page **pages, unsigned int count) -{ - int i = 0; - - if (use_vmap) - return vmap(pages, count, VM_MAP, PAGE_KERNEL); - - while (1) { - void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL); - - /* retry two more times (totally 3 times) */ - if (addr || ++i >= 3) - return addr; - vm_unmap_aliases(); - } - return NULL; -} - -static void erofs_vunmap(const void *mem, unsigned int count) -{ - if (!use_vmap) - vm_unmap_ram(mem, count); - else - vunmap(mem); -} - -static int decompress_generic(struct 
z_erofs_decompress_req *rq, - struct list_head *pagepool) -{ - const unsigned int nrpages_out = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const struct z_erofs_decompressor *alg = decompressors + rq->alg; - unsigned int dst_maptype; - void *dst; - int ret; - - if (nrpages_out == 1 && !rq->inplace_io) { - DBG_BUGON(!*rq->out); - dst = kmap_atomic(*rq->out); - dst_maptype = 0; - goto dstmap_out; - } - - /* - * For the case of small output size (especially much less - * than PAGE_SIZE), memcpy the decompressed data rather than - * compressed data is preferred. - */ - if (rq->outputsize <= PAGE_SIZE * 7 / 8) { - dst = erofs_get_pcpubuf(0); - if (IS_ERR(dst)) - return PTR_ERR(dst); - - rq->inplace_io = false; - ret = alg->decompress(rq, dst); - if (!ret) - copy_from_pcpubuf(rq->out, dst, rq->pageofs_out, - rq->outputsize); - - erofs_put_pcpubuf(dst); - return ret; - } - - ret = alg->prepare_destpages(rq, pagepool); - if (ret < 0) { - return ret; - } else if (ret) { - dst = page_address(*rq->out); - dst_maptype = 1; - goto dstmap_out; - } - - dst = erofs_vmap(rq->out, nrpages_out); - if (!dst) - return -ENOMEM; - dst_maptype = 2; - -dstmap_out: - ret = alg->decompress(rq, dst + rq->pageofs_out); - - if (!dst_maptype) - kunmap_atomic(dst); - else if (dst_maptype == 2) - erofs_vunmap(dst, nrpages_out); - return ret; -} - -static int shifted_decompress(const struct z_erofs_decompress_req *rq, - struct list_head *pagepool) -{ - const unsigned int nrpages_out = - PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out; - unsigned char *src, *dst; - - if (nrpages_out > 2) { - DBG_BUGON(1); - return -EIO; - } - - if (rq->out[0] == *rq->in) { - DBG_BUGON(nrpages_out != 1); - return 0; - } - - src = kmap_atomic(*rq->in); - if (!rq->out[0]) { - dst = NULL; - } else { - dst = kmap_atomic(rq->out[0]); - memcpy(dst + rq->pageofs_out, src, righthalf); - } - - if (rq->out[1] == *rq->in) { - memmove(src, src + righthalf, rq->pageofs_out); - } else if (nrpages_out == 2) { - if (dst) - kunmap_atomic(dst); - DBG_BUGON(!rq->out[1]); - dst = kmap_atomic(rq->out[1]); - memcpy(dst, src + righthalf, rq->pageofs_out); - } - if (dst) - kunmap_atomic(dst); - kunmap_atomic(src); - return 0; -} - -int z_erofs_decompress(struct z_erofs_decompress_req *rq, - struct list_head *pagepool) -{ - if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED) - return shifted_decompress(rq, pagepool); - return decompress_generic(rq, pagepool); -} - diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c deleted file mode 100644 index 77ef856df9f3..000000000000 --- a/drivers/staging/erofs/dir.c +++ /dev/null @@ -1,141 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/dir.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. 
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "internal.h" - -static void debug_one_dentry(unsigned char d_type, const char *de_name, - unsigned int de_namelen) -{ -#ifdef CONFIG_EROFS_FS_DEBUG - /* since the on-disk name could not have the trailing '\0' */ - unsigned char dbg_namebuf[EROFS_NAME_LEN + 1]; - - memcpy(dbg_namebuf, de_name, de_namelen); - dbg_namebuf[de_namelen] = '\0'; - - debugln("found dirent %s de_len %u d_type %d", dbg_namebuf, - de_namelen, d_type); -#endif -} - -static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx, - void *dentry_blk, unsigned int *ofs, - unsigned int nameoff, unsigned int maxsize) -{ - struct erofs_dirent *de = dentry_blk + *ofs; - const struct erofs_dirent *end = dentry_blk + nameoff; - - while (de < end) { - const char *de_name; - unsigned int de_namelen; - unsigned char d_type; - - d_type = fs_ftype_to_dtype(de->file_type); - - nameoff = le16_to_cpu(de->nameoff); - de_name = (char *)dentry_blk + nameoff; - - /* the last dirent in the block? */ - if (de + 1 >= end) - de_namelen = strnlen(de_name, maxsize - nameoff); - else - de_namelen = le16_to_cpu(de[1].nameoff) - nameoff; - - /* a corrupted entry is found */ - if (unlikely(nameoff + de_namelen > maxsize || - de_namelen > EROFS_NAME_LEN)) { - errln("bogus dirent @ nid %llu", EROFS_V(dir)->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } - - debug_one_dentry(d_type, de_name, de_namelen); - if (!dir_emit(ctx, de_name, de_namelen, - le64_to_cpu(de->nid), d_type)) - /* stopped by some reason */ - return 1; - ++de; - *ofs += sizeof(struct erofs_dirent); - } - *ofs = maxsize; - return 0; -} - -static int erofs_readdir(struct file *f, struct dir_context *ctx) -{ - struct inode *dir = file_inode(f); - struct address_space *mapping = dir->i_mapping; - const size_t dirsize = i_size_read(dir); - unsigned int i = ctx->pos / EROFS_BLKSIZ; - unsigned int ofs = ctx->pos % EROFS_BLKSIZ; - int err = 0; - bool initial = true; - - while (ctx->pos < dirsize) { - struct page *dentry_page; - struct erofs_dirent *de; - unsigned int nameoff, maxsize; - - dentry_page = read_mapping_page(mapping, i, NULL); - if (dentry_page == ERR_PTR(-ENOMEM)) { - err = -ENOMEM; - break; - } else if (IS_ERR(dentry_page)) { - errln("fail to readdir of logical block %u of nid %llu", - i, EROFS_V(dir)->nid); - err = -EFSCORRUPTED; - break; - } - - de = (struct erofs_dirent *)kmap(dentry_page); - - nameoff = le16_to_cpu(de->nameoff); - - if (unlikely(nameoff < sizeof(struct erofs_dirent) || - nameoff >= PAGE_SIZE)) { - errln("%s, invalid de[0].nameoff %u @ nid %llu", - __func__, nameoff, EROFS_V(dir)->nid); - err = -EFSCORRUPTED; - goto skip_this; - } - - maxsize = min_t(unsigned int, - dirsize - ctx->pos + ofs, PAGE_SIZE); - - /* search dirents at the arbitrary position */ - if (unlikely(initial)) { - initial = false; - - ofs = roundup(ofs, sizeof(struct erofs_dirent)); - if (unlikely(ofs >= nameoff)) - goto skip_this; - } - - err = erofs_fill_dentries(dir, ctx, de, &ofs, - nameoff, maxsize); -skip_this: - kunmap(dentry_page); - - put_page(dentry_page); - - ctx->pos = blknr_to_addr(i) + ofs; - - if (unlikely(err)) - break; - ++i; - ofs = 0; - } - return err < 0 ? 
err : 0; -} - -const struct file_operations erofs_dir_fops = { - .llseek = generic_file_llseek, - .read = generic_read_dir, - .iterate_shared = erofs_readdir, -}; - diff --git a/drivers/staging/erofs/erofs_fs.h b/drivers/staging/erofs/erofs_fs.h deleted file mode 100644 index 6db70f395937..000000000000 --- a/drivers/staging/erofs/erofs_fs.h +++ /dev/null @@ -1,310 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */ -/* - * linux/drivers/staging/erofs/erofs_fs.h - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_FS_H -#define __EROFS_FS_H - -/* Enhanced(Extended) ROM File System */ -#define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2 -#define EROFS_SUPER_OFFSET 1024 - -/* - * Any bits that aren't in EROFS_ALL_REQUIREMENTS should be - * incompatible with this kernel version. - */ -#define EROFS_REQUIREMENT_LZ4_0PADDING 0x00000001 -#define EROFS_ALL_REQUIREMENTS EROFS_REQUIREMENT_LZ4_0PADDING - -struct erofs_super_block { -/* 0 */__le32 magic; /* in the little endian */ -/* 4 */__le32 checksum; /* crc32c(super_block) */ -/* 8 */__le32 features; /* (aka. feature_compat) */ -/* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */ -/* 13 */__u8 reserved; - -/* 14 */__le16 root_nid; -/* 16 */__le64 inos; /* total valid ino # (== f_files - f_favail) */ - -/* 24 */__le64 build_time; /* inode v1 time derivation */ -/* 32 */__le32 build_time_nsec; -/* 36 */__le32 blocks; /* used for statfs */ -/* 40 */__le32 meta_blkaddr; -/* 44 */__le32 xattr_blkaddr; -/* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */ -/* 64 */__u8 volume_name[16]; /* volume name */ -/* 80 */__le32 requirements; /* (aka. feature_incompat) */ - -/* 84 */__u8 reserved2[44]; -} __packed; /* 128 bytes */ - -/* - * erofs inode data mapping: - * 0 - inode plain without inline data A: - * inode, [xattrs], ... | ... | no-holed data - * 1 - inode VLE compression B (legacy): - * inode, [xattrs], extents ... | ... - * 2 - inode plain with inline data C: - * inode, [xattrs], last_inline_data, ... | ... | no-holed data - * 3 - inode compression D: - * inode, [xattrs], map_header, extents ... | ... 
- * 4~7 - reserved - */ -enum { - EROFS_INODE_FLAT_PLAIN, - EROFS_INODE_FLAT_COMPRESSION_LEGACY, - EROFS_INODE_FLAT_INLINE, - EROFS_INODE_FLAT_COMPRESSION, - EROFS_INODE_LAYOUT_MAX -}; - -static inline bool erofs_inode_is_data_compressed(unsigned int datamode) -{ - if (datamode == EROFS_INODE_FLAT_COMPRESSION) - return true; - return datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY; -} - -/* bit definitions of inode i_advise */ -#define EROFS_I_VERSION_BITS 1 -#define EROFS_I_DATA_MAPPING_BITS 3 - -#define EROFS_I_VERSION_BIT 0 -#define EROFS_I_DATA_MAPPING_BIT 1 - -struct erofs_inode_v1 { -/* 0 */__le16 i_advise; - -/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ -/* 2 */__le16 i_xattr_icount; -/* 4 */__le16 i_mode; -/* 6 */__le16 i_nlink; -/* 8 */__le32 i_size; -/* 12 */__le32 i_reserved; -/* 16 */union { - /* file total compressed blocks for data mapping 1 */ - __le32 compressed_blocks; - __le32 raw_blkaddr; - - /* for device files, used to indicate old/new device # */ - __le32 rdev; - } i_u __packed; -/* 20 */__le32 i_ino; /* only used for 32-bit stat compatibility */ -/* 24 */__le16 i_uid; -/* 26 */__le16 i_gid; -/* 28 */__le32 i_reserved2; -} __packed; - -/* 32 bytes on-disk inode */ -#define EROFS_INODE_LAYOUT_V1 0 -/* 64 bytes on-disk inode */ -#define EROFS_INODE_LAYOUT_V2 1 - -struct erofs_inode_v2 { -/* 0 */__le16 i_advise; - -/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ -/* 2 */__le16 i_xattr_icount; -/* 4 */__le16 i_mode; -/* 6 */__le16 i_reserved; -/* 8 */__le64 i_size; -/* 16 */union { - /* file total compressed blocks for data mapping 1 */ - __le32 compressed_blocks; - __le32 raw_blkaddr; - - /* for device files, used to indicate old/new device # */ - __le32 rdev; - } i_u __packed; - - /* only used for 32-bit stat compatibility */ -/* 20 */__le32 i_ino; - -/* 24 */__le32 i_uid; -/* 28 */__le32 i_gid; -/* 32 */__le64 i_ctime; -/* 40 */__le32 i_ctime_nsec; -/* 44 */__le32 i_nlink; -/* 48 */__u8 i_reserved2[16]; -} __packed; /* 64 bytes */ - -#define EROFS_MAX_SHARED_XATTRS (128) -/* h_shared_count between 129 ... 255 are special # */ -#define EROFS_SHARED_XATTR_EXTENT (255) - -/* - * inline xattrs (n == i_xattr_icount): - * erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes - * 12 bytes / \ - * / \ - * /-----------------------\ - * | erofs_xattr_entries+ | - * +-----------------------+ - * inline xattrs must starts in erofs_xattr_ibody_header, - * for read-only fs, no need to introduce h_refcount - */ -struct erofs_xattr_ibody_header { - __le32 h_reserved; - __u8 h_shared_count; - __u8 h_reserved2[7]; - __le32 h_shared_xattrs[0]; /* shared xattr id array */ -} __packed; - -/* Name indexes */ -#define EROFS_XATTR_INDEX_USER 1 -#define EROFS_XATTR_INDEX_POSIX_ACL_ACCESS 2 -#define EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT 3 -#define EROFS_XATTR_INDEX_TRUSTED 4 -#define EROFS_XATTR_INDEX_LUSTRE 5 -#define EROFS_XATTR_INDEX_SECURITY 6 - -/* xattr entry (for both inline & shared xattrs) */ -struct erofs_xattr_entry { - __u8 e_name_len; /* length of name */ - __u8 e_name_index; /* attribute name index */ - __le16 e_value_size; /* size of attribute value */ - /* followed by e_name and e_value */ - char e_name[0]; /* attribute name */ -} __packed; - -#define ondisk_xattr_ibody_size(count) ({\ - u32 __count = le16_to_cpu(count); \ - ((__count) == 0) ? 
0 : \ - sizeof(struct erofs_xattr_ibody_header) + \ - sizeof(__u32) * ((__count) - 1); }) - -#define EROFS_XATTR_ALIGN(size) round_up(size, sizeof(struct erofs_xattr_entry)) -#define EROFS_XATTR_ENTRY_SIZE(entry) EROFS_XATTR_ALIGN( \ - sizeof(struct erofs_xattr_entry) + \ - (entry)->e_name_len + le16_to_cpu((entry)->e_value_size)) - -/* available compression algorithm types */ -enum { - Z_EROFS_COMPRESSION_LZ4, - Z_EROFS_COMPRESSION_MAX -}; - -/* - * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on) - * e.g. for 4k logical cluster size, 4B if compacted 2B is off; - * (4B) + 2B + (4B) if compacted 2B is on. - */ -#define Z_EROFS_ADVISE_COMPACTED_2B_BIT 0 - -#define Z_EROFS_ADVISE_COMPACTED_2B (1 << Z_EROFS_ADVISE_COMPACTED_2B_BIT) - -struct z_erofs_map_header { - __le32 h_reserved1; - __le16 h_advise; - /* - * bit 0-3 : algorithm type of head 1 (logical cluster type 01); - * bit 4-7 : algorithm type of head 2 (logical cluster type 11). - */ - __u8 h_algorithmtype; - /* - * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096; - * bit 3-4 : (physical - logical) cluster bits of head 1: - * For example, if logical clustersize = 4096, 1 for 8192. - * bit 5-7 : (physical - logical) cluster bits of head 2. - */ - __u8 h_clusterbits; -}; - -#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8 - -/* - * Z_EROFS Variable-sized Logical Extent cluster type: - * 0 - literal (uncompressed) cluster - * 1 - compressed cluster (for the head logical cluster) - * 2 - compressed cluster (for the other logical clusters) - * - * In detail, - * 0 - literal (uncompressed) cluster, - * di_advise = 0 - * di_clusterofs = the literal data offset of the cluster - * di_blkaddr = the blkaddr of the literal cluster - * - * 1 - compressed cluster (for the head logical cluster) - * di_advise = 1 - * di_clusterofs = the decompressed data offset of the cluster - * di_blkaddr = the blkaddr of the compressed cluster - * - * 2 - compressed cluster (for the other logical clusters) - * di_advise = 2 - * di_clusterofs = - * the decompressed data offset in its own head cluster - * di_u.delta[0] = distance to its corresponding head cluster - * di_u.delta[1] = distance to its corresponding tail cluster - * (di_advise could be 0, 1 or 2) - */ -enum { - Z_EROFS_VLE_CLUSTER_TYPE_PLAIN, - Z_EROFS_VLE_CLUSTER_TYPE_HEAD, - Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD, - Z_EROFS_VLE_CLUSTER_TYPE_RESERVED, - Z_EROFS_VLE_CLUSTER_TYPE_MAX -}; - -#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2 -#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0 - -struct z_erofs_vle_decompressed_index { - __le16 di_advise; - /* where to decompress in the head cluster */ - __le16 di_clusterofs; - - union { - /* for the head cluster */ - __le32 blkaddr; - /* - * for the rest clusters - * eg. 
for 4k page-sized cluster, maximum 4K*64k = 256M) - * [0] - pointing to the head cluster - * [1] - pointing to the tail cluster - */ - __le16 delta[2]; - } di_u __packed; /* 8 bytes */ -} __packed; - -#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \ - (round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \ - sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING) - -/* dirent sorts in alphabet order, thus we can do binary search */ -struct erofs_dirent { - __le64 nid; /* 0, node number */ - __le16 nameoff; /* 8, start offset of file name */ - __u8 file_type; /* 10, file type */ - __u8 reserved; /* 11, reserved */ -} __packed; - -/* - * EROFS file types should match generic FT_* types and - * it seems no need to add BUILD_BUG_ONs since potential - * unmatchness will break other fses as well... - */ - -#define EROFS_NAME_LEN 255 - -/* check the EROFS on-disk layout strictly at compile time */ -static inline void erofs_check_ondisk_layout_definitions(void) -{ - BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128); - BUILD_BUG_ON(sizeof(struct erofs_inode_v1) != 32); - BUILD_BUG_ON(sizeof(struct erofs_inode_v2) != 64); - BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12); - BUILD_BUG_ON(sizeof(struct erofs_xattr_entry) != 4); - BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8); - BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8); - BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12); - - BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) < - Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1); -} - -#endif - diff --git a/drivers/staging/erofs/include/trace/events/erofs.h b/drivers/staging/erofs/include/trace/events/erofs.h deleted file mode 100644 index bfb2da9c4eee..000000000000 --- a/drivers/staging/erofs/include/trace/events/erofs.h +++ /dev/null @@ -1,256 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM erofs - -#if !defined(_TRACE_EROFS_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_EROFS_H - -#include - -#define show_dev(dev) MAJOR(dev), MINOR(dev) -#define show_dev_nid(entry) show_dev(entry->dev), entry->nid - -#define show_file_type(type) \ - __print_symbolic(type, \ - { 0, "FILE" }, \ - { 1, "DIR" }) - -#define show_map_flags(flags) __print_flags(flags, "|", \ - { EROFS_GET_BLOCKS_RAW, "RAW" }) - -#define show_mflags(flags) __print_flags(flags, "", \ - { EROFS_MAP_MAPPED, "M" }, \ - { EROFS_MAP_META, "I" }, \ - { EROFS_MAP_ZIPPED, "Z" }) - -TRACE_EVENT(erofs_lookup, - - TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags), - - TP_ARGS(dir, dentry, flags), - - TP_STRUCT__entry( - __field(dev_t, dev ) - __field(erofs_nid_t, nid ) - __field(const char *, name ) - __field(unsigned int, flags ) - ), - - TP_fast_assign( - __entry->dev = dir->i_sb->s_dev; - __entry->nid = EROFS_V(dir)->nid; - __entry->name = dentry->d_name.name; - __entry->flags = flags; - ), - - TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x", - show_dev_nid(__entry), - __entry->name, - __entry->flags) -); - -TRACE_EVENT(erofs_fill_inode, - TP_PROTO(struct inode *inode, int isdir), - TP_ARGS(inode, isdir), - - TP_STRUCT__entry( - __field(dev_t, dev ) - __field(erofs_nid_t, nid ) - __field(erofs_blk_t, blkaddr ) - __field(unsigned int, ofs ) - __field(int, isdir ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - __entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid)); - __entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid)); - 
__entry->isdir = isdir; - ), - - TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u, isdir %d", - show_dev_nid(__entry), - __entry->blkaddr, __entry->ofs, - __entry->isdir) -); - -TRACE_EVENT(erofs_readpage, - - TP_PROTO(struct page *page, bool raw), - - TP_ARGS(page, raw), - - TP_STRUCT__entry( - __field(dev_t, dev ) - __field(erofs_nid_t, nid ) - __field(int, dir ) - __field(pgoff_t, index ) - __field(int, uptodate) - __field(bool, raw ) - ), - - TP_fast_assign( - __entry->dev = page->mapping->host->i_sb->s_dev; - __entry->nid = EROFS_V(page->mapping->host)->nid; - __entry->dir = S_ISDIR(page->mapping->host->i_mode); - __entry->index = page->index; - __entry->uptodate = PageUptodate(page); - __entry->raw = raw; - ), - - TP_printk("dev = (%d,%d), nid = %llu, %s, index = %lu, uptodate = %d " - "raw = %d", - show_dev_nid(__entry), - show_file_type(__entry->dir), - (unsigned long)__entry->index, - __entry->uptodate, - __entry->raw) -); - -TRACE_EVENT(erofs_readpages, - - TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage, - bool raw), - - TP_ARGS(inode, page, nrpage, raw), - - TP_STRUCT__entry( - __field(dev_t, dev ) - __field(erofs_nid_t, nid ) - __field(pgoff_t, start ) - __field(unsigned int, nrpage ) - __field(bool, raw ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - __entry->start = page->index; - __entry->nrpage = nrpage; - __entry->raw = raw; - ), - - TP_printk("dev = (%d,%d), nid = %llu, start = %lu nrpage = %u raw = %d", - show_dev_nid(__entry), - (unsigned long)__entry->start, - __entry->nrpage, - __entry->raw) -); - -DECLARE_EVENT_CLASS(erofs__map_blocks_enter, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags), - - TP_ARGS(inode, map, flags), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( erofs_nid_t, nid ) - __field( erofs_off_t, la ) - __field( u64, llen ) - __field( unsigned int, flags ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - __entry->la = map->m_la; - __entry->llen = map->m_llen; - __entry->flags = flags; - ), - - TP_printk("dev = (%d,%d), nid = %llu, la %llu llen %llu flags %s", - show_dev_nid(__entry), - __entry->la, __entry->llen, - __entry->flags ? 
show_map_flags(__entry->flags) : "NULL") -); - -DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned flags), - - TP_ARGS(inode, map, flags) -); - -DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags), - - TP_ARGS(inode, map, flags) -); - -DECLARE_EVENT_CLASS(erofs__map_blocks_exit, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags, int ret), - - TP_ARGS(inode, map, flags, ret), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( erofs_nid_t, nid ) - __field( unsigned int, flags ) - __field( erofs_off_t, la ) - __field( erofs_off_t, pa ) - __field( u64, llen ) - __field( u64, plen ) - __field( unsigned int, mflags ) - __field( int, ret ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - __entry->flags = flags; - __entry->la = map->m_la; - __entry->pa = map->m_pa; - __entry->llen = map->m_llen; - __entry->plen = map->m_plen; - __entry->mflags = map->m_flags; - __entry->ret = ret; - ), - - TP_printk("dev = (%d,%d), nid = %llu, flags %s " - "la %llu pa %llu llen %llu plen %llu mflags %s ret %d", - show_dev_nid(__entry), - __entry->flags ? show_map_flags(__entry->flags) : "NULL", - __entry->la, __entry->pa, __entry->llen, __entry->plen, - show_mflags(__entry->mflags), __entry->ret) -); - -DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned flags, int ret), - - TP_ARGS(inode, map, flags, ret) -); - -DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit, - TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, - unsigned int flags, int ret), - - TP_ARGS(inode, map, flags, ret) -); - -TRACE_EVENT(erofs_destroy_inode, - TP_PROTO(struct inode *inode), - - TP_ARGS(inode), - - TP_STRUCT__entry( - __field( dev_t, dev ) - __field( erofs_nid_t, nid ) - ), - - TP_fast_assign( - __entry->dev = inode->i_sb->s_dev; - __entry->nid = EROFS_V(inode)->nid; - ), - - TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry)) -); - -#endif /* _TRACE_EROFS_H */ - - /* This part must be outside protection */ -#include diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c deleted file mode 100644 index cbc2c342a37f..000000000000 --- a/drivers/staging/erofs/inode.c +++ /dev/null @@ -1,334 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/inode.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. 
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "xattr.h" - -#include - -/* no locking */ -static int read_inode(struct inode *inode, void *data) -{ - struct erofs_vnode *vi = EROFS_V(inode); - struct erofs_inode_v1 *v1 = data; - const unsigned int advise = le16_to_cpu(v1->i_advise); - erofs_blk_t nblks = 0; - - vi->datamode = __inode_data_mapping(advise); - - if (unlikely(vi->datamode >= EROFS_INODE_LAYOUT_MAX)) { - errln("unsupported data mapping %u of nid %llu", - vi->datamode, vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; - } - - if (__inode_version(advise) == EROFS_INODE_LAYOUT_V2) { - struct erofs_inode_v2 *v2 = data; - - vi->inode_isize = sizeof(struct erofs_inode_v2); - vi->xattr_isize = ondisk_xattr_ibody_size(v2->i_xattr_icount); - - inode->i_mode = le16_to_cpu(v2->i_mode); - if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || - S_ISLNK(inode->i_mode)) - vi->raw_blkaddr = le32_to_cpu(v2->i_u.raw_blkaddr); - else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) - inode->i_rdev = - new_decode_dev(le32_to_cpu(v2->i_u.rdev)); - else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) - inode->i_rdev = 0; - else - goto bogusimode; - - i_uid_write(inode, le32_to_cpu(v2->i_uid)); - i_gid_write(inode, le32_to_cpu(v2->i_gid)); - set_nlink(inode, le32_to_cpu(v2->i_nlink)); - - /* ns timestamp */ - inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = - le64_to_cpu(v2->i_ctime); - inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = - le32_to_cpu(v2->i_ctime_nsec); - - inode->i_size = le64_to_cpu(v2->i_size); - - /* total blocks for compressed files */ - if (is_inode_layout_compression(inode)) - nblks = le32_to_cpu(v2->i_u.compressed_blocks); - } else if (__inode_version(advise) == EROFS_INODE_LAYOUT_V1) { - struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - - vi->inode_isize = sizeof(struct erofs_inode_v1); - vi->xattr_isize = ondisk_xattr_ibody_size(v1->i_xattr_icount); - - inode->i_mode = le16_to_cpu(v1->i_mode); - if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || - S_ISLNK(inode->i_mode)) - vi->raw_blkaddr = le32_to_cpu(v1->i_u.raw_blkaddr); - else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) - inode->i_rdev = - new_decode_dev(le32_to_cpu(v1->i_u.rdev)); - else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) - inode->i_rdev = 0; - else - goto bogusimode; - - i_uid_write(inode, le16_to_cpu(v1->i_uid)); - i_gid_write(inode, le16_to_cpu(v1->i_gid)); - set_nlink(inode, le16_to_cpu(v1->i_nlink)); - - /* use build time to derive all file time */ - inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = - sbi->build_time; - inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = - sbi->build_time_nsec; - - inode->i_size = le32_to_cpu(v1->i_size); - if (is_inode_layout_compression(inode)) - nblks = le32_to_cpu(v1->i_u.compressed_blocks); - } else { - errln("unsupported on-disk inode version %u of nid %llu", - __inode_version(advise), vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; - } - - if (!nblks) - /* measure inode.i_blocks as generic filesystems */ - inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9; - else - inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK; - return 0; - -bogusimode: - errln("bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; -} - -/* - * try_lock can be required since locking order is: - * file data(fs_inode) - * meta(bd_inode) - * but the majority of the callers is "iget", - * in that case we are pretty sure no deadlock since - * no data operations exist. 
However I tend to - * try_lock since it takes no much overhead and - * will success immediately. - */ -static int fill_inline_data(struct inode *inode, void *data, - unsigned int m_pofs) -{ - struct erofs_vnode *vi = EROFS_V(inode); - struct erofs_sb_info *sbi = EROFS_I_SB(inode); - - /* should be inode inline C */ - if (!is_inode_flat_inline(inode)) - return 0; - - /* fast symlink (following ext4) */ - if (S_ISLNK(inode->i_mode) && inode->i_size < PAGE_SIZE) { - char *lnk = erofs_kmalloc(sbi, inode->i_size + 1, GFP_KERNEL); - - if (unlikely(!lnk)) - return -ENOMEM; - - m_pofs += vi->inode_isize + vi->xattr_isize; - - /* inline symlink data shouldn't across page boundary as well */ - if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) { - kfree(lnk); - errln("inline data cross block boundary @ nid %llu", - vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } - - /* get in-page inline data */ - memcpy(lnk, data + m_pofs, inode->i_size); - lnk[inode->i_size] = '\0'; - - inode->i_link = lnk; - set_inode_fast_symlink(inode); - } - return 0; -} - -static int fill_inode(struct inode *inode, int isdir) -{ - struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - struct erofs_vnode *vi = EROFS_V(inode); - struct page *page; - void *data; - int err; - erofs_blk_t blkaddr; - unsigned int ofs; - erofs_off_t inode_loc; - - trace_erofs_fill_inode(inode, isdir); - inode_loc = iloc(sbi, vi->nid); - blkaddr = erofs_blknr(inode_loc); - ofs = erofs_blkoff(inode_loc); - - debugln("%s, reading inode nid %llu at %u of blkaddr %u", - __func__, vi->nid, ofs, blkaddr); - - page = erofs_get_meta_page(inode->i_sb, blkaddr, isdir); - - if (IS_ERR(page)) { - errln("failed to get inode (nid: %llu) page, err %ld", - vi->nid, PTR_ERR(page)); - return PTR_ERR(page); - } - - DBG_BUGON(!PageUptodate(page)); - data = page_address(page); - - err = read_inode(inode, data + ofs); - if (!err) { - /* setup the new inode */ - if (S_ISREG(inode->i_mode)) { - inode->i_op = &erofs_generic_iops; - inode->i_fop = &generic_ro_fops; - } else if (S_ISDIR(inode->i_mode)) { - inode->i_op = &erofs_dir_iops; - inode->i_fop = &erofs_dir_fops; - } else if (S_ISLNK(inode->i_mode)) { - /* by default, page_get_link is used for symlink */ - inode->i_op = &erofs_symlink_iops; - inode_nohighmem(inode); - } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || - S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { - inode->i_op = &erofs_generic_iops; - init_special_inode(inode, inode->i_mode, inode->i_rdev); - goto out_unlock; - } else { - err = -EFSCORRUPTED; - goto out_unlock; - } - - if (is_inode_layout_compression(inode)) { - err = z_erofs_fill_inode(inode); - goto out_unlock; - } - - inode->i_mapping->a_ops = &erofs_raw_access_aops; - - /* fill last page if inline data is available */ - err = fill_inline_data(inode, data, ofs); - } - -out_unlock: - unlock_page(page); - put_page(page); - return err; -} - -/* - * erofs nid is 64bits, but i_ino is 'unsigned long', therefore - * we should do more for 32-bit platform to find the right inode. 
- */ -#if BITS_PER_LONG == 32 -static int erofs_ilookup_test_actor(struct inode *inode, void *opaque) -{ - const erofs_nid_t nid = *(erofs_nid_t *)opaque; - - return EROFS_V(inode)->nid == nid; -} - -static int erofs_iget_set_actor(struct inode *inode, void *opaque) -{ - const erofs_nid_t nid = *(erofs_nid_t *)opaque; - - inode->i_ino = erofs_inode_hash(nid); - return 0; -} -#endif - -static inline struct inode *erofs_iget_locked(struct super_block *sb, - erofs_nid_t nid) -{ - const unsigned long hashval = erofs_inode_hash(nid); - -#if BITS_PER_LONG >= 64 - /* it is safe to use iget_locked for >= 64-bit platform */ - return iget_locked(sb, hashval); -#else - return iget5_locked(sb, hashval, erofs_ilookup_test_actor, - erofs_iget_set_actor, &nid); -#endif -} - -struct inode *erofs_iget(struct super_block *sb, - erofs_nid_t nid, - bool isdir) -{ - struct inode *inode = erofs_iget_locked(sb, nid); - - if (unlikely(!inode)) - return ERR_PTR(-ENOMEM); - - if (inode->i_state & I_NEW) { - int err; - struct erofs_vnode *vi = EROFS_V(inode); - - vi->nid = nid; - - err = fill_inode(inode, isdir); - if (likely(!err)) - unlock_new_inode(inode); - else { - iget_failed(inode); - inode = ERR_PTR(err); - } - } - return inode; -} - -int erofs_getattr(const struct path *path, struct kstat *stat, - u32 request_mask, unsigned int query_flags) -{ - struct inode *const inode = d_inode(path->dentry); - - if (is_inode_layout_compression(inode)) - stat->attributes |= STATX_ATTR_COMPRESSED; - - stat->attributes |= STATX_ATTR_IMMUTABLE; - stat->attributes_mask |= (STATX_ATTR_COMPRESSED | - STATX_ATTR_IMMUTABLE); - - generic_fillattr(inode, stat); - return 0; -} - -const struct inode_operations erofs_generic_iops = { - .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR - .listxattr = erofs_listxattr, -#endif - .get_acl = erofs_get_acl, -}; - -const struct inode_operations erofs_symlink_iops = { - .get_link = page_get_link, - .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR - .listxattr = erofs_listxattr, -#endif - .get_acl = erofs_get_acl, -}; - -const struct inode_operations erofs_fast_symlink_iops = { - .get_link = simple_get_link, - .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR - .listxattr = erofs_listxattr, -#endif - .get_acl = erofs_get_acl, -}; - diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h deleted file mode 100644 index 0e8d58546c52..000000000000 --- a/drivers/staging/erofs/internal.h +++ /dev/null @@ -1,554 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/internal.h - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_INTERNAL_H -#define __EROFS_INTERNAL_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include "erofs_fs.h" - -/* redefine pr_fmt "erofs: " */ -#undef pr_fmt -#define pr_fmt(fmt) "erofs: " fmt - -#define errln(x, ...) pr_err(x "\n", ##__VA_ARGS__) -#define infoln(x, ...) pr_info(x "\n", ##__VA_ARGS__) -#ifdef CONFIG_EROFS_FS_DEBUG -#define debugln(x, ...) pr_debug(x "\n", ##__VA_ARGS__) -#define DBG_BUGON BUG_ON -#else -#define debugln(x, ...) 
((void)0) -#define DBG_BUGON(x) ((void)(x)) -#endif /* !CONFIG_EROFS_FS_DEBUG */ - -enum { - FAULT_KMALLOC, - FAULT_READ_IO, - FAULT_MAX, -}; - -#ifdef CONFIG_EROFS_FAULT_INJECTION -extern const char *erofs_fault_name[FAULT_MAX]; -#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type))) - -struct erofs_fault_info { - atomic_t inject_ops; - unsigned int inject_rate; - unsigned int inject_type; -}; -#endif /* CONFIG_EROFS_FAULT_INJECTION */ - -/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */ -#define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1 - -typedef u64 erofs_nid_t; -typedef u64 erofs_off_t; -/* data type for filesystem-wide blocks number */ -typedef u32 erofs_blk_t; - -struct erofs_sb_info { -#ifdef CONFIG_EROFS_FS_ZIP - /* list for all registered superblocks, mainly for shrinker */ - struct list_head list; - struct mutex umount_mutex; - - /* the dedicated workstation for compression */ - struct radix_tree_root workstn_tree; - - /* threshold for decompression synchronously */ - unsigned int max_sync_decompress_pages; - - unsigned int shrinker_run_no; - - /* current strategy of how to use managed cache */ - unsigned char cache_strategy; - - /* pseudo inode to manage cached pages */ - struct inode *managed_cache; -#endif /* CONFIG_EROFS_FS_ZIP */ - u32 blocks; - u32 meta_blkaddr; -#ifdef CONFIG_EROFS_FS_XATTR - u32 xattr_blkaddr; -#endif - - /* inode slot unit size in bit shift */ - unsigned char islotbits; - - u32 build_time_nsec; - u64 build_time; - - /* what we really care is nid, rather than ino.. */ - erofs_nid_t root_nid; - /* used for statfs, f_files - f_favail */ - u64 inos; - - u8 uuid[16]; /* 128-bit uuid for volume */ - u8 volume_name[16]; /* volume name */ - u32 requirements; - - unsigned int mount_opt; - -#ifdef CONFIG_EROFS_FAULT_INJECTION - struct erofs_fault_info fault_info; /* For fault injection */ -#endif -}; - -#ifdef CONFIG_EROFS_FAULT_INJECTION -#define erofs_show_injection_info(type) \ - infoln("inject %s in %s of %pS", erofs_fault_name[type], \ - __func__, __builtin_return_address(0)) - -static inline bool time_to_inject(struct erofs_sb_info *sbi, int type) -{ - struct erofs_fault_info *ffi = &sbi->fault_info; - - if (!ffi->inject_rate) - return false; - - if (!IS_FAULT_SET(ffi, type)) - return false; - - atomic_inc(&ffi->inject_ops); - if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { - atomic_set(&ffi->inject_ops, 0); - return true; - } - return false; -} -#else -static inline bool time_to_inject(struct erofs_sb_info *sbi, int type) -{ - return false; -} - -static inline void erofs_show_injection_info(int type) -{ -} -#endif /* !CONFIG_EROFS_FAULT_INJECTION */ - -static inline void *erofs_kmalloc(struct erofs_sb_info *sbi, - size_t size, gfp_t flags) -{ - if (time_to_inject(sbi, FAULT_KMALLOC)) { - erofs_show_injection_info(FAULT_KMALLOC); - return NULL; - } - return kmalloc(size, flags); -} - -#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info) -#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info) - -/* Mount flags set via mount options or defaults */ -#define EROFS_MOUNT_XATTR_USER 0x00000010 -#define EROFS_MOUNT_POSIX_ACL 0x00000020 -#define EROFS_MOUNT_FAULT_INJECTION 0x00000040 - -#define clear_opt(sbi, option) ((sbi)->mount_opt &= ~EROFS_MOUNT_##option) -#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option) -#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option) - -#ifdef CONFIG_EROFS_FS_ZIP -enum { - EROFS_ZIP_CACHE_DISABLED, - EROFS_ZIP_CACHE_READAHEAD, - 
EROFS_ZIP_CACHE_READAROUND -}; - -#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL) - -/* basic unit of the workstation of a super_block */ -struct erofs_workgroup { - /* the workgroup index in the workstation */ - pgoff_t index; - - /* overall workgroup reference count */ - atomic_t refcount; -}; - -#if defined(CONFIG_SMP) -static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp, - int val) -{ - preempt_disable(); - if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) { - preempt_enable(); - return false; - } - return true; -} - -static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp, - int orig_val) -{ - /* - * other observers should notice all modifications - * in the freezing period. - */ - smp_mb(); - atomic_set(&grp->refcount, orig_val); - preempt_enable(); -} - -static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) -{ - return atomic_cond_read_relaxed(&grp->refcount, - VAL != EROFS_LOCKED_MAGIC); -} -#else -static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp, - int val) -{ - preempt_disable(); - /* no need to spin on UP platforms, let's just disable preemption. */ - if (val != atomic_read(&grp->refcount)) { - preempt_enable(); - return false; - } - return true; -} - -static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp, - int orig_val) -{ - preempt_enable(); -} - -static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp) -{ - int v = atomic_read(&grp->refcount); - - /* workgroup is never freezed on uniprocessor systems */ - DBG_BUGON(v == EROFS_LOCKED_MAGIC); - return v; -} -#endif /* !CONFIG_SMP */ - -/* hard limit of pages per compressed cluster */ -#define Z_EROFS_CLUSTER_MAX_PAGES (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT) -#define EROFS_PCPUBUF_NR_PAGES Z_EROFS_CLUSTER_MAX_PAGES -#else -#define EROFS_PCPUBUF_NR_PAGES 0 -#endif /* !CONFIG_EROFS_FS_ZIP */ - -/* we strictly follow PAGE_SIZE and no buffer head yet */ -#define LOG_BLOCK_SIZE PAGE_SHIFT - -#undef LOG_SECTORS_PER_BLOCK -#define LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) - -#undef SECTORS_PER_BLOCK -#define SECTORS_PER_BLOCK (1 << SECTORS_PER_BLOCK) - -#define EROFS_BLKSIZ (1 << LOG_BLOCK_SIZE) - -#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ) -#error erofs cannot be used in this platform -#endif - -#define EROFS_IO_MAX_RETRIES_NOFAIL 5 - -#define ROOT_NID(sb) ((sb)->root_nid) - -#define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ) -#define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ) -#define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ) - -static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid) -{ - return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits); -} - -/* atomic flag definitions */ -#define EROFS_V_EA_INITED_BIT 0 -#define EROFS_V_Z_INITED_BIT 1 - -/* bitlock definitions (arranged in reverse order) */ -#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1) -#define EROFS_V_BL_Z_BIT (BITS_PER_LONG - 2) - -struct erofs_vnode { - erofs_nid_t nid; - - /* atomic flags (including bitlocks) */ - unsigned long flags; - - unsigned char datamode; - unsigned char inode_isize; - unsigned short xattr_isize; - - unsigned int xattr_shared_count; - unsigned int *xattr_shared_xattrs; - - union { - erofs_blk_t raw_blkaddr; -#ifdef CONFIG_EROFS_FS_ZIP - struct { - unsigned short z_advise; - unsigned char z_algorithmtype[2]; - unsigned char z_logical_clusterbits; - unsigned char z_physical_clusterbits[2]; - }; -#endif /* CONFIG_EROFS_FS_ZIP */ - }; - /* the corresponding vfs 
inode */ - struct inode vfs_inode; -}; - -#define EROFS_V(ptr) \ - container_of(ptr, struct erofs_vnode, vfs_inode) - -#define __inode_advise(x, bit, bits) \ - (((x) >> (bit)) & ((1 << (bits)) - 1)) - -#define __inode_version(advise) \ - __inode_advise(advise, EROFS_I_VERSION_BIT, \ - EROFS_I_VERSION_BITS) - -#define __inode_data_mapping(advise) \ - __inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\ - EROFS_I_DATA_MAPPING_BITS) - -static inline unsigned long inode_datablocks(struct inode *inode) -{ - /* since i_size cannot be changed */ - return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ); -} - -static inline bool is_inode_layout_compression(struct inode *inode) -{ - return erofs_inode_is_data_compressed(EROFS_V(inode)->datamode); -} - -static inline bool is_inode_flat_inline(struct inode *inode) -{ - return EROFS_V(inode)->datamode == EROFS_INODE_FLAT_INLINE; -} - -extern const struct super_operations erofs_sops; - -extern const struct address_space_operations erofs_raw_access_aops; -#ifdef CONFIG_EROFS_FS_ZIP -extern const struct address_space_operations z_erofs_vle_normalaccess_aops; -#endif - -/* - * Logical to physical block mapping, used by erofs_map_blocks() - * - * Different with other file systems, it is used for 2 access modes: - * - * 1) RAW access mode: - * - * Users pass a valid (m_lblk, m_lofs -- usually 0) pair, - * and get the valid m_pblk, m_pofs and the longest m_len(in bytes). - * - * Note that m_lblk in the RAW access mode refers to the number of - * the compressed ondisk block rather than the uncompressed - * in-memory block for the compressed file. - * - * m_pofs equals to m_lofs except for the inline data page. - * - * 2) Normal access mode: - * - * If the inode is not compressed, it has no difference with - * the RAW access mode. However, if the inode is compressed, - * users should pass a valid (m_lblk, m_lofs) pair, and get - * the needed m_pblk, m_pofs, m_len to get the compressed data - * and the updated m_lblk, m_lofs which indicates the start - * of the corresponding uncompressed data in the file. - */ -enum { - BH_Zipped = BH_PrivateStart, - BH_FullMapped, -}; - -/* Has a disk mapping */ -#define EROFS_MAP_MAPPED (1 << BH_Mapped) -/* Located in metadata (could be copied from bd_inode) */ -#define EROFS_MAP_META (1 << BH_Meta) -/* The extent has been compressed */ -#define EROFS_MAP_ZIPPED (1 << BH_Zipped) -/* The length of extent is full */ -#define EROFS_MAP_FULL_MAPPED (1 << BH_FullMapped) - -struct erofs_map_blocks { - erofs_off_t m_pa, m_la; - u64 m_plen, m_llen; - - unsigned int m_flags; - - struct page *mpage; -}; - -/* Flags used by erofs_map_blocks() */ -#define EROFS_GET_BLOCKS_RAW 0x0001 - -/* zmap.c */ -#ifdef CONFIG_EROFS_FS_ZIP -int z_erofs_fill_inode(struct inode *inode); -int z_erofs_map_blocks_iter(struct inode *inode, - struct erofs_map_blocks *map, - int flags); -#else -static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; } -static inline int z_erofs_map_blocks_iter(struct inode *inode, - struct erofs_map_blocks *map, - int flags) -{ - return -EOPNOTSUPP; -} -#endif /* !CONFIG_EROFS_FS_ZIP */ - -/* data.c */ -static inline struct bio *erofs_grab_bio(struct super_block *sb, - erofs_blk_t blkaddr, - unsigned int nr_pages, - void *bi_private, bio_end_io_t endio, - bool nofail) -{ - const gfp_t gfp = GFP_NOIO; - struct bio *bio; - - do { - if (nr_pages == 1) { - bio = bio_alloc(gfp | (nofail ? 
__GFP_NOFAIL : 0), 1); - if (unlikely(!bio)) { - DBG_BUGON(nofail); - return ERR_PTR(-ENOMEM); - } - break; - } - bio = bio_alloc(gfp, nr_pages); - nr_pages /= 2; - } while (unlikely(!bio)); - - bio->bi_end_io = endio; - bio_set_dev(bio, sb->s_bdev); - bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK; - bio->bi_private = bi_private; - return bio; -} - -static inline void __submit_bio(struct bio *bio, unsigned int op, - unsigned int op_flags) -{ - bio_set_op_attrs(bio, op, op_flags); - submit_bio(bio); -} - -struct page *__erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr, - bool prio, bool nofail); - -static inline struct page *erofs_get_meta_page(struct super_block *sb, - erofs_blk_t blkaddr, bool prio) -{ - return __erofs_get_meta_page(sb, blkaddr, prio, false); -} - -int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int); - -static inline struct page *erofs_get_inline_page(struct inode *inode, - erofs_blk_t blkaddr) -{ - return erofs_get_meta_page(inode->i_sb, blkaddr, - S_ISDIR(inode->i_mode)); -} - -/* inode.c */ -static inline unsigned long erofs_inode_hash(erofs_nid_t nid) -{ -#if BITS_PER_LONG == 32 - return (nid >> 32) ^ (nid & 0xffffffff); -#else - return nid; -#endif -} - -extern const struct inode_operations erofs_generic_iops; -extern const struct inode_operations erofs_symlink_iops; -extern const struct inode_operations erofs_fast_symlink_iops; - -static inline void set_inode_fast_symlink(struct inode *inode) -{ - inode->i_op = &erofs_fast_symlink_iops; -} - -static inline bool is_inode_fast_symlink(struct inode *inode) -{ - return inode->i_op == &erofs_fast_symlink_iops; -} - -struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir); -int erofs_getattr(const struct path *path, struct kstat *stat, - u32 request_mask, unsigned int query_flags); - -/* namei.c */ -extern const struct inode_operations erofs_dir_iops; - -int erofs_namei(struct inode *dir, struct qstr *name, - erofs_nid_t *nid, unsigned int *d_type); - -/* dir.c */ -extern const struct file_operations erofs_dir_fops; - -/* utils.c / zdata.c */ -struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail); - -#if (EROFS_PCPUBUF_NR_PAGES > 0) -void *erofs_get_pcpubuf(unsigned int pagenr); -#define erofs_put_pcpubuf(buf) do { \ - (void)&(buf); \ - preempt_enable(); \ -} while (0) -#else -static inline void *erofs_get_pcpubuf(unsigned int pagenr) -{ - return ERR_PTR(-EOPNOTSUPP); -} - -#define erofs_put_pcpubuf(buf) do {} while (0) -#endif - -#ifdef CONFIG_EROFS_FS_ZIP -int erofs_workgroup_put(struct erofs_workgroup *grp); -struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, - pgoff_t index, bool *tag); -int erofs_register_workgroup(struct super_block *sb, - struct erofs_workgroup *grp, bool tag); -void erofs_workgroup_free_rcu(struct erofs_workgroup *grp); -void erofs_shrinker_register(struct super_block *sb); -void erofs_shrinker_unregister(struct super_block *sb); -int __init erofs_init_shrinker(void); -void erofs_exit_shrinker(void); -int __init z_erofs_init_zip_subsystem(void); -void z_erofs_exit_zip_subsystem(void); -int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, - struct erofs_workgroup *egrp); -int erofs_try_to_free_cached_page(struct address_space *mapping, - struct page *page); -#else -static inline void erofs_shrinker_register(struct super_block *sb) {} -static inline void erofs_shrinker_unregister(struct super_block *sb) {} -static inline int erofs_init_shrinker(void) { return 0; } -static 
inline void erofs_exit_shrinker(void) {} -static inline int z_erofs_init_zip_subsystem(void) { return 0; } -static inline void z_erofs_exit_zip_subsystem(void) {} -#endif /* !CONFIG_EROFS_FS_ZIP */ - -#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ - -#endif /* __EROFS_INTERNAL_H */ - diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c deleted file mode 100644 index 8334a910acef..000000000000 --- a/drivers/staging/erofs/namei.c +++ /dev/null @@ -1,253 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/namei.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "xattr.h" - -#include - -struct erofs_qstr { - const unsigned char *name; - const unsigned char *end; -}; - -/* based on the end of qn is accurate and it must have the trailing '\0' */ -static inline int dirnamecmp(const struct erofs_qstr *qn, - const struct erofs_qstr *qd, - unsigned int *matched) -{ - unsigned int i = *matched; - - /* - * on-disk error, let's only BUG_ON in the debugging mode. - * otherwise, it will return 1 to just skip the invalid name - * and go on (in consideration of the lookup performance). - */ - DBG_BUGON(qd->name > qd->end); - - /* qd could not have trailing '\0' */ - /* However it is absolutely safe if < qd->end */ - while (qd->name + i < qd->end && qd->name[i] != '\0') { - if (qn->name[i] != qd->name[i]) { - *matched = i; - return qn->name[i] > qd->name[i] ? 1 : -1; - } - ++i; - } - *matched = i; - /* See comments in __d_alloc on the terminating NUL character */ - return qn->name[i] == '\0' ? 0 : 1; -} - -#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1)) - -static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name, - u8 *data, - unsigned int dirblksize, - const int ndirents) -{ - int head, back; - unsigned int startprfx, endprfx; - struct erofs_dirent *const de = (struct erofs_dirent *)data; - - /* since the 1st dirent has been evaluated previously */ - head = 1; - back = ndirents - 1; - startprfx = endprfx = 0; - - while (head <= back) { - const int mid = head + (back - head) / 2; - const int nameoff = nameoff_from_disk(de[mid].nameoff, - dirblksize); - unsigned int matched = min(startprfx, endprfx); - struct erofs_qstr dname = { - .name = data + nameoff, - .end = unlikely(mid >= ndirents - 1) ? 
- data + dirblksize : - data + nameoff_from_disk(de[mid + 1].nameoff, - dirblksize) - }; - - /* string comparison without already matched prefix */ - int ret = dirnamecmp(name, &dname, &matched); - - if (unlikely(!ret)) { - return de + mid; - } else if (ret > 0) { - head = mid + 1; - startprfx = matched; - } else { - back = mid - 1; - endprfx = matched; - } - } - - return ERR_PTR(-ENOENT); -} - -static struct page *find_target_block_classic(struct inode *dir, - struct erofs_qstr *name, - int *_ndirents) -{ - unsigned int startprfx, endprfx; - int head, back; - struct address_space *const mapping = dir->i_mapping; - struct page *candidate = ERR_PTR(-ENOENT); - - startprfx = endprfx = 0; - head = 0; - back = inode_datablocks(dir) - 1; - - while (head <= back) { - const int mid = head + (back - head) / 2; - struct page *page = read_mapping_page(mapping, mid, NULL); - - if (!IS_ERR(page)) { - struct erofs_dirent *de = kmap_atomic(page); - const int nameoff = nameoff_from_disk(de->nameoff, - EROFS_BLKSIZ); - const int ndirents = nameoff / sizeof(*de); - int diff; - unsigned int matched; - struct erofs_qstr dname; - - if (unlikely(!ndirents)) { - kunmap_atomic(de); - put_page(page); - errln("corrupted dir block %d @ nid %llu", - mid, EROFS_V(dir)->nid); - DBG_BUGON(1); - page = ERR_PTR(-EFSCORRUPTED); - goto out; - } - - matched = min(startprfx, endprfx); - - dname.name = (u8 *)de + nameoff; - if (ndirents == 1) - dname.end = (u8 *)de + EROFS_BLKSIZ; - else - dname.end = (u8 *)de + - nameoff_from_disk(de[1].nameoff, - EROFS_BLKSIZ); - - /* string comparison without already matched prefix */ - diff = dirnamecmp(name, &dname, &matched); - kunmap_atomic(de); - - if (unlikely(!diff)) { - *_ndirents = 0; - goto out; - } else if (diff > 0) { - head = mid + 1; - startprfx = matched; - - if (!IS_ERR(candidate)) - put_page(candidate); - candidate = page; - *_ndirents = ndirents; - } else { - put_page(page); - - back = mid - 1; - endprfx = matched; - } - continue; - } -out: /* free if the candidate is valid */ - if (!IS_ERR(candidate)) - put_page(candidate); - return page; - } - return candidate; -} - -int erofs_namei(struct inode *dir, - struct qstr *name, - erofs_nid_t *nid, unsigned int *d_type) -{ - int ndirents; - struct page *page; - void *data; - struct erofs_dirent *de; - struct erofs_qstr qn; - - if (unlikely(!dir->i_size)) - return -ENOENT; - - qn.name = name->name; - qn.end = name->name + name->len; - - ndirents = 0; - page = find_target_block_classic(dir, &qn, &ndirents); - - if (IS_ERR(page)) - return PTR_ERR(page); - - data = kmap_atomic(page); - /* the target page has been mapped */ - if (ndirents) - de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents); - else - de = (struct erofs_dirent *)data; - - if (!IS_ERR(de)) { - *nid = le64_to_cpu(de->nid); - *d_type = de->file_type; - } - - kunmap_atomic(data); - put_page(page); - - return PTR_ERR_OR_ZERO(de); -} - -/* NOTE: i_mutex is already held by vfs */ -static struct dentry *erofs_lookup(struct inode *dir, - struct dentry *dentry, - unsigned int flags) -{ - int err; - erofs_nid_t nid; - unsigned int d_type; - struct inode *inode; - - DBG_BUGON(!d_really_is_negative(dentry)); - /* dentry must be unhashed in lookup, no need to worry about */ - DBG_BUGON(!d_unhashed(dentry)); - - trace_erofs_lookup(dir, dentry, flags); - - /* file name exceeds fs limit */ - if (unlikely(dentry->d_name.len > EROFS_NAME_LEN)) - return ERR_PTR(-ENAMETOOLONG); - - /* false uninitialized warnings on gcc 4.8.x */ - err = erofs_namei(dir, &dentry->d_name, &nid, 
&d_type); - - if (err == -ENOENT) { - /* negative dentry */ - inode = NULL; - } else if (unlikely(err)) { - inode = ERR_PTR(err); - } else { - debugln("%s, %s (nid %llu) found, d_type %u", __func__, - dentry->d_name.name, nid, d_type); - inode = erofs_iget(dir->i_sb, nid, d_type == FT_DIR); - } - return d_splice_alias(inode, dentry); -} - -const struct inode_operations erofs_dir_iops = { - .lookup = erofs_lookup, - .getattr = erofs_getattr, -#ifdef CONFIG_EROFS_FS_XATTR - .listxattr = erofs_listxattr, -#endif - .get_acl = erofs_get_acl, -}; - diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c deleted file mode 100644 index 2da471010a86..000000000000 --- a/drivers/staging/erofs/super.c +++ /dev/null @@ -1,671 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/super.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include -#include -#include -#include -#include -#include "xattr.h" - -#define CREATE_TRACE_POINTS -#include - -static struct kmem_cache *erofs_inode_cachep __read_mostly; - -static void init_once(void *ptr) -{ - struct erofs_vnode *vi = ptr; - - inode_init_once(&vi->vfs_inode); -} - -static int __init erofs_init_inode_cache(void) -{ - erofs_inode_cachep = kmem_cache_create("erofs_inode", - sizeof(struct erofs_vnode), 0, - SLAB_RECLAIM_ACCOUNT, - init_once); - - return erofs_inode_cachep ? 0 : -ENOMEM; -} - -static void erofs_exit_inode_cache(void) -{ - kmem_cache_destroy(erofs_inode_cachep); -} - -static struct inode *alloc_inode(struct super_block *sb) -{ - struct erofs_vnode *vi = - kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL); - - if (!vi) - return NULL; - - /* zero out everything except vfs_inode */ - memset(vi, 0, offsetof(struct erofs_vnode, vfs_inode)); - return &vi->vfs_inode; -} - -static void free_inode(struct inode *inode) -{ - struct erofs_vnode *vi = EROFS_V(inode); - - /* be careful RCU symlink path (see ext4_inode_info->i_data)! 
*/ - if (is_inode_fast_symlink(inode)) - kfree(inode->i_link); - - kfree(vi->xattr_shared_xattrs); - - kmem_cache_free(erofs_inode_cachep, vi); -} - -static bool check_layout_compatibility(struct super_block *sb, - struct erofs_super_block *layout) -{ - const unsigned int requirements = le32_to_cpu(layout->requirements); - - EROFS_SB(sb)->requirements = requirements; - - /* check if current kernel meets all mandatory requirements */ - if (requirements & (~EROFS_ALL_REQUIREMENTS)) { - errln("unidentified requirements %x, please upgrade kernel version", - requirements & ~EROFS_ALL_REQUIREMENTS); - return false; - } - return true; -} - -static int superblock_read(struct super_block *sb) -{ - struct erofs_sb_info *sbi; - struct buffer_head *bh; - struct erofs_super_block *layout; - unsigned int blkszbits; - int ret; - - bh = sb_bread(sb, 0); - - if (!bh) { - errln("cannot read erofs superblock"); - return -EIO; - } - - sbi = EROFS_SB(sb); - layout = (struct erofs_super_block *)((u8 *)bh->b_data - + EROFS_SUPER_OFFSET); - - ret = -EINVAL; - if (le32_to_cpu(layout->magic) != EROFS_SUPER_MAGIC_V1) { - errln("cannot find valid erofs superblock"); - goto out; - } - - blkszbits = layout->blkszbits; - /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */ - if (unlikely(blkszbits != LOG_BLOCK_SIZE)) { - errln("blksize %u isn't supported on this platform", - 1 << blkszbits); - goto out; - } - - if (!check_layout_compatibility(sb, layout)) - goto out; - - sbi->blocks = le32_to_cpu(layout->blocks); - sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr); -#ifdef CONFIG_EROFS_FS_XATTR - sbi->xattr_blkaddr = le32_to_cpu(layout->xattr_blkaddr); -#endif - sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1; - sbi->root_nid = le16_to_cpu(layout->root_nid); - sbi->inos = le64_to_cpu(layout->inos); - - sbi->build_time = le64_to_cpu(layout->build_time); - sbi->build_time_nsec = le32_to_cpu(layout->build_time_nsec); - - memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid)); - - ret = strscpy(sbi->volume_name, layout->volume_name, - sizeof(layout->volume_name)); - if (ret < 0) { /* -E2BIG */ - errln("bad volume name without NIL terminator"); - ret = -EFSCORRUPTED; - goto out; - } - ret = 0; -out: - brelse(bh); - return ret; -} - -#ifdef CONFIG_EROFS_FAULT_INJECTION -const char *erofs_fault_name[FAULT_MAX] = { - [FAULT_KMALLOC] = "kmalloc", - [FAULT_READ_IO] = "read IO error", -}; - -static void __erofs_build_fault_attr(struct erofs_sb_info *sbi, - unsigned int rate) -{ - struct erofs_fault_info *ffi = &sbi->fault_info; - - if (rate) { - atomic_set(&ffi->inject_ops, 0); - ffi->inject_rate = rate; - ffi->inject_type = (1 << FAULT_MAX) - 1; - } else { - memset(ffi, 0, sizeof(struct erofs_fault_info)); - } - - set_opt(sbi, FAULT_INJECTION); -} - -static int erofs_build_fault_attr(struct erofs_sb_info *sbi, - substring_t *args) -{ - int rate = 0; - - if (args->from && match_int(args, &rate)) - return -EINVAL; - - __erofs_build_fault_attr(sbi, rate); - return 0; -} - -static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi) -{ - return sbi->fault_info.inject_rate; -} -#else -static void __erofs_build_fault_attr(struct erofs_sb_info *sbi, - unsigned int rate) -{ -} - -static int erofs_build_fault_attr(struct erofs_sb_info *sbi, - substring_t *args) -{ - infoln("fault_injection options not supported"); - return 0; -} - -static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi) -{ - return 0; -} -#endif - -#ifdef CONFIG_EROFS_FS_ZIP -static int erofs_build_cache_strategy(struct 
erofs_sb_info *sbi, - substring_t *args) -{ - const char *cs = match_strdup(args); - int err = 0; - - if (!cs) { - errln("Not enough memory to store cache strategy"); - return -ENOMEM; - } - - if (!strcmp(cs, "disabled")) { - sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED; - } else if (!strcmp(cs, "readahead")) { - sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD; - } else if (!strcmp(cs, "readaround")) { - sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; - } else { - errln("Unrecognized cache strategy \"%s\"", cs); - err = -EINVAL; - } - kfree(cs); - return err; -} -#else -static int erofs_build_cache_strategy(struct erofs_sb_info *sbi, - substring_t *args) -{ - infoln("EROFS compression is disabled, so cache strategy is ignored"); - return 0; -} -#endif - -/* set up default EROFS parameters */ -static void default_options(struct erofs_sb_info *sbi) -{ -#ifdef CONFIG_EROFS_FS_ZIP - sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; - sbi->max_sync_decompress_pages = 3; -#endif -#ifdef CONFIG_EROFS_FS_XATTR - set_opt(sbi, XATTR_USER); -#endif -#ifdef CONFIG_EROFS_FS_POSIX_ACL - set_opt(sbi, POSIX_ACL); -#endif -} - -enum { - Opt_user_xattr, - Opt_nouser_xattr, - Opt_acl, - Opt_noacl, - Opt_fault_injection, - Opt_cache_strategy, - Opt_err -}; - -static match_table_t erofs_tokens = { - {Opt_user_xattr, "user_xattr"}, - {Opt_nouser_xattr, "nouser_xattr"}, - {Opt_acl, "acl"}, - {Opt_noacl, "noacl"}, - {Opt_fault_injection, "fault_injection=%u"}, - {Opt_cache_strategy, "cache_strategy=%s"}, - {Opt_err, NULL} -}; - -static int parse_options(struct super_block *sb, char *options) -{ - substring_t args[MAX_OPT_ARGS]; - char *p; - int err; - - if (!options) - return 0; - - while ((p = strsep(&options, ","))) { - int token; - - if (!*p) - continue; - - args[0].to = args[0].from = NULL; - token = match_token(p, erofs_tokens, args); - - switch (token) { -#ifdef CONFIG_EROFS_FS_XATTR - case Opt_user_xattr: - set_opt(EROFS_SB(sb), XATTR_USER); - break; - case Opt_nouser_xattr: - clear_opt(EROFS_SB(sb), XATTR_USER); - break; -#else - case Opt_user_xattr: - infoln("user_xattr options not supported"); - break; - case Opt_nouser_xattr: - infoln("nouser_xattr options not supported"); - break; -#endif -#ifdef CONFIG_EROFS_FS_POSIX_ACL - case Opt_acl: - set_opt(EROFS_SB(sb), POSIX_ACL); - break; - case Opt_noacl: - clear_opt(EROFS_SB(sb), POSIX_ACL); - break; -#else - case Opt_acl: - infoln("acl options not supported"); - break; - case Opt_noacl: - infoln("noacl options not supported"); - break; -#endif - case Opt_fault_injection: - err = erofs_build_fault_attr(EROFS_SB(sb), args); - if (err) - return err; - break; - case Opt_cache_strategy: - err = erofs_build_cache_strategy(EROFS_SB(sb), args); - if (err) - return err; - break; - default: - errln("Unrecognized mount option \"%s\" or missing value", p); - return -EINVAL; - } - } - return 0; -} - -#ifdef CONFIG_EROFS_FS_ZIP -static const struct address_space_operations managed_cache_aops; - -static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask) -{ - int ret = 1; /* 0 - busy */ - struct address_space *const mapping = page->mapping; - - DBG_BUGON(!PageLocked(page)); - DBG_BUGON(mapping->a_ops != &managed_cache_aops); - - if (PagePrivate(page)) - ret = erofs_try_to_free_cached_page(mapping, page); - - return ret; -} - -static void managed_cache_invalidatepage(struct page *page, - unsigned int offset, - unsigned int length) -{ - const unsigned int stop = length + offset; - - DBG_BUGON(!PageLocked(page)); - - /* Check for potential overflow in 
debug mode */ - DBG_BUGON(stop > PAGE_SIZE || stop < length); - - if (offset == 0 && stop == PAGE_SIZE) - while (!managed_cache_releasepage(page, GFP_NOFS)) - cond_resched(); -} - -static const struct address_space_operations managed_cache_aops = { - .releasepage = managed_cache_releasepage, - .invalidatepage = managed_cache_invalidatepage, -}; - -static int erofs_init_managed_cache(struct super_block *sb) -{ - struct erofs_sb_info *const sbi = EROFS_SB(sb); - struct inode *const inode = new_inode(sb); - - if (unlikely(!inode)) - return -ENOMEM; - - set_nlink(inode, 1); - inode->i_size = OFFSET_MAX; - - inode->i_mapping->a_ops = &managed_cache_aops; - mapping_set_gfp_mask(inode->i_mapping, - GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE); - sbi->managed_cache = inode; - return 0; -} -#else -static int erofs_init_managed_cache(struct super_block *sb) { return 0; } -#endif - -static int erofs_fill_super(struct super_block *sb, void *data, int silent) -{ - struct inode *inode; - struct erofs_sb_info *sbi; - int err; - - infoln("fill_super, device -> %s", sb->s_id); - infoln("options -> %s", (char *)data); - - sb->s_magic = EROFS_SUPER_MAGIC; - - if (unlikely(!sb_set_blocksize(sb, EROFS_BLKSIZ))) { - errln("failed to set erofs blksize"); - return -EINVAL; - } - - sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); - if (unlikely(!sbi)) - return -ENOMEM; - - sb->s_fs_info = sbi; - err = superblock_read(sb); - if (err) - return err; - - sb->s_flags |= SB_RDONLY | SB_NOATIME; - sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_time_gran = 1; - - sb->s_op = &erofs_sops; - -#ifdef CONFIG_EROFS_FS_XATTR - sb->s_xattr = erofs_xattr_handlers; -#endif - /* set erofs default mount options */ - default_options(sbi); - - err = parse_options(sb, data); - if (unlikely(err)) - return err; - - if (!silent) - infoln("root inode @ nid %llu", ROOT_NID(sbi)); - - if (test_opt(sbi, POSIX_ACL)) - sb->s_flags |= SB_POSIXACL; - else - sb->s_flags &= ~SB_POSIXACL; - -#ifdef CONFIG_EROFS_FS_ZIP - INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC); -#endif - - /* get the root inode */ - inode = erofs_iget(sb, ROOT_NID(sbi), true); - if (IS_ERR(inode)) - return PTR_ERR(inode); - - if (unlikely(!S_ISDIR(inode->i_mode))) { - errln("rootino(nid %llu) is not a directory(i_mode %o)", - ROOT_NID(sbi), inode->i_mode); - iput(inode); - return -EINVAL; - } - - sb->s_root = d_make_root(inode); - if (unlikely(!sb->s_root)) - return -ENOMEM; - - erofs_shrinker_register(sb); - /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */ - err = erofs_init_managed_cache(sb); - if (unlikely(err)) - return err; - - if (!silent) - infoln("mounted on %s with opts: %s.", sb->s_id, (char *)data); - return 0; -} - -static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data) -{ - return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super); -} - -/* - * could be triggered after deactivate_locked_super() - * is called, thus including umount and failed to initialize. 
- */ -static void erofs_kill_sb(struct super_block *sb) -{ - struct erofs_sb_info *sbi; - - WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC); - infoln("unmounting for %s", sb->s_id); - - kill_block_super(sb); - - sbi = EROFS_SB(sb); - if (!sbi) - return; - kfree(sbi); - sb->s_fs_info = NULL; -} - -/* called when ->s_root is non-NULL */ -static void erofs_put_super(struct super_block *sb) -{ - struct erofs_sb_info *const sbi = EROFS_SB(sb); - - DBG_BUGON(!sbi); - - erofs_shrinker_unregister(sb); -#ifdef CONFIG_EROFS_FS_ZIP - iput(sbi->managed_cache); - sbi->managed_cache = NULL; -#endif -} - -static struct file_system_type erofs_fs_type = { - .owner = THIS_MODULE, - .name = "erofs", - .mount = erofs_mount, - .kill_sb = erofs_kill_sb, - .fs_flags = FS_REQUIRES_DEV, -}; -MODULE_ALIAS_FS("erofs"); - -static int __init erofs_module_init(void) -{ - int err; - - erofs_check_ondisk_layout_definitions(); - infoln("initializing erofs " EROFS_VERSION); - - err = erofs_init_inode_cache(); - if (err) - goto icache_err; - - err = erofs_init_shrinker(); - if (err) - goto shrinker_err; - - err = z_erofs_init_zip_subsystem(); - if (err) - goto zip_err; - - err = register_filesystem(&erofs_fs_type); - if (err) - goto fs_err; - - infoln("successfully to initialize erofs"); - return 0; - -fs_err: - z_erofs_exit_zip_subsystem(); -zip_err: - erofs_exit_shrinker(); -shrinker_err: - erofs_exit_inode_cache(); -icache_err: - return err; -} - -static void __exit erofs_module_exit(void) -{ - unregister_filesystem(&erofs_fs_type); - z_erofs_exit_zip_subsystem(); - erofs_exit_shrinker(); - erofs_exit_inode_cache(); - infoln("successfully finalize erofs"); -} - -/* get filesystem statistics */ -static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) -{ - struct super_block *sb = dentry->d_sb; - struct erofs_sb_info *sbi = EROFS_SB(sb); - u64 id = huge_encode_dev(sb->s_bdev->bd_dev); - - buf->f_type = sb->s_magic; - buf->f_bsize = EROFS_BLKSIZ; - buf->f_blocks = sbi->blocks; - buf->f_bfree = buf->f_bavail = 0; - - buf->f_files = ULLONG_MAX; - buf->f_ffree = ULLONG_MAX - sbi->inos; - - buf->f_namelen = EROFS_NAME_LEN; - - buf->f_fsid.val[0] = (u32)id; - buf->f_fsid.val[1] = (u32)(id >> 32); - return 0; -} - -static int erofs_show_options(struct seq_file *seq, struct dentry *root) -{ - struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb); - -#ifdef CONFIG_EROFS_FS_XATTR - if (test_opt(sbi, XATTR_USER)) - seq_puts(seq, ",user_xattr"); - else - seq_puts(seq, ",nouser_xattr"); -#endif -#ifdef CONFIG_EROFS_FS_POSIX_ACL - if (test_opt(sbi, POSIX_ACL)) - seq_puts(seq, ",acl"); - else - seq_puts(seq, ",noacl"); -#endif - if (test_opt(sbi, FAULT_INJECTION)) - seq_printf(seq, ",fault_injection=%u", - erofs_get_fault_rate(sbi)); -#ifdef CONFIG_EROFS_FS_ZIP - if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) { - seq_puts(seq, ",cache_strategy=disabled"); - } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) { - seq_puts(seq, ",cache_strategy=readahead"); - } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) { - seq_puts(seq, ",cache_strategy=readaround"); - } else { - seq_puts(seq, ",cache_strategy=(unknown)"); - DBG_BUGON(1); - } -#endif - return 0; -} - -static int erofs_remount(struct super_block *sb, int *flags, char *data) -{ - struct erofs_sb_info *sbi = EROFS_SB(sb); - unsigned int org_mnt_opt = sbi->mount_opt; - unsigned int org_inject_rate = erofs_get_fault_rate(sbi); - int err; - - DBG_BUGON(!sb_rdonly(sb)); - err = parse_options(sb, data); - if (err) - goto out; - - if (test_opt(sbi, 
POSIX_ACL)) - sb->s_flags |= SB_POSIXACL; - else - sb->s_flags &= ~SB_POSIXACL; - - *flags |= SB_RDONLY; - return 0; -out: - __erofs_build_fault_attr(sbi, org_inject_rate); - sbi->mount_opt = org_mnt_opt; - - return err; -} - -const struct super_operations erofs_sops = { - .put_super = erofs_put_super, - .alloc_inode = alloc_inode, - .free_inode = free_inode, - .statfs = erofs_statfs, - .show_options = erofs_show_options, - .remount_fs = erofs_remount, -}; - -module_init(erofs_module_init); -module_exit(erofs_module_exit); - -MODULE_DESCRIPTION("Enhanced ROM File System"); -MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc."); -MODULE_LICENSE("GPL"); - diff --git a/drivers/staging/erofs/tagptr.h b/drivers/staging/erofs/tagptr.h deleted file mode 100644 index a72897c86744..000000000000 --- a/drivers/staging/erofs/tagptr.h +++ /dev/null @@ -1,110 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * A tagged pointer implementation - * - * Copyright (C) 2018 Gao Xiang - */ -#ifndef __EROFS_FS_TAGPTR_H -#define __EROFS_FS_TAGPTR_H - -#include -#include - -/* - * the name of tagged pointer types are tagptr{1, 2, 3...}_t - * avoid directly using the internal structs __tagptr{1, 2, 3...} - */ -#define __MAKE_TAGPTR(n) \ -typedef struct __tagptr##n { \ - uintptr_t v; \ -} tagptr##n##_t; - -__MAKE_TAGPTR(1) -__MAKE_TAGPTR(2) -__MAKE_TAGPTR(3) -__MAKE_TAGPTR(4) - -#undef __MAKE_TAGPTR - -extern void __compiletime_error("bad tagptr tags") - __bad_tagptr_tags(void); - -extern void __compiletime_error("bad tagptr type") - __bad_tagptr_type(void); - -/* fix the broken usage of "#define tagptr2_t tagptr3_t" by users */ -#define __tagptr_mask_1(ptr, n) \ - __builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? \ - (1UL << (n)) - 1 : - -#define __tagptr_mask(ptr) (\ - __tagptr_mask_1(ptr, 1) ( \ - __tagptr_mask_1(ptr, 2) ( \ - __tagptr_mask_1(ptr, 3) ( \ - __tagptr_mask_1(ptr, 4) ( \ - __bad_tagptr_type(), 0))))) - -/* generate a tagged pointer from a raw value */ -#define tagptr_init(type, val) \ - ((typeof(type)){ .v = (uintptr_t)(val) }) - -/* - * directly cast a tagged pointer to the native pointer type, which - * could be used for backward compatibility of existing code. 
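To make the tagptr helpers above concrete, here is a minimal round trip in plain user-space C. The names are illustrative stand-ins for the kernel macros, assuming one tag bit folded into the low bit of an aligned pointer:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in for tagptr1_t: the low pointer bit holds the tag */
typedef struct { uintptr_t v; } tagptr1;
#define TAG1_MASK ((uintptr_t)1)

static tagptr1 fold1(void *ptr, uintptr_t tag)
{
	assert(((uintptr_t)ptr & TAG1_MASK) == 0);  /* needs >= 2-byte alignment */
	assert((tag & ~TAG1_MASK) == 0);            /* tag must fit in one bit */
	return (tagptr1){ (uintptr_t)ptr | tag };
}

static void *unfold_ptr1(tagptr1 t)      { return (void *)(t.v & ~TAG1_MASK); }
static uintptr_t unfold_tags1(tagptr1 t) { return t.v & TAG1_MASK; }

int main(void)
{
	int x = 42;
	tagptr1 t = fold1(&x, 1);  /* cf. tag_compressed_page_justfound() in zdata.c */

	printf("tag=%lu value=%d\n", (unsigned long)unfold_tags1(t),
	       *(int *)unfold_ptr1(t));
	return 0;
}

The compile-time checks in the real macros (__bad_tagptr_tags and friends) catch what the assert() calls above catch, but at build time rather than at run time.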
- */ -#define tagptr_cast_ptr(tptr) ((void *)(tptr).v) - -/* encode tagged pointers */ -#define tagptr_fold(type, ptr, _tags) ({ \ - const typeof(_tags) tags = (_tags); \ - if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(type))) \ - __bad_tagptr_tags(); \ -tagptr_init(type, (uintptr_t)(ptr) | tags); }) - -/* decode tagged pointers */ -#define tagptr_unfold_ptr(tptr) \ - ((void *)((tptr).v & ~__tagptr_mask(tptr))) - -#define tagptr_unfold_tags(tptr) \ - ((tptr).v & __tagptr_mask(tptr)) - -/* operations for the tagger pointer */ -#define tagptr_eq(_tptr1, _tptr2) ({ \ - typeof(_tptr1) tptr1 = (_tptr1); \ - typeof(_tptr2) tptr2 = (_tptr2); \ - (void)(&tptr1 == &tptr2); \ -(tptr1).v == (tptr2).v; }) - -/* lock-free CAS operation */ -#define tagptr_cmpxchg(_ptptr, _o, _n) ({ \ - typeof(_ptptr) ptptr = (_ptptr); \ - typeof(_o) o = (_o); \ - typeof(_n) n = (_n); \ - (void)(&o == &n); \ - (void)(&o == ptptr); \ -tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); }) - -/* wrap WRITE_ONCE if atomic update is needed */ -#define tagptr_replace_tags(_ptptr, tags) ({ \ - typeof(_ptptr) ptptr = (_ptptr); \ - *ptptr = tagptr_fold(*ptptr, tagptr_unfold_ptr(*ptptr), tags); \ -*ptptr; }) - -#define tagptr_set_tags(_ptptr, _tags) ({ \ - typeof(_ptptr) ptptr = (_ptptr); \ - const typeof(_tags) tags = (_tags); \ - if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \ - __bad_tagptr_tags(); \ - ptptr->v |= tags; \ -*ptptr; }) - -#define tagptr_clear_tags(_ptptr, _tags) ({ \ - typeof(_ptptr) ptptr = (_ptptr); \ - const typeof(_tags) tags = (_tags); \ - if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \ - __bad_tagptr_tags(); \ - ptptr->v &= ~tags; \ -*ptptr; }) - -#endif /* __EROFS_FS_TAGPTR_H */ - diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c deleted file mode 100644 index 814c2ee037ae..000000000000 --- a/drivers/staging/erofs/utils.c +++ /dev/null @@ -1,335 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/utils.c - * - * Copyright (C) 2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "internal.h" -#include - -struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail) -{ - struct page *page; - - if (!list_empty(pool)) { - page = lru_to_page(pool); - DBG_BUGON(page_ref_count(page) != 1); - list_del(&page->lru); - } else { - page = alloc_pages(gfp | (nofail ? 
__GFP_NOFAIL : 0), 0); - } - return page; -} - -#if (EROFS_PCPUBUF_NR_PAGES > 0) -static struct { - u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES]; -} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS]; - -void *erofs_get_pcpubuf(unsigned int pagenr) -{ - preempt_disable(); - return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE]; -} -#endif - -#ifdef CONFIG_EROFS_FS_ZIP -/* global shrink count (for all mounted EROFS instances) */ -static atomic_long_t erofs_global_shrink_cnt; - -#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount) -#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount) - -static int erofs_workgroup_get(struct erofs_workgroup *grp) -{ - int o; - -repeat: - o = erofs_wait_on_workgroup_freezed(grp); - if (unlikely(o <= 0)) - return -1; - - if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o)) - goto repeat; - - /* decrease refcount paired by erofs_workgroup_put */ - if (unlikely(o == 1)) - atomic_long_dec(&erofs_global_shrink_cnt); - return 0; -} - -struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, - pgoff_t index, bool *tag) -{ - struct erofs_sb_info *sbi = EROFS_SB(sb); - struct erofs_workgroup *grp; - -repeat: - rcu_read_lock(); - grp = radix_tree_lookup(&sbi->workstn_tree, index); - if (grp) { - *tag = xa_pointer_tag(grp); - grp = xa_untag_pointer(grp); - - if (erofs_workgroup_get(grp)) { - /* prefer to relax rcu read side */ - rcu_read_unlock(); - goto repeat; - } - - DBG_BUGON(index != grp->index); - } - rcu_read_unlock(); - return grp; -} - -int erofs_register_workgroup(struct super_block *sb, - struct erofs_workgroup *grp, - bool tag) -{ - struct erofs_sb_info *sbi; - int err; - - /* grp shouldn't be broken or used before */ - if (unlikely(atomic_read(&grp->refcount) != 1)) { - DBG_BUGON(1); - return -EINVAL; - } - - err = radix_tree_preload(GFP_NOFS); - if (err) - return err; - - sbi = EROFS_SB(sb); - xa_lock(&sbi->workstn_tree); - - grp = xa_tag_pointer(grp, tag); - - /* - * Bump up reference count before making this workgroup - * visible to other users in order to avoid potential UAF - * without serialized by workstn_lock. - */ - __erofs_workgroup_get(grp); - - err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp); - if (unlikely(err)) - /* - * it's safe to decrease since the workgroup isn't visible - * and refcount >= 2 (cannot be freezed). - */ - __erofs_workgroup_put(grp); - - xa_unlock(&sbi->workstn_tree); - radix_tree_preload_end(); - return err; -} - -static void __erofs_workgroup_free(struct erofs_workgroup *grp) -{ - atomic_long_dec(&erofs_global_shrink_cnt); - erofs_workgroup_free_rcu(grp); -} - -int erofs_workgroup_put(struct erofs_workgroup *grp) -{ - int count = atomic_dec_return(&grp->refcount); - - if (count == 1) - atomic_long_inc(&erofs_global_shrink_cnt); - else if (!count) - __erofs_workgroup_free(grp); - return count; -} - -static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp) -{ - erofs_workgroup_unfreeze(grp, 0); - __erofs_workgroup_free(grp); -} - -static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, - struct erofs_workgroup *grp, - bool cleanup) -{ - /* - * If managed cache is on, refcount of workgroups - * themselves could be < 0 (freezed). In other words, - * there is no guarantee that all refcounts > 0. - */ - if (!erofs_workgroup_try_to_freeze(grp, 1)) - return false; - - /* - * Note that all cached pages should be unattached - * before deleted from the radix tree. 
Otherwise some - * cached pages could be still attached to the orphan - * old workgroup when the new one is available in the tree. - */ - if (erofs_try_to_free_all_cached_pages(sbi, grp)) { - erofs_workgroup_unfreeze(grp, 1); - return false; - } - - /* - * It's impossible to fail after the workgroup is freezed, - * however in order to avoid some race conditions, add a - * DBG_BUGON to observe this in advance. - */ - DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree, - grp->index)) != grp); - - /* - * If managed cache is on, last refcount should indicate - * the related workstation. - */ - erofs_workgroup_unfreeze_final(grp); - return true; -} - -static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, - unsigned long nr_shrink, - bool cleanup) -{ - pgoff_t first_index = 0; - void *batch[PAGEVEC_SIZE]; - unsigned int freed = 0; - - int i, found; -repeat: - xa_lock(&sbi->workstn_tree); - - found = radix_tree_gang_lookup(&sbi->workstn_tree, - batch, first_index, PAGEVEC_SIZE); - - for (i = 0; i < found; ++i) { - struct erofs_workgroup *grp = xa_untag_pointer(batch[i]); - - first_index = grp->index + 1; - - /* try to shrink each valid workgroup */ - if (!erofs_try_to_release_workgroup(sbi, grp, cleanup)) - continue; - - ++freed; - if (unlikely(!--nr_shrink)) - break; - } - xa_unlock(&sbi->workstn_tree); - - if (i && nr_shrink) - goto repeat; - return freed; -} - -/* protected by 'erofs_sb_list_lock' */ -static unsigned int shrinker_run_no; - -/* protects the mounted 'erofs_sb_list' */ -static DEFINE_SPINLOCK(erofs_sb_list_lock); -static LIST_HEAD(erofs_sb_list); - -void erofs_shrinker_register(struct super_block *sb) -{ - struct erofs_sb_info *sbi = EROFS_SB(sb); - - mutex_init(&sbi->umount_mutex); - - spin_lock(&erofs_sb_list_lock); - list_add(&sbi->list, &erofs_sb_list); - spin_unlock(&erofs_sb_list_lock); -} - -void erofs_shrinker_unregister(struct super_block *sb) -{ - struct erofs_sb_info *const sbi = EROFS_SB(sb); - - mutex_lock(&sbi->umount_mutex); - erofs_shrink_workstation(sbi, ~0UL, true); - - spin_lock(&erofs_sb_list_lock); - list_del(&sbi->list); - spin_unlock(&erofs_sb_list_lock); - mutex_unlock(&sbi->umount_mutex); -} - -static unsigned long erofs_shrink_count(struct shrinker *shrink, - struct shrink_control *sc) -{ - return atomic_long_read(&erofs_global_shrink_cnt); -} - -static unsigned long erofs_shrink_scan(struct shrinker *shrink, - struct shrink_control *sc) -{ - struct erofs_sb_info *sbi; - struct list_head *p; - - unsigned long nr = sc->nr_to_scan; - unsigned int run_no; - unsigned long freed = 0; - - spin_lock(&erofs_sb_list_lock); - do { - run_no = ++shrinker_run_no; - } while (run_no == 0); - - /* Iterate over all mounted superblocks and try to shrink them */ - p = erofs_sb_list.next; - while (p != &erofs_sb_list) { - sbi = list_entry(p, struct erofs_sb_info, list); - - /* - * We move the ones we do to the end of the list, so we stop - * when we see one we have already done. - */ - if (sbi->shrinker_run_no == run_no) - break; - - if (!mutex_trylock(&sbi->umount_mutex)) { - p = p->next; - continue; - } - - spin_unlock(&erofs_sb_list_lock); - sbi->shrinker_run_no = run_no; - - freed += erofs_shrink_workstation(sbi, nr, false); - - spin_lock(&erofs_sb_list_lock); - /* Get the next list element before we move this one */ - p = p->next; - - /* - * Move this one to the end of the list to provide some - * fairness. 
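This rotation is a classic fairness idiom: each superblock that has just been shrunk moves to the list tail, so the next scan starts from the least-recently-visited mount instead of hammering the same one. A self-contained sketch of the same round-robin behaviour (plain C over an array; the kernel does it with list_move_tail() on erofs_sb_list):

#include <stdio.h>

#define NR_SB 3

int main(void)
{
	int sb_list[NR_SB] = { 1, 2, 3 };	/* stand-ins for mounted superblocks */
	int scan, i;

	for (scan = 0; scan < 4; scan++) {
		int victim = sb_list[0];	/* shrink the current head */

		printf("scan %d: shrinking sb%d\n", scan, victim);

		/* rotate the just-shrunk entry to the tail */
		for (i = 0; i + 1 < NR_SB; i++)
			sb_list[i] = sb_list[i + 1];
		sb_list[NR_SB - 1] = victim;
	}
	return 0;	/* prints sb1, sb2, sb3, sb1: pressure is spread evenly */
}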
- */ - list_move_tail(&sbi->list, &erofs_sb_list); - mutex_unlock(&sbi->umount_mutex); - - if (freed >= nr) - break; - } - spin_unlock(&erofs_sb_list_lock); - return freed; -} - -static struct shrinker erofs_shrinker_info = { - .scan_objects = erofs_shrink_scan, - .count_objects = erofs_shrink_count, - .seeks = DEFAULT_SEEKS, -}; - -int __init erofs_init_shrinker(void) -{ - return register_shrinker(&erofs_shrinker_info); -} - -void erofs_exit_shrinker(void) -{ - unregister_shrinker(&erofs_shrinker_info); -} -#endif /* !CONFIG_EROFS_FS_ZIP */ - diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c deleted file mode 100644 index e7e5840e3f9d..000000000000 --- a/drivers/staging/erofs/xattr.c +++ /dev/null @@ -1,705 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/xattr.c - * - * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include -#include "xattr.h" - -struct xattr_iter { - struct super_block *sb; - struct page *page; - void *kaddr; - - erofs_blk_t blkaddr; - unsigned int ofs; -}; - -static inline void xattr_iter_end(struct xattr_iter *it, bool atomic) -{ - /* the only user of kunmap() is 'init_inode_xattrs' */ - if (unlikely(!atomic)) - kunmap(it->page); - else - kunmap_atomic(it->kaddr); - - unlock_page(it->page); - put_page(it->page); -} - -static inline void xattr_iter_end_final(struct xattr_iter *it) -{ - if (!it->page) - return; - - xattr_iter_end(it, true); -} - -static int init_inode_xattrs(struct inode *inode) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct xattr_iter it; - unsigned int i; - struct erofs_xattr_ibody_header *ih; - struct super_block *sb; - struct erofs_sb_info *sbi; - bool atomic_map; - int ret = 0; - - /* the most case is that xattrs of this inode are initialized. */ - if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) - return 0; - - if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE)) - return -ERESTARTSYS; - - /* someone has initialized xattrs for us? */ - if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) - goto out_unlock; - - /* - * bypass all xattr operations if ->xattr_isize is not greater than - * sizeof(struct erofs_xattr_ibody_header), in detail: - * 1) it is not enough to contain erofs_xattr_ibody_header then - * ->xattr_isize should be 0 (it means no xattr); - * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk - * undefined right now (maybe use later with some new sb feature). 
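Those two rules read most easily as a small decision function. A hedged user-space sketch follows; the header size is an illustrative stand-in for sizeof(struct erofs_xattr_ibody_header), and EFSCORRUPTED is spelled via EUCLEAN per the kernel's convention:

#include <errno.h>
#include <stdio.h>

#define ENOATTR		ENODATA		/* as in xattr.h further below */
#define EFSCORRUPTED	EUCLEAN		/* kernel alias for corrupted images */
#define IBODY_HDR_SZ	12U		/* illustrative header size only */

/* mirrors the xattr_isize sanity checks in init_inode_xattrs() */
static int check_xattr_isize(unsigned int xattr_isize)
{
	if (xattr_isize == IBODY_HDR_SZ)
		return -EOPNOTSUPP;	/* header-only ibody: undefined today */
	if (xattr_isize < IBODY_HDR_SZ)
		return xattr_isize ? -EFSCORRUPTED	/* bogus layout */
				   : -ENOATTR;		/* simply no xattrs */
	return 0;			/* valid: worth parsing */
}

int main(void)
{
	unsigned int sizes[] = { 0, 5, IBODY_HDR_SZ, 64 };
	int i;

	for (i = 0; i < 4; i++)
		printf("xattr_isize=%u -> %d\n", sizes[i],
		       check_xattr_isize(sizes[i]));
	return 0;
}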
- */ - if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) { - errln("xattr_isize %d of nid %llu is not supported yet", - vi->xattr_isize, vi->nid); - ret = -EOPNOTSUPP; - goto out_unlock; - } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) { - if (unlikely(vi->xattr_isize)) { - errln("bogus xattr ibody @ nid %llu", vi->nid); - DBG_BUGON(1); - ret = -EFSCORRUPTED; - goto out_unlock; /* xattr ondisk layout error */ - } - ret = -ENOATTR; - goto out_unlock; - } - - sb = inode->i_sb; - sbi = EROFS_SB(sb); - it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize); - it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize); - - it.page = erofs_get_inline_page(inode, it.blkaddr); - if (IS_ERR(it.page)) { - ret = PTR_ERR(it.page); - goto out_unlock; - } - - /* read in shared xattr array (non-atomic, see kmalloc below) */ - it.kaddr = kmap(it.page); - atomic_map = false; - - ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs); - - vi->xattr_shared_count = ih->h_shared_count; - vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count, - sizeof(uint), GFP_KERNEL); - if (!vi->xattr_shared_xattrs) { - xattr_iter_end(&it, atomic_map); - ret = -ENOMEM; - goto out_unlock; - } - - /* let's skip ibody header */ - it.ofs += sizeof(struct erofs_xattr_ibody_header); - - for (i = 0; i < vi->xattr_shared_count; ++i) { - if (unlikely(it.ofs >= EROFS_BLKSIZ)) { - /* cannot be unaligned */ - DBG_BUGON(it.ofs != EROFS_BLKSIZ); - xattr_iter_end(&it, atomic_map); - - it.page = erofs_get_meta_page(sb, ++it.blkaddr, - S_ISDIR(inode->i_mode)); - if (IS_ERR(it.page)) { - kfree(vi->xattr_shared_xattrs); - vi->xattr_shared_xattrs = NULL; - ret = PTR_ERR(it.page); - goto out_unlock; - } - - it.kaddr = kmap_atomic(it.page); - atomic_map = true; - it.ofs = 0; - } - vi->xattr_shared_xattrs[i] = - le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs)); - it.ofs += sizeof(__le32); - } - xattr_iter_end(&it, atomic_map); - - set_bit(EROFS_V_EA_INITED_BIT, &vi->flags); - -out_unlock: - clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags); - return ret; -} - -/* - * the general idea for these return values is - * if 0 is returned, go on processing the current xattr; - * 1 (> 0) is returned, skip this round to process the next xattr; - * -err (< 0) is returned, an error (maybe ENOXATTR) occurred - * and need to be handled - */ -struct xattr_iter_handlers { - int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry); - int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf, - unsigned int len); - int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz); - void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf, - unsigned int len); -}; - -static inline int xattr_iter_fixup(struct xattr_iter *it) -{ - if (it->ofs < EROFS_BLKSIZ) - return 0; - - xattr_iter_end(it, true); - - it->blkaddr += erofs_blknr(it->ofs); - - it->page = erofs_get_meta_page(it->sb, it->blkaddr, false); - if (IS_ERR(it->page)) { - int err = PTR_ERR(it->page); - - it->page = NULL; - return err; - } - - it->kaddr = kmap_atomic(it->page); - it->ofs = erofs_blkoff(it->ofs); - return 0; -} - -static int inline_xattr_iter_begin(struct xattr_iter *it, - struct inode *inode) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb); - unsigned int xattr_header_sz, inline_xattr_ofs; - - xattr_header_sz = inlinexattr_header_size(inode); - if (unlikely(xattr_header_sz >= vi->xattr_isize)) { - DBG_BUGON(xattr_header_sz > 
vi->xattr_isize); - return -ENOATTR; - } - - inline_xattr_ofs = vi->inode_isize + xattr_header_sz; - - it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs); - it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs); - - it->page = erofs_get_inline_page(inode, it->blkaddr); - if (IS_ERR(it->page)) - return PTR_ERR(it->page); - - it->kaddr = kmap_atomic(it->page); - return vi->xattr_isize - xattr_header_sz; -} - -/* - * Regardless of success or failure, `xattr_foreach' will end up with - * `ofs' pointing to the next xattr item rather than an arbitrary position. - */ -static int xattr_foreach(struct xattr_iter *it, - const struct xattr_iter_handlers *op, - unsigned int *tlimit) -{ - struct erofs_xattr_entry entry; - unsigned int value_sz, processed, slice; - int err; - - /* 0. fixup blkaddr, ofs, ipage */ - err = xattr_iter_fixup(it); - if (err) - return err; - - /* - * 1. read xattr entry to the memory, - * since we do EROFS_XATTR_ALIGN - * therefore entry should be in the page - */ - entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs); - if (tlimit) { - unsigned int entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry); - - /* xattr on-disk corruption: xattr entry beyond xattr_isize */ - if (unlikely(*tlimit < entry_sz)) { - DBG_BUGON(1); - return -EFSCORRUPTED; - } - *tlimit -= entry_sz; - } - - it->ofs += sizeof(struct erofs_xattr_entry); - value_sz = le16_to_cpu(entry.e_value_size); - - /* handle entry */ - err = op->entry(it, &entry); - if (err) { - it->ofs += entry.e_name_len + value_sz; - goto out; - } - - /* 2. handle xattr name (ofs will finally be at the end of name) */ - processed = 0; - - while (processed < entry.e_name_len) { - if (it->ofs >= EROFS_BLKSIZ) { - DBG_BUGON(it->ofs > EROFS_BLKSIZ); - - err = xattr_iter_fixup(it); - if (err) - goto out; - it->ofs = 0; - } - - slice = min_t(unsigned int, PAGE_SIZE - it->ofs, - entry.e_name_len - processed); - - /* handle name */ - err = op->name(it, processed, it->kaddr + it->ofs, slice); - if (err) { - it->ofs += entry.e_name_len - processed + value_sz; - goto out; - } - - it->ofs += slice; - processed += slice; - } - - /* 3. handle xattr value */ - processed = 0; - - if (op->alloc_buffer) { - err = op->alloc_buffer(it, value_sz); - if (err) { - it->ofs += value_sz; - goto out; - } - } - - while (processed < value_sz) { - if (it->ofs >= EROFS_BLKSIZ) { - DBG_BUGON(it->ofs > EROFS_BLKSIZ); - - err = xattr_iter_fixup(it); - if (err) - goto out; - it->ofs = 0; - } - - slice = min_t(unsigned int, PAGE_SIZE - it->ofs, - value_sz - processed); - op->value(it, processed, it->kaddr + it->ofs, slice); - it->ofs += slice; - processed += slice; - } - -out: - /* xattrs should be 4-byte aligned (on-disk constraint) */ - it->ofs = EROFS_XATTR_ALIGN(it->ofs); - return err < 0 ? err : 0; -} - -struct getxattr_iter { - struct xattr_iter it; - - char *buffer; - int buffer_size, index; - struct qstr name; -}; - -static int xattr_entrymatch(struct xattr_iter *_it, - struct erofs_xattr_entry *entry) -{ - struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); - - return (it->index != entry->e_name_index || - it->name.len != entry->e_name_len) ? -ENOATTR : 0; -} - -static int xattr_namematch(struct xattr_iter *_it, - unsigned int processed, char *buf, unsigned int len) -{ - struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); - - return memcmp(buf, it->name.name + processed, len) ? 
-ENOATTR : 0; -} - -static int xattr_checkbuffer(struct xattr_iter *_it, - unsigned int value_sz) -{ - struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); - int err = it->buffer_size < value_sz ? -ERANGE : 0; - - it->buffer_size = value_sz; - return !it->buffer ? 1 : err; -} - -static void xattr_copyvalue(struct xattr_iter *_it, - unsigned int processed, - char *buf, unsigned int len) -{ - struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); - - memcpy(it->buffer + processed, buf, len); -} - -static const struct xattr_iter_handlers find_xattr_handlers = { - .entry = xattr_entrymatch, - .name = xattr_namematch, - .alloc_buffer = xattr_checkbuffer, - .value = xattr_copyvalue -}; - -static int inline_getxattr(struct inode *inode, struct getxattr_iter *it) -{ - int ret; - unsigned int remaining; - - ret = inline_xattr_iter_begin(&it->it, inode); - if (ret < 0) - return ret; - - remaining = ret; - while (remaining) { - ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining); - if (ret != -ENOATTR) - break; - } - xattr_iter_end_final(&it->it); - - return ret ? ret : it->buffer_size; -} - -static int shared_getxattr(struct inode *inode, struct getxattr_iter *it) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct super_block *const sb = inode->i_sb; - struct erofs_sb_info *const sbi = EROFS_SB(sb); - unsigned int i; - int ret = -ENOATTR; - - for (i = 0; i < vi->xattr_shared_count; ++i) { - erofs_blk_t blkaddr = - xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]); - - it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]); - - if (!i || blkaddr != it->it.blkaddr) { - if (i) - xattr_iter_end(&it->it, true); - - it->it.page = erofs_get_meta_page(sb, blkaddr, false); - if (IS_ERR(it->it.page)) - return PTR_ERR(it->it.page); - - it->it.kaddr = kmap_atomic(it->it.page); - it->it.blkaddr = blkaddr; - } - - ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL); - if (ret != -ENOATTR) - break; - } - if (vi->xattr_shared_count) - xattr_iter_end_final(&it->it); - - return ret ? 
ret : it->buffer_size; -} - -static bool erofs_xattr_user_list(struct dentry *dentry) -{ - return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER); -} - -static bool erofs_xattr_trusted_list(struct dentry *dentry) -{ - return capable(CAP_SYS_ADMIN); -} - -int erofs_getxattr(struct inode *inode, int index, - const char *name, - void *buffer, size_t buffer_size) -{ - int ret; - struct getxattr_iter it; - - if (unlikely(!name)) - return -EINVAL; - - ret = init_inode_xattrs(inode); - if (ret) - return ret; - - it.index = index; - - it.name.len = strlen(name); - if (it.name.len > EROFS_NAME_LEN) - return -ERANGE; - it.name.name = name; - - it.buffer = buffer; - it.buffer_size = buffer_size; - - it.it.sb = inode->i_sb; - ret = inline_getxattr(inode, &it); - if (ret == -ENOATTR) - ret = shared_getxattr(inode, &it); - return ret; -} - -static int erofs_xattr_generic_get(const struct xattr_handler *handler, - struct dentry *unused, struct inode *inode, - const char *name, void *buffer, size_t size) -{ - struct erofs_sb_info *const sbi = EROFS_I_SB(inode); - - switch (handler->flags) { - case EROFS_XATTR_INDEX_USER: - if (!test_opt(sbi, XATTR_USER)) - return -EOPNOTSUPP; - break; - case EROFS_XATTR_INDEX_TRUSTED: - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - break; - case EROFS_XATTR_INDEX_SECURITY: - break; - default: - return -EINVAL; - } - - return erofs_getxattr(inode, handler->flags, name, buffer, size); -} - -const struct xattr_handler erofs_xattr_user_handler = { - .prefix = XATTR_USER_PREFIX, - .flags = EROFS_XATTR_INDEX_USER, - .list = erofs_xattr_user_list, - .get = erofs_xattr_generic_get, -}; - -const struct xattr_handler erofs_xattr_trusted_handler = { - .prefix = XATTR_TRUSTED_PREFIX, - .flags = EROFS_XATTR_INDEX_TRUSTED, - .list = erofs_xattr_trusted_list, - .get = erofs_xattr_generic_get, -}; - -#ifdef CONFIG_EROFS_FS_SECURITY -const struct xattr_handler __maybe_unused erofs_xattr_security_handler = { - .prefix = XATTR_SECURITY_PREFIX, - .flags = EROFS_XATTR_INDEX_SECURITY, - .get = erofs_xattr_generic_get, -}; -#endif - -const struct xattr_handler *erofs_xattr_handlers[] = { - &erofs_xattr_user_handler, -#ifdef CONFIG_EROFS_FS_POSIX_ACL - &posix_acl_access_xattr_handler, - &posix_acl_default_xattr_handler, -#endif - &erofs_xattr_trusted_handler, -#ifdef CONFIG_EROFS_FS_SECURITY - &erofs_xattr_security_handler, -#endif - NULL, -}; - -struct listxattr_iter { - struct xattr_iter it; - - struct dentry *dentry; - char *buffer; - int buffer_size, buffer_ofs; -}; - -static int xattr_entrylist(struct xattr_iter *_it, - struct erofs_xattr_entry *entry) -{ - struct listxattr_iter *it = - container_of(_it, struct listxattr_iter, it); - unsigned int prefix_len; - const char *prefix; - - const struct xattr_handler *h = - erofs_xattr_handler(entry->e_name_index); - - if (!h || (h->list && !h->list(it->dentry))) - return 1; - - prefix = xattr_prefix(h); - prefix_len = strlen(prefix); - - if (!it->buffer) { - it->buffer_ofs += prefix_len + entry->e_name_len + 1; - return 1; - } - - if (it->buffer_ofs + prefix_len - + entry->e_name_len + 1 > it->buffer_size) - return -ERANGE; - - memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len); - it->buffer_ofs += prefix_len; - return 0; -} - -static int xattr_namelist(struct xattr_iter *_it, - unsigned int processed, char *buf, unsigned int len) -{ - struct listxattr_iter *it = - container_of(_it, struct listxattr_iter, it); - - memcpy(it->buffer + it->buffer_ofs, buf, len); - it->buffer_ofs += len; - return 0; -} - -static int xattr_skipvalue(struct 
xattr_iter *_it, - unsigned int value_sz) -{ - struct listxattr_iter *it = - container_of(_it, struct listxattr_iter, it); - - it->buffer[it->buffer_ofs++] = '\0'; - return 1; -} - -static const struct xattr_iter_handlers list_xattr_handlers = { - .entry = xattr_entrylist, - .name = xattr_namelist, - .alloc_buffer = xattr_skipvalue, - .value = NULL -}; - -static int inline_listxattr(struct listxattr_iter *it) -{ - int ret; - unsigned int remaining; - - ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry)); - if (ret < 0) - return ret; - - remaining = ret; - while (remaining) { - ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining); - if (ret) - break; - } - xattr_iter_end_final(&it->it); - return ret ? ret : it->buffer_ofs; -} - -static int shared_listxattr(struct listxattr_iter *it) -{ - struct inode *const inode = d_inode(it->dentry); - struct erofs_vnode *const vi = EROFS_V(inode); - struct super_block *const sb = inode->i_sb; - struct erofs_sb_info *const sbi = EROFS_SB(sb); - unsigned int i; - int ret = 0; - - for (i = 0; i < vi->xattr_shared_count; ++i) { - erofs_blk_t blkaddr = - xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]); - - it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]); - if (!i || blkaddr != it->it.blkaddr) { - if (i) - xattr_iter_end(&it->it, true); - - it->it.page = erofs_get_meta_page(sb, blkaddr, false); - if (IS_ERR(it->it.page)) - return PTR_ERR(it->it.page); - - it->it.kaddr = kmap_atomic(it->it.page); - it->it.blkaddr = blkaddr; - } - - ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL); - if (ret) - break; - } - if (vi->xattr_shared_count) - xattr_iter_end_final(&it->it); - - return ret ? ret : it->buffer_ofs; -} - -ssize_t erofs_listxattr(struct dentry *dentry, - char *buffer, size_t buffer_size) -{ - int ret; - struct listxattr_iter it; - - ret = init_inode_xattrs(d_inode(dentry)); - if (ret) - return ret; - - it.dentry = dentry; - it.buffer = buffer; - it.buffer_size = buffer_size; - it.buffer_ofs = 0; - - it.it.sb = dentry->d_sb; - - ret = inline_listxattr(&it); - if (ret < 0 && ret != -ENOATTR) - return ret; - return shared_listxattr(&it); -} - -#ifdef CONFIG_EROFS_FS_POSIX_ACL -struct posix_acl *erofs_get_acl(struct inode *inode, int type) -{ - struct posix_acl *acl; - int prefix, rc; - char *value = NULL; - - switch (type) { - case ACL_TYPE_ACCESS: - prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS; - break; - case ACL_TYPE_DEFAULT: - prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT; - break; - default: - return ERR_PTR(-EINVAL); - } - - rc = erofs_getxattr(inode, prefix, "", NULL, 0); - if (rc > 0) { - value = kmalloc(rc, GFP_KERNEL); - if (!value) - return ERR_PTR(-ENOMEM); - rc = erofs_getxattr(inode, prefix, "", value, rc); - } - - if (rc == -ENOATTR) - acl = NULL; - else if (rc < 0) - acl = ERR_PTR(rc); - else - acl = posix_acl_from_xattr(&init_user_ns, value, rc); - kfree(value); - return acl; -} -#endif - diff --git a/drivers/staging/erofs/xattr.h b/drivers/staging/erofs/xattr.h deleted file mode 100644 index e20249647541..000000000000 --- a/drivers/staging/erofs/xattr.h +++ /dev/null @@ -1,94 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/xattr.h - * - * Copyright (C) 2017-2018 HUAWEI, Inc. 
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_XATTR_H -#define __EROFS_XATTR_H - -#include "internal.h" -#include -#include - -/* Attribute not found */ -#define ENOATTR ENODATA - -static inline unsigned int inlinexattr_header_size(struct inode *inode) -{ - return sizeof(struct erofs_xattr_ibody_header) - + sizeof(u32) * EROFS_V(inode)->xattr_shared_count; -} - -static inline erofs_blk_t xattrblock_addr(struct erofs_sb_info *sbi, - unsigned int xattr_id) -{ -#ifdef CONFIG_EROFS_FS_XATTR - return sbi->xattr_blkaddr + - xattr_id * sizeof(__u32) / EROFS_BLKSIZ; -#else - return 0; -#endif -} - -static inline unsigned int xattrblock_offset(struct erofs_sb_info *sbi, - unsigned int xattr_id) -{ - return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ; -} - -#ifdef CONFIG_EROFS_FS_XATTR -extern const struct xattr_handler erofs_xattr_user_handler; -extern const struct xattr_handler erofs_xattr_trusted_handler; -#ifdef CONFIG_EROFS_FS_SECURITY -extern const struct xattr_handler erofs_xattr_security_handler; -#endif - -static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx) -{ -static const struct xattr_handler *xattr_handler_map[] = { - [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler, -#ifdef CONFIG_EROFS_FS_POSIX_ACL - [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler, - [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] = - &posix_acl_default_xattr_handler, -#endif - [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler, -#ifdef CONFIG_EROFS_FS_SECURITY - [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler, -#endif -}; - - return idx && idx < ARRAY_SIZE(xattr_handler_map) ? - xattr_handler_map[idx] : NULL; -} - -extern const struct xattr_handler *erofs_xattr_handlers[]; - -int erofs_getxattr(struct inode *, int, const char *, void *, size_t); -ssize_t erofs_listxattr(struct dentry *, char *, size_t); -#else -static inline int erofs_getxattr(struct inode *inode, int index, - const char *name, void *buffer, - size_t buffer_size) -{ - return -EOPNOTSUPP; -} - -static inline ssize_t erofs_listxattr(struct dentry *dentry, - char *buffer, size_t buffer_size) -{ - return -EOPNOTSUPP; -} -#endif /* !CONFIG_EROFS_FS_XATTR */ - -#ifdef CONFIG_EROFS_FS_POSIX_ACL -struct posix_acl *erofs_get_acl(struct inode *inode, int type); -#else -#define erofs_get_acl (NULL) -#endif - -#endif - diff --git a/drivers/staging/erofs/zdata.c b/drivers/staging/erofs/zdata.c deleted file mode 100644 index 60d7c20db87d..000000000000 --- a/drivers/staging/erofs/zdata.c +++ /dev/null @@ -1,1434 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/zdata.c - * - * Copyright (C) 2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#include "zdata.h" -#include "compress.h" -#include - -#include - -/* - * a compressed_pages[] placeholder in order to avoid - * being filled with file pages for in-place decompression. 
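Before the definition itself, it may help to see the three-state slot protocol this placeholder creates. A stand-alone sketch (illustrative values; the real array is pcl->compressed_pages[]):

#include <stdio.h>

/* Three-state slot:
 *   NULL          - slot untouched so far
 *   UNALLOCATED   - reserved; back it with a real page at submission time
 *   anything else - an actual page pointer
 * The sentinel just has to be an address no allocator will ever return. */
#define UNALLOCATED ((void *)0x5F0E4B1D)

static const char *slot_state(void *slot)
{
	if (!slot)
		return "empty";
	if (slot == UNALLOCATED)
		return "reserved, allocate at I/O submit time";
	return "real page";
}

int main(void)
{
	int page;	/* stand-in for a struct page */
	void *slots[] = { NULL, UNALLOCATED, &page };
	int i;

	for (i = 0; i < 3; i++)
		printf("slot %d: %s\n", i, slot_state(slots[i]));
	return 0;
}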
- */ -#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D) - -/* how to allocate cached pages for a pcluster */ -enum z_erofs_cache_alloctype { - DONTALLOC, /* don't allocate any cached pages */ - DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */ -}; - -/* - * tagged pointer with 1-bit tag for all compressed pages - * tag 0 - the page is just found with an extra page reference - */ -typedef tagptr1_t compressed_page_t; - -#define tag_compressed_page_justfound(page) \ - tagptr_fold(compressed_page_t, page, 1) - -static struct workqueue_struct *z_erofs_workqueue __read_mostly; -static struct kmem_cache *pcluster_cachep __read_mostly; - -void z_erofs_exit_zip_subsystem(void) -{ - destroy_workqueue(z_erofs_workqueue); - kmem_cache_destroy(pcluster_cachep); -} - -static inline int init_unzip_workqueue(void) -{ - const unsigned int onlinecpus = num_possible_cpus(); - const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE; - - /* - * no need to spawn too many threads, limiting threads could minimum - * scheduling overhead, perhaps per-CPU threads should be better? - */ - z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags, - onlinecpus + onlinecpus / 4); - return z_erofs_workqueue ? 0 : -ENOMEM; -} - -static void init_once(void *ptr) -{ - struct z_erofs_pcluster *pcl = ptr; - struct z_erofs_collection *cl = z_erofs_primarycollection(pcl); - unsigned int i; - - mutex_init(&cl->lock); - cl->nr_pages = 0; - cl->vcnt = 0; - for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i) - pcl->compressed_pages[i] = NULL; -} - -static void init_always(struct z_erofs_pcluster *pcl) -{ - struct z_erofs_collection *cl = z_erofs_primarycollection(pcl); - - atomic_set(&pcl->obj.refcount, 1); - - DBG_BUGON(cl->nr_pages); - DBG_BUGON(cl->vcnt); -} - -int __init z_erofs_init_zip_subsystem(void) -{ - pcluster_cachep = kmem_cache_create("erofs_compress", - Z_EROFS_WORKGROUP_SIZE, 0, - SLAB_RECLAIM_ACCOUNT, init_once); - if (pcluster_cachep) { - if (!init_unzip_workqueue()) - return 0; - - kmem_cache_destroy(pcluster_cachep); - } - return -ENOMEM; -} - -enum z_erofs_collectmode { - COLLECT_SECONDARY, - COLLECT_PRIMARY, - /* - * The current collection was the tail of an exist chain, in addition - * that the previous processed chained collections are all decided to - * be hooked up to it. 
- * A new chain will be created for the remaining collections which are - * not processed yet, therefore different from COLLECT_PRIMARY_FOLLOWED, - * the next collection cannot reuse the whole page safely in - * the following scenario: - * ________________________________________________________________ - * | tail (partial) page | head (partial) page | - * | (belongs to the next cl) | (belongs to the current cl) | - * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________| - */ - COLLECT_PRIMARY_HOOKED, - COLLECT_PRIMARY_FOLLOWED_NOINPLACE, - /* - * The current collection has been linked with the owned chain, and - * could also be linked with the remaining collections, which means - * if the processing page is the tail page of the collection, thus - * the current collection can safely use the whole page (since - * the previous collection is under control) for in-place I/O, as - * illustrated below: - * ________________________________________________________________ - * | tail (partial) page | head (partial) page | - * | (of the current cl) | (of the previous collection) | - * | PRIMARY_FOLLOWED or | | - * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________| - * - * [ (*) the above page can be used as inplace I/O. ] - */ - COLLECT_PRIMARY_FOLLOWED, -}; - -struct z_erofs_collector { - struct z_erofs_pagevec_ctor vector; - - struct z_erofs_pcluster *pcl, *tailpcl; - struct z_erofs_collection *cl; - struct page **compressedpages; - z_erofs_next_pcluster_t owned_head; - - enum z_erofs_collectmode mode; -}; - -struct z_erofs_decompress_frontend { - struct inode *const inode; - - struct z_erofs_collector clt; - struct erofs_map_blocks map; - - /* used for applying cache strategy on the fly */ - bool backmost; - erofs_off_t headoffset; -}; - -#define COLLECTOR_INIT() { \ - .owned_head = Z_EROFS_PCLUSTER_TAIL, \ - .mode = COLLECT_PRIMARY_FOLLOWED } - -#define DECOMPRESS_FRONTEND_INIT(__i) { \ - .inode = __i, .clt = COLLECTOR_INIT(), \ - .backmost = true, } - -static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES]; -static DEFINE_MUTEX(z_pagemap_global_lock); - -static void preload_compressed_pages(struct z_erofs_collector *clt, - struct address_space *mc, - enum z_erofs_cache_alloctype type, - struct list_head *pagepool) -{ - const struct z_erofs_pcluster *pcl = clt->pcl; - const unsigned int clusterpages = BIT(pcl->clusterbits); - struct page **pages = clt->compressedpages; - pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages); - bool standalone = true; - - if (clt->mode < COLLECT_PRIMARY_FOLLOWED) - return; - - for (; pages < pcl->compressed_pages + clusterpages; ++pages) { - struct page *page; - compressed_page_t t; - - /* the compressed page was loaded before */ - if (READ_ONCE(*pages)) - continue; - - page = find_get_page(mc, index); - - if (page) { - t = tag_compressed_page_justfound(page); - } else if (type == DELAYEDALLOC) { - t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED); - } else { /* DONTALLOC */ - if (standalone) - clt->compressedpages = pages; - standalone = false; - continue; - } - - if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t))) - continue; - - if (page) - put_page(page); - } - - if (standalone) /* downgrade to PRIMARY_FOLLOWED_NOINPLACE */ - clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE; -} - -/* called by erofs_shrinker to get rid of all compressed_pages */ -int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, - struct erofs_workgroup *grp) -{ - struct z_erofs_pcluster *const pcl = - container_of(grp, 
struct z_erofs_pcluster, obj); - struct address_space *const mapping = MNGD_MAPPING(sbi); - const unsigned int clusterpages = BIT(pcl->clusterbits); - int i; - - /* - * refcount of workgroup is now freezed as 1, - * therefore no need to worry about available decompression users. - */ - for (i = 0; i < clusterpages; ++i) { - struct page *page = pcl->compressed_pages[i]; - - if (!page) - continue; - - /* block other users from reclaiming or migrating the page */ - if (!trylock_page(page)) - return -EBUSY; - - if (unlikely(page->mapping != mapping)) - continue; - - /* barrier is implied in the following 'unlock_page' */ - WRITE_ONCE(pcl->compressed_pages[i], NULL); - set_page_private(page, 0); - ClearPagePrivate(page); - - unlock_page(page); - put_page(page); - } - return 0; -} - -int erofs_try_to_free_cached_page(struct address_space *mapping, - struct page *page) -{ - struct z_erofs_pcluster *const pcl = (void *)page_private(page); - const unsigned int clusterpages = BIT(pcl->clusterbits); - int ret = 0; /* 0 - busy */ - - if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) { - unsigned int i; - - for (i = 0; i < clusterpages; ++i) { - if (pcl->compressed_pages[i] == page) { - WRITE_ONCE(pcl->compressed_pages[i], NULL); - ret = 1; - break; - } - } - erofs_workgroup_unfreeze(&pcl->obj, 1); - - if (ret) { - ClearPagePrivate(page); - put_page(page); - } - } - return ret; -} - -/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */ -static inline bool try_inplace_io(struct z_erofs_collector *clt, - struct page *page) -{ - struct z_erofs_pcluster *const pcl = clt->pcl; - const unsigned int clusterpages = BIT(pcl->clusterbits); - - while (clt->compressedpages < pcl->compressed_pages + clusterpages) { - if (!cmpxchg(clt->compressedpages++, NULL, page)) - return true; - } - return false; -} - -/* callers must be with collection lock held */ -static int z_erofs_attach_page(struct z_erofs_collector *clt, - struct page *page, - enum z_erofs_page_type type) -{ - int ret; - bool occupied; - - /* give priority for inplaceio */ - if (clt->mode >= COLLECT_PRIMARY && - type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && - try_inplace_io(clt, page)) - return 0; - - ret = z_erofs_pagevec_enqueue(&clt->vector, - page, type, &occupied); - clt->cl->vcnt += (unsigned int)ret; - - return ret ? 0 : -EAGAIN; -} - -static enum z_erofs_collectmode -try_to_claim_pcluster(struct z_erofs_pcluster *pcl, - z_erofs_next_pcluster_t *owned_head) -{ - /* let's claim these following types of pclusters */ -retry: - if (pcl->next == Z_EROFS_PCLUSTER_NIL) { - /* type 1, nil pcluster */ - if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL, - *owned_head) != Z_EROFS_PCLUSTER_NIL) - goto retry; - - *owned_head = &pcl->next; - /* lucky, I am the followee :) */ - return COLLECT_PRIMARY_FOLLOWED; - } else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) { - /* - * type 2, link to the end of a existing open chain, - * be careful that its submission itself is governed - * by the original owned chain. 
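Both claim types hinge on a single compare-and-swap on pcl->next. A minimal C11 sketch of that lock-free handshake (hypothetical names; the kernel uses cmpxchg() and retries on failure):

#include <stdatomic.h>
#include <stdio.h>

#define NIL ((void *)0)		/* stand-in for Z_EROFS_PCLUSTER_NIL */

/* one claim attempt: only the thread whose CAS succeeds owns the link;
 * losers observe the new value and must retry or fall back */
static int try_claim(_Atomic(void *) *next, void *my_head)
{
	void *expected = NIL;

	return atomic_compare_exchange_strong(next, &expected, my_head);
}

int main(void)
{
	_Atomic(void *) next = NIL;
	int head_a, head_b;	/* stand-ins for two owned chain heads */

	printf("A claims: %d\n", try_claim(&next, &head_a));	/* 1: won */
	printf("B claims: %d\n", try_claim(&next, &head_b));	/* 0: lost */
	return 0;
}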
- */ - if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, - *owned_head) != Z_EROFS_PCLUSTER_TAIL) - goto retry; - *owned_head = Z_EROFS_PCLUSTER_TAIL; - return COLLECT_PRIMARY_HOOKED; - } - return COLLECT_PRIMARY; /* :( better luck next time */ -} - -static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt, - struct inode *inode, - struct erofs_map_blocks *map) -{ - struct erofs_workgroup *grp; - struct z_erofs_pcluster *pcl; - struct z_erofs_collection *cl; - unsigned int length; - bool tag; - - grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag); - if (!grp) - return NULL; - - pcl = container_of(grp, struct z_erofs_pcluster, obj); - if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) { - DBG_BUGON(1); - erofs_workgroup_put(grp); - return ERR_PTR(-EFSCORRUPTED); - } - - cl = z_erofs_primarycollection(pcl); - if (unlikely(cl->pageofs != (map->m_la & ~PAGE_MASK))) { - DBG_BUGON(1); - erofs_workgroup_put(grp); - return ERR_PTR(-EFSCORRUPTED); - } - - length = READ_ONCE(pcl->length); - if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) { - if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) { - DBG_BUGON(1); - erofs_workgroup_put(grp); - return ERR_PTR(-EFSCORRUPTED); - } - } else { - unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT; - - if (map->m_flags & EROFS_MAP_FULL_MAPPED) - llen |= Z_EROFS_PCLUSTER_FULL_LENGTH; - - while (llen > length && - length != cmpxchg_relaxed(&pcl->length, length, llen)) { - cpu_relax(); - length = READ_ONCE(pcl->length); - } - } - mutex_lock(&cl->lock); - /* used to check tail merging loop due to corrupted images */ - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) - clt->tailpcl = pcl; - clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head); - /* clean tailpcl if the current owned_head is Z_EROFS_PCLUSTER_TAIL */ - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) - clt->tailpcl = NULL; - clt->pcl = pcl; - clt->cl = cl; - return cl; -} - -static struct z_erofs_collection *clregister(struct z_erofs_collector *clt, - struct inode *inode, - struct erofs_map_blocks *map) -{ - struct z_erofs_pcluster *pcl; - struct z_erofs_collection *cl; - int err; - - /* no available workgroup, let's allocate one */ - pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS); - if (unlikely(!pcl)) - return ERR_PTR(-ENOMEM); - - init_always(pcl); - pcl->obj.index = map->m_pa >> PAGE_SHIFT; - - pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) | - (map->m_flags & EROFS_MAP_FULL_MAPPED ? - Z_EROFS_PCLUSTER_FULL_LENGTH : 0); - - if (map->m_flags & EROFS_MAP_ZIPPED) - pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4; - else - pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED; - - pcl->clusterbits = EROFS_V(inode)->z_physical_clusterbits[0]; - pcl->clusterbits -= PAGE_SHIFT; - - /* new pclusters should be claimed as type 1, primary and followed */ - pcl->next = clt->owned_head; - clt->mode = COLLECT_PRIMARY_FOLLOWED; - - cl = z_erofs_primarycollection(pcl); - cl->pageofs = map->m_la & ~PAGE_MASK; - - /* - * lock all primary followed works before visible to others - * and mutex_trylock *never* fails for a new pcluster. 
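The trylock-then-register ordering is a "lock before publish" pattern: lookups may find the new pcluster the instant it is inserted, but they block on its collection mutex until the creator finishes setting it up. A pthread sketch of the same ordering (hypothetical names, not the kernel API; a real version also wants a release barrier on the publish):

#include <pthread.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;
	int ready;			/* state still being initialized */
};

static struct obj *registry;		/* stand-in for the workstn radix tree */

static void publish_new_obj(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return;
	pthread_mutex_init(&o->lock, NULL);
	pthread_mutex_lock(&o->lock);	/* never fails on a fresh mutex */
	registry = o;			/* now visible to concurrent lookups... */
	o->ready = 1;			/* ...which block on o->lock until done */
	pthread_mutex_unlock(&o->lock);
}

int main(void)
{
	publish_new_obj();
	free(registry);
	return 0;
}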
- */ - mutex_trylock(&cl->lock); - - err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0); - if (err) { - mutex_unlock(&cl->lock); - kmem_cache_free(pcluster_cachep, pcl); - return ERR_PTR(-EAGAIN); - } - /* used to check tail merging loop due to corrupted images */ - if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) - clt->tailpcl = pcl; - clt->owned_head = &pcl->next; - clt->pcl = pcl; - clt->cl = cl; - return cl; -} - -static int z_erofs_collector_begin(struct z_erofs_collector *clt, - struct inode *inode, - struct erofs_map_blocks *map) -{ - struct z_erofs_collection *cl; - - DBG_BUGON(clt->cl); - - /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */ - DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL); - DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); - - if (!PAGE_ALIGNED(map->m_pa)) { - DBG_BUGON(1); - return -EINVAL; - } - -repeat: - cl = cllookup(clt, inode, map); - if (!cl) { - cl = clregister(clt, inode, map); - - if (unlikely(cl == ERR_PTR(-EAGAIN))) - goto repeat; - } - - if (IS_ERR(cl)) - return PTR_ERR(cl); - - z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS, - cl->pagevec, cl->vcnt); - - clt->compressedpages = clt->pcl->compressed_pages; - if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */ - clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES; - return 0; -} - -/* - * keep in mind that no referenced pclusters will be freed - * only after a RCU grace period. - */ -static void z_erofs_rcu_callback(struct rcu_head *head) -{ - struct z_erofs_collection *const cl = - container_of(head, struct z_erofs_collection, rcu); - - kmem_cache_free(pcluster_cachep, - container_of(cl, struct z_erofs_pcluster, - primary_collection)); -} - -void erofs_workgroup_free_rcu(struct erofs_workgroup *grp) -{ - struct z_erofs_pcluster *const pcl = - container_of(grp, struct z_erofs_pcluster, obj); - struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl); - - call_rcu(&cl->rcu, z_erofs_rcu_callback); -} - -static void z_erofs_collection_put(struct z_erofs_collection *cl) -{ - struct z_erofs_pcluster *const pcl = - container_of(cl, struct z_erofs_pcluster, primary_collection); - - erofs_workgroup_put(&pcl->obj); -} - -static bool z_erofs_collector_end(struct z_erofs_collector *clt) -{ - struct z_erofs_collection *cl = clt->cl; - - if (!cl) - return false; - - z_erofs_pagevec_ctor_exit(&clt->vector, false); - mutex_unlock(&cl->lock); - - /* - * if all pending pages are added, don't hold its reference - * any longer if the pcluster isn't hosted by ourselves. 
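Dropping the collection reference here hands control back to the refcount rules in utils.c above: a workgroup whose count falls to 1 becomes reclaimable (the global shrink counter rises), and at 0 it is freed through RCU. A compact sketch of that lifecycle (plain ints instead of atomics, illustrative only):

#include <stdio.h>

static long global_shrink_cnt;	/* cf. erofs_global_shrink_cnt */

/* sketch of erofs_workgroup_put(): 1 == "only reclaim still cares",
 * 0 == "free it (via RCU in the real code)" */
static int workgroup_put(int *refcount)
{
	int count = --(*refcount);

	if (count == 1)
		global_shrink_cnt++;	/* now visible to the shrinker */
	else if (!count)
		printf("freeing workgroup\n");
	return count;
}

int main(void)
{
	int ref = 3;

	while (workgroup_put(&ref) > 0)
		printf("refcount=%d shrinkable=%ld\n", ref, global_shrink_cnt);
	return 0;
}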
- */ - if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE) - z_erofs_collection_put(cl); - - clt->cl = NULL; - return true; -} - -static inline struct page *__stagingpage_alloc(struct list_head *pagepool, - gfp_t gfp) -{ - struct page *page = erofs_allocpage(pagepool, gfp, true); - - page->mapping = Z_EROFS_MAPPING_STAGING; - return page; -} - -static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe, - unsigned int cachestrategy, - erofs_off_t la) -{ - if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED) - return false; - - if (fe->backmost) - return true; - - return cachestrategy >= EROFS_ZIP_CACHE_READAROUND && - la < fe->headoffset; -} - -static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, - struct page *page, - struct list_head *pagepool) -{ - struct inode *const inode = fe->inode; - struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode); - struct erofs_map_blocks *const map = &fe->map; - struct z_erofs_collector *const clt = &fe->clt; - const loff_t offset = page_offset(page); - bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED); - - enum z_erofs_cache_alloctype cache_strategy; - enum z_erofs_page_type page_type; - unsigned int cur, end, spiltted, index; - int err = 0; - - /* register locked file pages as online pages in pack */ - z_erofs_onlinepage_init(page); - - spiltted = 0; - end = PAGE_SIZE; -repeat: - cur = end - 1; - - /* lucky, within the range of the current map_blocks */ - if (offset + cur >= map->m_la && - offset + cur < map->m_la + map->m_llen) { - /* didn't get a valid collection previously (very rare) */ - if (!clt->cl) - goto restart_now; - goto hitted; - } - - /* go ahead the next map_blocks */ - debugln("%s: [out-of-range] pos %llu", __func__, offset + cur); - - if (z_erofs_collector_end(clt)) - fe->backmost = false; - - map->m_la = offset + cur; - map->m_llen = 0; - err = z_erofs_map_blocks_iter(inode, map, 0); - if (unlikely(err)) - goto err_out; - -restart_now: - if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) - goto hitted; - - err = z_erofs_collector_begin(clt, inode, map); - if (unlikely(err)) - goto err_out; - - /* preload all compressed pages (maybe downgrade role if necessary) */ - if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la)) - cache_strategy = DELAYEDALLOC; - else - cache_strategy = DONTALLOC; - - preload_compressed_pages(clt, MNGD_MAPPING(sbi), - cache_strategy, pagepool); - - tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED); -hitted: - cur = end - min_t(unsigned int, offset + end - map->m_la, end); - if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) { - zero_user_segment(page, cur, end); - goto next_part; - } - - /* let's derive page type */ - page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD : - (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE : - (tight ? 
Z_EROFS_PAGE_TYPE_EXCLUSIVE : - Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED)); - - if (cur) - tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED); - -retry: - err = z_erofs_attach_page(clt, page, page_type); - /* should allocate an additional staging page for pagevec */ - if (err == -EAGAIN) { - struct page *const newpage = - __stagingpage_alloc(pagepool, GFP_NOFS); - - err = z_erofs_attach_page(clt, newpage, - Z_EROFS_PAGE_TYPE_EXCLUSIVE); - if (likely(!err)) - goto retry; - } - - if (unlikely(err)) - goto err_out; - - index = page->index - (map->m_la >> PAGE_SHIFT); - - z_erofs_onlinepage_fixup(page, index, true); - - /* bump up the number of spiltted parts of a page */ - ++spiltted; - /* also update nr_pages */ - clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1); -next_part: - /* can be used for verification */ - map->m_llen = offset + cur - map->m_la; - - end = cur; - if (end > 0) - goto repeat; - -out: - z_erofs_onlinepage_endio(page); - - debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu", - __func__, page, spiltted, map->m_llen); - return err; - - /* if some error occurred while processing this page */ -err_out: - SetPageError(page); - goto out; -} - -static void z_erofs_vle_unzip_kickoff(void *ptr, int bios) -{ - tagptr1_t t = tagptr_init(tagptr1_t, ptr); - struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t); - bool background = tagptr_unfold_tags(t); - - if (!background) { - unsigned long flags; - - spin_lock_irqsave(&io->u.wait.lock, flags); - if (!atomic_add_return(bios, &io->pending_bios)) - wake_up_locked(&io->u.wait); - spin_unlock_irqrestore(&io->u.wait.lock, flags); - return; - } - - if (!atomic_add_return(bios, &io->pending_bios)) - queue_work(z_erofs_workqueue, &io->u.work); -} - -static inline void z_erofs_vle_read_endio(struct bio *bio) -{ - struct erofs_sb_info *sbi = NULL; - blk_status_t err = bio->bi_status; - struct bio_vec *bvec; - struct bvec_iter_all iter_all; - - bio_for_each_segment_all(bvec, bio, iter_all) { - struct page *page = bvec->bv_page; - bool cachemngd = false; - - DBG_BUGON(PageUptodate(page)); - DBG_BUGON(!page->mapping); - - if (unlikely(!sbi && !z_erofs_page_is_staging(page))) { - sbi = EROFS_SB(page->mapping->host->i_sb); - - if (time_to_inject(sbi, FAULT_READ_IO)) { - erofs_show_injection_info(FAULT_READ_IO); - err = BLK_STS_IOERR; - } - } - - /* sbi should already be gotten if the page is managed */ - if (sbi) - cachemngd = erofs_page_is_managed(sbi, page); - - if (unlikely(err)) - SetPageError(page); - else if (cachemngd) - SetPageUptodate(page); - - if (cachemngd) - unlock_page(page); - } - - z_erofs_vle_unzip_kickoff(bio->bi_private, -1); - bio_put(bio); -} - -static int z_erofs_decompress_pcluster(struct super_block *sb, - struct z_erofs_pcluster *pcl, - struct list_head *pagepool) -{ - struct erofs_sb_info *const sbi = EROFS_SB(sb); - const unsigned int clusterpages = BIT(pcl->clusterbits); - struct z_erofs_pagevec_ctor ctor; - unsigned int i, outputsize, llen, nr_pages; - struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES]; - struct page **pages, **compressed_pages, *page; - - enum z_erofs_page_type page_type; - bool overlapped, partial; - struct z_erofs_collection *cl; - int err; - - might_sleep(); - cl = z_erofs_primarycollection(pcl); - DBG_BUGON(!READ_ONCE(cl->nr_pages)); - - mutex_lock(&cl->lock); - nr_pages = cl->nr_pages; - - if (likely(nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES)) { - pages = pages_onstack; - } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES && - mutex_trylock(&z_pagemap_global_lock)) { - pages = 
z_pagemap_global; - } else { - gfp_t gfp_flags = GFP_KERNEL; - - if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES) - gfp_flags |= __GFP_NOFAIL; - - pages = kvmalloc_array(nr_pages, sizeof(struct page *), - gfp_flags); - - /* fallback to global pagemap for the lowmem scenario */ - if (unlikely(!pages)) { - mutex_lock(&z_pagemap_global_lock); - pages = z_pagemap_global; - } - } - - for (i = 0; i < nr_pages; ++i) - pages[i] = NULL; - - err = 0; - z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS, - cl->pagevec, 0); - - for (i = 0; i < cl->vcnt; ++i) { - unsigned int pagenr; - - page = z_erofs_pagevec_dequeue(&ctor, &page_type); - - /* all pages in pagevec ought to be valid */ - DBG_BUGON(!page); - DBG_BUGON(!page->mapping); - - if (z_erofs_put_stagingpage(pagepool, page)) - continue; - - if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD) - pagenr = 0; - else - pagenr = z_erofs_onlinepage_index(page); - - DBG_BUGON(pagenr >= nr_pages); - - /* - * currently EROFS doesn't support multiref(dedup), - * so here erroring out one multiref page. - */ - if (unlikely(pages[pagenr])) { - DBG_BUGON(1); - SetPageError(pages[pagenr]); - z_erofs_onlinepage_endio(pages[pagenr]); - err = -EFSCORRUPTED; - } - pages[pagenr] = page; - } - z_erofs_pagevec_ctor_exit(&ctor, true); - - overlapped = false; - compressed_pages = pcl->compressed_pages; - - for (i = 0; i < clusterpages; ++i) { - unsigned int pagenr; - - page = compressed_pages[i]; - - /* all compressed pages ought to be valid */ - DBG_BUGON(!page); - DBG_BUGON(!page->mapping); - - if (!z_erofs_page_is_staging(page)) { - if (erofs_page_is_managed(sbi, page)) { - if (unlikely(!PageUptodate(page))) - err = -EIO; - continue; - } - - /* - * only if non-head page can be selected - * for inplace decompression - */ - pagenr = z_erofs_onlinepage_index(page); - - DBG_BUGON(pagenr >= nr_pages); - if (unlikely(pages[pagenr])) { - DBG_BUGON(1); - SetPageError(pages[pagenr]); - z_erofs_onlinepage_endio(pages[pagenr]); - err = -EFSCORRUPTED; - } - pages[pagenr] = page; - - overlapped = true; - } - - /* PG_error needs checking for inplaced and staging pages */ - if (unlikely(PageError(page))) { - DBG_BUGON(PageUptodate(page)); - err = -EIO; - } - } - - if (unlikely(err)) - goto out; - - llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT; - if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) { - outputsize = llen; - partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH); - } else { - outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs; - partial = true; - } - - err = z_erofs_decompress(&(struct z_erofs_decompress_req) { - .sb = sb, - .in = compressed_pages, - .out = pages, - .pageofs_out = cl->pageofs, - .inputsize = PAGE_SIZE, - .outputsize = outputsize, - .alg = pcl->algorithmformat, - .inplace_io = overlapped, - .partial_decoding = partial - }, pagepool); - -out: - /* must handle all compressed pages before endding pages */ - for (i = 0; i < clusterpages; ++i) { - page = compressed_pages[i]; - - if (erofs_page_is_managed(sbi, page)) - continue; - - /* recycle all individual staging pages */ - (void)z_erofs_put_stagingpage(pagepool, page); - - WRITE_ONCE(compressed_pages[i], NULL); - } - - for (i = 0; i < nr_pages; ++i) { - page = pages[i]; - if (!page) - continue; - - DBG_BUGON(!page->mapping); - - /* recycle all individual staging pages */ - if (z_erofs_put_stagingpage(pagepool, page)) - continue; - - if (unlikely(err < 0)) - SetPageError(page); - - z_erofs_onlinepage_endio(page); - } - - if (pages == z_pagemap_global) - mutex_unlock(&z_pagemap_global_lock); - else if 
(unlikely(pages != pages_onstack)) - kvfree(pages); - - cl->nr_pages = 0; - cl->vcnt = 0; - - /* all cl locks MUST be taken before the following line */ - WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL); - - /* all cl locks SHOULD be released right now */ - mutex_unlock(&cl->lock); - - z_erofs_collection_put(cl); - return err; -} - -static void z_erofs_vle_unzip_all(struct super_block *sb, - struct z_erofs_unzip_io *io, - struct list_head *pagepool) -{ - z_erofs_next_pcluster_t owned = io->head; - - while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) { - struct z_erofs_pcluster *pcl; - - /* no possible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */ - DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL); - - /* no possible that 'owned' equals NULL */ - DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL); - - pcl = container_of(owned, struct z_erofs_pcluster, next); - owned = READ_ONCE(pcl->next); - - z_erofs_decompress_pcluster(sb, pcl, pagepool); - } -} - -static void z_erofs_vle_unzip_wq(struct work_struct *work) -{ - struct z_erofs_unzip_io_sb *iosb = - container_of(work, struct z_erofs_unzip_io_sb, io.u.work); - LIST_HEAD(pagepool); - - DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED); - z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool); - - put_pages_list(&pagepool); - kvfree(iosb); -} - -static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, - unsigned int nr, - struct list_head *pagepool, - struct address_space *mc, - gfp_t gfp) -{ - /* determined at compile time to avoid too many #ifdefs */ - const bool nocache = __builtin_constant_p(mc) ? !mc : false; - const pgoff_t index = pcl->obj.index; - bool tocache = false; - - struct address_space *mapping; - struct page *oldpage, *page; - - compressed_page_t t; - int justfound; - -repeat: - page = READ_ONCE(pcl->compressed_pages[nr]); - oldpage = page; - - if (!page) - goto out_allocpage; - - /* - * the cached page has not been allocated and - * an placeholder is out there, prepare it now. - */ - if (!nocache && page == PAGE_UNALLOCATED) { - tocache = true; - goto out_allocpage; - } - - /* process the target tagged pointer */ - t = tagptr_init(compressed_page_t, page); - justfound = tagptr_unfold_tags(t); - page = tagptr_unfold_ptr(t); - - mapping = READ_ONCE(page->mapping); - - /* - * if managed cache is disabled, it's no way to - * get such a cached-like page. - */ - if (nocache) { - /* if managed cache is disabled, it is impossible `justfound' */ - DBG_BUGON(justfound); - - /* and it should be locked, not uptodate, and not truncated */ - DBG_BUGON(!PageLocked(page)); - DBG_BUGON(PageUptodate(page)); - DBG_BUGON(!mapping); - goto out; - } - - /* - * unmanaged (file) pages are all locked solidly, - * therefore it is impossible for `mapping' to be NULL. - */ - if (mapping && mapping != mc) - /* ought to be unmanaged pages */ - goto out; - - lock_page(page); - - /* only true if page reclaim goes wrong, should never happen */ - DBG_BUGON(justfound && PagePrivate(page)); - - /* the page is still in manage cache */ - if (page->mapping == mc) { - WRITE_ONCE(pcl->compressed_pages[nr], page); - - ClearPageError(page); - if (!PagePrivate(page)) { - /* - * impossible to be !PagePrivate(page) for - * the current restriction as well if - * the page is already in compressed_pages[]. 
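
(Illustrative aside, not part of the original patch: the `justfound' handling above relies on EROFS's tagptr helpers, which fold a small tag into the unused low bits of an aligned pointer. A minimal userspace sketch of the same trick, with hypothetical names:)

	#include <assert.h>
	#include <stdint.h>

	/* fold a 1-bit tag into the low bit of an aligned pointer */
	static void *tag_fold(void *ptr, unsigned int tag)
	{
		assert(!((uintptr_t)ptr & 1) && tag <= 1);
		return (void *)((uintptr_t)ptr | tag);
	}

	static void *tag_unfold_ptr(void *t)
	{
		return (void *)((uintptr_t)t & ~(uintptr_t)1);
	}

	static unsigned int tag_unfold_tags(void *t)
	{
		return (uintptr_t)t & 1;
	}
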
- */ - DBG_BUGON(!justfound); - - justfound = 0; - set_page_private(page, (unsigned long)pcl); - SetPagePrivate(page); - } - - /* no need to submit io if it is already up-to-date */ - if (PageUptodate(page)) { - unlock_page(page); - page = NULL; - } - goto out; - } - - /* - * the managed page has been truncated, it's unsafe to - * reuse this one, let's allocate a new cache-managed page. - */ - DBG_BUGON(page->mapping); - DBG_BUGON(!justfound); - - tocache = true; - unlock_page(page); - put_page(page); -out_allocpage: - page = __stagingpage_alloc(pagepool, gfp); - if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) { - list_add(&page->lru, pagepool); - cpu_relax(); - goto repeat; - } - if (nocache || !tocache) - goto out; - if (add_to_page_cache_lru(page, mc, index + nr, gfp)) { - page->mapping = Z_EROFS_MAPPING_STAGING; - goto out; - } - - set_page_private(page, (unsigned long)pcl); - SetPagePrivate(page); -out: /* the only exit (for tracing and debugging) */ - return page; -} - -static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb, - struct z_erofs_unzip_io *io, - bool foreground) -{ - struct z_erofs_unzip_io_sb *iosb; - - if (foreground) { - /* waitqueue available for foreground io */ - DBG_BUGON(!io); - - init_waitqueue_head(&io->u.wait); - atomic_set(&io->pending_bios, 0); - goto out; - } - - iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL); - DBG_BUGON(!iosb); - - /* initialize fields in the allocated descriptor */ - io = &iosb->io; - iosb->sb = sb; - INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq); -out: - io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED; - return io; -} - -/* define decompression jobqueue types */ -enum { - JQ_BYPASS, - JQ_SUBMIT, - NR_JOBQUEUES, -}; - -static void *jobqueueset_init(struct super_block *sb, - z_erofs_next_pcluster_t qtail[], - struct z_erofs_unzip_io *q[], - struct z_erofs_unzip_io *fgq, - bool forcefg) -{ - /* - * if managed cache is enabled, bypass jobqueue is needed, - * no need to read from device for all pclusters in this queue. - */ - q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true); - qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head; - - q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg); - qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; - - return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg)); -} - -static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, - z_erofs_next_pcluster_t qtail[], - z_erofs_next_pcluster_t owned_head) -{ - z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT]; - z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS]; - - DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); - if (owned_head == Z_EROFS_PCLUSTER_TAIL) - owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED; - - WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED); - - WRITE_ONCE(*submit_qtail, owned_head); - WRITE_ONCE(*bypass_qtail, &pcl->next); - - qtail[JQ_BYPASS] = &pcl->next; -} - -static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[], - unsigned int nr_bios, - bool force_fg) -{ - /* - * although background is preferred, no one is pending for submission. - * don't issue workqueue for decompression but drop it directly instead. 
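
(Side note, not from the patch: pending_bios in these jobqueues follows a completion-count idiom: the submit path later bumps the counter by the number of bios it issued, each bio completion drops it by one, and whichever side brings it to zero kicks decompression. A hedged C11 analogue:)

	#include <stdatomic.h>

	static atomic_int pending_bios;	/* starts at 0 */

	/* submit path calls kickoff(nr_bios); each endio calls kickoff(-1);
	 * a nonzero return means the caller must start decompression */
	static int kickoff(int bios)
	{
		return atomic_fetch_add(&pending_bios, bios) + bios == 0;
	}
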
- */ - if (force_fg || nr_bios) - return false; - - kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io)); - return true; -} - -static bool z_erofs_vle_submit_all(struct super_block *sb, - z_erofs_next_pcluster_t owned_head, - struct list_head *pagepool, - struct z_erofs_unzip_io *fgq, - bool force_fg) -{ - struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb); - z_erofs_next_pcluster_t qtail[NR_JOBQUEUES]; - struct z_erofs_unzip_io *q[NR_JOBQUEUES]; - struct bio *bio; - void *bi_private; - /* since bio will be NULL, no need to initialize last_index */ - pgoff_t uninitialized_var(last_index); - bool force_submit = false; - unsigned int nr_bios; - - if (unlikely(owned_head == Z_EROFS_PCLUSTER_TAIL)) - return false; - - force_submit = false; - bio = NULL; - nr_bios = 0; - bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg); - - /* by default, all need io submission */ - q[JQ_SUBMIT]->head = owned_head; - - do { - struct z_erofs_pcluster *pcl; - unsigned int clusterpages; - pgoff_t first_index; - struct page *page; - unsigned int i = 0, bypass = 0; - int err; - - /* no possible 'owned_head' equals the following */ - DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); - DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL); - - pcl = container_of(owned_head, struct z_erofs_pcluster, next); - - clusterpages = BIT(pcl->clusterbits); - - /* close the main owned chain at first */ - owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, - Z_EROFS_PCLUSTER_TAIL_CLOSED); - - first_index = pcl->obj.index; - force_submit |= (first_index != last_index + 1); - -repeat: - page = pickup_page_for_submission(pcl, i, pagepool, - MNGD_MAPPING(sbi), - GFP_NOFS); - if (!page) { - force_submit = true; - ++bypass; - goto skippage; - } - - if (bio && force_submit) { -submit_bio_retry: - __submit_bio(bio, REQ_OP_READ, 0); - bio = NULL; - } - - if (!bio) { - bio = erofs_grab_bio(sb, first_index + i, - BIO_MAX_PAGES, bi_private, - z_erofs_vle_read_endio, true); - ++nr_bios; - } - - err = bio_add_page(bio, page, PAGE_SIZE, 0); - if (err < PAGE_SIZE) - goto submit_bio_retry; - - force_submit = false; - last_index = first_index + i; -skippage: - if (++i < clusterpages) - goto repeat; - - if (bypass < clusterpages) - qtail[JQ_SUBMIT] = &pcl->next; - else - move_to_bypass_jobqueue(pcl, qtail, owned_head); - } while (owned_head != Z_EROFS_PCLUSTER_TAIL); - - if (bio) - __submit_bio(bio, REQ_OP_READ, 0); - - if (postsubmit_is_all_bypassed(q, nr_bios, force_fg)) - return true; - - z_erofs_vle_unzip_kickoff(bi_private, nr_bios); - return true; -} - -static void z_erofs_submit_and_unzip(struct super_block *sb, - struct z_erofs_collector *clt, - struct list_head *pagepool, - bool force_fg) -{ - struct z_erofs_unzip_io io[NR_JOBQUEUES]; - - if (!z_erofs_vle_submit_all(sb, clt->owned_head, - pagepool, io, force_fg)) - return; - - /* decompress no I/O pclusters immediately */ - z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool); - - if (!force_fg) - return; - - /* wait until all bios are completed */ - wait_event(io[JQ_SUBMIT].u.wait, - !atomic_read(&io[JQ_SUBMIT].pending_bios)); - - /* let's synchronous decompression */ - z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool); -} - -static int z_erofs_vle_normalaccess_readpage(struct file *file, - struct page *page) -{ - struct inode *const inode = page->mapping->host; - struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); - int err; - LIST_HEAD(pagepool); - - trace_erofs_readpage(page, false); - - f.headoffset = (erofs_off_t)page->index << 
PAGE_SHIFT; - - err = z_erofs_do_read_page(&f, page, &pagepool); - (void)z_erofs_collector_end(&f.clt); - - /* if some compressed cluster ready, need submit them anyway */ - z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true); - - if (err) - errln("%s, failed to read, err [%d]", __func__, err); - - if (f.map.mpage) - put_page(f.map.mpage); - - /* clean up the remaining free pages */ - put_pages_list(&pagepool); - return err; -} - -static bool should_decompress_synchronously(struct erofs_sb_info *sbi, - unsigned int nr) -{ - return nr <= sbi->max_sync_decompress_pages; -} - -static int z_erofs_vle_normalaccess_readpages(struct file *filp, - struct address_space *mapping, - struct list_head *pages, - unsigned int nr_pages) -{ - struct inode *const inode = mapping->host; - struct erofs_sb_info *const sbi = EROFS_I_SB(inode); - - bool sync = should_decompress_synchronously(sbi, nr_pages); - struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); - gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); - struct page *head = NULL; - LIST_HEAD(pagepool); - - trace_erofs_readpages(mapping->host, lru_to_page(pages), - nr_pages, false); - - f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT; - - for (; nr_pages; --nr_pages) { - struct page *page = lru_to_page(pages); - - prefetchw(&page->flags); - list_del(&page->lru); - - /* - * A pure asynchronous readahead is indicated if - * a PG_readahead marked page is hitted at first. - * Let's also do asynchronous decompression for this case. - */ - sync &= !(PageReadahead(page) && !head); - - if (add_to_page_cache_lru(page, mapping, page->index, gfp)) { - list_add(&page->lru, &pagepool); - continue; - } - - set_page_private(page, (unsigned long)head); - head = page; - } - - while (head) { - struct page *page = head; - int err; - - /* traversal in reverse order */ - head = (void *)page_private(page); - - err = z_erofs_do_read_page(&f, page, &pagepool); - if (err) { - struct erofs_vnode *vi = EROFS_V(inode); - - errln("%s, readahead error at page %lu of nid %llu", - __func__, page->index, vi->nid); - } - put_page(page); - } - - (void)z_erofs_collector_end(&f.clt); - - z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync); - - if (f.map.mpage) - put_page(f.map.mpage); - - /* clean up the remaining free pages */ - put_pages_list(&pagepool); - return 0; -} - -const struct address_space_operations z_erofs_vle_normalaccess_aops = { - .readpage = z_erofs_vle_normalaccess_readpage, - .readpages = z_erofs_vle_normalaccess_readpages, -}; - diff --git a/drivers/staging/erofs/zdata.h b/drivers/staging/erofs/zdata.h deleted file mode 100644 index e11fe1959ca2..000000000000 --- a/drivers/staging/erofs/zdata.h +++ /dev/null @@ -1,195 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/zdata.h - * - * Copyright (C) 2018 HUAWEI, Inc. - * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_FS_ZDATA_H -#define __EROFS_FS_ZDATA_H - -#include "internal.h" -#include "zpvec.h" - -#define Z_EROFS_NR_INLINE_PAGEVECS 3 - -/* - * Structure fields follow one of the following exclusion rules. - * - * I: Modifiable by initialization/destruction paths and read-only - * for everyone else; - * - * L: Field should be protected by pageset lock; - * - * A: Field should be accessed / updated in atomic for parallelized code. 
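
(Illustrative aside, not part of the original patch: the readpages path above strings freshly added pages together through page_private and then walks them back newest-first, so no extra allocation is needed. The same intrusive LIFO in plain C, with a bare struct standing in for struct page; it assumes pointers fit in unsigned long, as the kernel code here does:)

	#include <stdio.h>

	struct fake_page {
		unsigned long private;	/* page_private(page) stand-in */
		long index;
	};

	int main(void)
	{
		struct fake_page p[3] = { { 0, 0 }, { 0, 1 }, { 0, 2 } };
		struct fake_page *head = NULL, *cur;
		int i;

		for (i = 0; i < 3; i++) {	/* push in file order */
			p[i].private = (unsigned long)head;
			head = &p[i];
		}
		for (cur = head; cur; cur = (struct fake_page *)cur->private)
			printf("page %ld\n", cur->index);	/* 2, 1, 0 */
		return 0;
	}
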
- */ -struct z_erofs_collection { - struct mutex lock; - - /* I: page offset of start position of decompression */ - unsigned short pageofs; - - /* L: maximum relative page index in pagevec[] */ - unsigned short nr_pages; - - /* L: total number of pages in pagevec[] */ - unsigned int vcnt; - - union { - /* L: inline a certain number of pagevecs for bootstrap */ - erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS]; - - /* I: can be used to free the pcluster by RCU. */ - struct rcu_head rcu; - }; -}; - -#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001 -#define Z_EROFS_PCLUSTER_LENGTH_BIT 1 - -/* - * let's leave a type here in case of introducing - * another tagged pointer later. - */ -typedef void *z_erofs_next_pcluster_t; - -struct z_erofs_pcluster { - struct erofs_workgroup obj; - struct z_erofs_collection primary_collection; - - /* A: point to next chained pcluster or TAILs */ - z_erofs_next_pcluster_t next; - - /* A: compressed pages (including multi-usage pages) */ - struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES]; - - /* A: lower limit of decompressed length and if full length or not */ - unsigned int length; - - /* I: compression algorithm format */ - unsigned char algorithmformat; - /* I: bit shift of physical cluster size */ - unsigned char clusterbits; -}; - -#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection) - -/* let's avoid the valid 32-bit kernel addresses */ - -/* the chained workgroup has't submitted io (still open) */ -#define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE) -/* the chained workgroup has already submitted io */ -#define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD) - -#define Z_EROFS_PCLUSTER_NIL (NULL) - -#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster) - -struct z_erofs_unzip_io { - atomic_t pending_bios; - z_erofs_next_pcluster_t head; - - union { - wait_queue_head_t wait; - struct work_struct work; - } u; -}; - -struct z_erofs_unzip_io_sb { - struct z_erofs_unzip_io io; - struct super_block *sb; -}; - -#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping) -static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi, - struct page *page) -{ - return page->mapping == MNGD_MAPPING(sbi); -} - -#define Z_EROFS_ONLINEPAGE_COUNT_BITS 2 -#define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1) -#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS) - -/* - * waiters (aka. 
ongoing_packs): # to unlock the page - * sub-index: 0 - for partial page, >= 1 full page sub-index - */ -typedef atomic_t z_erofs_onlinepage_t; - -/* type punning */ -union z_erofs_onlinepage_converter { - z_erofs_onlinepage_t *o; - unsigned long *v; -}; - -static inline unsigned int z_erofs_onlinepage_index(struct page *page) -{ - union z_erofs_onlinepage_converter u; - - DBG_BUGON(!PagePrivate(page)); - u.v = &page_private(page); - - return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; -} - -static inline void z_erofs_onlinepage_init(struct page *page) -{ - union { - z_erofs_onlinepage_t o; - unsigned long v; - /* keep from being unlocked in advance */ - } u = { .o = ATOMIC_INIT(1) }; - - set_page_private(page, u.v); - smp_wmb(); - SetPagePrivate(page); -} - -static inline void z_erofs_onlinepage_fixup(struct page *page, - uintptr_t index, bool down) -{ - unsigned long *p, o, v, id; -repeat: - p = &page_private(page); - o = READ_ONCE(*p); - - id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; - if (id) { - if (!index) - return; - - DBG_BUGON(id != index); - } - - v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | - ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down); - if (cmpxchg(p, o, v) != o) - goto repeat; -} - -static inline void z_erofs_onlinepage_endio(struct page *page) -{ - union z_erofs_onlinepage_converter u; - unsigned int v; - - DBG_BUGON(!PagePrivate(page)); - u.v = &page_private(page); - - v = atomic_dec_return(u.o); - if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) { - ClearPagePrivate(page); - if (!PageError(page)) - SetPageUptodate(page); - unlock_page(page); - } - debugln("%s, page %p value %x", __func__, page, atomic_read(u.o)); -} - -#define Z_EROFS_VMAP_ONSTACK_PAGES \ - min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U) -#define Z_EROFS_VMAP_GLOBAL_PAGES 2048 - -#endif - diff --git a/drivers/staging/erofs/zmap.c b/drivers/staging/erofs/zmap.c deleted file mode 100644 index 774dacbc5b32..000000000000 --- a/drivers/staging/erofs/zmap.c +++ /dev/null @@ -1,468 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * linux/drivers/staging/erofs/zmap.c - * - * Copyright (C) 2018-2019 HUAWEI, Inc. 
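
(Illustrative aside, not part of the original patch: the z_erofs_onlinepage helpers above pack two fields into the single word kept in page_private: the low Z_EROFS_ONLINEPAGE_COUNT_BITS bits count pending sub-requests, the remaining bits hold the output page index. The packing arithmetic on its own, with hypothetical names:)

	#define COUNT_BITS	2
	#define COUNT_MASK	((1u << COUNT_BITS) - 1)

	static unsigned long op_pack(unsigned long index, unsigned int count)
	{
		return (index << COUNT_BITS) | (count & COUNT_MASK);
	}

	static unsigned long op_index(unsigned long v) { return v >> COUNT_BITS; }
	static unsigned int op_count(unsigned long v)  { return v & COUNT_MASK; }

	/* op_pack(5, 2) == 0x16: output index 5 with two pending parts;
	 * endio decrements until the count bits reach zero, then unlocks */
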
- * http://www.huawei.com/
- * Created by Gao Xiang
- */
-#include "internal.h"
-#include <asm/unaligned.h>
-#include <trace/events/erofs.h>
-
-int z_erofs_fill_inode(struct inode *inode)
-{
-	struct erofs_vnode *const vi = EROFS_V(inode);
-
-	if (vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
-		vi->z_advise = 0;
-		vi->z_algorithmtype[0] = 0;
-		vi->z_algorithmtype[1] = 0;
-		vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
-		vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits;
-		vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits;
-		set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
-	}
-
-	inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops;
-	return 0;
-}
-
-static int fill_inode_lazy(struct inode *inode)
-{
-	struct erofs_vnode *const vi = EROFS_V(inode);
-	struct super_block *const sb = inode->i_sb;
-	int err;
-	erofs_off_t pos;
-	struct page *page;
-	void *kaddr;
-	struct z_erofs_map_header *h;
-
-	if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags))
-		return 0;
-
-	if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_Z_BIT, TASK_KILLABLE))
-		return -ERESTARTSYS;
-
-	err = 0;
-	if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags))
-		goto out_unlock;
-
-	DBG_BUGON(vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY);
-
-	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
-		    vi->xattr_isize, 8);
-	page = erofs_get_meta_page(sb, erofs_blknr(pos), false);
-	if (IS_ERR(page)) {
-		err = PTR_ERR(page);
-		goto out_unlock;
-	}
-
-	kaddr = kmap_atomic(page);
-
-	h = kaddr + erofs_blkoff(pos);
-	vi->z_advise = le16_to_cpu(h->h_advise);
-	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
-	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
-
-	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
-		errln("unknown compression format %u for nid %llu, please upgrade kernel",
-		      vi->z_algorithmtype[0], vi->nid);
-		err = -EOPNOTSUPP;
-		goto unmap_done;
-	}
-
-	vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
-	vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits +
-					((h->h_clusterbits >> 3) & 3);
-
-	if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) {
-		errln("unsupported physical clusterbits %u for nid %llu, please upgrade kernel",
-		      vi->z_physical_clusterbits[0], vi->nid);
-		err = -EOPNOTSUPP;
-		goto unmap_done;
-	}
-
-	vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
-					((h->h_clusterbits >> 5) & 7);
-	set_bit(EROFS_V_Z_INITED_BIT, &vi->flags);
-unmap_done:
-	kunmap_atomic(kaddr);
-	unlock_page(page);
-	put_page(page);
-out_unlock:
-	clear_and_wake_up_bit(EROFS_V_BL_Z_BIT, &vi->flags);
-	return err;
-}
-
-struct z_erofs_maprecorder {
-	struct inode *inode;
-	struct erofs_map_blocks *map;
-	void *kaddr;
-
-	unsigned long lcn;
-	/* compression extent information gathered */
-	u8 type;
-	u16 clusterofs;
-	u16 delta[2];
-	erofs_blk_t pblk;
-};
-
-static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
-				  erofs_blk_t eblk)
-{
-	struct super_block *const sb = m->inode->i_sb;
-	struct erofs_map_blocks *const map = m->map;
-	struct page *mpage = map->mpage;
-
-	if (mpage) {
-		if (mpage->index == eblk) {
-			if (!m->kaddr)
-				m->kaddr = kmap_atomic(mpage);
-			return 0;
-		}
-
-		if (m->kaddr) {
-			kunmap_atomic(m->kaddr);
-			m->kaddr = NULL;
-		}
-		put_page(mpage);
-	}
-
-	mpage = erofs_get_meta_page(sb, eblk, false);
-	if (IS_ERR(mpage)) {
-		map->mpage = NULL;
-		return PTR_ERR(mpage);
-	}
-	m->kaddr = kmap_atomic(mpage);
-	unlock_page(mpage);
-	map->mpage = mpage;
-	return 0;
-}
-
-static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
-					     unsigned long lcn)
-{
-	struct inode *const inode =
m->inode; - struct erofs_vnode *const vi = EROFS_V(inode); - const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid); - const erofs_off_t pos = - Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize + - vi->xattr_isize) + - lcn * sizeof(struct z_erofs_vle_decompressed_index); - struct z_erofs_vle_decompressed_index *di; - unsigned int advise, type; - int err; - - err = z_erofs_reload_indexes(m, erofs_blknr(pos)); - if (err) - return err; - - m->lcn = lcn; - di = m->kaddr + erofs_blkoff(pos); - - advise = le16_to_cpu(di->di_advise); - type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) & - ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1); - switch (type) { - case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: - m->clusterofs = 1 << vi->z_logical_clusterbits; - m->delta[0] = le16_to_cpu(di->di_u.delta[0]); - m->delta[1] = le16_to_cpu(di->di_u.delta[1]); - break; - case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: - m->clusterofs = le16_to_cpu(di->di_clusterofs); - m->pblk = le32_to_cpu(di->di_u.blkaddr); - break; - default: - DBG_BUGON(1); - return -EOPNOTSUPP; - } - m->type = type; - return 0; -} - -static unsigned int decode_compactedbits(unsigned int lobits, - unsigned int lomask, - u8 *in, unsigned int pos, u8 *type) -{ - const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7); - const unsigned int lo = v & lomask; - - *type = (v >> lobits) & 3; - return lo; -} - -static int unpack_compacted_index(struct z_erofs_maprecorder *m, - unsigned int amortizedshift, - unsigned int eofs) -{ - struct erofs_vnode *const vi = EROFS_V(m->inode); - const unsigned int lclusterbits = vi->z_logical_clusterbits; - const unsigned int lomask = (1 << lclusterbits) - 1; - unsigned int vcnt, base, lo, encodebits, nblk; - int i; - u8 *in, type; - - if (1 << amortizedshift == 4) - vcnt = 2; - else if (1 << amortizedshift == 2 && lclusterbits == 12) - vcnt = 16; - else - return -EOPNOTSUPP; - - encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; - base = round_down(eofs, vcnt << amortizedshift); - in = m->kaddr + base; - - i = (eofs - base) >> amortizedshift; - - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * i, &type); - m->type = type; - if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) { - m->clusterofs = 1 << lclusterbits; - if (i + 1 != vcnt) { - m->delta[0] = lo; - return 0; - } - /* - * since the last lcluster in the pack is special, - * of which lo saves delta[1] rather than delta[0]. - * Hence, get delta[0] by the previous lcluster indirectly. 
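
(Illustrative aside, not part of the original patch: in the 2B-per-lcluster format above, vcnt == 16 and amortizedshift == 1, so encodebits works out to ((16 << 1) - 4) * 8 / 16 = 14 bits per entry: a 12-bit `lo' field plus the 2-bit cluster type that decode_compactedbits() extracts. A rough standalone version of that extraction, assuming a little-endian host:)

	#include <stdint.h>
	#include <string.h>

	static unsigned int decode14(const uint8_t *in, unsigned int i,
				     unsigned int *type)
	{
		const unsigned int pos = 14 * i;	/* encodebits * i */
		uint32_t v;

		memcpy(&v, in + pos / 8, sizeof(v));	/* get_unaligned_le32() */
		v >>= pos & 7;
		*type = (v >> 12) & 3;		/* 2-bit cluster type */
		return v & 0xfff;		/* 12-bit lo field */
	}
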
- */ - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * (i - 1), &type); - if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) - lo = 0; - m->delta[0] = lo + 1; - return 0; - } - m->clusterofs = lo; - m->delta[0] = 0; - /* figout out blkaddr (pblk) for HEAD lclusters */ - nblk = 1; - while (i > 0) { - --i; - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * i, &type); - if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) - i -= lo; - - if (i >= 0) - ++nblk; - } - in += (vcnt << amortizedshift) - sizeof(__le32); - m->pblk = le32_to_cpu(*(__le32 *)in) + nblk; - return 0; -} - -static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m, - unsigned long lcn) -{ - struct inode *const inode = m->inode; - struct erofs_vnode *const vi = EROFS_V(inode); - const unsigned int lclusterbits = vi->z_logical_clusterbits; - const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) + - vi->inode_isize + vi->xattr_isize, 8) + - sizeof(struct z_erofs_map_header); - const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ); - unsigned int compacted_4b_initial, compacted_2b; - unsigned int amortizedshift; - erofs_off_t pos; - int err; - - if (lclusterbits != 12) - return -EOPNOTSUPP; - - if (lcn >= totalidx) - return -EINVAL; - - m->lcn = lcn; - /* used to align to 32-byte (compacted_2b) alignment */ - compacted_4b_initial = (32 - ebase % 32) / 4; - if (compacted_4b_initial == 32 / 4) - compacted_4b_initial = 0; - - if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) - compacted_2b = rounddown(totalidx - compacted_4b_initial, 16); - else - compacted_2b = 0; - - pos = ebase; - if (lcn < compacted_4b_initial) { - amortizedshift = 2; - goto out; - } - pos += compacted_4b_initial * 4; - lcn -= compacted_4b_initial; - - if (lcn < compacted_2b) { - amortizedshift = 1; - goto out; - } - pos += compacted_2b * 2; - lcn -= compacted_2b; - amortizedshift = 2; -out: - pos += lcn * (1 << amortizedshift); - err = z_erofs_reload_indexes(m, erofs_blknr(pos)); - if (err) - return err; - return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos)); -} - -static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m, - unsigned int lcn) -{ - const unsigned int datamode = EROFS_V(m->inode)->datamode; - - if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) - return vle_legacy_load_cluster_from_disk(m, lcn); - - if (datamode == EROFS_INODE_FLAT_COMPRESSION) - return compacted_load_cluster_from_disk(m, lcn); - - return -EINVAL; -} - -static int vle_extent_lookback(struct z_erofs_maprecorder *m, - unsigned int lookback_distance) -{ - struct erofs_vnode *const vi = EROFS_V(m->inode); - struct erofs_map_blocks *const map = m->map; - const unsigned int lclusterbits = vi->z_logical_clusterbits; - unsigned long lcn = m->lcn; - int err; - - if (lcn < lookback_distance) { - errln("bogus lookback distance @ nid %llu", vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } - - /* load extent head logical cluster if needed */ - lcn -= lookback_distance; - err = vle_load_cluster_from_disk(m, lcn); - if (err) - return err; - - switch (m->type) { - case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: - if (unlikely(!m->delta[0])) { - errln("invalid lookback distance 0 at nid %llu", - vi->nid); - DBG_BUGON(1); - return -EFSCORRUPTED; - } - return vle_extent_lookback(m, m->delta[0]); - case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: - map->m_flags &= ~EROFS_MAP_ZIPPED; - /* fallthrough */ - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: - map->m_la = (lcn << lclusterbits) | m->clusterofs; - break; - default: - 
errln("unknown type %u at lcn %lu of nid %llu", - m->type, lcn, vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; - } - return 0; -} - -int z_erofs_map_blocks_iter(struct inode *inode, - struct erofs_map_blocks *map, - int flags) -{ - struct erofs_vnode *const vi = EROFS_V(inode); - struct z_erofs_maprecorder m = { - .inode = inode, - .map = map, - }; - int err = 0; - unsigned int lclusterbits, endoff; - unsigned long long ofs, end; - - trace_z_erofs_map_blocks_iter_enter(inode, map, flags); - - /* when trying to read beyond EOF, leave it unmapped */ - if (unlikely(map->m_la >= inode->i_size)) { - map->m_llen = map->m_la + 1 - inode->i_size; - map->m_la = inode->i_size; - map->m_flags = 0; - goto out; - } - - err = fill_inode_lazy(inode); - if (err) - goto out; - - lclusterbits = vi->z_logical_clusterbits; - ofs = map->m_la; - m.lcn = ofs >> lclusterbits; - endoff = ofs & ((1 << lclusterbits) - 1); - - err = vle_load_cluster_from_disk(&m, m.lcn); - if (err) - goto unmap_out; - - map->m_flags = EROFS_MAP_ZIPPED; /* by default, compressed */ - end = (m.lcn + 1ULL) << lclusterbits; - - switch (m.type) { - case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: - if (endoff >= m.clusterofs) - map->m_flags &= ~EROFS_MAP_ZIPPED; - /* fallthrough */ - case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: - if (endoff >= m.clusterofs) { - map->m_la = (m.lcn << lclusterbits) | m.clusterofs; - break; - } - /* m.lcn should be >= 1 if endoff < m.clusterofs */ - if (unlikely(!m.lcn)) { - errln("invalid logical cluster 0 at nid %llu", - vi->nid); - err = -EFSCORRUPTED; - goto unmap_out; - } - end = (m.lcn << lclusterbits) | m.clusterofs; - map->m_flags |= EROFS_MAP_FULL_MAPPED; - m.delta[0] = 1; - /* fallthrough */ - case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: - /* get the correspoinding first chunk */ - err = vle_extent_lookback(&m, m.delta[0]); - if (unlikely(err)) - goto unmap_out; - break; - default: - errln("unknown type %u at offset %llu of nid %llu", - m.type, ofs, vi->nid); - err = -EOPNOTSUPP; - goto unmap_out; - } - - map->m_llen = end - map->m_la; - map->m_plen = 1 << lclusterbits; - map->m_pa = blknr_to_addr(m.pblk); - map->m_flags |= EROFS_MAP_MAPPED; - -unmap_out: - if (m.kaddr) - kunmap_atomic(m.kaddr); - -out: - debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o", - __func__, map->m_la, map->m_pa, - map->m_llen, map->m_plen, map->m_flags); - - trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err); - - /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */ - DBG_BUGON(err < 0 && err != -ENOMEM); - return err; -} - diff --git a/drivers/staging/erofs/zpvec.h b/drivers/staging/erofs/zpvec.h deleted file mode 100644 index 9798f5627786..000000000000 --- a/drivers/staging/erofs/zpvec.h +++ /dev/null @@ -1,159 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * linux/drivers/staging/erofs/zpvec.h - * - * Copyright (C) 2018 HUAWEI, Inc. 
- * http://www.huawei.com/ - * Created by Gao Xiang - */ -#ifndef __EROFS_FS_ZPVEC_H -#define __EROFS_FS_ZPVEC_H - -#include "tagptr.h" - -/* page type in pagevec for decompress subsystem */ -enum z_erofs_page_type { - /* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */ - Z_EROFS_PAGE_TYPE_EXCLUSIVE, - - Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED, - - Z_EROFS_VLE_PAGE_TYPE_HEAD, - Z_EROFS_VLE_PAGE_TYPE_MAX -}; - -extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0") - __bad_page_type_exclusive(void); - -/* pagevec tagged pointer */ -typedef tagptr2_t erofs_vtptr_t; - -/* pagevec collector */ -struct z_erofs_pagevec_ctor { - struct page *curr, *next; - erofs_vtptr_t *pages; - - unsigned int nr, index; -}; - -static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor, - bool atomic) -{ - if (!ctor->curr) - return; - - if (atomic) - kunmap_atomic(ctor->pages); - else - kunmap(ctor->curr); -} - -static inline struct page * -z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor, - unsigned int nr) -{ - unsigned int index; - - /* keep away from occupied pages */ - if (ctor->next) - return ctor->next; - - for (index = 0; index < nr; ++index) { - const erofs_vtptr_t t = ctor->pages[index]; - const unsigned int tags = tagptr_unfold_tags(t); - - if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE) - return tagptr_unfold_ptr(t); - } - DBG_BUGON(nr >= ctor->nr); - return NULL; -} - -static inline void -z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor, - bool atomic) -{ - struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr); - - z_erofs_pagevec_ctor_exit(ctor, atomic); - - ctor->curr = next; - ctor->next = NULL; - ctor->pages = atomic ? - kmap_atomic(ctor->curr) : kmap(ctor->curr); - - ctor->nr = PAGE_SIZE / sizeof(struct page *); - ctor->index = 0; -} - -static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor, - unsigned int nr, - erofs_vtptr_t *pages, - unsigned int i) -{ - ctor->nr = nr; - ctor->curr = ctor->next = NULL; - ctor->pages = pages; - - if (i >= nr) { - i -= nr; - z_erofs_pagevec_ctor_pagedown(ctor, false); - while (i > ctor->nr) { - i -= ctor->nr; - z_erofs_pagevec_ctor_pagedown(ctor, false); - } - } - ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i); - ctor->index = i; -} - -static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor, - struct page *page, - enum z_erofs_page_type type, - bool *occupied) -{ - *occupied = false; - if (unlikely(!ctor->next && type)) - if (ctor->index + 1 == ctor->nr) - return false; - - if (unlikely(ctor->index >= ctor->nr)) - z_erofs_pagevec_ctor_pagedown(ctor, false); - - /* exclusive page type must be 0 */ - if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL) - __bad_page_type_exclusive(); - - /* should remind that collector->next never equal to 1, 2 */ - if (type == (uintptr_t)ctor->next) { - ctor->next = page; - *occupied = true; - } - ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type); - return true; -} - -static inline struct page * -z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor, - enum z_erofs_page_type *type) -{ - erofs_vtptr_t t; - - if (unlikely(ctor->index >= ctor->nr)) { - DBG_BUGON(!ctor->next); - z_erofs_pagevec_ctor_pagedown(ctor, true); - } - - t = ctor->pages[ctor->index]; - - *type = tagptr_unfold_tags(t); - - /* should remind that collector->next never equal to 1, 2 */ - if (*type == (uintptr_t)ctor->next) - ctor->next = tagptr_unfold_ptr(t); - - ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0); - 
return tagptr_unfold_ptr(t);
-}
-#endif
-

diff --git a/fs/Kconfig b/fs/Kconfig
index bfb1c6095c7a..669d46550e6d 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -261,6 +261,7 @@ source "fs/romfs/Kconfig"
 source "fs/pstore/Kconfig"
 source "fs/sysv/Kconfig"
 source "fs/ufs/Kconfig"
+source "fs/erofs/Kconfig"
 
 endif # MISC_FILESYSTEMS

diff --git a/fs/Makefile b/fs/Makefile
index d60089fd689b..b2e4973a0bea 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -130,3 +130,4 @@ obj-$(CONFIG_F2FS_FS)		+= f2fs/
 obj-$(CONFIG_CEPH_FS)		+= ceph/
 obj-$(CONFIG_PSTORE)		+= pstore/
 obj-$(CONFIG_EFIVAR_FS)		+= efivarfs/
+obj-$(CONFIG_EROFS_FS)		+= erofs/

diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
new file mode 100644
index 000000000000..16316d1adca3
--- /dev/null
+++ b/fs/erofs/Kconfig
@@ -0,0 +1,98 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config EROFS_FS
+	tristate "EROFS filesystem support"
+	depends on BLOCK
+	help
+	  EROFS (Enhanced Read-Only File System) is a lightweight
+	  read-only file system with modern designs (e.g. page-sized
+	  blocks, inline xattrs/data, etc.) for scenarios with
+	  high-performance read-only requirements, e.g. Android OS
+	  for mobile phones and live CDs.
+
+	  It also provides fixed-sized output compression support,
+	  which improves storage density and keeps relatively high
+	  compression ratios, which is useful for achieving high
+	  performance on embedded devices with limited memory.
+
+	  If unsure, say N.
+
+config EROFS_FS_DEBUG
+	bool "EROFS debugging feature"
+	depends on EROFS_FS
+	help
+	  Print debugging messages and enable more BUG_ONs which check
+	  filesystem consistency and find potential issues aggressively,
+	  which can be used for Android engineering builds, for example.
+
+	  For daily use, say N.
+
+config EROFS_FAULT_INJECTION
+	bool "EROFS fault injection facility"
+	depends on EROFS_FS
+	help
+	  Test EROFS to inject faults such as ENOMEM, EIO, and so on.
+	  If unsure, say N.
+
+config EROFS_FS_XATTR
+	bool "EROFS extended attributes"
+	depends on EROFS_FS
+	default y
+	help
+	  Extended attributes are name:value pairs associated with inodes by
+	  the kernel or by users (see the attr(5) manual page, or visit
+	  <http://acl.bestbits.at/> for details).
+
+	  If unsure, say N.
+
+config EROFS_FS_POSIX_ACL
+	bool "EROFS Access Control Lists"
+	depends on EROFS_FS_XATTR
+	select FS_POSIX_ACL
+	default y
+	help
+	  Posix Access Control Lists (ACLs) support permissions for users and
+	  groups beyond the owner/group/world scheme.
+
+	  To learn more about Access Control Lists, visit the POSIX ACLs for
+	  Linux website <http://acl.bestbits.at/>.
+
+	  If you don't know what Access Control Lists are, say N.
+
+config EROFS_FS_SECURITY
+	bool "EROFS Security Labels"
+	depends on EROFS_FS_XATTR
+	default y
+	help
+	  Security labels provide an access control facility to support Linux
+	  Security Models (LSMs) accepted by AppArmor, SELinux, Smack and TOMOYO
+	  Linux. This option enables an extended attribute handler for file
+	  security labels in the erofs filesystem, so it requires enabling
+	  the extended attribute support in advance.
+
+	  If you are not using a security module, say N.
+
+config EROFS_FS_ZIP
+	bool "EROFS Data Compression Support"
+	depends on EROFS_FS
+	select LZ4_DECOMPRESS
+	default y
+	help
+	  Enable fixed-sized output compression for EROFS.
+
+	  If you don't want to enable the compression feature, say N.
+
+config EROFS_FS_CLUSTER_PAGE_LIMIT
+	int "EROFS Cluster Pages Hard Limit"
+	depends on EROFS_FS_ZIP
+	range 1 256
+	default "1"
+	help
+	  Indicates the maximum # of pages of a compressed
+	  physical cluster.
+
+	  For example, if the files in an image were compressed into
+	  8k units, the hard limit should not be configured to less
+	  than 2. Otherwise, the image will fail to mount on this
+	  kernel.

diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile
new file mode 100644
index 000000000000..46f2aa4ba46c
--- /dev/null
+++ b/fs/erofs/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+EROFS_VERSION = "1.0"
+
+ccflags-y += -DEROFS_VERSION=\"$(EROFS_VERSION)\"
+
+obj-$(CONFIG_EROFS_FS) += erofs.o
+erofs-objs := super.o inode.o data.o namei.o dir.o utils.o
+erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
+erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o
+

diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
new file mode 100644
index 000000000000..07d279fd5d67
--- /dev/null
+++ b/fs/erofs/compress.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#ifndef __EROFS_FS_COMPRESS_H
+#define __EROFS_FS_COMPRESS_H
+
+#include "internal.h"
+
+enum {
+	Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
+	Z_EROFS_COMPRESSION_RUNTIME_MAX
+};
+
+struct z_erofs_decompress_req {
+	struct super_block *sb;
+	struct page **in, **out;
+
+	unsigned short pageofs_out;
+	unsigned int inputsize, outputsize;
+
+	/* indicates the algorithm to be used for decompression */
+	unsigned int alg;
+	bool inplace_io, partial_decoding;
+};
+
+/*
+ * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) -
+ * used to mark temporarily allocated pages apart from other
+ * file/cached pages and NULL-mapping pages.
+ */
+#define Z_EROFS_MAPPING_STAGING		((void *)0x5A110C8D)
+
+/* check if a page is marked as staging */
+static inline bool z_erofs_page_is_staging(struct page *page)
+{
+	return page->mapping == Z_EROFS_MAPPING_STAGING;
+}
+
+static inline bool z_erofs_put_stagingpage(struct list_head *pagepool,
+					   struct page *page)
+{
+	if (!z_erofs_page_is_staging(page))
+		return false;
+
+	/* staging pages should not be used by others at the same time */
+	if (page_ref_count(page) > 1)
+		put_page(page);
+	else
+		list_add(&page->lru, pagepool);
+	return true;
+}
+
+int z_erofs_decompress(struct z_erofs_decompress_req *rq,
+		       struct list_head *pagepool);
+
+#endif
+

diff --git a/fs/erofs/data.c b/fs/erofs/data.c
new file mode 100644
index 000000000000..fda16ec8863e
--- /dev/null
+++ b/fs/erofs/data.c
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#include "internal.h"
+#include <linux/prefetch.h>
+
+#include <trace/events/erofs.h>
+
+static inline void read_endio(struct bio *bio)
+{
+	struct super_block *const sb = bio->bi_private;
+	struct bio_vec *bvec;
+	blk_status_t err = bio->bi_status;
+	struct bvec_iter_all iter_all;
+
+	if (time_to_inject(EROFS_SB(sb), FAULT_READ_IO)) {
+		erofs_show_injection_info(FAULT_READ_IO);
+		err = BLK_STS_IOERR;
+	}
+
+	bio_for_each_segment_all(bvec, bio, iter_all) {
+		struct page *page = bvec->bv_page;
+
+		/* page is already locked */
+		DBG_BUGON(PageUptodate(page));
+
+		if (unlikely(err))
+			SetPageError(page);
+		else
+			SetPageUptodate(page);
+
+		unlock_page(page);
+		/* page could be reclaimed now */
+	}
+	bio_put(bio);
+}
+
+/* prio -- true is used for dir */
+struct page *__erofs_get_meta_page(struct super_block *sb,
+				   erofs_blk_t blkaddr, bool prio, bool nofail)
+{
+	struct inode *const bd_inode = sb->s_bdev->bd_inode;
+	struct address_space *const mapping = bd_inode->i_mapping;
+	/* prefer retrying in the allocator to blindly looping below */
+	const gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_FS) |
+			  (nofail ? __GFP_NOFAIL : 0);
+	unsigned int io_retries = nofail ? EROFS_IO_MAX_RETRIES_NOFAIL : 0;
+	struct page *page;
+	int err;
+
+repeat:
+	page = find_or_create_page(mapping, blkaddr, gfp);
+	if (unlikely(!page)) {
+		DBG_BUGON(nofail);
+		return ERR_PTR(-ENOMEM);
+	}
+	DBG_BUGON(!PageLocked(page));
+
+	if (!PageUptodate(page)) {
+		struct bio *bio;
+
+		bio = erofs_grab_bio(sb, blkaddr, 1, sb, read_endio, nofail);
+		if (IS_ERR(bio)) {
+			DBG_BUGON(nofail);
+			err = PTR_ERR(bio);
+			goto err_out;
+		}
+
+		err = bio_add_page(bio, page, PAGE_SIZE, 0);
+		if (unlikely(err != PAGE_SIZE)) {
+			err = -EFAULT;
+			goto err_out;
+		}
+
+		__submit_bio(bio, REQ_OP_READ,
+			     REQ_META | (prio ? REQ_PRIO : 0));
+
+		lock_page(page);
+
+		/* this page has been truncated by others */
+		if (unlikely(page->mapping != mapping)) {
+unlock_repeat:
+			unlock_page(page);
+			put_page(page);
+			goto repeat;
+		}
+
+		/* more likely a read error */
+		if (unlikely(!PageUptodate(page))) {
+			if (io_retries) {
+				--io_retries;
+				goto unlock_repeat;
+			}
+			err = -EIO;
+			goto err_out;
+		}
+	}
+	return page;
+
+err_out:
+	unlock_page(page);
+	put_page(page);
+	return ERR_PTR(err);
+}
+
+static int erofs_map_blocks_flatmode(struct inode *inode,
+				     struct erofs_map_blocks *map,
+				     int flags)
+{
+	int err = 0;
+	erofs_blk_t nblocks, lastblk;
+	u64 offset = map->m_la;
+	struct erofs_vnode *vi = EROFS_V(inode);
+
+	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
+
+	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
+	lastblk = nblocks - is_inode_flat_inline(inode);
+
+	if (unlikely(offset >= inode->i_size)) {
+		/* leave out-of-bound access unmapped */
+		map->m_flags = 0;
+		map->m_plen = 0;
+		goto out;
+	}
+
+	/* there is no hole in flatmode */
+	map->m_flags = EROFS_MAP_MAPPED;
+
+	if (offset < blknr_to_addr(lastblk)) {
+		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
+		map->m_plen = blknr_to_addr(lastblk) - offset;
+	} else if (is_inode_flat_inline(inode)) {
+		/* 2 - inode inline B: inode, [xattrs], inline last blk...
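
(Illustrative aside, not part of the original patch: erofs_map_blocks_flatmode() above separates plain block-mapped data from the tail block that inline files keep next to the inode. A rough model of that split, assuming 4KiB blocks:)

	/* does file offset `la' land in the inline tail block? (sketch) */
	static int in_inline_tail(unsigned long long la,
				  unsigned long long i_size, int tail_inline)
	{
		const unsigned long long blksz = 4096;
		unsigned long long nblocks = (i_size + blksz - 1) / blksz;
		unsigned long long lastblk = nblocks - tail_inline;

		return tail_inline && la < i_size && la >= lastblk * blksz;
	}
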
*/ + struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); + + map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize + + vi->xattr_isize + erofs_blkoff(map->m_la); + map->m_plen = inode->i_size - offset; + + /* inline data should be located in one meta block */ + if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) { + errln("inline data cross block boundary @ nid %llu", + vi->nid); + DBG_BUGON(1); + err = -EFSCORRUPTED; + goto err_out; + } + + map->m_flags |= EROFS_MAP_META; + } else { + errln("internal error @ nid: %llu (size %llu), m_la 0x%llx", + vi->nid, inode->i_size, map->m_la); + DBG_BUGON(1); + err = -EIO; + goto err_out; + } + +out: + map->m_llen = map->m_plen; + +err_out: + trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0); + return err; +} + +int erofs_map_blocks(struct inode *inode, + struct erofs_map_blocks *map, int flags) +{ + if (unlikely(is_inode_layout_compression(inode))) { + int err = z_erofs_map_blocks_iter(inode, map, flags); + + if (map->mpage) { + put_page(map->mpage); + map->mpage = NULL; + } + return err; + } + return erofs_map_blocks_flatmode(inode, map, flags); +} + +static inline struct bio *erofs_read_raw_page(struct bio *bio, + struct address_space *mapping, + struct page *page, + erofs_off_t *last_block, + unsigned int nblocks, + bool ra) +{ + struct inode *const inode = mapping->host; + struct super_block *const sb = inode->i_sb; + erofs_off_t current_block = (erofs_off_t)page->index; + int err; + + DBG_BUGON(!nblocks); + + if (PageUptodate(page)) { + err = 0; + goto has_updated; + } + + /* note that for readpage case, bio also equals to NULL */ + if (bio && + /* not continuous */ + *last_block + 1 != current_block) { +submit_bio_retry: + __submit_bio(bio, REQ_OP_READ, 0); + bio = NULL; + } + + if (!bio) { + struct erofs_map_blocks map = { + .m_la = blknr_to_addr(current_block), + }; + erofs_blk_t blknr; + unsigned int blkoff; + + err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); + if (unlikely(err)) + goto err_out; + + /* zero out the holed page */ + if (unlikely(!(map.m_flags & EROFS_MAP_MAPPED))) { + zero_user_segment(page, 0, PAGE_SIZE); + SetPageUptodate(page); + + /* imply err = 0, see erofs_map_blocks */ + goto has_updated; + } + + /* for RAW access mode, m_plen must be equal to m_llen */ + DBG_BUGON(map.m_plen != map.m_llen); + + blknr = erofs_blknr(map.m_pa); + blkoff = erofs_blkoff(map.m_pa); + + /* deal with inline page */ + if (map.m_flags & EROFS_MAP_META) { + void *vsrc, *vto; + struct page *ipage; + + DBG_BUGON(map.m_plen > PAGE_SIZE); + + ipage = erofs_get_meta_page(inode->i_sb, blknr, 0); + + if (IS_ERR(ipage)) { + err = PTR_ERR(ipage); + goto err_out; + } + + vsrc = kmap_atomic(ipage); + vto = kmap_atomic(page); + memcpy(vto, vsrc + blkoff, map.m_plen); + memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen); + kunmap_atomic(vto); + kunmap_atomic(vsrc); + flush_dcache_page(page); + + SetPageUptodate(page); + /* TODO: could we unlock the page earlier? 
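
(Illustrative aside, not part of the original patch: erofs_blknr()/erofs_blkoff() used above are again shift-and-mask on the physical address. Assuming EROFS_BLKSIZ == 4096, m_pa == 0x3010 maps to block 3 at offset 0x10:)

	#define BLKSIZ	4096u	/* assumption: EROFS_BLKSIZ with 4KiB blocks */

	static unsigned long long blknr(unsigned long long addr)
	{
		return addr / BLKSIZ;		/* 0x3010 -> 3 */
	}

	static unsigned int blkoff(unsigned long long addr)
	{
		return addr & (BLKSIZ - 1);	/* 0x3010 -> 0x10 */
	}
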
*/ + unlock_page(ipage); + put_page(ipage); + + /* imply err = 0, see erofs_map_blocks */ + goto has_updated; + } + + /* pa must be block-aligned for raw reading */ + DBG_BUGON(erofs_blkoff(map.m_pa)); + + /* max # of continuous pages */ + if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE)) + nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE); + if (nblocks > BIO_MAX_PAGES) + nblocks = BIO_MAX_PAGES; + + bio = erofs_grab_bio(sb, blknr, nblocks, sb, + read_endio, false); + if (IS_ERR(bio)) { + err = PTR_ERR(bio); + bio = NULL; + goto err_out; + } + } + + err = bio_add_page(bio, page, PAGE_SIZE, 0); + /* out of the extent or bio is full */ + if (err < PAGE_SIZE) + goto submit_bio_retry; + + *last_block = current_block; + + /* shift in advance in case of it followed by too many gaps */ + if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) { + /* err should reassign to 0 after submitting */ + err = 0; + goto submit_bio_out; + } + + return bio; + +err_out: + /* for sync reading, set page error immediately */ + if (!ra) { + SetPageError(page); + ClearPageUptodate(page); + } +has_updated: + unlock_page(page); + + /* if updated manually, continuous pages has a gap */ + if (bio) +submit_bio_out: + __submit_bio(bio, REQ_OP_READ, 0); + + return unlikely(err) ? ERR_PTR(err) : NULL; +} + +/* + * since we dont have write or truncate flows, so no inode + * locking needs to be held at the moment. + */ +static int erofs_raw_access_readpage(struct file *file, struct page *page) +{ + erofs_off_t last_block; + struct bio *bio; + + trace_erofs_readpage(page, true); + + bio = erofs_read_raw_page(NULL, page->mapping, + page, &last_block, 1, false); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + DBG_BUGON(bio); /* since we have only one bio -- must be NULL */ + return 0; +} + +static int erofs_raw_access_readpages(struct file *filp, + struct address_space *mapping, + struct list_head *pages, + unsigned int nr_pages) +{ + erofs_off_t last_block; + struct bio *bio = NULL; + gfp_t gfp = readahead_gfp_mask(mapping); + struct page *page = list_last_entry(pages, struct page, lru); + + trace_erofs_readpages(mapping->host, page, nr_pages, true); + + for (; nr_pages; --nr_pages) { + page = list_entry(pages->prev, struct page, lru); + + prefetchw(&page->flags); + list_del(&page->lru); + + if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) { + bio = erofs_read_raw_page(bio, mapping, page, + &last_block, nr_pages, true); + + /* all the page errors are ignored when readahead */ + if (IS_ERR(bio)) { + pr_err("%s, readahead error at page %lu of nid %llu\n", + __func__, page->index, + EROFS_V(mapping->host)->nid); + + bio = NULL; + } + } + + /* pages could still be locked */ + put_page(page); + } + DBG_BUGON(!list_empty(pages)); + + /* the rare case (end in gaps) */ + if (unlikely(bio)) + __submit_bio(bio, REQ_OP_READ, 0); + return 0; +} + +static int erofs_get_block(struct inode *inode, sector_t iblock, + struct buffer_head *bh, int create) +{ + struct erofs_map_blocks map = { + .m_la = iblock << 9, + }; + int err; + + err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); + if (err) + return err; + + if (map.m_flags & EROFS_MAP_MAPPED) + bh->b_blocknr = erofs_blknr(map.m_pa); + + return err; +} + +static sector_t erofs_bmap(struct address_space *mapping, sector_t block) +{ + struct inode *inode = mapping->host; + + if (is_inode_flat_inline(inode)) { + erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE; + + if (block >> LOG_SECTORS_PER_BLOCK >= blks) + return 0; + } + + return generic_block_bmap(mapping, 
block, erofs_get_block);
+}
+
+/* for uncompressed (aligned) files and raw access for other files */
+const struct address_space_operations erofs_raw_access_aops = {
+	.readpage = erofs_raw_access_readpage,
+	.readpages = erofs_raw_access_readpages,
+	.bmap = erofs_bmap,
+};
+

diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
new file mode 100644
index 000000000000..5f4b7f302863
--- /dev/null
+++ b/fs/erofs/decompressor.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang
+ */
+#include "compress.h"
+#include <linux/module.h>
+#include <linux/lz4.h>
+
+#ifndef LZ4_DISTANCE_MAX	/* history window size */
+#define LZ4_DISTANCE_MAX	65535	/* set to maximum value by default */
+#endif
+
+#define LZ4_MAX_DISTANCE_PAGES	(DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
+#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)	(((srcsize) >> 8) + 32)
+#endif
+
+struct z_erofs_decompressor {
+	/*
+	 * if destpages have sparsed pages, fill them with bounce pages.
+	 * it also checks whether destpages indicate continuous physical memory.
+	 */
+	int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
+				 struct list_head *pagepool);
+	int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
+	char *name;
+};
+
+static bool use_vmap;
+module_param(use_vmap, bool, 0444);
+MODULE_PARM_DESC(use_vmap, "Use vmap() instead of vm_map_ram() (default 0)");
+
+static int lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
+				 struct list_head *pagepool)
+{
+	const unsigned int nr =
+		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+	struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
+	unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
+					   BITS_PER_LONG)] = { 0 };
+	void *kaddr = NULL;
+	unsigned int i, j, top;
+
+	top = 0;
+	for (i = j = 0; i < nr; ++i, ++j) {
+		struct page *const page = rq->out[i];
+		struct page *victim;
+
+		if (j >= LZ4_MAX_DISTANCE_PAGES)
+			j = 0;
+
+		/* 'valid' bounced can only be tested after a complete round */
+		if (test_bit(j, bounced)) {
+			DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES);
+			DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES);
+			availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
+		}
+
+		if (page) {
+			__clear_bit(j, bounced);
+			if (kaddr) {
+				if (kaddr + PAGE_SIZE == page_address(page))
+					kaddr += PAGE_SIZE;
+				else
+					kaddr = NULL;
+			} else if (!i) {
+				kaddr = page_address(page);
+			}
+			continue;
+		}
+		kaddr = NULL;
+		__set_bit(j, bounced);
+
+		if (top) {
+			victim = availables[--top];
+			get_page(victim);
+		} else {
+			victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
+			if (unlikely(!victim))
+				return -ENOMEM;
+			victim->mapping = Z_EROFS_MAPPING_STAGING;
+		}
+		rq->out[i] = victim;
+	}
+	return kaddr ? 1 : 0;
+}
+
+static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
+				       u8 *src, unsigned int pageofs_in)
+{
+	/*
+	 * if in-place decompression is ongoing, those decompressed
+	 * pages should be copied in order to avoid being overlapped.
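
(Illustrative aside, not part of the original patch: whether lz4_decompress() below may skip this copy and run fully in place hinges on the margin check: the compressed tail must trail the end of the decompressed output by at least LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) bytes. Expressed on its own:)

	/* minimum safety gap for in-place LZ4 decompression (sketch) */
	static unsigned int inplace_margin(unsigned int srcsize)
	{
		return (srcsize >> 8) + 32;	/* 4096 -> 16 + 32 = 48 */
	}
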
+ */ + struct page **in = rq->in; + u8 *const tmp = erofs_get_pcpubuf(0); + u8 *tmpp = tmp; + unsigned int inlen = rq->inputsize - pageofs_in; + unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in); + + while (tmpp < tmp + inlen) { + if (!src) + src = kmap_atomic(*in); + memcpy(tmpp, src + pageofs_in, count); + kunmap_atomic(src); + src = NULL; + tmpp += count; + pageofs_in = 0; + count = PAGE_SIZE; + ++in; + } + return tmp; +} + +static int lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out) +{ + unsigned int inputmargin, inlen; + u8 *src; + bool copied, support_0padding; + int ret; + + if (rq->inputsize > PAGE_SIZE) + return -EOPNOTSUPP; + + src = kmap_atomic(*rq->in); + inputmargin = 0; + support_0padding = false; + + /* decompression inplace is only safe when 0padding is enabled */ + if (EROFS_SB(rq->sb)->requirements & EROFS_REQUIREMENT_LZ4_0PADDING) { + support_0padding = true; + + while (!src[inputmargin & ~PAGE_MASK]) + if (!(++inputmargin & ~PAGE_MASK)) + break; + + if (inputmargin >= rq->inputsize) { + kunmap_atomic(src); + return -EIO; + } + } + + copied = false; + inlen = rq->inputsize - inputmargin; + if (rq->inplace_io) { + const uint oend = (rq->pageofs_out + + rq->outputsize) & ~PAGE_MASK; + const uint nr = PAGE_ALIGN(rq->pageofs_out + + rq->outputsize) >> PAGE_SHIFT; + + if (rq->partial_decoding || !support_0padding || + rq->out[nr - 1] != rq->in[0] || + rq->inputsize - oend < + LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) { + src = generic_copy_inplace_data(rq, src, inputmargin); + inputmargin = 0; + copied = true; + } + } + + ret = LZ4_decompress_safe_partial(src + inputmargin, out, + inlen, rq->outputsize, + rq->outputsize); + if (ret < 0) { + errln("%s, failed to decompress, in[%p, %u, %u] out[%p, %u]", + __func__, src + inputmargin, inlen, inputmargin, + out, rq->outputsize); + WARN_ON(1); + print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET, + 16, 1, src + inputmargin, inlen, true); + print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET, + 16, 1, out, rq->outputsize, true); + ret = -EIO; + } + + if (copied) + erofs_put_pcpubuf(src); + else + kunmap_atomic(src); + return ret; +} + +static struct z_erofs_decompressor decompressors[] = { + [Z_EROFS_COMPRESSION_SHIFTED] = { + .name = "shifted" + }, + [Z_EROFS_COMPRESSION_LZ4] = { + .prepare_destpages = lz4_prepare_destpages, + .decompress = lz4_decompress, + .name = "lz4" + }, +}; + +static void copy_from_pcpubuf(struct page **out, const char *dst, + unsigned short pageofs_out, + unsigned int outputsize) +{ + const char *end = dst + outputsize; + const unsigned int righthalf = PAGE_SIZE - pageofs_out; + const char *cur = dst - pageofs_out; + + while (cur < end) { + struct page *const page = *out++; + + if (page) { + char *buf = kmap_atomic(page); + + if (cur >= dst) { + memcpy(buf, cur, min_t(uint, PAGE_SIZE, + end - cur)); + } else { + memcpy(buf + pageofs_out, cur + pageofs_out, + min_t(uint, righthalf, end - cur)); + } + kunmap_atomic(buf); + } + cur += PAGE_SIZE; + } +} + +static void *erofs_vmap(struct page **pages, unsigned int count) +{ + int i = 0; + + if (use_vmap) + return vmap(pages, count, VM_MAP, PAGE_KERNEL); + + while (1) { + void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL); + + /* retry two more times (totally 3 times) */ + if (addr || ++i >= 3) + return addr; + vm_unmap_aliases(); + } + return NULL; +} + +static void erofs_vunmap(const void *mem, unsigned int count) +{ + if (!use_vmap) + vm_unmap_ram(mem, count); + else + vunmap(mem); +} + +static int decompress_generic(struct 
z_erofs_decompress_req *rq, + struct list_head *pagepool) +{ + const unsigned int nrpages_out = + PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; + const struct z_erofs_decompressor *alg = decompressors + rq->alg; + unsigned int dst_maptype; + void *dst; + int ret; + + if (nrpages_out == 1 && !rq->inplace_io) { + DBG_BUGON(!*rq->out); + dst = kmap_atomic(*rq->out); + dst_maptype = 0; + goto dstmap_out; + } + + /* + * For the case of small output size (especially much less + * than PAGE_SIZE), memcpy the decompressed data rather than + * compressed data is preferred. + */ + if (rq->outputsize <= PAGE_SIZE * 7 / 8) { + dst = erofs_get_pcpubuf(0); + if (IS_ERR(dst)) + return PTR_ERR(dst); + + rq->inplace_io = false; + ret = alg->decompress(rq, dst); + if (!ret) + copy_from_pcpubuf(rq->out, dst, rq->pageofs_out, + rq->outputsize); + + erofs_put_pcpubuf(dst); + return ret; + } + + ret = alg->prepare_destpages(rq, pagepool); + if (ret < 0) { + return ret; + } else if (ret) { + dst = page_address(*rq->out); + dst_maptype = 1; + goto dstmap_out; + } + + dst = erofs_vmap(rq->out, nrpages_out); + if (!dst) + return -ENOMEM; + dst_maptype = 2; + +dstmap_out: + ret = alg->decompress(rq, dst + rq->pageofs_out); + + if (!dst_maptype) + kunmap_atomic(dst); + else if (dst_maptype == 2) + erofs_vunmap(dst, nrpages_out); + return ret; +} + +static int shifted_decompress(const struct z_erofs_decompress_req *rq, + struct list_head *pagepool) +{ + const unsigned int nrpages_out = + PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; + const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out; + unsigned char *src, *dst; + + if (nrpages_out > 2) { + DBG_BUGON(1); + return -EIO; + } + + if (rq->out[0] == *rq->in) { + DBG_BUGON(nrpages_out != 1); + return 0; + } + + src = kmap_atomic(*rq->in); + if (!rq->out[0]) { + dst = NULL; + } else { + dst = kmap_atomic(rq->out[0]); + memcpy(dst + rq->pageofs_out, src, righthalf); + } + + if (rq->out[1] == *rq->in) { + memmove(src, src + righthalf, rq->pageofs_out); + } else if (nrpages_out == 2) { + if (dst) + kunmap_atomic(dst); + DBG_BUGON(!rq->out[1]); + dst = kmap_atomic(rq->out[1]); + memcpy(dst, src + righthalf, rq->pageofs_out); + } + if (dst) + kunmap_atomic(dst); + kunmap_atomic(src); + return 0; +} + +int z_erofs_decompress(struct z_erofs_decompress_req *rq, + struct list_head *pagepool) +{ + if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED) + return shifted_decompress(rq, pagepool); + return decompress_generic(rq, pagepool); +} + diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c new file mode 100644 index 000000000000..1976e60e5174 --- /dev/null +++ b/fs/erofs/dir.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. 
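z_erofs_decompress() above dispatches through the decompressors[] table indexed by rq->alg, so supporting a new algorithm means filling in one slot with its callbacks. A small self-contained sketch of that table-of-ops pattern (the codec names and signatures here are invented; only the shape matches the driver):

#include <stdio.h>
#include <string.h>

enum { ALG_SHIFTED, ALG_LZ4, ALG_MAX };

struct codec_ops {
	const char *name;
	int (*decompress)(const char *in, char *out, int outlen);
};

/* "shifted" data is stored uncompressed: decompression is a plain copy */
static int shifted_decompress(const char *in, char *out, int outlen)
{
	memcpy(out, in, outlen);
	return 0;
}

/* stand-in for a real call such as LZ4_decompress_safe_partial() */
static int lz4_decompress(const char *in, char *out, int outlen)
{
	(void)in; (void)out; (void)outlen;
	return -1;	/* not wired up in this sketch */
}

static const struct codec_ops codecs[ALG_MAX] = {
	[ALG_SHIFTED] = { "shifted", shifted_decompress },
	[ALG_LZ4]     = { "lz4",     lz4_decompress },
};

int main(void)
{
	char out[6] = { 0 };
	int alg = ALG_SHIFTED;

	printf("using %s: %d\n", codecs[alg].name,
	       codecs[alg].decompress("hello", out, 5));
	return 0;
}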
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "internal.h" + +static void debug_one_dentry(unsigned char d_type, const char *de_name, + unsigned int de_namelen) +{ +#ifdef CONFIG_EROFS_FS_DEBUG + /* since the on-disk name could not have the trailing '\0' */ + unsigned char dbg_namebuf[EROFS_NAME_LEN + 1]; + + memcpy(dbg_namebuf, de_name, de_namelen); + dbg_namebuf[de_namelen] = '\0'; + + debugln("found dirent %s de_len %u d_type %d", dbg_namebuf, + de_namelen, d_type); +#endif +} + +static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx, + void *dentry_blk, unsigned int *ofs, + unsigned int nameoff, unsigned int maxsize) +{ + struct erofs_dirent *de = dentry_blk + *ofs; + const struct erofs_dirent *end = dentry_blk + nameoff; + + while (de < end) { + const char *de_name; + unsigned int de_namelen; + unsigned char d_type; + + d_type = fs_ftype_to_dtype(de->file_type); + + nameoff = le16_to_cpu(de->nameoff); + de_name = (char *)dentry_blk + nameoff; + + /* the last dirent in the block? */ + if (de + 1 >= end) + de_namelen = strnlen(de_name, maxsize - nameoff); + else + de_namelen = le16_to_cpu(de[1].nameoff) - nameoff; + + /* a corrupted entry is found */ + if (unlikely(nameoff + de_namelen > maxsize || + de_namelen > EROFS_NAME_LEN)) { + errln("bogus dirent @ nid %llu", EROFS_V(dir)->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; + } + + debug_one_dentry(d_type, de_name, de_namelen); + if (!dir_emit(ctx, de_name, de_namelen, + le64_to_cpu(de->nid), d_type)) + /* stopped by some reason */ + return 1; + ++de; + *ofs += sizeof(struct erofs_dirent); + } + *ofs = maxsize; + return 0; +} + +static int erofs_readdir(struct file *f, struct dir_context *ctx) +{ + struct inode *dir = file_inode(f); + struct address_space *mapping = dir->i_mapping; + const size_t dirsize = i_size_read(dir); + unsigned int i = ctx->pos / EROFS_BLKSIZ; + unsigned int ofs = ctx->pos % EROFS_BLKSIZ; + int err = 0; + bool initial = true; + + while (ctx->pos < dirsize) { + struct page *dentry_page; + struct erofs_dirent *de; + unsigned int nameoff, maxsize; + + dentry_page = read_mapping_page(mapping, i, NULL); + if (dentry_page == ERR_PTR(-ENOMEM)) { + err = -ENOMEM; + break; + } else if (IS_ERR(dentry_page)) { + errln("fail to readdir of logical block %u of nid %llu", + i, EROFS_V(dir)->nid); + err = -EFSCORRUPTED; + break; + } + + de = (struct erofs_dirent *)kmap(dentry_page); + + nameoff = le16_to_cpu(de->nameoff); + + if (unlikely(nameoff < sizeof(struct erofs_dirent) || + nameoff >= PAGE_SIZE)) { + errln("%s, invalid de[0].nameoff %u @ nid %llu", + __func__, nameoff, EROFS_V(dir)->nid); + err = -EFSCORRUPTED; + goto skip_this; + } + + maxsize = min_t(unsigned int, + dirsize - ctx->pos + ofs, PAGE_SIZE); + + /* search dirents at the arbitrary position */ + if (unlikely(initial)) { + initial = false; + + ofs = roundup(ofs, sizeof(struct erofs_dirent)); + if (unlikely(ofs >= nameoff)) + goto skip_this; + } + + err = erofs_fill_dentries(dir, ctx, de, &ofs, + nameoff, maxsize); +skip_this: + kunmap(dentry_page); + + put_page(dentry_page); + + ctx->pos = blknr_to_addr(i) + ofs; + + if (unlikely(err)) + break; + ++i; + ofs = 0; + } + return err < 0 ? 
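erofs_fill_dentries() above never stores name lengths on disk: each name runs from its nameoff up to the next entry's nameoff, and the last name in a block is bounded by the used block size and measured with strnlen(). A toy parser over the same layout (the struct and block contents are made up for illustration):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct toy_dirent {
	uint16_t nameoff;	/* offset of the name inside the block */
};

int main(void)
{
	unsigned char blk[32] = { 0 };
	struct toy_dirent *de = (struct toy_dirent *)blk;
	unsigned int maxsize = 13;	/* bytes actually used in the block */
	unsigned int nents, i;

	/* two entries, then the names "foo" and "barbaz" back to back */
	de[0].nameoff = 4;
	de[1].nameoff = 7;
	memcpy(blk + 4, "foo", 3);
	memcpy(blk + 7, "barbaz", 6);

	/* the first nameoff doubles as the end of the dirent array */
	nents = de[0].nameoff / sizeof(*de);

	for (i = 0; i < nents; i++) {
		const char *name = (char *)blk + de[i].nameoff;
		unsigned int len = (i + 1 < nents)
			? de[i + 1].nameoff - de[i].nameoff
			: (unsigned int)strnlen(name, maxsize - de[i].nameoff);

		printf("entry %u: %.*s\n", i, (int)len, name);
	}
	return 0;
}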
err : 0; +} + +const struct file_operations erofs_dir_fops = { + .llseek = generic_file_llseek, + .read = generic_read_dir, + .iterate_shared = erofs_readdir, +}; + diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h new file mode 100644 index 000000000000..afa7d45ca958 --- /dev/null +++ b/fs/erofs/erofs_fs.h @@ -0,0 +1,307 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */ +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#ifndef __EROFS_FS_H +#define __EROFS_FS_H + +/* Enhanced(Extended) ROM File System */ +#define EROFS_SUPER_OFFSET 1024 + +/* + * Any bits that aren't in EROFS_ALL_REQUIREMENTS should be + * incompatible with this kernel version. + */ +#define EROFS_REQUIREMENT_LZ4_0PADDING 0x00000001 +#define EROFS_ALL_REQUIREMENTS EROFS_REQUIREMENT_LZ4_0PADDING + +struct erofs_super_block { +/* 0 */__le32 magic; /* in the little endian */ +/* 4 */__le32 checksum; /* crc32c(super_block) */ +/* 8 */__le32 features; /* (aka. feature_compat) */ +/* 12 */__u8 blkszbits; /* support block_size == PAGE_SIZE only */ +/* 13 */__u8 reserved; + +/* 14 */__le16 root_nid; +/* 16 */__le64 inos; /* total valid ino # (== f_files - f_favail) */ + +/* 24 */__le64 build_time; /* inode v1 time derivation */ +/* 32 */__le32 build_time_nsec; +/* 36 */__le32 blocks; /* used for statfs */ +/* 40 */__le32 meta_blkaddr; +/* 44 */__le32 xattr_blkaddr; +/* 48 */__u8 uuid[16]; /* 128-bit uuid for volume */ +/* 64 */__u8 volume_name[16]; /* volume name */ +/* 80 */__le32 requirements; /* (aka. feature_incompat) */ + +/* 84 */__u8 reserved2[44]; +} __packed; /* 128 bytes */ + +/* + * erofs inode data mapping: + * 0 - inode plain without inline data A: + * inode, [xattrs], ... | ... | no-holed data + * 1 - inode VLE compression B (legacy): + * inode, [xattrs], extents ... | ... + * 2 - inode plain with inline data C: + * inode, [xattrs], last_inline_data, ... | ... | no-holed data + * 3 - inode compression D: + * inode, [xattrs], map_header, extents ... | ... 
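The requirements word introduced above (aka feature_incompat) is a strict mask: any set bit this kernel does not recognize must make the image unmountable, since it signals an on-disk format the code cannot parse. The whole check reduces to one mask test, sketched here:

#include <stdio.h>
#include <stdint.h>

#define REQ_LZ4_0PADDING	0x00000001u
#define ALL_REQUIREMENTS	REQ_LZ4_0PADDING

static int check_requirements(uint32_t req)
{
	if (req & ~ALL_REQUIREMENTS) {
		fprintf(stderr, "unidentified requirements %x\n",
			req & ~ALL_REQUIREMENTS);
		return -1;	/* refuse to mount */
	}
	return 0;
}

int main(void)
{
	/* known bit is fine; any unknown bit is rejected */
	printf("%d %d\n", check_requirements(0x1), check_requirements(0x2));
	return 0;
}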
+ * 4~7 - reserved + */ +enum { + EROFS_INODE_FLAT_PLAIN, + EROFS_INODE_FLAT_COMPRESSION_LEGACY, + EROFS_INODE_FLAT_INLINE, + EROFS_INODE_FLAT_COMPRESSION, + EROFS_INODE_LAYOUT_MAX +}; + +static inline bool erofs_inode_is_data_compressed(unsigned int datamode) +{ + if (datamode == EROFS_INODE_FLAT_COMPRESSION) + return true; + return datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY; +} + +/* bit definitions of inode i_advise */ +#define EROFS_I_VERSION_BITS 1 +#define EROFS_I_DATA_MAPPING_BITS 3 + +#define EROFS_I_VERSION_BIT 0 +#define EROFS_I_DATA_MAPPING_BIT 1 + +struct erofs_inode_v1 { +/* 0 */__le16 i_advise; + +/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ +/* 2 */__le16 i_xattr_icount; +/* 4 */__le16 i_mode; +/* 6 */__le16 i_nlink; +/* 8 */__le32 i_size; +/* 12 */__le32 i_reserved; +/* 16 */union { + /* file total compressed blocks for data mapping 1 */ + __le32 compressed_blocks; + __le32 raw_blkaddr; + + /* for device files, used to indicate old/new device # */ + __le32 rdev; + } i_u __packed; +/* 20 */__le32 i_ino; /* only used for 32-bit stat compatibility */ +/* 24 */__le16 i_uid; +/* 26 */__le16 i_gid; +/* 28 */__le32 i_reserved2; +} __packed; + +/* 32 bytes on-disk inode */ +#define EROFS_INODE_LAYOUT_V1 0 +/* 64 bytes on-disk inode */ +#define EROFS_INODE_LAYOUT_V2 1 + +struct erofs_inode_v2 { +/* 0 */__le16 i_advise; + +/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */ +/* 2 */__le16 i_xattr_icount; +/* 4 */__le16 i_mode; +/* 6 */__le16 i_reserved; +/* 8 */__le64 i_size; +/* 16 */union { + /* file total compressed blocks for data mapping 1 */ + __le32 compressed_blocks; + __le32 raw_blkaddr; + + /* for device files, used to indicate old/new device # */ + __le32 rdev; + } i_u __packed; + + /* only used for 32-bit stat compatibility */ +/* 20 */__le32 i_ino; + +/* 24 */__le32 i_uid; +/* 28 */__le32 i_gid; +/* 32 */__le64 i_ctime; +/* 40 */__le32 i_ctime_nsec; +/* 44 */__le32 i_nlink; +/* 48 */__u8 i_reserved2[16]; +} __packed; /* 64 bytes */ + +#define EROFS_MAX_SHARED_XATTRS (128) +/* h_shared_count between 129 ... 255 are special # */ +#define EROFS_SHARED_XATTR_EXTENT (255) + +/* + * inline xattrs (n == i_xattr_icount): + * erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes + * 12 bytes / \ + * / \ + * /-----------------------\ + * | erofs_xattr_entries+ | + * +-----------------------+ + * inline xattrs must starts in erofs_xattr_ibody_header, + * for read-only fs, no need to introduce h_refcount + */ +struct erofs_xattr_ibody_header { + __le32 h_reserved; + __u8 h_shared_count; + __u8 h_reserved2[7]; + __le32 h_shared_xattrs[0]; /* shared xattr id array */ +} __packed; + +/* Name indexes */ +#define EROFS_XATTR_INDEX_USER 1 +#define EROFS_XATTR_INDEX_POSIX_ACL_ACCESS 2 +#define EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT 3 +#define EROFS_XATTR_INDEX_TRUSTED 4 +#define EROFS_XATTR_INDEX_LUSTRE 5 +#define EROFS_XATTR_INDEX_SECURITY 6 + +/* xattr entry (for both inline & shared xattrs) */ +struct erofs_xattr_entry { + __u8 e_name_len; /* length of name */ + __u8 e_name_index; /* attribute name index */ + __le16 e_value_size; /* size of attribute value */ + /* followed by e_name and e_value */ + char e_name[0]; /* attribute name */ +} __packed; + +#define ondisk_xattr_ibody_size(count) ({\ + u32 __count = le16_to_cpu(count); \ + ((__count) == 0) ? 
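The i_advise bit definitions above pack the inode version and its data-mapping mode into one little-endian 16-bit word: version in bit 0, mapping mode in bits 1-3. The extraction is the usual shift-and-mask, matching the __inode_advise() macros defined later in internal.h; a standalone check:

#include <stdio.h>
#include <stdint.h>

#define I_VERSION_BIT		0
#define I_VERSION_BITS		1
#define I_DATA_MAPPING_BIT	1
#define I_DATA_MAPPING_BITS	3

/* pull a small bitfield out of the on-disk i_advise word */
static unsigned int inode_advise(uint16_t x, unsigned int bit,
				 unsigned int bits)
{
	return (x >> bit) & ((1u << bits) - 1);
}

int main(void)
{
	uint16_t i_advise = (1 << I_VERSION_BIT) |	/* v2 inode */
			    (3 << I_DATA_MAPPING_BIT);	/* FLAT_COMPRESSION */

	printf("version=%u datamode=%u\n",
	       inode_advise(i_advise, I_VERSION_BIT, I_VERSION_BITS),
	       inode_advise(i_advise, I_DATA_MAPPING_BIT,
			    I_DATA_MAPPING_BITS));
	return 0;
}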
0 : \ + sizeof(struct erofs_xattr_ibody_header) + \ + sizeof(__u32) * ((__count) - 1); }) + +#define EROFS_XATTR_ALIGN(size) round_up(size, sizeof(struct erofs_xattr_entry)) +#define EROFS_XATTR_ENTRY_SIZE(entry) EROFS_XATTR_ALIGN( \ + sizeof(struct erofs_xattr_entry) + \ + (entry)->e_name_len + le16_to_cpu((entry)->e_value_size)) + +/* available compression algorithm types */ +enum { + Z_EROFS_COMPRESSION_LZ4, + Z_EROFS_COMPRESSION_MAX +}; + +/* + * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on) + * e.g. for 4k logical cluster size, 4B if compacted 2B is off; + * (4B) + 2B + (4B) if compacted 2B is on. + */ +#define Z_EROFS_ADVISE_COMPACTED_2B_BIT 0 + +#define Z_EROFS_ADVISE_COMPACTED_2B (1 << Z_EROFS_ADVISE_COMPACTED_2B_BIT) + +struct z_erofs_map_header { + __le32 h_reserved1; + __le16 h_advise; + /* + * bit 0-3 : algorithm type of head 1 (logical cluster type 01); + * bit 4-7 : algorithm type of head 2 (logical cluster type 11). + */ + __u8 h_algorithmtype; + /* + * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096; + * bit 3-4 : (physical - logical) cluster bits of head 1: + * For example, if logical clustersize = 4096, 1 for 8192. + * bit 5-7 : (physical - logical) cluster bits of head 2. + */ + __u8 h_clusterbits; +}; + +#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8 + +/* + * Z_EROFS Variable-sized Logical Extent cluster type: + * 0 - literal (uncompressed) cluster + * 1 - compressed cluster (for the head logical cluster) + * 2 - compressed cluster (for the other logical clusters) + * + * In detail, + * 0 - literal (uncompressed) cluster, + * di_advise = 0 + * di_clusterofs = the literal data offset of the cluster + * di_blkaddr = the blkaddr of the literal cluster + * + * 1 - compressed cluster (for the head logical cluster) + * di_advise = 1 + * di_clusterofs = the decompressed data offset of the cluster + * di_blkaddr = the blkaddr of the compressed cluster + * + * 2 - compressed cluster (for the other logical clusters) + * di_advise = 2 + * di_clusterofs = + * the decompressed data offset in its own head cluster + * di_u.delta[0] = distance to its corresponding head cluster + * di_u.delta[1] = distance to its corresponding tail cluster + * (di_advise could be 0, 1 or 2) + */ +enum { + Z_EROFS_VLE_CLUSTER_TYPE_PLAIN, + Z_EROFS_VLE_CLUSTER_TYPE_HEAD, + Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD, + Z_EROFS_VLE_CLUSTER_TYPE_RESERVED, + Z_EROFS_VLE_CLUSTER_TYPE_MAX +}; + +#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2 +#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0 + +struct z_erofs_vle_decompressed_index { + __le16 di_advise; + /* where to decompress in the head cluster */ + __le16 di_clusterofs; + + union { + /* for the head cluster */ + __le32 blkaddr; + /* + * for the rest clusters + * eg. 
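ondisk_xattr_ibody_size() just above encodes the "1 header + (n - 1) shared-id slots" rule: an i_xattr_icount of n occupies 12 + 4*(n-1) bytes, and every following erofs_xattr_entry is padded to 4-byte alignment by EROFS_XATTR_ALIGN(). The same arithmetic worked through in plain C:

#include <stdio.h>

#define XATTR_ALIGN(sz)	(((sz) + 3) & ~3u)	/* entries are 4-byte aligned */

/* 12-byte header plus one 4-byte shared-xattr id per extra count unit */
static unsigned int ibody_size(unsigned int icount)
{
	return icount ? 12 + 4 * (icount - 1) : 0;
}

/* 4-byte entry header, then the name and value, rounded up */
static unsigned int entry_size(unsigned int name_len, unsigned int value_size)
{
	return XATTR_ALIGN(4 + name_len + value_size);
}

int main(void)
{
	printf("icount=0 -> %u bytes\n", ibody_size(0));	/* 0  */
	printf("icount=1 -> %u bytes\n", ibody_size(1));	/* 12 */
	printf("icount=4 -> %u bytes\n", ibody_size(4));	/* 24 */
	printf("entry(name=6, value=1) -> %u bytes\n",
	       entry_size(6, 1));				/* 12 */
	return 0;
}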
for 4k page-sized cluster, maximum 4K*64k = 256M) + * [0] - pointing to the head cluster + * [1] - pointing to the tail cluster + */ + __le16 delta[2]; + } di_u __packed; /* 8 bytes */ +} __packed; + +#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \ + (round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \ + sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING) + +/* dirent sorts in alphabet order, thus we can do binary search */ +struct erofs_dirent { + __le64 nid; /* 0, node number */ + __le16 nameoff; /* 8, start offset of file name */ + __u8 file_type; /* 10, file type */ + __u8 reserved; /* 11, reserved */ +} __packed; + +/* + * EROFS file types should match generic FT_* types and + * it seems no need to add BUILD_BUG_ONs since potential + * unmatchness will break other fses as well... + */ + +#define EROFS_NAME_LEN 255 + +/* check the EROFS on-disk layout strictly at compile time */ +static inline void erofs_check_ondisk_layout_definitions(void) +{ + BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128); + BUILD_BUG_ON(sizeof(struct erofs_inode_v1) != 32); + BUILD_BUG_ON(sizeof(struct erofs_inode_v2) != 64); + BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12); + BUILD_BUG_ON(sizeof(struct erofs_xattr_entry) != 4); + BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8); + BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8); + BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12); + + BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) < + Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1); +} + +#endif + diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c new file mode 100644 index 000000000000..80f4fe919ee7 --- /dev/null +++ b/fs/erofs/inode.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. 
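erofs_check_ondisk_layout_definitions() above pins every on-disk structure to its expected size with BUILD_BUG_ON, so an accidental padding or field change fails the build instead of silently corrupting parsing. Outside the kernel the same guard is a C11 _Static_assert; a sketch with a made-up record shaped like struct erofs_dirent:

#include <stdint.h>

/* a 12-byte on-disk record, packed like struct erofs_dirent */
struct __attribute__((packed)) toy_dirent {
	uint64_t nid;		/* 0: node number */
	uint16_t nameoff;	/* 8: start offset of the name */
	uint8_t  file_type;	/* 10 */
	uint8_t  reserved;	/* 11 */
};

/* fails at compile time if the layout ever drifts */
_Static_assert(sizeof(struct toy_dirent) == 12,
	       "on-disk dirent must be 12 bytes");

int main(void)
{
	return 0;
}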
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "xattr.h" + +#include + +/* no locking */ +static int read_inode(struct inode *inode, void *data) +{ + struct erofs_vnode *vi = EROFS_V(inode); + struct erofs_inode_v1 *v1 = data; + const unsigned int advise = le16_to_cpu(v1->i_advise); + erofs_blk_t nblks = 0; + + vi->datamode = __inode_data_mapping(advise); + + if (unlikely(vi->datamode >= EROFS_INODE_LAYOUT_MAX)) { + errln("unsupported data mapping %u of nid %llu", + vi->datamode, vi->nid); + DBG_BUGON(1); + return -EOPNOTSUPP; + } + + if (__inode_version(advise) == EROFS_INODE_LAYOUT_V2) { + struct erofs_inode_v2 *v2 = data; + + vi->inode_isize = sizeof(struct erofs_inode_v2); + vi->xattr_isize = ondisk_xattr_ibody_size(v2->i_xattr_icount); + + inode->i_mode = le16_to_cpu(v2->i_mode); + if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode)) + vi->raw_blkaddr = le32_to_cpu(v2->i_u.raw_blkaddr); + else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) + inode->i_rdev = + new_decode_dev(le32_to_cpu(v2->i_u.rdev)); + else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) + inode->i_rdev = 0; + else + goto bogusimode; + + i_uid_write(inode, le32_to_cpu(v2->i_uid)); + i_gid_write(inode, le32_to_cpu(v2->i_gid)); + set_nlink(inode, le32_to_cpu(v2->i_nlink)); + + /* ns timestamp */ + inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = + le64_to_cpu(v2->i_ctime); + inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = + le32_to_cpu(v2->i_ctime_nsec); + + inode->i_size = le64_to_cpu(v2->i_size); + + /* total blocks for compressed files */ + if (is_inode_layout_compression(inode)) + nblks = le32_to_cpu(v2->i_u.compressed_blocks); + } else if (__inode_version(advise) == EROFS_INODE_LAYOUT_V1) { + struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); + + vi->inode_isize = sizeof(struct erofs_inode_v1); + vi->xattr_isize = ondisk_xattr_ibody_size(v1->i_xattr_icount); + + inode->i_mode = le16_to_cpu(v1->i_mode); + if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || + S_ISLNK(inode->i_mode)) + vi->raw_blkaddr = le32_to_cpu(v1->i_u.raw_blkaddr); + else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) + inode->i_rdev = + new_decode_dev(le32_to_cpu(v1->i_u.rdev)); + else if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) + inode->i_rdev = 0; + else + goto bogusimode; + + i_uid_write(inode, le16_to_cpu(v1->i_uid)); + i_gid_write(inode, le16_to_cpu(v1->i_gid)); + set_nlink(inode, le16_to_cpu(v1->i_nlink)); + + /* use build time to derive all file time */ + inode->i_mtime.tv_sec = inode->i_ctime.tv_sec = + sbi->build_time; + inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = + sbi->build_time_nsec; + + inode->i_size = le32_to_cpu(v1->i_size); + if (is_inode_layout_compression(inode)) + nblks = le32_to_cpu(v1->i_u.compressed_blocks); + } else { + errln("unsupported on-disk inode version %u of nid %llu", + __inode_version(advise), vi->nid); + DBG_BUGON(1); + return -EOPNOTSUPP; + } + + if (!nblks) + /* measure inode.i_blocks as generic filesystems */ + inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9; + else + inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK; + return 0; + +bogusimode: + errln("bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; +} + +/* + * try_lock can be required since locking order is: + * file data(fs_inode) + * meta(bd_inode) + * but the majority of the callers is "iget", + * in that case we are pretty sure no deadlock since + * no data operations exist. 
However I tend to + * try_lock since it takes no much overhead and + * will success immediately. + */ +static int fill_inline_data(struct inode *inode, void *data, + unsigned int m_pofs) +{ + struct erofs_vnode *vi = EROFS_V(inode); + struct erofs_sb_info *sbi = EROFS_I_SB(inode); + + /* should be inode inline C */ + if (!is_inode_flat_inline(inode)) + return 0; + + /* fast symlink (following ext4) */ + if (S_ISLNK(inode->i_mode) && inode->i_size < PAGE_SIZE) { + char *lnk = erofs_kmalloc(sbi, inode->i_size + 1, GFP_KERNEL); + + if (unlikely(!lnk)) + return -ENOMEM; + + m_pofs += vi->inode_isize + vi->xattr_isize; + + /* inline symlink data shouldn't across page boundary as well */ + if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) { + kfree(lnk); + errln("inline data cross block boundary @ nid %llu", + vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; + } + + /* get in-page inline data */ + memcpy(lnk, data + m_pofs, inode->i_size); + lnk[inode->i_size] = '\0'; + + inode->i_link = lnk; + set_inode_fast_symlink(inode); + } + return 0; +} + +static int fill_inode(struct inode *inode, int isdir) +{ + struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); + struct erofs_vnode *vi = EROFS_V(inode); + struct page *page; + void *data; + int err; + erofs_blk_t blkaddr; + unsigned int ofs; + erofs_off_t inode_loc; + + trace_erofs_fill_inode(inode, isdir); + inode_loc = iloc(sbi, vi->nid); + blkaddr = erofs_blknr(inode_loc); + ofs = erofs_blkoff(inode_loc); + + debugln("%s, reading inode nid %llu at %u of blkaddr %u", + __func__, vi->nid, ofs, blkaddr); + + page = erofs_get_meta_page(inode->i_sb, blkaddr, isdir); + + if (IS_ERR(page)) { + errln("failed to get inode (nid: %llu) page, err %ld", + vi->nid, PTR_ERR(page)); + return PTR_ERR(page); + } + + DBG_BUGON(!PageUptodate(page)); + data = page_address(page); + + err = read_inode(inode, data + ofs); + if (!err) { + /* setup the new inode */ + if (S_ISREG(inode->i_mode)) { + inode->i_op = &erofs_generic_iops; + inode->i_fop = &generic_ro_fops; + } else if (S_ISDIR(inode->i_mode)) { + inode->i_op = &erofs_dir_iops; + inode->i_fop = &erofs_dir_fops; + } else if (S_ISLNK(inode->i_mode)) { + /* by default, page_get_link is used for symlink */ + inode->i_op = &erofs_symlink_iops; + inode_nohighmem(inode); + } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || + S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { + inode->i_op = &erofs_generic_iops; + init_special_inode(inode, inode->i_mode, inode->i_rdev); + goto out_unlock; + } else { + err = -EFSCORRUPTED; + goto out_unlock; + } + + if (is_inode_layout_compression(inode)) { + err = z_erofs_fill_inode(inode); + goto out_unlock; + } + + inode->i_mapping->a_ops = &erofs_raw_access_aops; + + /* fill last page if inline data is available */ + err = fill_inline_data(inode, data, ofs); + } + +out_unlock: + unlock_page(page); + put_page(page); + return err; +} + +/* + * erofs nid is 64bits, but i_ino is 'unsigned long', therefore + * we should do more for 32-bit platform to find the right inode. 
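fill_inode() above locates the on-disk inode by turning the nid into a byte address: inodes live in fixed-size slots after meta_blkaddr, so the math is base + (nid << islotbits), then a split into block number and in-block offset (see iloc() in internal.h further down). A sketch with invented sample values:

#include <stdio.h>
#include <stdint.h>

#define BLKSIZ 4096u

/* islotbits = log2(inode slot size); 32-byte v1 inodes give 5 */
static uint64_t iloc(uint32_t meta_blkaddr, unsigned int islotbits,
		     uint64_t nid)
{
	return (uint64_t)meta_blkaddr * BLKSIZ + (nid << islotbits);
}

int main(void)
{
	uint64_t loc = iloc(2, 5, 300);	/* hypothetical image layout */

	printf("nid 300 -> block %llu, offset %llu\n",
	       (unsigned long long)(loc / BLKSIZ),
	       (unsigned long long)(loc % BLKSIZ));	/* block 4, offset 1408 */
	return 0;
}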
+ */ +#if BITS_PER_LONG == 32 +static int erofs_ilookup_test_actor(struct inode *inode, void *opaque) +{ + const erofs_nid_t nid = *(erofs_nid_t *)opaque; + + return EROFS_V(inode)->nid == nid; +} + +static int erofs_iget_set_actor(struct inode *inode, void *opaque) +{ + const erofs_nid_t nid = *(erofs_nid_t *)opaque; + + inode->i_ino = erofs_inode_hash(nid); + return 0; +} +#endif + +static inline struct inode *erofs_iget_locked(struct super_block *sb, + erofs_nid_t nid) +{ + const unsigned long hashval = erofs_inode_hash(nid); + +#if BITS_PER_LONG >= 64 + /* it is safe to use iget_locked for >= 64-bit platform */ + return iget_locked(sb, hashval); +#else + return iget5_locked(sb, hashval, erofs_ilookup_test_actor, + erofs_iget_set_actor, &nid); +#endif +} + +struct inode *erofs_iget(struct super_block *sb, + erofs_nid_t nid, + bool isdir) +{ + struct inode *inode = erofs_iget_locked(sb, nid); + + if (unlikely(!inode)) + return ERR_PTR(-ENOMEM); + + if (inode->i_state & I_NEW) { + int err; + struct erofs_vnode *vi = EROFS_V(inode); + + vi->nid = nid; + + err = fill_inode(inode, isdir); + if (likely(!err)) + unlock_new_inode(inode); + else { + iget_failed(inode); + inode = ERR_PTR(err); + } + } + return inode; +} + +int erofs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags) +{ + struct inode *const inode = d_inode(path->dentry); + + if (is_inode_layout_compression(inode)) + stat->attributes |= STATX_ATTR_COMPRESSED; + + stat->attributes |= STATX_ATTR_IMMUTABLE; + stat->attributes_mask |= (STATX_ATTR_COMPRESSED | + STATX_ATTR_IMMUTABLE); + + generic_fillattr(inode, stat); + return 0; +} + +const struct inode_operations erofs_generic_iops = { + .getattr = erofs_getattr, +#ifdef CONFIG_EROFS_FS_XATTR + .listxattr = erofs_listxattr, +#endif + .get_acl = erofs_get_acl, +}; + +const struct inode_operations erofs_symlink_iops = { + .get_link = page_get_link, + .getattr = erofs_getattr, +#ifdef CONFIG_EROFS_FS_XATTR + .listxattr = erofs_listxattr, +#endif + .get_acl = erofs_get_acl, +}; + +const struct inode_operations erofs_fast_symlink_iops = { + .get_link = simple_get_link, + .getattr = erofs_getattr, +#ifdef CONFIG_EROFS_FS_XATTR + .listxattr = erofs_listxattr, +#endif + .get_acl = erofs_get_acl, +}; + diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h new file mode 100644 index 000000000000..620b73fcc416 --- /dev/null +++ b/fs/erofs/internal.h @@ -0,0 +1,553 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#ifndef __EROFS_INTERNAL_H +#define __EROFS_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "erofs_fs.h" + +/* redefine pr_fmt "erofs: " */ +#undef pr_fmt +#define pr_fmt(fmt) "erofs: " fmt + +#define errln(x, ...) pr_err(x "\n", ##__VA_ARGS__) +#define infoln(x, ...) pr_info(x "\n", ##__VA_ARGS__) +#ifdef CONFIG_EROFS_FS_DEBUG +#define debugln(x, ...) pr_debug(x "\n", ##__VA_ARGS__) +#define DBG_BUGON BUG_ON +#else +#define debugln(x, ...) 
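time_to_inject() below fires once every inject_rate calls by bumping an atomic counter and resetting it when the rate is reached. The same counter logic in portable C11, slightly simplified (the kernel version increments and then reads; here one fetch-add does both, and the sbi plumbing is dropped):

#include <stdio.h>
#include <stdatomic.h>

static atomic_int inject_ops;
static int inject_rate = 3;	/* fail every 3rd attempt */

static int time_to_inject(void)
{
	if (!inject_rate)
		return 0;
	if (atomic_fetch_add(&inject_ops, 1) + 1 >= inject_rate) {
		atomic_store(&inject_ops, 0);
		return 1;
	}
	return 0;
}

int main(void)
{
	for (int i = 1; i <= 6; i++)
		printf("call %d: %s\n", i, time_to_inject() ? "inject" : "ok");
	return 0;	/* calls 3 and 6 inject */
}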
((void)0) +#define DBG_BUGON(x) ((void)(x)) +#endif /* !CONFIG_EROFS_FS_DEBUG */ + +enum { + FAULT_KMALLOC, + FAULT_READ_IO, + FAULT_MAX, +}; + +#ifdef CONFIG_EROFS_FAULT_INJECTION +extern const char *erofs_fault_name[FAULT_MAX]; +#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type))) + +struct erofs_fault_info { + atomic_t inject_ops; + unsigned int inject_rate; + unsigned int inject_type; +}; +#endif /* CONFIG_EROFS_FAULT_INJECTION */ + +/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */ +#define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1 + +typedef u64 erofs_nid_t; +typedef u64 erofs_off_t; +/* data type for filesystem-wide blocks number */ +typedef u32 erofs_blk_t; + +struct erofs_sb_info { +#ifdef CONFIG_EROFS_FS_ZIP + /* list for all registered superblocks, mainly for shrinker */ + struct list_head list; + struct mutex umount_mutex; + + /* the dedicated workstation for compression */ + struct radix_tree_root workstn_tree; + + /* threshold for decompression synchronously */ + unsigned int max_sync_decompress_pages; + + unsigned int shrinker_run_no; + + /* current strategy of how to use managed cache */ + unsigned char cache_strategy; + + /* pseudo inode to manage cached pages */ + struct inode *managed_cache; +#endif /* CONFIG_EROFS_FS_ZIP */ + u32 blocks; + u32 meta_blkaddr; +#ifdef CONFIG_EROFS_FS_XATTR + u32 xattr_blkaddr; +#endif + + /* inode slot unit size in bit shift */ + unsigned char islotbits; + + u32 build_time_nsec; + u64 build_time; + + /* what we really care is nid, rather than ino.. */ + erofs_nid_t root_nid; + /* used for statfs, f_files - f_favail */ + u64 inos; + + u8 uuid[16]; /* 128-bit uuid for volume */ + u8 volume_name[16]; /* volume name */ + u32 requirements; + + unsigned int mount_opt; + +#ifdef CONFIG_EROFS_FAULT_INJECTION + struct erofs_fault_info fault_info; /* For fault injection */ +#endif +}; + +#ifdef CONFIG_EROFS_FAULT_INJECTION +#define erofs_show_injection_info(type) \ + infoln("inject %s in %s of %pS", erofs_fault_name[type], \ + __func__, __builtin_return_address(0)) + +static inline bool time_to_inject(struct erofs_sb_info *sbi, int type) +{ + struct erofs_fault_info *ffi = &sbi->fault_info; + + if (!ffi->inject_rate) + return false; + + if (!IS_FAULT_SET(ffi, type)) + return false; + + atomic_inc(&ffi->inject_ops); + if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) { + atomic_set(&ffi->inject_ops, 0); + return true; + } + return false; +} +#else +static inline bool time_to_inject(struct erofs_sb_info *sbi, int type) +{ + return false; +} + +static inline void erofs_show_injection_info(int type) +{ +} +#endif /* !CONFIG_EROFS_FAULT_INJECTION */ + +static inline void *erofs_kmalloc(struct erofs_sb_info *sbi, + size_t size, gfp_t flags) +{ + if (time_to_inject(sbi, FAULT_KMALLOC)) { + erofs_show_injection_info(FAULT_KMALLOC); + return NULL; + } + return kmalloc(size, flags); +} + +#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info) +#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info) + +/* Mount flags set via mount options or defaults */ +#define EROFS_MOUNT_XATTR_USER 0x00000010 +#define EROFS_MOUNT_POSIX_ACL 0x00000020 +#define EROFS_MOUNT_FAULT_INJECTION 0x00000040 + +#define clear_opt(sbi, option) ((sbi)->mount_opt &= ~EROFS_MOUNT_##option) +#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option) +#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option) + +#ifdef CONFIG_EROFS_FS_ZIP +enum { + EROFS_ZIP_CACHE_DISABLED, + EROFS_ZIP_CACHE_READAHEAD, + 
EROFS_ZIP_CACHE_READAROUND
+};
+
+#define EROFS_LOCKED_MAGIC	(INT_MIN | 0xE0F510CCL)
+
+/* basic unit of the workstation of a super_block */
+struct erofs_workgroup {
+	/* the workgroup index in the workstation */
+	pgoff_t index;
+
+	/* overall workgroup reference count */
+	atomic_t refcount;
+};
+
+#if defined(CONFIG_SMP)
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
+{
+	preempt_disable();
+	if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
+		preempt_enable();
+		return false;
+	}
+	return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
+{
+	/*
+	 * other observers should notice all modifications
+	 * in the freezing period.
+	 */
+	smp_mb();
+	atomic_set(&grp->refcount, orig_val);
+	preempt_enable();
+}
+
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+	return atomic_cond_read_relaxed(&grp->refcount,
+					VAL != EROFS_LOCKED_MAGIC);
+}
+#else
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+						 int val)
+{
+	preempt_disable();
+	/* no need to spin on UP platforms, let's just disable preemption. */
+	if (val != atomic_read(&grp->refcount)) {
+		preempt_enable();
+		return false;
+	}
+	return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+					    int orig_val)
+{
+	preempt_enable();
+}
+
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+	int v = atomic_read(&grp->refcount);
+
+	/* a workgroup is never frozen on uniprocessor systems */
+	DBG_BUGON(v == EROFS_LOCKED_MAGIC);
+	return v;
+}
+#endif	/* !CONFIG_SMP */
+
+/* hard limit of pages per compressed cluster */
+#define Z_EROFS_CLUSTER_MAX_PAGES	(CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
+#define EROFS_PCPUBUF_NR_PAGES		Z_EROFS_CLUSTER_MAX_PAGES
+#else
+#define EROFS_PCPUBUF_NR_PAGES		0
+#endif	/* !CONFIG_EROFS_FS_ZIP */
+
+/* we strictly follow PAGE_SIZE and no buffer head yet */
+#define LOG_BLOCK_SIZE		PAGE_SHIFT
+
+#undef LOG_SECTORS_PER_BLOCK
+#define LOG_SECTORS_PER_BLOCK	(PAGE_SHIFT - 9)
+
+#undef SECTORS_PER_BLOCK
+#define SECTORS_PER_BLOCK	(1 << LOG_SECTORS_PER_BLOCK)
+
+#define EROFS_BLKSIZ		(1 << LOG_BLOCK_SIZE)
+
+#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
+#error erofs cannot be used in this platform
+#endif
+
+#define EROFS_IO_MAX_RETRIES_NOFAIL	5
+
+#define ROOT_NID(sb)		((sb)->root_nid)
+
+#define erofs_blknr(addr)	((addr) / EROFS_BLKSIZ)
+#define erofs_blkoff(addr)	((addr) % EROFS_BLKSIZ)
+#define blknr_to_addr(nr)	((erofs_off_t)(nr) * EROFS_BLKSIZ)
+
+static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
+{
+	return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
+}
+
+/* atomic flag definitions */
+#define EROFS_V_EA_INITED_BIT	0
+#define EROFS_V_Z_INITED_BIT	1
+
+/* bitlock definitions (arranged in reverse order) */
+#define EROFS_V_BL_XATTR_BIT	(BITS_PER_LONG - 1)
+#define EROFS_V_BL_Z_BIT	(BITS_PER_LONG - 2)
+
+struct erofs_vnode {
+	erofs_nid_t nid;
+
+	/* atomic flags (including bitlocks) */
+	unsigned long flags;
+
+	unsigned char datamode;
+	unsigned char inode_isize;
+	unsigned short xattr_isize;
+
+	unsigned int xattr_shared_count;
+	unsigned int *xattr_shared_xattrs;
+
+	union {
+		erofs_blk_t raw_blkaddr;
+#ifdef CONFIG_EROFS_FS_ZIP
+		struct {
+			unsigned short z_advise;
+			unsigned char z_algorithmtype[2];
+			unsigned char z_logical_clusterbits;
+			unsigned char z_physical_clusterbits[2];
+		};
+#endif	/* CONFIG_EROFS_FS_ZIP */
+	};
+	/* the corresponding vfs
inode */ + struct inode vfs_inode; +}; + +#define EROFS_V(ptr) \ + container_of(ptr, struct erofs_vnode, vfs_inode) + +#define __inode_advise(x, bit, bits) \ + (((x) >> (bit)) & ((1 << (bits)) - 1)) + +#define __inode_version(advise) \ + __inode_advise(advise, EROFS_I_VERSION_BIT, \ + EROFS_I_VERSION_BITS) + +#define __inode_data_mapping(advise) \ + __inode_advise(advise, EROFS_I_DATA_MAPPING_BIT,\ + EROFS_I_DATA_MAPPING_BITS) + +static inline unsigned long inode_datablocks(struct inode *inode) +{ + /* since i_size cannot be changed */ + return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ); +} + +static inline bool is_inode_layout_compression(struct inode *inode) +{ + return erofs_inode_is_data_compressed(EROFS_V(inode)->datamode); +} + +static inline bool is_inode_flat_inline(struct inode *inode) +{ + return EROFS_V(inode)->datamode == EROFS_INODE_FLAT_INLINE; +} + +extern const struct super_operations erofs_sops; + +extern const struct address_space_operations erofs_raw_access_aops; +#ifdef CONFIG_EROFS_FS_ZIP +extern const struct address_space_operations z_erofs_vle_normalaccess_aops; +#endif + +/* + * Logical to physical block mapping, used by erofs_map_blocks() + * + * Different with other file systems, it is used for 2 access modes: + * + * 1) RAW access mode: + * + * Users pass a valid (m_lblk, m_lofs -- usually 0) pair, + * and get the valid m_pblk, m_pofs and the longest m_len(in bytes). + * + * Note that m_lblk in the RAW access mode refers to the number of + * the compressed ondisk block rather than the uncompressed + * in-memory block for the compressed file. + * + * m_pofs equals to m_lofs except for the inline data page. + * + * 2) Normal access mode: + * + * If the inode is not compressed, it has no difference with + * the RAW access mode. However, if the inode is compressed, + * users should pass a valid (m_lblk, m_lofs) pair, and get + * the needed m_pblk, m_pofs, m_len to get the compressed data + * and the updated m_lblk, m_lofs which indicates the start + * of the corresponding uncompressed data in the file. + */ +enum { + BH_Zipped = BH_PrivateStart, + BH_FullMapped, +}; + +/* Has a disk mapping */ +#define EROFS_MAP_MAPPED (1 << BH_Mapped) +/* Located in metadata (could be copied from bd_inode) */ +#define EROFS_MAP_META (1 << BH_Meta) +/* The extent has been compressed */ +#define EROFS_MAP_ZIPPED (1 << BH_Zipped) +/* The length of extent is full */ +#define EROFS_MAP_FULL_MAPPED (1 << BH_FullMapped) + +struct erofs_map_blocks { + erofs_off_t m_pa, m_la; + u64 m_plen, m_llen; + + unsigned int m_flags; + + struct page *mpage; +}; + +/* Flags used by erofs_map_blocks() */ +#define EROFS_GET_BLOCKS_RAW 0x0001 + +/* zmap.c */ +#ifdef CONFIG_EROFS_FS_ZIP +int z_erofs_fill_inode(struct inode *inode); +int z_erofs_map_blocks_iter(struct inode *inode, + struct erofs_map_blocks *map, + int flags); +#else +static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; } +static inline int z_erofs_map_blocks_iter(struct inode *inode, + struct erofs_map_blocks *map, + int flags) +{ + return -EOPNOTSUPP; +} +#endif /* !CONFIG_EROFS_FS_ZIP */ + +/* data.c */ +static inline struct bio *erofs_grab_bio(struct super_block *sb, + erofs_blk_t blkaddr, + unsigned int nr_pages, + void *bi_private, bio_end_io_t endio, + bool nofail) +{ + const gfp_t gfp = GFP_NOIO; + struct bio *bio; + + do { + if (nr_pages == 1) { + bio = bio_alloc(gfp | (nofail ? 
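erofs_grab_bio() above degrades gracefully under memory pressure: it asks for nr_pages vectors, halves the request on failure, and only the final single-page attempt may use __GFP_NOFAIL. The shape of that back-off loop, with a capped malloc standing in for bio_alloc() (try_alloc() and its failure threshold are invented for the demo):

#include <stdio.h>
#include <stdlib.h>

/* pretend allocator that fails above a size cap, like a stressed slab */
static void *try_alloc(unsigned int nvecs)
{
	return nvecs > 64 ? NULL : malloc(nvecs * 16u);
}

static void *grab_with_backoff(unsigned int nvecs)
{
	void *p;

	do {
		if (nvecs == 1)
			return try_alloc(1);	/* last resort: smallest request */
		p = try_alloc(nvecs);
		nvecs /= 2;			/* back off and retry smaller */
	} while (!p);
	return p;
}

int main(void)
{
	void *p = grab_with_backoff(256);	/* succeeds once halved to 64 */

	printf("%s\n", p ? "allocated" : "failed");
	free(p);
	return 0;
}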
__GFP_NOFAIL : 0), 1); + if (unlikely(!bio)) { + DBG_BUGON(nofail); + return ERR_PTR(-ENOMEM); + } + break; + } + bio = bio_alloc(gfp, nr_pages); + nr_pages /= 2; + } while (unlikely(!bio)); + + bio->bi_end_io = endio; + bio_set_dev(bio, sb->s_bdev); + bio->bi_iter.bi_sector = (sector_t)blkaddr << LOG_SECTORS_PER_BLOCK; + bio->bi_private = bi_private; + return bio; +} + +static inline void __submit_bio(struct bio *bio, unsigned int op, + unsigned int op_flags) +{ + bio_set_op_attrs(bio, op, op_flags); + submit_bio(bio); +} + +struct page *__erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr, + bool prio, bool nofail); + +static inline struct page *erofs_get_meta_page(struct super_block *sb, + erofs_blk_t blkaddr, bool prio) +{ + return __erofs_get_meta_page(sb, blkaddr, prio, false); +} + +int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int); + +static inline struct page *erofs_get_inline_page(struct inode *inode, + erofs_blk_t blkaddr) +{ + return erofs_get_meta_page(inode->i_sb, blkaddr, + S_ISDIR(inode->i_mode)); +} + +/* inode.c */ +static inline unsigned long erofs_inode_hash(erofs_nid_t nid) +{ +#if BITS_PER_LONG == 32 + return (nid >> 32) ^ (nid & 0xffffffff); +#else + return nid; +#endif +} + +extern const struct inode_operations erofs_generic_iops; +extern const struct inode_operations erofs_symlink_iops; +extern const struct inode_operations erofs_fast_symlink_iops; + +static inline void set_inode_fast_symlink(struct inode *inode) +{ + inode->i_op = &erofs_fast_symlink_iops; +} + +static inline bool is_inode_fast_symlink(struct inode *inode) +{ + return inode->i_op == &erofs_fast_symlink_iops; +} + +struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir); +int erofs_getattr(const struct path *path, struct kstat *stat, + u32 request_mask, unsigned int query_flags); + +/* namei.c */ +extern const struct inode_operations erofs_dir_iops; + +int erofs_namei(struct inode *dir, struct qstr *name, + erofs_nid_t *nid, unsigned int *d_type); + +/* dir.c */ +extern const struct file_operations erofs_dir_fops; + +/* utils.c / zdata.c */ +struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail); + +#if (EROFS_PCPUBUF_NR_PAGES > 0) +void *erofs_get_pcpubuf(unsigned int pagenr); +#define erofs_put_pcpubuf(buf) do { \ + (void)&(buf); \ + preempt_enable(); \ +} while (0) +#else +static inline void *erofs_get_pcpubuf(unsigned int pagenr) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +#define erofs_put_pcpubuf(buf) do {} while (0) +#endif + +#ifdef CONFIG_EROFS_FS_ZIP +int erofs_workgroup_put(struct erofs_workgroup *grp); +struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, + pgoff_t index, bool *tag); +int erofs_register_workgroup(struct super_block *sb, + struct erofs_workgroup *grp, bool tag); +void erofs_workgroup_free_rcu(struct erofs_workgroup *grp); +void erofs_shrinker_register(struct super_block *sb); +void erofs_shrinker_unregister(struct super_block *sb); +int __init erofs_init_shrinker(void); +void erofs_exit_shrinker(void); +int __init z_erofs_init_zip_subsystem(void); +void z_erofs_exit_zip_subsystem(void); +int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, + struct erofs_workgroup *egrp); +int erofs_try_to_free_cached_page(struct address_space *mapping, + struct page *page); +#else +static inline void erofs_shrinker_register(struct super_block *sb) {} +static inline void erofs_shrinker_unregister(struct super_block *sb) {} +static inline int erofs_init_shrinker(void) { return 0; } +static 
inline void erofs_exit_shrinker(void) {} +static inline int z_erofs_init_zip_subsystem(void) { return 0; } +static inline void z_erofs_exit_zip_subsystem(void) {} +#endif /* !CONFIG_EROFS_FS_ZIP */ + +#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */ + +#endif /* __EROFS_INTERNAL_H */ + diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c new file mode 100644 index 000000000000..8832b5d95d91 --- /dev/null +++ b/fs/erofs/namei.c @@ -0,0 +1,251 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "xattr.h" + +#include + +struct erofs_qstr { + const unsigned char *name; + const unsigned char *end; +}; + +/* based on the end of qn is accurate and it must have the trailing '\0' */ +static inline int dirnamecmp(const struct erofs_qstr *qn, + const struct erofs_qstr *qd, + unsigned int *matched) +{ + unsigned int i = *matched; + + /* + * on-disk error, let's only BUG_ON in the debugging mode. + * otherwise, it will return 1 to just skip the invalid name + * and go on (in consideration of the lookup performance). + */ + DBG_BUGON(qd->name > qd->end); + + /* qd could not have trailing '\0' */ + /* However it is absolutely safe if < qd->end */ + while (qd->name + i < qd->end && qd->name[i] != '\0') { + if (qn->name[i] != qd->name[i]) { + *matched = i; + return qn->name[i] > qd->name[i] ? 1 : -1; + } + ++i; + } + *matched = i; + /* See comments in __d_alloc on the terminating NUL character */ + return qn->name[i] == '\0' ? 0 : 1; +} + +#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1)) + +static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name, + u8 *data, + unsigned int dirblksize, + const int ndirents) +{ + int head, back; + unsigned int startprfx, endprfx; + struct erofs_dirent *const de = (struct erofs_dirent *)data; + + /* since the 1st dirent has been evaluated previously */ + head = 1; + back = ndirents - 1; + startprfx = endprfx = 0; + + while (head <= back) { + const int mid = head + (back - head) / 2; + const int nameoff = nameoff_from_disk(de[mid].nameoff, + dirblksize); + unsigned int matched = min(startprfx, endprfx); + struct erofs_qstr dname = { + .name = data + nameoff, + .end = unlikely(mid >= ndirents - 1) ? 
+ data + dirblksize : + data + nameoff_from_disk(de[mid + 1].nameoff, + dirblksize) + }; + + /* string comparison without already matched prefix */ + int ret = dirnamecmp(name, &dname, &matched); + + if (unlikely(!ret)) { + return de + mid; + } else if (ret > 0) { + head = mid + 1; + startprfx = matched; + } else { + back = mid - 1; + endprfx = matched; + } + } + + return ERR_PTR(-ENOENT); +} + +static struct page *find_target_block_classic(struct inode *dir, + struct erofs_qstr *name, + int *_ndirents) +{ + unsigned int startprfx, endprfx; + int head, back; + struct address_space *const mapping = dir->i_mapping; + struct page *candidate = ERR_PTR(-ENOENT); + + startprfx = endprfx = 0; + head = 0; + back = inode_datablocks(dir) - 1; + + while (head <= back) { + const int mid = head + (back - head) / 2; + struct page *page = read_mapping_page(mapping, mid, NULL); + + if (!IS_ERR(page)) { + struct erofs_dirent *de = kmap_atomic(page); + const int nameoff = nameoff_from_disk(de->nameoff, + EROFS_BLKSIZ); + const int ndirents = nameoff / sizeof(*de); + int diff; + unsigned int matched; + struct erofs_qstr dname; + + if (unlikely(!ndirents)) { + kunmap_atomic(de); + put_page(page); + errln("corrupted dir block %d @ nid %llu", + mid, EROFS_V(dir)->nid); + DBG_BUGON(1); + page = ERR_PTR(-EFSCORRUPTED); + goto out; + } + + matched = min(startprfx, endprfx); + + dname.name = (u8 *)de + nameoff; + if (ndirents == 1) + dname.end = (u8 *)de + EROFS_BLKSIZ; + else + dname.end = (u8 *)de + + nameoff_from_disk(de[1].nameoff, + EROFS_BLKSIZ); + + /* string comparison without already matched prefix */ + diff = dirnamecmp(name, &dname, &matched); + kunmap_atomic(de); + + if (unlikely(!diff)) { + *_ndirents = 0; + goto out; + } else if (diff > 0) { + head = mid + 1; + startprfx = matched; + + if (!IS_ERR(candidate)) + put_page(candidate); + candidate = page; + *_ndirents = ndirents; + } else { + put_page(page); + + back = mid - 1; + endprfx = matched; + } + continue; + } +out: /* free if the candidate is valid */ + if (!IS_ERR(candidate)) + put_page(candidate); + return page; + } + return candidate; +} + +int erofs_namei(struct inode *dir, + struct qstr *name, + erofs_nid_t *nid, unsigned int *d_type) +{ + int ndirents; + struct page *page; + void *data; + struct erofs_dirent *de; + struct erofs_qstr qn; + + if (unlikely(!dir->i_size)) + return -ENOENT; + + qn.name = name->name; + qn.end = name->name + name->len; + + ndirents = 0; + page = find_target_block_classic(dir, &qn, &ndirents); + + if (IS_ERR(page)) + return PTR_ERR(page); + + data = kmap_atomic(page); + /* the target page has been mapped */ + if (ndirents) + de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents); + else + de = (struct erofs_dirent *)data; + + if (!IS_ERR(de)) { + *nid = le64_to_cpu(de->nid); + *d_type = de->file_type; + } + + kunmap_atomic(data); + put_page(page); + + return PTR_ERR_OR_ZERO(de); +} + +/* NOTE: i_mutex is already held by vfs */ +static struct dentry *erofs_lookup(struct inode *dir, + struct dentry *dentry, + unsigned int flags) +{ + int err; + erofs_nid_t nid; + unsigned int d_type; + struct inode *inode; + + DBG_BUGON(!d_really_is_negative(dentry)); + /* dentry must be unhashed in lookup, no need to worry about */ + DBG_BUGON(!d_unhashed(dentry)); + + trace_erofs_lookup(dir, dentry, flags); + + /* file name exceeds fs limit */ + if (unlikely(dentry->d_name.len > EROFS_NAME_LEN)) + return ERR_PTR(-ENAMETOOLONG); + + /* false uninitialized warnings on gcc 4.8.x */ + err = erofs_namei(dir, &dentry->d_name, &nid, 
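find_target_dirent() and find_target_block_classic() above share a classic refinement of binary search over sorted strings: every candidate between the current bounds shares at least min(startprfx, endprfx) leading bytes with the target, so each comparison can resume at that offset instead of byte 0. A standalone sketch over a plain sorted string array:

#include <stdio.h>

/* compare starting at *matched, updating it to the new shared prefix */
static int prefixcmp(const char *a, const char *b, unsigned int *matched)
{
	unsigned int i = *matched;

	while (a[i] && a[i] == b[i])
		i++;
	*matched = i;
	return (unsigned char)a[i] - (unsigned char)b[i];
}

static int search(const char *const *names, int n, const char *target)
{
	unsigned int startprfx = 0, endprfx = 0;
	int head = 0, back = n - 1;

	while (head <= back) {
		int mid = head + (back - head) / 2;
		unsigned int matched = startprfx < endprfx ? startprfx : endprfx;
		int ret = prefixcmp(target, names[mid], &matched);

		if (!ret)
			return mid;
		if (ret > 0) {
			head = mid + 1;
			startprfx = matched;	/* lower bound's known prefix */
		} else {
			back = mid - 1;
			endprfx = matched;	/* upper bound's known prefix */
		}
	}
	return -1;
}

int main(void)
{
	const char *names[] = { "abc", "abd", "abe", "abf" };

	printf("%d\n", search(names, 4, "abe"));	/* prints 2 */
	return 0;
}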
&d_type); + + if (err == -ENOENT) { + /* negative dentry */ + inode = NULL; + } else if (unlikely(err)) { + inode = ERR_PTR(err); + } else { + debugln("%s, %s (nid %llu) found, d_type %u", __func__, + dentry->d_name.name, nid, d_type); + inode = erofs_iget(dir->i_sb, nid, d_type == FT_DIR); + } + return d_splice_alias(inode, dentry); +} + +const struct inode_operations erofs_dir_iops = { + .lookup = erofs_lookup, + .getattr = erofs_getattr, +#ifdef CONFIG_EROFS_FS_XATTR + .listxattr = erofs_listxattr, +#endif + .get_acl = erofs_get_acl, +}; + diff --git a/fs/erofs/super.c b/fs/erofs/super.c new file mode 100644 index 000000000000..6d3a9bcb8daa --- /dev/null +++ b/fs/erofs/super.c @@ -0,0 +1,669 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include +#include +#include +#include +#include +#include "xattr.h" + +#define CREATE_TRACE_POINTS +#include + +static struct kmem_cache *erofs_inode_cachep __read_mostly; + +static void init_once(void *ptr) +{ + struct erofs_vnode *vi = ptr; + + inode_init_once(&vi->vfs_inode); +} + +static int __init erofs_init_inode_cache(void) +{ + erofs_inode_cachep = kmem_cache_create("erofs_inode", + sizeof(struct erofs_vnode), 0, + SLAB_RECLAIM_ACCOUNT, + init_once); + + return erofs_inode_cachep ? 0 : -ENOMEM; +} + +static void erofs_exit_inode_cache(void) +{ + kmem_cache_destroy(erofs_inode_cachep); +} + +static struct inode *alloc_inode(struct super_block *sb) +{ + struct erofs_vnode *vi = + kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL); + + if (!vi) + return NULL; + + /* zero out everything except vfs_inode */ + memset(vi, 0, offsetof(struct erofs_vnode, vfs_inode)); + return &vi->vfs_inode; +} + +static void free_inode(struct inode *inode) +{ + struct erofs_vnode *vi = EROFS_V(inode); + + /* be careful RCU symlink path (see ext4_inode_info->i_data)! 
*/ + if (is_inode_fast_symlink(inode)) + kfree(inode->i_link); + + kfree(vi->xattr_shared_xattrs); + + kmem_cache_free(erofs_inode_cachep, vi); +} + +static bool check_layout_compatibility(struct super_block *sb, + struct erofs_super_block *layout) +{ + const unsigned int requirements = le32_to_cpu(layout->requirements); + + EROFS_SB(sb)->requirements = requirements; + + /* check if current kernel meets all mandatory requirements */ + if (requirements & (~EROFS_ALL_REQUIREMENTS)) { + errln("unidentified requirements %x, please upgrade kernel version", + requirements & ~EROFS_ALL_REQUIREMENTS); + return false; + } + return true; +} + +static int superblock_read(struct super_block *sb) +{ + struct erofs_sb_info *sbi; + struct buffer_head *bh; + struct erofs_super_block *layout; + unsigned int blkszbits; + int ret; + + bh = sb_bread(sb, 0); + + if (!bh) { + errln("cannot read erofs superblock"); + return -EIO; + } + + sbi = EROFS_SB(sb); + layout = (struct erofs_super_block *)((u8 *)bh->b_data + + EROFS_SUPER_OFFSET); + + ret = -EINVAL; + if (le32_to_cpu(layout->magic) != EROFS_SUPER_MAGIC_V1) { + errln("cannot find valid erofs superblock"); + goto out; + } + + blkszbits = layout->blkszbits; + /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */ + if (unlikely(blkszbits != LOG_BLOCK_SIZE)) { + errln("blksize %u isn't supported on this platform", + 1 << blkszbits); + goto out; + } + + if (!check_layout_compatibility(sb, layout)) + goto out; + + sbi->blocks = le32_to_cpu(layout->blocks); + sbi->meta_blkaddr = le32_to_cpu(layout->meta_blkaddr); +#ifdef CONFIG_EROFS_FS_XATTR + sbi->xattr_blkaddr = le32_to_cpu(layout->xattr_blkaddr); +#endif + sbi->islotbits = ffs(sizeof(struct erofs_inode_v1)) - 1; + sbi->root_nid = le16_to_cpu(layout->root_nid); + sbi->inos = le64_to_cpu(layout->inos); + + sbi->build_time = le64_to_cpu(layout->build_time); + sbi->build_time_nsec = le32_to_cpu(layout->build_time_nsec); + + memcpy(&sb->s_uuid, layout->uuid, sizeof(layout->uuid)); + + ret = strscpy(sbi->volume_name, layout->volume_name, + sizeof(layout->volume_name)); + if (ret < 0) { /* -E2BIG */ + errln("bad volume name without NIL terminator"); + ret = -EFSCORRUPTED; + goto out; + } + ret = 0; +out: + brelse(bh); + return ret; +} + +#ifdef CONFIG_EROFS_FAULT_INJECTION +const char *erofs_fault_name[FAULT_MAX] = { + [FAULT_KMALLOC] = "kmalloc", + [FAULT_READ_IO] = "read IO error", +}; + +static void __erofs_build_fault_attr(struct erofs_sb_info *sbi, + unsigned int rate) +{ + struct erofs_fault_info *ffi = &sbi->fault_info; + + if (rate) { + atomic_set(&ffi->inject_ops, 0); + ffi->inject_rate = rate; + ffi->inject_type = (1 << FAULT_MAX) - 1; + } else { + memset(ffi, 0, sizeof(struct erofs_fault_info)); + } + + set_opt(sbi, FAULT_INJECTION); +} + +static int erofs_build_fault_attr(struct erofs_sb_info *sbi, + substring_t *args) +{ + int rate = 0; + + if (args->from && match_int(args, &rate)) + return -EINVAL; + + __erofs_build_fault_attr(sbi, rate); + return 0; +} + +static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi) +{ + return sbi->fault_info.inject_rate; +} +#else +static void __erofs_build_fault_attr(struct erofs_sb_info *sbi, + unsigned int rate) +{ +} + +static int erofs_build_fault_attr(struct erofs_sb_info *sbi, + substring_t *args) +{ + infoln("fault_injection options not supported"); + return 0; +} + +static unsigned int erofs_get_fault_rate(struct erofs_sb_info *sbi) +{ + return 0; +} +#endif + +#ifdef CONFIG_EROFS_FS_ZIP +static int erofs_build_cache_strategy(struct 
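superblock_read() below trusts nothing before validating it: the magic at offset 1024, a block size matching the platform, no unknown incompat bits, and a NUL-terminated volume name. A condensed userspace version of the first two checks; the buffer and field offsets follow the erofs_super_block layout shown earlier, the magic value is EROFS_SUPER_MAGIC_V1 from mainline, and little-endian conversion is glossed over on this host-endian sketch:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define EROFS_MAGIC	0xE0F5E1E2u	/* EROFS_SUPER_MAGIC_V1 */
#define SUPER_OFFSET	1024
#define LOG_BLKSIZE	12		/* 4 KiB pages assumed */

static int super_read(const unsigned char *img, size_t len)
{
	uint32_t magic;

	if (len < SUPER_OFFSET + 13)
		return -1;
	memcpy(&magic, img + SUPER_OFFSET, sizeof(magic));	/* bytes 0-3 */
	if (magic != EROFS_MAGIC)
		return -1;			/* not an erofs image */
	if (img[SUPER_OFFSET + 12] != LOG_BLKSIZE)
		return -1;			/* unsupported block size */
	return 0;
}

int main(void)
{
	static unsigned char img[2048];
	uint32_t magic = EROFS_MAGIC;

	memcpy(img + SUPER_OFFSET, &magic, sizeof(magic));
	img[SUPER_OFFSET + 12] = LOG_BLKSIZE;	/* blkszbits at byte 12 */
	printf("%s\n", super_read(img, sizeof(img)) ? "bad" : "ok");
	return 0;
}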
erofs_sb_info *sbi, + substring_t *args) +{ + const char *cs = match_strdup(args); + int err = 0; + + if (!cs) { + errln("Not enough memory to store cache strategy"); + return -ENOMEM; + } + + if (!strcmp(cs, "disabled")) { + sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED; + } else if (!strcmp(cs, "readahead")) { + sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD; + } else if (!strcmp(cs, "readaround")) { + sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; + } else { + errln("Unrecognized cache strategy \"%s\"", cs); + err = -EINVAL; + } + kfree(cs); + return err; +} +#else +static int erofs_build_cache_strategy(struct erofs_sb_info *sbi, + substring_t *args) +{ + infoln("EROFS compression is disabled, so cache strategy is ignored"); + return 0; +} +#endif + +/* set up default EROFS parameters */ +static void default_options(struct erofs_sb_info *sbi) +{ +#ifdef CONFIG_EROFS_FS_ZIP + sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; + sbi->max_sync_decompress_pages = 3; +#endif +#ifdef CONFIG_EROFS_FS_XATTR + set_opt(sbi, XATTR_USER); +#endif +#ifdef CONFIG_EROFS_FS_POSIX_ACL + set_opt(sbi, POSIX_ACL); +#endif +} + +enum { + Opt_user_xattr, + Opt_nouser_xattr, + Opt_acl, + Opt_noacl, + Opt_fault_injection, + Opt_cache_strategy, + Opt_err +}; + +static match_table_t erofs_tokens = { + {Opt_user_xattr, "user_xattr"}, + {Opt_nouser_xattr, "nouser_xattr"}, + {Opt_acl, "acl"}, + {Opt_noacl, "noacl"}, + {Opt_fault_injection, "fault_injection=%u"}, + {Opt_cache_strategy, "cache_strategy=%s"}, + {Opt_err, NULL} +}; + +static int parse_options(struct super_block *sb, char *options) +{ + substring_t args[MAX_OPT_ARGS]; + char *p; + int err; + + if (!options) + return 0; + + while ((p = strsep(&options, ","))) { + int token; + + if (!*p) + continue; + + args[0].to = args[0].from = NULL; + token = match_token(p, erofs_tokens, args); + + switch (token) { +#ifdef CONFIG_EROFS_FS_XATTR + case Opt_user_xattr: + set_opt(EROFS_SB(sb), XATTR_USER); + break; + case Opt_nouser_xattr: + clear_opt(EROFS_SB(sb), XATTR_USER); + break; +#else + case Opt_user_xattr: + infoln("user_xattr options not supported"); + break; + case Opt_nouser_xattr: + infoln("nouser_xattr options not supported"); + break; +#endif +#ifdef CONFIG_EROFS_FS_POSIX_ACL + case Opt_acl: + set_opt(EROFS_SB(sb), POSIX_ACL); + break; + case Opt_noacl: + clear_opt(EROFS_SB(sb), POSIX_ACL); + break; +#else + case Opt_acl: + infoln("acl options not supported"); + break; + case Opt_noacl: + infoln("noacl options not supported"); + break; +#endif + case Opt_fault_injection: + err = erofs_build_fault_attr(EROFS_SB(sb), args); + if (err) + return err; + break; + case Opt_cache_strategy: + err = erofs_build_cache_strategy(EROFS_SB(sb), args); + if (err) + return err; + break; + default: + errln("Unrecognized mount option \"%s\" or missing value", p); + return -EINVAL; + } + } + return 0; +} + +#ifdef CONFIG_EROFS_FS_ZIP +static const struct address_space_operations managed_cache_aops; + +static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask) +{ + int ret = 1; /* 0 - busy */ + struct address_space *const mapping = page->mapping; + + DBG_BUGON(!PageLocked(page)); + DBG_BUGON(mapping->a_ops != &managed_cache_aops); + + if (PagePrivate(page)) + ret = erofs_try_to_free_cached_page(mapping, page); + + return ret; +} + +static void managed_cache_invalidatepage(struct page *page, + unsigned int offset, + unsigned int length) +{ + const unsigned int stop = length + offset; + + DBG_BUGON(!PageLocked(page)); + + /* Check for potential overflow in 
debug mode */ + DBG_BUGON(stop > PAGE_SIZE || stop < length); + + if (offset == 0 && stop == PAGE_SIZE) + while (!managed_cache_releasepage(page, GFP_NOFS)) + cond_resched(); +} + +static const struct address_space_operations managed_cache_aops = { + .releasepage = managed_cache_releasepage, + .invalidatepage = managed_cache_invalidatepage, +}; + +static int erofs_init_managed_cache(struct super_block *sb) +{ + struct erofs_sb_info *const sbi = EROFS_SB(sb); + struct inode *const inode = new_inode(sb); + + if (unlikely(!inode)) + return -ENOMEM; + + set_nlink(inode, 1); + inode->i_size = OFFSET_MAX; + + inode->i_mapping->a_ops = &managed_cache_aops; + mapping_set_gfp_mask(inode->i_mapping, + GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE); + sbi->managed_cache = inode; + return 0; +} +#else +static int erofs_init_managed_cache(struct super_block *sb) { return 0; } +#endif + +static int erofs_fill_super(struct super_block *sb, void *data, int silent) +{ + struct inode *inode; + struct erofs_sb_info *sbi; + int err; + + infoln("fill_super, device -> %s", sb->s_id); + infoln("options -> %s", (char *)data); + + sb->s_magic = EROFS_SUPER_MAGIC; + + if (unlikely(!sb_set_blocksize(sb, EROFS_BLKSIZ))) { + errln("failed to set erofs blksize"); + return -EINVAL; + } + + sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); + if (unlikely(!sbi)) + return -ENOMEM; + + sb->s_fs_info = sbi; + err = superblock_read(sb); + if (err) + return err; + + sb->s_flags |= SB_RDONLY | SB_NOATIME; + sb->s_maxbytes = MAX_LFS_FILESIZE; + sb->s_time_gran = 1; + + sb->s_op = &erofs_sops; + +#ifdef CONFIG_EROFS_FS_XATTR + sb->s_xattr = erofs_xattr_handlers; +#endif + /* set erofs default mount options */ + default_options(sbi); + + err = parse_options(sb, data); + if (unlikely(err)) + return err; + + if (!silent) + infoln("root inode @ nid %llu", ROOT_NID(sbi)); + + if (test_opt(sbi, POSIX_ACL)) + sb->s_flags |= SB_POSIXACL; + else + sb->s_flags &= ~SB_POSIXACL; + +#ifdef CONFIG_EROFS_FS_ZIP + INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC); +#endif + + /* get the root inode */ + inode = erofs_iget(sb, ROOT_NID(sbi), true); + if (IS_ERR(inode)) + return PTR_ERR(inode); + + if (unlikely(!S_ISDIR(inode->i_mode))) { + errln("rootino(nid %llu) is not a directory(i_mode %o)", + ROOT_NID(sbi), inode->i_mode); + iput(inode); + return -EINVAL; + } + + sb->s_root = d_make_root(inode); + if (unlikely(!sb->s_root)) + return -ENOMEM; + + erofs_shrinker_register(sb); + /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */ + err = erofs_init_managed_cache(sb); + if (unlikely(err)) + return err; + + if (!silent) + infoln("mounted on %s with opts: %s.", sb->s_id, (char *)data); + return 0; +} + +static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) +{ + return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super); +} + +/* + * could be triggered after deactivate_locked_super() + * is called, thus including umount and failed to initialize. 
+ */ +static void erofs_kill_sb(struct super_block *sb) +{ + struct erofs_sb_info *sbi; + + WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC); + infoln("unmounting for %s", sb->s_id); + + kill_block_super(sb); + + sbi = EROFS_SB(sb); + if (!sbi) + return; + kfree(sbi); + sb->s_fs_info = NULL; +} + +/* called when ->s_root is non-NULL */ +static void erofs_put_super(struct super_block *sb) +{ + struct erofs_sb_info *const sbi = EROFS_SB(sb); + + DBG_BUGON(!sbi); + + erofs_shrinker_unregister(sb); +#ifdef CONFIG_EROFS_FS_ZIP + iput(sbi->managed_cache); + sbi->managed_cache = NULL; +#endif +} + +static struct file_system_type erofs_fs_type = { + .owner = THIS_MODULE, + .name = "erofs", + .mount = erofs_mount, + .kill_sb = erofs_kill_sb, + .fs_flags = FS_REQUIRES_DEV, +}; +MODULE_ALIAS_FS("erofs"); + +static int __init erofs_module_init(void) +{ + int err; + + erofs_check_ondisk_layout_definitions(); + infoln("initializing erofs " EROFS_VERSION); + + err = erofs_init_inode_cache(); + if (err) + goto icache_err; + + err = erofs_init_shrinker(); + if (err) + goto shrinker_err; + + err = z_erofs_init_zip_subsystem(); + if (err) + goto zip_err; + + err = register_filesystem(&erofs_fs_type); + if (err) + goto fs_err; + + infoln("successfully to initialize erofs"); + return 0; + +fs_err: + z_erofs_exit_zip_subsystem(); +zip_err: + erofs_exit_shrinker(); +shrinker_err: + erofs_exit_inode_cache(); +icache_err: + return err; +} + +static void __exit erofs_module_exit(void) +{ + unregister_filesystem(&erofs_fs_type); + z_erofs_exit_zip_subsystem(); + erofs_exit_shrinker(); + erofs_exit_inode_cache(); + infoln("successfully finalize erofs"); +} + +/* get filesystem statistics */ +static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + struct super_block *sb = dentry->d_sb; + struct erofs_sb_info *sbi = EROFS_SB(sb); + u64 id = huge_encode_dev(sb->s_bdev->bd_dev); + + buf->f_type = sb->s_magic; + buf->f_bsize = EROFS_BLKSIZ; + buf->f_blocks = sbi->blocks; + buf->f_bfree = buf->f_bavail = 0; + + buf->f_files = ULLONG_MAX; + buf->f_ffree = ULLONG_MAX - sbi->inos; + + buf->f_namelen = EROFS_NAME_LEN; + + buf->f_fsid.val[0] = (u32)id; + buf->f_fsid.val[1] = (u32)(id >> 32); + return 0; +} + +static int erofs_show_options(struct seq_file *seq, struct dentry *root) +{ + struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb); + +#ifdef CONFIG_EROFS_FS_XATTR + if (test_opt(sbi, XATTR_USER)) + seq_puts(seq, ",user_xattr"); + else + seq_puts(seq, ",nouser_xattr"); +#endif +#ifdef CONFIG_EROFS_FS_POSIX_ACL + if (test_opt(sbi, POSIX_ACL)) + seq_puts(seq, ",acl"); + else + seq_puts(seq, ",noacl"); +#endif + if (test_opt(sbi, FAULT_INJECTION)) + seq_printf(seq, ",fault_injection=%u", + erofs_get_fault_rate(sbi)); +#ifdef CONFIG_EROFS_FS_ZIP + if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) { + seq_puts(seq, ",cache_strategy=disabled"); + } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) { + seq_puts(seq, ",cache_strategy=readahead"); + } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) { + seq_puts(seq, ",cache_strategy=readaround"); + } else { + seq_puts(seq, ",cache_strategy=(unknown)"); + DBG_BUGON(1); + } +#endif + return 0; +} + +static int erofs_remount(struct super_block *sb, int *flags, char *data) +{ + struct erofs_sb_info *sbi = EROFS_SB(sb); + unsigned int org_mnt_opt = sbi->mount_opt; + unsigned int org_inject_rate = erofs_get_fault_rate(sbi); + int err; + + DBG_BUGON(!sb_rdonly(sb)); + err = parse_options(sb, data); + if (err) + goto out; + + if (test_opt(sbi, 
POSIX_ACL)) + sb->s_flags |= SB_POSIXACL; + else + sb->s_flags &= ~SB_POSIXACL; + + *flags |= SB_RDONLY; + return 0; +out: + __erofs_build_fault_attr(sbi, org_inject_rate); + sbi->mount_opt = org_mnt_opt; + + return err; +} + +const struct super_operations erofs_sops = { + .put_super = erofs_put_super, + .alloc_inode = alloc_inode, + .free_inode = free_inode, + .statfs = erofs_statfs, + .show_options = erofs_show_options, + .remount_fs = erofs_remount, +}; + +module_init(erofs_module_init); +module_exit(erofs_module_exit); + +MODULE_DESCRIPTION("Enhanced ROM File System"); +MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc."); +MODULE_LICENSE("GPL"); + diff --git a/fs/erofs/tagptr.h b/fs/erofs/tagptr.h new file mode 100644 index 000000000000..a72897c86744 --- /dev/null +++ b/fs/erofs/tagptr.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * A tagged pointer implementation + * + * Copyright (C) 2018 Gao Xiang + */ +#ifndef __EROFS_FS_TAGPTR_H +#define __EROFS_FS_TAGPTR_H + +#include +#include + +/* + * the name of tagged pointer types are tagptr{1, 2, 3...}_t + * avoid directly using the internal structs __tagptr{1, 2, 3...} + */ +#define __MAKE_TAGPTR(n) \ +typedef struct __tagptr##n { \ + uintptr_t v; \ +} tagptr##n##_t; + +__MAKE_TAGPTR(1) +__MAKE_TAGPTR(2) +__MAKE_TAGPTR(3) +__MAKE_TAGPTR(4) + +#undef __MAKE_TAGPTR + +extern void __compiletime_error("bad tagptr tags") + __bad_tagptr_tags(void); + +extern void __compiletime_error("bad tagptr type") + __bad_tagptr_type(void); + +/* fix the broken usage of "#define tagptr2_t tagptr3_t" by users */ +#define __tagptr_mask_1(ptr, n) \ + __builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? \ + (1UL << (n)) - 1 : + +#define __tagptr_mask(ptr) (\ + __tagptr_mask_1(ptr, 1) ( \ + __tagptr_mask_1(ptr, 2) ( \ + __tagptr_mask_1(ptr, 3) ( \ + __tagptr_mask_1(ptr, 4) ( \ + __bad_tagptr_type(), 0))))) + +/* generate a tagged pointer from a raw value */ +#define tagptr_init(type, val) \ + ((typeof(type)){ .v = (uintptr_t)(val) }) + +/* + * directly cast a tagged pointer to the native pointer type, which + * could be used for backward compatibility of existing code. 
+ */ +#define tagptr_cast_ptr(tptr) ((void *)(tptr).v) + +/* encode tagged pointers */ +#define tagptr_fold(type, ptr, _tags) ({ \ + const typeof(_tags) tags = (_tags); \ + if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(type))) \ + __bad_tagptr_tags(); \ +tagptr_init(type, (uintptr_t)(ptr) | tags); }) + +/* decode tagged pointers */ +#define tagptr_unfold_ptr(tptr) \ + ((void *)((tptr).v & ~__tagptr_mask(tptr))) + +#define tagptr_unfold_tags(tptr) \ + ((tptr).v & __tagptr_mask(tptr)) + +/* operations for the tagger pointer */ +#define tagptr_eq(_tptr1, _tptr2) ({ \ + typeof(_tptr1) tptr1 = (_tptr1); \ + typeof(_tptr2) tptr2 = (_tptr2); \ + (void)(&tptr1 == &tptr2); \ +(tptr1).v == (tptr2).v; }) + +/* lock-free CAS operation */ +#define tagptr_cmpxchg(_ptptr, _o, _n) ({ \ + typeof(_ptptr) ptptr = (_ptptr); \ + typeof(_o) o = (_o); \ + typeof(_n) n = (_n); \ + (void)(&o == &n); \ + (void)(&o == ptptr); \ +tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); }) + +/* wrap WRITE_ONCE if atomic update is needed */ +#define tagptr_replace_tags(_ptptr, tags) ({ \ + typeof(_ptptr) ptptr = (_ptptr); \ + *ptptr = tagptr_fold(*ptptr, tagptr_unfold_ptr(*ptptr), tags); \ +*ptptr; }) + +#define tagptr_set_tags(_ptptr, _tags) ({ \ + typeof(_ptptr) ptptr = (_ptptr); \ + const typeof(_tags) tags = (_tags); \ + if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \ + __bad_tagptr_tags(); \ + ptptr->v |= tags; \ +*ptptr; }) + +#define tagptr_clear_tags(_ptptr, _tags) ({ \ + typeof(_ptptr) ptptr = (_ptptr); \ + const typeof(_tags) tags = (_tags); \ + if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \ + __bad_tagptr_tags(); \ + ptptr->v &= ~tags; \ +*ptptr; }) + +#endif /* __EROFS_FS_TAGPTR_H */ + diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c new file mode 100644 index 000000000000..1dd041aa0f5a --- /dev/null +++ b/fs/erofs/utils.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "internal.h" +#include + +struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail) +{ + struct page *page; + + if (!list_empty(pool)) { + page = lru_to_page(pool); + DBG_BUGON(page_ref_count(page) != 1); + list_del(&page->lru); + } else { + page = alloc_pages(gfp | (nofail ? 
__GFP_NOFAIL : 0), 0); + } + return page; +} + +#if (EROFS_PCPUBUF_NR_PAGES > 0) +static struct { + u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES]; +} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS]; + +void *erofs_get_pcpubuf(unsigned int pagenr) +{ + preempt_disable(); + return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE]; +} +#endif + +#ifdef CONFIG_EROFS_FS_ZIP +/* global shrink count (for all mounted EROFS instances) */ +static atomic_long_t erofs_global_shrink_cnt; + +#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount) +#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount) + +static int erofs_workgroup_get(struct erofs_workgroup *grp) +{ + int o; + +repeat: + o = erofs_wait_on_workgroup_freezed(grp); + if (unlikely(o <= 0)) + return -1; + + if (unlikely(atomic_cmpxchg(&grp->refcount, o, o + 1) != o)) + goto repeat; + + /* decrease refcount paired by erofs_workgroup_put */ + if (unlikely(o == 1)) + atomic_long_dec(&erofs_global_shrink_cnt); + return 0; +} + +struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb, + pgoff_t index, bool *tag) +{ + struct erofs_sb_info *sbi = EROFS_SB(sb); + struct erofs_workgroup *grp; + +repeat: + rcu_read_lock(); + grp = radix_tree_lookup(&sbi->workstn_tree, index); + if (grp) { + *tag = xa_pointer_tag(grp); + grp = xa_untag_pointer(grp); + + if (erofs_workgroup_get(grp)) { + /* prefer to relax rcu read side */ + rcu_read_unlock(); + goto repeat; + } + + DBG_BUGON(index != grp->index); + } + rcu_read_unlock(); + return grp; +} + +int erofs_register_workgroup(struct super_block *sb, + struct erofs_workgroup *grp, + bool tag) +{ + struct erofs_sb_info *sbi; + int err; + + /* grp shouldn't be broken or used before */ + if (unlikely(atomic_read(&grp->refcount) != 1)) { + DBG_BUGON(1); + return -EINVAL; + } + + err = radix_tree_preload(GFP_NOFS); + if (err) + return err; + + sbi = EROFS_SB(sb); + xa_lock(&sbi->workstn_tree); + + grp = xa_tag_pointer(grp, tag); + + /* + * Bump up reference count before making this workgroup + * visible to other users in order to avoid potential UAF + * without serialized by workstn_lock. + */ + __erofs_workgroup_get(grp); + + err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp); + if (unlikely(err)) + /* + * it's safe to decrease since the workgroup isn't visible + * and refcount >= 2 (cannot be freezed). + */ + __erofs_workgroup_put(grp); + + xa_unlock(&sbi->workstn_tree); + radix_tree_preload_end(); + return err; +} + +static void __erofs_workgroup_free(struct erofs_workgroup *grp) +{ + atomic_long_dec(&erofs_global_shrink_cnt); + erofs_workgroup_free_rcu(grp); +} + +int erofs_workgroup_put(struct erofs_workgroup *grp) +{ + int count = atomic_dec_return(&grp->refcount); + + if (count == 1) + atomic_long_inc(&erofs_global_shrink_cnt); + else if (!count) + __erofs_workgroup_free(grp); + return count; +} + +static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp) +{ + erofs_workgroup_unfreeze(grp, 0); + __erofs_workgroup_free(grp); +} + +static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, + struct erofs_workgroup *grp, + bool cleanup) +{ + /* + * If managed cache is on, refcount of workgroups + * themselves could be < 0 (freezed). In other words, + * there is no guarantee that all refcounts > 0. + */ + if (!erofs_workgroup_try_to_freeze(grp, 1)) + return false; + + /* + * Note that all cached pages should be unattached + * before deleted from the radix tree. 
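
erofs_workgroup_get() above is a freeze-aware reference grab: a non-positive refcount means the workgroup is frozen (or dying), so the increment may only happen through a compare-and-swap that rechecks the observed value. The same shape in portable C11 atomics, as a sketch (the shrink-counter bookkeeping is omitted):

#include <stdatomic.h>
#include <stdbool.h>

static bool get_unless_frozen(atomic_int *refcount)
{
	int o = atomic_load(refcount);

	for (;;) {
		if (o <= 0)	/* frozen or dying: no new references */
			return false;
		/* a failed CAS refreshes o with the current value */
		if (atomic_compare_exchange_weak(refcount, &o, o + 1))
			return true;
	}
}

int main(void)
{
	atomic_int ref = 1;

	return get_unless_frozen(&ref) ? 0 : 1;	/* takes ref to 2 */
}
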
Otherwise some + * cached pages could be still attached to the orphan + * old workgroup when the new one is available in the tree. + */ + if (erofs_try_to_free_all_cached_pages(sbi, grp)) { + erofs_workgroup_unfreeze(grp, 1); + return false; + } + + /* + * It's impossible to fail after the workgroup is freezed, + * however in order to avoid some race conditions, add a + * DBG_BUGON to observe this in advance. + */ + DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree, + grp->index)) != grp); + + /* + * If managed cache is on, last refcount should indicate + * the related workstation. + */ + erofs_workgroup_unfreeze_final(grp); + return true; +} + +static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi, + unsigned long nr_shrink, + bool cleanup) +{ + pgoff_t first_index = 0; + void *batch[PAGEVEC_SIZE]; + unsigned int freed = 0; + + int i, found; +repeat: + xa_lock(&sbi->workstn_tree); + + found = radix_tree_gang_lookup(&sbi->workstn_tree, + batch, first_index, PAGEVEC_SIZE); + + for (i = 0; i < found; ++i) { + struct erofs_workgroup *grp = xa_untag_pointer(batch[i]); + + first_index = grp->index + 1; + + /* try to shrink each valid workgroup */ + if (!erofs_try_to_release_workgroup(sbi, grp, cleanup)) + continue; + + ++freed; + if (unlikely(!--nr_shrink)) + break; + } + xa_unlock(&sbi->workstn_tree); + + if (i && nr_shrink) + goto repeat; + return freed; +} + +/* protected by 'erofs_sb_list_lock' */ +static unsigned int shrinker_run_no; + +/* protects the mounted 'erofs_sb_list' */ +static DEFINE_SPINLOCK(erofs_sb_list_lock); +static LIST_HEAD(erofs_sb_list); + +void erofs_shrinker_register(struct super_block *sb) +{ + struct erofs_sb_info *sbi = EROFS_SB(sb); + + mutex_init(&sbi->umount_mutex); + + spin_lock(&erofs_sb_list_lock); + list_add(&sbi->list, &erofs_sb_list); + spin_unlock(&erofs_sb_list_lock); +} + +void erofs_shrinker_unregister(struct super_block *sb) +{ + struct erofs_sb_info *const sbi = EROFS_SB(sb); + + mutex_lock(&sbi->umount_mutex); + erofs_shrink_workstation(sbi, ~0UL, true); + + spin_lock(&erofs_sb_list_lock); + list_del(&sbi->list); + spin_unlock(&erofs_sb_list_lock); + mutex_unlock(&sbi->umount_mutex); +} + +static unsigned long erofs_shrink_count(struct shrinker *shrink, + struct shrink_control *sc) +{ + return atomic_long_read(&erofs_global_shrink_cnt); +} + +static unsigned long erofs_shrink_scan(struct shrinker *shrink, + struct shrink_control *sc) +{ + struct erofs_sb_info *sbi; + struct list_head *p; + + unsigned long nr = sc->nr_to_scan; + unsigned int run_no; + unsigned long freed = 0; + + spin_lock(&erofs_sb_list_lock); + do { + run_no = ++shrinker_run_no; + } while (run_no == 0); + + /* Iterate over all mounted superblocks and try to shrink them */ + p = erofs_sb_list.next; + while (p != &erofs_sb_list) { + sbi = list_entry(p, struct erofs_sb_info, list); + + /* + * We move the ones we do to the end of the list, so we stop + * when we see one we have already done. + */ + if (sbi->shrinker_run_no == run_no) + break; + + if (!mutex_trylock(&sbi->umount_mutex)) { + p = p->next; + continue; + } + + spin_unlock(&erofs_sb_list_lock); + sbi->shrinker_run_no = run_no; + + freed += erofs_shrink_workstation(sbi, nr, false); + + spin_lock(&erofs_sb_list_lock); + /* Get the next list element before we move this one */ + p = p->next; + + /* + * Move this one to the end of the list to provide some + * fairness. 
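
The fairness dance in erofs_shrink_scan() above stamps every superblock it visits with the current run number and rotates it to the list tail, so a scan terminates as soon as it meets a node it has already stamped. A standalone sketch with a hypothetical singly-linked list (the kernel uses list_move_tail() under erofs_sb_list_lock):

#include <stdio.h>

struct node {
	struct node *next;
	unsigned int run_no;
	const char *name;
};

static void scan(struct node **headp, unsigned int run)
{
	struct node *head = *headp, *tail = head;

	if (!head)
		return;
	while (tail->next)		/* find the current tail */
		tail = tail->next;

	while (head && head->run_no != run) {
		struct node *n = head;

		head = n->next;
		n->run_no = run;	/* stamp: at most once per run */
		printf("shrinking %s\n", n->name);

		n->next = NULL;
		if (n == tail) {	/* single node: nothing to rotate */
			head = n;
			break;
		}
		tail->next = n;		/* rotate to the tail for fairness */
		tail = n;
	}
	*headp = head;
}

int main(void)
{
	struct node b = { NULL, 0, "B" };
	struct node a = { &b, 0, "A" };
	struct node *head = &a;

	scan(&head, 1);			/* shrinks A then B */
	scan(&head, 1);			/* no-op: everything is stamped */
	return 0;
}
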
+ */ + list_move_tail(&sbi->list, &erofs_sb_list); + mutex_unlock(&sbi->umount_mutex); + + if (freed >= nr) + break; + } + spin_unlock(&erofs_sb_list_lock); + return freed; +} + +static struct shrinker erofs_shrinker_info = { + .scan_objects = erofs_shrink_scan, + .count_objects = erofs_shrink_count, + .seeks = DEFAULT_SEEKS, +}; + +int __init erofs_init_shrinker(void) +{ + return register_shrinker(&erofs_shrinker_info); +} + +void erofs_exit_shrinker(void) +{ + unregister_shrinker(&erofs_shrinker_info); +} +#endif /* !CONFIG_EROFS_FS_ZIP */ + diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c new file mode 100644 index 000000000000..a8286998a079 --- /dev/null +++ b/fs/erofs/xattr.c @@ -0,0 +1,703 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include +#include "xattr.h" + +struct xattr_iter { + struct super_block *sb; + struct page *page; + void *kaddr; + + erofs_blk_t blkaddr; + unsigned int ofs; +}; + +static inline void xattr_iter_end(struct xattr_iter *it, bool atomic) +{ + /* the only user of kunmap() is 'init_inode_xattrs' */ + if (unlikely(!atomic)) + kunmap(it->page); + else + kunmap_atomic(it->kaddr); + + unlock_page(it->page); + put_page(it->page); +} + +static inline void xattr_iter_end_final(struct xattr_iter *it) +{ + if (!it->page) + return; + + xattr_iter_end(it, true); +} + +static int init_inode_xattrs(struct inode *inode) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + struct xattr_iter it; + unsigned int i; + struct erofs_xattr_ibody_header *ih; + struct super_block *sb; + struct erofs_sb_info *sbi; + bool atomic_map; + int ret = 0; + + /* the most case is that xattrs of this inode are initialized. */ + if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) + return 0; + + if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE)) + return -ERESTARTSYS; + + /* someone has initialized xattrs for us? */ + if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags)) + goto out_unlock; + + /* + * bypass all xattr operations if ->xattr_isize is not greater than + * sizeof(struct erofs_xattr_ibody_header), in detail: + * 1) it is not enough to contain erofs_xattr_ibody_header then + * ->xattr_isize should be 0 (it means no xattr); + * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk + * undefined right now (maybe use later with some new sb feature). 
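
The comment above describes a three-way ladder on ->xattr_isize that the code right after it implements: exactly header-sized is reserved and unsupported, smaller than the header is either "no xattrs" (zero) or corruption, and anything larger gets parsed. A toy classifier, with an assumed header size of 12 bytes purely for illustration:

#include <stdio.h>

enum { XATTR_PARSE = 0, XATTR_NONE, XATTR_UNSUPPORTED, XATTR_CORRUPT };

static int classify_xattr_isize(unsigned int isize, unsigned int hdr)
{
	if (isize == hdr)			/* header only: reserved */
		return XATTR_UNSUPPORTED;	/* -EOPNOTSUPP above */
	if (isize < hdr)
		return isize ? XATTR_CORRUPT	/* -EFSCORRUPTED */
			     : XATTR_NONE;	/* -ENOATTR */
	return XATTR_PARSE;		/* header plus entry space */
}

int main(void)
{
	const unsigned int hdr = 12;	/* assumed ibody header size */

	printf("%d %d %d %d\n",
	       classify_xattr_isize(0, hdr),	/* 1 */
	       classify_xattr_isize(hdr, hdr),	/* 2 */
	       classify_xattr_isize(5, hdr),	/* 3 */
	       classify_xattr_isize(64, hdr));	/* 0 */
	return 0;
}
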
+ */ + if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) { + errln("xattr_isize %d of nid %llu is not supported yet", + vi->xattr_isize, vi->nid); + ret = -EOPNOTSUPP; + goto out_unlock; + } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) { + if (unlikely(vi->xattr_isize)) { + errln("bogus xattr ibody @ nid %llu", vi->nid); + DBG_BUGON(1); + ret = -EFSCORRUPTED; + goto out_unlock; /* xattr ondisk layout error */ + } + ret = -ENOATTR; + goto out_unlock; + } + + sb = inode->i_sb; + sbi = EROFS_SB(sb); + it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize); + it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize); + + it.page = erofs_get_inline_page(inode, it.blkaddr); + if (IS_ERR(it.page)) { + ret = PTR_ERR(it.page); + goto out_unlock; + } + + /* read in shared xattr array (non-atomic, see kmalloc below) */ + it.kaddr = kmap(it.page); + atomic_map = false; + + ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs); + + vi->xattr_shared_count = ih->h_shared_count; + vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count, + sizeof(uint), GFP_KERNEL); + if (!vi->xattr_shared_xattrs) { + xattr_iter_end(&it, atomic_map); + ret = -ENOMEM; + goto out_unlock; + } + + /* let's skip ibody header */ + it.ofs += sizeof(struct erofs_xattr_ibody_header); + + for (i = 0; i < vi->xattr_shared_count; ++i) { + if (unlikely(it.ofs >= EROFS_BLKSIZ)) { + /* cannot be unaligned */ + DBG_BUGON(it.ofs != EROFS_BLKSIZ); + xattr_iter_end(&it, atomic_map); + + it.page = erofs_get_meta_page(sb, ++it.blkaddr, + S_ISDIR(inode->i_mode)); + if (IS_ERR(it.page)) { + kfree(vi->xattr_shared_xattrs); + vi->xattr_shared_xattrs = NULL; + ret = PTR_ERR(it.page); + goto out_unlock; + } + + it.kaddr = kmap_atomic(it.page); + atomic_map = true; + it.ofs = 0; + } + vi->xattr_shared_xattrs[i] = + le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs)); + it.ofs += sizeof(__le32); + } + xattr_iter_end(&it, atomic_map); + + set_bit(EROFS_V_EA_INITED_BIT, &vi->flags); + +out_unlock: + clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags); + return ret; +} + +/* + * the general idea for these return values is + * if 0 is returned, go on processing the current xattr; + * 1 (> 0) is returned, skip this round to process the next xattr; + * -err (< 0) is returned, an error (maybe ENOXATTR) occurred + * and need to be handled + */ +struct xattr_iter_handlers { + int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry); + int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf, + unsigned int len); + int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz); + void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf, + unsigned int len); +}; + +static inline int xattr_iter_fixup(struct xattr_iter *it) +{ + if (it->ofs < EROFS_BLKSIZ) + return 0; + + xattr_iter_end(it, true); + + it->blkaddr += erofs_blknr(it->ofs); + + it->page = erofs_get_meta_page(it->sb, it->blkaddr, false); + if (IS_ERR(it->page)) { + int err = PTR_ERR(it->page); + + it->page = NULL; + return err; + } + + it->kaddr = kmap_atomic(it->page); + it->ofs = erofs_blkoff(it->ofs); + return 0; +} + +static int inline_xattr_iter_begin(struct xattr_iter *it, + struct inode *inode) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb); + unsigned int xattr_header_sz, inline_xattr_ofs; + + xattr_header_sz = inlinexattr_header_size(inode); + if (unlikely(xattr_header_sz >= vi->xattr_isize)) { + DBG_BUGON(xattr_header_sz > 
vi->xattr_isize); + return -ENOATTR; + } + + inline_xattr_ofs = vi->inode_isize + xattr_header_sz; + + it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs); + it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs); + + it->page = erofs_get_inline_page(inode, it->blkaddr); + if (IS_ERR(it->page)) + return PTR_ERR(it->page); + + it->kaddr = kmap_atomic(it->page); + return vi->xattr_isize - xattr_header_sz; +} + +/* + * Regardless of success or failure, `xattr_foreach' will end up with + * `ofs' pointing to the next xattr item rather than an arbitrary position. + */ +static int xattr_foreach(struct xattr_iter *it, + const struct xattr_iter_handlers *op, + unsigned int *tlimit) +{ + struct erofs_xattr_entry entry; + unsigned int value_sz, processed, slice; + int err; + + /* 0. fixup blkaddr, ofs, ipage */ + err = xattr_iter_fixup(it); + if (err) + return err; + + /* + * 1. read xattr entry to the memory, + * since we do EROFS_XATTR_ALIGN + * therefore entry should be in the page + */ + entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs); + if (tlimit) { + unsigned int entry_sz = EROFS_XATTR_ENTRY_SIZE(&entry); + + /* xattr on-disk corruption: xattr entry beyond xattr_isize */ + if (unlikely(*tlimit < entry_sz)) { + DBG_BUGON(1); + return -EFSCORRUPTED; + } + *tlimit -= entry_sz; + } + + it->ofs += sizeof(struct erofs_xattr_entry); + value_sz = le16_to_cpu(entry.e_value_size); + + /* handle entry */ + err = op->entry(it, &entry); + if (err) { + it->ofs += entry.e_name_len + value_sz; + goto out; + } + + /* 2. handle xattr name (ofs will finally be at the end of name) */ + processed = 0; + + while (processed < entry.e_name_len) { + if (it->ofs >= EROFS_BLKSIZ) { + DBG_BUGON(it->ofs > EROFS_BLKSIZ); + + err = xattr_iter_fixup(it); + if (err) + goto out; + it->ofs = 0; + } + + slice = min_t(unsigned int, PAGE_SIZE - it->ofs, + entry.e_name_len - processed); + + /* handle name */ + err = op->name(it, processed, it->kaddr + it->ofs, slice); + if (err) { + it->ofs += entry.e_name_len - processed + value_sz; + goto out; + } + + it->ofs += slice; + processed += slice; + } + + /* 3. handle xattr value */ + processed = 0; + + if (op->alloc_buffer) { + err = op->alloc_buffer(it, value_sz); + if (err) { + it->ofs += value_sz; + goto out; + } + } + + while (processed < value_sz) { + if (it->ofs >= EROFS_BLKSIZ) { + DBG_BUGON(it->ofs > EROFS_BLKSIZ); + + err = xattr_iter_fixup(it); + if (err) + goto out; + it->ofs = 0; + } + + slice = min_t(unsigned int, PAGE_SIZE - it->ofs, + value_sz - processed); + op->value(it, processed, it->kaddr + it->ofs, slice); + it->ofs += slice; + processed += slice; + } + +out: + /* xattrs should be 4-byte aligned (on-disk constraint) */ + it->ofs = EROFS_XATTR_ALIGN(it->ofs); + return err < 0 ? err : 0; +} + +struct getxattr_iter { + struct xattr_iter it; + + char *buffer; + int buffer_size, index; + struct qstr name; +}; + +static int xattr_entrymatch(struct xattr_iter *_it, + struct erofs_xattr_entry *entry) +{ + struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); + + return (it->index != entry->e_name_index || + it->name.len != entry->e_name_len) ? -ENOATTR : 0; +} + +static int xattr_namematch(struct xattr_iter *_it, + unsigned int processed, char *buf, unsigned int len) +{ + struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); + + return memcmp(buf, it->name.name + processed, len) ? 
-ENOATTR : 0; +} + +static int xattr_checkbuffer(struct xattr_iter *_it, + unsigned int value_sz) +{ + struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); + int err = it->buffer_size < value_sz ? -ERANGE : 0; + + it->buffer_size = value_sz; + return !it->buffer ? 1 : err; +} + +static void xattr_copyvalue(struct xattr_iter *_it, + unsigned int processed, + char *buf, unsigned int len) +{ + struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it); + + memcpy(it->buffer + processed, buf, len); +} + +static const struct xattr_iter_handlers find_xattr_handlers = { + .entry = xattr_entrymatch, + .name = xattr_namematch, + .alloc_buffer = xattr_checkbuffer, + .value = xattr_copyvalue +}; + +static int inline_getxattr(struct inode *inode, struct getxattr_iter *it) +{ + int ret; + unsigned int remaining; + + ret = inline_xattr_iter_begin(&it->it, inode); + if (ret < 0) + return ret; + + remaining = ret; + while (remaining) { + ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining); + if (ret != -ENOATTR) + break; + } + xattr_iter_end_final(&it->it); + + return ret ? ret : it->buffer_size; +} + +static int shared_getxattr(struct inode *inode, struct getxattr_iter *it) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + struct super_block *const sb = inode->i_sb; + struct erofs_sb_info *const sbi = EROFS_SB(sb); + unsigned int i; + int ret = -ENOATTR; + + for (i = 0; i < vi->xattr_shared_count; ++i) { + erofs_blk_t blkaddr = + xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]); + + it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]); + + if (!i || blkaddr != it->it.blkaddr) { + if (i) + xattr_iter_end(&it->it, true); + + it->it.page = erofs_get_meta_page(sb, blkaddr, false); + if (IS_ERR(it->it.page)) + return PTR_ERR(it->it.page); + + it->it.kaddr = kmap_atomic(it->it.page); + it->it.blkaddr = blkaddr; + } + + ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL); + if (ret != -ENOATTR) + break; + } + if (vi->xattr_shared_count) + xattr_iter_end_final(&it->it); + + return ret ? 
ret : it->buffer_size; +} + +static bool erofs_xattr_user_list(struct dentry *dentry) +{ + return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER); +} + +static bool erofs_xattr_trusted_list(struct dentry *dentry) +{ + return capable(CAP_SYS_ADMIN); +} + +int erofs_getxattr(struct inode *inode, int index, + const char *name, + void *buffer, size_t buffer_size) +{ + int ret; + struct getxattr_iter it; + + if (unlikely(!name)) + return -EINVAL; + + ret = init_inode_xattrs(inode); + if (ret) + return ret; + + it.index = index; + + it.name.len = strlen(name); + if (it.name.len > EROFS_NAME_LEN) + return -ERANGE; + it.name.name = name; + + it.buffer = buffer; + it.buffer_size = buffer_size; + + it.it.sb = inode->i_sb; + ret = inline_getxattr(inode, &it); + if (ret == -ENOATTR) + ret = shared_getxattr(inode, &it); + return ret; +} + +static int erofs_xattr_generic_get(const struct xattr_handler *handler, + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) +{ + struct erofs_sb_info *const sbi = EROFS_I_SB(inode); + + switch (handler->flags) { + case EROFS_XATTR_INDEX_USER: + if (!test_opt(sbi, XATTR_USER)) + return -EOPNOTSUPP; + break; + case EROFS_XATTR_INDEX_TRUSTED: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + break; + case EROFS_XATTR_INDEX_SECURITY: + break; + default: + return -EINVAL; + } + + return erofs_getxattr(inode, handler->flags, name, buffer, size); +} + +const struct xattr_handler erofs_xattr_user_handler = { + .prefix = XATTR_USER_PREFIX, + .flags = EROFS_XATTR_INDEX_USER, + .list = erofs_xattr_user_list, + .get = erofs_xattr_generic_get, +}; + +const struct xattr_handler erofs_xattr_trusted_handler = { + .prefix = XATTR_TRUSTED_PREFIX, + .flags = EROFS_XATTR_INDEX_TRUSTED, + .list = erofs_xattr_trusted_list, + .get = erofs_xattr_generic_get, +}; + +#ifdef CONFIG_EROFS_FS_SECURITY +const struct xattr_handler __maybe_unused erofs_xattr_security_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .flags = EROFS_XATTR_INDEX_SECURITY, + .get = erofs_xattr_generic_get, +}; +#endif + +const struct xattr_handler *erofs_xattr_handlers[] = { + &erofs_xattr_user_handler, +#ifdef CONFIG_EROFS_FS_POSIX_ACL + &posix_acl_access_xattr_handler, + &posix_acl_default_xattr_handler, +#endif + &erofs_xattr_trusted_handler, +#ifdef CONFIG_EROFS_FS_SECURITY + &erofs_xattr_security_handler, +#endif + NULL, +}; + +struct listxattr_iter { + struct xattr_iter it; + + struct dentry *dentry; + char *buffer; + int buffer_size, buffer_ofs; +}; + +static int xattr_entrylist(struct xattr_iter *_it, + struct erofs_xattr_entry *entry) +{ + struct listxattr_iter *it = + container_of(_it, struct listxattr_iter, it); + unsigned int prefix_len; + const char *prefix; + + const struct xattr_handler *h = + erofs_xattr_handler(entry->e_name_index); + + if (!h || (h->list && !h->list(it->dentry))) + return 1; + + prefix = xattr_prefix(h); + prefix_len = strlen(prefix); + + if (!it->buffer) { + it->buffer_ofs += prefix_len + entry->e_name_len + 1; + return 1; + } + + if (it->buffer_ofs + prefix_len + + entry->e_name_len + 1 > it->buffer_size) + return -ERANGE; + + memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len); + it->buffer_ofs += prefix_len; + return 0; +} + +static int xattr_namelist(struct xattr_iter *_it, + unsigned int processed, char *buf, unsigned int len) +{ + struct listxattr_iter *it = + container_of(_it, struct listxattr_iter, it); + + memcpy(it->buffer + it->buffer_ofs, buf, len); + it->buffer_ofs += len; + return 0; +} + +static int xattr_skipvalue(struct 
xattr_iter *_it, + unsigned int value_sz) +{ + struct listxattr_iter *it = + container_of(_it, struct listxattr_iter, it); + + it->buffer[it->buffer_ofs++] = '\0'; + return 1; +} + +static const struct xattr_iter_handlers list_xattr_handlers = { + .entry = xattr_entrylist, + .name = xattr_namelist, + .alloc_buffer = xattr_skipvalue, + .value = NULL +}; + +static int inline_listxattr(struct listxattr_iter *it) +{ + int ret; + unsigned int remaining; + + ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry)); + if (ret < 0) + return ret; + + remaining = ret; + while (remaining) { + ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining); + if (ret) + break; + } + xattr_iter_end_final(&it->it); + return ret ? ret : it->buffer_ofs; +} + +static int shared_listxattr(struct listxattr_iter *it) +{ + struct inode *const inode = d_inode(it->dentry); + struct erofs_vnode *const vi = EROFS_V(inode); + struct super_block *const sb = inode->i_sb; + struct erofs_sb_info *const sbi = EROFS_SB(sb); + unsigned int i; + int ret = 0; + + for (i = 0; i < vi->xattr_shared_count; ++i) { + erofs_blk_t blkaddr = + xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]); + + it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]); + if (!i || blkaddr != it->it.blkaddr) { + if (i) + xattr_iter_end(&it->it, true); + + it->it.page = erofs_get_meta_page(sb, blkaddr, false); + if (IS_ERR(it->it.page)) + return PTR_ERR(it->it.page); + + it->it.kaddr = kmap_atomic(it->it.page); + it->it.blkaddr = blkaddr; + } + + ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL); + if (ret) + break; + } + if (vi->xattr_shared_count) + xattr_iter_end_final(&it->it); + + return ret ? ret : it->buffer_ofs; +} + +ssize_t erofs_listxattr(struct dentry *dentry, + char *buffer, size_t buffer_size) +{ + int ret; + struct listxattr_iter it; + + ret = init_inode_xattrs(d_inode(dentry)); + if (ret) + return ret; + + it.dentry = dentry; + it.buffer = buffer; + it.buffer_size = buffer_size; + it.buffer_ofs = 0; + + it.it.sb = dentry->d_sb; + + ret = inline_listxattr(&it); + if (ret < 0 && ret != -ENOATTR) + return ret; + return shared_listxattr(&it); +} + +#ifdef CONFIG_EROFS_FS_POSIX_ACL +struct posix_acl *erofs_get_acl(struct inode *inode, int type) +{ + struct posix_acl *acl; + int prefix, rc; + char *value = NULL; + + switch (type) { + case ACL_TYPE_ACCESS: + prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS; + break; + case ACL_TYPE_DEFAULT: + prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT; + break; + default: + return ERR_PTR(-EINVAL); + } + + rc = erofs_getxattr(inode, prefix, "", NULL, 0); + if (rc > 0) { + value = kmalloc(rc, GFP_KERNEL); + if (!value) + return ERR_PTR(-ENOMEM); + rc = erofs_getxattr(inode, prefix, "", value, rc); + } + + if (rc == -ENOATTR) + acl = NULL; + else if (rc < 0) + acl = ERR_PTR(rc); + else + acl = posix_acl_from_xattr(&init_user_ns, value, rc); + kfree(value); + return acl; +} +#endif + diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h new file mode 100644 index 000000000000..c5ca47d814dd --- /dev/null +++ b/fs/erofs/xattr.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017-2018 HUAWEI, Inc. 
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#ifndef __EROFS_XATTR_H +#define __EROFS_XATTR_H + +#include "internal.h" +#include +#include + +/* Attribute not found */ +#define ENOATTR ENODATA + +static inline unsigned int inlinexattr_header_size(struct inode *inode) +{ + return sizeof(struct erofs_xattr_ibody_header) + + sizeof(u32) * EROFS_V(inode)->xattr_shared_count; +} + +static inline erofs_blk_t xattrblock_addr(struct erofs_sb_info *sbi, + unsigned int xattr_id) +{ +#ifdef CONFIG_EROFS_FS_XATTR + return sbi->xattr_blkaddr + + xattr_id * sizeof(__u32) / EROFS_BLKSIZ; +#else + return 0; +#endif +} + +static inline unsigned int xattrblock_offset(struct erofs_sb_info *sbi, + unsigned int xattr_id) +{ + return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ; +} + +#ifdef CONFIG_EROFS_FS_XATTR +extern const struct xattr_handler erofs_xattr_user_handler; +extern const struct xattr_handler erofs_xattr_trusted_handler; +#ifdef CONFIG_EROFS_FS_SECURITY +extern const struct xattr_handler erofs_xattr_security_handler; +#endif + +static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx) +{ +static const struct xattr_handler *xattr_handler_map[] = { + [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler, +#ifdef CONFIG_EROFS_FS_POSIX_ACL + [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler, + [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] = + &posix_acl_default_xattr_handler, +#endif + [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler, +#ifdef CONFIG_EROFS_FS_SECURITY + [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler, +#endif +}; + + return idx && idx < ARRAY_SIZE(xattr_handler_map) ? + xattr_handler_map[idx] : NULL; +} + +extern const struct xattr_handler *erofs_xattr_handlers[]; + +int erofs_getxattr(struct inode *, int, const char *, void *, size_t); +ssize_t erofs_listxattr(struct dentry *, char *, size_t); +#else +static inline int erofs_getxattr(struct inode *inode, int index, + const char *name, void *buffer, + size_t buffer_size) +{ + return -EOPNOTSUPP; +} + +static inline ssize_t erofs_listxattr(struct dentry *dentry, + char *buffer, size_t buffer_size) +{ + return -EOPNOTSUPP; +} +#endif /* !CONFIG_EROFS_FS_XATTR */ + +#ifdef CONFIG_EROFS_FS_POSIX_ACL +struct posix_acl *erofs_get_acl(struct inode *inode, int type); +#else +#define erofs_get_acl (NULL) +#endif + +#endif + diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c new file mode 100644 index 000000000000..b32ad585237c --- /dev/null +++ b/fs/erofs/zdata.c @@ -0,0 +1,1432 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "zdata.h" +#include "compress.h" +#include + +#include + +/* + * a compressed_pages[] placeholder in order to avoid + * being filled with file pages for in-place decompression. 
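
xattrblock_addr()/xattrblock_offset() above map a shared xattr id to an on-disk location: ids index an array of 4-byte slots starting at sbi->xattr_blkaddr, so the block is base + id*4/blocksize and the offset is id*4 mod blocksize. A worked example (block size 4096 stands in for EROFS_BLKSIZ; the base block address is an arbitrary sample):

#include <stdio.h>
#include <stdint.h>

#define BLKSIZ 4096u	/* stands in for EROFS_BLKSIZ */

int main(void)
{
	uint32_t xattr_blkaddr = 17;	/* assumed sbi->xattr_blkaddr */
	unsigned int xattr_id = 2100;

	/* each shared xattr slot is a 4-byte (__u32) entry */
	unsigned int blk = xattr_blkaddr + xattr_id * sizeof(uint32_t) / BLKSIZ;
	unsigned int off = (xattr_id * sizeof(uint32_t)) % BLKSIZ;

	printf("id %u -> block %u, offset %u\n", xattr_id, blk, off);
	/* prints: id 2100 -> block 19, offset 208 */
	return 0;
}
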
+ */ +#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D) + +/* how to allocate cached pages for a pcluster */ +enum z_erofs_cache_alloctype { + DONTALLOC, /* don't allocate any cached pages */ + DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */ +}; + +/* + * tagged pointer with 1-bit tag for all compressed pages + * tag 0 - the page is just found with an extra page reference + */ +typedef tagptr1_t compressed_page_t; + +#define tag_compressed_page_justfound(page) \ + tagptr_fold(compressed_page_t, page, 1) + +static struct workqueue_struct *z_erofs_workqueue __read_mostly; +static struct kmem_cache *pcluster_cachep __read_mostly; + +void z_erofs_exit_zip_subsystem(void) +{ + destroy_workqueue(z_erofs_workqueue); + kmem_cache_destroy(pcluster_cachep); +} + +static inline int init_unzip_workqueue(void) +{ + const unsigned int onlinecpus = num_possible_cpus(); + const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE; + + /* + * no need to spawn too many threads, limiting threads could minimum + * scheduling overhead, perhaps per-CPU threads should be better? + */ + z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags, + onlinecpus + onlinecpus / 4); + return z_erofs_workqueue ? 0 : -ENOMEM; +} + +static void init_once(void *ptr) +{ + struct z_erofs_pcluster *pcl = ptr; + struct z_erofs_collection *cl = z_erofs_primarycollection(pcl); + unsigned int i; + + mutex_init(&cl->lock); + cl->nr_pages = 0; + cl->vcnt = 0; + for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i) + pcl->compressed_pages[i] = NULL; +} + +static void init_always(struct z_erofs_pcluster *pcl) +{ + struct z_erofs_collection *cl = z_erofs_primarycollection(pcl); + + atomic_set(&pcl->obj.refcount, 1); + + DBG_BUGON(cl->nr_pages); + DBG_BUGON(cl->vcnt); +} + +int __init z_erofs_init_zip_subsystem(void) +{ + pcluster_cachep = kmem_cache_create("erofs_compress", + Z_EROFS_WORKGROUP_SIZE, 0, + SLAB_RECLAIM_ACCOUNT, init_once); + if (pcluster_cachep) { + if (!init_unzip_workqueue()) + return 0; + + kmem_cache_destroy(pcluster_cachep); + } + return -ENOMEM; +} + +enum z_erofs_collectmode { + COLLECT_SECONDARY, + COLLECT_PRIMARY, + /* + * The current collection was the tail of an exist chain, in addition + * that the previous processed chained collections are all decided to + * be hooked up to it. 
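
compressed_page_t above is a tagptr1_t, i.e. a pointer with one spare tag bit smuggled into its low alignment bits; tag 1 marks a page that was "just found" in the managed cache. A freestanding demo of the fold/unfold arithmetic (assuming the usual >=4-byte object alignment, so bit 0 is free):

#include <stdio.h>
#include <stdint.h>

typedef struct { uintptr_t v; } tagptr1_t;	/* 1 spare tag bit */

#define TAG_MASK ((uintptr_t)1)		/* (1UL << 1) - 1 */

static tagptr1_t fold(void *ptr, uintptr_t tag)
{
	return (tagptr1_t){ .v = (uintptr_t)ptr | (tag & TAG_MASK) };
}

int main(void)
{
	int obj = 42;			/* aligned, so bit 0 is free */
	tagptr1_t t = fold(&obj, 1);	/* cf. tag_compressed_page_justfound */

	printf("tag=%lu value=%d\n",
	       (unsigned long)(t.v & TAG_MASK),		/* unfold_tags */
	       *(int *)(t.v & ~TAG_MASK));		/* unfold_ptr  */
	return 0;
}
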
+ * A new chain will be created for the remaining collections which are + * not processed yet, therefore different from COLLECT_PRIMARY_FOLLOWED, + * the next collection cannot reuse the whole page safely in + * the following scenario: + * ________________________________________________________________ + * | tail (partial) page | head (partial) page | + * | (belongs to the next cl) | (belongs to the current cl) | + * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________| + */ + COLLECT_PRIMARY_HOOKED, + COLLECT_PRIMARY_FOLLOWED_NOINPLACE, + /* + * The current collection has been linked with the owned chain, and + * could also be linked with the remaining collections, which means + * if the processing page is the tail page of the collection, thus + * the current collection can safely use the whole page (since + * the previous collection is under control) for in-place I/O, as + * illustrated below: + * ________________________________________________________________ + * | tail (partial) page | head (partial) page | + * | (of the current cl) | (of the previous collection) | + * | PRIMARY_FOLLOWED or | | + * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________| + * + * [ (*) the above page can be used as inplace I/O. ] + */ + COLLECT_PRIMARY_FOLLOWED, +}; + +struct z_erofs_collector { + struct z_erofs_pagevec_ctor vector; + + struct z_erofs_pcluster *pcl, *tailpcl; + struct z_erofs_collection *cl; + struct page **compressedpages; + z_erofs_next_pcluster_t owned_head; + + enum z_erofs_collectmode mode; +}; + +struct z_erofs_decompress_frontend { + struct inode *const inode; + + struct z_erofs_collector clt; + struct erofs_map_blocks map; + + /* used for applying cache strategy on the fly */ + bool backmost; + erofs_off_t headoffset; +}; + +#define COLLECTOR_INIT() { \ + .owned_head = Z_EROFS_PCLUSTER_TAIL, \ + .mode = COLLECT_PRIMARY_FOLLOWED } + +#define DECOMPRESS_FRONTEND_INIT(__i) { \ + .inode = __i, .clt = COLLECTOR_INIT(), \ + .backmost = true, } + +static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES]; +static DEFINE_MUTEX(z_pagemap_global_lock); + +static void preload_compressed_pages(struct z_erofs_collector *clt, + struct address_space *mc, + enum z_erofs_cache_alloctype type, + struct list_head *pagepool) +{ + const struct z_erofs_pcluster *pcl = clt->pcl; + const unsigned int clusterpages = BIT(pcl->clusterbits); + struct page **pages = clt->compressedpages; + pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages); + bool standalone = true; + + if (clt->mode < COLLECT_PRIMARY_FOLLOWED) + return; + + for (; pages < pcl->compressed_pages + clusterpages; ++pages) { + struct page *page; + compressed_page_t t; + + /* the compressed page was loaded before */ + if (READ_ONCE(*pages)) + continue; + + page = find_get_page(mc, index); + + if (page) { + t = tag_compressed_page_justfound(page); + } else if (type == DELAYEDALLOC) { + t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED); + } else { /* DONTALLOC */ + if (standalone) + clt->compressedpages = pages; + standalone = false; + continue; + } + + if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t))) + continue; + + if (page) + put_page(page); + } + + if (standalone) /* downgrade to PRIMARY_FOLLOWED_NOINPLACE */ + clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE; +} + +/* called by erofs_shrinker to get rid of all compressed_pages */ +int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, + struct erofs_workgroup *grp) +{ + struct z_erofs_pcluster *const pcl = + container_of(grp, 
struct z_erofs_pcluster, obj); + struct address_space *const mapping = MNGD_MAPPING(sbi); + const unsigned int clusterpages = BIT(pcl->clusterbits); + int i; + + /* + * refcount of workgroup is now freezed as 1, + * therefore no need to worry about available decompression users. + */ + for (i = 0; i < clusterpages; ++i) { + struct page *page = pcl->compressed_pages[i]; + + if (!page) + continue; + + /* block other users from reclaiming or migrating the page */ + if (!trylock_page(page)) + return -EBUSY; + + if (unlikely(page->mapping != mapping)) + continue; + + /* barrier is implied in the following 'unlock_page' */ + WRITE_ONCE(pcl->compressed_pages[i], NULL); + set_page_private(page, 0); + ClearPagePrivate(page); + + unlock_page(page); + put_page(page); + } + return 0; +} + +int erofs_try_to_free_cached_page(struct address_space *mapping, + struct page *page) +{ + struct z_erofs_pcluster *const pcl = (void *)page_private(page); + const unsigned int clusterpages = BIT(pcl->clusterbits); + int ret = 0; /* 0 - busy */ + + if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) { + unsigned int i; + + for (i = 0; i < clusterpages; ++i) { + if (pcl->compressed_pages[i] == page) { + WRITE_ONCE(pcl->compressed_pages[i], NULL); + ret = 1; + break; + } + } + erofs_workgroup_unfreeze(&pcl->obj, 1); + + if (ret) { + ClearPagePrivate(page); + put_page(page); + } + } + return ret; +} + +/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */ +static inline bool try_inplace_io(struct z_erofs_collector *clt, + struct page *page) +{ + struct z_erofs_pcluster *const pcl = clt->pcl; + const unsigned int clusterpages = BIT(pcl->clusterbits); + + while (clt->compressedpages < pcl->compressed_pages + clusterpages) { + if (!cmpxchg(clt->compressedpages++, NULL, page)) + return true; + } + return false; +} + +/* callers must be with collection lock held */ +static int z_erofs_attach_page(struct z_erofs_collector *clt, + struct page *page, + enum z_erofs_page_type type) +{ + int ret; + bool occupied; + + /* give priority for inplaceio */ + if (clt->mode >= COLLECT_PRIMARY && + type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && + try_inplace_io(clt, page)) + return 0; + + ret = z_erofs_pagevec_enqueue(&clt->vector, + page, type, &occupied); + clt->cl->vcnt += (unsigned int)ret; + + return ret ? 0 : -EAGAIN; +} + +static enum z_erofs_collectmode +try_to_claim_pcluster(struct z_erofs_pcluster *pcl, + z_erofs_next_pcluster_t *owned_head) +{ + /* let's claim these following types of pclusters */ +retry: + if (pcl->next == Z_EROFS_PCLUSTER_NIL) { + /* type 1, nil pcluster */ + if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL, + *owned_head) != Z_EROFS_PCLUSTER_NIL) + goto retry; + + *owned_head = &pcl->next; + /* lucky, I am the followee :) */ + return COLLECT_PRIMARY_FOLLOWED; + } else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) { + /* + * type 2, link to the end of a existing open chain, + * be careful that its submission itself is governed + * by the original owned chain. 
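
try_to_claim_pcluster() above makes its two claims lock-free: a cmpxchg against Z_EROFS_PCLUSTER_NIL appends to a finished chain (type 1), and one against Z_EROFS_PCLUSTER_TAIL hooks onto the open tail of an existing chain (type 2). A reduced C11 sketch with illustrative sentinel values; note the kernel retries on cmpxchg interference, while this version tries each case once:

#include <stdatomic.h>
#include <stdint.h>

#define NIL  ((uintptr_t)0)	/* chain is finished */
#define TAIL ((uintptr_t)1)	/* chain is open at its tail */

/* returns 1 if the pcl->next-like field was claimed for owned_head */
static int try_claim(_Atomic uintptr_t *next, uintptr_t owned_head)
{
	uintptr_t e = NIL;

	/* type 1: chain ends here, append ourselves (-> FOLLOWED) */
	if (atomic_compare_exchange_strong(next, &e, owned_head))
		return 1;
	e = TAIL;
	/* type 2: hook onto an open tail (-> HOOKED) */
	if (atomic_compare_exchange_strong(next, &e, owned_head))
		return 1;
	return 0;	/* owned by someone else: plain PRIMARY */
}

int main(void)
{
	_Atomic uintptr_t next = NIL;

	return try_claim(&next, 0x1000) ? 0 : 1;
}
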
+ */ + if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, + *owned_head) != Z_EROFS_PCLUSTER_TAIL) + goto retry; + *owned_head = Z_EROFS_PCLUSTER_TAIL; + return COLLECT_PRIMARY_HOOKED; + } + return COLLECT_PRIMARY; /* :( better luck next time */ +} + +static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt, + struct inode *inode, + struct erofs_map_blocks *map) +{ + struct erofs_workgroup *grp; + struct z_erofs_pcluster *pcl; + struct z_erofs_collection *cl; + unsigned int length; + bool tag; + + grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag); + if (!grp) + return NULL; + + pcl = container_of(grp, struct z_erofs_pcluster, obj); + if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) { + DBG_BUGON(1); + erofs_workgroup_put(grp); + return ERR_PTR(-EFSCORRUPTED); + } + + cl = z_erofs_primarycollection(pcl); + if (unlikely(cl->pageofs != (map->m_la & ~PAGE_MASK))) { + DBG_BUGON(1); + erofs_workgroup_put(grp); + return ERR_PTR(-EFSCORRUPTED); + } + + length = READ_ONCE(pcl->length); + if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) { + if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) { + DBG_BUGON(1); + erofs_workgroup_put(grp); + return ERR_PTR(-EFSCORRUPTED); + } + } else { + unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT; + + if (map->m_flags & EROFS_MAP_FULL_MAPPED) + llen |= Z_EROFS_PCLUSTER_FULL_LENGTH; + + while (llen > length && + length != cmpxchg_relaxed(&pcl->length, length, llen)) { + cpu_relax(); + length = READ_ONCE(pcl->length); + } + } + mutex_lock(&cl->lock); + /* used to check tail merging loop due to corrupted images */ + if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) + clt->tailpcl = pcl; + clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head); + /* clean tailpcl if the current owned_head is Z_EROFS_PCLUSTER_TAIL */ + if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) + clt->tailpcl = NULL; + clt->pcl = pcl; + clt->cl = cl; + return cl; +} + +static struct z_erofs_collection *clregister(struct z_erofs_collector *clt, + struct inode *inode, + struct erofs_map_blocks *map) +{ + struct z_erofs_pcluster *pcl; + struct z_erofs_collection *cl; + int err; + + /* no available workgroup, let's allocate one */ + pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS); + if (unlikely(!pcl)) + return ERR_PTR(-ENOMEM); + + init_always(pcl); + pcl->obj.index = map->m_pa >> PAGE_SHIFT; + + pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) | + (map->m_flags & EROFS_MAP_FULL_MAPPED ? + Z_EROFS_PCLUSTER_FULL_LENGTH : 0); + + if (map->m_flags & EROFS_MAP_ZIPPED) + pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4; + else + pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED; + + pcl->clusterbits = EROFS_V(inode)->z_physical_clusterbits[0]; + pcl->clusterbits -= PAGE_SHIFT; + + /* new pclusters should be claimed as type 1, primary and followed */ + pcl->next = clt->owned_head; + clt->mode = COLLECT_PRIMARY_FOLLOWED; + + cl = z_erofs_primarycollection(pcl); + cl->pageofs = map->m_la & ~PAGE_MASK; + + /* + * lock all primary followed works before visible to others + * and mutex_trylock *never* fails for a new pcluster. 
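
The length update in cllookup() above is effectively a relaxed atomic fetch-max: keep CASing the larger value in until either it sticks or someone else has already published something at least as large. Reduced to a generic helper (the kernel adds cpu_relax() in the retry path):

#include <stdio.h>
#include <stdatomic.h>

static void atomic_fetch_max_relaxed(_Atomic unsigned int *v,
				     unsigned int cand)
{
	unsigned int cur = atomic_load_explicit(v, memory_order_relaxed);

	/* a failed CAS refreshes cur, so the bound is rechecked */
	while (cand > cur &&
	       !atomic_compare_exchange_weak_explicit(v, &cur, cand,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;
}

int main(void)
{
	_Atomic unsigned int length = 10;

	atomic_fetch_max_relaxed(&length, 42);
	atomic_fetch_max_relaxed(&length, 7);	/* no effect: 7 < 42 */
	printf("%u\n", atomic_load(&length));	/* prints 42 */
	return 0;
}
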
+ */ + mutex_trylock(&cl->lock); + + err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0); + if (err) { + mutex_unlock(&cl->lock); + kmem_cache_free(pcluster_cachep, pcl); + return ERR_PTR(-EAGAIN); + } + /* used to check tail merging loop due to corrupted images */ + if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL) + clt->tailpcl = pcl; + clt->owned_head = &pcl->next; + clt->pcl = pcl; + clt->cl = cl; + return cl; +} + +static int z_erofs_collector_begin(struct z_erofs_collector *clt, + struct inode *inode, + struct erofs_map_blocks *map) +{ + struct z_erofs_collection *cl; + + DBG_BUGON(clt->cl); + + /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous collection */ + DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL); + DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); + + if (!PAGE_ALIGNED(map->m_pa)) { + DBG_BUGON(1); + return -EINVAL; + } + +repeat: + cl = cllookup(clt, inode, map); + if (!cl) { + cl = clregister(clt, inode, map); + + if (unlikely(cl == ERR_PTR(-EAGAIN))) + goto repeat; + } + + if (IS_ERR(cl)) + return PTR_ERR(cl); + + z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS, + cl->pagevec, cl->vcnt); + + clt->compressedpages = clt->pcl->compressed_pages; + if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */ + clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES; + return 0; +} + +/* + * keep in mind that no referenced pclusters will be freed + * only after a RCU grace period. + */ +static void z_erofs_rcu_callback(struct rcu_head *head) +{ + struct z_erofs_collection *const cl = + container_of(head, struct z_erofs_collection, rcu); + + kmem_cache_free(pcluster_cachep, + container_of(cl, struct z_erofs_pcluster, + primary_collection)); +} + +void erofs_workgroup_free_rcu(struct erofs_workgroup *grp) +{ + struct z_erofs_pcluster *const pcl = + container_of(grp, struct z_erofs_pcluster, obj); + struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl); + + call_rcu(&cl->rcu, z_erofs_rcu_callback); +} + +static void z_erofs_collection_put(struct z_erofs_collection *cl) +{ + struct z_erofs_pcluster *const pcl = + container_of(cl, struct z_erofs_pcluster, primary_collection); + + erofs_workgroup_put(&pcl->obj); +} + +static bool z_erofs_collector_end(struct z_erofs_collector *clt) +{ + struct z_erofs_collection *cl = clt->cl; + + if (!cl) + return false; + + z_erofs_pagevec_ctor_exit(&clt->vector, false); + mutex_unlock(&cl->lock); + + /* + * if all pending pages are added, don't hold its reference + * any longer if the pcluster isn't hosted by ourselves. 
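
z_erofs_rcu_callback() above only receives the embedded rcu_head, and container_of() recovers the enclosing collection (and from there the pcluster) by subtracting the member offset. A standalone demo of that pointer arithmetic with stand-in types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_demo { void *next; };	/* stand-in for struct rcu_head */

struct collection_demo {
	int vcnt;
	struct rcu_head_demo rcu;	/* the callback only sees this */
};

int main(void)
{
	struct collection_demo c = { .vcnt = 7 };
	struct collection_demo *back =
		container_of(&c.rcu, struct collection_demo, rcu);

	printf("vcnt=%d\n", back->vcnt);	/* prints 7 */
	return 0;
}
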
+ */ + if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE) + z_erofs_collection_put(cl); + + clt->cl = NULL; + return true; +} + +static inline struct page *__stagingpage_alloc(struct list_head *pagepool, + gfp_t gfp) +{ + struct page *page = erofs_allocpage(pagepool, gfp, true); + + page->mapping = Z_EROFS_MAPPING_STAGING; + return page; +} + +static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe, + unsigned int cachestrategy, + erofs_off_t la) +{ + if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED) + return false; + + if (fe->backmost) + return true; + + return cachestrategy >= EROFS_ZIP_CACHE_READAROUND && + la < fe->headoffset; +} + +static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, + struct page *page, + struct list_head *pagepool) +{ + struct inode *const inode = fe->inode; + struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode); + struct erofs_map_blocks *const map = &fe->map; + struct z_erofs_collector *const clt = &fe->clt; + const loff_t offset = page_offset(page); + bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED); + + enum z_erofs_cache_alloctype cache_strategy; + enum z_erofs_page_type page_type; + unsigned int cur, end, spiltted, index; + int err = 0; + + /* register locked file pages as online pages in pack */ + z_erofs_onlinepage_init(page); + + spiltted = 0; + end = PAGE_SIZE; +repeat: + cur = end - 1; + + /* lucky, within the range of the current map_blocks */ + if (offset + cur >= map->m_la && + offset + cur < map->m_la + map->m_llen) { + /* didn't get a valid collection previously (very rare) */ + if (!clt->cl) + goto restart_now; + goto hitted; + } + + /* go ahead the next map_blocks */ + debugln("%s: [out-of-range] pos %llu", __func__, offset + cur); + + if (z_erofs_collector_end(clt)) + fe->backmost = false; + + map->m_la = offset + cur; + map->m_llen = 0; + err = z_erofs_map_blocks_iter(inode, map, 0); + if (unlikely(err)) + goto err_out; + +restart_now: + if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) + goto hitted; + + err = z_erofs_collector_begin(clt, inode, map); + if (unlikely(err)) + goto err_out; + + /* preload all compressed pages (maybe downgrade role if necessary) */ + if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la)) + cache_strategy = DELAYEDALLOC; + else + cache_strategy = DONTALLOC; + + preload_compressed_pages(clt, MNGD_MAPPING(sbi), + cache_strategy, pagepool); + + tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED); +hitted: + cur = end - min_t(unsigned int, offset + end - map->m_la, end); + if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED))) { + zero_user_segment(page, cur, end); + goto next_part; + } + + /* let's derive page type */ + page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD : + (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE : + (tight ? 
Z_EROFS_PAGE_TYPE_EXCLUSIVE : + Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED)); + + if (cur) + tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED); + +retry: + err = z_erofs_attach_page(clt, page, page_type); + /* should allocate an additional staging page for pagevec */ + if (err == -EAGAIN) { + struct page *const newpage = + __stagingpage_alloc(pagepool, GFP_NOFS); + + err = z_erofs_attach_page(clt, newpage, + Z_EROFS_PAGE_TYPE_EXCLUSIVE); + if (likely(!err)) + goto retry; + } + + if (unlikely(err)) + goto err_out; + + index = page->index - (map->m_la >> PAGE_SHIFT); + + z_erofs_onlinepage_fixup(page, index, true); + + /* bump up the number of spiltted parts of a page */ + ++spiltted; + /* also update nr_pages */ + clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1); +next_part: + /* can be used for verification */ + map->m_llen = offset + cur - map->m_la; + + end = cur; + if (end > 0) + goto repeat; + +out: + z_erofs_onlinepage_endio(page); + + debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu", + __func__, page, spiltted, map->m_llen); + return err; + + /* if some error occurred while processing this page */ +err_out: + SetPageError(page); + goto out; +} + +static void z_erofs_vle_unzip_kickoff(void *ptr, int bios) +{ + tagptr1_t t = tagptr_init(tagptr1_t, ptr); + struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t); + bool background = tagptr_unfold_tags(t); + + if (!background) { + unsigned long flags; + + spin_lock_irqsave(&io->u.wait.lock, flags); + if (!atomic_add_return(bios, &io->pending_bios)) + wake_up_locked(&io->u.wait); + spin_unlock_irqrestore(&io->u.wait.lock, flags); + return; + } + + if (!atomic_add_return(bios, &io->pending_bios)) + queue_work(z_erofs_workqueue, &io->u.work); +} + +static inline void z_erofs_vle_read_endio(struct bio *bio) +{ + struct erofs_sb_info *sbi = NULL; + blk_status_t err = bio->bi_status; + struct bio_vec *bvec; + struct bvec_iter_all iter_all; + + bio_for_each_segment_all(bvec, bio, iter_all) { + struct page *page = bvec->bv_page; + bool cachemngd = false; + + DBG_BUGON(PageUptodate(page)); + DBG_BUGON(!page->mapping); + + if (unlikely(!sbi && !z_erofs_page_is_staging(page))) { + sbi = EROFS_SB(page->mapping->host->i_sb); + + if (time_to_inject(sbi, FAULT_READ_IO)) { + erofs_show_injection_info(FAULT_READ_IO); + err = BLK_STS_IOERR; + } + } + + /* sbi should already be gotten if the page is managed */ + if (sbi) + cachemngd = erofs_page_is_managed(sbi, page); + + if (unlikely(err)) + SetPageError(page); + else if (cachemngd) + SetPageUptodate(page); + + if (cachemngd) + unlock_page(page); + } + + z_erofs_vle_unzip_kickoff(bio->bi_private, -1); + bio_put(bio); +} + +static int z_erofs_decompress_pcluster(struct super_block *sb, + struct z_erofs_pcluster *pcl, + struct list_head *pagepool) +{ + struct erofs_sb_info *const sbi = EROFS_SB(sb); + const unsigned int clusterpages = BIT(pcl->clusterbits); + struct z_erofs_pagevec_ctor ctor; + unsigned int i, outputsize, llen, nr_pages; + struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES]; + struct page **pages, **compressed_pages, *page; + + enum z_erofs_page_type page_type; + bool overlapped, partial; + struct z_erofs_collection *cl; + int err; + + might_sleep(); + cl = z_erofs_primarycollection(pcl); + DBG_BUGON(!READ_ONCE(cl->nr_pages)); + + mutex_lock(&cl->lock); + nr_pages = cl->nr_pages; + + if (likely(nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES)) { + pages = pages_onstack; + } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES && + mutex_trylock(&z_pagemap_global_lock)) { + pages = 
z_pagemap_global; + } else { + gfp_t gfp_flags = GFP_KERNEL; + + if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES) + gfp_flags |= __GFP_NOFAIL; + + pages = kvmalloc_array(nr_pages, sizeof(struct page *), + gfp_flags); + + /* fallback to global pagemap for the lowmem scenario */ + if (unlikely(!pages)) { + mutex_lock(&z_pagemap_global_lock); + pages = z_pagemap_global; + } + } + + for (i = 0; i < nr_pages; ++i) + pages[i] = NULL; + + err = 0; + z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS, + cl->pagevec, 0); + + for (i = 0; i < cl->vcnt; ++i) { + unsigned int pagenr; + + page = z_erofs_pagevec_dequeue(&ctor, &page_type); + + /* all pages in pagevec ought to be valid */ + DBG_BUGON(!page); + DBG_BUGON(!page->mapping); + + if (z_erofs_put_stagingpage(pagepool, page)) + continue; + + if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD) + pagenr = 0; + else + pagenr = z_erofs_onlinepage_index(page); + + DBG_BUGON(pagenr >= nr_pages); + + /* + * currently EROFS doesn't support multiref(dedup), + * so here erroring out one multiref page. + */ + if (unlikely(pages[pagenr])) { + DBG_BUGON(1); + SetPageError(pages[pagenr]); + z_erofs_onlinepage_endio(pages[pagenr]); + err = -EFSCORRUPTED; + } + pages[pagenr] = page; + } + z_erofs_pagevec_ctor_exit(&ctor, true); + + overlapped = false; + compressed_pages = pcl->compressed_pages; + + for (i = 0; i < clusterpages; ++i) { + unsigned int pagenr; + + page = compressed_pages[i]; + + /* all compressed pages ought to be valid */ + DBG_BUGON(!page); + DBG_BUGON(!page->mapping); + + if (!z_erofs_page_is_staging(page)) { + if (erofs_page_is_managed(sbi, page)) { + if (unlikely(!PageUptodate(page))) + err = -EIO; + continue; + } + + /* + * only if non-head page can be selected + * for inplace decompression + */ + pagenr = z_erofs_onlinepage_index(page); + + DBG_BUGON(pagenr >= nr_pages); + if (unlikely(pages[pagenr])) { + DBG_BUGON(1); + SetPageError(pages[pagenr]); + z_erofs_onlinepage_endio(pages[pagenr]); + err = -EFSCORRUPTED; + } + pages[pagenr] = page; + + overlapped = true; + } + + /* PG_error needs checking for inplaced and staging pages */ + if (unlikely(PageError(page))) { + DBG_BUGON(PageUptodate(page)); + err = -EIO; + } + } + + if (unlikely(err)) + goto out; + + llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT; + if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) { + outputsize = llen; + partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH); + } else { + outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs; + partial = true; + } + + err = z_erofs_decompress(&(struct z_erofs_decompress_req) { + .sb = sb, + .in = compressed_pages, + .out = pages, + .pageofs_out = cl->pageofs, + .inputsize = PAGE_SIZE, + .outputsize = outputsize, + .alg = pcl->algorithmformat, + .inplace_io = overlapped, + .partial_decoding = partial + }, pagepool); + +out: + /* must handle all compressed pages before endding pages */ + for (i = 0; i < clusterpages; ++i) { + page = compressed_pages[i]; + + if (erofs_page_is_managed(sbi, page)) + continue; + + /* recycle all individual staging pages */ + (void)z_erofs_put_stagingpage(pagepool, page); + + WRITE_ONCE(compressed_pages[i], NULL); + } + + for (i = 0; i < nr_pages; ++i) { + page = pages[i]; + if (!page) + continue; + + DBG_BUGON(!page->mapping); + + /* recycle all individual staging pages */ + if (z_erofs_put_stagingpage(pagepool, page)) + continue; + + if (unlikely(err < 0)) + SetPageError(page); + + z_erofs_onlinepage_endio(page); + } + + if (pages == z_pagemap_global) + mutex_unlock(&z_pagemap_global_lock); + else if 
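/* a kvmalloc'ed pagemap, neither on-stack nor the global one */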
(unlikely(pages != pages_onstack)) + kvfree(pages); + + cl->nr_pages = 0; + cl->vcnt = 0; + + /* all cl locks MUST be taken before the following line */ + WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL); + + /* all cl locks SHOULD be released right now */ + mutex_unlock(&cl->lock); + + z_erofs_collection_put(cl); + return err; +} + +static void z_erofs_vle_unzip_all(struct super_block *sb, + struct z_erofs_unzip_io *io, + struct list_head *pagepool) +{ + z_erofs_next_pcluster_t owned = io->head; + + while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) { + struct z_erofs_pcluster *pcl; + + /* no possible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */ + DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL); + + /* no possible that 'owned' equals NULL */ + DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL); + + pcl = container_of(owned, struct z_erofs_pcluster, next); + owned = READ_ONCE(pcl->next); + + z_erofs_decompress_pcluster(sb, pcl, pagepool); + } +} + +static void z_erofs_vle_unzip_wq(struct work_struct *work) +{ + struct z_erofs_unzip_io_sb *iosb = + container_of(work, struct z_erofs_unzip_io_sb, io.u.work); + LIST_HEAD(pagepool); + + DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED); + z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool); + + put_pages_list(&pagepool); + kvfree(iosb); +} + +static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, + unsigned int nr, + struct list_head *pagepool, + struct address_space *mc, + gfp_t gfp) +{ + /* determined at compile time to avoid too many #ifdefs */ + const bool nocache = __builtin_constant_p(mc) ? !mc : false; + const pgoff_t index = pcl->obj.index; + bool tocache = false; + + struct address_space *mapping; + struct page *oldpage, *page; + + compressed_page_t t; + int justfound; + +repeat: + page = READ_ONCE(pcl->compressed_pages[nr]); + oldpage = page; + + if (!page) + goto out_allocpage; + + /* + * the cached page has not been allocated and + * an placeholder is out there, prepare it now. + */ + if (!nocache && page == PAGE_UNALLOCATED) { + tocache = true; + goto out_allocpage; + } + + /* process the target tagged pointer */ + t = tagptr_init(compressed_page_t, page); + justfound = tagptr_unfold_tags(t); + page = tagptr_unfold_ptr(t); + + mapping = READ_ONCE(page->mapping); + + /* + * if managed cache is disabled, it's no way to + * get such a cached-like page. + */ + if (nocache) { + /* if managed cache is disabled, it is impossible `justfound' */ + DBG_BUGON(justfound); + + /* and it should be locked, not uptodate, and not truncated */ + DBG_BUGON(!PageLocked(page)); + DBG_BUGON(PageUptodate(page)); + DBG_BUGON(!mapping); + goto out; + } + + /* + * unmanaged (file) pages are all locked solidly, + * therefore it is impossible for `mapping' to be NULL. + */ + if (mapping && mapping != mc) + /* ought to be unmanaged pages */ + goto out; + + lock_page(page); + + /* only true if page reclaim goes wrong, should never happen */ + DBG_BUGON(justfound && PagePrivate(page)); + + /* the page is still in manage cache */ + if (page->mapping == mc) { + WRITE_ONCE(pcl->compressed_pages[nr], page); + + ClearPageError(page); + if (!PagePrivate(page)) { + /* + * impossible to be !PagePrivate(page) for + * the current restriction as well if + * the page is already in compressed_pages[]. 
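+ * (that is, a page sitting in compressed_pages[] without PG_private
+ * can only be one that was just found in the managed cache.)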
+ */ + DBG_BUGON(!justfound); + + justfound = 0; + set_page_private(page, (unsigned long)pcl); + SetPagePrivate(page); + } + + /* no need to submit io if it is already up-to-date */ + if (PageUptodate(page)) { + unlock_page(page); + page = NULL; + } + goto out; + } + + /* + * the managed page has been truncated, it's unsafe to + * reuse this one, let's allocate a new cache-managed page. + */ + DBG_BUGON(page->mapping); + DBG_BUGON(!justfound); + + tocache = true; + unlock_page(page); + put_page(page); +out_allocpage: + page = __stagingpage_alloc(pagepool, gfp); + if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) { + list_add(&page->lru, pagepool); + cpu_relax(); + goto repeat; + } + if (nocache || !tocache) + goto out; + if (add_to_page_cache_lru(page, mc, index + nr, gfp)) { + page->mapping = Z_EROFS_MAPPING_STAGING; + goto out; + } + + set_page_private(page, (unsigned long)pcl); + SetPagePrivate(page); +out: /* the only exit (for tracing and debugging) */ + return page; +} + +static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb, + struct z_erofs_unzip_io *io, + bool foreground) +{ + struct z_erofs_unzip_io_sb *iosb; + + if (foreground) { + /* waitqueue available for foreground io */ + DBG_BUGON(!io); + + init_waitqueue_head(&io->u.wait); + atomic_set(&io->pending_bios, 0); + goto out; + } + + iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL); + DBG_BUGON(!iosb); + + /* initialize fields in the allocated descriptor */ + io = &iosb->io; + iosb->sb = sb; + INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq); +out: + io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED; + return io; +} + +/* define decompression jobqueue types */ +enum { + JQ_BYPASS, + JQ_SUBMIT, + NR_JOBQUEUES, +}; + +static void *jobqueueset_init(struct super_block *sb, + z_erofs_next_pcluster_t qtail[], + struct z_erofs_unzip_io *q[], + struct z_erofs_unzip_io *fgq, + bool forcefg) +{ + /* + * if managed cache is enabled, bypass jobqueue is needed, + * no need to read from device for all pclusters in this queue. + */ + q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true); + qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head; + + q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg); + qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head; + + return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg)); +} + +static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, + z_erofs_next_pcluster_t qtail[], + z_erofs_next_pcluster_t owned_head) +{ + z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT]; + z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS]; + + DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); + if (owned_head == Z_EROFS_PCLUSTER_TAIL) + owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED; + + WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED); + + WRITE_ONCE(*submit_qtail, owned_head); + WRITE_ONCE(*bypass_qtail, &pcl->next); + + qtail[JQ_BYPASS] = &pcl->next; +} + +static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[], + unsigned int nr_bios, + bool force_fg) +{ + /* + * although background is preferred, no one is pending for submission. + * don't issue workqueue for decompression but drop it directly instead. 
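+ * (the kvfree() below releases the background iosb allocated in
+ * jobqueue_init(); foreground descriptors live on the caller's stack.)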
+ */ + if (force_fg || nr_bios) + return false; + + kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io)); + return true; +} + +static bool z_erofs_vle_submit_all(struct super_block *sb, + z_erofs_next_pcluster_t owned_head, + struct list_head *pagepool, + struct z_erofs_unzip_io *fgq, + bool force_fg) +{ + struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb); + z_erofs_next_pcluster_t qtail[NR_JOBQUEUES]; + struct z_erofs_unzip_io *q[NR_JOBQUEUES]; + struct bio *bio; + void *bi_private; + /* since bio will be NULL, no need to initialize last_index */ + pgoff_t uninitialized_var(last_index); + bool force_submit = false; + unsigned int nr_bios; + + if (unlikely(owned_head == Z_EROFS_PCLUSTER_TAIL)) + return false; + + force_submit = false; + bio = NULL; + nr_bios = 0; + bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg); + + /* by default, all need io submission */ + q[JQ_SUBMIT]->head = owned_head; + + do { + struct z_erofs_pcluster *pcl; + unsigned int clusterpages; + pgoff_t first_index; + struct page *page; + unsigned int i = 0, bypass = 0; + int err; + + /* no possible 'owned_head' equals the following */ + DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); + DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL); + + pcl = container_of(owned_head, struct z_erofs_pcluster, next); + + clusterpages = BIT(pcl->clusterbits); + + /* close the main owned chain at first */ + owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, + Z_EROFS_PCLUSTER_TAIL_CLOSED); + + first_index = pcl->obj.index; + force_submit |= (first_index != last_index + 1); + +repeat: + page = pickup_page_for_submission(pcl, i, pagepool, + MNGD_MAPPING(sbi), + GFP_NOFS); + if (!page) { + force_submit = true; + ++bypass; + goto skippage; + } + + if (bio && force_submit) { +submit_bio_retry: + __submit_bio(bio, REQ_OP_READ, 0); + bio = NULL; + } + + if (!bio) { + bio = erofs_grab_bio(sb, first_index + i, + BIO_MAX_PAGES, bi_private, + z_erofs_vle_read_endio, true); + ++nr_bios; + } + + err = bio_add_page(bio, page, PAGE_SIZE, 0); + if (err < PAGE_SIZE) + goto submit_bio_retry; + + force_submit = false; + last_index = first_index + i; +skippage: + if (++i < clusterpages) + goto repeat; + + if (bypass < clusterpages) + qtail[JQ_SUBMIT] = &pcl->next; + else + move_to_bypass_jobqueue(pcl, qtail, owned_head); + } while (owned_head != Z_EROFS_PCLUSTER_TAIL); + + if (bio) + __submit_bio(bio, REQ_OP_READ, 0); + + if (postsubmit_is_all_bypassed(q, nr_bios, force_fg)) + return true; + + z_erofs_vle_unzip_kickoff(bi_private, nr_bios); + return true; +} + +static void z_erofs_submit_and_unzip(struct super_block *sb, + struct z_erofs_collector *clt, + struct list_head *pagepool, + bool force_fg) +{ + struct z_erofs_unzip_io io[NR_JOBQUEUES]; + + if (!z_erofs_vle_submit_all(sb, clt->owned_head, + pagepool, io, force_fg)) + return; + + /* decompress no I/O pclusters immediately */ + z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool); + + if (!force_fg) + return; + + /* wait until all bios are completed */ + wait_event(io[JQ_SUBMIT].u.wait, + !atomic_read(&io[JQ_SUBMIT].pending_bios)); + + /* let's synchronous decompression */ + z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool); +} + +static int z_erofs_vle_normalaccess_readpage(struct file *file, + struct page *page) +{ + struct inode *const inode = page->mapping->host; + struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); + int err; + LIST_HEAD(pagepool); + + trace_erofs_readpage(page, false); + + f.headoffset = (erofs_off_t)page->index << 
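/* byte offset of this page in the file */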
PAGE_SHIFT; + + err = z_erofs_do_read_page(&f, page, &pagepool); + (void)z_erofs_collector_end(&f.clt); + + /* if some compressed cluster ready, need submit them anyway */ + z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true); + + if (err) + errln("%s, failed to read, err [%d]", __func__, err); + + if (f.map.mpage) + put_page(f.map.mpage); + + /* clean up the remaining free pages */ + put_pages_list(&pagepool); + return err; +} + +static bool should_decompress_synchronously(struct erofs_sb_info *sbi, + unsigned int nr) +{ + return nr <= sbi->max_sync_decompress_pages; +} + +static int z_erofs_vle_normalaccess_readpages(struct file *filp, + struct address_space *mapping, + struct list_head *pages, + unsigned int nr_pages) +{ + struct inode *const inode = mapping->host; + struct erofs_sb_info *const sbi = EROFS_I_SB(inode); + + bool sync = should_decompress_synchronously(sbi, nr_pages); + struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode); + gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); + struct page *head = NULL; + LIST_HEAD(pagepool); + + trace_erofs_readpages(mapping->host, lru_to_page(pages), + nr_pages, false); + + f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT; + + for (; nr_pages; --nr_pages) { + struct page *page = lru_to_page(pages); + + prefetchw(&page->flags); + list_del(&page->lru); + + /* + * A pure asynchronous readahead is indicated if + * a PG_readahead marked page is hitted at first. + * Let's also do asynchronous decompression for this case. + */ + sync &= !(PageReadahead(page) && !head); + + if (add_to_page_cache_lru(page, mapping, page->index, gfp)) { + list_add(&page->lru, &pagepool); + continue; + } + + set_page_private(page, (unsigned long)head); + head = page; + } + + while (head) { + struct page *page = head; + int err; + + /* traversal in reverse order */ + head = (void *)page_private(page); + + err = z_erofs_do_read_page(&f, page, &pagepool); + if (err) { + struct erofs_vnode *vi = EROFS_V(inode); + + errln("%s, readahead error at page %lu of nid %llu", + __func__, page->index, vi->nid); + } + put_page(page); + } + + (void)z_erofs_collector_end(&f.clt); + + z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync); + + if (f.map.mpage) + put_page(f.map.mpage); + + /* clean up the remaining free pages */ + put_pages_list(&pagepool); + return 0; +} + +const struct address_space_operations z_erofs_vle_normalaccess_aops = { + .readpage = z_erofs_vle_normalaccess_readpage, + .readpages = z_erofs_vle_normalaccess_readpages, +}; + diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h new file mode 100644 index 000000000000..4fc547bc01f9 --- /dev/null +++ b/fs/erofs/zdata.h @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2018 HUAWEI, Inc. + * http://www.huawei.com/ + * Created by Gao Xiang + */ +#ifndef __EROFS_FS_ZDATA_H +#define __EROFS_FS_ZDATA_H + +#include "internal.h" +#include "zpvec.h" + +#define Z_EROFS_NR_INLINE_PAGEVECS 3 + +/* + * Structure fields follow one of the following exclusion rules. + * + * I: Modifiable by initialization/destruction paths and read-only + * for everyone else; + * + * L: Field should be protected by pageset lock; + * + * A: Field should be accessed / updated in atomic for parallelized code. 
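+ * (the pageset lock here is the "lock" mutex embedded in each
+ * collection below.)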
+ */ +struct z_erofs_collection { + struct mutex lock; + + /* I: page offset of start position of decompression */ + unsigned short pageofs; + + /* L: maximum relative page index in pagevec[] */ + unsigned short nr_pages; + + /* L: total number of pages in pagevec[] */ + unsigned int vcnt; + + union { + /* L: inline a certain number of pagevecs for bootstrap */ + erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS]; + + /* I: can be used to free the pcluster by RCU. */ + struct rcu_head rcu; + }; +}; + +#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001 +#define Z_EROFS_PCLUSTER_LENGTH_BIT 1 + +/* + * let's leave a type here in case of introducing + * another tagged pointer later. + */ +typedef void *z_erofs_next_pcluster_t; + +struct z_erofs_pcluster { + struct erofs_workgroup obj; + struct z_erofs_collection primary_collection; + + /* A: point to next chained pcluster or TAILs */ + z_erofs_next_pcluster_t next; + + /* A: compressed pages (including multi-usage pages) */ + struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES]; + + /* A: lower limit of decompressed length and if full length or not */ + unsigned int length; + + /* I: compression algorithm format */ + unsigned char algorithmformat; + /* I: bit shift of physical cluster size */ + unsigned char clusterbits; +}; + +#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection) + +/* let's avoid the valid 32-bit kernel addresses */ + +/* the chained workgroup has't submitted io (still open) */ +#define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE) +/* the chained workgroup has already submitted io */ +#define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD) + +#define Z_EROFS_PCLUSTER_NIL (NULL) + +#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster) + +struct z_erofs_unzip_io { + atomic_t pending_bios; + z_erofs_next_pcluster_t head; + + union { + wait_queue_head_t wait; + struct work_struct work; + } u; +}; + +struct z_erofs_unzip_io_sb { + struct z_erofs_unzip_io io; + struct super_block *sb; +}; + +#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping) +static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi, + struct page *page) +{ + return page->mapping == MNGD_MAPPING(sbi); +} + +#define Z_EROFS_ONLINEPAGE_COUNT_BITS 2 +#define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1) +#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS) + +/* + * waiters (aka. 
ongoing_packs): # to unlock the page + * sub-index: 0 - for partial page, >= 1 full page sub-index + */ +typedef atomic_t z_erofs_onlinepage_t; + +/* type punning */ +union z_erofs_onlinepage_converter { + z_erofs_onlinepage_t *o; + unsigned long *v; +}; + +static inline unsigned int z_erofs_onlinepage_index(struct page *page) +{ + union z_erofs_onlinepage_converter u; + + DBG_BUGON(!PagePrivate(page)); + u.v = &page_private(page); + + return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; +} + +static inline void z_erofs_onlinepage_init(struct page *page) +{ + union { + z_erofs_onlinepage_t o; + unsigned long v; + /* keep from being unlocked in advance */ + } u = { .o = ATOMIC_INIT(1) }; + + set_page_private(page, u.v); + smp_wmb(); + SetPagePrivate(page); +} + +static inline void z_erofs_onlinepage_fixup(struct page *page, + uintptr_t index, bool down) +{ + unsigned long *p, o, v, id; +repeat: + p = &page_private(page); + o = READ_ONCE(*p); + + id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT; + if (id) { + if (!index) + return; + + DBG_BUGON(id != index); + } + + v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | + ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down); + if (cmpxchg(p, o, v) != o) + goto repeat; +} + +static inline void z_erofs_onlinepage_endio(struct page *page) +{ + union z_erofs_onlinepage_converter u; + unsigned int v; + + DBG_BUGON(!PagePrivate(page)); + u.v = &page_private(page); + + v = atomic_dec_return(u.o); + if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) { + ClearPagePrivate(page); + if (!PageError(page)) + SetPageUptodate(page); + unlock_page(page); + } + debugln("%s, page %p value %x", __func__, page, atomic_read(u.o)); +} + +#define Z_EROFS_VMAP_ONSTACK_PAGES \ + min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U) +#define Z_EROFS_VMAP_GLOBAL_PAGES 2048 + +#endif + diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c new file mode 100644 index 000000000000..4dc9cec01297 --- /dev/null +++ b/fs/erofs/zmap.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2018-2019 HUAWEI, Inc. 
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#include "internal.h" +#include +#include + +int z_erofs_fill_inode(struct inode *inode) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + + if (vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) { + vi->z_advise = 0; + vi->z_algorithmtype[0] = 0; + vi->z_algorithmtype[1] = 0; + vi->z_logical_clusterbits = LOG_BLOCK_SIZE; + vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits; + vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits; + set_bit(EROFS_V_Z_INITED_BIT, &vi->flags); + } + + inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops; + return 0; +} + +static int fill_inode_lazy(struct inode *inode) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + struct super_block *const sb = inode->i_sb; + int err; + erofs_off_t pos; + struct page *page; + void *kaddr; + struct z_erofs_map_header *h; + + if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags)) + return 0; + + if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_Z_BIT, TASK_KILLABLE)) + return -ERESTARTSYS; + + err = 0; + if (test_bit(EROFS_V_Z_INITED_BIT, &vi->flags)) + goto out_unlock; + + DBG_BUGON(vi->datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY); + + pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize + + vi->xattr_isize, 8); + page = erofs_get_meta_page(sb, erofs_blknr(pos), false); + if (IS_ERR(page)) { + err = PTR_ERR(page); + goto out_unlock; + } + + kaddr = kmap_atomic(page); + + h = kaddr + erofs_blkoff(pos); + vi->z_advise = le16_to_cpu(h->h_advise); + vi->z_algorithmtype[0] = h->h_algorithmtype & 15; + vi->z_algorithmtype[1] = h->h_algorithmtype >> 4; + + if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) { + errln("unknown compression format %u for nid %llu, please upgrade kernel", + vi->z_algorithmtype[0], vi->nid); + err = -EOPNOTSUPP; + goto unmap_done; + } + + vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7); + vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits + + ((h->h_clusterbits >> 3) & 3); + + if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) { + errln("unsupported physical clusterbits %u for nid %llu, please upgrade kernel", + vi->z_physical_clusterbits[0], vi->nid); + err = -EOPNOTSUPP; + goto unmap_done; + } + + vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits + + ((h->h_clusterbits >> 5) & 7); + set_bit(EROFS_V_Z_INITED_BIT, &vi->flags); +unmap_done: + kunmap_atomic(kaddr); + unlock_page(page); + put_page(page); +out_unlock: + clear_and_wake_up_bit(EROFS_V_BL_Z_BIT, &vi->flags); + return err; +} + +struct z_erofs_maprecorder { + struct inode *inode; + struct erofs_map_blocks *map; + void *kaddr; + + unsigned long lcn; + /* compression extent information gathered */ + u8 type; + u16 clusterofs; + u16 delta[2]; + erofs_blk_t pblk; +}; + +static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m, + erofs_blk_t eblk) +{ + struct super_block *const sb = m->inode->i_sb; + struct erofs_map_blocks *const map = m->map; + struct page *mpage = map->mpage; + + if (mpage) { + if (mpage->index == eblk) { + if (!m->kaddr) + m->kaddr = kmap_atomic(mpage); + return 0; + } + + if (m->kaddr) { + kunmap_atomic(m->kaddr); + m->kaddr = NULL; + } + put_page(mpage); + } + + mpage = erofs_get_meta_page(sb, eblk, false); + if (IS_ERR(mpage)) { + map->mpage = NULL; + return PTR_ERR(mpage); + } + m->kaddr = kmap_atomic(mpage); + unlock_page(mpage); + map->mpage = mpage; + return 0; +} + +static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m, + unsigned long lcn) +{ + struct inode *const inode = 
m->inode; + struct erofs_vnode *const vi = EROFS_V(inode); + const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid); + const erofs_off_t pos = + Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize + + vi->xattr_isize) + + lcn * sizeof(struct z_erofs_vle_decompressed_index); + struct z_erofs_vle_decompressed_index *di; + unsigned int advise, type; + int err; + + err = z_erofs_reload_indexes(m, erofs_blknr(pos)); + if (err) + return err; + + m->lcn = lcn; + di = m->kaddr + erofs_blkoff(pos); + + advise = le16_to_cpu(di->di_advise); + type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) & + ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1); + switch (type) { + case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + m->clusterofs = 1 << vi->z_logical_clusterbits; + m->delta[0] = le16_to_cpu(di->di_u.delta[0]); + m->delta[1] = le16_to_cpu(di->di_u.delta[1]); + break; + case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: + case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: + m->clusterofs = le16_to_cpu(di->di_clusterofs); + m->pblk = le32_to_cpu(di->di_u.blkaddr); + break; + default: + DBG_BUGON(1); + return -EOPNOTSUPP; + } + m->type = type; + return 0; +} + +static unsigned int decode_compactedbits(unsigned int lobits, + unsigned int lomask, + u8 *in, unsigned int pos, u8 *type) +{ + const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7); + const unsigned int lo = v & lomask; + + *type = (v >> lobits) & 3; + return lo; +} + +static int unpack_compacted_index(struct z_erofs_maprecorder *m, + unsigned int amortizedshift, + unsigned int eofs) +{ + struct erofs_vnode *const vi = EROFS_V(m->inode); + const unsigned int lclusterbits = vi->z_logical_clusterbits; + const unsigned int lomask = (1 << lclusterbits) - 1; + unsigned int vcnt, base, lo, encodebits, nblk; + int i; + u8 *in, type; + + if (1 << amortizedshift == 4) + vcnt = 2; + else if (1 << amortizedshift == 2 && lclusterbits == 12) + vcnt = 16; + else + return -EOPNOTSUPP; + + encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; + base = round_down(eofs, vcnt << amortizedshift); + in = m->kaddr + base; + + i = (eofs - base) >> amortizedshift; + + lo = decode_compactedbits(lclusterbits, lomask, + in, encodebits * i, &type); + m->type = type; + if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) { + m->clusterofs = 1 << lclusterbits; + if (i + 1 != vcnt) { + m->delta[0] = lo; + return 0; + } + /* + * since the last lcluster in the pack is special, + * of which lo saves delta[1] rather than delta[0]. + * Hence, get delta[0] by the previous lcluster indirectly. 
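+ * (delta[0] of a NONHEAD lcluster grows by one per lcluster away from
+ * its HEAD, so it is the previous lcluster's lo plus one, or simply 1
+ * when the previous lcluster is a HEAD.)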
+ */ + lo = decode_compactedbits(lclusterbits, lomask, + in, encodebits * (i - 1), &type); + if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) + lo = 0; + m->delta[0] = lo + 1; + return 0; + } + m->clusterofs = lo; + m->delta[0] = 0; + /* figout out blkaddr (pblk) for HEAD lclusters */ + nblk = 1; + while (i > 0) { + --i; + lo = decode_compactedbits(lclusterbits, lomask, + in, encodebits * i, &type); + if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) + i -= lo; + + if (i >= 0) + ++nblk; + } + in += (vcnt << amortizedshift) - sizeof(__le32); + m->pblk = le32_to_cpu(*(__le32 *)in) + nblk; + return 0; +} + +static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m, + unsigned long lcn) +{ + struct inode *const inode = m->inode; + struct erofs_vnode *const vi = EROFS_V(inode); + const unsigned int lclusterbits = vi->z_logical_clusterbits; + const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) + + vi->inode_isize + vi->xattr_isize, 8) + + sizeof(struct z_erofs_map_header); + const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ); + unsigned int compacted_4b_initial, compacted_2b; + unsigned int amortizedshift; + erofs_off_t pos; + int err; + + if (lclusterbits != 12) + return -EOPNOTSUPP; + + if (lcn >= totalidx) + return -EINVAL; + + m->lcn = lcn; + /* used to align to 32-byte (compacted_2b) alignment */ + compacted_4b_initial = (32 - ebase % 32) / 4; + if (compacted_4b_initial == 32 / 4) + compacted_4b_initial = 0; + + if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) + compacted_2b = rounddown(totalidx - compacted_4b_initial, 16); + else + compacted_2b = 0; + + pos = ebase; + if (lcn < compacted_4b_initial) { + amortizedshift = 2; + goto out; + } + pos += compacted_4b_initial * 4; + lcn -= compacted_4b_initial; + + if (lcn < compacted_2b) { + amortizedshift = 1; + goto out; + } + pos += compacted_2b * 2; + lcn -= compacted_2b; + amortizedshift = 2; +out: + pos += lcn * (1 << amortizedshift); + err = z_erofs_reload_indexes(m, erofs_blknr(pos)); + if (err) + return err; + return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos)); +} + +static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m, + unsigned int lcn) +{ + const unsigned int datamode = EROFS_V(m->inode)->datamode; + + if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY) + return vle_legacy_load_cluster_from_disk(m, lcn); + + if (datamode == EROFS_INODE_FLAT_COMPRESSION) + return compacted_load_cluster_from_disk(m, lcn); + + return -EINVAL; +} + +static int vle_extent_lookback(struct z_erofs_maprecorder *m, + unsigned int lookback_distance) +{ + struct erofs_vnode *const vi = EROFS_V(m->inode); + struct erofs_map_blocks *const map = m->map; + const unsigned int lclusterbits = vi->z_logical_clusterbits; + unsigned long lcn = m->lcn; + int err; + + if (lcn < lookback_distance) { + errln("bogus lookback distance @ nid %llu", vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; + } + + /* load extent head logical cluster if needed */ + lcn -= lookback_distance; + err = vle_load_cluster_from_disk(m, lcn); + if (err) + return err; + + switch (m->type) { + case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + if (unlikely(!m->delta[0])) { + errln("invalid lookback distance 0 at nid %llu", + vi->nid); + DBG_BUGON(1); + return -EFSCORRUPTED; + } + return vle_extent_lookback(m, m->delta[0]); + case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: + map->m_flags &= ~EROFS_MAP_ZIPPED; + /* fallthrough */ + case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: + map->m_la = (lcn << lclusterbits) | m->clusterofs; + break; + default: + 
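/* unknown cluster type: warn and bail out as unsupported */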
errln("unknown type %u at lcn %lu of nid %llu", + m->type, lcn, vi->nid); + DBG_BUGON(1); + return -EOPNOTSUPP; + } + return 0; +} + +int z_erofs_map_blocks_iter(struct inode *inode, + struct erofs_map_blocks *map, + int flags) +{ + struct erofs_vnode *const vi = EROFS_V(inode); + struct z_erofs_maprecorder m = { + .inode = inode, + .map = map, + }; + int err = 0; + unsigned int lclusterbits, endoff; + unsigned long long ofs, end; + + trace_z_erofs_map_blocks_iter_enter(inode, map, flags); + + /* when trying to read beyond EOF, leave it unmapped */ + if (unlikely(map->m_la >= inode->i_size)) { + map->m_llen = map->m_la + 1 - inode->i_size; + map->m_la = inode->i_size; + map->m_flags = 0; + goto out; + } + + err = fill_inode_lazy(inode); + if (err) + goto out; + + lclusterbits = vi->z_logical_clusterbits; + ofs = map->m_la; + m.lcn = ofs >> lclusterbits; + endoff = ofs & ((1 << lclusterbits) - 1); + + err = vle_load_cluster_from_disk(&m, m.lcn); + if (err) + goto unmap_out; + + map->m_flags = EROFS_MAP_ZIPPED; /* by default, compressed */ + end = (m.lcn + 1ULL) << lclusterbits; + + switch (m.type) { + case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: + if (endoff >= m.clusterofs) + map->m_flags &= ~EROFS_MAP_ZIPPED; + /* fallthrough */ + case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: + if (endoff >= m.clusterofs) { + map->m_la = (m.lcn << lclusterbits) | m.clusterofs; + break; + } + /* m.lcn should be >= 1 if endoff < m.clusterofs */ + if (unlikely(!m.lcn)) { + errln("invalid logical cluster 0 at nid %llu", + vi->nid); + err = -EFSCORRUPTED; + goto unmap_out; + } + end = (m.lcn << lclusterbits) | m.clusterofs; + map->m_flags |= EROFS_MAP_FULL_MAPPED; + m.delta[0] = 1; + /* fallthrough */ + case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: + /* get the correspoinding first chunk */ + err = vle_extent_lookback(&m, m.delta[0]); + if (unlikely(err)) + goto unmap_out; + break; + default: + errln("unknown type %u at offset %llu of nid %llu", + m.type, ofs, vi->nid); + err = -EOPNOTSUPP; + goto unmap_out; + } + + map->m_llen = end - map->m_la; + map->m_plen = 1 << lclusterbits; + map->m_pa = blknr_to_addr(m.pblk); + map->m_flags |= EROFS_MAP_MAPPED; + +unmap_out: + if (m.kaddr) + kunmap_atomic(m.kaddr); + +out: + debugln("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o", + __func__, map->m_la, map->m_pa, + map->m_llen, map->m_plen, map->m_flags); + + trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err); + + /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */ + DBG_BUGON(err < 0 && err != -ENOMEM); + return err; +} + diff --git a/fs/erofs/zpvec.h b/fs/erofs/zpvec.h new file mode 100644 index 000000000000..bd3cee16491c --- /dev/null +++ b/fs/erofs/zpvec.h @@ -0,0 +1,157 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2018 HUAWEI, Inc. 
+ * http://www.huawei.com/ + * Created by Gao Xiang + */ +#ifndef __EROFS_FS_ZPVEC_H +#define __EROFS_FS_ZPVEC_H + +#include "tagptr.h" + +/* page type in pagevec for decompress subsystem */ +enum z_erofs_page_type { + /* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */ + Z_EROFS_PAGE_TYPE_EXCLUSIVE, + + Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED, + + Z_EROFS_VLE_PAGE_TYPE_HEAD, + Z_EROFS_VLE_PAGE_TYPE_MAX +}; + +extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0") + __bad_page_type_exclusive(void); + +/* pagevec tagged pointer */ +typedef tagptr2_t erofs_vtptr_t; + +/* pagevec collector */ +struct z_erofs_pagevec_ctor { + struct page *curr, *next; + erofs_vtptr_t *pages; + + unsigned int nr, index; +}; + +static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor, + bool atomic) +{ + if (!ctor->curr) + return; + + if (atomic) + kunmap_atomic(ctor->pages); + else + kunmap(ctor->curr); +} + +static inline struct page * +z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor, + unsigned int nr) +{ + unsigned int index; + + /* keep away from occupied pages */ + if (ctor->next) + return ctor->next; + + for (index = 0; index < nr; ++index) { + const erofs_vtptr_t t = ctor->pages[index]; + const unsigned int tags = tagptr_unfold_tags(t); + + if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE) + return tagptr_unfold_ptr(t); + } + DBG_BUGON(nr >= ctor->nr); + return NULL; +} + +static inline void +z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor, + bool atomic) +{ + struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr); + + z_erofs_pagevec_ctor_exit(ctor, atomic); + + ctor->curr = next; + ctor->next = NULL; + ctor->pages = atomic ? + kmap_atomic(ctor->curr) : kmap(ctor->curr); + + ctor->nr = PAGE_SIZE / sizeof(struct page *); + ctor->index = 0; +} + +static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor, + unsigned int nr, + erofs_vtptr_t *pages, + unsigned int i) +{ + ctor->nr = nr; + ctor->curr = ctor->next = NULL; + ctor->pages = pages; + + if (i >= nr) { + i -= nr; + z_erofs_pagevec_ctor_pagedown(ctor, false); + while (i > ctor->nr) { + i -= ctor->nr; + z_erofs_pagevec_ctor_pagedown(ctor, false); + } + } + ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i); + ctor->index = i; +} + +static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor, + struct page *page, + enum z_erofs_page_type type, + bool *occupied) +{ + *occupied = false; + if (unlikely(!ctor->next && type)) + if (ctor->index + 1 == ctor->nr) + return false; + + if (unlikely(ctor->index >= ctor->nr)) + z_erofs_pagevec_ctor_pagedown(ctor, false); + + /* exclusive page type must be 0 */ + if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL) + __bad_page_type_exclusive(); + + /* should remind that collector->next never equal to 1, 2 */ + if (type == (uintptr_t)ctor->next) { + ctor->next = page; + *occupied = true; + } + ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type); + return true; +} + +static inline struct page * +z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor, + enum z_erofs_page_type *type) +{ + erofs_vtptr_t t; + + if (unlikely(ctor->index >= ctor->nr)) { + DBG_BUGON(!ctor->next); + z_erofs_pagevec_ctor_pagedown(ctor, true); + } + + t = ctor->pages[ctor->index]; + + *type = tagptr_unfold_tags(t); + + /* should remind that collector->next never equal to 1, 2 */ + if (*type == (uintptr_t)ctor->next) + ctor->next = tagptr_unfold_ptr(t); + + ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0); + 
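/* hand back the untagged page pointer; its slot was cleared above */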
return tagptr_unfold_ptr(t); +} +#endif + diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h new file mode 100644 index 000000000000..bfb2da9c4eee --- /dev/null +++ b/include/trace/events/erofs.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM erofs + +#if !defined(_TRACE_EROFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_EROFS_H + +#include + +#define show_dev(dev) MAJOR(dev), MINOR(dev) +#define show_dev_nid(entry) show_dev(entry->dev), entry->nid + +#define show_file_type(type) \ + __print_symbolic(type, \ + { 0, "FILE" }, \ + { 1, "DIR" }) + +#define show_map_flags(flags) __print_flags(flags, "|", \ + { EROFS_GET_BLOCKS_RAW, "RAW" }) + +#define show_mflags(flags) __print_flags(flags, "", \ + { EROFS_MAP_MAPPED, "M" }, \ + { EROFS_MAP_META, "I" }, \ + { EROFS_MAP_ZIPPED, "Z" }) + +TRACE_EVENT(erofs_lookup, + + TP_PROTO(struct inode *dir, struct dentry *dentry, unsigned int flags), + + TP_ARGS(dir, dentry, flags), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(const char *, name ) + __field(unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = dir->i_sb->s_dev; + __entry->nid = EROFS_V(dir)->nid; + __entry->name = dentry->d_name.name; + __entry->flags = flags; + ), + + TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x", + show_dev_nid(__entry), + __entry->name, + __entry->flags) +); + +TRACE_EVENT(erofs_fill_inode, + TP_PROTO(struct inode *inode, int isdir), + TP_ARGS(inode, isdir), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(erofs_blk_t, blkaddr ) + __field(unsigned int, ofs ) + __field(int, isdir ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + __entry->blkaddr = erofs_blknr(iloc(EROFS_I_SB(inode), __entry->nid)); + __entry->ofs = erofs_blkoff(iloc(EROFS_I_SB(inode), __entry->nid)); + __entry->isdir = isdir; + ), + + TP_printk("dev = (%d,%d), nid = %llu, blkaddr %u ofs %u, isdir %d", + show_dev_nid(__entry), + __entry->blkaddr, __entry->ofs, + __entry->isdir) +); + +TRACE_EVENT(erofs_readpage, + + TP_PROTO(struct page *page, bool raw), + + TP_ARGS(page, raw), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(int, dir ) + __field(pgoff_t, index ) + __field(int, uptodate) + __field(bool, raw ) + ), + + TP_fast_assign( + __entry->dev = page->mapping->host->i_sb->s_dev; + __entry->nid = EROFS_V(page->mapping->host)->nid; + __entry->dir = S_ISDIR(page->mapping->host->i_mode); + __entry->index = page->index; + __entry->uptodate = PageUptodate(page); + __entry->raw = raw; + ), + + TP_printk("dev = (%d,%d), nid = %llu, %s, index = %lu, uptodate = %d " + "raw = %d", + show_dev_nid(__entry), + show_file_type(__entry->dir), + (unsigned long)__entry->index, + __entry->uptodate, + __entry->raw) +); + +TRACE_EVENT(erofs_readpages, + + TP_PROTO(struct inode *inode, struct page *page, unsigned int nrpage, + bool raw), + + TP_ARGS(inode, page, nrpage, raw), + + TP_STRUCT__entry( + __field(dev_t, dev ) + __field(erofs_nid_t, nid ) + __field(pgoff_t, start ) + __field(unsigned int, nrpage ) + __field(bool, raw ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + __entry->start = page->index; + __entry->nrpage = nrpage; + __entry->raw = raw; + ), + + TP_printk("dev = (%d,%d), nid = %llu, start = %lu nrpage = %u raw = %d", + show_dev_nid(__entry), + (unsigned long)__entry->start, + 
__entry->nrpage, + __entry->raw) +); + +DECLARE_EVENT_CLASS(erofs__map_blocks_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags), + + TP_ARGS(inode, map, flags), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( erofs_nid_t, nid ) + __field( erofs_off_t, la ) + __field( u64, llen ) + __field( unsigned int, flags ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + __entry->la = map->m_la; + __entry->llen = map->m_llen; + __entry->flags = flags; + ), + + TP_printk("dev = (%d,%d), nid = %llu, la %llu llen %llu flags %s", + show_dev_nid(__entry), + __entry->la, __entry->llen, + __entry->flags ? show_map_flags(__entry->flags) : "NULL") +); + +DEFINE_EVENT(erofs__map_blocks_enter, erofs_map_blocks_flatmode_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned flags), + + TP_ARGS(inode, map, flags) +); + +DEFINE_EVENT(erofs__map_blocks_enter, z_erofs_map_blocks_iter_enter, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags), + + TP_ARGS(inode, map, flags) +); + +DECLARE_EVENT_CLASS(erofs__map_blocks_exit, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags, int ret), + + TP_ARGS(inode, map, flags, ret), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( erofs_nid_t, nid ) + __field( unsigned int, flags ) + __field( erofs_off_t, la ) + __field( erofs_off_t, pa ) + __field( u64, llen ) + __field( u64, plen ) + __field( unsigned int, mflags ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + __entry->flags = flags; + __entry->la = map->m_la; + __entry->pa = map->m_pa; + __entry->llen = map->m_llen; + __entry->plen = map->m_plen; + __entry->mflags = map->m_flags; + __entry->ret = ret; + ), + + TP_printk("dev = (%d,%d), nid = %llu, flags %s " + "la %llu pa %llu llen %llu plen %llu mflags %s ret %d", + show_dev_nid(__entry), + __entry->flags ? 
show_map_flags(__entry->flags) : "NULL", + __entry->la, __entry->pa, __entry->llen, __entry->plen, + show_mflags(__entry->mflags), __entry->ret) +); + +DEFINE_EVENT(erofs__map_blocks_exit, erofs_map_blocks_flatmode_exit, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned flags, int ret), + + TP_ARGS(inode, map, flags, ret) +); + +DEFINE_EVENT(erofs__map_blocks_exit, z_erofs_map_blocks_iter_exit, + TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, + unsigned int flags, int ret), + + TP_ARGS(inode, map, flags, ret) +); + +TRACE_EVENT(erofs_destroy_inode, + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( erofs_nid_t, nid ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->nid = EROFS_V(inode)->nid; + ), + + TP_printk("dev = (%d,%d), nid = %llu", show_dev_nid(__entry)) +); + +#endif /* _TRACE_EROFS_H */ + + /* This part must be outside protection */ +#include diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h index 1274c692e59c..903cc2d2750b 100644 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@ -19,6 +19,7 @@ #define SQUASHFS_MAGIC 0x73717368 #define ECRYPTFS_SUPER_MAGIC 0xf15f #define EFS_SUPER_MAGIC 0x414A53 +#define EROFS_SUPER_MAGIC_V1 0xE0F5E1E2 #define EXT2_SUPER_MAGIC 0xEF53 #define EXT3_SUPER_MAGIC 0xEF53 #define XENFS_SUPER_MAGIC 0xabba1974 -- cgit v1.2.3 From e0e6d062822529dbe9be21939359b0d1e065bb0f Mon Sep 17 00:00:00 2001 From: Zhu Yanjun Date: Fri, 23 Aug 2019 21:04:16 -0400 Subject: net: rds: add service level support in rds-info >From IB specific 7.6.5 SERVICE LEVEL, Service Level (SL) is used to identify different flows within an IBA subnet. It is carried in the local route header of the packet. Before this commit, run "rds-info -I". The outputs are as below: " RDS IB Connections: LocalAddr RemoteAddr Tos SL LocalDev RemoteDev 192.2.95.3 192.2.95.1 2 0 fe80::21:28:1a:39 fe80::21:28:10:b9 192.2.95.3 192.2.95.1 1 0 fe80::21:28:1a:39 fe80::21:28:10:b9 192.2.95.3 192.2.95.1 0 0 fe80::21:28:1a:39 fe80::21:28:10:b9 " After this commit, the output is as below: " RDS IB Connections: LocalAddr RemoteAddr Tos SL LocalDev RemoteDev 192.2.95.3 192.2.95.1 2 2 fe80::21:28:1a:39 fe80::21:28:10:b9 192.2.95.3 192.2.95.1 1 1 fe80::21:28:1a:39 fe80::21:28:10:b9 192.2.95.3 192.2.95.1 0 0 fe80::21:28:1a:39 fe80::21:28:10:b9 " The commit fe3475af3bdf ("net: rds: add per rds connection cache statistics") adds cache_allocs in struct rds_info_rdma_connection as below: struct rds_info_rdma_connection { ... __u32 rdma_mr_max; __u32 rdma_mr_size; __u8 tos; __u32 cache_allocs; }; The peer struct in rds-tools of struct rds_info_rdma_connection is as below: struct rds_info_rdma_connection { ... uint32_t rdma_mr_max; uint32_t rdma_mr_size; uint8_t tos; uint8_t sl; uint32_t cache_allocs; }; The difference between userspace and kernel is the member variable sl. In the kernel struct, the member variable sl is missing. This will introduce risks. So it is necessary to use this commit to avoid this risk. Fixes: fe3475af3bdf ("net: rds: add per rds connection cache statistics") CC: Joe Jin CC: JUNXIAO_BI Suggested-by: Gerd Rausch Signed-off-by: Zhu Yanjun Acked-by: Santosh Shilimkar Signed-off-by: David S. 
Miller --- include/uapi/linux/rds.h | 2 ++ net/rds/ib.c | 16 ++++++++++------ net/rds/ib.h | 1 + net/rds/ib_cm.c | 3 +++ net/rds/rdma_transport.c | 10 ++++++++-- 5 files changed, 24 insertions(+), 8 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h index fd6b5f66e2c5..cba368e55863 100644 --- a/include/uapi/linux/rds.h +++ b/include/uapi/linux/rds.h @@ -250,6 +250,7 @@ struct rds_info_rdma_connection { __u32 rdma_mr_max; __u32 rdma_mr_size; __u8 tos; + __u8 sl; __u32 cache_allocs; }; @@ -265,6 +266,7 @@ struct rds6_info_rdma_connection { __u32 rdma_mr_max; __u32 rdma_mr_size; __u8 tos; + __u8 sl; __u32 cache_allocs; }; diff --git a/net/rds/ib.c b/net/rds/ib.c index ec05d91aa9a2..45acab2de0cf 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c @@ -291,7 +291,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn, void *buffer) { struct rds_info_rdma_connection *iinfo = buffer; - struct rds_ib_connection *ic; + struct rds_ib_connection *ic = conn->c_transport_data; /* We will only ever look at IB transports */ if (conn->c_trans != &rds_ib_transport) @@ -301,15 +301,16 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn, iinfo->src_addr = conn->c_laddr.s6_addr32[3]; iinfo->dst_addr = conn->c_faddr.s6_addr32[3]; - iinfo->tos = conn->c_tos; + if (ic) { + iinfo->tos = conn->c_tos; + iinfo->sl = ic->i_sl; + } memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid)); memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid)); if (rds_conn_state(conn) == RDS_CONN_UP) { struct rds_ib_device *rds_ibdev; - ic = conn->c_transport_data; - rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid, (union ib_gid *)&iinfo->dst_gid); @@ -329,7 +330,7 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, void *buffer) { struct rds6_info_rdma_connection *iinfo6 = buffer; - struct rds_ib_connection *ic; + struct rds_ib_connection *ic = conn->c_transport_data; /* We will only ever look at IB transports */ if (conn->c_trans != &rds_ib_transport) @@ -337,6 +338,10 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, iinfo6->src_addr = conn->c_laddr; iinfo6->dst_addr = conn->c_faddr; + if (ic) { + iinfo6->tos = conn->c_tos; + iinfo6->sl = ic->i_sl; + } memset(&iinfo6->src_gid, 0, sizeof(iinfo6->src_gid)); memset(&iinfo6->dst_gid, 0, sizeof(iinfo6->dst_gid)); @@ -344,7 +349,6 @@ static int rds6_ib_conn_info_visitor(struct rds_connection *conn, if (rds_conn_state(conn) == RDS_CONN_UP) { struct rds_ib_device *rds_ibdev; - ic = conn->c_transport_data; rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid, (union ib_gid *)&iinfo6->dst_gid); rds_ibdev = ic->rds_ibdev; diff --git a/net/rds/ib.h b/net/rds/ib.h index 303c6ee8bdb7..f2b558e8b5ea 100644 --- a/net/rds/ib.h +++ b/net/rds/ib.h @@ -220,6 +220,7 @@ struct rds_ib_connection { /* Send/Recv vectors */ int i_scq_vector; int i_rcq_vector; + u8 i_sl; }; /* This assumes that atomic_t is at least 32 bits */ diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index fddaa09f7b0d..233f1368162b 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c @@ -152,6 +152,9 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even RDS_PROTOCOL_MINOR(conn->c_version), ic->i_flowctl ? ", flow control" : ""); + /* receive sl from the peer */ + ic->i_sl = ic->i_cm_id->route.path_rec->sl; + atomic_set(&ic->i_cq_quiesce, 0); /* Init rings and fill recv. 
this needs to wait until protocol diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index 9986d6065c4d..5f741e51b4ba 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c @@ -43,6 +43,9 @@ static struct rdma_cm_id *rds_rdma_listen_id; static struct rdma_cm_id *rds6_rdma_listen_id; #endif +/* Per IB specification 7.7.3, service level is a 4-bit field. */ +#define TOS_TO_SL(tos) ((tos) & 0xF) + static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id, struct rdma_cm_event *event, bool isv6) @@ -97,10 +100,13 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id, struct rds_ib_connection *ibic; ibic = conn->c_transport_data; - if (ibic && ibic->i_cm_id == cm_id) + if (ibic && ibic->i_cm_id == cm_id) { + cm_id->route.path_rec[0].sl = + TOS_TO_SL(conn->c_tos); ret = trans->cm_initiate_connect(cm_id, isv6); - else + } else { rds_conn_drop(conn); + } } break; -- cgit v1.2.3 From 63d10e12b00dfc8d8387bea9eaab376881335731 Mon Sep 17 00:00:00 2001 From: Ander Juaristi Date: Sat, 17 Aug 2019 13:17:53 +0200 Subject: netfilter: nft_meta: support for time matching This patch introduces meta matches in the kernel for time (a UNIX timestamp), day (a day of week, represented as an integer between 0-6), and hour (an hour in the current day, or: number of seconds since midnight). All values are taken as unsigned 64-bit integers. The 'time' keyword is internally converted to nanoseconds by nft in userspace, and hence the timestamp is taken in nanoseconds as well. Signed-off-by: Ander Juaristi Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 6 +++++ net/netfilter/nft_meta.c | 46 ++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 82abaa183fc3..b83b62eb4b01 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -799,6 +799,9 @@ enum nft_exthdr_attributes { * @NFT_META_OIFKIND: packet output interface kind name (dev->rtnl_link_ops->kind) * @NFT_META_BRI_IIFPVID: packet input bridge port pvid * @NFT_META_BRI_IIFVPROTO: packet input bridge vlan proto + * @NFT_META_TIME_NS: time since epoch (in nanoseconds) + * @NFT_META_TIME_DAY: day of week (from 0 = Sunday to 6 = Saturday) + * @NFT_META_TIME_HOUR: hour of day (in seconds) */ enum nft_meta_keys { NFT_META_LEN, @@ -831,6 +834,9 @@ enum nft_meta_keys { NFT_META_OIFKIND, NFT_META_BRI_IIFPVID, NFT_META_BRI_IIFVPROTO, + NFT_META_TIME_NS, + NFT_META_TIME_DAY, + NFT_META_TIME_HOUR, }; /** diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index f69afb9ff3cb..317e3a9e8c5b 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -26,8 +26,36 @@ #include /* NF_BR_PRE_ROUTING */ +#define NFT_META_SECS_PER_MINUTE 60 +#define NFT_META_SECS_PER_HOUR 3600 +#define NFT_META_SECS_PER_DAY 86400 +#define NFT_META_DAYS_PER_WEEK 7 + static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state); +static u8 nft_meta_weekday(unsigned long secs) +{ + unsigned int dse; + u8 wday; + + secs -= NFT_META_SECS_PER_MINUTE * sys_tz.tz_minuteswest; + dse = secs / NFT_META_SECS_PER_DAY; + wday = (4 + dse) % NFT_META_DAYS_PER_WEEK; + + return wday; +} + +static u32 nft_meta_hour(unsigned long secs) +{ + struct tm tm; + + time64_to_tm(secs, 0, &tm); + + return tm.tm_hour * NFT_META_SECS_PER_HOUR + + tm.tm_min * NFT_META_SECS_PER_MINUTE + + tm.tm_sec; +} + void nft_meta_get_eval(const struct nft_expr 
*expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) @@ -218,6 +246,15 @@ void nft_meta_get_eval(const struct nft_expr *expr, goto err; strncpy((char *)dest, out->rtnl_link_ops->kind, IFNAMSIZ); break; + case NFT_META_TIME_NS: + nft_reg_store64(dest, ktime_get_real_ns()); + break; + case NFT_META_TIME_DAY: + nft_reg_store8(dest, nft_meta_weekday(get_seconds())); + break; + case NFT_META_TIME_HOUR: + *dest = nft_meta_hour(get_seconds()); + break; default: WARN_ON(1); goto err; @@ -330,6 +367,15 @@ int nft_meta_get_init(const struct nft_ctx *ctx, len = sizeof(u8); break; #endif + case NFT_META_TIME_NS: + len = sizeof(u64); + break; + case NFT_META_TIME_DAY: + len = sizeof(u8); + break; + case NFT_META_TIME_HOUR: + len = sizeof(u32); + break; default: return -EOPNOTSUPP; } -- cgit v1.2.3 From 65af4a10743b766e319fb53812c5926c6d98b100 Mon Sep 17 00:00:00 2001 From: Michael Braun Date: Tue, 20 Aug 2019 15:11:46 +0200 Subject: netfilter: nfnetlink_log: add support for VLAN information Currently, there is no vlan information (e.g. when used with a vlan aware bridge) passed to userspache, HWHEADER will contain an 08 00 (ip) suffix even for tagged ip packets. Therefore, add an extra netlink attribute that passes the vlan information to userspace similarly to 15824ab29f for nfqueue. Signed-off-by: Michael Braun Reviewed-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nfnetlink_log.h | 11 ++++++ net/netfilter/nfnetlink_log.c | 57 ++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/netfilter/nfnetlink_log.h b/include/uapi/linux/netfilter/nfnetlink_log.h index 20983cb195a0..45c8d3b027e0 100644 --- a/include/uapi/linux/netfilter/nfnetlink_log.h +++ b/include/uapi/linux/netfilter/nfnetlink_log.h @@ -33,6 +33,15 @@ struct nfulnl_msg_packet_timestamp { __aligned_be64 usec; }; +enum nfulnl_vlan_attr { + NFULA_VLAN_UNSPEC, + NFULA_VLAN_PROTO, /* __be16 skb vlan_proto */ + NFULA_VLAN_TCI, /* __be16 skb htons(vlan_tci) */ + __NFULA_VLAN_MAX, +}; + +#define NFULA_VLAN_MAX (__NFULA_VLAN_MAX + 1) + enum nfulnl_attr_type { NFULA_UNSPEC, NFULA_PACKET_HDR, @@ -54,6 +63,8 @@ enum nfulnl_attr_type { NFULA_HWLEN, /* hardware header length */ NFULA_CT, /* nf_conntrack_netlink.h */ NFULA_CT_INFO, /* enum ip_conntrack_info */ + NFULA_VLAN, /* nested attribute: packet vlan info */ + NFULA_L2HDR, /* full L2 header */ __NFULA_MAX }; diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index d69e1863e536..0ba020ca38e6 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -385,6 +385,57 @@ nfulnl_timer(struct timer_list *t) instance_put(inst); } +static u32 nfulnl_get_bridge_size(const struct sk_buff *skb) +{ + u32 size = 0; + + if (!skb_mac_header_was_set(skb)) + return 0; + + if (skb_vlan_tag_present(skb)) { + size += nla_total_size(0); /* nested */ + size += nla_total_size(sizeof(u16)); /* id */ + size += nla_total_size(sizeof(u16)); /* tag */ + } + + if (skb->network_header > skb->mac_header) + size += nla_total_size(skb->network_header - skb->mac_header); + + return size; +} + +static int nfulnl_put_bridge(struct nfulnl_instance *inst, const struct sk_buff *skb) +{ + if (!skb_mac_header_was_set(skb)) + return 0; + + if (skb_vlan_tag_present(skb)) { + struct nlattr *nest; + + nest = nla_nest_start(inst->skb, NFULA_VLAN); + if (!nest) + goto nla_put_failure; + + if (nla_put_be16(inst->skb, NFULA_VLAN_TCI, htons(skb->vlan_tci)) || + nla_put_be16(inst->skb, 
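/* the VLAN protocol (TPID) sits next to the TCI in the nest */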
NFULA_VLAN_PROTO, skb->vlan_proto)) + goto nla_put_failure; + + nla_nest_end(inst->skb, nest); + } + + if (skb->mac_header < skb->network_header) { + int len = (int)(skb->network_header - skb->mac_header); + + if (nla_put(inst->skb, NFULA_L2HDR, len, skb_mac_header(skb))) + goto nla_put_failure; + } + + return 0; + +nla_put_failure: + return -1; +} + /* This is an inline function, we don't really care about a long * list of arguments */ static inline int @@ -580,6 +631,10 @@ __build_packet_message(struct nfnl_log_net *log, NFULA_CT, NFULA_CT_INFO) < 0) goto nla_put_failure; + if ((pf == NFPROTO_NETDEV || pf == NFPROTO_BRIDGE) && + nfulnl_put_bridge(inst, skb) < 0) + goto nla_put_failure; + if (data_len) { struct nlattr *nla; int size = nla_attr_size(data_len); @@ -687,6 +742,8 @@ nfulnl_log_packet(struct net *net, size += nfnl_ct->build_size(ct); } } + if (pf == NFPROTO_NETDEV || pf == NFPROTO_BRIDGE) + size += nfulnl_get_bridge_size(skb); qthreshold = inst->qthreshold; /* per-rule qthreshold overrides per-instance */ -- cgit v1.2.3 From d8abe88450beb96a66e434323eb6ab737654b840 Mon Sep 17 00:00:00 2001 From: Mark Zhang Date: Mon, 19 Aug 2019 14:36:26 +0300 Subject: RDMA/mlx5: RDMA_RX flow type support for user applications Currently user applications can only steer TCP/IP(NIC RX/RX) traffic. This patch adds RDMA_RX as a new flow type to allow the user to insert steering rules to control RDMA traffic. Two destinations are supported(but not set at the same time): devx flow table object and QP. Signed-off-by: Mark Zhang Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/20190819113626.20284-4-leon@kernel.org Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/flow.c | 13 +++++++++++-- drivers/infiniband/hw/mlx5/main.c | 7 +++++++ drivers/infiniband/hw/mlx5/mlx5_ib.h | 1 + include/uapi/rdma/mlx5_user_ioctl_verbs.h | 1 + 4 files changed, 20 insertions(+), 2 deletions(-) (limited to 'include/uapi') diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c index b8841355fcd5..571bb8539cdb 100644 --- a/drivers/infiniband/hw/mlx5/flow.c +++ b/drivers/infiniband/hw/mlx5/flow.c @@ -32,6 +32,9 @@ mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type, case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB: *namespace = MLX5_FLOW_NAMESPACE_FDB; break; + case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX: + *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX; + break; default: return -EINVAL; } @@ -101,6 +104,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !dest_devx) return -EINVAL; + /* Allow only DEVX object or QP as dest when inserting to RDMA_RX */ + if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) && + ((!dest_devx && !dest_qp) || (dest_devx && dest_qp))) + return -EINVAL; + if (dest_devx) { devx_obj = uverbs_attr_get_obj( attrs, MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX); @@ -112,8 +120,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)( */ if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type)) return -EINVAL; - /* Allow only flow table as dest when inserting to FDB */ - if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && + /* Allow only flow table as dest when inserting to FDB or RDMA_RX */ + if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB || + fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) && dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) return -EINVAL; } else if (dest_qp) { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 
1ec0e667110e..07aecba16019 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3963,6 +3963,11 @@ _get_flow_table(struct mlx5_ib_dev *dev, esw_encap) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; priority = FDB_BYPASS_PATH; + } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) { + max_table_size = + BIT(MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, + log_max_ft_size)); + priority = fs_matcher->priority; } max_table_size = min_t(int, max_table_size, MLX5_FS_MAX_ENTRIES); @@ -3977,6 +3982,8 @@ _get_flow_table(struct mlx5_ib_dev *dev, prio = &dev->flow_db->egress_prios[priority]; else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB) prio = &dev->flow_db->fdb; + else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) + prio = &dev->flow_db->rdma_rx[priority]; if (!prio) return ERR_PTR(-EINVAL); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index cb41a7e6255a..6abfbf3a69b7 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -200,6 +200,7 @@ struct mlx5_ib_flow_db { struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS]; struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS]; struct mlx5_ib_flow_prio fdb; + struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT]; struct mlx5_flow_table *lag_demux_ft; /* Protect flow steering bypass flow tables * when add/del flow rules. diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h index 7e9900b0e746..88b6ca70c2fe 100644 --- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h +++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h @@ -43,6 +43,7 @@ enum mlx5_ib_uapi_flow_table_type { MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0, MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1, MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2, + MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX = 0x3, }; enum mlx5_ib_uapi_flow_action_packet_reformat_type { -- cgit v1.2.3 From d0a8d877da976c244092ce859683b2fa116217db Mon Sep 17 00:00:00 2001 From: Ander Juaristi Date: Sat, 17 Aug 2019 13:26:52 +0200 Subject: netfilter: nft_dynset: support for element deletion This patch implements the delete operation from the ruleset. It implements a new delete() function in nft_set_rhash. It is simpler to use than the already existing remove(), because it only takes the set and the key as arguments, whereas remove() expects a full nft_set_elem structure. 
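To make the narrower interface concrete, below is a rough sketch of the shape a backend's delete() takes (illustration only, not part of this patch; the example_* types and helpers are hypothetical, the real implementation is nft_rhash_delete() in the diff): resolve the key to an element, then unlink it directly, with no nft_set_elem and no transaction state involved.

static bool example_set_delete(const struct nft_set *set, const u32 *key)
{
	struct example_priv *priv = nft_set_priv(set);	/* backend private data */
	struct example_elem *he;

	/* resolve the key to an element; only set + key are needed */
	he = example_lookup(priv, key);			/* hypothetical helper */
	if (he == NULL)
		return false;

	/* unlink directly; cheap enough for the packet path */
	return example_unlink(priv, he) == 0;		/* hypothetical helper */
}

Keeping this operation free of control-plane bookkeeping is what lets nft_dynset_eval() call it directly for every matching packet.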
Signed-off-by: Ander Juaristi Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_tables.h | 10 +++++++++- include/uapi/linux/netfilter/nf_tables.h | 1 + net/netfilter/nft_dynset.c | 6 ++++++ net/netfilter/nft_set_hash.c | 19 +++++++++++++++++++ 4 files changed, 35 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 64765140657b..498665158ee0 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -302,17 +302,23 @@ struct nft_expr; * struct nft_set_ops - nf_tables set operations * * @lookup: look up an element within the set + * @update: update an element if exists, add it if doesn't exist + * @delete: delete an element * @insert: insert new element into set * @activate: activate new element in the next generation * @deactivate: lookup for element and deactivate it in the next generation * @flush: deactivate element in the next generation * @remove: remove element from set - * @walk: iterate over all set elemeennts + * @walk: iterate over all set elements * @get: get set elements * @privsize: function to return size of set private data * @init: initialize private data of new set instance * @destroy: destroy private data of set instance * @elemsize: element private size + * + * Operations lookup, update and delete have simpler interfaces, are faster + * and currently only used in the packet path. All the rest are slower, + * control plane functions. */ struct nft_set_ops { bool (*lookup)(const struct net *net, @@ -327,6 +333,8 @@ struct nft_set_ops { const struct nft_expr *expr, struct nft_regs *regs, const struct nft_set_ext **ext); + bool (*delete)(const struct nft_set *set, + const u32 *key); int (*insert)(const struct net *net, const struct nft_set *set, diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index b83b62eb4b01..0ff932dadc8e 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -636,6 +636,7 @@ enum nft_lookup_attributes { enum nft_dynset_ops { NFT_DYNSET_OP_ADD, NFT_DYNSET_OP_UPDATE, + NFT_DYNSET_OP_DELETE, }; enum nft_dynset_flags { diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 33833a0cb989..8887295414dc 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -84,6 +84,11 @@ void nft_dynset_eval(const struct nft_expr *expr, const struct nft_expr *sexpr; u64 timeout; + if (priv->op == NFT_DYNSET_OP_DELETE) { + set->ops->delete(set, &regs->data[priv->sreg_key]); + return; + } + if (set->ops->update(set, &regs->data[priv->sreg_key], nft_dynset_new, expr, regs, &ext)) { sexpr = NULL; @@ -161,6 +166,7 @@ static int nft_dynset_init(const struct nft_ctx *ctx, priv->op = ntohl(nla_get_be32(tb[NFTA_DYNSET_OP])); switch (priv->op) { case NFT_DYNSET_OP_ADD: + case NFT_DYNSET_OP_DELETE: break; case NFT_DYNSET_OP_UPDATE: if (!(set->flags & NFT_SET_TIMEOUT)) diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index c490451fcebf..b331a3c9a3a8 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c @@ -234,6 +234,24 @@ static void nft_rhash_remove(const struct net *net, rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params); } +static bool nft_rhash_delete(const struct nft_set *set, + const u32 *key) +{ + struct nft_rhash *priv = nft_set_priv(set); + struct nft_rhash_cmp_arg arg = { + .genmask = NFT_GENMASK_ANY, + .set = set, + .key = key, + }; + struct
nft_rhash_elem *he; + + he = rhashtable_lookup(&priv->ht, &arg, nft_rhash_params); + if (he == NULL) + return false; + + return rhashtable_remove_fast(&priv->ht, &he->node, nft_rhash_params) == 0; +} + static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_iter *iter) { @@ -662,6 +680,7 @@ struct nft_set_type nft_set_rhash_type __read_mostly = { .remove = nft_rhash_remove, .lookup = nft_rhash_lookup, .update = nft_rhash_update, + .delete = nft_rhash_delete, .walk = nft_rhash_walk, .get = nft_rhash_get, }, -- cgit v1.2.3 From 10d274e880eb208ec6a76261a9f8f8155020f771 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 22 Aug 2019 22:52:12 -0700 Subject: bpf: introduce verifier internal test flag Introduce BPF_F_TEST_STATE_FREQ flag to stress test parentage chain and state pruning. Signed-off-by: Alexei Starovoitov Acked-by: Song Liu Signed-off-by: Daniel Borkmann --- include/linux/bpf_verifier.h | 1 + include/uapi/linux/bpf.h | 3 +++ kernel/bpf/syscall.c | 1 + kernel/bpf/verifier.c | 5 ++++- 4 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 5fe99f322b1c..26a6d58ca78c 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -355,6 +355,7 @@ struct bpf_verifier_env { struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ int stack_size; /* number of states to be processed */ bool strict_alignment; /* perform strict pointer alignment checks */ + bool test_state_freq; /* test verifier with different pruning frequency */ struct bpf_verifier_state *cur_state; /* current verifier state */ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ struct bpf_verifier_state_list *free_list; diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index b5889257cc33..5d2fb183ee2d 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -285,6 +285,9 @@ enum bpf_attach_type { */ #define BPF_F_TEST_RND_HI32 (1U << 2) +/* The verifier internal test flag. Behavior is undefined */ +#define BPF_F_TEST_STATE_FREQ (1U << 3) + /* When BPF ldimm64's insn[0].src_reg != 0 then this can have * two extensions: * diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index c0f62fd67c6b..ca60eafa6922 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1629,6 +1629,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | BPF_F_ANY_ALIGNMENT | + BPF_F_TEST_STATE_FREQ | BPF_F_TEST_RND_HI32)) return -EINVAL; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 10c0ff93f52b..f340cfe53c6e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -7222,7 +7222,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) struct bpf_verifier_state_list *sl, **pprev; struct bpf_verifier_state *cur = env->cur_state, *new; int i, j, err, states_cnt = 0; - bool add_new_state = false; + bool add_new_state = env->test_state_freq ? 
true : false; cur->last_insn_idx = env->prev_insn_idx; if (!env->insn_aux_data[insn_idx].prune_point) @@ -9262,6 +9262,9 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, env->allow_ptr_leaks = is_priv; + if (is_priv) + env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; + ret = replace_map_fd_with_map_ptr(env); if (ret < 0) goto skip_full_check; -- cgit v1.2.3 From d5886b919a720ff859aebf569cb0f353b1d977a6 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Mon, 26 Aug 2019 16:30:04 +0800 Subject: sctp: allow users to set ep ecn flag by sockopt SCTP_ECN_SUPPORTED sockopt will be added to allow users to change the ep ecn flag; it is similar to the other feature flags. Signed-off-by: Xin Long Signed-off-by: David S. Miller --- include/uapi/linux/sctp.h | 1 + net/sctp/socket.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index 62527aca8477..6d5b164af55c 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h @@ -136,6 +136,7 @@ typedef __s32 sctp_assoc_t; #define SCTP_EVENT 127 #define SCTP_ASCONF_SUPPORTED 128 #define SCTP_AUTH_SUPPORTED 129 +#define SCTP_ECN_SUPPORTED 130 /* PR-SCTP policies */ #define SCTP_PR_SCTP_NONE 0x0000 diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 82bc25223cfe..3e50a9712fb1 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4560,6 +4560,34 @@ out: return retval; } +static int sctp_setsockopt_ecn_supported(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + int retval = -EINVAL; + + if (optlen != sizeof(params)) + goto out; + + if (copy_from_user(&params, optval, optlen)) { + retval = -EFAULT; + goto out; + } + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) + goto out; + + sctp_sk(sk)->ep->ecn_enable = !!params.assoc_value; + retval = 0; + +out: + return retval; +} + /* API 6.2 setsockopt(), getsockopt() * * Applications use setsockopt() and getsockopt() to set or retrieve @@ -4766,6 +4794,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname, case SCTP_AUTH_SUPPORTED: retval = sctp_setsockopt_auth_supported(sk, optval, optlen); break; + case SCTP_ECN_SUPPORTED: + retval = sctp_setsockopt_ecn_supported(sk, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; @@ -7828,6 +7859,45 @@ out: return retval; } +static int sctp_getsockopt_ecn_supported(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + int retval = -EFAULT; + + if (len < sizeof(params)) { + retval = -EINVAL; + goto out; + } + + len = sizeof(params); + if (copy_from_user(&params, optval, len)) + goto out; + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc && params.assoc_id != SCTP_FUTURE_ASSOC && + sctp_style(sk, UDP)) { + retval = -EINVAL; + goto out; + } + + params.assoc_value = asoc ?
asoc->peer.ecn_capable + : sctp_sk(sk)->ep->ecn_enable; + + if (put_user(len, optlen)) + goto out; + + if (copy_to_user(optval, &params, len)) + goto out; + + retval = 0; + +out: + return retval; +} + static int sctp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { @@ -8037,6 +8107,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname, retval = sctp_getsockopt_auth_supported(sk, len, optval, optlen); break; + case SCTP_ECN_SUPPORTED: + retval = sctp_getsockopt_ecn_supported(sk, len, optval, optlen); + break; default: retval = -ENOPROTOOPT; break; -- cgit v1.2.3 From ab43762ef010967e4ccd53627f70a2eecbeafefb Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Tue, 6 Aug 2019 11:46:00 +0300 Subject: perf: Allow normal events to output AUX data In some cases, ordinary (non-AUX) events can generate data for AUX events. For example, PEBS events can come out as records in the Intel PT stream instead of their usual DS records, if configured to do so. One requirement for such events is to consistently schedule together, to ensure that the data from the "AUX output" events isn't lost while their corresponding AUX event is not scheduled. We use grouping to provide this guarantee: an "AUX output" event can be added to a group where an AUX event is a group leader, provided that the former supports writing to the latter. Signed-off-by: Alexander Shishkin Signed-off-by: Peter Zijlstra (Intel) Cc: Ingo Molnar Cc: Arnaldo Carvalho de Melo Cc: kan.liang@linux.intel.com Link: https://lkml.kernel.org/r/20190806084606.4021-2-alexander.shishkin@linux.intel.com --- include/linux/perf_event.h | 14 +++++++ include/uapi/linux/perf_event.h | 3 +- kernel/events/core.c | 93 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 109 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index e8ad3c590a23..61448c19a132 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -246,6 +246,7 @@ struct perf_event; #define PERF_PMU_CAP_ITRACE 0x20 #define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 #define PERF_PMU_CAP_NO_EXCLUDE 0x80 +#define PERF_PMU_CAP_AUX_OUTPUT 0x100 /** * struct pmu - generic performance monitoring unit @@ -446,6 +447,16 @@ struct pmu { void (*addr_filters_sync) (struct perf_event *event); /* optional */ + /* + * Check if event can be used for aux_output purposes for + * events of this PMU. + * + * Runs from perf_event_open(). Should return 0 for "no match" + * or non-zero for "match". + */ + int (*aux_output_match) (struct perf_event *event); + /* optional */ + /* * Filter events for PMU-specific reasons.
*/ @@ -681,6 +692,9 @@ struct perf_event { struct perf_addr_filter_range *addr_filter_ranges; unsigned long addr_filters_gen; + /* for aux_output events */ + struct perf_event *aux_event; + void (*destroy)(struct perf_event *); struct rcu_head rcu_head; diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 7198ddd0c6b1..bb7b271397a6 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -374,7 +374,8 @@ struct perf_event_attr { namespaces : 1, /* include namespaces data */ ksymbol : 1, /* include ksymbol events */ bpf_event : 1, /* include bpf events */ - __reserved_1 : 33; + aux_output : 1, /* generate AUX records instead of events */ + __reserved_1 : 32; union { __u32 wakeup_events; /* wakeup every n events */ diff --git a/kernel/events/core.c b/kernel/events/core.c index 0463c1151bae..2aad959e6def 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1887,6 +1887,89 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) ctx->generation++; } +static int +perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) +{ + if (!has_aux(aux_event)) + return 0; + + if (!event->pmu->aux_output_match) + return 0; + + return event->pmu->aux_output_match(aux_event); +} + +static void put_event(struct perf_event *event); +static void event_sched_out(struct perf_event *event, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx); + +static void perf_put_aux_event(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); + struct perf_event *iter; + + /* + * If event uses aux_event tear down the link + */ + if (event->aux_event) { + iter = event->aux_event; + event->aux_event = NULL; + put_event(iter); + return; + } + + /* + * If the event is an aux_event, tear down all links to + * it from other events. + */ + for_each_sibling_event(iter, event->group_leader) { + if (iter->aux_event != event) + continue; + + iter->aux_event = NULL; + put_event(event); + + /* + * If it's ACTIVE, schedule it out and put it into ERROR + * state so that we don't try to schedule it again. Note + * that perf_event_enable() will clear the ERROR status. + */ + event_sched_out(iter, cpuctx, ctx); + perf_event_set_state(event, PERF_EVENT_STATE_ERROR); + } +} + +static int perf_get_aux_event(struct perf_event *event, + struct perf_event *group_leader) +{ + /* + * Our group leader must be an aux event if we want to be + * an aux_output. This way, the aux event will precede its + * aux_output events in the group, and therefore will always + * schedule first. + */ + if (!group_leader) + return 0; + + if (!perf_aux_output_match(event, group_leader)) + return 0; + + if (!atomic_long_inc_not_zero(&group_leader->refcount)) + return 0; + + /* + * Link aux_outputs to their aux event; this is undone in + * perf_group_detach() by perf_put_aux_event(). When the + * group is torn down, the aux_output events lose their + * link to the aux_event and can't schedule any more. + */ + event->aux_event = group_leader; + + return 1; +} + static void perf_group_detach(struct perf_event *event) { struct perf_event *sibling, *tmp; @@ -1902,6 +1985,8 @@ static void perf_group_detach(struct perf_event *event) event->attach_state &= ~PERF_ATTACH_GROUP; + perf_put_aux_event(event); + /* * If this is a sibling, remove it from its group.
*/ @@ -10426,6 +10511,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, goto err_ns; } + if (event->attr.aux_output && + !(pmu->capabilities & PERF_PMU_CAP_AUX_OUTPUT)) { + err = -EOPNOTSUPP; + goto err_pmu; + } + err = exclusive_event_init(event); if (err) goto err_pmu; @@ -11082,6 +11173,8 @@ SYSCALL_DEFINE5(perf_event_open, } } + if (event->attr.aux_output && !perf_get_aux_event(event, group_leader)) + goto err_locked; /* * Must be under the same ctx::mutex as perf_install_in_context(), -- cgit v1.2.3 From cca3854010c5aebf3a06be0de04f26c4f1ae5810 Mon Sep 17 00:00:00 2001 From: Thinh Nguyen Date: Mon, 19 Aug 2019 18:36:12 -0700 Subject: usb: gadget: composite: Set recommended BESL values Set the recommended BESL deep and baseline values based on the gadget's configuration parameters to the extended BOS descriptor. This feature helps to optimize power savings by maximizing the opportunity for longer L1 residency time. Signed-off-by: Thinh Nguyen Signed-off-by: Felipe Balbi --- drivers/usb/gadget/composite.c | 38 ++++++++++++++++++++++++++------------ include/uapi/linux/usb/ch9.h | 2 ++ 2 files changed, 28 insertions(+), 12 deletions(-) (limited to 'include/uapi') diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 9118b42c70b6..7ed009dc0f92 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -612,6 +612,7 @@ static int bos_desc(struct usb_composite_dev *cdev) struct usb_ext_cap_descriptor *usb_ext; struct usb_dcd_config_params dcd_config_params; struct usb_bos_descriptor *bos = cdev->req->buf; + unsigned int besl = 0; bos->bLength = USB_DT_BOS_SIZE; bos->bDescriptorType = USB_DT_BOS; @@ -619,6 +620,29 @@ static int bos_desc(struct usb_composite_dev *cdev) bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE); bos->bNumDeviceCaps = 0; + /* Get Controller configuration */ + if (cdev->gadget->ops->get_config_params) { + cdev->gadget->ops->get_config_params(cdev->gadget, + &dcd_config_params); + } else { + dcd_config_params.besl_baseline = + USB_DEFAULT_BESL_UNSPECIFIED; + dcd_config_params.besl_deep = + USB_DEFAULT_BESL_UNSPECIFIED; + dcd_config_params.bU1devExitLat = + USB_DEFAULT_U1_DEV_EXIT_LAT; + dcd_config_params.bU2DevExitLat = + cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT); + } + + if (dcd_config_params.besl_baseline != USB_DEFAULT_BESL_UNSPECIFIED) + besl = USB_BESL_BASELINE_VALID | + USB_SET_BESL_BASELINE(dcd_config_params.besl_baseline); + + if (dcd_config_params.besl_deep != USB_DEFAULT_BESL_UNSPECIFIED) + besl |= USB_BESL_DEEP_VALID | + USB_SET_BESL_DEEP(dcd_config_params.besl_deep); + /* * A SuperSpeed device shall include the USB2.0 extension descriptor * and shall support LPM when operating in USB2.0 HS mode. 
@@ -629,7 +653,8 @@ static int bos_desc(struct usb_composite_dev *cdev) usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE; usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY; usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT; - usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT); + usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | + USB_BESL_SUPPORT | besl); /* * The Superspeed USB Capability descriptor shall be implemented by all @@ -650,17 +675,6 @@ static int bos_desc(struct usb_composite_dev *cdev) USB_HIGH_SPEED_OPERATION | USB_5GBPS_OPERATION); ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION; - - /* Get Controller configuration */ - if (cdev->gadget->ops->get_config_params) { - cdev->gadget->ops->get_config_params(cdev->gadget, - &dcd_config_params); - } else { - dcd_config_params.bU1devExitLat = - USB_DEFAULT_U1_DEV_EXIT_LAT; - dcd_config_params.bU2DevExitLat = - cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT); - } ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat; ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat; } diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index d5a5caec8fbc..2b623f36af6b 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -894,6 +894,8 @@ struct usb_ext_cap_descriptor { /* Link Power Management */ #define USB_BESL_SUPPORT (1 << 2) /* supports BESL */ #define USB_BESL_BASELINE_VALID (1 << 3) /* Baseline BESL valid*/ #define USB_BESL_DEEP_VALID (1 << 4) /* Deep BESL valid */ +#define USB_SET_BESL_BASELINE(p) (((p) & 0xf) << 8) +#define USB_SET_BESL_DEEP(p) (((p) & 0xf) << 12) #define USB_GET_BESL_BASELINE(p) (((p) & (0xf << 8)) >> 8) #define USB_GET_BESL_DEEP(p) (((p) & (0xf << 12)) >> 12) } __attribute__((packed)); -- cgit v1.2.3 From c8cd6e7f159e6f8d79a23df4aeaa7a540415951b Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Wed, 28 Aug 2019 12:20:42 +0200 Subject: cfg80211: add local BSS receive time to survey information This is useful for checking how much airtime is being used up by other transmissions on the channel, e.g. by calculating (time_rx - time_bss_rx) or (time_busy - time_bss_rx - time_tx) Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20190828102042.58016-1-nbd@nbd.name Signed-off-by: Johannes Berg --- include/net/cfg80211.h | 4 ++++ include/uapi/linux/nl80211.h | 3 +++ net/wireless/nl80211.c | 4 ++++ 3 files changed, 11 insertions(+) (limited to 'include/uapi') diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 5253e7f667bd..ff45c3e1abff 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -768,6 +768,7 @@ ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef) * @SURVEY_INFO_TIME_RX: receive time was filled in * @SURVEY_INFO_TIME_TX: transmit time was filled in * @SURVEY_INFO_TIME_SCAN: scan time was filled in + * @SURVEY_INFO_TIME_BSS_RX: local BSS receive time was filled in * * Used by the driver to indicate which info in &struct survey_info * it has filled in during the get_survey(). 
@@ -781,6 +782,7 @@ enum survey_info_flags { SURVEY_INFO_TIME_RX = BIT(5), SURVEY_INFO_TIME_TX = BIT(6), SURVEY_INFO_TIME_SCAN = BIT(7), + SURVEY_INFO_TIME_BSS_RX = BIT(8), }; /** @@ -797,6 +799,7 @@ enum survey_info_flags { * @time_rx: amount of time the radio spent receiving data * @time_tx: amount of time the radio spent transmitting data * @time_scan: amount of time the radio spent for scanning + * @time_bss_rx: amount of time the radio spent receiving data on a local BSS * * Used by dump_survey() to report back per-channel survey information. * @@ -811,6 +814,7 @@ struct survey_info { u64 time_rx; u64 time_tx; u64 time_scan; + u64 time_bss_rx; u32 filled; s8 noise; }; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index bf7c4222f512..beee59c831a7 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -3870,6 +3870,8 @@ enum nl80211_user_reg_hint_type { * @NL80211_SURVEY_INFO_TIME_SCAN: time the radio spent for scan * (on this channel or globally) * @NL80211_SURVEY_INFO_PAD: attribute used for padding for 64-bit alignment + * @NL80211_SURVEY_INFO_TIME_BSS_RX: amount of time the radio spent + * receiving frames destined to the local BSS * @NL80211_SURVEY_INFO_MAX: highest survey info attribute number * currently defined * @__NL80211_SURVEY_INFO_AFTER_LAST: internal use @@ -3886,6 +3888,7 @@ enum nl80211_survey_info { NL80211_SURVEY_INFO_TIME_TX, NL80211_SURVEY_INFO_TIME_SCAN, NL80211_SURVEY_INFO_PAD, + NL80211_SURVEY_INFO_TIME_BSS_RX, /* keep last */ __NL80211_SURVEY_INFO_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 8a6cef949210..3e30e18d1d89 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -8806,6 +8806,10 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq, nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_SCAN, survey->time_scan, NL80211_SURVEY_INFO_PAD)) goto nla_put_failure; + if ((survey->filled & SURVEY_INFO_TIME_BSS_RX) && + nla_put_u64_64bit(msg, NL80211_SURVEY_INFO_TIME_BSS_RX, + survey->time_bss_rx, NL80211_SURVEY_INFO_PAD)) + goto nla_put_failure; nla_nest_end(msg, infoattr); -- cgit v1.2.3 From c05cd3645814724bdeb32a2b4d953b12bdea5f8c Mon Sep 17 00:00:00 2001 From: Kevin Laatz Date: Tue, 27 Aug 2019 02:25:22 +0000 Subject: xsk: add support to allow unaligned chunk placement Currently, addresses are chunk-size aligned. This means we are very restricted in terms of where we can place a chunk within the umem. For example, if we have a chunk size of 2k, then our chunks can only be placed at 0,2k,4k,6k,8k... and so on (i.e. every 2k starting from 0). This patch introduces the ability to use unaligned chunks. With these changes, we are no longer bound to placing chunks at a 2k (or whatever the chunk size is) interval. Since we are no longer dealing with aligned chunks, they can now cross page boundaries. Checks for page contiguity have been added in order to keep track of which pages are followed by a physically contiguous page.
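As a rough userspace illustration of the resulting address encoding (a sketch, not part of this patch; it mirrors the XSK_UNALIGNED_BUF_* masks added to if_xdp.h below, and the base/offset values are made up), the lower 48 bits of a descriptor address now carry the base address while the upper 16 bits carry an offset such as the headroom:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the masks this patch adds to include/uapi/linux/if_xdp.h. */
#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48
#define XSK_UNALIGNED_BUF_ADDR_MASK \
	((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)

int main(void)
{
	uint64_t base = 0x3f001;	/* example: not chunk-size aligned */
	uint64_t offset = 256;		/* example: headroom */
	uint64_t addr = base | (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);

	printf("base=0x%llx offset=%llu effective=0x%llx\n",
	       (unsigned long long)(addr & XSK_UNALIGNED_BUF_ADDR_MASK),
	       (unsigned long long)(addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT),
	       (unsigned long long)((addr & XSK_UNALIGNED_BUF_ADDR_MASK) +
				    (addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT)));
	return 0;
}

This is the same split that the xsk_umem_extract_addr(), xsk_umem_extract_offset() and xsk_umem_add_offset_to_addr() helpers below compute in the kernel.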
Signed-off-by: Kevin Laatz Signed-off-by: Ciara Loftus Signed-off-by: Bruce Richardson Acked-by: Jonathan Lemon Signed-off-by: Daniel Borkmann --- include/net/xdp_sock.h | 75 ++++++++++++++++++++++++++++++++++-- include/uapi/linux/if_xdp.h | 9 +++++ net/xdp/xdp_umem.c | 19 ++++++--- net/xdp/xsk.c | 94 ++++++++++++++++++++++++++++++++++++--------- net/xdp/xsk_diag.c | 2 +- net/xdp/xsk_queue.h | 70 +++++++++++++++++++++++++++++---- 6 files changed, 233 insertions(+), 36 deletions(-) (limited to 'include/uapi') diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index f023b9940d64..c9398ce7960f 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -16,6 +16,13 @@ struct net_device; struct xsk_queue; +/* Masks for xdp_umem_page flags. + * The low 12-bits of the addr will be 0 since this is the page address, so we + * can use them for flags. + */ +#define XSK_NEXT_PG_CONTIG_SHIFT 0 +#define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT) + struct xdp_umem_page { void *addr; dma_addr_t dma; @@ -27,8 +34,12 @@ struct xdp_umem_fq_reuse { u64 handles[]; }; -/* Flags for the umem flags field. */ -#define XDP_UMEM_USES_NEED_WAKEUP (1 << 0) +/* Flags for the umem flags field. + * + * The NEED_WAKEUP flag is 1 due to the reuse of the flags field for public + * flags. See inlude/uapi/include/linux/if_xdp.h. + */ +#define XDP_UMEM_USES_NEED_WAKEUP (1 << 1) struct xdp_umem { struct xsk_queue *fq; @@ -124,14 +135,36 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs, int xsk_map_inc(struct xsk_map *map); void xsk_map_put(struct xsk_map *map); +static inline u64 xsk_umem_extract_addr(u64 addr) +{ + return addr & XSK_UNALIGNED_BUF_ADDR_MASK; +} + +static inline u64 xsk_umem_extract_offset(u64 addr) +{ + return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT; +} + +static inline u64 xsk_umem_add_offset_to_addr(u64 addr) +{ + return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr); +} + static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) { - return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1)); + unsigned long page_addr; + + addr = xsk_umem_add_offset_to_addr(addr); + page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr; + + return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK); } static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) { - return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1)); + addr = xsk_umem_add_offset_to_addr(addr); + + return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK); } /* Reuse-queue aware version of FILL queue helpers */ @@ -172,6 +205,19 @@ static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) rq->handles[rq->length++] = addr; } + +/* Handle the offset appropriately depending on aligned or unaligned mode. + * For unaligned mode, we store the offset in the upper 16-bits of the address. + * For aligned mode, we simply add the offset to the address. 
+ */ +static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address, + u64 offset) +{ + if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) + return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); + else + return address + offset; +} #else static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) { @@ -241,6 +287,21 @@ static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, return NULL; } +static inline u64 xsk_umem_extract_addr(u64 addr) +{ + return 0; +} + +static inline u64 xsk_umem_extract_offset(u64 addr) +{ + return 0; +} + +static inline u64 xsk_umem_add_offset_to_addr(u64 addr) +{ + return 0; +} + static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) { return NULL; @@ -290,6 +351,12 @@ static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem) return false; } +static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle, + u64 offset) +{ + return 0; +} + #endif /* CONFIG_XDP_SOCKETS */ #endif /* _LINUX_XDP_SOCK_H */ diff --git a/include/uapi/linux/if_xdp.h b/include/uapi/linux/if_xdp.h index 62b80d57b72a..be328c59389d 100644 --- a/include/uapi/linux/if_xdp.h +++ b/include/uapi/linux/if_xdp.h @@ -26,6 +26,9 @@ */ #define XDP_USE_NEED_WAKEUP (1 << 3) +/* Flags for xsk_umem_config flags */ +#define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0) + struct sockaddr_xdp { __u16 sxdp_family; __u16 sxdp_flags; @@ -66,6 +69,7 @@ struct xdp_umem_reg { __u64 len; /* Length of packet data area */ __u32 chunk_size; __u32 headroom; + __u32 flags; }; struct xdp_statistics { @@ -87,6 +91,11 @@ struct xdp_options { #define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL #define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL +/* Masks for unaligned chunks mode */ +#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48 +#define XSK_UNALIGNED_BUF_ADDR_MASK \ + ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1) + /* Rx/Tx descriptor */ struct xdp_desc { __u64 addr; diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 2d65779282a1..e997b263a0dd 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -340,6 +340,7 @@ static int xdp_umem_account_pages(struct xdp_umem *umem) static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) { + bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; u32 chunk_size = mr->chunk_size, headroom = mr->headroom; unsigned int chunks, chunks_per_page; u64 addr = mr->addr, size = mr->len; @@ -355,7 +356,11 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) return -EINVAL; } - if (!is_power_of_2(chunk_size)) + if (mr->flags & ~(XDP_UMEM_UNALIGNED_CHUNK_FLAG | + XDP_UMEM_USES_NEED_WAKEUP)) + return -EINVAL; + + if (!unaligned_chunks && !is_power_of_2(chunk_size)) return -EINVAL; if (!PAGE_ALIGNED(addr)) { @@ -372,9 +377,11 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) if (chunks == 0) return -EINVAL; - chunks_per_page = PAGE_SIZE / chunk_size; - if (chunks < chunks_per_page || chunks % chunks_per_page) - return -EINVAL; + if (!unaligned_chunks) { + chunks_per_page = PAGE_SIZE / chunk_size; + if (chunks < chunks_per_page || chunks % chunks_per_page) + return -EINVAL; + } headroom = ALIGN(headroom, 64); @@ -383,13 +390,15 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) return -EINVAL; umem->address = (unsigned long)addr; - umem->chunk_mask = ~((u64)chunk_size - 1); + umem->chunk_mask = unaligned_chunks ? 
XSK_UNALIGNED_BUF_ADDR_MASK + : ~((u64)chunk_size - 1); umem->size = size; umem->headroom = headroom; umem->chunk_size_nohr = chunk_size - headroom; umem->npgs = size / PAGE_SIZE; umem->pgs = NULL; umem->user = NULL; + umem->flags = mr->flags; INIT_LIST_HEAD(&umem->xsk_list); spin_lock_init(&umem->xsk_list_lock); diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index ee4428a892fa..187fd157fcff 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -45,7 +45,7 @@ EXPORT_SYMBOL(xsk_umem_has_addrs); u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr) { - return xskq_peek_addr(umem->fq, addr); + return xskq_peek_addr(umem->fq, addr, umem); } EXPORT_SYMBOL(xsk_umem_peek_addr); @@ -115,21 +115,43 @@ bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem) } EXPORT_SYMBOL(xsk_umem_uses_need_wakeup); +/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for + * each page. This is only required in copy mode. + */ +static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf, + u32 len, u32 metalen) +{ + void *to_buf = xdp_umem_get_data(umem, addr); + + addr = xsk_umem_add_offset_to_addr(addr); + if (xskq_crosses_non_contig_pg(umem, addr, len + metalen)) { + void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr; + u64 page_start = addr & ~(PAGE_SIZE - 1); + u64 first_len = PAGE_SIZE - (addr - page_start); + + memcpy(to_buf, from_buf, first_len + metalen); + memcpy(next_pg_addr, from_buf + first_len, len - first_len); + + return; + } + + memcpy(to_buf, from_buf, len + metalen); +} + static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) { - void *to_buf, *from_buf; + u64 offset = xs->umem->headroom; + u64 addr, memcpy_addr; + void *from_buf; u32 metalen; - u64 addr; int err; - if (!xskq_peek_addr(xs->umem->fq, &addr) || + if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) || len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) { xs->rx_dropped++; return -ENOSPC; } - addr += xs->umem->headroom; - if (unlikely(xdp_data_meta_unsupported(xdp))) { from_buf = xdp->data; metalen = 0; @@ -138,9 +160,11 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) metalen = xdp->data - xdp->data_meta; } - to_buf = xdp_umem_get_data(xs->umem, addr); - memcpy(to_buf, from_buf, len + metalen); - addr += metalen; + memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset); + __xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen); + + offset += metalen; + addr = xsk_umem_adjust_offset(xs->umem, addr, offset); err = xskq_produce_batch_desc(xs->rx, addr, len); if (!err) { xskq_discard_addr(xs->umem->fq); @@ -185,6 +209,7 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) { u32 metalen = xdp->data - xdp->data_meta; u32 len = xdp->data_end - xdp->data; + u64 offset = xs->umem->headroom; void *buffer; u64 addr; int err; @@ -196,17 +221,17 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) goto out_unlock; } - if (!xskq_peek_addr(xs->umem->fq, &addr) || + if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) || len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) { err = -ENOSPC; goto out_drop; } - addr += xs->umem->headroom; - + addr = xsk_umem_adjust_offset(xs->umem, addr, offset); buffer = xdp_umem_get_data(xs->umem, addr); memcpy(buffer, xdp->data_meta, len + metalen); - addr += metalen; + + addr = xsk_umem_adjust_offset(xs->umem, addr, metalen); err = xskq_produce_batch_desc(xs->rx, addr, len); if (err) goto out_drop; @@ -250,7 +275,7 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc) 
rcu_read_lock(); list_for_each_entry_rcu(xs, &umem->xsk_list, list) { - if (!xskq_peek_desc(xs->tx, desc)) + if (!xskq_peek_desc(xs->tx, desc, umem)) continue; if (xskq_produce_addr_lazy(umem->cq, desc->addr)) @@ -304,7 +329,7 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m, if (xs->queue_id >= xs->dev->real_num_tx_queues) goto out; - while (xskq_peek_desc(xs->tx, &desc)) { + while (xskq_peek_desc(xs->tx, &desc, xs->umem)) { char *buffer; u64 addr; u32 len; @@ -333,7 +358,7 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m, skb->dev = xs->dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; - skb_shinfo(skb)->destructor_arg = (void *)(long)addr; + skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr; skb->destructor = xsk_destruct_skb; err = dev_direct_xmit(skb, xs->queue_id); @@ -526,6 +551,24 @@ static struct socket *xsk_lookup_xsk_from_fd(int fd) return sock; } +/* Check if umem pages are contiguous. + * If zero-copy mode, use the DMA address to do the page contiguity check + * For all other modes we use addr (kernel virtual address) + * Store the result in the low bits of addr. + */ +static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags) +{ + struct xdp_umem_page *pgs = umem->pages; + int i, is_contig; + + for (i = 0; i < umem->npgs - 1; i++) { + is_contig = (flags & XDP_ZEROCOPY) ? + (pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) : + (pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr); + pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT; + } +} + static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr; @@ -616,6 +659,8 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) err = xdp_umem_assign_dev(xs->umem, dev, qid, flags); if (err) goto out_unlock; + + xsk_check_page_contiguity(xs->umem, flags); } xs->dev = dev; @@ -636,6 +681,13 @@ out_release: return err; } +struct xdp_umem_reg_v1 { + __u64 addr; /* Start of packet data area */ + __u64 len; /* Length of packet data area */ + __u32 chunk_size; + __u32 headroom; +}; + static int xsk_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { @@ -673,10 +725,16 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname, } case XDP_UMEM_REG: { - struct xdp_umem_reg mr; + size_t mr_size = sizeof(struct xdp_umem_reg); + struct xdp_umem_reg mr = {}; struct xdp_umem *umem; - if (copy_from_user(&mr, optval, sizeof(mr))) + if (optlen < sizeof(struct xdp_umem_reg_v1)) + return -EINVAL; + else if (optlen < sizeof(mr)) + mr_size = sizeof(struct xdp_umem_reg_v1); + + if (copy_from_user(&mr, optval, mr_size)) return -EFAULT; mutex_lock(&xs->mutex); diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c index d5e06c8e0cbf..9986a759fe06 100644 --- a/net/xdp/xsk_diag.c +++ b/net/xdp/xsk_diag.c @@ -56,7 +56,7 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb) du.id = umem->id; du.size = umem->size; du.num_pages = umem->npgs; - du.chunk_size = (__u32)(~umem->chunk_mask + 1); + du.chunk_size = umem->chunk_size_nohr + umem->headroom; du.headroom = umem->headroom; du.ifindex = umem->dev ? 
umem->dev->ifindex : 0; du.queue_id = umem->queue_id; diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index dd9e985c2461..eddae4688862 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -134,6 +134,17 @@ static inline bool xskq_has_addrs(struct xsk_queue *q, u32 cnt) /* UMEM queue */ +static inline bool xskq_crosses_non_contig_pg(struct xdp_umem *umem, u64 addr, + u64 length) +{ + bool cross_pg = (addr & (PAGE_SIZE - 1)) + length > PAGE_SIZE; + bool next_pg_contig = + (unsigned long)umem->pages[(addr >> PAGE_SHIFT)].addr & + XSK_NEXT_PG_CONTIG_MASK; + + return cross_pg && !next_pg_contig; +} + static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr) { if (addr >= q->size) { @@ -144,23 +155,51 @@ static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr) return true; } -static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr) +static inline bool xskq_is_valid_addr_unaligned(struct xsk_queue *q, u64 addr, + u64 length, + struct xdp_umem *umem) +{ + u64 base_addr = xsk_umem_extract_addr(addr); + + addr = xsk_umem_add_offset_to_addr(addr); + if (base_addr >= q->size || addr >= q->size || + xskq_crosses_non_contig_pg(umem, addr, length)) { + q->invalid_descs++; + return false; + } + + return true; +} + +static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr, + struct xdp_umem *umem) { while (q->cons_tail != q->cons_head) { struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; unsigned int idx = q->cons_tail & q->ring_mask; *addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask; + + if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) { + if (xskq_is_valid_addr_unaligned(q, *addr, + umem->chunk_size_nohr, + umem)) + return addr; + goto out; + } + if (xskq_is_valid_addr(q, *addr)) return addr; +out: q->cons_tail++; } return NULL; } -static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr) +static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr, + struct xdp_umem *umem) { if (q->cons_tail == q->cons_head) { smp_mb(); /* D, matches A */ @@ -171,7 +210,7 @@ static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr) smp_rmb(); } - return xskq_validate_addr(q, addr); + return xskq_validate_addr(q, addr, umem); } static inline void xskq_discard_addr(struct xsk_queue *q) @@ -230,8 +269,21 @@ static inline int xskq_reserve_addr(struct xsk_queue *q) /* Rx/Tx queue */ -static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d) +static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d, + struct xdp_umem *umem) { + if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) { + if (!xskq_is_valid_addr_unaligned(q, d->addr, d->len, umem)) + return false; + + if (d->len > umem->chunk_size_nohr || d->options) { + q->invalid_descs++; + return false; + } + + return true; + } + if (!xskq_is_valid_addr(q, d->addr)) return false; @@ -245,14 +297,15 @@ static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d) } static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q, - struct xdp_desc *desc) + struct xdp_desc *desc, + struct xdp_umem *umem) { while (q->cons_tail != q->cons_head) { struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; unsigned int idx = q->cons_tail & q->ring_mask; *desc = READ_ONCE(ring->desc[idx]); - if (xskq_is_valid_desc(q, desc)) + if (xskq_is_valid_desc(q, desc, umem)) return desc; q->cons_tail++; @@ -262,7 +315,8 @@ static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q, } static inline struct xdp_desc 
*xskq_peek_desc(struct xsk_queue *q, - struct xdp_desc *desc) + struct xdp_desc *desc, + struct xdp_umem *umem) { if (q->cons_tail == q->cons_head) { smp_mb(); /* D, matches A */ @@ -273,7 +327,7 @@ static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q, smp_rmb(); /* C, matches B */ } - return xskq_validate_desc(q, desc); + return xskq_validate_desc(q, desc, umem); } static inline void xskq_discard_desc(struct xsk_queue *q) -- cgit v1.2.3 From 61723b393292f1e4ea27f8d123384d50b176c29d Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Fri, 30 Aug 2019 12:25:48 +0200 Subject: tcp: ulp: add functions to dump ulp-specific information Currently, only getsockopt(TCP_ULP) can be invoked to know if a ULP is on top of a TCP socket. Extend idiag_get_aux() and idiag_get_aux_size(), introduced by commit b37e88407c1d ("inet_diag: allow protocols to provide additional data"), to report the ULP name and other information that can be made available by the ULP through optional functions. Users having CAP_NET_ADMIN privileges will then be able to retrieve this information through inet_diag_handler, if they specify INET_DIAG_INFO in the request. Signed-off-by: Davide Caratti Signed-off-by: David S. Miller --- include/net/tcp.h | 3 +++ include/uapi/linux/inet_diag.h | 8 +++++++ net/ipv4/tcp_diag.c | 52 +++++++++++++++++++++++++++++++++++++++++- 3 files changed, 62 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/net/tcp.h b/include/net/tcp.h index 77fe87f7a992..c9a3f9688223 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2122,6 +2122,9 @@ struct tcp_ulp_ops { void (*update)(struct sock *sk, struct proto *p); /* cleanup ulp */ void (*release)(struct sock *sk); + /* diagnostic */ + int (*get_info)(const struct sock *sk, struct sk_buff *skb); + size_t (*get_info_size)(const struct sock *sk); char name[TCP_ULP_NAME_MAX]; struct module *owner; diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index e8baca85bac6..e2c6273274f3 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -153,11 +153,19 @@ enum { INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */ INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */ INET_DIAG_MD5SIG, + INET_DIAG_ULP_INFO, __INET_DIAG_MAX, }; #define INET_DIAG_MAX (__INET_DIAG_MAX - 1) +enum { + INET_ULP_INFO_UNSPEC, + INET_ULP_INFO_NAME, + __INET_ULP_INFO_MAX, +}; +#define INET_ULP_INFO_MAX (__INET_ULP_INFO_MAX - 1) + /* INET_DIAG_MEM */ struct inet_diag_meminfo { diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index a3a386236d93..babc156deabb 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c @@ -81,13 +81,42 @@ static int tcp_diag_put_md5sig(struct sk_buff *skb, } #endif +static int tcp_diag_put_ulp(struct sk_buff *skb, struct sock *sk, + const struct tcp_ulp_ops *ulp_ops) +{ + struct nlattr *nest; + int err; + + nest = nla_nest_start_noflag(skb, INET_DIAG_ULP_INFO); + if (!nest) + return -EMSGSIZE; + + err = nla_put_string(skb, INET_ULP_INFO_NAME, ulp_ops->name); + if (err) + goto nla_failure; + + if (ulp_ops->get_info) + err = ulp_ops->get_info(sk, skb); + if (err) + goto nla_failure; + + nla_nest_end(skb, nest); + return 0; + +nla_failure: + nla_nest_cancel(skb, nest); + return err; +} + static int tcp_diag_get_aux(struct sock *sk, bool net_admin, struct sk_buff *skb) { + struct inet_connection_sock *icsk = inet_csk(sk); + int err = 0; + #ifdef CONFIG_TCP_MD5SIG if (net_admin) { struct tcp_md5sig_info *md5sig; - int err = 0; rcu_read_lock(); md5sig =
rcu_dereference(tcp_sk(sk)->md5sig_info); @@ -99,11 +128,21 @@ static int tcp_diag_get_aux(struct sock *sk, bool net_admin, } #endif + if (net_admin) { + const struct tcp_ulp_ops *ulp_ops; + + ulp_ops = icsk->icsk_ulp_ops; + if (ulp_ops) + err = tcp_diag_put_ulp(skb, sk, ulp_ops); + if (err) + return err; + } return 0; } static size_t tcp_diag_get_aux_size(struct sock *sk, bool net_admin) { + struct inet_connection_sock *icsk = inet_csk(sk); size_t size = 0; #ifdef CONFIG_TCP_MD5SIG @@ -124,6 +163,17 @@ static size_t tcp_diag_get_aux_size(struct sock *sk, bool net_admin) } #endif + if (net_admin) { + const struct tcp_ulp_ops *ulp_ops; + + ulp_ops = icsk->icsk_ulp_ops; + if (ulp_ops) { + size += nla_total_size(0) + + nla_total_size(TCP_ULP_NAME_MAX); + if (ulp_ops->get_info_size) + size += ulp_ops->get_info_size(sk); + } + } return size; } -- cgit v1.2.3 From 26811cc9f55acf835f7fdadc5ff2bbd6f06bc3ac Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Fri, 30 Aug 2019 12:25:49 +0200 Subject: net: tls: export protocol version, cipher, tx_conf/rx_conf to socket diag When an application configures kernel TLS on top of a TCP socket, it's now possible for inet_diag_handler() to collect information regarding the protocol version, the cipher type and TX / RX configuration, in case INET_DIAG_INFO is requested. Signed-off-by: Davide Caratti Reviewed-by: Jakub Kicinski Signed-off-by: David S. Miller --- include/net/tls.h | 17 +++++++++++ include/uapi/linux/inet_diag.h | 1 + include/uapi/linux/tls.h | 15 ++++++++++ net/tls/tls_main.c | 64 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 97 insertions(+) (limited to 'include/uapi') diff --git a/include/net/tls.h b/include/net/tls.h index 4997742475cd..ec3c3ed2c6c3 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -431,6 +431,23 @@ static inline bool is_tx_ready(struct tls_sw_context_tx *ctx) return READ_ONCE(rec->tx_ready); } +static inline u16 tls_user_config(struct tls_context *ctx, bool tx) +{ + u16 config = tx ? 
ctx->tx_conf : ctx->rx_conf; + + switch (config) { + case TLS_BASE: + return TLS_CONF_BASE; + case TLS_SW: + return TLS_CONF_SW; + case TLS_HW: + return TLS_CONF_HW; + case TLS_HW_RECORD: + return TLS_CONF_HW_RECORD; + } + return 0; +} + struct sk_buff * tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, struct sk_buff *skb); diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index e2c6273274f3..a1ff345b3f33 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h @@ -162,6 +162,7 @@ enum { enum { INET_ULP_INFO_UNSPEC, INET_ULP_INFO_NAME, + INET_ULP_INFO_TLS, __INET_ULP_INFO_MAX, }; #define INET_ULP_INFO_MAX (__INET_ULP_INFO_MAX - 1) diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h index 5b9c26753e46..bcd2869ed472 100644 --- a/include/uapi/linux/tls.h +++ b/include/uapi/linux/tls.h @@ -109,4 +109,19 @@ struct tls12_crypto_info_aes_ccm_128 { unsigned char rec_seq[TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE]; }; +enum { + TLS_INFO_UNSPEC, + TLS_INFO_VERSION, + TLS_INFO_CIPHER, + TLS_INFO_TXCONF, + TLS_INFO_RXCONF, + __TLS_INFO_MAX, +}; +#define TLS_INFO_MAX (__TLS_INFO_MAX - 1) + +#define TLS_CONF_BASE 1 +#define TLS_CONF_SW 2 +#define TLS_CONF_HW 3 +#define TLS_CONF_HW_RECORD 4 + #endif /* _UAPI_LINUX_TLS_H */ diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index f8f2d2c3d627..277f7c209fed 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -39,6 +39,7 @@ #include #include #include +#include #include @@ -835,6 +836,67 @@ static void tls_update(struct sock *sk, struct proto *p) } } +static int tls_get_info(const struct sock *sk, struct sk_buff *skb) +{ + u16 version, cipher_type; + struct tls_context *ctx; + struct nlattr *start; + int err; + + start = nla_nest_start_noflag(skb, INET_ULP_INFO_TLS); + if (!start) + return -EMSGSIZE; + + rcu_read_lock(); + ctx = rcu_dereference(inet_csk(sk)->icsk_ulp_data); + if (!ctx) { + err = 0; + goto nla_failure; + } + version = ctx->prot_info.version; + if (version) { + err = nla_put_u16(skb, TLS_INFO_VERSION, version); + if (err) + goto nla_failure; + } + cipher_type = ctx->prot_info.cipher_type; + if (cipher_type) { + err = nla_put_u16(skb, TLS_INFO_CIPHER, cipher_type); + if (err) + goto nla_failure; + } + err = nla_put_u16(skb, TLS_INFO_TXCONF, tls_user_config(ctx, true)); + if (err) + goto nla_failure; + + err = nla_put_u16(skb, TLS_INFO_RXCONF, tls_user_config(ctx, false)); + if (err) + goto nla_failure; + + rcu_read_unlock(); + nla_nest_end(skb, start); + return 0; + +nla_failure: + rcu_read_unlock(); + nla_nest_cancel(skb, start); + return err; +} + +static size_t tls_get_info_size(const struct sock *sk) +{ + size_t size = 0; + + size += nla_total_size(0) + /* INET_ULP_INFO_TLS */ + nla_total_size(sizeof(u16)) + /* TLS_INFO_VERSION */ + nla_total_size(sizeof(u16)) + /* TLS_INFO_CIPHER */ + nla_total_size(sizeof(u16)) + /* TLS_INFO_RXCONF */ + nla_total_size(sizeof(u16)) + /* TLS_INFO_TXCONF */ + 0; + + return size; +} + void tls_register_device(struct tls_device *device) { spin_lock_bh(&device_spinlock); @@ -856,6 +918,8 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = { .owner = THIS_MODULE, .init = tls_init, .update = tls_update, + .get_info = tls_get_info, + .get_info_size = tls_get_info_size, }; static int __init tls_register(void) -- cgit v1.2.3 From bd0d9d159988d1ebb97ea244ae4dfa8365ed7d85 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 9 Aug 2019 22:27:30 +0200 Subject: serial: remove ks8695 driver The platform is getting removed, so 
there are no more users of this driver. Signed-off-by: Arnd Bergmann Link: https://lore.kernel.org/r/20190809202749.742267-3-arnd@arndb.de Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/Kconfig | 17 - drivers/tty/serial/Makefile | 1 - drivers/tty/serial/serial_ks8695.c | 698 ------------------------------------- include/uapi/linux/serial_core.h | 3 - 4 files changed, 719 deletions(-) delete mode 100644 drivers/tty/serial/serial_ks8695.c (limited to 'include/uapi') diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 3083dbae35f7..7041107ea78d 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -197,23 +197,6 @@ config SERIAL_KGDB_NMI If unsure, say N. -config SERIAL_KS8695 - bool "Micrel KS8695 (Centaur) serial port support" - depends on ARCH_KS8695 - select SERIAL_CORE - help - This selects the Micrel Centaur KS8695 UART. Say Y here. - -config SERIAL_KS8695_CONSOLE - bool "Support for console on KS8695 (Centaur) serial port" - depends on SERIAL_KS8695=y - select SERIAL_CORE_CONSOLE - help - Say Y here if you wish to use a KS8695 (Centaur) UART as the - system console (the system console is the device which - receives all kernel messages and warnings and which allows - logins in single user mode). - config SERIAL_MESON tristate "Meson serial port support" depends on ARCH_MESON diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index 15a0fccadf7e..7f744136489e 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -59,7 +59,6 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o obj-$(CONFIG_SERIAL_MSM) += msm_serial.o obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o -obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o obj-$(CONFIG_SERIAL_ST_ASC) += st-asc.o diff --git a/drivers/tty/serial/serial_ks8695.c b/drivers/tty/serial/serial_ks8695.c deleted file mode 100644 index b461d791188c..000000000000 --- a/drivers/tty/serial/serial_ks8695.c +++ /dev/null @@ -1,698 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0+ -/* - * Driver for KS8695 serial ports - * - * Based on drivers/serial/serial_amba.c, by Kam Lee. - * - * Copyright 2002-2005 Micrel Inc. 
- */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include - -#if defined(CONFIG_SERIAL_KS8695_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) -#define SUPPORT_SYSRQ -#endif - -#include - - -#define SERIAL_KS8695_MAJOR 204 -#define SERIAL_KS8695_MINOR 16 -#define SERIAL_KS8695_DEVNAME "ttyAM" - -#define SERIAL_KS8695_NR 1 - -/* - * Access macros for the KS8695 UART - */ -#define UART_GET_CHAR(p) (__raw_readl((p)->membase + KS8695_URRB) & 0xFF) -#define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + KS8695_URTH) -#define UART_GET_FCR(p) __raw_readl((p)->membase + KS8695_URFC) -#define UART_PUT_FCR(p, c) __raw_writel((c), (p)->membase + KS8695_URFC) -#define UART_GET_MSR(p) __raw_readl((p)->membase + KS8695_URMS) -#define UART_GET_LSR(p) __raw_readl((p)->membase + KS8695_URLS) -#define UART_GET_LCR(p) __raw_readl((p)->membase + KS8695_URLC) -#define UART_PUT_LCR(p, c) __raw_writel((c), (p)->membase + KS8695_URLC) -#define UART_GET_MCR(p) __raw_readl((p)->membase + KS8695_URMC) -#define UART_PUT_MCR(p, c) __raw_writel((c), (p)->membase + KS8695_URMC) -#define UART_GET_BRDR(p) __raw_readl((p)->membase + KS8695_URBD) -#define UART_PUT_BRDR(p, c) __raw_writel((c), (p)->membase + KS8695_URBD) - -#define KS8695_CLR_TX_INT() __raw_writel(1 << KS8695_IRQ_UART_TX, KS8695_IRQ_VA + KS8695_INTST) - -#define UART_DUMMY_LSR_RX 0x100 -#define UART_PORT_SIZE (KS8695_USR - KS8695_URRB + 4) - -static inline int tx_enabled(struct uart_port *port) -{ - return port->unused[0] & 1; -} - -static inline int rx_enabled(struct uart_port *port) -{ - return port->unused[0] & 2; -} - -static inline int ms_enabled(struct uart_port *port) -{ - return port->unused[0] & 4; -} - -static inline void ms_enable(struct uart_port *port, int enabled) -{ - if(enabled) - port->unused[0] |= 4; - else - port->unused[0] &= ~4; -} - -static inline void rx_enable(struct uart_port *port, int enabled) -{ - if(enabled) - port->unused[0] |= 2; - else - port->unused[0] &= ~2; -} - -static inline void tx_enable(struct uart_port *port, int enabled) -{ - if(enabled) - port->unused[0] |= 1; - else - port->unused[0] &= ~1; -} - - -#ifdef SUPPORT_SYSRQ -static struct console ks8695_console; -#endif - -static void ks8695uart_stop_tx(struct uart_port *port) -{ - if (tx_enabled(port)) { - /* use disable_irq_nosync() and not disable_irq() to avoid self - * imposed deadlock by not waiting for irq handler to end, - * since this ks8695uart_stop_tx() is called from interrupt context. 
- */ - disable_irq_nosync(KS8695_IRQ_UART_TX); - tx_enable(port, 0); - } -} - -static void ks8695uart_start_tx(struct uart_port *port) -{ - if (!tx_enabled(port)) { - enable_irq(KS8695_IRQ_UART_TX); - tx_enable(port, 1); - } -} - -static void ks8695uart_stop_rx(struct uart_port *port) -{ - if (rx_enabled(port)) { - disable_irq(KS8695_IRQ_UART_RX); - rx_enable(port, 0); - } -} - -static void ks8695uart_enable_ms(struct uart_port *port) -{ - if (!ms_enabled(port)) { - enable_irq(KS8695_IRQ_UART_MODEM_STATUS); - ms_enable(port,1); - } -} - -static void ks8695uart_disable_ms(struct uart_port *port) -{ - if (ms_enabled(port)) { - disable_irq(KS8695_IRQ_UART_MODEM_STATUS); - ms_enable(port,0); - } -} - -static irqreturn_t ks8695uart_rx_chars(int irq, void *dev_id) -{ - struct uart_port *port = dev_id; - unsigned int status, ch, lsr, flg, max_count = 256; - - status = UART_GET_LSR(port); /* clears pending LSR interrupts */ - while ((status & URLS_URDR) && max_count--) { - ch = UART_GET_CHAR(port); - flg = TTY_NORMAL; - - port->icount.rx++; - - /* - * Note that the error handling code is - * out of the main execution path - */ - lsr = UART_GET_LSR(port) | UART_DUMMY_LSR_RX; - if (unlikely(lsr & (URLS_URBI | URLS_URPE | URLS_URFE | URLS_URROE))) { - if (lsr & URLS_URBI) { - lsr &= ~(URLS_URFE | URLS_URPE); - port->icount.brk++; - if (uart_handle_break(port)) - goto ignore_char; - } - if (lsr & URLS_URPE) - port->icount.parity++; - if (lsr & URLS_URFE) - port->icount.frame++; - if (lsr & URLS_URROE) - port->icount.overrun++; - - lsr &= port->read_status_mask; - - if (lsr & URLS_URBI) - flg = TTY_BREAK; - else if (lsr & URLS_URPE) - flg = TTY_PARITY; - else if (lsr & URLS_URFE) - flg = TTY_FRAME; - } - - if (uart_handle_sysrq_char(port, ch)) - goto ignore_char; - - uart_insert_char(port, lsr, URLS_URROE, ch, flg); - -ignore_char: - status = UART_GET_LSR(port); - } - tty_flip_buffer_push(&port->state->port); - - return IRQ_HANDLED; -} - - -static irqreturn_t ks8695uart_tx_chars(int irq, void *dev_id) -{ - struct uart_port *port = dev_id; - struct circ_buf *xmit = &port->state->xmit; - unsigned int count; - - if (port->x_char) { - KS8695_CLR_TX_INT(); - UART_PUT_CHAR(port, port->x_char); - port->icount.tx++; - port->x_char = 0; - return IRQ_HANDLED; - } - - if (uart_tx_stopped(port) || uart_circ_empty(xmit)) { - ks8695uart_stop_tx(port); - return IRQ_HANDLED; - } - - count = 16; /* fifo size */ - while (!uart_circ_empty(xmit) && (count-- > 0)) { - KS8695_CLR_TX_INT(); - UART_PUT_CHAR(port, xmit->buf[xmit->tail]); - - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - port->icount.tx++; - } - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) - uart_write_wakeup(port); - - if (uart_circ_empty(xmit)) - ks8695uart_stop_tx(port); - - return IRQ_HANDLED; -} - -static irqreturn_t ks8695uart_modem_status(int irq, void *dev_id) -{ - struct uart_port *port = dev_id; - unsigned int status; - - /* - * clear modem interrupt by reading MSR - */ - status = UART_GET_MSR(port); - - if (status & URMS_URDDCD) - uart_handle_dcd_change(port, status & URMS_URDDCD); - - if (status & URMS_URDDST) - port->icount.dsr++; - - if (status & URMS_URDCTS) - uart_handle_cts_change(port, status & URMS_URDCTS); - - if (status & URMS_URTERI) - port->icount.rng++; - - wake_up_interruptible(&port->state->port.delta_msr_wait); - - return IRQ_HANDLED; -} - -static unsigned int ks8695uart_tx_empty(struct uart_port *port) -{ - return (UART_GET_LSR(port) & URLS_URTE) ? 
TIOCSER_TEMT : 0; -} - -static unsigned int ks8695uart_get_mctrl(struct uart_port *port) -{ - unsigned int result = 0; - unsigned int status; - - status = UART_GET_MSR(port); - if (status & URMS_URDCD) - result |= TIOCM_CAR; - if (status & URMS_URDSR) - result |= TIOCM_DSR; - if (status & URMS_URCTS) - result |= TIOCM_CTS; - if (status & URMS_URRI) - result |= TIOCM_RI; - - return result; -} - -static void ks8695uart_set_mctrl(struct uart_port *port, u_int mctrl) -{ - unsigned int mcr; - - mcr = UART_GET_MCR(port); - if (mctrl & TIOCM_RTS) - mcr |= URMC_URRTS; - else - mcr &= ~URMC_URRTS; - - if (mctrl & TIOCM_DTR) - mcr |= URMC_URDTR; - else - mcr &= ~URMC_URDTR; - - UART_PUT_MCR(port, mcr); -} - -static void ks8695uart_break_ctl(struct uart_port *port, int break_state) -{ - unsigned int lcr; - - lcr = UART_GET_LCR(port); - - if (break_state == -1) - lcr |= URLC_URSBC; - else - lcr &= ~URLC_URSBC; - - UART_PUT_LCR(port, lcr); -} - -static int ks8695uart_startup(struct uart_port *port) -{ - int retval; - - irq_modify_status(KS8695_IRQ_UART_TX, IRQ_NOREQUEST, IRQ_NOAUTOEN); - tx_enable(port, 0); - rx_enable(port, 1); - ms_enable(port, 1); - - /* - * Allocate the IRQ - */ - retval = request_irq(KS8695_IRQ_UART_TX, ks8695uart_tx_chars, 0, "UART TX", port); - if (retval) - goto err_tx; - - retval = request_irq(KS8695_IRQ_UART_RX, ks8695uart_rx_chars, 0, "UART RX", port); - if (retval) - goto err_rx; - - retval = request_irq(KS8695_IRQ_UART_LINE_STATUS, ks8695uart_rx_chars, 0, "UART LineStatus", port); - if (retval) - goto err_ls; - - retval = request_irq(KS8695_IRQ_UART_MODEM_STATUS, ks8695uart_modem_status, 0, "UART ModemStatus", port); - if (retval) - goto err_ms; - - return 0; - -err_ms: - free_irq(KS8695_IRQ_UART_LINE_STATUS, port); -err_ls: - free_irq(KS8695_IRQ_UART_RX, port); -err_rx: - free_irq(KS8695_IRQ_UART_TX, port); -err_tx: - return retval; -} - -static void ks8695uart_shutdown(struct uart_port *port) -{ - /* - * Free the interrupt - */ - free_irq(KS8695_IRQ_UART_RX, port); - free_irq(KS8695_IRQ_UART_TX, port); - free_irq(KS8695_IRQ_UART_MODEM_STATUS, port); - free_irq(KS8695_IRQ_UART_LINE_STATUS, port); - - /* disable break condition and fifos */ - UART_PUT_LCR(port, UART_GET_LCR(port) & ~URLC_URSBC); - UART_PUT_FCR(port, UART_GET_FCR(port) & ~URFC_URFE); -} - -static void ks8695uart_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) -{ - unsigned int lcr, fcr = 0; - unsigned long flags; - unsigned int baud, quot; - - /* - * Ask the core to calculate the divisor for us. - */ - baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); - quot = uart_get_divisor(port, baud); - - switch (termios->c_cflag & CSIZE) { - case CS5: - lcr = URCL_5; - break; - case CS6: - lcr = URCL_6; - break; - case CS7: - lcr = URCL_7; - break; - default: - lcr = URCL_8; - break; - } - - /* stop bits */ - if (termios->c_cflag & CSTOPB) - lcr |= URLC_URSB; - - /* parity */ - if (termios->c_cflag & PARENB) { - if (termios->c_cflag & CMSPAR) { /* Mark or Space parity */ - if (termios->c_cflag & PARODD) - lcr |= URPE_MARK; - else - lcr |= URPE_SPACE; - } - else if (termios->c_cflag & PARODD) - lcr |= URPE_ODD; - else - lcr |= URPE_EVEN; - } - - if (port->fifosize > 1) - fcr = URFC_URFRT_8 | URFC_URTFR | URFC_URRFR | URFC_URFE; - - spin_lock_irqsave(&port->lock, flags); - - /* - * Update the per-port timeout. 
- */ - uart_update_timeout(port, termios->c_cflag, baud); - - port->read_status_mask = URLS_URROE; - if (termios->c_iflag & INPCK) - port->read_status_mask |= (URLS_URFE | URLS_URPE); - if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) - port->read_status_mask |= URLS_URBI; - - /* - * Characters to ignore - */ - port->ignore_status_mask = 0; - if (termios->c_iflag & IGNPAR) - port->ignore_status_mask |= (URLS_URFE | URLS_URPE); - if (termios->c_iflag & IGNBRK) { - port->ignore_status_mask |= URLS_URBI; - /* - * If we're ignoring parity and break indicators, - * ignore overruns too (for real raw support). - */ - if (termios->c_iflag & IGNPAR) - port->ignore_status_mask |= URLS_URROE; - } - - /* - * Ignore all characters if CREAD is not set. - */ - if ((termios->c_cflag & CREAD) == 0) - port->ignore_status_mask |= UART_DUMMY_LSR_RX; - - /* first, disable everything */ - if (UART_ENABLE_MS(port, termios->c_cflag)) - ks8695uart_enable_ms(port); - else - ks8695uart_disable_ms(port); - - /* Set baud rate */ - UART_PUT_BRDR(port, quot); - - UART_PUT_LCR(port, lcr); - UART_PUT_FCR(port, fcr); - - spin_unlock_irqrestore(&port->lock, flags); -} - -static const char *ks8695uart_type(struct uart_port *port) -{ - return port->type == PORT_KS8695 ? "KS8695" : NULL; -} - -/* - * Release the memory region(s) being used by 'port' - */ -static void ks8695uart_release_port(struct uart_port *port) -{ - release_mem_region(port->mapbase, UART_PORT_SIZE); -} - -/* - * Request the memory region(s) being used by 'port' - */ -static int ks8695uart_request_port(struct uart_port *port) -{ - return request_mem_region(port->mapbase, UART_PORT_SIZE, - "serial_ks8695") != NULL ? 0 : -EBUSY; -} - -/* - * Configure/autoconfigure the port. - */ -static void ks8695uart_config_port(struct uart_port *port, int flags) -{ - if (flags & UART_CONFIG_TYPE) { - port->type = PORT_KS8695; - ks8695uart_request_port(port); - } -} - -/* - * verify the new serial_struct (for TIOCSSERIAL). 
- */ -static int ks8695uart_verify_port(struct uart_port *port, struct serial_struct *ser) -{ - int ret = 0; - - if (ser->type != PORT_UNKNOWN && ser->type != PORT_KS8695) - ret = -EINVAL; - if (ser->irq != port->irq) - ret = -EINVAL; - if (ser->baud_base < 9600) - ret = -EINVAL; - return ret; -} - -static struct uart_ops ks8695uart_pops = { - .tx_empty = ks8695uart_tx_empty, - .set_mctrl = ks8695uart_set_mctrl, - .get_mctrl = ks8695uart_get_mctrl, - .stop_tx = ks8695uart_stop_tx, - .start_tx = ks8695uart_start_tx, - .stop_rx = ks8695uart_stop_rx, - .enable_ms = ks8695uart_enable_ms, - .break_ctl = ks8695uart_break_ctl, - .startup = ks8695uart_startup, - .shutdown = ks8695uart_shutdown, - .set_termios = ks8695uart_set_termios, - .type = ks8695uart_type, - .release_port = ks8695uart_release_port, - .request_port = ks8695uart_request_port, - .config_port = ks8695uart_config_port, - .verify_port = ks8695uart_verify_port, -}; - -static struct uart_port ks8695uart_ports[SERIAL_KS8695_NR] = { - { - .membase = KS8695_UART_VA, - .mapbase = KS8695_UART_PA, - .iotype = SERIAL_IO_MEM, - .irq = KS8695_IRQ_UART_TX, - .uartclk = KS8695_CLOCK_RATE * 16, - .fifosize = 16, - .ops = &ks8695uart_pops, - .flags = UPF_BOOT_AUTOCONF, - .line = 0, - } -}; - -#ifdef CONFIG_SERIAL_KS8695_CONSOLE -static void ks8695_console_putchar(struct uart_port *port, int ch) -{ - while (!(UART_GET_LSR(port) & URLS_URTHRE)) - barrier(); - - UART_PUT_CHAR(port, ch); -} - -static void ks8695_console_write(struct console *co, const char *s, u_int count) -{ - struct uart_port *port = ks8695uart_ports + co->index; - - uart_console_write(port, s, count, ks8695_console_putchar); -} - -static void __init ks8695_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits) -{ - unsigned int lcr; - - lcr = UART_GET_LCR(port); - - switch (lcr & URLC_PARITY) { - case URPE_ODD: - *parity = 'o'; - break; - case URPE_EVEN: - *parity = 'e'; - break; - default: - *parity = 'n'; - } - - switch (lcr & URLC_URCL) { - case URCL_5: - *bits = 5; - break; - case URCL_6: - *bits = 6; - break; - case URCL_7: - *bits = 7; - break; - default: - *bits = 8; - } - - *baud = port->uartclk / (UART_GET_BRDR(port) & 0x0FFF); - *baud /= 16; - *baud &= 0xFFFFFFF0; -} - -static int __init ks8695_console_setup(struct console *co, char *options) -{ - struct uart_port *port; - int baud = 115200; - int bits = 8; - int parity = 'n'; - int flow = 'n'; - - /* - * Check whether an invalid uart number has been specified, and - * if so, search for the first available port that does have - * console support. 
- */
-	port = uart_get_console(ks8695uart_ports, SERIAL_KS8695_NR, co);
-
-	if (options)
-		uart_parse_options(options, &baud, &parity, &bits, &flow);
-	else
-		ks8695_console_get_options(port, &baud, &parity, &bits);
-
-	return uart_set_options(port, co, baud, parity, bits, flow);
-}
-
-static struct uart_driver ks8695_reg;
-
-static struct console ks8695_console = {
-	.name		= SERIAL_KS8695_DEVNAME,
-	.write		= ks8695_console_write,
-	.device		= uart_console_device,
-	.setup		= ks8695_console_setup,
-	.flags		= CON_PRINTBUFFER,
-	.index		= -1,
-	.data		= &ks8695_reg,
-};
-
-static int __init ks8695_console_init(void)
-{
-	add_preferred_console(SERIAL_KS8695_DEVNAME, 0, NULL);
-	register_console(&ks8695_console);
-	return 0;
-}
-
-console_initcall(ks8695_console_init);
-
-#define KS8695_CONSOLE	&ks8695_console
-#else
-#define KS8695_CONSOLE	NULL
-#endif
-
-static struct uart_driver ks8695_reg = {
-	.owner		= THIS_MODULE,
-	.driver_name	= "serial_ks8695",
-	.dev_name	= SERIAL_KS8695_DEVNAME,
-	.major		= SERIAL_KS8695_MAJOR,
-	.minor		= SERIAL_KS8695_MINOR,
-	.nr		= SERIAL_KS8695_NR,
-	.cons		= KS8695_CONSOLE,
-};
-
-static int __init ks8695uart_init(void)
-{
-	int i, ret;
-
-	printk(KERN_INFO "Serial: Micrel KS8695 UART driver\n");
-
-	ret = uart_register_driver(&ks8695_reg);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < SERIAL_KS8695_NR; i++)
-		uart_add_one_port(&ks8695_reg, &ks8695uart_ports[0]);
-
-	return 0;
-}
-
-static void __exit ks8695uart_exit(void)
-{
-	int i;
-
-	for (i = 0; i < SERIAL_KS8695_NR; i++)
-		uart_remove_one_port(&ks8695_reg, &ks8695uart_ports[0]);
-	uart_unregister_driver(&ks8695_reg);
-}
-
-module_init(ks8695uart_init);
-module_exit(ks8695uart_exit);
-
-MODULE_DESCRIPTION("KS8695 serial port driver");
-MODULE_AUTHOR("Micrel Inc.");
-MODULE_LICENSE("GPL");
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 3cc3af1c2ee1..e8dc1787c3c6 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -161,9 +161,6 @@
 /* Blackfin bf5xx */
 #define PORT_BFIN	75

-/* Micrel KS8695 */
-#define PORT_KS8695	76
-
 /* Broadcom SB1250, etc. SOC */
 #define PORT_SB1250_DUART	77
-- cgit v1.2.3


From 8515dbc1f51b4f728908cdb993c849c83743aba7 Mon Sep 17 00:00:00 2001
From: Kai-Heng Feng
Date: Sat, 10 Aug 2019 03:01:29 +0800
Subject: serial: 8250_pci: Add support for Sunix serial boards

Add support for Sunix serial boards with up to 16 ports.

Sunix boards need their own setup callback instead of using Timedia's,
to properly support more than 4 ports.
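For reference, the BAR/offset mapping implemented by the new setup
callback works out as follows. This is a standalone sketch of the offset
arithmetic only (assuming uart_offset = 0x8 as in the pbn_sunix_pci_*
entries), not code from this patch; pci_sunix_setup() below is the
authoritative implementation:

	/* Ports 0-3 sit in BAR 0 at idx * 0x8; ports 4 and up are
	 * grouped four to a 64-byte block in BAR 1.
	 */
	#include <stdio.h>

	int main(void)
	{
		const int uart_offset = 0x8;
		int idx;

		for (idx = 0; idx < 16; idx++) {
			int bar, offset;

			if (idx < 4) {
				bar = 0;
				offset = idx * uart_offset;
			} else {
				int i = idx - 4;

				bar = 1;
				offset = (i / 4) * 64 + (i % 4) * uart_offset;
			}
			printf("port %2d -> BAR %d, offset 0x%02x\n",
			       idx, bar, offset);
		}
		return 0;
	}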
Cc: Morris Ku Cc: Debbie Liu Signed-off-by: Kai-Heng Feng Link: https://lore.kernel.org/r/20190809190130.30773-1-kai.heng.feng@canonical.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_pci.c | 93 ++++++++++++++++++++++++++++++------- drivers/tty/serial/8250/8250_port.c | 8 ++++ include/uapi/linux/serial_core.h | 3 ++ 3 files changed, 87 insertions(+), 17 deletions(-) (limited to 'include/uapi') diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index d0be326a17e5..c0d10a35bf70 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1803,6 +1803,30 @@ pci_wch_ch38x_setup(struct serial_private *priv, return pci_default_setup(priv, board, port, idx); } +static int +pci_sunix_setup(struct serial_private *priv, + const struct pciserial_board *board, + struct uart_8250_port *port, int idx) +{ + int bar; + int offset; + + port->port.flags |= UPF_FIXED_TYPE; + port->port.type = PORT_SUNIX; + + if (idx < 4) { + bar = 0; + offset = idx * board->uart_offset; + } else { + bar = 1; + idx -= 4; + idx = div_s64_rem(idx, 4, &offset); + offset = idx * 64 + offset * board->uart_offset; + } + + return setup_port(priv, port, bar, offset, 0); +} + #define PCI_VENDOR_ID_SBSMODULARIO 0x124B #define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B #define PCI_DEVICE_ID_OCTPRO 0x0001 @@ -2490,21 +2514,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { .setup = pci_timedia_setup, }, /* - * SUNIX (Timedia) cards - * Do not "probe" for these cards as there is at least one combination - * card that should be handled by parport_pc that doesn't match the - * rule in pci_timedia_probe. - * It is part number is MIO5079A but its subdevice ID is 0x0102. - * There are some boards with part number SER5037AL that report - * subdevice ID 0x0002. 
+ * Sunix PCI serial boards */ { .vendor = PCI_VENDOR_ID_SUNIX, .device = PCI_DEVICE_ID_SUNIX_1999, .subvendor = PCI_VENDOR_ID_SUNIX, .subdevice = PCI_ANY_ID, - .init = pci_timedia_init, - .setup = pci_timedia_setup, + .setup = pci_sunix_setup, }, /* * Xircom cards @@ -2965,6 +2982,11 @@ enum pci_board_num_t { pbn_pericom_PI7C9X7952, pbn_pericom_PI7C9X7954, pbn_pericom_PI7C9X7958, + pbn_sunix_pci_1s, + pbn_sunix_pci_2s, + pbn_sunix_pci_4s, + pbn_sunix_pci_8s, + pbn_sunix_pci_16s, }; /* @@ -3751,6 +3773,31 @@ static struct pciserial_board pci_boards[] = { .base_baud = 921600, .uart_offset = 0x8, }, + [pbn_sunix_pci_1s] = { + .num_ports = 1, + .base_baud = 921600, + .uart_offset = 0x8, + }, + [pbn_sunix_pci_2s] = { + .num_ports = 2, + .base_baud = 921600, + .uart_offset = 0x8, + }, + [pbn_sunix_pci_4s] = { + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 0x8, + }, + [pbn_sunix_pci_8s] = { + .num_ports = 8, + .base_baud = 921600, + .uart_offset = 0x8, + }, + [pbn_sunix_pci_16s] = { + .num_ports = 16, + .base_baud = 921600, + .uart_offset = 0x8, + }, }; static const struct pci_device_id blacklist[] = { @@ -4788,17 +4835,29 @@ static const struct pci_device_id serial_pci_tbl[] = { pbn_b0_bt_1_921600 }, /* - * SUNIX (TIMEDIA) + * Sunix PCI serial boards */ { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, - PCI_VENDOR_ID_SUNIX, PCI_ANY_ID, - PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xffff00, - pbn_b0_bt_1_921600 }, - + PCI_VENDOR_ID_SUNIX, 0x0001, 0, 0, + pbn_sunix_pci_1s }, { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, - PCI_VENDOR_ID_SUNIX, PCI_ANY_ID, - PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, - pbn_b0_bt_1_921600 }, + PCI_VENDOR_ID_SUNIX, 0x0002, 0, 0, + pbn_sunix_pci_2s }, + { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, + PCI_VENDOR_ID_SUNIX, 0x0004, 0, 0, + pbn_sunix_pci_4s }, + { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, + PCI_VENDOR_ID_SUNIX, 0x0084, 0, 0, + pbn_sunix_pci_4s }, + { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, + PCI_VENDOR_ID_SUNIX, 0x0008, 0, 0, + pbn_sunix_pci_8s }, + { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, + PCI_VENDOR_ID_SUNIX, 0x0088, 0, 0, + pbn_sunix_pci_8s }, + { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, + PCI_VENDOR_ID_SUNIX, 0x0010, 0, 0, + pbn_sunix_pci_16s }, /* * AFAVLAB serial card, from Harald Welte diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 706645f89132..8407166610ce 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -301,6 +301,14 @@ static const struct serial8250_config uart_config[] = { .rxtrig_bytes = {1, 4, 8, 14}, .flags = UART_CAP_FIFO, }, + [PORT_SUNIX] = { + .name = "Sunix", + .fifo_size = 128, + .tx_loadsz = 128, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, + .rxtrig_bytes = {1, 32, 64, 112}, + .flags = UART_CAP_FIFO | UART_CAP_SLEEP, + }, }; /* Uart divisor latch read */ diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h index e8dc1787c3c6..bd0d9d176a66 100644 --- a/include/uapi/linux/serial_core.h +++ b/include/uapi/linux/serial_core.h @@ -287,4 +287,7 @@ /* SiFive UART */ #define PORT_SIFIVE_V0 120 +/* Sunix UART */ +#define PORT_SUNIX 121 + #endif /* _UAPILINUX_SERIAL_CORE_H */ -- cgit v1.2.3 From 09864c1cdf5c537bd01bff45181406e422ea988c Mon Sep 17 00:00:00 2001 From: Stefan-gabriel Mirea Date: Fri, 9 Aug 2019 11:29:16 +0000 Subject: tty: serial: Add linflexuart driver for S32V234 Introduce support for LINFlex driver, based on: - the version of Freescale LPUART driver after 
commit b3e3bf2ef2c7 ("Merge 4.0-rc7 into tty-next"); - commit abf1e0a98083 ("tty: serial: fsl_lpuart: lock port on console write"). In this basic version, the driver can be tested using initramfs and relies on the clocks and pin muxing set up by U-Boot. Remarks concerning the earlycon support: - LinFlexD does not allow character transmissions in the INIT mode (see section 47.4.2.1 in the reference manual[1]). Therefore, a mutual exclusion between the first linflex_setup_watermark/linflex_set_termios executions and linflex_earlycon_putchar was employed and the characters normally sent to earlycon during initialization are kept in a buffer and sent afterwards. - Empirically, character transmission is also forbidden within the last 1-2 ms before entering the INIT mode, so we use an explicit timeout (PREINIT_DELAY) between linflex_earlycon_putchar and the first call to linflex_setup_watermark. - U-Boot currently uses the UART FIFO mode, while this driver makes the transition to the buffer mode. Therefore, the earlycon putchar function matches the U-Boot behavior before initializations and the Linux behavior after. [1] https://www.nxp.com/webapp/Download?colCode=S32V234RM Signed-off-by: Stoica Cosmin-Stefan Signed-off-by: Adrian.Nitu Signed-off-by: Larisa Grigore Signed-off-by: Ana Nedelcu Signed-off-by: Mihaela Martinas Signed-off-by: Matthew Nunez [stefan-gabriel.mirea@nxp.com: Reduced for upstreaming and implemented earlycon support] Signed-off-by: Stefan-Gabriel Mirea Link: https://lore.kernel.org/r/20190809112853.15846-6-stefan-gabriel.mirea@nxp.com Signed-off-by: Greg Kroah-Hartman --- Documentation/admin-guide/kernel-parameters.txt | 6 + drivers/tty/serial/Kconfig | 15 + drivers/tty/serial/Makefile | 1 + drivers/tty/serial/fsl_linflexuart.c | 942 ++++++++++++++++++++++++ include/uapi/linux/serial_core.h | 3 + 5 files changed, 967 insertions(+) create mode 100644 drivers/tty/serial/fsl_linflexuart.c (limited to 'include/uapi') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 47d981a86e2f..9ee66d61986b 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1090,6 +1090,12 @@ the framebuffer, pass the 'ram' option so that it is mapped with the correct attributes. + linflex, + Use early console provided by Freescale LinFlex UART + serial driver for NXP S32V234 SoCs. A valid base + address must be provided, and the serial port must + already be setup and configured. + earlyprintk= [X86,SH,ARM,M68k,S390] earlyprintk=vga earlyprintk=sclp diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 7041107ea78d..db524e2c6db3 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1416,6 +1416,21 @@ config SERIAL_FSL_LPUART_CONSOLE If you have enabled the lpuart serial port on the Freescale SoCs, you can make it the console by answering Y to this option. +config SERIAL_FSL_LINFLEXUART + tristate "Freescale linflexuart serial port support" + select SERIAL_CORE + help + Support for the on-chip linflexuart on some Freescale SOCs. + +config SERIAL_FSL_LINFLEXUART_CONSOLE + bool "Console on Freescale linflexuart serial port" + depends on SERIAL_FSL_LINFLEXUART=y + select SERIAL_CORE_CONSOLE + select SERIAL_EARLYCON + help + If you have enabled the linflexuart serial port on the Freescale + SoCs, you can make it the console by answering Y to this option. 
+ config SERIAL_CONEXANT_DIGICOLOR tristate "Conexant Digicolor CX92xxx USART serial port support" depends on OF diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index 7f744136489e..490d74533fbd 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -80,6 +80,7 @@ obj-$(CONFIG_SERIAL_EFM32_UART) += efm32-uart.o obj-$(CONFIG_SERIAL_ARC) += arc_uart.o obj-$(CONFIG_SERIAL_RP2) += rp2.o obj-$(CONFIG_SERIAL_FSL_LPUART) += fsl_lpuart.o +obj-$(CONFIG_SERIAL_FSL_LINFLEXUART) += fsl_linflexuart.o obj-$(CONFIG_SERIAL_CONEXANT_DIGICOLOR) += digicolor-usart.o obj-$(CONFIG_SERIAL_MEN_Z135) += men_z135_uart.o obj-$(CONFIG_SERIAL_SPRD) += sprd_serial.o diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c new file mode 100644 index 000000000000..26b9601a0952 --- /dev/null +++ b/drivers/tty/serial/fsl_linflexuart.c @@ -0,0 +1,942 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Freescale linflexuart serial port driver + * + * Copyright 2012-2016 Freescale Semiconductor, Inc. + * Copyright 2017-2018 NXP + */ + +#if defined(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE) && \ + defined(CONFIG_MAGIC_SYSRQ) +#define SUPPORT_SYSRQ +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* All registers are 32-bit width */ + +#define LINCR1 0x0000 /* LIN control register */ +#define LINIER 0x0004 /* LIN interrupt enable register */ +#define LINSR 0x0008 /* LIN status register */ +#define LINESR 0x000C /* LIN error status register */ +#define UARTCR 0x0010 /* UART mode control register */ +#define UARTSR 0x0014 /* UART mode status register */ +#define LINTCSR 0x0018 /* LIN timeout control status register */ +#define LINOCR 0x001C /* LIN output compare register */ +#define LINTOCR 0x0020 /* LIN timeout control register */ +#define LINFBRR 0x0024 /* LIN fractional baud rate register */ +#define LINIBRR 0x0028 /* LIN integer baud rate register */ +#define LINCFR 0x002C /* LIN checksum field register */ +#define LINCR2 0x0030 /* LIN control register 2 */ +#define BIDR 0x0034 /* Buffer identifier register */ +#define BDRL 0x0038 /* Buffer data register least significant */ +#define BDRM 0x003C /* Buffer data register most significant */ +#define IFER 0x0040 /* Identifier filter enable register */ +#define IFMI 0x0044 /* Identifier filter match index */ +#define IFMR 0x0048 /* Identifier filter mode register */ +#define GCR 0x004C /* Global control register */ +#define UARTPTO 0x0050 /* UART preset timeout register */ +#define UARTCTO 0x0054 /* UART current timeout register */ + +/* + * Register field definitions + */ + +#define LINFLEXD_LINCR1_INIT BIT(0) +#define LINFLEXD_LINCR1_MME BIT(4) +#define LINFLEXD_LINCR1_BF BIT(7) + +#define LINFLEXD_LINSR_LINS_INITMODE BIT(12) +#define LINFLEXD_LINSR_LINS_MASK (0xF << 12) + +#define LINFLEXD_LINIER_SZIE BIT(15) +#define LINFLEXD_LINIER_OCIE BIT(14) +#define LINFLEXD_LINIER_BEIE BIT(13) +#define LINFLEXD_LINIER_CEIE BIT(12) +#define LINFLEXD_LINIER_HEIE BIT(11) +#define LINFLEXD_LINIER_FEIE BIT(8) +#define LINFLEXD_LINIER_BOIE BIT(7) +#define LINFLEXD_LINIER_LSIE BIT(6) +#define LINFLEXD_LINIER_WUIE BIT(5) +#define LINFLEXD_LINIER_DBFIE BIT(4) +#define LINFLEXD_LINIER_DBEIETOIE BIT(3) +#define LINFLEXD_LINIER_DRIE BIT(2) +#define LINFLEXD_LINIER_DTIE BIT(1) +#define LINFLEXD_LINIER_HRIE BIT(0) + +#define LINFLEXD_UARTCR_OSR_MASK (0xF << 24) +#define LINFLEXD_UARTCR_OSR(uartcr) (((uartcr) \ + & LINFLEXD_UARTCR_OSR_MASK) >> 24) + +#define 
LINFLEXD_UARTCR_ROSE BIT(23) + +#define LINFLEXD_UARTCR_RFBM BIT(9) +#define LINFLEXD_UARTCR_TFBM BIT(8) +#define LINFLEXD_UARTCR_WL1 BIT(7) +#define LINFLEXD_UARTCR_PC1 BIT(6) + +#define LINFLEXD_UARTCR_RXEN BIT(5) +#define LINFLEXD_UARTCR_TXEN BIT(4) +#define LINFLEXD_UARTCR_PC0 BIT(3) + +#define LINFLEXD_UARTCR_PCE BIT(2) +#define LINFLEXD_UARTCR_WL0 BIT(1) +#define LINFLEXD_UARTCR_UART BIT(0) + +#define LINFLEXD_UARTSR_SZF BIT(15) +#define LINFLEXD_UARTSR_OCF BIT(14) +#define LINFLEXD_UARTSR_PE3 BIT(13) +#define LINFLEXD_UARTSR_PE2 BIT(12) +#define LINFLEXD_UARTSR_PE1 BIT(11) +#define LINFLEXD_UARTSR_PE0 BIT(10) +#define LINFLEXD_UARTSR_RMB BIT(9) +#define LINFLEXD_UARTSR_FEF BIT(8) +#define LINFLEXD_UARTSR_BOF BIT(7) +#define LINFLEXD_UARTSR_RPS BIT(6) +#define LINFLEXD_UARTSR_WUF BIT(5) +#define LINFLEXD_UARTSR_4 BIT(4) + +#define LINFLEXD_UARTSR_TO BIT(3) + +#define LINFLEXD_UARTSR_DRFRFE BIT(2) +#define LINFLEXD_UARTSR_DTFTFF BIT(1) +#define LINFLEXD_UARTSR_NF BIT(0) +#define LINFLEXD_UARTSR_PE (LINFLEXD_UARTSR_PE0 |\ + LINFLEXD_UARTSR_PE1 |\ + LINFLEXD_UARTSR_PE2 |\ + LINFLEXD_UARTSR_PE3) + +#define LINFLEX_LDIV_MULTIPLIER (16) + +#define DRIVER_NAME "fsl-linflexuart" +#define DEV_NAME "ttyLF" +#define UART_NR 4 + +#define EARLYCON_BUFFER_INITIAL_CAP 8 + +#define PREINIT_DELAY 2000 /* us */ + +static const struct of_device_id linflex_dt_ids[] = { + { + .compatible = "fsl,s32-linflexuart", + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, linflex_dt_ids); + +#ifdef CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE +static struct uart_port *earlycon_port; +static bool linflex_earlycon_same_instance; +static spinlock_t init_lock; +static bool during_init; + +static struct { + char *content; + unsigned int len, cap; +} earlycon_buf; +#endif + +static void linflex_stop_tx(struct uart_port *port) +{ + unsigned long ier; + + ier = readl(port->membase + LINIER); + ier &= ~(LINFLEXD_LINIER_DTIE); + writel(ier, port->membase + LINIER); +} + +static void linflex_stop_rx(struct uart_port *port) +{ + unsigned long ier; + + ier = readl(port->membase + LINIER); + writel(ier & ~LINFLEXD_LINIER_DRIE, port->membase + LINIER); +} + +static inline void linflex_transmit_buffer(struct uart_port *sport) +{ + struct circ_buf *xmit = &sport->state->xmit; + unsigned char c; + unsigned long status; + + while (!uart_circ_empty(xmit)) { + c = xmit->buf[xmit->tail]; + writeb(c, sport->membase + BDRL); + + /* Waiting for data transmission completed. 
*/ + while (((status = readl(sport->membase + UARTSR)) & + LINFLEXD_UARTSR_DTFTFF) != + LINFLEXD_UARTSR_DTFTFF) + ; + + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + sport->icount.tx++; + + writel(status | LINFLEXD_UARTSR_DTFTFF, + sport->membase + UARTSR); + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(sport); + + if (uart_circ_empty(xmit)) + linflex_stop_tx(sport); +} + +static void linflex_start_tx(struct uart_port *port) +{ + unsigned long ier; + + linflex_transmit_buffer(port); + ier = readl(port->membase + LINIER); + writel(ier | LINFLEXD_LINIER_DTIE, port->membase + LINIER); +} + +static irqreturn_t linflex_txint(int irq, void *dev_id) +{ + struct uart_port *sport = dev_id; + struct circ_buf *xmit = &sport->state->xmit; + unsigned long flags; + unsigned long status; + + spin_lock_irqsave(&sport->lock, flags); + + if (sport->x_char) { + writeb(sport->x_char, sport->membase + BDRL); + + /* waiting for data transmission completed */ + while (((status = readl(sport->membase + UARTSR)) & + LINFLEXD_UARTSR_DTFTFF) != LINFLEXD_UARTSR_DTFTFF) + ; + + writel(status | LINFLEXD_UARTSR_DTFTFF, + sport->membase + UARTSR); + + goto out; + } + + if (uart_circ_empty(xmit) || uart_tx_stopped(sport)) { + linflex_stop_tx(sport); + goto out; + } + + linflex_transmit_buffer(sport); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(sport); + +out: + spin_unlock_irqrestore(&sport->lock, flags); + return IRQ_HANDLED; +} + +static irqreturn_t linflex_rxint(int irq, void *dev_id) +{ + struct uart_port *sport = dev_id; + unsigned int flg; + struct tty_port *port = &sport->state->port; + unsigned long flags, status; + unsigned char rx; + + spin_lock_irqsave(&sport->lock, flags); + + status = readl(sport->membase + UARTSR); + while (status & LINFLEXD_UARTSR_RMB) { + rx = readb(sport->membase + BDRM); + flg = TTY_NORMAL; + sport->icount.rx++; + + if (status & (LINFLEXD_UARTSR_BOF | LINFLEXD_UARTSR_SZF | + LINFLEXD_UARTSR_FEF | LINFLEXD_UARTSR_PE)) { + if (status & LINFLEXD_UARTSR_SZF) + status |= LINFLEXD_UARTSR_SZF; + if (status & LINFLEXD_UARTSR_BOF) + status |= LINFLEXD_UARTSR_BOF; + if (status & LINFLEXD_UARTSR_FEF) + status |= LINFLEXD_UARTSR_FEF; + if (status & LINFLEXD_UARTSR_PE) + status |= LINFLEXD_UARTSR_PE; + } + + writel(status | LINFLEXD_UARTSR_RMB | LINFLEXD_UARTSR_DRFRFE, + sport->membase + UARTSR); + status = readl(sport->membase + UARTSR); + + if (uart_handle_sysrq_char(sport, (unsigned char)rx)) + continue; + +#ifdef SUPPORT_SYSRQ + sport->sysrq = 0; +#endif + tty_insert_flip_char(port, rx, flg); + } + + spin_unlock_irqrestore(&sport->lock, flags); + + tty_flip_buffer_push(port); + + return IRQ_HANDLED; +} + +static irqreturn_t linflex_int(int irq, void *dev_id) +{ + struct uart_port *sport = dev_id; + unsigned long status; + + status = readl(sport->membase + UARTSR); + + if (status & LINFLEXD_UARTSR_DRFRFE) + linflex_rxint(irq, dev_id); + if (status & LINFLEXD_UARTSR_DTFTFF) + linflex_txint(irq, dev_id); + + return IRQ_HANDLED; +} + +/* return TIOCSER_TEMT when transmitter is not busy */ +static unsigned int linflex_tx_empty(struct uart_port *port) +{ + unsigned long status; + + status = readl(port->membase + UARTSR) & LINFLEXD_UARTSR_DTFTFF; + + return status ? 
TIOCSER_TEMT : 0; +} + +static unsigned int linflex_get_mctrl(struct uart_port *port) +{ + return 0; +} + +static void linflex_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static void linflex_break_ctl(struct uart_port *port, int break_state) +{ +} + +static void linflex_setup_watermark(struct uart_port *sport) +{ + unsigned long cr, ier, cr1; + + /* Disable transmission/reception */ + ier = readl(sport->membase + LINIER); + ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE); + writel(ier, sport->membase + LINIER); + + cr = readl(sport->membase + UARTCR); + cr &= ~(LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN); + writel(cr, sport->membase + UARTCR); + + /* Enter initialization mode by setting INIT bit */ + + /* set the Linflex in master mode and activate by-pass filter */ + cr1 = LINFLEXD_LINCR1_BF | LINFLEXD_LINCR1_MME + | LINFLEXD_LINCR1_INIT; + writel(cr1, sport->membase + LINCR1); + + /* wait for init mode entry */ + while ((readl(sport->membase + LINSR) + & LINFLEXD_LINSR_LINS_MASK) + != LINFLEXD_LINSR_LINS_INITMODE) + ; + + /* + * UART = 0x1; - Linflex working in UART mode + * TXEN = 0x1; - Enable transmission of data now + * RXEn = 0x1; - Receiver enabled + * WL0 = 0x1; - 8 bit data + * PCE = 0x0; - No parity + */ + + /* set UART bit to allow writing other bits */ + writel(LINFLEXD_UARTCR_UART, sport->membase + UARTCR); + + cr = (LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN | + LINFLEXD_UARTCR_WL0 | LINFLEXD_UARTCR_UART); + + writel(cr, sport->membase + UARTCR); + + cr1 &= ~(LINFLEXD_LINCR1_INIT); + + writel(cr1, sport->membase + LINCR1); + + ier = readl(sport->membase + LINIER); + ier |= LINFLEXD_LINIER_DRIE; + ier |= LINFLEXD_LINIER_DTIE; + + writel(ier, sport->membase + LINIER); +} + +static int linflex_startup(struct uart_port *port) +{ + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + linflex_setup_watermark(port); + + spin_unlock_irqrestore(&port->lock, flags); + + ret = devm_request_irq(port->dev, port->irq, linflex_int, 0, + DRIVER_NAME, port); + + return ret; +} + +static void linflex_shutdown(struct uart_port *port) +{ + unsigned long ier; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* disable interrupts */ + ier = readl(port->membase + LINIER); + ier &= ~(LINFLEXD_LINIER_DRIE | LINFLEXD_LINIER_DTIE); + writel(ier, port->membase + LINIER); + + spin_unlock_irqrestore(&port->lock, flags); + + devm_free_irq(port->dev, port->irq, port); +} + +static void +linflex_set_termios(struct uart_port *port, struct ktermios *termios, + struct ktermios *old) +{ + unsigned long flags; + unsigned long cr, old_cr, cr1; + unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8; + + cr = readl(port->membase + UARTCR); + old_cr = cr; + + /* Enter initialization mode by setting INIT bit */ + cr1 = readl(port->membase + LINCR1); + cr1 |= LINFLEXD_LINCR1_INIT; + writel(cr1, port->membase + LINCR1); + + /* wait for init mode entry */ + while ((readl(port->membase + LINSR) + & LINFLEXD_LINSR_LINS_MASK) + != LINFLEXD_LINSR_LINS_INITMODE) + ; + + /* + * only support CS8 and CS7, and for CS7 must enable PE. 
+ * supported mode: + * - (7,e/o,1) + * - (8,n,1) + * - (8,e/o,1) + */ + /* enter the UART into configuration mode */ + + while ((termios->c_cflag & CSIZE) != CS8 && + (termios->c_cflag & CSIZE) != CS7) { + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= old_csize; + old_csize = CS8; + } + + if ((termios->c_cflag & CSIZE) == CS7) { + /* Word length: WL1WL0:00 */ + cr = old_cr & ~LINFLEXD_UARTCR_WL1 & ~LINFLEXD_UARTCR_WL0; + } + + if ((termios->c_cflag & CSIZE) == CS8) { + /* Word length: WL1WL0:01 */ + cr = (old_cr | LINFLEXD_UARTCR_WL0) & ~LINFLEXD_UARTCR_WL1; + } + + if (termios->c_cflag & CMSPAR) { + if ((termios->c_cflag & CSIZE) != CS8) { + termios->c_cflag &= ~CSIZE; + termios->c_cflag |= CS8; + } + /* has a space/sticky bit */ + cr |= LINFLEXD_UARTCR_WL0; + } + + if (termios->c_cflag & CSTOPB) + termios->c_cflag &= ~CSTOPB; + + /* parity must be enabled when CS7 to match 8-bits format */ + if ((termios->c_cflag & CSIZE) == CS7) + termios->c_cflag |= PARENB; + + if ((termios->c_cflag & PARENB)) { + cr |= LINFLEXD_UARTCR_PCE; + if (termios->c_cflag & PARODD) + cr = (cr | LINFLEXD_UARTCR_PC0) & + (~LINFLEXD_UARTCR_PC1); + else + cr = cr & (~LINFLEXD_UARTCR_PC1 & + ~LINFLEXD_UARTCR_PC0); + } else { + cr &= ~LINFLEXD_UARTCR_PCE; + } + + spin_lock_irqsave(&port->lock, flags); + + port->read_status_mask = 0; + + if (termios->c_iflag & INPCK) + port->read_status_mask |= (LINFLEXD_UARTSR_FEF | + LINFLEXD_UARTSR_PE0 | + LINFLEXD_UARTSR_PE1 | + LINFLEXD_UARTSR_PE2 | + LINFLEXD_UARTSR_PE3); + if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) + port->read_status_mask |= LINFLEXD_UARTSR_FEF; + + /* characters to ignore */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= LINFLEXD_UARTSR_PE; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= LINFLEXD_UARTSR_PE; + /* + * if we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). 
+ */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= LINFLEXD_UARTSR_BOF; + } + + writel(cr, port->membase + UARTCR); + + cr1 &= ~(LINFLEXD_LINCR1_INIT); + + writel(cr1, port->membase + LINCR1); + + spin_unlock_irqrestore(&port->lock, flags); +} + +static const char *linflex_type(struct uart_port *port) +{ + return "FSL_LINFLEX"; +} + +static void linflex_release_port(struct uart_port *port) +{ + /* nothing to do */ +} + +static int linflex_request_port(struct uart_port *port) +{ + return 0; +} + +/* configure/auto-configure the port */ +static void linflex_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_LINFLEXUART; +} + +static const struct uart_ops linflex_pops = { + .tx_empty = linflex_tx_empty, + .set_mctrl = linflex_set_mctrl, + .get_mctrl = linflex_get_mctrl, + .stop_tx = linflex_stop_tx, + .start_tx = linflex_start_tx, + .stop_rx = linflex_stop_rx, + .break_ctl = linflex_break_ctl, + .startup = linflex_startup, + .shutdown = linflex_shutdown, + .set_termios = linflex_set_termios, + .type = linflex_type, + .request_port = linflex_request_port, + .release_port = linflex_release_port, + .config_port = linflex_config_port, +}; + +static struct uart_port *linflex_ports[UART_NR]; + +#ifdef CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE +static void linflex_console_putchar(struct uart_port *port, int ch) +{ + unsigned long cr; + + cr = readl(port->membase + UARTCR); + + writeb(ch, port->membase + BDRL); + + if (!(cr & LINFLEXD_UARTCR_TFBM)) + while ((readl(port->membase + UARTSR) & + LINFLEXD_UARTSR_DTFTFF) + != LINFLEXD_UARTSR_DTFTFF) + ; + else + while (readl(port->membase + UARTSR) & + LINFLEXD_UARTSR_DTFTFF) + ; + + if (!(cr & LINFLEXD_UARTCR_TFBM)) { + writel((readl(port->membase + UARTSR) | + LINFLEXD_UARTSR_DTFTFF), + port->membase + UARTSR); + } +} + +static void linflex_earlycon_putchar(struct uart_port *port, int ch) +{ + unsigned long flags; + char *ret; + + if (!linflex_earlycon_same_instance) { + linflex_console_putchar(port, ch); + return; + } + + spin_lock_irqsave(&init_lock, flags); + if (!during_init) + goto outside_init; + + if (earlycon_buf.len >= 1 << CONFIG_LOG_BUF_SHIFT) + goto init_release; + + if (!earlycon_buf.cap) { + earlycon_buf.content = kmalloc(EARLYCON_BUFFER_INITIAL_CAP, + GFP_ATOMIC); + earlycon_buf.cap = earlycon_buf.content ? 
+ EARLYCON_BUFFER_INITIAL_CAP : 0; + } else if (earlycon_buf.len == earlycon_buf.cap) { + ret = krealloc(earlycon_buf.content, earlycon_buf.cap << 1, + GFP_ATOMIC); + if (ret) { + earlycon_buf.content = ret; + earlycon_buf.cap <<= 1; + } + } + + if (earlycon_buf.len < earlycon_buf.cap) + earlycon_buf.content[earlycon_buf.len++] = ch; + + goto init_release; + +outside_init: + linflex_console_putchar(port, ch); +init_release: + spin_unlock_irqrestore(&init_lock, flags); +} + +static void linflex_string_write(struct uart_port *sport, const char *s, + unsigned int count) +{ + unsigned long cr, ier = 0; + + ier = readl(sport->membase + LINIER); + linflex_stop_tx(sport); + + cr = readl(sport->membase + UARTCR); + cr |= (LINFLEXD_UARTCR_TXEN); + writel(cr, sport->membase + UARTCR); + + uart_console_write(sport, s, count, linflex_console_putchar); + + writel(ier, sport->membase + LINIER); +} + +static void +linflex_console_write(struct console *co, const char *s, unsigned int count) +{ + struct uart_port *sport = linflex_ports[co->index]; + unsigned long flags; + int locked = 1; + + if (sport->sysrq) + locked = 0; + else if (oops_in_progress) + locked = spin_trylock_irqsave(&sport->lock, flags); + else + spin_lock_irqsave(&sport->lock, flags); + + linflex_string_write(sport, s, count); + + if (locked) + spin_unlock_irqrestore(&sport->lock, flags); +} + +/* + * if the port was already initialised (eg, by a boot loader), + * try to determine the current setup. + */ +static void __init +linflex_console_get_options(struct uart_port *sport, int *parity, int *bits) +{ + unsigned long cr; + + cr = readl(sport->membase + UARTCR); + cr &= LINFLEXD_UARTCR_RXEN | LINFLEXD_UARTCR_TXEN; + + if (!cr) + return; + + /* ok, the port was enabled */ + + *parity = 'n'; + if (cr & LINFLEXD_UARTCR_PCE) { + if (cr & LINFLEXD_UARTCR_PC0) + *parity = 'o'; + else + *parity = 'e'; + } + + if ((cr & LINFLEXD_UARTCR_WL0) && ((cr & LINFLEXD_UARTCR_WL1) == 0)) { + if (cr & LINFLEXD_UARTCR_PCE) + *bits = 9; + else + *bits = 8; + } +} + +static int __init linflex_console_setup(struct console *co, char *options) +{ + struct uart_port *sport; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + int ret; + int i; + unsigned long flags; + /* + * check whether an invalid uart number has been specified, and + * if so, search for the first available port that does have + * console support. + */ + if (co->index == -1 || co->index >= ARRAY_SIZE(linflex_ports)) + co->index = 0; + + sport = linflex_ports[co->index]; + if (!sport) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + else + linflex_console_get_options(sport, &parity, &bits); + + if (earlycon_port && sport->mapbase == earlycon_port->mapbase) { + linflex_earlycon_same_instance = true; + + spin_lock_irqsave(&init_lock, flags); + during_init = true; + spin_unlock_irqrestore(&init_lock, flags); + + /* Workaround for character loss or output of many invalid + * characters, when INIT mode is entered shortly after a + * character has just been printed. 
+ */ + udelay(PREINIT_DELAY); + } + + linflex_setup_watermark(sport); + + ret = uart_set_options(sport, co, baud, parity, bits, flow); + + if (!linflex_earlycon_same_instance) + goto done; + + spin_lock_irqsave(&init_lock, flags); + + /* Emptying buffer */ + if (earlycon_buf.len) { + for (i = 0; i < earlycon_buf.len; i++) + linflex_console_putchar(earlycon_port, + earlycon_buf.content[i]); + + kfree(earlycon_buf.content); + earlycon_buf.len = 0; + } + + during_init = false; + spin_unlock_irqrestore(&init_lock, flags); + +done: + return ret; +} + +static struct uart_driver linflex_reg; +static struct console linflex_console = { + .name = DEV_NAME, + .write = linflex_console_write, + .device = uart_console_device, + .setup = linflex_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &linflex_reg, +}; + +static void linflex_earlycon_write(struct console *con, const char *s, + unsigned int n) +{ + struct earlycon_device *dev = con->data; + + uart_console_write(&dev->port, s, n, linflex_earlycon_putchar); +} + +static int __init linflex_early_console_setup(struct earlycon_device *device, + const char *options) +{ + if (!device->port.membase) + return -ENODEV; + + device->con->write = linflex_earlycon_write; + earlycon_port = &device->port; + + return 0; +} + +OF_EARLYCON_DECLARE(linflex, "fsl,s32-linflexuart", + linflex_early_console_setup); + +#define LINFLEX_CONSOLE (&linflex_console) +#else +#define LINFLEX_CONSOLE NULL +#endif + +static struct uart_driver linflex_reg = { + .owner = THIS_MODULE, + .driver_name = DRIVER_NAME, + .dev_name = DEV_NAME, + .nr = ARRAY_SIZE(linflex_ports), + .cons = LINFLEX_CONSOLE, +}; + +static int linflex_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct uart_port *sport; + struct resource *res; + int ret; + + sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL); + if (!sport) + return -ENOMEM; + + ret = of_alias_get_id(np, "serial"); + if (ret < 0) { + dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); + return ret; + } + if (ret >= UART_NR) { + dev_err(&pdev->dev, "driver limited to %d serial ports\n", + UART_NR); + return -ENOMEM; + } + + sport->line = ret; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + sport->mapbase = res->start; + sport->membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(sport->membase)) + return PTR_ERR(sport->membase); + + sport->dev = &pdev->dev; + sport->type = PORT_LINFLEXUART; + sport->iotype = UPIO_MEM; + sport->irq = platform_get_irq(pdev, 0); + sport->ops = &linflex_pops; + sport->flags = UPF_BOOT_AUTOCONF; + + linflex_ports[sport->line] = sport; + + platform_set_drvdata(pdev, sport); + + ret = uart_add_one_port(&linflex_reg, sport); + if (ret) + return ret; + + return 0; +} + +static int linflex_remove(struct platform_device *pdev) +{ + struct uart_port *sport = platform_get_drvdata(pdev); + + uart_remove_one_port(&linflex_reg, sport); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int linflex_suspend(struct device *dev) +{ + struct uart_port *sport = dev_get_drvdata(dev); + + uart_suspend_port(&linflex_reg, sport); + + return 0; +} + +static int linflex_resume(struct device *dev) +{ + struct uart_port *sport = dev_get_drvdata(dev); + + uart_resume_port(&linflex_reg, sport); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(linflex_pm_ops, linflex_suspend, linflex_resume); + +static struct platform_driver linflex_driver = { + .probe = linflex_probe, + .remove = linflex_remove, + .driver 
= {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = linflex_dt_ids,
+		.pm = &linflex_pm_ops,
+	},
+};
+
+static int __init linflex_serial_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&linflex_reg);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&linflex_driver);
+	if (ret)
+		uart_unregister_driver(&linflex_reg);
+
+#ifdef CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE
+	spin_lock_init(&init_lock);
+#endif
+
+	return ret;
+}
+
+static void __exit linflex_serial_exit(void)
+{
+	platform_driver_unregister(&linflex_driver);
+	uart_unregister_driver(&linflex_reg);
+}
+
+module_init(linflex_serial_init);
+module_exit(linflex_serial_exit);
+
+MODULE_DESCRIPTION("Freescale linflex serial port driver");
+MODULE_LICENSE("GPL v2");
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index bd0d9d176a66..0f4f87a6fd54 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -290,4 +290,7 @@
 /* Sunix UART */
 #define PORT_SUNIX	121

+/* Freescale Linflex UART */
+#define PORT_LINFLEXUART	122
+
 #endif /* _UAPILINUX_SERIAL_CORE_H */
-- cgit v1.2.3


From a7b121b4b8b0bcc14fc1c2a81d34096109a65dd6 Mon Sep 17 00:00:00 2001
From: Martin Hundebøll
Date: Mon, 12 Aug 2019 23:12:43 +0200
Subject: tty: n_gsm: add ioctl to map serial device to mux'ed tty
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Guessing the first tty for a gsm0710 multiplexed serial device is not
currently possible, which makes it racy to use with multiple modems.

Add a way to map the physical serial tty to its related mux devices
using an ioctl.

Signed-off-by: Martin Hundebøll
Link: https://lore.kernel.org/r/20190812211243.98686-1-martin@geanix.com
Signed-off-by: Greg Kroah-Hartman
---
 Documentation/driver-api/serial/n_gsm.rst | 11 +++++++++--
 drivers/tty/n_gsm.c | 4 ++++
 include/uapi/linux/gsmmux.h | 2 ++
 3 files changed, 15 insertions(+), 2 deletions(-)

(limited to 'include/uapi')

diff --git a/Documentation/driver-api/serial/n_gsm.rst b/Documentation/driver-api/serial/n_gsm.rst
index 0ba731ab00b2..286e7ff4d2d9 100644
--- a/Documentation/driver-api/serial/n_gsm.rst
+++ b/Documentation/driver-api/serial/n_gsm.rst
@@ -18,10 +18,13 @@ How to use it
 2. switch the serial line to using the n_gsm line discipline by using
    TIOCSETD ioctl,
 3. configure the mux using GSMIOC_GETCONF / GSMIOC_SETCONF ioctl,
+4. obtain base gsmtty number for the used serial port,

 Major parts of the initialization program :
 (a good starting point is util-linux-ng/sys-utils/ldattach.c)::

+  #include <stdint.h>
+  #include <stdio.h>
   #include <linux/gsmmux.h>
   #include <linux/tty.h>
   #define DEFAULT_SPEED B115200
@@ -30,6 +33,7 @@ Major parts of the initialization program :
   int ldisc = N_GSM0710;
   struct gsm_config c;
   struct termios configuration;
+  uint32_t first;

   /* open the serial port connected to the modem */
   fd = open(SERIAL_PORT, O_RDWR | O_NOCTTY | O_NDELAY);
@@ -58,19 +62,22 @@ Major parts of the initialization program :
   c.mtu = 127;
   /* set the new configuration */
   ioctl(fd, GSMIOC_SETCONF, &c);
+  /* get first gsmtty device node */
+  ioctl(fd, GSMIOC_GETFIRST, &first);
+  printf("first muxed line: /dev/gsmtty%i\n", first);

   /* and wait for ever to keep the line discipline enabled */
   daemon(0,0);
   pause();

-4. use these devices as plain serial ports.
+5. use these devices as plain serial ports.

    for example, it's possible:

   - and to use gnokii to send / receive SMS on ttygsm1
   - to use ppp to establish a datalink on ttygsm2

-5. first close all virtual ports before closing the physical port.
+6.
first close all virtual ports before closing the physical port. Note that after closing the physical port the modem is still in multiplexing mode. This may prevent a successful re-opening of the port later. To avoid diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index a60be591f1fc..d30525946892 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -2613,6 +2613,7 @@ static int gsmld_ioctl(struct tty_struct *tty, struct file *file, { struct gsm_config c; struct gsm_mux *gsm = tty->disc_data; + unsigned int base; switch (cmd) { case GSMIOC_GETCONF: @@ -2624,6 +2625,9 @@ static int gsmld_ioctl(struct tty_struct *tty, struct file *file, if (copy_from_user(&c, (void *)arg, sizeof(c))) return -EFAULT; return gsm_config(gsm, &c); + case GSMIOC_GETFIRST: + base = mux_num_to_base(gsm); + return put_user(base + 1, (__u32 __user *)arg); default: return n_tty_ioctl_helper(tty, file, cmd, arg); } diff --git a/include/uapi/linux/gsmmux.h b/include/uapi/linux/gsmmux.h index 101d3c469acb..cb8693b39cb7 100644 --- a/include/uapi/linux/gsmmux.h +++ b/include/uapi/linux/gsmmux.h @@ -37,5 +37,7 @@ struct gsm_netconfig { #define GSMIOC_ENABLE_NET _IOW('G', 2, struct gsm_netconfig) #define GSMIOC_DISABLE_NET _IO('G', 3) +/* get the base tty number for a configured gsmmux tty */ +#define GSMIOC_GETFIRST _IOR('G', 4, __u32) #endif -- cgit v1.2.3 From 2a0c9aaa6247c817e45bfc1aaa5eaeafe7a331d6 Mon Sep 17 00:00:00 2001 From: Kurt Van Dijck Date: Mon, 8 Oct 2018 11:48:34 +0200 Subject: can: add socket type for CAN_J1939 This patch is a preparation for SAE J1939 and adds CAN_J1939 socket type. Signed-off-by: Kurt Van Dijck Signed-off-by: Oleksij Rempel Acked-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- include/uapi/linux/can.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h index 0afb7d8e867f..06d92d6be6e6 100644 --- a/include/uapi/linux/can.h +++ b/include/uapi/linux/can.h @@ -157,7 +157,8 @@ struct canfd_frame { #define CAN_TP20 4 /* VAG Transport Protocol v2.0 */ #define CAN_MCNET 5 /* Bosch MCNet */ #define CAN_ISOTP 6 /* ISO 15765-2 Transport Protocol */ -#define CAN_NPROTO 7 +#define CAN_J1939 7 /* SAE J1939 */ +#define CAN_NPROTO 8 #define SOL_CAN_BASE 100 -- cgit v1.2.3 From f5223e9eee651e005c0f6d6d078909087601b7e9 Mon Sep 17 00:00:00 2001 From: Kurt Van Dijck Date: Mon, 8 Oct 2018 11:48:35 +0200 Subject: can: extend sockaddr_can to include j1939 members This patch prepares struct sockaddr_can for SAE J1939. Signed-off-by: Kurt Van Dijck Signed-off-by: Oleksij Rempel Acked-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- include/uapi/linux/can.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h index 06d92d6be6e6..1e988fdeba34 100644 --- a/include/uapi/linux/can.h +++ b/include/uapi/linux/can.h @@ -175,6 +175,23 @@ struct sockaddr_can { /* transport protocol class address information (e.g. 
ISOTP) */
 		struct { canid_t rx_id, tx_id; } tp;

+		/* J1939 address information */
+		struct {
+			/* 8 byte name when using dynamic addressing */
+			__u64 name;
+
+			/* pgn:
+			 * 8 bit: PS in PDU2 case, else 0
+			 * 8 bit: PF
+			 * 1 bit: DP
+			 * 1 bit: reserved
+			 */
+			__u32 pgn;
+
+			/* 1 byte address */
+			__u8 addr;
+		} j1939;
+
 		/* reserved for future CAN protocols address information */
 	} can_addr;
 };
-- cgit v1.2.3


From 9d71dd0c70099914fcd063135da3c580865e924c Mon Sep 17 00:00:00 2001
From: The j1939 authors
Date: Mon, 8 Oct 2018 11:48:36 +0200
Subject: can: add support of SAE J1939 protocol

SAE J1939 is the vehicle bus recommended practice used for communication
and diagnostics among vehicle components. Originating in the car and
heavy-duty truck industry in the United States, it is now widely used in
other parts of the world.

J1939, ISO 11783 and NMEA 2000 all share the same high level protocol.
SAE J1939 can be considered the replacement for the older SAE J1708 and
SAE J1587 specifications.

Acked-by: Oliver Hartkopp
Signed-off-by: Bastian Stender
Signed-off-by: Elenita Hinds
Signed-off-by: kbuild test robot
Signed-off-by: Kurt Van Dijck
Signed-off-by: Maxime Jayat
Signed-off-by: Robin van der Gracht
Signed-off-by: Oleksij Rempel
Signed-off-by: Marc Kleine-Budde
---
 Documentation/networking/index.rst | 1 +
 Documentation/networking/j1939.rst | 422 ++++++++
 MAINTAINERS | 10 +
 include/linux/can/can-ml.h | 3 +
 include/uapi/linux/can/j1939.h | 99 ++
 net/can/Kconfig | 2 +
 net/can/Makefile | 2 +
 net/can/j1939/Kconfig | 15 +
 net/can/j1939/Makefile | 10 +
 net/can/j1939/address-claim.c | 230 ++++
 net/can/j1939/bus.c | 333 ++++++
 net/can/j1939/j1939-priv.h | 338 ++++++
 net/can/j1939/main.c | 403 +++++++
 net/can/j1939/socket.c | 1160 +++++++++++++++++++++
 net/can/j1939/transport.c | 2027 ++++++++++++++++++++++++++++++++++++
 15 files changed, 5055 insertions(+)
 create mode 100644 Documentation/networking/j1939.rst
 create mode 100644 include/uapi/linux/can/j1939.h
 create mode 100644 net/can/j1939/Kconfig
 create mode 100644 net/can/j1939/Makefile
 create mode 100644 net/can/j1939/address-claim.c
 create mode 100644 net/can/j1939/bus.c
 create mode 100644 net/can/j1939/j1939-priv.h
 create mode 100644 net/can/j1939/main.c
 create mode 100644 net/can/j1939/socket.c
 create mode 100644 net/can/j1939/transport.c

(limited to 'include/uapi')

diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 37eabc17894c..0481d0ffebed 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -17,6 +17,7 @@ Contents:
    devlink-trap
    devlink-trap-netdevsim
    ieee802154
+   j1939
    kapi
    z8530book
    msg_zerocopy
diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst
new file mode 100644
index 000000000000..ce7e7a044e08
--- /dev/null
+++ b/Documentation/networking/j1939.rst
@@ -0,0 +1,422 @@
+.. SPDX-License-Identifier: (GPL-2.0 OR MIT)
+
+===================
+J1939 Documentation
+===================
+
+Overview / What Is J1939
+========================
+
+SAE J1939 defines a higher layer protocol on CAN. It implements a more
+sophisticated addressing scheme and extends the maximum packet size above 8
+bytes. Several derived specifications exist, which differ from the original
+J1939 on the application level, like MilCAN A, NMEA2000 and especially
+ISO-11783 (ISOBUS). This last one specifies the so-called ETP (Extended
+Transport Protocol), which has been included in this implementation. This
+results in a maximum packet size of ((2 ^ 24) - 1) * 7 bytes == 111 MiB.
+
+Specifications used
+-------------------
+
+* SAE J1939-21 : data link layer
+* SAE J1939-81 : network management
+* ISO 11783-6 : Virtual Terminal (Extended Transport Protocol)
+
+.. _j1939-motivation:
+
+Motivation
+==========
+
+Given the fact there's something like SocketCAN with an API similar to BSD
+sockets, we found some reasons to justify a kernel implementation for the
+addressing and transport methods used by J1939.
+
+* **Addressing:** when a process on an ECU communicates via J1939, it should
+  not necessarily know its source address, although at least one process per
+  ECU should know the source address. Other processes should be able to reuse
+  that address. This way, address parameters for different processes
+  cooperating for the same ECU are not duplicated. This way of working is
+  closely related to the UNIX concept where programs do just one thing, and do
+  it well.
+
+* **Dynamic addressing:** Address Claiming in J1939 is time critical.
+  Furthermore, data transport should be handled properly during the address
+  negotiation. Putting this functionality in the kernel eliminates it as a
+  requirement for _every_ user space process that communicates via J1939. This
+  results in a consistent J1939 bus with proper addressing.
+
+* **Transport:** both TP & ETP reuse some PGNs to relay big packets over them.
+  Different processes may thus use the same TP & ETP PGNs without actually
+  knowing it. The individual TP & ETP sessions _must_ be serialized
+  (synchronized) between different processes. The kernel solves this problem
+  properly and eliminates the serialization (synchronization) as a requirement
+  for _every_ user space process that communicates via J1939.
+
+J1939 defines some other features (relaying, gateway, fast packet transport,
+...). In-kernel code for these would not contribute to protocol stability.
+Therefore, these parts are left to user space.
+
+The J1939 sockets operate on CAN network devices (see SocketCAN). Any J1939
+user space library operating on CAN raw sockets will still operate properly.
+Since such a library does not communicate with the in-kernel implementation,
+care must be taken that these two do not interfere. In practice, this means
+they cannot share ECU addresses. A single ECU (or virtual ECU) address is used
+by the library exclusively, or by the in-kernel system exclusively.
+
+J1939 concepts
+==============
+
+PGN
+---
+
+The PGN (Parameter Group Number) is a number to identify a packet. The PGN
+is composed as follows:
+1 bit : Reserved Bit
+1 bit : Data Page
+8 bits : PF (PDU Format)
+8 bits : PS (PDU Specific)
+
+In J1939-21 a distinction is made between PDU1 format (where PF < 240) and
+PDU2 format (where PF >= 240). Furthermore, when using PDU2 format, the
+PS-field contains a so-called Group Extension, which is part of the PGN.
+
+On the other hand, when using PDU1 format, the PS-field contains a so-called
+Destination Address, which is _not_ part of the PGN. When communicating a PGN
+from user space to kernel (or vice versa) and PDU1 format is used, the PS-field
+of the PGN shall be set to zero. The Destination Address shall be set
+elsewhere.
+
+Regarding the PGN mapping to the 29-bit CAN identifier, the Destination
+Address is extracted from, or inserted into, the appropriate bits of the
+identifier by the kernel.
+
+
+Addressing
+----------
+
+Both static and dynamic addressing methods can be used.
+
+Addressing
+----------
+
+Both static and dynamic addressing methods can be used.
+
+For static addresses, no extra checks are made by the kernel, and provided
+addresses are considered right. This responsibility lies with the OEM or
+system integrator.
+
+For dynamic addressing, so-called Address Claiming, extra support is foreseen
+in the kernel. In J1939 any ECU is known by its 64-bit NAME. At the moment of
+a successful address claim, the kernel keeps track of both NAME and source
+address being claimed. This serves as a base for filter schemes. By default,
+packets with a destination that is not local will be rejected.
+
+Mixed mode packets (from a static to a dynamic address or vice versa) are
+allowed. The BSD sockets API defines separate calls for getting/setting the
+local & remote address; these are applicable to J1939 sockets.
+
+Filtering
+---------
+
+J1939 defines white list filters per socket that a user can set in order to
+receive a subset of the J1939 traffic. Filtering can be based on:
+
+* SA
+* SOURCE_NAME
+* PGN
+
+When multiple filters are in place for a single socket, and a packet comes in
+that matches several of those filters, the packet is only received once for
+that socket.
+
+How to Use J1939
+================
+
+API Calls
+---------
+
+On CAN, you first need to open a socket for communicating over a CAN network.
+To use J1939, #include <linux/can/j1939.h>. From there, <linux/can.h> will be
+included too. To open a socket, use:
+
+.. code-block:: C
+
+    s = socket(PF_CAN, SOCK_DGRAM, CAN_J1939);
+
+J1939 uses SOCK_DGRAM sockets. In the J1939 specification, connections are
+mentioned in the context of transport protocol sessions. These still deliver
+packets to the other end (using several CAN packets). SOCK_STREAM is not
+supported.
+
+After the successful creation of the socket, you would normally use the
+bind(2) and/or connect(2) system call to bind the socket to a CAN interface.
+After binding and/or connecting the socket, you can read(2) and write(2)
+from/to the socket or use send(2), sendto(2), sendmsg(2) and the recv*()
+counterpart operations on the socket as usual. There are also J1939 specific
+socket options described below.
+
+In order to send data, a bind(2) must have been successful. bind(2) assigns a
+local address to a socket.
+
+Different from raw CAN, the payload is just the data that gets sent, without
+its header info. The header info is derived from the sockaddr supplied to
+bind(2), connect(2), sendto(2) and recvfrom(2). A write(2) with size 4 will
+result in a packet with 4 bytes.
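+
+For example (an illustrative sketch; 'sock' is assumed to be a successfully
+bound J1939 socket):
+
+.. code-block:: C
+
+    uint8_t dat[4] = { 0x01, 0x02, 0x03, 0x04 };
+
+    /* results in one J1939 packet carrying exactly these 4 bytes */
+    write(sock, dat, sizeof(dat));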
+
+The sockaddr structure has extensions for use with J1939 as specified below:
+
+.. code-block:: C
+
+    struct sockaddr_can {
+        sa_family_t can_family;
+        int         can_ifindex;
+        union {
+            struct {
+                __u64 name;
+                /* pgn:
+                 * 8 bit: PS in PDU2 case, else 0
+                 * 8 bit: PF
+                 * 1 bit: DP
+                 * 1 bit: reserved
+                 */
+                __u32 pgn;
+                __u8  addr;
+            } j1939;
+        } can_addr;
+    };
+
+can_family & can_ifindex serve the same purpose as for other SocketCAN
+sockets.
+
+can_addr.j1939.pgn specifies the PGN (max 0x3ffff). Individual bits are
+specified above.
+
+can_addr.j1939.name contains the 64-bit J1939 NAME.
+
+can_addr.j1939.addr contains the address.
+
+The bind(2) system call assigns the local address, i.e. the source address
+when sending packets. If a PGN is set during bind(2), it is used as an RX
+filter, i.e. only packets with a matching PGN are received. If an ADDR or
+NAME is set it is used as a receive filter, too. It will match the
+destination NAME or ADDR of the incoming packet. The NAME filter will work
+only if appropriate Address Claiming for this name was done on the CAN bus
+and registered/cached by the kernel.
+
+On the other hand, connect(2) assigns the remote address, i.e. the
+destination address. The PGN from connect(2) is used as the default PGN when
+sending packets. If ADDR or NAME is set it will be used as the default
+destination ADDR or NAME. Furthermore, an ADDR or NAME set during connect(2)
+is used as a receive filter, too. It will match the source NAME or ADDR of
+the incoming packet.
+
+Both write(2) and send(2) will send a packet with the local address from
+bind(2) and the remote address from connect(2). Use sendto(2) to overwrite
+the destination address.
+
+If can_addr.j1939.name is set (!= 0) the NAME is looked up by the kernel and
+the corresponding ADDR is used. If can_addr.j1939.name is not set (== 0),
+can_addr.j1939.addr is used.
+
+When creating a socket, reasonable defaults are set. Some options can be
+modified with setsockopt(2) & getsockopt(2).
+
+RX path related options:
+
+- SO_J1939_FILTER - configure array of filters
+- SO_J1939_PROMISC - disable filters set by bind(2) and connect(2)
+
+By default no broadcast packets can be sent or received. To enable sending or
+receiving broadcast packets use the socket option SO_BROADCAST:
+
+.. code-block:: C
+
+    int value = 1;
+    setsockopt(sock, SOL_SOCKET, SO_BROADCAST, &value, sizeof(value));
+
+The following diagram illustrates the RX path:
+
+.. code::
+
+                    +--------------------+
+                    |  incoming packet   |
+                    +--------------------+
+                              |
+                              V
+                    +--------------------+
+                    |  SO_J1939_PROMISC? |
+                    +--------------------+
+                             |  |
+                        no   |  |   yes
+                             |  |
+                   .---------'  `---------.
+                   |                      |
+    +---------------------------+         |
+    |  bind() + connect() +     |         |
+    |  SOCK_BROADCAST filter    |         |
+    +---------------------------+         |
+                   |                      |
+                   |<---------------------'
+                   V
+    +---------------------------+
+    |      SO_J1939_FILTER      |
+    +---------------------------+
+                   |
+                   V
+    +---------------------------+
+    |       socket recv()       |
+    +---------------------------+
+
+TX path related options:
+
+- SO_J1939_SEND_PRIO - change default send priority for the socket
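+
+For example, a sketch that raises the send priority of 'sock' from the
+default 6 to 3 (note: priorities 0 and 1 require CAP_NET_ADMIN):
+
+.. code-block:: C
+
+    int prio = 3;    /* 0 is the highest priority, 7 the lowest */
+
+    setsockopt(sock, SOL_CAN_J1939, SO_J1939_SEND_PRIO, &prio, sizeof(prio));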
+
+Message Flags during send() and Related System Calls
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+send(2), sendto(2) and sendmsg(2) take a 'flags' argument. Currently
+supported flags are:
+
+* MSG_DONTWAIT, i.e. non-blocking operation.
+
+recvmsg(2)
+^^^^^^^^^^
+
+In most cases recvmsg(2) is needed if you want to extract more information
+than recvfrom(2) can provide, for example packet priority and timestamp. The
+Destination Address, name and packet priority (if applicable) are attached to
+the msghdr in the recvmsg(2) call. They can be extracted using cmsg(3)
+macros, with cmsg_level == SOL_CAN_J1939 && cmsg_type == SCM_J1939_DEST_ADDR,
+SCM_J1939_DEST_NAME or SCM_J1939_PRIO. The returned data is a uint8_t for
+priority and dst_addr, and uint64_t for dst_name.
+
+.. code-block:: C
+
+    uint8_t priority, dst_addr;
+    uint64_t dst_name;
+
+    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
+        switch (cmsg->cmsg_level) {
+        case SOL_CAN_J1939:
+            if (cmsg->cmsg_type == SCM_J1939_DEST_ADDR)
+                dst_addr = *CMSG_DATA(cmsg);
+            else if (cmsg->cmsg_type == SCM_J1939_DEST_NAME)
+                memcpy(&dst_name, CMSG_DATA(cmsg),
+                       cmsg->cmsg_len - CMSG_LEN(0));
+            else if (cmsg->cmsg_type == SCM_J1939_PRIO)
+                priority = *CMSG_DATA(cmsg);
+            break;
+        }
+    }
+
+Dynamic Addressing
+------------------
+
+Distinction has to be made between using the claimed address and doing an
+address claim. To use an already claimed address, one has to fill in the
+j1939.name member and provide it to bind(2). If the name had claimed an
+address earlier, all further messages being sent will use that address, and
+the j1939.addr member will be ignored.
+
+An exception to this is PGN 0x0ee00. This is the "Address Claim/Cannot Claim
+Address" message and the kernel will use the j1939.addr member for that PGN
+if necessary.
+
+To claim an address, the following code example can be used:
+
+.. code-block:: C
+
+    struct sockaddr_can baddr = {
+        .can_family = AF_CAN,
+        .can_addr.j1939 = {
+            .name = name,
+            .addr = J1939_IDLE_ADDR,
+            .pgn = J1939_NO_PGN,    /* to disable bind() rx filter for PGN */
+        },
+        .can_ifindex = if_nametoindex("can0"),
+    };
+
+    bind(sock, (struct sockaddr *)&baddr, sizeof(baddr));
+
+    /* for Address Claiming broadcast must be allowed */
+    int value = 1;
+    setsockopt(sock, SOL_SOCKET, SO_BROADCAST, &value, sizeof(value));
+
+    /* configure an advanced RX filter with the PGNs needed for Address
+     * Claiming
+     */
+    const struct j1939_filter filt[] = {
+        {
+            .pgn = J1939_PGN_ADDRESS_CLAIMED,
+            .pgn_mask = J1939_PGN_PDU1_MAX,
+        }, {
+            .pgn = J1939_PGN_REQUEST,
+            .pgn_mask = J1939_PGN_PDU1_MAX,
+        }, {
+            .pgn = J1939_PGN_ADDRESS_COMMANDED,
+            .pgn_mask = J1939_PGN_MAX,
+        },
+    };
+
+    setsockopt(sock, SOL_CAN_J1939, SO_J1939_FILTER, filt, sizeof(filt));
+
+    uint64_t dat = htole64(name);
+    const struct sockaddr_can saddr = {
+        .can_family = AF_CAN,
+        .can_addr.j1939 = {
+            .pgn = J1939_PGN_ADDRESS_CLAIMED,
+            .addr = J1939_NO_ADDR,
+        },
+    };
+
+    /* Afterwards do a sendto(2) with data set to the NAME (Little Endian).
+     * If the NAME provided does not match the j1939.name provided to
+     * bind(2), EPROTO will be returned.
+     */
+    sendto(sock, &dat, sizeof(dat), 0,
+           (const struct sockaddr *)&saddr, sizeof(saddr));
+
+If no one else contests the address claim within 250ms after transmission,
+the kernel marks the NAME-SA assignment as valid. The valid assignment will
+be kept among other valid NAME-SA assignments. From that point, any socket
+bound to the NAME can send packets.
+
+If another ECU claims the address, the kernel will mark the NAME-SA expired.
+No socket bound to the NAME can send packets (other than address claims). To
+claim another address, some socket bound to NAME must bind(2) again, but with
+only j1939.addr changed to the new SA, and must then send a valid address
+claim packet. This restarts the state machine in the kernel (and any other
+participant on the bus) for this NAME.
+
+can-utils also includes the jacd tool, which can be used as a code example or
+as a default Address Claiming daemon.
+
+Send Examples
+-------------
+
+Static Addressing
+^^^^^^^^^^^^^^^^^
+
+This example will send a PGN (0x12300) from SA 0x20 to DA 0x30.
+
+Bind:
+
+.. code-block:: C
+
+    struct sockaddr_can baddr = {
+        .can_family = AF_CAN,
+        .can_addr.j1939 = {
+            .name = J1939_NO_NAME,
+            .addr = 0x20,
+            .pgn = J1939_NO_PGN,
+        },
+        .can_ifindex = if_nametoindex("can0"),
+    };
+
+    bind(sock, (struct sockaddr *)&baddr, sizeof(baddr));
+
+Now, the socket 'sock' is bound to the SA 0x20. Since no connect(2) was
+called, at this point we can use only sendto(2) or sendmsg(2).
+
+Send:
+
+.. code-block:: C
+
+    const struct sockaddr_can saddr = {
+        .can_family = AF_CAN,
+        .can_addr.j1939 = {
+            .name = J1939_NO_NAME,
+            .pgn = 0x12300,
+            .addr = 0x30,
+        },
+    };
+
+    sendto(sock, dat, sizeof(dat), 0,
+           (const struct sockaddr *)&saddr, sizeof(saddr));
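+
+As an illustrative variation (assuming the same bound socket 'sock' and
+payload buffer 'dat' as above), the destination could instead be fixed once
+with connect(2), after which plain send(2) uses it as the default:
+
+.. code-block:: C
+
+    const struct sockaddr_can caddr = {
+        .can_family = AF_CAN,
+        .can_addr.j1939 = {
+            .name = J1939_NO_NAME,
+            .pgn = 0x12300,
+            .addr = 0x30,
+        },
+        .can_ifindex = if_nametoindex("can0"),    /* must match bind(2) */
+    };
+
+    connect(sock, (const struct sockaddr *)&caddr, sizeof(caddr));
+    send(sock, dat, sizeof(dat), 0);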
diff --git a/MAINTAINERS b/MAINTAINERS
index a081c477d1d1..844f416437c4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3669,6 +3669,16 @@ F:	include/uapi/linux/can/bcm.h
 F:	include/uapi/linux/can/raw.h
 F:	include/uapi/linux/can/gw.h
 
+CAN-J1939 NETWORK LAYER
+M:	Robin van der Gracht
+M:	Oleksij Rempel
+R:	Pengutronix Kernel Team
+L:	linux-can@vger.kernel.org
+S:	Maintained
+F:	Documentation/networking/j1939.rst
+F:	net/can/j1939/
+F:	include/uapi/linux/can/j1939.h
+
 CAPABILITIES
 M:	Serge Hallyn
 L:	linux-security-module@vger.kernel.org
diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
index 79ccf6bfa232..2f5d731ae251 100644
--- a/include/linux/can/can-ml.h
+++ b/include/linux/can/can-ml.h
@@ -60,6 +60,9 @@ struct can_dev_rcv_lists {
 
 struct can_ml_priv {
 	struct can_dev_rcv_lists dev_rcv_lists;
+#ifdef CAN_J1939
+	struct j1939_priv *j1939_priv;
+#endif
 };
 
 #endif /* CAN_ML_H */
diff --git a/include/uapi/linux/can/j1939.h b/include/uapi/linux/can/j1939.h
new file mode 100644
index 000000000000..c32325342d30
--- /dev/null
+++ b/include/uapi/linux/can/j1939.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * j1939.h
+ *
+ * Copyright (c) 2010-2011 EIA Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UAPI_CAN_J1939_H_
+#define _UAPI_CAN_J1939_H_
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/can.h>
+
+#define J1939_MAX_UNICAST_ADDR 0xfd
+#define J1939_IDLE_ADDR 0xfe
+#define J1939_NO_ADDR 0xff		/* == broadcast or no addr */
+#define J1939_NO_NAME 0
+#define J1939_PGN_REQUEST 0x0ea00	/* Request PG */
+#define J1939_PGN_ADDRESS_CLAIMED 0x0ee00	/* Address Claimed */
+#define J1939_PGN_ADDRESS_COMMANDED 0x0fed8	/* Commanded Address */
+#define J1939_PGN_PDU1_MAX 0x3ff00
+#define J1939_PGN_MAX 0x3ffff
+#define J1939_NO_PGN 0x40000
+
+/* J1939 Parameter Group Number
+ *
+ * bit 0-7	: PDU Specific (PS)
+ * bit 8-15	: PDU Format (PF)
+ * bit 16	: Data Page (DP)
+ * bit 17	: Reserved (R)
+ * bit 18-31	: set to zero
+ */
+typedef __u32 pgn_t;
+
+/* J1939 Priority
+ *
+ * bit 0-2	: Priority (P)
+ * bit 3-7	: set to zero
+ */
+typedef __u8 priority_t;
+
+/* J1939 NAME
+ *
+ * bit 0-20	: Identity Number
+ * bit 21-31	: Manufacturer Code
+ * bit 32-34	: ECU Instance
+ * bit 35-39	: Function Instance
+ * bit 40-47	: Function
+ * bit 48	: Reserved
+ * bit 49-55	: Vehicle System
+ * bit 56-59	: Vehicle System Instance
+ * bit 60-62	: Industry Group
+ * bit 63	: Arbitrary Address Capable
+ */
+typedef __u64 name_t;
+
+/* J1939 socket options */
+#define SOL_CAN_J1939 (SOL_CAN_BASE + CAN_J1939)
+enum {
+	SO_J1939_FILTER = 1,	/* set filters */
+	SO_J1939_PROMISC = 2,	/* set/clr promiscuous mode */
+	SO_J1939_SEND_PRIO = 3,
+	SO_J1939_ERRQUEUE = 4,
+};
+
+enum {
+	SCM_J1939_DEST_ADDR = 1,
+	SCM_J1939_DEST_NAME = 2,
+	SCM_J1939_PRIO = 3,
+	SCM_J1939_ERRQUEUE = 4,
+};
+
+enum {
+	J1939_NLA_PAD,
+	J1939_NLA_BYTES_ACKED,
+};
+
+enum {
+	J1939_EE_INFO_NONE,
+	J1939_EE_INFO_TX_ABORT,
+};
+
+struct j1939_filter {
+	name_t name;
+	name_t name_mask;
+	pgn_t pgn;
+	pgn_t pgn_mask;
+	__u8 addr;
+	__u8 addr_mask;
+};
+
+#define J1939_FILTER_MAX 512	/* maximum number of
j1939_filter set via setsockopt() */ + +#endif /* !_UAPI_CAN_J1939_H_ */ diff --git a/net/can/Kconfig b/net/can/Kconfig index d4319aa3e1b1..d77042752457 100644 --- a/net/can/Kconfig +++ b/net/can/Kconfig @@ -53,6 +53,8 @@ config CAN_GW They can be modified with AND/OR/XOR/SET operations as configured by the netlink configuration interface known e.g. from iptables. +source "net/can/j1939/Kconfig" + source "drivers/net/can/Kconfig" endif diff --git a/net/can/Makefile b/net/can/Makefile index 1242bbbfe57f..08bd217fc051 100644 --- a/net/can/Makefile +++ b/net/can/Makefile @@ -15,3 +15,5 @@ can-bcm-y := bcm.o obj-$(CONFIG_CAN_GW) += can-gw.o can-gw-y := gw.o + +obj-$(CONFIG_CAN_J1939) += j1939/ diff --git a/net/can/j1939/Kconfig b/net/can/j1939/Kconfig new file mode 100644 index 000000000000..2998298b71ec --- /dev/null +++ b/net/can/j1939/Kconfig @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# SAE J1939 network layer core configuration +# + +config CAN_J1939 + tristate "SAE J1939" + depends on CAN + help + SAE J1939 + Say Y to have in-kernel support for j1939 socket type. This + allows communication according to SAE j1939. + The relevant parts in kernel are + SAE j1939-21 (datalink & transport protocol) + & SAE j1939-81 (network management). diff --git a/net/can/j1939/Makefile b/net/can/j1939/Makefile new file mode 100644 index 000000000000..19181bdae173 --- /dev/null +++ b/net/can/j1939/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CAN_J1939) += can-j1939.o + +can-j1939-objs := \ + address-claim.o \ + bus.o \ + main.o \ + socket.o \ + transport.o diff --git a/net/can/j1939/address-claim.c b/net/can/j1939/address-claim.c new file mode 100644 index 000000000000..f33c47327927 --- /dev/null +++ b/net/can/j1939/address-claim.c @@ -0,0 +1,230 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2010-2011 EIA Electronics, +// Kurt Van Dijck +// Copyright (c) 2010-2011 EIA Electronics, +// Pieter Beyens +// Copyright (c) 2017-2019 Pengutronix, +// Marc Kleine-Budde +// Copyright (c) 2017-2019 Pengutronix, +// Oleksij Rempel + +/* J1939 Address Claiming. + * Address Claiming in the kernel + * - keeps track of the AC states of ECU's, + * - resolves NAME<=>SA taking into account the AC states of ECU's. + * + * All Address Claim msgs (including host-originated msg) are processed + * at the receive path (a sent msg is always received again via CAN echo). + * As such, the processing of AC msgs is done in the order on which msgs + * are sent on the bus. + * + * This module doesn't send msgs itself (e.g. replies on Address Claims), + * this is the responsibility of a user space application or daemon. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include + +#include "j1939-priv.h" + +static inline name_t j1939_skb_to_name(const struct sk_buff *skb) +{ + return le64_to_cpup((__le64 *)skb->data); +} + +static inline bool j1939_ac_msg_is_request(struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + int req_pgn; + + if (skb->len < 3 || skcb->addr.pgn != J1939_PGN_REQUEST) + return false; + + req_pgn = skb->data[0] | (skb->data[1] << 8) | (skb->data[2] << 16); + + return req_pgn == J1939_PGN_ADDRESS_CLAIMED; +} + +static int j1939_ac_verify_outgoing(struct j1939_priv *priv, + struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + + if (skb->len != 8) { + netdev_notice(priv->ndev, "tx address claim with dlc %i\n", + skb->len); + return -EPROTO; + } + + if (skcb->addr.src_name != j1939_skb_to_name(skb)) { + netdev_notice(priv->ndev, "tx address claim with different name\n"); + return -EPROTO; + } + + if (skcb->addr.sa == J1939_NO_ADDR) { + netdev_notice(priv->ndev, "tx address claim with broadcast sa\n"); + return -EPROTO; + } + + /* ac must always be a broadcast */ + if (skcb->addr.dst_name || skcb->addr.da != J1939_NO_ADDR) { + netdev_notice(priv->ndev, "tx address claim with dest, not broadcast\n"); + return -EPROTO; + } + return 0; +} + +int j1939_ac_fixup(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + int ret; + u8 addr; + + /* network mgmt: address claiming msgs */ + if (skcb->addr.pgn == J1939_PGN_ADDRESS_CLAIMED) { + struct j1939_ecu *ecu; + + ret = j1939_ac_verify_outgoing(priv, skb); + /* return both when failure & when successful */ + if (ret < 0) + return ret; + ecu = j1939_ecu_get_by_name(priv, skcb->addr.src_name); + if (!ecu) + return -ENODEV; + + if (ecu->addr != skcb->addr.sa) + /* hold further traffic for ecu, remove from parent */ + j1939_ecu_unmap(ecu); + j1939_ecu_put(ecu); + } else if (skcb->addr.src_name) { + /* assign source address */ + addr = j1939_name_to_addr(priv, skcb->addr.src_name); + if (!j1939_address_is_unicast(addr) && + !j1939_ac_msg_is_request(skb)) { + netdev_notice(priv->ndev, "tx drop: invalid sa for name 0x%016llx\n", + skcb->addr.src_name); + return -EADDRNOTAVAIL; + } + skcb->addr.sa = addr; + } + + /* assign destination address */ + if (skcb->addr.dst_name) { + addr = j1939_name_to_addr(priv, skcb->addr.dst_name); + if (!j1939_address_is_unicast(addr)) { + netdev_notice(priv->ndev, "tx drop: invalid da for name 0x%016llx\n", + skcb->addr.dst_name); + return -EADDRNOTAVAIL; + } + skcb->addr.da = addr; + } + return 0; +} + +static void j1939_ac_process(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_ecu *ecu, *prev; + name_t name; + + if (skb->len != 8) { + netdev_notice(priv->ndev, "rx address claim with wrong dlc %i\n", + skb->len); + return; + } + + name = j1939_skb_to_name(skb); + skcb->addr.src_name = name; + if (!name) { + netdev_notice(priv->ndev, "rx address claim without name\n"); + return; + } + + if (!j1939_address_is_valid(skcb->addr.sa)) { + netdev_notice(priv->ndev, "rx address claim with broadcast sa\n"); + return; + } + + write_lock_bh(&priv->lock); + + /* Few words on the ECU ref counting: + * + * First we get an ECU handle, either with + * j1939_ecu_get_by_name_locked() (increments the ref counter) + * or j1939_ecu_create_locked() (initializes an ECU object + * with a ref counter of 1). 
+ * + * j1939_ecu_unmap_locked() will decrement the ref counter, + * but only if the ECU was mapped before. So "ecu" still + * belongs to us. + * + * j1939_ecu_timer_start() will increment the ref counter + * before it starts the timer, so we can put the ecu when + * leaving this function. + */ + ecu = j1939_ecu_get_by_name_locked(priv, name); + if (!ecu && j1939_address_is_unicast(skcb->addr.sa)) + ecu = j1939_ecu_create_locked(priv, name); + + if (IS_ERR_OR_NULL(ecu)) + goto out_unlock_bh; + + /* cancel pending (previous) address claim */ + j1939_ecu_timer_cancel(ecu); + + if (j1939_address_is_idle(skcb->addr.sa)) { + j1939_ecu_unmap_locked(ecu); + goto out_ecu_put; + } + + /* save new addr */ + if (ecu->addr != skcb->addr.sa) + j1939_ecu_unmap_locked(ecu); + ecu->addr = skcb->addr.sa; + + prev = j1939_ecu_get_by_addr_locked(priv, skcb->addr.sa); + if (prev) { + if (ecu->name > prev->name) { + j1939_ecu_unmap_locked(ecu); + j1939_ecu_put(prev); + goto out_ecu_put; + } else { + /* kick prev if less or equal */ + j1939_ecu_unmap_locked(prev); + j1939_ecu_put(prev); + } + } + + j1939_ecu_timer_start(ecu); + out_ecu_put: + j1939_ecu_put(ecu); + out_unlock_bh: + write_unlock_bh(&priv->lock); +} + +void j1939_ac_recv(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_ecu *ecu; + + /* network mgmt */ + if (skcb->addr.pgn == J1939_PGN_ADDRESS_CLAIMED) { + j1939_ac_process(priv, skb); + } else if (j1939_address_is_unicast(skcb->addr.sa)) { + /* assign source name */ + ecu = j1939_ecu_get_by_addr(priv, skcb->addr.sa); + if (ecu) { + skcb->addr.src_name = ecu->name; + j1939_ecu_put(ecu); + } + } + + /* assign destination name */ + ecu = j1939_ecu_get_by_addr(priv, skcb->addr.da); + if (ecu) { + skcb->addr.dst_name = ecu->name; + j1939_ecu_put(ecu); + } +} diff --git a/net/can/j1939/bus.c b/net/can/j1939/bus.c new file mode 100644 index 000000000000..486687901602 --- /dev/null +++ b/net/can/j1939/bus.c @@ -0,0 +1,333 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2010-2011 EIA Electronics, +// Kurt Van Dijck +// Copyright (c) 2017-2019 Pengutronix, +// Marc Kleine-Budde +// Copyright (c) 2017-2019 Pengutronix, +// Oleksij Rempel + +/* bus for j1939 remote devices + * Since rtnetlink, no real bus is used. + */ + +#include + +#include "j1939-priv.h" + +static void __j1939_ecu_release(struct kref *kref) +{ + struct j1939_ecu *ecu = container_of(kref, struct j1939_ecu, kref); + struct j1939_priv *priv = ecu->priv; + + list_del(&ecu->list); + kfree(ecu); + j1939_priv_put(priv); +} + +void j1939_ecu_put(struct j1939_ecu *ecu) +{ + kref_put(&ecu->kref, __j1939_ecu_release); +} + +static void j1939_ecu_get(struct j1939_ecu *ecu) +{ + kref_get(&ecu->kref); +} + +static bool j1939_ecu_is_mapped_locked(struct j1939_ecu *ecu) +{ + struct j1939_priv *priv = ecu->priv; + + lockdep_assert_held(&priv->lock); + + return j1939_ecu_find_by_addr_locked(priv, ecu->addr) == ecu; +} + +/* ECU device interface */ +/* map ECU to a bus address space */ +static void j1939_ecu_map_locked(struct j1939_ecu *ecu) +{ + struct j1939_priv *priv = ecu->priv; + struct j1939_addr_ent *ent; + + lockdep_assert_held(&priv->lock); + + if (!j1939_address_is_unicast(ecu->addr)) + return; + + ent = &priv->ents[ecu->addr]; + + if (ent->ecu) { + netdev_warn(priv->ndev, "Trying to map already mapped ECU, addr: 0x%02x, name: 0x%016llx. 
Skip it.\n", + ecu->addr, ecu->name); + return; + } + + j1939_ecu_get(ecu); + ent->ecu = ecu; + ent->nusers += ecu->nusers; +} + +/* unmap ECU from a bus address space */ +void j1939_ecu_unmap_locked(struct j1939_ecu *ecu) +{ + struct j1939_priv *priv = ecu->priv; + struct j1939_addr_ent *ent; + + lockdep_assert_held(&priv->lock); + + if (!j1939_address_is_unicast(ecu->addr)) + return; + + if (!j1939_ecu_is_mapped_locked(ecu)) + return; + + ent = &priv->ents[ecu->addr]; + ent->ecu = NULL; + ent->nusers -= ecu->nusers; + j1939_ecu_put(ecu); +} + +void j1939_ecu_unmap(struct j1939_ecu *ecu) +{ + write_lock_bh(&ecu->priv->lock); + j1939_ecu_unmap_locked(ecu); + write_unlock_bh(&ecu->priv->lock); +} + +void j1939_ecu_unmap_all(struct j1939_priv *priv) +{ + int i; + + write_lock_bh(&priv->lock); + for (i = 0; i < ARRAY_SIZE(priv->ents); i++) + if (priv->ents[i].ecu) + j1939_ecu_unmap_locked(priv->ents[i].ecu); + write_unlock_bh(&priv->lock); +} + +void j1939_ecu_timer_start(struct j1939_ecu *ecu) +{ + /* The ECU is held here and released in the + * j1939_ecu_timer_handler() or j1939_ecu_timer_cancel(). + */ + j1939_ecu_get(ecu); + + /* Schedule timer in 250 msec to commit address change. */ + hrtimer_start(&ecu->ac_timer, ms_to_ktime(250), + HRTIMER_MODE_REL_SOFT); +} + +void j1939_ecu_timer_cancel(struct j1939_ecu *ecu) +{ + if (hrtimer_cancel(&ecu->ac_timer)) + j1939_ecu_put(ecu); +} + +static enum hrtimer_restart j1939_ecu_timer_handler(struct hrtimer *hrtimer) +{ + struct j1939_ecu *ecu = + container_of(hrtimer, struct j1939_ecu, ac_timer); + struct j1939_priv *priv = ecu->priv; + + write_lock_bh(&priv->lock); + /* TODO: can we test if ecu->addr is unicast before starting + * the timer? + */ + j1939_ecu_map_locked(ecu); + + /* The corresponding j1939_ecu_get() is in + * j1939_ecu_timer_start(). 
+ */ + j1939_ecu_put(ecu); + write_unlock_bh(&priv->lock); + + return HRTIMER_NORESTART; +} + +struct j1939_ecu *j1939_ecu_create_locked(struct j1939_priv *priv, name_t name) +{ + struct j1939_ecu *ecu; + + lockdep_assert_held(&priv->lock); + + ecu = kzalloc(sizeof(*ecu), gfp_any()); + if (!ecu) + return ERR_PTR(-ENOMEM); + kref_init(&ecu->kref); + ecu->addr = J1939_IDLE_ADDR; + ecu->name = name; + + hrtimer_init(&ecu->ac_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT); + ecu->ac_timer.function = j1939_ecu_timer_handler; + INIT_LIST_HEAD(&ecu->list); + + j1939_priv_get(priv); + ecu->priv = priv; + list_add_tail(&ecu->list, &priv->ecus); + + return ecu; +} + +struct j1939_ecu *j1939_ecu_find_by_addr_locked(struct j1939_priv *priv, + u8 addr) +{ + lockdep_assert_held(&priv->lock); + + return priv->ents[addr].ecu; +} + +struct j1939_ecu *j1939_ecu_get_by_addr_locked(struct j1939_priv *priv, u8 addr) +{ + struct j1939_ecu *ecu; + + lockdep_assert_held(&priv->lock); + + if (!j1939_address_is_unicast(addr)) + return NULL; + + ecu = j1939_ecu_find_by_addr_locked(priv, addr); + if (ecu) + j1939_ecu_get(ecu); + + return ecu; +} + +struct j1939_ecu *j1939_ecu_get_by_addr(struct j1939_priv *priv, u8 addr) +{ + struct j1939_ecu *ecu; + + read_lock_bh(&priv->lock); + ecu = j1939_ecu_get_by_addr_locked(priv, addr); + read_unlock_bh(&priv->lock); + + return ecu; +} + +/* get pointer to ecu without increasing ref counter */ +static struct j1939_ecu *j1939_ecu_find_by_name_locked(struct j1939_priv *priv, + name_t name) +{ + struct j1939_ecu *ecu; + + lockdep_assert_held(&priv->lock); + + list_for_each_entry(ecu, &priv->ecus, list) { + if (ecu->name == name) + return ecu; + } + + return NULL; +} + +struct j1939_ecu *j1939_ecu_get_by_name_locked(struct j1939_priv *priv, + name_t name) +{ + struct j1939_ecu *ecu; + + lockdep_assert_held(&priv->lock); + + if (!name) + return NULL; + + ecu = j1939_ecu_find_by_name_locked(priv, name); + if (ecu) + j1939_ecu_get(ecu); + + return ecu; +} + +struct j1939_ecu *j1939_ecu_get_by_name(struct j1939_priv *priv, name_t name) +{ + struct j1939_ecu *ecu; + + read_lock_bh(&priv->lock); + ecu = j1939_ecu_get_by_name_locked(priv, name); + read_unlock_bh(&priv->lock); + + return ecu; +} + +u8 j1939_name_to_addr(struct j1939_priv *priv, name_t name) +{ + struct j1939_ecu *ecu; + int addr = J1939_IDLE_ADDR; + + if (!name) + return J1939_NO_ADDR; + + read_lock_bh(&priv->lock); + ecu = j1939_ecu_find_by_name_locked(priv, name); + if (ecu && j1939_ecu_is_mapped_locked(ecu)) + /* ecu's SA is registered */ + addr = ecu->addr; + + read_unlock_bh(&priv->lock); + + return addr; +} + +/* TX addr/name accounting + * Transport protocol needs to know if a SA is local or not + * These functions originate from userspace manipulating sockets, + * so locking is straigforward + */ + +int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa) +{ + struct j1939_ecu *ecu; + int err = 0; + + write_lock_bh(&priv->lock); + + if (j1939_address_is_unicast(sa)) + priv->ents[sa].nusers++; + + if (!name) + goto done; + + ecu = j1939_ecu_get_by_name_locked(priv, name); + if (!ecu) + ecu = j1939_ecu_create_locked(priv, name); + err = PTR_ERR_OR_ZERO(ecu); + if (err) + goto done; + + ecu->nusers++; + /* TODO: do we care if ecu->addr != sa? 
*/ + if (j1939_ecu_is_mapped_locked(ecu)) + /* ecu's sa is active already */ + priv->ents[ecu->addr].nusers++; + + done: + write_unlock_bh(&priv->lock); + + return err; +} + +void j1939_local_ecu_put(struct j1939_priv *priv, name_t name, u8 sa) +{ + struct j1939_ecu *ecu; + + write_lock_bh(&priv->lock); + + if (j1939_address_is_unicast(sa)) + priv->ents[sa].nusers--; + + if (!name) + goto done; + + ecu = j1939_ecu_find_by_name_locked(priv, name); + if (WARN_ON_ONCE(!ecu)) + goto done; + + ecu->nusers--; + /* TODO: do we care if ecu->addr != sa? */ + if (j1939_ecu_is_mapped_locked(ecu)) + /* ecu's sa is active already */ + priv->ents[ecu->addr].nusers--; + j1939_ecu_put(ecu); + + done: + write_unlock_bh(&priv->lock); +} diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h new file mode 100644 index 000000000000..12369b604ce9 --- /dev/null +++ b/net/can/j1939/j1939-priv.h @@ -0,0 +1,338 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (c) 2010-2011 EIA Electronics, +// Kurt Van Dijck +// Copyright (c) 2017-2019 Pengutronix, +// Marc Kleine-Budde +// Copyright (c) 2017-2019 Pengutronix, +// Oleksij Rempel + +#ifndef _J1939_PRIV_H_ +#define _J1939_PRIV_H_ + +#include +#include + +/* Timeout to receive the abort signal over loop back. In case CAN + * bus is open, the timeout should be triggered. + */ +#define J1939_XTP_ABORT_TIMEOUT_MS 500 +#define J1939_SIMPLE_ECHO_TIMEOUT_MS (10 * 1000) + +struct j1939_session; +enum j1939_sk_errqueue_type { + J1939_ERRQUEUE_ACK, + J1939_ERRQUEUE_SCHED, + J1939_ERRQUEUE_ABORT, +}; + +/* j1939 devices */ +struct j1939_ecu { + struct list_head list; + name_t name; + u8 addr; + + /* indicates that this ecu successfully claimed @sa as its address */ + struct hrtimer ac_timer; + struct kref kref; + struct j1939_priv *priv; + + /* count users, to help transport protocol decide for interaction */ + int nusers; +}; + +struct j1939_priv { + struct list_head ecus; + /* local list entry in priv + * These allow irq (& softirq) context lookups on j1939 devices + * This approach (separate lists) is done as the other 2 alternatives + * are not easier or even wrong + * 1) using the pure kobject methods involves mutexes, which are not + * allowed in irq context. + * 2) duplicating data structures would require a lot of synchronization + * code + * usage: + */ + + /* segments need a lock to protect the above list */ + rwlock_t lock; + + struct net_device *ndev; + + /* list of 256 ecu ptrs, that cache the claimed addresses. + * also protected by the above lock + */ + struct j1939_addr_ent { + struct j1939_ecu *ecu; + /* count users, to help transport protocol */ + int nusers; + } ents[256]; + + struct kref kref; + + /* List of active sessions to prevent start of conflicting + * one. + * + * Do not start two sessions of same type, addresses and + * direction. 
+ */ + struct list_head active_session_list; + + /* protects active_session_list */ + spinlock_t active_session_list_lock; + + unsigned int tp_max_packet_size; + + /* lock for j1939_socks list */ + spinlock_t j1939_socks_lock; + struct list_head j1939_socks; + + struct kref rx_kref; +}; + +void j1939_ecu_put(struct j1939_ecu *ecu); + +/* keep the cache of what is local */ +int j1939_local_ecu_get(struct j1939_priv *priv, name_t name, u8 sa); +void j1939_local_ecu_put(struct j1939_priv *priv, name_t name, u8 sa); + +static inline bool j1939_address_is_unicast(u8 addr) +{ + return addr <= J1939_MAX_UNICAST_ADDR; +} + +static inline bool j1939_address_is_idle(u8 addr) +{ + return addr == J1939_IDLE_ADDR; +} + +static inline bool j1939_address_is_valid(u8 addr) +{ + return addr != J1939_NO_ADDR; +} + +static inline bool j1939_pgn_is_pdu1(pgn_t pgn) +{ + /* ignore dp & res bits for this */ + return (pgn & 0xff00) < 0xf000; +} + +/* utility to correctly unmap an ECU */ +void j1939_ecu_unmap_locked(struct j1939_ecu *ecu); +void j1939_ecu_unmap(struct j1939_ecu *ecu); + +u8 j1939_name_to_addr(struct j1939_priv *priv, name_t name); +struct j1939_ecu *j1939_ecu_find_by_addr_locked(struct j1939_priv *priv, + u8 addr); +struct j1939_ecu *j1939_ecu_get_by_addr(struct j1939_priv *priv, u8 addr); +struct j1939_ecu *j1939_ecu_get_by_addr_locked(struct j1939_priv *priv, + u8 addr); +struct j1939_ecu *j1939_ecu_get_by_name(struct j1939_priv *priv, name_t name); +struct j1939_ecu *j1939_ecu_get_by_name_locked(struct j1939_priv *priv, + name_t name); + +enum j1939_transfer_type { + J1939_TP, + J1939_ETP, + J1939_SIMPLE, +}; + +struct j1939_addr { + name_t src_name; + name_t dst_name; + pgn_t pgn; + + u8 sa; + u8 da; + + u8 type; +}; + +/* control buffer of the sk_buff */ +struct j1939_sk_buff_cb { + /* Offset in bytes within one ETP session */ + u32 offset; + + /* for tx, MSG_SYN will be used to sync on sockets */ + u32 msg_flags; + u32 tskey; + + struct j1939_addr addr; + + /* Flags for quick lookups during skb processing. + * These are set in the receive path only. 
+ */ +#define J1939_ECU_LOCAL_SRC BIT(0) +#define J1939_ECU_LOCAL_DST BIT(1) + u8 flags; + + priority_t priority; +}; + +static inline +struct j1939_sk_buff_cb *j1939_skb_to_cb(const struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct j1939_sk_buff_cb) > sizeof(skb->cb)); + + return (struct j1939_sk_buff_cb *)skb->cb; +} + +int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb); +void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb); +bool j1939_sk_recv_match(struct j1939_priv *priv, + struct j1939_sk_buff_cb *skcb); +void j1939_sk_send_loop_abort(struct sock *sk, int err); +void j1939_sk_errqueue(struct j1939_session *session, + enum j1939_sk_errqueue_type type); +void j1939_sk_queue_activate_next(struct j1939_session *session); + +/* stack entries */ +struct j1939_session *j1939_tp_send(struct j1939_priv *priv, + struct sk_buff *skb, size_t size); +int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb); +int j1939_ac_fixup(struct j1939_priv *priv, struct sk_buff *skb); +void j1939_ac_recv(struct j1939_priv *priv, struct sk_buff *skb); +void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb); + +/* network management */ +struct j1939_ecu *j1939_ecu_create_locked(struct j1939_priv *priv, name_t name); + +void j1939_ecu_timer_start(struct j1939_ecu *ecu); +void j1939_ecu_timer_cancel(struct j1939_ecu *ecu); +void j1939_ecu_unmap_all(struct j1939_priv *priv); + +struct j1939_priv *j1939_netdev_start(struct net_device *ndev); +void j1939_netdev_stop(struct j1939_priv *priv); + +void j1939_priv_put(struct j1939_priv *priv); +void j1939_priv_get(struct j1939_priv *priv); + +/* notify/alert all j1939 sockets bound to ifindex */ +void j1939_sk_netdev_event_netdown(struct j1939_priv *priv); +int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk); +void j1939_tp_init(struct j1939_priv *priv); + +/* decrement pending skb for a j1939 socket */ +void j1939_sock_pending_del(struct sock *sk); + +enum j1939_session_state { + J1939_SESSION_NEW, + J1939_SESSION_ACTIVE, + /* waiting for abort signal on the bus */ + J1939_SESSION_WAITING_ABORT, + J1939_SESSION_ACTIVE_MAX, + J1939_SESSION_DONE, +}; + +struct j1939_session { + struct j1939_priv *priv; + struct list_head active_session_list_entry; + struct list_head sk_session_queue_entry; + struct kref kref; + struct sock *sk; + + /* ifindex, src, dst, pgn define the session block + * the are _never_ modified after insertion in the list + * this decreases locking problems a _lot_ + */ + struct j1939_sk_buff_cb skcb; + struct sk_buff_head skb_queue; + + /* all tx related stuff (last_txcmd, pkt.tx) + * is protected (modified only) with the txtimer hrtimer + * 'total' & 'block' are never changed, + * last_cmd, last & block are protected by ->lock + * this means that the tx may run after cts is received that should + * have stopped tx, but this time discrepancy is never avoided anyhow + */ + u8 last_cmd, last_txcmd; + bool transmission; + bool extd; + /* Total message size, number of bytes */ + unsigned int total_message_size; + /* Total number of bytes queue from socket to the session */ + unsigned int total_queued_size; + unsigned int tx_retry; + + int err; + u32 tskey; + enum j1939_session_state state; + + /* Packets counters for a (extended) transfer session. The packet is + * maximal of 7 bytes. 
+ */ + struct { + /* total - total number of packets for this session */ + unsigned int total; + /* last - last packet of a transfer block after which + * responder should send ETP.CM_CTS and originator + * ETP.CM_DPO + */ + unsigned int last; + /* tx - number of packets send by originator node. + * this counter can be set back if responder node + * didn't received all packets send by originator. + */ + unsigned int tx; + unsigned int tx_acked; + /* rx - number of packets received */ + unsigned int rx; + /* block - amount of packets expected in one block */ + unsigned int block; + /* dpo - ETP.CM_DPO, Data Packet Offset */ + unsigned int dpo; + } pkt; + struct hrtimer txtimer, rxtimer; +}; + +struct j1939_sock { + struct sock sk; /* must be first to skip with memset */ + struct j1939_priv *priv; + struct list_head list; + +#define J1939_SOCK_BOUND BIT(0) +#define J1939_SOCK_CONNECTED BIT(1) +#define J1939_SOCK_PROMISC BIT(2) +#define J1939_SOCK_ERRQUEUE BIT(3) + int state; + + int ifindex; + struct j1939_addr addr; + struct j1939_filter *filters; + int nfilters; + pgn_t pgn_rx_filter; + + /* j1939 may emit equal PGN (!= equal CAN-id's) out of order + * when transport protocol comes in. + * To allow emitting in order, keep a 'pending' nr. of packets + */ + atomic_t skb_pending; + wait_queue_head_t waitq; + + /* lock for the sk_session_queue list */ + spinlock_t sk_session_queue_lock; + struct list_head sk_session_queue; +}; + +static inline struct j1939_sock *j1939_sk(const struct sock *sk) +{ + return container_of(sk, struct j1939_sock, sk); +} + +void j1939_session_get(struct j1939_session *session); +void j1939_session_put(struct j1939_session *session); +void j1939_session_skb_queue(struct j1939_session *session, + struct sk_buff *skb); +int j1939_session_activate(struct j1939_session *session); +void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec); +void j1939_session_timers_cancel(struct j1939_session *session); + +#define J1939_MAX_TP_PACKET_SIZE (7 * 0xff) +#define J1939_MAX_ETP_PACKET_SIZE (7 * 0x00ffffff) + +#define J1939_REGULAR 0 +#define J1939_EXTENDED 1 + +/* CAN protocol */ +extern const struct can_proto j1939_can_proto; + +#endif /* _J1939_PRIV_H_ */ diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c new file mode 100644 index 000000000000..def2f813ffce --- /dev/null +++ b/net/can/j1939/main.c @@ -0,0 +1,403 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2010-2011 EIA Electronics, +// Pieter Beyens +// Copyright (c) 2010-2011 EIA Electronics, +// Kurt Van Dijck +// Copyright (c) 2018 Protonic, +// Robin van der Gracht +// Copyright (c) 2017-2019 Pengutronix, +// Marc Kleine-Budde +// Copyright (c) 2017-2019 Pengutronix, +// Oleksij Rempel + +/* Core of can-j1939 that links j1939 to CAN. 
*/ + +#include +#include +#include +#include +#include + +#include "j1939-priv.h" + +MODULE_DESCRIPTION("PF_CAN SAE J1939"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("EIA Electronics (Kurt Van Dijck & Pieter Beyens)"); +MODULE_ALIAS("can-proto-" __stringify(CAN_J1939)); + +/* LOWLEVEL CAN interface */ + +/* CAN_HDR: #bytes before can_frame data part */ +#define J1939_CAN_HDR (offsetof(struct can_frame, data)) + +/* CAN_FTR: #bytes beyond data part */ +#define J1939_CAN_FTR (sizeof(struct can_frame) - J1939_CAN_HDR - \ + sizeof(((struct can_frame *)0)->data)) + +/* lowest layer */ +static void j1939_can_recv(struct sk_buff *iskb, void *data) +{ + struct j1939_priv *priv = data; + struct sk_buff *skb; + struct j1939_sk_buff_cb *skcb, *iskcb; + struct can_frame *cf; + + /* create a copy of the skb + * j1939 only delivers the real data bytes, + * the header goes into sockaddr. + * j1939 may not touch the incoming skb in such way + */ + skb = skb_clone(iskb, GFP_ATOMIC); + if (!skb) + return; + + can_skb_set_owner(skb, iskb->sk); + + /* get a pointer to the header of the skb + * the skb payload (pointer) is moved, so that the next skb_data + * returns the actual payload + */ + cf = (void *)skb->data; + skb_pull(skb, J1939_CAN_HDR); + + /* fix length, set to dlc, with 8 maximum */ + skb_trim(skb, min_t(uint8_t, cf->can_dlc, 8)); + + /* set addr */ + skcb = j1939_skb_to_cb(skb); + memset(skcb, 0, sizeof(*skcb)); + + iskcb = j1939_skb_to_cb(iskb); + skcb->tskey = iskcb->tskey; + skcb->priority = (cf->can_id >> 26) & 0x7; + skcb->addr.sa = cf->can_id; + skcb->addr.pgn = (cf->can_id >> 8) & J1939_PGN_MAX; + /* set default message type */ + skcb->addr.type = J1939_TP; + if (j1939_pgn_is_pdu1(skcb->addr.pgn)) { + /* Type 1: with destination address */ + skcb->addr.da = skcb->addr.pgn; + /* normalize pgn: strip dst address */ + skcb->addr.pgn &= 0x3ff00; + } else { + /* set broadcast address */ + skcb->addr.da = J1939_NO_ADDR; + } + + /* update localflags */ + read_lock_bh(&priv->lock); + if (j1939_address_is_unicast(skcb->addr.sa) && + priv->ents[skcb->addr.sa].nusers) + skcb->flags |= J1939_ECU_LOCAL_SRC; + if (j1939_address_is_unicast(skcb->addr.da) && + priv->ents[skcb->addr.da].nusers) + skcb->flags |= J1939_ECU_LOCAL_DST; + read_unlock_bh(&priv->lock); + + /* deliver into the j1939 stack ... 
*/ + j1939_ac_recv(priv, skb); + + if (j1939_tp_recv(priv, skb)) + /* this means the transport layer processed the message */ + goto done; + + j1939_simple_recv(priv, skb); + j1939_sk_recv(priv, skb); + done: + kfree_skb(skb); +} + +/* NETDEV MANAGEMENT */ + +/* values for can_rx_(un)register */ +#define J1939_CAN_ID CAN_EFF_FLAG +#define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG) + +static DEFINE_SPINLOCK(j1939_netdev_lock); + +static struct j1939_priv *j1939_priv_create(struct net_device *ndev) +{ + struct j1939_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + + rwlock_init(&priv->lock); + INIT_LIST_HEAD(&priv->ecus); + priv->ndev = ndev; + kref_init(&priv->kref); + kref_init(&priv->rx_kref); + dev_hold(ndev); + + netdev_dbg(priv->ndev, "%s : 0x%p\n", __func__, priv); + + return priv; +} + +static inline void j1939_priv_set(struct net_device *ndev, + struct j1939_priv *priv) +{ + struct can_ml_priv *can_ml_priv = ndev->ml_priv; + + can_ml_priv->j1939_priv = priv; +} + +static void __j1939_priv_release(struct kref *kref) +{ + struct j1939_priv *priv = container_of(kref, struct j1939_priv, kref); + struct net_device *ndev = priv->ndev; + + netdev_dbg(priv->ndev, "%s: 0x%p\n", __func__, priv); + + dev_put(ndev); + kfree(priv); +} + +void j1939_priv_put(struct j1939_priv *priv) +{ + kref_put(&priv->kref, __j1939_priv_release); +} + +void j1939_priv_get(struct j1939_priv *priv) +{ + kref_get(&priv->kref); +} + +static int j1939_can_rx_register(struct j1939_priv *priv) +{ + struct net_device *ndev = priv->ndev; + int ret; + + j1939_priv_get(priv); + ret = can_rx_register(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK, + j1939_can_recv, priv, "j1939", NULL); + if (ret < 0) { + j1939_priv_put(priv); + return ret; + } + + return 0; +} + +static void j1939_can_rx_unregister(struct j1939_priv *priv) +{ + struct net_device *ndev = priv->ndev; + + can_rx_unregister(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK, + j1939_can_recv, priv); + + j1939_priv_put(priv); +} + +static void __j1939_rx_release(struct kref *kref) + __releases(&j1939_netdev_lock) +{ + struct j1939_priv *priv = container_of(kref, struct j1939_priv, + rx_kref); + + j1939_can_rx_unregister(priv); + j1939_ecu_unmap_all(priv); + j1939_priv_set(priv->ndev, NULL); + spin_unlock(&j1939_netdev_lock); +} + +/* get pointer to priv without increasing ref counter */ +static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev) +{ + struct can_ml_priv *can_ml_priv = ndev->ml_priv; + + return can_ml_priv->j1939_priv; +} + +static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev) +{ + struct j1939_priv *priv; + + lockdep_assert_held(&j1939_netdev_lock); + + if (ndev->type != ARPHRD_CAN) + return NULL; + + priv = j1939_ndev_to_priv(ndev); + if (priv) + j1939_priv_get(priv); + + return priv; +} + +static struct j1939_priv *j1939_priv_get_by_ndev(struct net_device *ndev) +{ + struct j1939_priv *priv; + + spin_lock(&j1939_netdev_lock); + priv = j1939_priv_get_by_ndev_locked(ndev); + spin_unlock(&j1939_netdev_lock); + + return priv; +} + +struct j1939_priv *j1939_netdev_start(struct net_device *ndev) +{ + struct j1939_priv *priv, *priv_new; + int ret; + + priv = j1939_priv_get_by_ndev(ndev); + if (priv) { + kref_get(&priv->rx_kref); + return priv; + } + + priv = j1939_priv_create(ndev); + if (!priv) + return ERR_PTR(-ENOMEM); + + j1939_tp_init(priv); + spin_lock_init(&priv->j1939_socks_lock); + INIT_LIST_HEAD(&priv->j1939_socks); + + spin_lock(&j1939_netdev_lock); 
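+	/* Recheck under j1939_netdev_lock: another context may have created
+	 * and registered a priv for this ndev while we were allocating ours
+	 * without the lock held.
+	 */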
+ priv_new = j1939_priv_get_by_ndev_locked(ndev); + if (priv_new) { + /* Someone was faster than us, use their priv and roll + * back our's. + */ + spin_unlock(&j1939_netdev_lock); + dev_put(ndev); + kfree(priv); + kref_get(&priv_new->rx_kref); + return priv_new; + } + j1939_priv_set(ndev, priv); + spin_unlock(&j1939_netdev_lock); + + ret = j1939_can_rx_register(priv); + if (ret < 0) + goto out_priv_put; + + return priv; + + out_priv_put: + j1939_priv_set(ndev, NULL); + dev_put(ndev); + kfree(priv); + + return ERR_PTR(ret); +} + +void j1939_netdev_stop(struct j1939_priv *priv) +{ + kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock); + j1939_priv_put(priv); +} + +int j1939_send_one(struct j1939_priv *priv, struct sk_buff *skb) +{ + int ret, dlc; + canid_t canid; + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct can_frame *cf; + + /* apply sanity checks */ + if (j1939_pgn_is_pdu1(skcb->addr.pgn)) + skcb->addr.pgn &= J1939_PGN_PDU1_MAX; + else + skcb->addr.pgn &= J1939_PGN_MAX; + + if (skcb->priority > 7) + skcb->priority = 6; + + ret = j1939_ac_fixup(priv, skb); + if (unlikely(ret)) + goto failed; + dlc = skb->len; + + /* re-claim the CAN_HDR from the SKB */ + cf = skb_push(skb, J1939_CAN_HDR); + + /* make it a full can frame again */ + skb_put(skb, J1939_CAN_FTR + (8 - dlc)); + + canid = CAN_EFF_FLAG | + (skcb->priority << 26) | + (skcb->addr.pgn << 8) | + skcb->addr.sa; + if (j1939_pgn_is_pdu1(skcb->addr.pgn)) + canid |= skcb->addr.da << 8; + + cf->can_id = canid; + cf->can_dlc = dlc; + + return can_send(skb, 1); + + failed: + kfree_skb(skb); + return ret; +} + +static int j1939_netdev_notify(struct notifier_block *nb, + unsigned long msg, void *data) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(data); + struct j1939_priv *priv; + + priv = j1939_priv_get_by_ndev(ndev); + if (!priv) + goto notify_done; + + if (ndev->type != ARPHRD_CAN) + goto notify_put; + + switch (msg) { + case NETDEV_DOWN: + j1939_cancel_active_session(priv, NULL); + j1939_sk_netdev_event_netdown(priv); + j1939_ecu_unmap_all(priv); + break; + } + +notify_put: + j1939_priv_put(priv); + +notify_done: + return NOTIFY_DONE; +} + +static struct notifier_block j1939_netdev_notifier = { + .notifier_call = j1939_netdev_notify, +}; + +/* MODULE interface */ +static __init int j1939_module_init(void) +{ + int ret; + + pr_info("can: SAE J1939\n"); + + ret = register_netdevice_notifier(&j1939_netdev_notifier); + if (ret) + goto fail_notifier; + + ret = can_proto_register(&j1939_can_proto); + if (ret < 0) { + pr_err("can: registration of j1939 protocol failed\n"); + goto fail_sk; + } + + return 0; + + fail_sk: + unregister_netdevice_notifier(&j1939_netdev_notifier); + fail_notifier: + return ret; +} + +static __exit void j1939_module_exit(void) +{ + can_proto_unregister(&j1939_can_proto); + + unregister_netdevice_notifier(&j1939_netdev_notifier); +} + +module_init(j1939_module_init); +module_exit(j1939_module_exit); diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c new file mode 100644 index 000000000000..37c1040bcb9c --- /dev/null +++ b/net/can/j1939/socket.c @@ -0,0 +1,1160 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2010-2011 EIA Electronics, +// Pieter Beyens +// Copyright (c) 2010-2011 EIA Electronics, +// Kurt Van Dijck +// Copyright (c) 2018 Protonic, +// Robin van der Gracht +// Copyright (c) 2017-2019 Pengutronix, +// Marc Kleine-Budde +// Copyright (c) 2017-2019 Pengutronix, +// Oleksij Rempel + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include 
+#include +#include +#include + +#include "j1939-priv.h" + +#define J1939_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.j1939) + +/* conversion function between struct sock::sk_priority from linux and + * j1939 priority field + */ +static inline priority_t j1939_prio(u32 sk_priority) +{ + sk_priority = min(sk_priority, 7U); + + return 7 - sk_priority; +} + +static inline u32 j1939_to_sk_priority(priority_t prio) +{ + return 7 - prio; +} + +/* function to see if pgn is to be evaluated */ +static inline bool j1939_pgn_is_valid(pgn_t pgn) +{ + return pgn <= J1939_PGN_MAX; +} + +/* test function to avoid non-zero DA placeholder for pdu1 pgn's */ +static inline bool j1939_pgn_is_clean_pdu(pgn_t pgn) +{ + if (j1939_pgn_is_pdu1(pgn)) + return !(pgn & 0xff); + else + return true; +} + +static inline void j1939_sock_pending_add(struct sock *sk) +{ + struct j1939_sock *jsk = j1939_sk(sk); + + atomic_inc(&jsk->skb_pending); +} + +static int j1939_sock_pending_get(struct sock *sk) +{ + struct j1939_sock *jsk = j1939_sk(sk); + + return atomic_read(&jsk->skb_pending); +} + +void j1939_sock_pending_del(struct sock *sk) +{ + struct j1939_sock *jsk = j1939_sk(sk); + + /* atomic_dec_return returns the new value */ + if (!atomic_dec_return(&jsk->skb_pending)) + wake_up(&jsk->waitq); /* no pending SKB's */ +} + +static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk) +{ + jsk->state |= J1939_SOCK_BOUND; + j1939_priv_get(priv); + jsk->priv = priv; + + spin_lock_bh(&priv->j1939_socks_lock); + list_add_tail(&jsk->list, &priv->j1939_socks); + spin_unlock_bh(&priv->j1939_socks_lock); +} + +static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk) +{ + spin_lock_bh(&priv->j1939_socks_lock); + list_del_init(&jsk->list); + spin_unlock_bh(&priv->j1939_socks_lock); + + jsk->priv = NULL; + j1939_priv_put(priv); + jsk->state &= ~J1939_SOCK_BOUND; +} + +static bool j1939_sk_queue_session(struct j1939_session *session) +{ + struct j1939_sock *jsk = j1939_sk(session->sk); + bool empty; + + spin_lock_bh(&jsk->sk_session_queue_lock); + empty = list_empty(&jsk->sk_session_queue); + j1939_session_get(session); + list_add_tail(&session->sk_session_queue_entry, &jsk->sk_session_queue); + spin_unlock_bh(&jsk->sk_session_queue_lock); + j1939_sock_pending_add(&jsk->sk); + + return empty; +} + +static struct +j1939_session *j1939_sk_get_incomplete_session(struct j1939_sock *jsk) +{ + struct j1939_session *session = NULL; + + spin_lock_bh(&jsk->sk_session_queue_lock); + if (!list_empty(&jsk->sk_session_queue)) { + session = list_last_entry(&jsk->sk_session_queue, + struct j1939_session, + sk_session_queue_entry); + if (session->total_queued_size == session->total_message_size) + session = NULL; + else + j1939_session_get(session); + } + spin_unlock_bh(&jsk->sk_session_queue_lock); + + return session; +} + +static void j1939_sk_queue_drop_all(struct j1939_priv *priv, + struct j1939_sock *jsk, int err) +{ + struct j1939_session *session, *tmp; + + netdev_dbg(priv->ndev, "%s: err: %i\n", __func__, err); + spin_lock_bh(&jsk->sk_session_queue_lock); + list_for_each_entry_safe(session, tmp, &jsk->sk_session_queue, + sk_session_queue_entry) { + list_del_init(&session->sk_session_queue_entry); + session->err = err; + j1939_session_put(session); + } + spin_unlock_bh(&jsk->sk_session_queue_lock); +} + +static void j1939_sk_queue_activate_next_locked(struct j1939_session *session) +{ + struct j1939_sock *jsk; + struct j1939_session *first; + int err; + + /* RX-Session don't have a socket (yet) */ 
+ if (!session->sk) + return; + + jsk = j1939_sk(session->sk); + lockdep_assert_held(&jsk->sk_session_queue_lock); + + err = session->err; + + first = list_first_entry_or_null(&jsk->sk_session_queue, + struct j1939_session, + sk_session_queue_entry); + + /* Some else has already activated the next session */ + if (first != session) + return; + +activate_next: + list_del_init(&first->sk_session_queue_entry); + j1939_session_put(first); + first = list_first_entry_or_null(&jsk->sk_session_queue, + struct j1939_session, + sk_session_queue_entry); + if (!first) + return; + + if (WARN_ON_ONCE(j1939_session_activate(first))) { + first->err = -EBUSY; + goto activate_next; + } else { + /* Give receiver some time (arbitrary chosen) to recover */ + int time_ms = 0; + + if (err) + time_ms = 10 + prandom_u32_max(16); + + j1939_tp_schedule_txtimer(first, time_ms); + } +} + +void j1939_sk_queue_activate_next(struct j1939_session *session) +{ + struct j1939_sock *jsk; + + if (!session->sk) + return; + + jsk = j1939_sk(session->sk); + + spin_lock_bh(&jsk->sk_session_queue_lock); + j1939_sk_queue_activate_next_locked(session); + spin_unlock_bh(&jsk->sk_session_queue_lock); +} + +static bool j1939_sk_match_dst(struct j1939_sock *jsk, + const struct j1939_sk_buff_cb *skcb) +{ + if ((jsk->state & J1939_SOCK_PROMISC)) + return true; + + /* Destination address filter */ + if (jsk->addr.src_name && skcb->addr.dst_name) { + if (jsk->addr.src_name != skcb->addr.dst_name) + return false; + } else { + /* receive (all sockets) if + * - all packages that match our bind() address + * - all broadcast on a socket if SO_BROADCAST + * is set + */ + if (j1939_address_is_unicast(skcb->addr.da)) { + if (jsk->addr.sa != skcb->addr.da) + return false; + } else if (!sock_flag(&jsk->sk, SOCK_BROADCAST)) { + /* receiving broadcast without SO_BROADCAST + * flag is not allowed + */ + return false; + } + } + + /* Source address filter */ + if (jsk->state & J1939_SOCK_CONNECTED) { + /* receive (all sockets) if + * - all packages that match our connect() name or address + */ + if (jsk->addr.dst_name && skcb->addr.src_name) { + if (jsk->addr.dst_name != skcb->addr.src_name) + return false; + } else { + if (jsk->addr.da != skcb->addr.sa) + return false; + } + } + + /* PGN filter */ + if (j1939_pgn_is_valid(jsk->pgn_rx_filter) && + jsk->pgn_rx_filter != skcb->addr.pgn) + return false; + + return true; +} + +/* matches skb control buffer (addr) with a j1939 filter */ +static bool j1939_sk_match_filter(struct j1939_sock *jsk, + const struct j1939_sk_buff_cb *skcb) +{ + const struct j1939_filter *f = jsk->filters; + int nfilter = jsk->nfilters; + + if (!nfilter) + /* receive all when no filters are assigned */ + return true; + + for (; nfilter; ++f, --nfilter) { + if ((skcb->addr.pgn & f->pgn_mask) != f->pgn) + continue; + if ((skcb->addr.sa & f->addr_mask) != f->addr) + continue; + if ((skcb->addr.src_name & f->name_mask) != f->name) + continue; + return true; + } + return false; +} + +static bool j1939_sk_recv_match_one(struct j1939_sock *jsk, + const struct j1939_sk_buff_cb *skcb) +{ + if (!(jsk->state & J1939_SOCK_BOUND)) + return false; + + if (!j1939_sk_match_dst(jsk, skcb)) + return false; + + if (!j1939_sk_match_filter(jsk, skcb)) + return false; + + return true; +} + +static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb) +{ + const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb); + struct j1939_sk_buff_cb *skcb; + struct sk_buff *skb; + + if (oskb->sk == &jsk->sk) + return; + + if 
(!j1939_sk_recv_match_one(jsk, oskcb)) + return; + + skb = skb_clone(oskb, GFP_ATOMIC); + if (!skb) { + pr_warn("skb clone failed\n"); + return; + } + can_skb_set_owner(skb, oskb->sk); + + skcb = j1939_skb_to_cb(skb); + skcb->msg_flags &= ~(MSG_DONTROUTE); + if (skb->sk) + skcb->msg_flags |= MSG_DONTROUTE; + + if (sock_queue_rcv_skb(&jsk->sk, skb) < 0) + kfree_skb(skb); +} + +bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb) +{ + struct j1939_sock *jsk; + bool match = false; + + spin_lock_bh(&priv->j1939_socks_lock); + list_for_each_entry(jsk, &priv->j1939_socks, list) { + match = j1939_sk_recv_match_one(jsk, skcb); + if (match) + break; + } + spin_unlock_bh(&priv->j1939_socks_lock); + + return match; +} + +void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sock *jsk; + + spin_lock_bh(&priv->j1939_socks_lock); + list_for_each_entry(jsk, &priv->j1939_socks, list) { + j1939_sk_recv_one(jsk, skb); + } + spin_unlock_bh(&priv->j1939_socks_lock); +} + +static int j1939_sk_init(struct sock *sk) +{ + struct j1939_sock *jsk = j1939_sk(sk); + + /* Ensure that "sk" is first member in "struct j1939_sock", so that we + * can skip it during memset(). + */ + BUILD_BUG_ON(offsetof(struct j1939_sock, sk) != 0); + memset((void *)jsk + sizeof(jsk->sk), 0x0, + sizeof(*jsk) - sizeof(jsk->sk)); + + INIT_LIST_HEAD(&jsk->list); + init_waitqueue_head(&jsk->waitq); + jsk->sk.sk_priority = j1939_to_sk_priority(6); + jsk->sk.sk_reuse = 1; /* per default */ + jsk->addr.sa = J1939_NO_ADDR; + jsk->addr.da = J1939_NO_ADDR; + jsk->addr.pgn = J1939_NO_PGN; + jsk->pgn_rx_filter = J1939_NO_PGN; + atomic_set(&jsk->skb_pending, 0); + spin_lock_init(&jsk->sk_session_queue_lock); + INIT_LIST_HEAD(&jsk->sk_session_queue); + + return 0; +} + +static int j1939_sk_sanity_check(struct sockaddr_can *addr, int len) +{ + if (!addr) + return -EDESTADDRREQ; + if (len < J1939_MIN_NAMELEN) + return -EINVAL; + if (addr->can_family != AF_CAN) + return -EINVAL; + if (!addr->can_ifindex) + return -ENODEV; + if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) && + !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) + return -EINVAL; + + return 0; +} + +static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) +{ + struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; + struct j1939_sock *jsk = j1939_sk(sock->sk); + struct j1939_priv *priv = jsk->priv; + struct sock *sk = sock->sk; + struct net *net = sock_net(sk); + int ret = 0; + + ret = j1939_sk_sanity_check(addr, len); + if (ret) + return ret; + + lock_sock(sock->sk); + + /* Already bound to an interface? */ + if (jsk->state & J1939_SOCK_BOUND) { + /* A re-bind() to a different interface is not + * supported. 
+ */
+ if (jsk->ifindex != addr->can_ifindex) {
+ ret = -EINVAL;
+ goto out_release_sock;
+ }
+
+ /* drop old references */
+ j1939_jsk_del(priv, jsk);
+ j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
+ } else {
+ struct net_device *ndev;
+
+ ndev = dev_get_by_index(net, addr->can_ifindex);
+ if (!ndev) {
+ ret = -ENODEV;
+ goto out_release_sock;
+ }
+
+ if (ndev->type != ARPHRD_CAN) {
+ dev_put(ndev);
+ ret = -ENODEV;
+ goto out_release_sock;
+ }
+
+ priv = j1939_netdev_start(ndev);
+ dev_put(ndev);
+ if (IS_ERR(priv)) {
+ ret = PTR_ERR(priv);
+ goto out_release_sock;
+ }
+
+ jsk->ifindex = addr->can_ifindex;
+ }
+
+ /* set default receive PGN filter */
+ if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
+ jsk->pgn_rx_filter = addr->can_addr.j1939.pgn;
+ jsk->addr.src_name = addr->can_addr.j1939.name;
+ jsk->addr.sa = addr->can_addr.j1939.addr;
+
+ /* get new references */
+ ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
+ if (ret) {
+ j1939_netdev_stop(priv);
+ goto out_release_sock;
+ }
+
+ j1939_jsk_add(priv, jsk);
+
+ out_release_sock: /* fall through */
+ release_sock(sock->sk);
+
+ return ret;
+}
+
+static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
+ int len, int flags)
+{
+ struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
+ struct j1939_sock *jsk = j1939_sk(sock->sk);
+ int ret = 0;
+
+ ret = j1939_sk_sanity_check(addr, len);
+ if (ret)
+ return ret;
+
+ lock_sock(sock->sk);
+
+ /* bind() before connect() is mandatory */
+ if (!(jsk->state & J1939_SOCK_BOUND)) {
+ ret = -EINVAL;
+ goto out_release_sock;
+ }
+
+ /* A connect() to a different interface is not supported. */
+ if (jsk->ifindex != addr->can_ifindex) {
+ ret = -EINVAL;
+ goto out_release_sock;
+ }
+
+ if (!addr->can_addr.j1939.name &&
+ addr->can_addr.j1939.addr == J1939_NO_ADDR &&
+ !sock_flag(&jsk->sk, SOCK_BROADCAST)) {
+ /* broadcast, but SO_BROADCAST not set */
+ ret = -EACCES;
+ goto out_release_sock;
+ }
+
+ jsk->addr.dst_name = addr->can_addr.j1939.name;
+ jsk->addr.da = addr->can_addr.j1939.addr;
+
+ if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
+ jsk->addr.pgn = addr->can_addr.j1939.pgn;
+
+ jsk->state |= J1939_SOCK_CONNECTED;
+
+ out_release_sock: /* fall through */
+ release_sock(sock->sk);
+
+ return ret;
+}
+
+static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
+ const struct j1939_sock *jsk, int peer)
+{
+ addr->can_family = AF_CAN;
+ addr->can_ifindex = jsk->ifindex;
+ addr->can_addr.j1939.pgn = jsk->addr.pgn;
+ if (peer) {
+ addr->can_addr.j1939.name = jsk->addr.dst_name;
+ addr->can_addr.j1939.addr = jsk->addr.da;
+ } else {
+ addr->can_addr.j1939.name = jsk->addr.src_name;
+ addr->can_addr.j1939.addr = jsk->addr.sa;
+ }
+}
+
+static int j1939_sk_getname(struct socket *sock, struct sockaddr *uaddr,
+ int peer)
+{
+ struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
+ struct sock *sk = sock->sk;
+ struct j1939_sock *jsk = j1939_sk(sk);
+ int ret = 0;
+
+ lock_sock(sk);
+
+ if (peer && !(jsk->state & J1939_SOCK_CONNECTED)) {
+ ret = -EADDRNOTAVAIL;
+ goto failure;
+ }
+
+ j1939_sk_sock2sockaddr_can(addr, jsk, peer);
+ ret = J1939_MIN_NAMELEN;
+
+ failure:
+ release_sock(sk);
+
+ return ret;
+}
+
+static int j1939_sk_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct j1939_sock *jsk;
+
+ if (!sk)
+ return 0;
+
+ jsk = j1939_sk(sk);
+ lock_sock(sk);
+
+ if (jsk->state & J1939_SOCK_BOUND) {
+ struct j1939_priv *priv = jsk->priv;
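+
+ /* Each active transmit session holds the socket's skb_pending
+ * count, which is dropped when the session is destroyed. Wait
+ * here until all pending transfers are done before tearing the
+ * socket down; if a signal interrupts the wait, abort the
+ * remaining sessions instead.
+ */
+ if (wait_event_interruptible(jsk->waitq,
+ 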
!j1939_sock_pending_get(&jsk->sk))) { + j1939_cancel_active_session(priv, sk); + j1939_sk_queue_drop_all(priv, jsk, ESHUTDOWN); + } + + j1939_jsk_del(priv, jsk); + + j1939_local_ecu_put(priv, jsk->addr.src_name, + jsk->addr.sa); + + j1939_netdev_stop(priv); + } + + sock_orphan(sk); + sock->sk = NULL; + + release_sock(sk); + sock_put(sk); + + return 0; +} + +static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, char __user *optval, + unsigned int optlen, int flag) +{ + int tmp; + + if (optlen != sizeof(tmp)) + return -EINVAL; + if (copy_from_user(&tmp, optval, optlen)) + return -EFAULT; + lock_sock(&jsk->sk); + if (tmp) + jsk->state |= flag; + else + jsk->state &= ~flag; + release_sock(&jsk->sk); + return tmp; +} + +static int j1939_sk_setsockopt(struct socket *sock, int level, int optname, + char __user *optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + struct j1939_sock *jsk = j1939_sk(sk); + int tmp, count = 0, ret = 0; + struct j1939_filter *filters = NULL, *ofilters; + + if (level != SOL_CAN_J1939) + return -EINVAL; + + switch (optname) { + case SO_J1939_FILTER: + if (optval) { + struct j1939_filter *f; + int c; + + if (optlen % sizeof(*filters) != 0) + return -EINVAL; + + if (optlen > J1939_FILTER_MAX * + sizeof(struct j1939_filter)) + return -EINVAL; + + count = optlen / sizeof(*filters); + filters = memdup_user(optval, optlen); + if (IS_ERR(filters)) + return PTR_ERR(filters); + + for (f = filters, c = count; c; f++, c--) { + f->name &= f->name_mask; + f->pgn &= f->pgn_mask; + f->addr &= f->addr_mask; + } + } + + lock_sock(&jsk->sk); + ofilters = jsk->filters; + jsk->filters = filters; + jsk->nfilters = count; + release_sock(&jsk->sk); + kfree(ofilters); + return 0; + case SO_J1939_PROMISC: + return j1939_sk_setsockopt_flag(jsk, optval, optlen, + J1939_SOCK_PROMISC); + case SO_J1939_ERRQUEUE: + ret = j1939_sk_setsockopt_flag(jsk, optval, optlen, + J1939_SOCK_ERRQUEUE); + if (ret < 0) + return ret; + + if (!(jsk->state & J1939_SOCK_ERRQUEUE)) + skb_queue_purge(&sk->sk_error_queue); + return ret; + case SO_J1939_SEND_PRIO: + if (optlen != sizeof(tmp)) + return -EINVAL; + if (copy_from_user(&tmp, optval, optlen)) + return -EFAULT; + if (tmp < 0 || tmp > 7) + return -EDOM; + if (tmp < 2 && !capable(CAP_NET_ADMIN)) + return -EPERM; + lock_sock(&jsk->sk); + jsk->sk.sk_priority = j1939_to_sk_priority(tmp); + release_sock(&jsk->sk); + return 0; + default: + return -ENOPROTOOPT; + } +} + +static int j1939_sk_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + struct j1939_sock *jsk = j1939_sk(sk); + int ret, ulen; + /* set defaults for using 'int' properties */ + int tmp = 0; + int len = sizeof(tmp); + void *val = &tmp; + + if (level != SOL_CAN_J1939) + return -EINVAL; + if (get_user(ulen, optlen)) + return -EFAULT; + if (ulen < 0) + return -EINVAL; + + lock_sock(&jsk->sk); + switch (optname) { + case SO_J1939_PROMISC: + tmp = (jsk->state & J1939_SOCK_PROMISC) ? 1 : 0; + break; + case SO_J1939_ERRQUEUE: + tmp = (jsk->state & J1939_SOCK_ERRQUEUE) ? 
1 : 0; + break; + case SO_J1939_SEND_PRIO: + tmp = j1939_prio(jsk->sk.sk_priority); + break; + default: + ret = -ENOPROTOOPT; + goto no_copy; + } + + /* copy to user, based on 'len' & 'val' + * but most sockopt's are 'int' properties, and have 'len' & 'val' + * left unchanged, but instead modified 'tmp' + */ + if (len > ulen) + ret = -EFAULT; + else if (put_user(len, optlen)) + ret = -EFAULT; + else if (copy_to_user(optval, val, len)) + ret = -EFAULT; + else + ret = 0; + no_copy: + release_sock(&jsk->sk); + return ret; +} + +static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) +{ + struct sock *sk = sock->sk; + struct sk_buff *skb; + struct j1939_sk_buff_cb *skcb; + int ret = 0; + + if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE)) + return -EINVAL; + + if (flags & MSG_ERRQUEUE) + return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939, + SCM_J1939_ERRQUEUE); + + skb = skb_recv_datagram(sk, flags, 0, &ret); + if (!skb) + return ret; + + if (size < skb->len) + msg->msg_flags |= MSG_TRUNC; + else + size = skb->len; + + ret = memcpy_to_msg(msg, skb->data, size); + if (ret < 0) { + skb_free_datagram(sk, skb); + return ret; + } + + skcb = j1939_skb_to_cb(skb); + if (j1939_address_is_valid(skcb->addr.da)) + put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_ADDR, + sizeof(skcb->addr.da), &skcb->addr.da); + + if (skcb->addr.dst_name) + put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_NAME, + sizeof(skcb->addr.dst_name), &skcb->addr.dst_name); + + put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_PRIO, + sizeof(skcb->priority), &skcb->priority); + + if (msg->msg_name) { + struct sockaddr_can *paddr = msg->msg_name; + + msg->msg_namelen = J1939_MIN_NAMELEN; + memset(msg->msg_name, 0, msg->msg_namelen); + paddr->can_family = AF_CAN; + paddr->can_ifindex = skb->skb_iif; + paddr->can_addr.j1939.name = skcb->addr.src_name; + paddr->can_addr.j1939.addr = skcb->addr.sa; + paddr->can_addr.j1939.pgn = skcb->addr.pgn; + } + + sock_recv_ts_and_drops(msg, sk, skb); + msg->msg_flags |= skcb->msg_flags; + skb_free_datagram(sk, skb); + + return size; +} + +static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev, + struct sock *sk, + struct msghdr *msg, size_t size, + int *errcode) +{ + struct j1939_sock *jsk = j1939_sk(sk); + struct j1939_sk_buff_cb *skcb; + struct sk_buff *skb; + int ret; + + skb = sock_alloc_send_skb(sk, + size + + sizeof(struct can_frame) - + sizeof(((struct can_frame *)NULL)->data) + + sizeof(struct can_skb_priv), + msg->msg_flags & MSG_DONTWAIT, &ret); + if (!skb) + goto failure; + + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = ndev->ifindex; + can_skb_prv(skb)->skbcnt = 0; + skb_reserve(skb, offsetof(struct can_frame, data)); + + ret = memcpy_from_msg(skb_put(skb, size), msg, size); + if (ret < 0) + goto free_skb; + + skb->dev = ndev; + + skcb = j1939_skb_to_cb(skb); + memset(skcb, 0, sizeof(*skcb)); + skcb->addr = jsk->addr; + skcb->priority = j1939_prio(sk->sk_priority); + + if (msg->msg_name) { + struct sockaddr_can *addr = msg->msg_name; + + if (addr->can_addr.j1939.name || + addr->can_addr.j1939.addr != J1939_NO_ADDR) { + skcb->addr.dst_name = addr->can_addr.j1939.name; + skcb->addr.da = addr->can_addr.j1939.addr; + } + if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn)) + skcb->addr.pgn = addr->can_addr.j1939.pgn; + } + + *errcode = ret; + return skb; + +free_skb: + kfree_skb(skb); +failure: + *errcode = ret; + return NULL; +} + +static size_t j1939_sk_opt_stats_get_size(void) +{ + return + nla_total_size(sizeof(u32)) + /* J1939_NLA_BYTES_ACKED */ + 
0;
+}
+
+static struct sk_buff *
+j1939_sk_get_timestamping_opt_stats(struct j1939_session *session)
+{
+ struct sk_buff *stats;
+ u32 size;
+
+ stats = alloc_skb(j1939_sk_opt_stats_get_size(), GFP_ATOMIC);
+ if (!stats)
+ return NULL;
+
+ if (session->skcb.addr.type == J1939_SIMPLE)
+ size = session->total_message_size;
+ else
+ size = min(session->pkt.tx_acked * 7,
+ session->total_message_size);
+
+ nla_put_u32(stats, J1939_NLA_BYTES_ACKED, size);
+
+ return stats;
+}
+
+void j1939_sk_errqueue(struct j1939_session *session,
+ enum j1939_sk_errqueue_type type)
+{
+ struct j1939_priv *priv = session->priv;
+ struct sock *sk = session->sk;
+ struct j1939_sock *jsk;
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb;
+ char *state = "UNK";
+ int err;
+
+ /* currently we have no sk for the RX session */
+ if (!sk)
+ return;
+
+ jsk = j1939_sk(sk);
+
+ if (!(jsk->state & J1939_SOCK_ERRQUEUE))
+ return;
+
+ skb = j1939_sk_get_timestamping_opt_stats(session);
+ if (!skb)
+ return;
+
+ skb->tstamp = ktime_get_real();
+
+ BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
+
+ serr = SKB_EXT_ERR(skb);
+ memset(serr, 0, sizeof(*serr));
+ switch (type) {
+ case J1939_ERRQUEUE_ACK:
+ if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))
+ return;
+
+ serr->ee.ee_errno = ENOMSG;
+ serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+ serr->ee.ee_info = SCM_TSTAMP_ACK;
+ state = "ACK";
+ break;
+ case J1939_ERRQUEUE_SCHED:
+ if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED))
+ return;
+
+ serr->ee.ee_errno = ENOMSG;
+ serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+ serr->ee.ee_info = SCM_TSTAMP_SCHED;
+ state = "SCH";
+ break;
+ case J1939_ERRQUEUE_ABORT:
+ serr->ee.ee_errno = session->err;
+ serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
+ serr->ee.ee_info = J1939_EE_INFO_TX_ABORT;
+ state = "ABT";
+ break;
+ default:
+ netdev_err(priv->ndev, "Unknown errqueue type %i\n", type);
+ }
+
+ serr->opt_stats = true;
+ if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
+ serr->ee.ee_data = session->tskey;
+
+ netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
+ __func__, session, session->tskey, state);
+ err = sock_queue_err_skb(sk, skb);
+
+ if (err)
+ kfree_skb(skb);
+}
+
+void j1939_sk_send_loop_abort(struct sock *sk, int err)
+{
+ sk->sk_err = err;
+
+ sk->sk_error_report(sk);
+}
+
+static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
+ struct msghdr *msg, size_t size)
+{
+ struct j1939_sock *jsk = j1939_sk(sk);
+ struct j1939_session *session = j1939_sk_get_incomplete_session(jsk);
+ struct sk_buff *skb;
+ size_t segment_size, todo_size;
+ int ret = 0;
+
+ if (session &&
+ session->total_message_size != session->total_queued_size + size) {
+ j1939_session_put(session);
+ return -EIO;
+ }
+
+ todo_size = size;
+
+ while (todo_size) {
+ struct j1939_sk_buff_cb *skcb;
+
+ segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
+ todo_size);
+
+ /* Allocate skb for one segment */
+ skb = j1939_sk_alloc_skb(priv->ndev, sk, msg, segment_size,
+ &ret);
+ if (ret)
+ break;
+
+ skcb = j1939_skb_to_cb(skb);
+
+ if (!session) {
+ /* at this point the size should be the full size
+ * of the session
+ */
+ skcb->offset = 0;
+ session = j1939_tp_send(priv, skb, size);
+ if (IS_ERR(session)) {
+ ret = PTR_ERR(session);
+ goto kfree_skb;
+ }
+ if (j1939_sk_queue_session(session)) {
+ /* try to activate the session if we are
+ * first in the queue
+ */
+ if (!j1939_session_activate(session)) {
+ j1939_tp_schedule_txtimer(session, 0);
+ } else {
+ ret = -EBUSY;
+ session->err = ret;
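+ /* Activation failed because a
+ * conflicting session is still
+ * active; propagate the error to
+ * everything queued on this socket.
+ */
+ 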
j1939_sk_queue_drop_all(priv, jsk, + EBUSY); + break; + } + } + } else { + skcb->offset = session->total_queued_size; + j1939_session_skb_queue(session, skb); + } + + todo_size -= segment_size; + session->total_queued_size += segment_size; + } + + switch (ret) { + case 0: /* OK */ + if (todo_size) + netdev_warn(priv->ndev, + "no error found and not completely queued?! %zu\n", + todo_size); + ret = size; + break; + case -ERESTARTSYS: + ret = -EINTR; + /* fall through */ + case -EAGAIN: /* OK */ + if (todo_size != size) + ret = size - todo_size; + break; + default: /* ERROR */ + break; + } + + if (session) + j1939_session_put(session); + + return ret; + + kfree_skb: + kfree_skb(skb); + return ret; +} + +static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg, + size_t size) +{ + struct sock *sk = sock->sk; + struct j1939_sock *jsk = j1939_sk(sk); + struct j1939_priv *priv = jsk->priv; + int ifindex; + int ret; + + /* various socket state tests */ + if (!(jsk->state & J1939_SOCK_BOUND)) + return -EBADFD; + + ifindex = jsk->ifindex; + + if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) + /* no source address assigned yet */ + return -EBADFD; + + /* deal with provided destination address info */ + if (msg->msg_name) { + struct sockaddr_can *addr = msg->msg_name; + + if (msg->msg_namelen < J1939_MIN_NAMELEN) + return -EINVAL; + + if (addr->can_family != AF_CAN) + return -EINVAL; + + if (addr->can_ifindex && addr->can_ifindex != ifindex) + return -EBADFD; + + if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) && + !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) + return -EINVAL; + + if (!addr->can_addr.j1939.name && + addr->can_addr.j1939.addr == J1939_NO_ADDR && + !sock_flag(sk, SOCK_BROADCAST)) + /* broadcast, but SO_BROADCAST not set */ + return -EACCES; + } else { + if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR && + !sock_flag(sk, SOCK_BROADCAST)) + /* broadcast, but SO_BROADCAST not set */ + return -EACCES; + } + + ret = j1939_sk_send_loop(priv, sk, msg, size); + + return ret; +} + +void j1939_sk_netdev_event_netdown(struct j1939_priv *priv) +{ + struct j1939_sock *jsk; + int error_code = ENETDOWN; + + spin_lock_bh(&priv->j1939_socks_lock); + list_for_each_entry(jsk, &priv->j1939_socks, list) { + jsk->sk.sk_err = error_code; + if (!sock_flag(&jsk->sk, SOCK_DEAD)) + jsk->sk.sk_error_report(&jsk->sk); + + j1939_sk_queue_drop_all(priv, jsk, error_code); + } + spin_unlock_bh(&priv->j1939_socks_lock); +} + +static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd, + unsigned long arg) +{ + /* no ioctls for socket layer -> hand it down to NIC layer */ + return -ENOIOCTLCMD; +} + +static const struct proto_ops j1939_ops = { + .family = PF_CAN, + .release = j1939_sk_release, + .bind = j1939_sk_bind, + .connect = j1939_sk_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .getname = j1939_sk_getname, + .poll = datagram_poll, + .ioctl = j1939_sk_no_ioctlcmd, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = j1939_sk_setsockopt, + .getsockopt = j1939_sk_getsockopt, + .sendmsg = j1939_sk_sendmsg, + .recvmsg = j1939_sk_recvmsg, + .mmap = sock_no_mmap, + .sendpage = sock_no_sendpage, +}; + +static struct proto j1939_proto __read_mostly = { + .name = "CAN_J1939", + .owner = THIS_MODULE, + .obj_size = sizeof(struct j1939_sock), + .init = j1939_sk_init, +}; + +const struct can_proto j1939_can_proto = { + .type = SOCK_DGRAM, + .protocol = CAN_J1939, + .ops = &j1939_ops, + .prot = &j1939_proto, +}; diff --git 
a/net/can/j1939/transport.c b/net/can/j1939/transport.c
new file mode 100644
index 000000000000..fe000ea757ea
--- /dev/null
+++ b/net/can/j1939/transport.c
@@ -0,0 +1,2027 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2010-2011 EIA Electronics,
+// Kurt Van Dijck
+// Copyright (c) 2018 Protonic,
+// Robin van der Gracht
+// Copyright (c) 2017-2019 Pengutronix,
+// Marc Kleine-Budde
+// Copyright (c) 2017-2019 Pengutronix,
+// Oleksij Rempel
+
+#include <linux/can/skb.h>
+
+#include "j1939-priv.h"
+
+#define J1939_XTP_TX_RETRY_LIMIT 100
+
+#define J1939_ETP_PGN_CTL 0xc800
+#define J1939_ETP_PGN_DAT 0xc700
+#define J1939_TP_PGN_CTL 0xec00
+#define J1939_TP_PGN_DAT 0xeb00
+
+#define J1939_TP_CMD_RTS 0x10
+#define J1939_TP_CMD_CTS 0x11
+#define J1939_TP_CMD_EOMA 0x13
+#define J1939_TP_CMD_BAM 0x20
+#define J1939_TP_CMD_ABORT 0xff
+
+#define J1939_ETP_CMD_RTS 0x14
+#define J1939_ETP_CMD_CTS 0x15
+#define J1939_ETP_CMD_DPO 0x16
+#define J1939_ETP_CMD_EOMA 0x17
+#define J1939_ETP_CMD_ABORT 0xff
+
+enum j1939_xtp_abort {
+ J1939_XTP_NO_ABORT = 0,
+ J1939_XTP_ABORT_BUSY = 1,
+ /* Already in one or more connection managed sessions and
+ * cannot support another.
+ *
+ * EALREADY:
+ * Operation already in progress
+ */
+
+ J1939_XTP_ABORT_RESOURCE = 2,
+ /* System resources were needed for another task so this
+ * connection managed session was terminated.
+ *
+ * EMSGSIZE:
+ * The socket type requires that message be sent atomically,
+ * and the size of the message to be sent made this
+ * impossible.
+ */
+
+ J1939_XTP_ABORT_TIMEOUT = 3,
+ /* A timeout occurred and this is the connection abort to
+ * close the session.
+ *
+ * EHOSTUNREACH:
+ * The destination host cannot be reached (probably because
+ * the host is down or a remote router cannot reach it).
+ */
+
+ J1939_XTP_ABORT_GENERIC = 4,
+ /* CTS messages received when data transfer is in progress
+ *
+ * EBADMSG:
+ * Not a data message
+ */
+
+ J1939_XTP_ABORT_FAULT = 5,
+ /* Maximal retransmit request limit reached
+ *
+ * ENOTRECOVERABLE:
+ * State not recoverable
+ */
+
+ J1939_XTP_ABORT_UNEXPECTED_DATA = 6,
+ /* Unexpected data transfer packet
+ *
+ * ENOTCONN:
+ * Transport endpoint is not connected
+ */
+
+ J1939_XTP_ABORT_BAD_SEQ = 7,
+ /* Bad sequence number (and software is not able to recover)
+ *
+ * EILSEQ:
+ * Illegal byte sequence
+ */
+
+ J1939_XTP_ABORT_DUP_SEQ = 8,
+ /* Duplicate sequence number (and software is not able to
+ * recover)
+ */
+
+ J1939_XTP_ABORT_EDPO_UNEXPECTED = 9,
+ /* Unexpected EDPO packet (ETP) or Message size > 1785 bytes
+ * (TP)
+ */
+
+ J1939_XTP_ABORT_BAD_EDPO_PGN = 10,
+ /* Unexpected EDPO PGN (PGN in EDPO is bad) */
+
+ J1939_XTP_ABORT_EDPO_OUTOF_CTS = 11,
+ /* EDPO number of packets is greater than CTS */
+
+ J1939_XTP_ABORT_BAD_EDPO_OFFSET = 12,
+ /* Bad EDPO offset */
+
+ J1939_XTP_ABORT_OTHER_DEPRECATED = 13,
+ /* Deprecated. 
Use 250 instead (Any other reason) */ + + J1939_XTP_ABORT_ECTS_UNXPECTED_PGN = 14, + /* Unexpected ECTS PGN (PGN in ECTS is bad) */ + + J1939_XTP_ABORT_ECTS_TOO_BIG = 15, + /* ECTS requested packets exceeds message size */ + + J1939_XTP_ABORT_OTHER = 250, + /* Any other reason (if a Connection Abort reason is + * identified that is not listed in the table use code 250) + */ +}; + +static unsigned int j1939_tp_block = 255; +static unsigned int j1939_tp_packet_delay; +static unsigned int j1939_tp_padding = 1; + +/* helpers */ +static const char *j1939_xtp_abort_to_str(enum j1939_xtp_abort abort) +{ + switch (abort) { + case J1939_XTP_ABORT_BUSY: + return "Already in one or more connection managed sessions and cannot support another."; + case J1939_XTP_ABORT_RESOURCE: + return "System resources were needed for another task so this connection managed session was terminated."; + case J1939_XTP_ABORT_TIMEOUT: + return "A timeout occurred and this is the connection abort to close the session."; + case J1939_XTP_ABORT_GENERIC: + return "CTS messages received when data transfer is in progress"; + case J1939_XTP_ABORT_FAULT: + return "Maximal retransmit request limit reached"; + case J1939_XTP_ABORT_UNEXPECTED_DATA: + return "Unexpected data transfer packet"; + case J1939_XTP_ABORT_BAD_SEQ: + return "Bad sequence number (and software is not able to recover)"; + case J1939_XTP_ABORT_DUP_SEQ: + return "Duplicate sequence number (and software is not able to recover)"; + case J1939_XTP_ABORT_EDPO_UNEXPECTED: + return "Unexpected EDPO packet (ETP) or Message size > 1785 bytes (TP)"; + case J1939_XTP_ABORT_BAD_EDPO_PGN: + return "Unexpected EDPO PGN (PGN in EDPO is bad)"; + case J1939_XTP_ABORT_EDPO_OUTOF_CTS: + return "EDPO number of packets is greater than CTS"; + case J1939_XTP_ABORT_BAD_EDPO_OFFSET: + return "Bad EDPO offset"; + case J1939_XTP_ABORT_OTHER_DEPRECATED: + return "Deprecated. 
Use 250 instead (Any other reason)"; + case J1939_XTP_ABORT_ECTS_UNXPECTED_PGN: + return "Unexpected ECTS PGN (PGN in ECTS is bad)"; + case J1939_XTP_ABORT_ECTS_TOO_BIG: + return "ECTS requested packets exceeds message size"; + case J1939_XTP_ABORT_OTHER: + return "Any other reason (if a Connection Abort reason is identified that is not listed in the table use code 250)"; + default: + return ""; + } +} + +static int j1939_xtp_abort_to_errno(struct j1939_priv *priv, + enum j1939_xtp_abort abort) +{ + int err; + + switch (abort) { + case J1939_XTP_NO_ABORT: + WARN_ON_ONCE(abort == J1939_XTP_NO_ABORT); + err = 0; + break; + case J1939_XTP_ABORT_BUSY: + err = EALREADY; + break; + case J1939_XTP_ABORT_RESOURCE: + err = EMSGSIZE; + break; + case J1939_XTP_ABORT_TIMEOUT: + err = EHOSTUNREACH; + break; + case J1939_XTP_ABORT_GENERIC: + err = EBADMSG; + break; + case J1939_XTP_ABORT_FAULT: + err = ENOTRECOVERABLE; + break; + case J1939_XTP_ABORT_UNEXPECTED_DATA: + err = ENOTCONN; + break; + case J1939_XTP_ABORT_BAD_SEQ: + err = EILSEQ; + break; + case J1939_XTP_ABORT_DUP_SEQ: + err = EPROTO; + break; + case J1939_XTP_ABORT_EDPO_UNEXPECTED: + err = EPROTO; + break; + case J1939_XTP_ABORT_BAD_EDPO_PGN: + err = EPROTO; + break; + case J1939_XTP_ABORT_EDPO_OUTOF_CTS: + err = EPROTO; + break; + case J1939_XTP_ABORT_BAD_EDPO_OFFSET: + err = EPROTO; + break; + case J1939_XTP_ABORT_OTHER_DEPRECATED: + err = EPROTO; + break; + case J1939_XTP_ABORT_ECTS_UNXPECTED_PGN: + err = EPROTO; + break; + case J1939_XTP_ABORT_ECTS_TOO_BIG: + err = EPROTO; + break; + case J1939_XTP_ABORT_OTHER: + err = EPROTO; + break; + default: + netdev_warn(priv->ndev, "Unknown abort code %i", abort); + err = EPROTO; + } + + return err; +} + +static inline void j1939_session_list_lock(struct j1939_priv *priv) +{ + spin_lock_bh(&priv->active_session_list_lock); +} + +static inline void j1939_session_list_unlock(struct j1939_priv *priv) +{ + spin_unlock_bh(&priv->active_session_list_lock); +} + +void j1939_session_get(struct j1939_session *session) +{ + kref_get(&session->kref); +} + +/* session completion functions */ +static void __j1939_session_drop(struct j1939_session *session) +{ + if (!session->transmission) + return; + + j1939_sock_pending_del(session->sk); +} + +static void j1939_session_destroy(struct j1939_session *session) +{ + if (session->err) + j1939_sk_errqueue(session, J1939_ERRQUEUE_ABORT); + else + j1939_sk_errqueue(session, J1939_ERRQUEUE_ACK); + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + skb_queue_purge(&session->skb_queue); + __j1939_session_drop(session); + j1939_priv_put(session->priv); + kfree(session); +} + +static void __j1939_session_release(struct kref *kref) +{ + struct j1939_session *session = container_of(kref, struct j1939_session, + kref); + + j1939_session_destroy(session); +} + +void j1939_session_put(struct j1939_session *session) +{ + kref_put(&session->kref, __j1939_session_release); +} + +static void j1939_session_txtimer_cancel(struct j1939_session *session) +{ + if (hrtimer_cancel(&session->txtimer)) + j1939_session_put(session); +} + +static void j1939_session_rxtimer_cancel(struct j1939_session *session) +{ + if (hrtimer_cancel(&session->rxtimer)) + j1939_session_put(session); +} + +void j1939_session_timers_cancel(struct j1939_session *session) +{ + j1939_session_txtimer_cancel(session); + j1939_session_rxtimer_cancel(session); +} + +static inline bool j1939_cb_is_broadcast(const struct j1939_sk_buff_cb *skcb) +{ + return (!skcb->addr.dst_name && (skcb->addr.da 
== 0xff)); +} + +static void j1939_session_skb_drop_old(struct j1939_session *session) +{ + struct sk_buff *do_skb; + struct j1939_sk_buff_cb *do_skcb; + unsigned int offset_start; + unsigned long flags; + + if (skb_queue_len(&session->skb_queue) < 2) + return; + + offset_start = session->pkt.tx_acked * 7; + + spin_lock_irqsave(&session->skb_queue.lock, flags); + do_skb = skb_peek(&session->skb_queue); + do_skcb = j1939_skb_to_cb(do_skb); + + if ((do_skcb->offset + do_skb->len) < offset_start) { + __skb_unlink(do_skb, &session->skb_queue); + kfree_skb(do_skb); + } + spin_unlock_irqrestore(&session->skb_queue.lock, flags); +} + +void j1939_session_skb_queue(struct j1939_session *session, + struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_priv *priv = session->priv; + + j1939_ac_fixup(priv, skb); + + if (j1939_address_is_unicast(skcb->addr.da) && + priv->ents[skcb->addr.da].nusers) + skcb->flags |= J1939_ECU_LOCAL_DST; + + skcb->flags |= J1939_ECU_LOCAL_SRC; + + skb_queue_tail(&session->skb_queue, skb); +} + +static struct sk_buff *j1939_session_skb_find(struct j1939_session *session) +{ + struct j1939_priv *priv = session->priv; + struct sk_buff *skb = NULL; + struct sk_buff *do_skb; + struct j1939_sk_buff_cb *do_skcb; + unsigned int offset_start; + unsigned long flags; + + offset_start = session->pkt.dpo * 7; + + spin_lock_irqsave(&session->skb_queue.lock, flags); + skb_queue_walk(&session->skb_queue, do_skb) { + do_skcb = j1939_skb_to_cb(do_skb); + + if (offset_start >= do_skcb->offset && + offset_start < (do_skcb->offset + do_skb->len)) { + skb = do_skb; + } + } + spin_unlock_irqrestore(&session->skb_queue.lock, flags); + + if (!skb) + netdev_dbg(priv->ndev, "%s: 0x%p: no skb found for start: %i, queue size: %i\n", + __func__, session, offset_start, + skb_queue_len(&session->skb_queue)); + + return skb; +} + +/* see if we are receiver + * returns 0 for broadcasts, although we will receive them + */ +static inline int j1939_tp_im_receiver(const struct j1939_sk_buff_cb *skcb) +{ + return skcb->flags & J1939_ECU_LOCAL_DST; +} + +/* see if we are sender */ +static inline int j1939_tp_im_transmitter(const struct j1939_sk_buff_cb *skcb) +{ + return skcb->flags & J1939_ECU_LOCAL_SRC; +} + +/* see if we are involved as either receiver or transmitter */ +static int j1939_tp_im_involved(const struct j1939_sk_buff_cb *skcb, bool swap) +{ + if (swap) + return j1939_tp_im_receiver(skcb); + else + return j1939_tp_im_transmitter(skcb); +} + +static int j1939_tp_im_involved_anydir(struct j1939_sk_buff_cb *skcb) +{ + return skcb->flags & (J1939_ECU_LOCAL_SRC | J1939_ECU_LOCAL_DST); +} + +/* extract pgn from flow-ctl message */ +static inline pgn_t j1939_xtp_ctl_to_pgn(const u8 *dat) +{ + pgn_t pgn; + + pgn = (dat[7] << 16) | (dat[6] << 8) | (dat[5] << 0); + if (j1939_pgn_is_pdu1(pgn)) + pgn &= 0xffff00; + return pgn; +} + +static inline unsigned int j1939_tp_ctl_to_size(const u8 *dat) +{ + return (dat[2] << 8) + (dat[1] << 0); +} + +static inline unsigned int j1939_etp_ctl_to_packet(const u8 *dat) +{ + return (dat[4] << 16) | (dat[3] << 8) | (dat[2] << 0); +} + +static inline unsigned int j1939_etp_ctl_to_size(const u8 *dat) +{ + return (dat[4] << 24) | (dat[3] << 16) | + (dat[2] << 8) | (dat[1] << 0); +} + +/* find existing session: + * reverse: swap cb's src & dst + * there is no problem with matching broadcasts, since + * broadcasts (no dst, no da) would never call this + * with reverse == true + */ +static bool j1939_session_match(struct j1939_addr 
*se_addr, + struct j1939_addr *sk_addr, bool reverse) +{ + if (se_addr->type != sk_addr->type) + return false; + + if (reverse) { + if (se_addr->src_name) { + if (se_addr->src_name != sk_addr->dst_name) + return false; + } else if (se_addr->sa != sk_addr->da) { + return false; + } + + if (se_addr->dst_name) { + if (se_addr->dst_name != sk_addr->src_name) + return false; + } else if (se_addr->da != sk_addr->sa) { + return false; + } + } else { + if (se_addr->src_name) { + if (se_addr->src_name != sk_addr->src_name) + return false; + } else if (se_addr->sa != sk_addr->sa) { + return false; + } + + if (se_addr->dst_name) { + if (se_addr->dst_name != sk_addr->dst_name) + return false; + } else if (se_addr->da != sk_addr->da) { + return false; + } + } + + return true; +} + +static struct +j1939_session *j1939_session_get_by_addr_locked(struct j1939_priv *priv, + struct list_head *root, + struct j1939_addr *addr, + bool reverse, bool transmitter) +{ + struct j1939_session *session; + + lockdep_assert_held(&priv->active_session_list_lock); + + list_for_each_entry(session, root, active_session_list_entry) { + j1939_session_get(session); + if (j1939_session_match(&session->skcb.addr, addr, reverse) && + session->transmission == transmitter) + return session; + j1939_session_put(session); + } + + return NULL; +} + +static struct +j1939_session *j1939_session_get_simple(struct j1939_priv *priv, + struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + + lockdep_assert_held(&priv->active_session_list_lock); + + list_for_each_entry(session, &priv->active_session_list, + active_session_list_entry) { + j1939_session_get(session); + if (session->skcb.addr.type == J1939_SIMPLE && + session->tskey == skcb->tskey && session->sk == skb->sk) + return session; + j1939_session_put(session); + } + + return NULL; +} + +static struct +j1939_session *j1939_session_get_by_addr(struct j1939_priv *priv, + struct j1939_addr *addr, + bool reverse, bool transmitter) +{ + struct j1939_session *session; + + j1939_session_list_lock(priv); + session = j1939_session_get_by_addr_locked(priv, + &priv->active_session_list, + addr, reverse, transmitter); + j1939_session_list_unlock(priv); + + return session; +} + +static void j1939_skbcb_swap(struct j1939_sk_buff_cb *skcb) +{ + u8 tmp = 0; + + swap(skcb->addr.dst_name, skcb->addr.src_name); + swap(skcb->addr.da, skcb->addr.sa); + + /* swap SRC and DST flags, leave other untouched */ + if (skcb->flags & J1939_ECU_LOCAL_SRC) + tmp |= J1939_ECU_LOCAL_DST; + if (skcb->flags & J1939_ECU_LOCAL_DST) + tmp |= J1939_ECU_LOCAL_SRC; + skcb->flags &= ~(J1939_ECU_LOCAL_SRC | J1939_ECU_LOCAL_DST); + skcb->flags |= tmp; +} + +static struct +sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv, + const struct j1939_sk_buff_cb *re_skcb, + bool ctl, + bool swap_src_dst) +{ + struct sk_buff *skb; + struct j1939_sk_buff_cb *skcb; + + skb = alloc_skb(sizeof(struct can_frame) + sizeof(struct can_skb_priv), + GFP_ATOMIC); + if (unlikely(!skb)) + return ERR_PTR(-ENOMEM); + + skb->dev = priv->ndev; + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = priv->ndev->ifindex; + /* reserve CAN header */ + skb_reserve(skb, offsetof(struct can_frame, data)); + + memcpy(skb->cb, re_skcb, sizeof(skb->cb)); + skcb = j1939_skb_to_cb(skb); + if (swap_src_dst) + j1939_skbcb_swap(skcb); + + if (ctl) { + if (skcb->addr.type == J1939_ETP) + skcb->addr.pgn = J1939_ETP_PGN_CTL; + else + skcb->addr.pgn = J1939_TP_PGN_CTL; + } else { + if (skcb->addr.type == 
J1939_ETP) + skcb->addr.pgn = J1939_ETP_PGN_DAT; + else + skcb->addr.pgn = J1939_TP_PGN_DAT; + } + + return skb; +} + +/* TP transmit packet functions */ +static int j1939_tp_tx_dat(struct j1939_session *session, + const u8 *dat, int len) +{ + struct j1939_priv *priv = session->priv; + struct sk_buff *skb; + + skb = j1939_tp_tx_dat_new(priv, &session->skcb, + false, false); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + skb_put_data(skb, dat, len); + if (j1939_tp_padding && len < 8) + memset(skb_put(skb, 8 - len), 0xff, 8 - len); + + return j1939_send_one(priv, skb); +} + +static int j1939_xtp_do_tx_ctl(struct j1939_priv *priv, + const struct j1939_sk_buff_cb *re_skcb, + bool swap_src_dst, pgn_t pgn, const u8 *dat) +{ + struct sk_buff *skb; + u8 *skdat; + + if (!j1939_tp_im_involved(re_skcb, swap_src_dst)) + return 0; + + skb = j1939_tp_tx_dat_new(priv, re_skcb, true, swap_src_dst); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + skdat = skb_put(skb, 8); + memcpy(skdat, dat, 5); + skdat[5] = (pgn >> 0); + skdat[6] = (pgn >> 8); + skdat[7] = (pgn >> 16); + + return j1939_send_one(priv, skb); +} + +static inline int j1939_tp_tx_ctl(struct j1939_session *session, + bool swap_src_dst, const u8 *dat) +{ + struct j1939_priv *priv = session->priv; + + return j1939_xtp_do_tx_ctl(priv, &session->skcb, + swap_src_dst, + session->skcb.addr.pgn, dat); +} + +static int j1939_xtp_tx_abort(struct j1939_priv *priv, + const struct j1939_sk_buff_cb *re_skcb, + bool swap_src_dst, + enum j1939_xtp_abort err, + pgn_t pgn) +{ + u8 dat[5]; + + if (!j1939_tp_im_involved(re_skcb, swap_src_dst)) + return 0; + + memset(dat, 0xff, sizeof(dat)); + dat[0] = J1939_TP_CMD_ABORT; + dat[1] = err; + return j1939_xtp_do_tx_ctl(priv, re_skcb, swap_src_dst, pgn, dat); +} + +void j1939_tp_schedule_txtimer(struct j1939_session *session, int msec) +{ + j1939_session_get(session); + hrtimer_start(&session->txtimer, ms_to_ktime(msec), + HRTIMER_MODE_REL_SOFT); +} + +static inline void j1939_tp_set_rxtimeout(struct j1939_session *session, + int msec) +{ + j1939_session_rxtimer_cancel(session); + j1939_session_get(session); + hrtimer_start(&session->rxtimer, ms_to_ktime(msec), + HRTIMER_MODE_REL_SOFT); +} + +static int j1939_session_tx_rts(struct j1939_session *session) +{ + u8 dat[8]; + int ret; + + memset(dat, 0xff, sizeof(dat)); + + dat[1] = (session->total_message_size >> 0); + dat[2] = (session->total_message_size >> 8); + dat[3] = session->pkt.total; + + if (session->skcb.addr.type == J1939_ETP) { + dat[0] = J1939_ETP_CMD_RTS; + dat[1] = (session->total_message_size >> 0); + dat[2] = (session->total_message_size >> 8); + dat[3] = (session->total_message_size >> 16); + dat[4] = (session->total_message_size >> 24); + } else if (j1939_cb_is_broadcast(&session->skcb)) { + dat[0] = J1939_TP_CMD_BAM; + /* fake cts for broadcast */ + session->pkt.tx = 0; + } else { + dat[0] = J1939_TP_CMD_RTS; + dat[4] = dat[3]; + } + + if (dat[0] == session->last_txcmd) + /* done already */ + return 0; + + ret = j1939_tp_tx_ctl(session, false, dat); + if (ret < 0) + return ret; + + session->last_txcmd = dat[0]; + if (dat[0] == J1939_TP_CMD_BAM) + j1939_tp_schedule_txtimer(session, 50); + + j1939_tp_set_rxtimeout(session, 1250); + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + return 0; +} + +static int j1939_session_tx_dpo(struct j1939_session *session) +{ + unsigned int pkt; + u8 dat[8]; + int ret; + + memset(dat, 0xff, sizeof(dat)); + + dat[0] = J1939_ETP_CMD_DPO; + session->pkt.dpo = session->pkt.tx_acked; + pkt = 
session->pkt.dpo;
+ dat[1] = session->pkt.last - session->pkt.tx_acked;
+ dat[2] = (pkt >> 0);
+ dat[3] = (pkt >> 8);
+ dat[4] = (pkt >> 16);
+
+ ret = j1939_tp_tx_ctl(session, false, dat);
+ if (ret < 0)
+ return ret;
+
+ session->last_txcmd = dat[0];
+ j1939_tp_set_rxtimeout(session, 1250);
+ session->pkt.tx = session->pkt.tx_acked;
+
+ netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
+
+ return 0;
+}
+
+static int j1939_session_tx_dat(struct j1939_session *session)
+{
+ struct j1939_priv *priv = session->priv;
+ struct j1939_sk_buff_cb *skcb;
+ int offset, pkt_done, pkt_end;
+ unsigned int len, pdelay;
+ struct sk_buff *se_skb;
+ const u8 *tpdat;
+ int ret = 0;
+ u8 dat[8];
+
+ se_skb = j1939_session_skb_find(session);
+ if (!se_skb)
+ return -ENOBUFS;
+
+ skcb = j1939_skb_to_cb(se_skb);
+ tpdat = se_skb->data;
+ ret = 0;
+ pkt_done = 0;
+ if (session->skcb.addr.type != J1939_ETP &&
+ j1939_cb_is_broadcast(&session->skcb))
+ pkt_end = session->pkt.total;
+ else
+ pkt_end = session->pkt.last;
+
+ while (session->pkt.tx < pkt_end) {
+ dat[0] = session->pkt.tx - session->pkt.dpo + 1;
+ offset = (session->pkt.tx * 7) - skcb->offset;
+ len = se_skb->len - offset;
+ if (len > 7)
+ len = 7;
+
+ memcpy(&dat[1], &tpdat[offset], len);
+ ret = j1939_tp_tx_dat(session, dat, len + 1);
+ if (ret < 0) {
+ /* ENOBUFS == CAN interface TX queue is full */
+ if (ret != -ENOBUFS)
+ netdev_alert(priv->ndev,
+ "%s: 0x%p: queue data error: %i\n",
+ __func__, session, ret);
+ break;
+ }
+
+ session->last_txcmd = 0xff;
+ pkt_done++;
+ session->pkt.tx++;
+ pdelay = j1939_cb_is_broadcast(&session->skcb) ? 50 :
+ j1939_tp_packet_delay;
+
+ if (session->pkt.tx < session->pkt.total && pdelay) {
+ j1939_tp_schedule_txtimer(session, pdelay);
+ break;
+ }
+ }
+
+ if (pkt_done)
+ j1939_tp_set_rxtimeout(session, 250);
+
+ return ret;
+}
+
+static int j1939_xtp_txnext_transmitter(struct j1939_session *session)
+{
+ struct j1939_priv *priv = session->priv;
+ int ret = 0;
+
+ if (!j1939_tp_im_transmitter(&session->skcb)) {
+ netdev_alert(priv->ndev, "%s: 0x%p: called although we are not the transmitter!\n",
+ __func__, session);
+ return -EINVAL;
+ }
+
+ switch (session->last_cmd) {
+ case 0:
+ ret = j1939_session_tx_rts(session);
+ break;
+
+ case J1939_ETP_CMD_CTS:
+ if (session->last_txcmd != J1939_ETP_CMD_DPO) {
+ ret = j1939_session_tx_dpo(session);
+ if (ret)
+ return ret;
+ }
+
+ /* fall through */
+ case J1939_TP_CMD_CTS:
+ case 0xff: /* did some data */
+ case J1939_ETP_CMD_DPO:
+ case J1939_TP_CMD_BAM:
+ ret = j1939_session_tx_dat(session);
+
+ break;
+ default:
+ netdev_alert(priv->ndev, "%s: 0x%p: unexpected last_cmd: %x\n",
+ __func__, session, session->last_cmd);
+ }
+
+ return ret;
+}
+
+static int j1939_session_tx_cts(struct j1939_session *session)
+{
+ struct j1939_priv *priv = session->priv;
+ unsigned int pkt, len;
+ int ret;
+ u8 dat[8];
+
+ if (!j1939_sk_recv_match(priv, &session->skcb))
+ return -ENOENT;
+
+ len = session->pkt.total - session->pkt.rx;
+ len = min3(len, session->pkt.block, j1939_tp_block ?: 255);
+ memset(dat, 0xff, sizeof(dat));
+
+ if (session->skcb.addr.type == J1939_ETP) {
+ pkt = session->pkt.rx + 1;
+ dat[0] = J1939_ETP_CMD_CTS;
+ dat[1] = len;
+ dat[2] = (pkt >> 0);
+ dat[3] = (pkt >> 8);
+ dat[4] = (pkt >> 16);
+ } else {
+ dat[0] = J1939_TP_CMD_CTS;
+ dat[1] = len;
+ dat[2] = session->pkt.rx + 1;
+ }
+
+ if (dat[0] == session->last_txcmd)
+ /* done already */
+ return 0;
+
+ ret = j1939_tp_tx_ctl(session, true, dat);
+ if (ret < 0)
+ return ret;
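+
+ /* A CTS with zero packets (CTS(0), the "hold" frame) asks the
+ * transmitter to wait; it must not be recorded as our last command,
+ * otherwise the following real CTS would be skipped as a duplicate
+ * by the "done already" check above.
+ */
+ if (len)
+ /* only mark cts 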
done when len is set */
+ session->last_txcmd = dat[0];
+ j1939_tp_set_rxtimeout(session, 1250);
+
+ netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
+
+ return 0;
+}
+
+static int j1939_session_tx_eoma(struct j1939_session *session)
+{
+ struct j1939_priv *priv = session->priv;
+ u8 dat[8];
+ int ret;
+
+ if (!j1939_sk_recv_match(priv, &session->skcb))
+ return -ENOENT;
+
+ memset(dat, 0xff, sizeof(dat));
+
+ if (session->skcb.addr.type == J1939_ETP) {
+ dat[0] = J1939_ETP_CMD_EOMA;
+ dat[1] = session->total_message_size >> 0;
+ dat[2] = session->total_message_size >> 8;
+ dat[3] = session->total_message_size >> 16;
+ dat[4] = session->total_message_size >> 24;
+ } else {
+ dat[0] = J1939_TP_CMD_EOMA;
+ dat[1] = session->total_message_size;
+ dat[2] = session->total_message_size >> 8;
+ dat[3] = session->pkt.total;
+ }
+
+ if (dat[0] == session->last_txcmd)
+ /* done already */
+ return 0;
+
+ ret = j1939_tp_tx_ctl(session, true, dat);
+ if (ret < 0)
+ return ret;
+
+ session->last_txcmd = dat[0];
+
+ /* wait for the EOMA packet to come in */
+ j1939_tp_set_rxtimeout(session, 1250);
+
+ netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
+
+ return 0;
+}
+
+static int j1939_xtp_txnext_receiver(struct j1939_session *session)
+{
+ struct j1939_priv *priv = session->priv;
+ int ret = 0;
+
+ if (!j1939_tp_im_receiver(&session->skcb)) {
+ netdev_alert(priv->ndev, "%s: 0x%p: called although we are not the receiver!\n",
+ __func__, session);
+ return -EINVAL;
+ }
+
+ switch (session->last_cmd) {
+ case J1939_TP_CMD_RTS:
+ case J1939_ETP_CMD_RTS:
+ ret = j1939_session_tx_cts(session);
+ break;
+
+ case J1939_ETP_CMD_CTS:
+ case J1939_TP_CMD_CTS:
+ case 0xff: /* did some data */
+ case J1939_ETP_CMD_DPO:
+ if ((session->skcb.addr.type == J1939_TP &&
+ j1939_cb_is_broadcast(&session->skcb)))
+ break;
+
+ if (session->pkt.rx >= session->pkt.total) {
+ ret = j1939_session_tx_eoma(session);
+ } else if (session->pkt.rx >= session->pkt.last) {
+ session->last_txcmd = 0;
+ ret = j1939_session_tx_cts(session);
+ }
+ break;
+ default:
+ netdev_alert(priv->ndev, "%s: 0x%p: unexpected last_cmd: %x\n",
+ __func__, session, session->last_cmd);
+ }
+
+ return ret;
+}
+
+static int j1939_simple_txnext(struct j1939_session *session)
+{
+ struct j1939_priv *priv = session->priv;
+ struct sk_buff *se_skb = j1939_session_skb_find(session);
+ struct sk_buff *skb;
+ int ret;
+
+ if (!se_skb)
+ return 0;
+
+ skb = skb_clone(se_skb, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ can_skb_set_owner(skb, se_skb->sk);
+
+ j1939_tp_set_rxtimeout(session, J1939_SIMPLE_ECHO_TIMEOUT_MS);
+
+ ret = j1939_send_one(priv, skb);
+ if (ret)
+ return ret;
+
+ j1939_sk_errqueue(session, J1939_ERRQUEUE_SCHED);
+ j1939_sk_queue_activate_next(session);
+
+ return 0;
+}
+
+static bool j1939_session_deactivate_locked(struct j1939_session *session)
+{
+ bool active = false;
+
+ lockdep_assert_held(&session->priv->active_session_list_lock);
+
+ if (session->state >= J1939_SESSION_ACTIVE &&
+ session->state < J1939_SESSION_ACTIVE_MAX) {
+ active = true;
+
+ list_del_init(&session->active_session_list_entry);
+ session->state = J1939_SESSION_DONE;
+ j1939_session_put(session);
+ }
+
+ return active;
+}
+
+static bool j1939_session_deactivate(struct j1939_session *session)
+{
+ bool active;
+
+ j1939_session_list_lock(session->priv);
+ active = j1939_session_deactivate_locked(session);
+ j1939_session_list_unlock(session->priv);
+
+ return active;
+}
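+
+/* Deactivate the session and, if it actually was still active, let the
+ * next session queued on the same socket take over. The reference held
+ * by the active session list is dropped in
+ * j1939_session_deactivate_locked(); callers keep their own reference.
+ */
+static void
+j1939_session_deactivate_activate_next(struct j1939_session 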
*session)
+{
+ if (j1939_session_deactivate(session))
+ j1939_sk_queue_activate_next(session);
+}
+
+static void j1939_session_cancel(struct j1939_session *session,
+ enum j1939_xtp_abort err)
+{
+ struct j1939_priv *priv = session->priv;
+
+ WARN_ON_ONCE(!err);
+
+ session->err = j1939_xtp_abort_to_errno(priv, err);
+ /* do not send aborts on incoming broadcasts */
+ if (!j1939_cb_is_broadcast(&session->skcb)) {
+ session->state = J1939_SESSION_WAITING_ABORT;
+ j1939_xtp_tx_abort(priv, &session->skcb,
+ !session->transmission,
+ err, session->skcb.addr.pgn);
+ }
+
+ if (session->sk)
+ j1939_sk_send_loop_abort(session->sk, session->err);
+}
+
+static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
+{
+ struct j1939_session *session =
+ container_of(hrtimer, struct j1939_session, txtimer);
+ struct j1939_priv *priv = session->priv;
+ int ret = 0;
+
+ if (session->skcb.addr.type == J1939_SIMPLE) {
+ ret = j1939_simple_txnext(session);
+ } else {
+ if (session->transmission)
+ ret = j1939_xtp_txnext_transmitter(session);
+ else
+ ret = j1939_xtp_txnext_receiver(session);
+ }
+
+ switch (ret) {
+ case -ENOBUFS:
+ /* Retry limit is currently arbitrarily chosen */
+ if (session->tx_retry < J1939_XTP_TX_RETRY_LIMIT) {
+ session->tx_retry++;
+ j1939_tp_schedule_txtimer(session,
+ 10 + prandom_u32_max(16));
+ } else {
+ netdev_alert(priv->ndev, "%s: 0x%p: tx retry count reached\n",
+ __func__, session);
+ session->err = -ENETUNREACH;
+ j1939_session_rxtimer_cancel(session);
+ j1939_session_deactivate_activate_next(session);
+ }
+ break;
+ case -ENETDOWN:
+ /* In this case we should get a netdev_event(), all active
+ * sessions will be cleared by
+ * j1939_cancel_all_active_sessions(). So handle this as an
+ * error, but let j1939_cancel_all_active_sessions() do the
+ * cleanup including propagation of the error to user space.
+ */
+ break;
+ case 0:
+ session->tx_retry = 0;
+ break;
+ default:
+ netdev_alert(priv->ndev, "%s: 0x%p: tx aborted with unknown reason: %i\n",
+ __func__, session, ret);
+ if (session->skcb.addr.type != J1939_SIMPLE) {
+ j1939_tp_set_rxtimeout(session,
+ J1939_XTP_ABORT_TIMEOUT_MS);
+ j1939_session_cancel(session, J1939_XTP_ABORT_OTHER);
+ } else {
+ session->err = ret;
+ j1939_session_rxtimer_cancel(session);
+ j1939_session_deactivate_activate_next(session);
+ }
+ }
+
+ j1939_session_put(session);
+
+ return HRTIMER_NORESTART;
+}
+
+static void j1939_session_completed(struct j1939_session *session)
+{
+ struct sk_buff *skb;
+
+ if (!session->transmission) {
+ skb = j1939_session_skb_find(session);
+ /* distribute among j1939 receivers */
+ j1939_sk_recv(session->priv, skb);
+ }
+
+ j1939_session_deactivate_activate_next(session);
+}
+
+static enum hrtimer_restart j1939_tp_rxtimer(struct hrtimer *hrtimer)
+{
+ struct j1939_session *session = container_of(hrtimer,
+ struct j1939_session,
+ rxtimer);
+ struct j1939_priv *priv = session->priv;
+
+ if (session->state == J1939_SESSION_WAITING_ABORT) {
+ netdev_alert(priv->ndev, "%s: 0x%p: abort rx timeout. Force session deactivation\n",
+ __func__, session);
+
+ j1939_session_deactivate_activate_next(session);
+
+ } else if (session->skcb.addr.type == J1939_SIMPLE) {
+ netdev_alert(priv->ndev, "%s: 0x%p: Timeout. Failed to send simple message.\n",
+ __func__, session);
+
+ /* The message is probably stuck in the CAN controller and can
+ * be sent as soon as the CAN bus is in a working state again.
+ */ + session->err = -ETIME; + j1939_session_deactivate(session); + } else { + netdev_alert(priv->ndev, "%s: 0x%p: rx timeout, send abort\n", + __func__, session); + + j1939_session_list_lock(session->priv); + if (session->state >= J1939_SESSION_ACTIVE && + session->state < J1939_SESSION_ACTIVE_MAX) { + j1939_session_get(session); + hrtimer_start(&session->rxtimer, + ms_to_ktime(J1939_XTP_ABORT_TIMEOUT_MS), + HRTIMER_MODE_REL_SOFT); + j1939_session_cancel(session, J1939_XTP_ABORT_TIMEOUT); + } + j1939_session_list_unlock(session->priv); + } + + j1939_session_put(session); + + return HRTIMER_NORESTART; +} + +static bool j1939_xtp_rx_cmd_bad_pgn(struct j1939_session *session, + const struct sk_buff *skb) +{ + const struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + pgn_t pgn = j1939_xtp_ctl_to_pgn(skb->data); + struct j1939_priv *priv = session->priv; + enum j1939_xtp_abort abort = J1939_XTP_NO_ABORT; + u8 cmd = skb->data[0]; + + if (session->skcb.addr.pgn == pgn) + return false; + + switch (cmd) { + case J1939_TP_CMD_BAM: + abort = J1939_XTP_NO_ABORT; + break; + + case J1939_ETP_CMD_RTS: + case J1939_TP_CMD_RTS: /* fall through */ + abort = J1939_XTP_ABORT_BUSY; + break; + + case J1939_ETP_CMD_CTS: + case J1939_TP_CMD_CTS: /* fall through */ + abort = J1939_XTP_ABORT_ECTS_UNXPECTED_PGN; + break; + + case J1939_ETP_CMD_DPO: + abort = J1939_XTP_ABORT_BAD_EDPO_PGN; + break; + + case J1939_ETP_CMD_EOMA: + case J1939_TP_CMD_EOMA: /* fall through */ + abort = J1939_XTP_ABORT_OTHER; + break; + + case J1939_ETP_CMD_ABORT: /* && J1939_TP_CMD_ABORT */ + abort = J1939_XTP_NO_ABORT; + break; + + default: + WARN_ON_ONCE(1); + break; + } + + netdev_warn(priv->ndev, "%s: 0x%p: CMD 0x%02x with PGN 0x%05x for running session with different PGN 0x%05x.\n", + __func__, session, cmd, pgn, session->skcb.addr.pgn); + if (abort != J1939_XTP_NO_ABORT) + j1939_xtp_tx_abort(priv, skcb, true, abort, pgn); + + return true; +} + +static void j1939_xtp_rx_abort_one(struct j1939_priv *priv, struct sk_buff *skb, + bool reverse, bool transmitter) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + u8 abort = skb->data[1]; + + session = j1939_session_get_by_addr(priv, &skcb->addr, reverse, + transmitter); + if (!session) + return; + + if (j1939_xtp_rx_cmd_bad_pgn(session, skb)) + goto abort_put; + + netdev_info(priv->ndev, "%s: 0x%p: 0x%05x: (%u) %s\n", __func__, + session, j1939_xtp_ctl_to_pgn(skb->data), abort, + j1939_xtp_abort_to_str(abort)); + + j1939_session_timers_cancel(session); + session->err = j1939_xtp_abort_to_errno(priv, abort); + if (session->sk) + j1939_sk_send_loop_abort(session->sk, session->err); + j1939_session_deactivate_activate_next(session); + +abort_put: + j1939_session_put(session); +} + +/* abort packets may come in 2 directions */ +static void +j1939_xtp_rx_abort(struct j1939_priv *priv, struct sk_buff *skb, + bool transmitter) +{ + j1939_xtp_rx_abort_one(priv, skb, false, transmitter); + j1939_xtp_rx_abort_one(priv, skb, true, transmitter); +} + +static void +j1939_xtp_rx_eoma_one(struct j1939_session *session, struct sk_buff *skb) +{ + if (j1939_xtp_rx_cmd_bad_pgn(session, skb)) + return; + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + session->pkt.tx_acked = session->pkt.total; + j1939_session_timers_cancel(session); + /* transmitted without problems */ + j1939_session_completed(session); +} + +static void +j1939_xtp_rx_eoma(struct j1939_priv *priv, struct sk_buff *skb, + bool transmitter) +{ + struct j1939_sk_buff_cb 
*skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + + session = j1939_session_get_by_addr(priv, &skcb->addr, true, + transmitter); + if (!session) + return; + + j1939_xtp_rx_eoma_one(session, skb); + j1939_session_put(session); +} + +static void +j1939_xtp_rx_cts_one(struct j1939_session *session, struct sk_buff *skb) +{ + enum j1939_xtp_abort err = J1939_XTP_ABORT_FAULT; + unsigned int pkt; + const u8 *dat; + + dat = skb->data; + + if (j1939_xtp_rx_cmd_bad_pgn(session, skb)) + return; + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + if (session->last_cmd == dat[0]) { + err = J1939_XTP_ABORT_DUP_SEQ; + goto out_session_cancel; + } + + if (session->skcb.addr.type == J1939_ETP) + pkt = j1939_etp_ctl_to_packet(dat); + else + pkt = dat[2]; + + if (!pkt) + goto out_session_cancel; + else if (dat[1] > session->pkt.block /* 0xff for etp */) + goto out_session_cancel; + + /* set packet counters only when not CTS(0) */ + session->pkt.tx_acked = pkt - 1; + j1939_session_skb_drop_old(session); + session->pkt.last = session->pkt.tx_acked + dat[1]; + if (session->pkt.last > session->pkt.total) + /* safety measure */ + session->pkt.last = session->pkt.total; + /* TODO: do not set tx here, do it in txtimer */ + session->pkt.tx = session->pkt.tx_acked; + + session->last_cmd = dat[0]; + if (dat[1]) { + j1939_tp_set_rxtimeout(session, 1250); + if (session->transmission) { + if (session->pkt.tx_acked) + j1939_sk_errqueue(session, + J1939_ERRQUEUE_SCHED); + j1939_session_txtimer_cancel(session); + j1939_tp_schedule_txtimer(session, 0); + } + } else { + /* CTS(0) */ + j1939_tp_set_rxtimeout(session, 550); + } + return; + + out_session_cancel: + j1939_session_timers_cancel(session); + j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS); + j1939_session_cancel(session, err); +} + +static void +j1939_xtp_rx_cts(struct j1939_priv *priv, struct sk_buff *skb, bool transmitter) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + + session = j1939_session_get_by_addr(priv, &skcb->addr, true, + transmitter); + if (!session) + return; + j1939_xtp_rx_cts_one(session, skb); + j1939_session_put(session); +} + +static struct j1939_session *j1939_session_new(struct j1939_priv *priv, + struct sk_buff *skb, size_t size) +{ + struct j1939_session *session; + struct j1939_sk_buff_cb *skcb; + + session = kzalloc(sizeof(*session), gfp_any()); + if (!session) + return NULL; + + INIT_LIST_HEAD(&session->active_session_list_entry); + INIT_LIST_HEAD(&session->sk_session_queue_entry); + kref_init(&session->kref); + + j1939_priv_get(priv); + session->priv = priv; + session->total_message_size = size; + session->state = J1939_SESSION_NEW; + + skb_queue_head_init(&session->skb_queue); + skb_queue_tail(&session->skb_queue, skb); + + skcb = j1939_skb_to_cb(skb); + memcpy(&session->skcb, skcb, sizeof(session->skcb)); + + hrtimer_init(&session->txtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_SOFT); + session->txtimer.function = j1939_tp_txtimer; + hrtimer_init(&session->rxtimer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL_SOFT); + session->rxtimer.function = j1939_tp_rxtimer; + + netdev_dbg(priv->ndev, "%s: 0x%p: sa: %02x, da: %02x\n", + __func__, session, skcb->addr.sa, skcb->addr.da); + + return session; +} + +static struct +j1939_session *j1939_session_fresh_new(struct j1939_priv *priv, + int size, + const struct j1939_sk_buff_cb *rel_skcb) +{ + struct sk_buff *skb; + struct j1939_sk_buff_cb *skcb; + struct j1939_session *session; + + skb = alloc_skb(size + 
sizeof(struct can_skb_priv), GFP_ATOMIC); + if (unlikely(!skb)) + return NULL; + + skb->dev = priv->ndev; + can_skb_reserve(skb); + can_skb_prv(skb)->ifindex = priv->ndev->ifindex; + skcb = j1939_skb_to_cb(skb); + memcpy(skcb, rel_skcb, sizeof(*skcb)); + + session = j1939_session_new(priv, skb, skb->len); + if (!session) { + kfree_skb(skb); + return NULL; + } + + /* alloc data area */ + skb_put(skb, size); + /* skb is recounted in j1939_session_new() */ + return session; +} + +int j1939_session_activate(struct j1939_session *session) +{ + struct j1939_priv *priv = session->priv; + struct j1939_session *active = NULL; + int ret = 0; + + j1939_session_list_lock(priv); + if (session->skcb.addr.type != J1939_SIMPLE) + active = j1939_session_get_by_addr_locked(priv, + &priv->active_session_list, + &session->skcb.addr, false, + session->transmission); + if (active) { + j1939_session_put(active); + ret = -EAGAIN; + } else { + WARN_ON_ONCE(session->state != J1939_SESSION_NEW); + list_add_tail(&session->active_session_list_entry, + &priv->active_session_list); + j1939_session_get(session); + session->state = J1939_SESSION_ACTIVE; + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", + __func__, session); + } + j1939_session_list_unlock(priv); + + return ret; +} + +static struct +j1939_session *j1939_xtp_rx_rts_session_new(struct j1939_priv *priv, + struct sk_buff *skb) +{ + enum j1939_xtp_abort abort = J1939_XTP_NO_ABORT; + struct j1939_sk_buff_cb skcb = *j1939_skb_to_cb(skb); + struct j1939_session *session; + const u8 *dat; + pgn_t pgn; + int len; + + netdev_dbg(priv->ndev, "%s\n", __func__); + + dat = skb->data; + pgn = j1939_xtp_ctl_to_pgn(dat); + skcb.addr.pgn = pgn; + + if (!j1939_sk_recv_match(priv, &skcb)) + return NULL; + + if (skcb.addr.type == J1939_ETP) { + len = j1939_etp_ctl_to_size(dat); + if (len > J1939_MAX_ETP_PACKET_SIZE) + abort = J1939_XTP_ABORT_FAULT; + else if (len > priv->tp_max_packet_size) + abort = J1939_XTP_ABORT_RESOURCE; + else if (len <= J1939_MAX_TP_PACKET_SIZE) + abort = J1939_XTP_ABORT_FAULT; + } else { + len = j1939_tp_ctl_to_size(dat); + if (len > J1939_MAX_TP_PACKET_SIZE) + abort = J1939_XTP_ABORT_FAULT; + else if (len > priv->tp_max_packet_size) + abort = J1939_XTP_ABORT_RESOURCE; + } + + if (abort != J1939_XTP_NO_ABORT) { + j1939_xtp_tx_abort(priv, &skcb, true, abort, pgn); + return NULL; + } + + session = j1939_session_fresh_new(priv, len, &skcb); + if (!session) { + j1939_xtp_tx_abort(priv, &skcb, true, + J1939_XTP_ABORT_RESOURCE, pgn); + return NULL; + } + + /* initialize the control buffer: plain copy */ + session->pkt.total = (len + 6) / 7; + session->pkt.block = 0xff; + if (skcb.addr.type != J1939_ETP) { + if (dat[3] != session->pkt.total) + netdev_alert(priv->ndev, "%s: 0x%p: strange total, %u != %u\n", + __func__, session, session->pkt.total, + dat[3]); + session->pkt.total = dat[3]; + session->pkt.block = min(dat[3], dat[4]); + } + + session->pkt.rx = 0; + session->pkt.tx = 0; + + WARN_ON_ONCE(j1939_session_activate(session)); + + return session; +} + +static int j1939_xtp_rx_rts_session_active(struct j1939_session *session, + struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_priv *priv = session->priv; + + if (!session->transmission) { + if (j1939_xtp_rx_cmd_bad_pgn(session, skb)) + return -EBUSY; + + /* RTS on active session */ + j1939_session_timers_cancel(session); + j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS); + j1939_session_cancel(session, J1939_XTP_ABORT_BUSY); + } + + if 
(session->last_cmd != 0) { + /* we received a second rts on the same connection */ + netdev_alert(priv->ndev, "%s: 0x%p: connection exists (%02x %02x). last cmd: %x\n", + __func__, session, skcb->addr.sa, skcb->addr.da, + session->last_cmd); + + j1939_session_timers_cancel(session); + j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS); + j1939_session_cancel(session, J1939_XTP_ABORT_BUSY); + + return -EBUSY; + } + + if (session->skcb.addr.sa != skcb->addr.sa || + session->skcb.addr.da != skcb->addr.da) + netdev_warn(priv->ndev, "%s: 0x%p: session->skcb.addr.sa=0x%02x skcb->addr.sa=0x%02x session->skcb.addr.da=0x%02x skcb->addr.da=0x%02x\n", + __func__, session, + session->skcb.addr.sa, skcb->addr.sa, + session->skcb.addr.da, skcb->addr.da); + /* make sure 'sa' & 'da' are correct ! + * They may be 'not filled in yet' for sending + * skb's, since they did not pass the Address Claim ever. + */ + session->skcb.addr.sa = skcb->addr.sa; + session->skcb.addr.da = skcb->addr.da; + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + return 0; +} + +static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb, + bool transmitter) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + u8 cmd = skb->data[0]; + + session = j1939_session_get_by_addr(priv, &skcb->addr, false, + transmitter); + + if (!session) { + if (transmitter) { + /* If we're the transmitter and this function is called, + * we received our own RTS. A session has already been + * created. + * + * For some reason, however, it might have been destroyed + * already. So don't create a new one here (using + * "j1939_xtp_rx_rts_session_new()") as this will be a + * receiver session. + * + * The reasons the session is already destroyed might + * be: + * - user space closed the socket and the session was + * aborted + * - session was aborted due to external abort message + */ + return; + } + session = j1939_xtp_rx_rts_session_new(priv, skb); + if (!session) + return; + } else { + if (j1939_xtp_rx_rts_session_active(session, skb)) { + j1939_session_put(session); + return; + } + } + session->last_cmd = cmd; + + j1939_tp_set_rxtimeout(session, 1250); + + if (cmd != J1939_TP_CMD_BAM && !session->transmission) { + j1939_session_txtimer_cancel(session); + j1939_tp_schedule_txtimer(session, 0); + } + + j1939_session_put(session); +} + +static void j1939_xtp_rx_dpo_one(struct j1939_session *session, + struct sk_buff *skb) +{ + const u8 *dat = skb->data; + + if (j1939_xtp_rx_cmd_bad_pgn(session, skb)) + return; + + netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); + + /* transmitted without problems */ + session->pkt.dpo = j1939_etp_ctl_to_packet(skb->data); + session->last_cmd = dat[0]; + j1939_tp_set_rxtimeout(session, 750); +} + +static void j1939_xtp_rx_dpo(struct j1939_priv *priv, struct sk_buff *skb, + bool transmitter) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + + session = j1939_session_get_by_addr(priv, &skcb->addr, false, + transmitter); + if (!session) { + netdev_info(priv->ndev, + "%s: no connection found\n", __func__); + return; + } + + j1939_xtp_rx_dpo_one(session, skb); + j1939_session_put(session); +} + +static void j1939_xtp_rx_dat_one(struct j1939_session *session, + struct sk_buff *skb) +{ + struct j1939_priv *priv = session->priv; + struct j1939_sk_buff_cb *skcb; + struct sk_buff *se_skb; + const u8 *dat; + u8 *tpdat; + int offset; + int nbytes; + bool final = false; + bool do_cts_eoma = 
false; + int packet; + + skcb = j1939_skb_to_cb(skb); + dat = skb->data; + if (skb->len <= 1) + /* makes no sense */ + goto out_session_cancel; + + switch (session->last_cmd) { + case 0xff: + break; + case J1939_ETP_CMD_DPO: + if (skcb->addr.type == J1939_ETP) + break; + /* fall through */ + case J1939_TP_CMD_BAM: /* fall through */ + case J1939_TP_CMD_CTS: /* fall through */ + if (skcb->addr.type != J1939_ETP) + break; + /* fall through */ + default: + netdev_info(priv->ndev, "%s: 0x%p: last %02x\n", __func__, + session, session->last_cmd); + goto out_session_cancel; + } + + packet = (dat[0] - 1 + session->pkt.dpo); + if (packet > session->pkt.total || + (session->pkt.rx + 1) > session->pkt.total) { + netdev_info(priv->ndev, "%s: 0x%p: should have been completed\n", + __func__, session); + goto out_session_cancel; + } + se_skb = j1939_session_skb_find(session); + if (!se_skb) { + netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__, + session); + goto out_session_cancel; + } + + skcb = j1939_skb_to_cb(se_skb); + offset = packet * 7 - skcb->offset; + nbytes = se_skb->len - offset; + if (nbytes > 7) + nbytes = 7; + if (nbytes <= 0 || (nbytes + 1) > skb->len) { + netdev_info(priv->ndev, "%s: 0x%p: nbytes %i, len %i\n", + __func__, session, nbytes, skb->len); + goto out_session_cancel; + } + + tpdat = se_skb->data; + memcpy(&tpdat[offset], &dat[1], nbytes); + if (packet == session->pkt.rx) + session->pkt.rx++; + + if (skcb->addr.type != J1939_ETP && + j1939_cb_is_broadcast(&session->skcb)) { + if (session->pkt.rx >= session->pkt.total) + final = true; + } else { + /* never final, an EOMA must follow */ + if (session->pkt.rx >= session->pkt.last) + do_cts_eoma = true; + } + + if (final) { + j1939_session_completed(session); + } else if (do_cts_eoma) { + j1939_tp_set_rxtimeout(session, 1250); + if (!session->transmission) + j1939_tp_schedule_txtimer(session, 0); + } else { + j1939_tp_set_rxtimeout(session, 250); + } + session->last_cmd = 0xff; + j1939_session_put(session); + + return; + + out_session_cancel: + j1939_session_timers_cancel(session); + j1939_tp_set_rxtimeout(session, J1939_XTP_ABORT_TIMEOUT_MS); + j1939_session_cancel(session, J1939_XTP_ABORT_FAULT); + j1939_session_put(session); +} + +static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb; + struct j1939_session *session; + + skcb = j1939_skb_to_cb(skb); + + if (j1939_tp_im_transmitter(skcb)) { + session = j1939_session_get_by_addr(priv, &skcb->addr, false, + true); + if (!session) + netdev_info(priv->ndev, "%s: no tx connection found\n", + __func__); + else + j1939_xtp_rx_dat_one(session, skb); + } + + if (j1939_tp_im_receiver(skcb)) { + session = j1939_session_get_by_addr(priv, &skcb->addr, false, + false); + if (!session) + netdev_info(priv->ndev, "%s: no rx connection found\n", + __func__); + else + j1939_xtp_rx_dat_one(session, skb); + } +} + +/* j1939 main intf */ +struct j1939_session *j1939_tp_send(struct j1939_priv *priv, + struct sk_buff *skb, size_t size) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + struct j1939_session *session; + int ret; + + if (skcb->addr.pgn == J1939_TP_PGN_DAT || + skcb->addr.pgn == J1939_TP_PGN_CTL || + skcb->addr.pgn == J1939_ETP_PGN_DAT || + skcb->addr.pgn == J1939_ETP_PGN_CTL) + /* avoid conflict */ + return ERR_PTR(-EDOM); + + if (size > priv->tp_max_packet_size) + return ERR_PTR(-EMSGSIZE); + + if (size <= 8) + skcb->addr.type = J1939_SIMPLE; + else if (size > J1939_MAX_TP_PACKET_SIZE) + skcb->addr.type = 
J1939_ETP; + else + skcb->addr.type = J1939_TP; + + if (skcb->addr.type == J1939_ETP && + j1939_cb_is_broadcast(skcb)) + return ERR_PTR(-EDESTADDRREQ); + + /* fill in addresses from names */ + ret = j1939_ac_fixup(priv, skb); + if (unlikely(ret)) + return ERR_PTR(ret); + + /* fix DST flags, it may be used there soon */ + if (j1939_address_is_unicast(skcb->addr.da) && + priv->ents[skcb->addr.da].nusers) + skcb->flags |= J1939_ECU_LOCAL_DST; + + /* src is always local, I'm sending ... */ + skcb->flags |= J1939_ECU_LOCAL_SRC; + + /* prepare new session */ + session = j1939_session_new(priv, skb, size); + if (!session) + return ERR_PTR(-ENOMEM); + + /* skb is recounted in j1939_session_new() */ + session->sk = skb->sk; + session->transmission = true; + session->pkt.total = (size + 6) / 7; + session->pkt.block = skcb->addr.type == J1939_ETP ? 255 : + min(j1939_tp_block ?: 255, session->pkt.total); + + if (j1939_cb_is_broadcast(&session->skcb)) + /* set the end-packet for broadcast */ + session->pkt.last = session->pkt.total; + + skcb->tskey = session->sk->sk_tskey++; + session->tskey = skcb->tskey; + + return session; +} + +static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + int extd = J1939_TP; + u8 cmd = skb->data[0]; + + switch (cmd) { + case J1939_ETP_CMD_RTS: + extd = J1939_ETP; + /* fall through */ + case J1939_TP_CMD_BAM: /* fall through */ + case J1939_TP_CMD_RTS: /* fall through */ + if (skcb->addr.type != extd) + return; + + if (cmd == J1939_TP_CMD_RTS && j1939_cb_is_broadcast(skcb)) { + netdev_alert(priv->ndev, "%s: rts without destination (%02x)\n", + __func__, skcb->addr.sa); + return; + } + + if (j1939_tp_im_transmitter(skcb)) + j1939_xtp_rx_rts(priv, skb, true); + + if (j1939_tp_im_receiver(skcb)) + j1939_xtp_rx_rts(priv, skb, false); + + break; + + case J1939_ETP_CMD_CTS: + extd = J1939_ETP; + /* fall through */ + case J1939_TP_CMD_CTS: + if (skcb->addr.type != extd) + return; + + if (j1939_tp_im_transmitter(skcb)) + j1939_xtp_rx_cts(priv, skb, false); + + if (j1939_tp_im_receiver(skcb)) + j1939_xtp_rx_cts(priv, skb, true); + + break; + + case J1939_ETP_CMD_DPO: + if (skcb->addr.type != J1939_ETP) + return; + + if (j1939_tp_im_transmitter(skcb)) + j1939_xtp_rx_dpo(priv, skb, true); + + if (j1939_tp_im_receiver(skcb)) + j1939_xtp_rx_dpo(priv, skb, false); + + break; + + case J1939_ETP_CMD_EOMA: + extd = J1939_ETP; + /* fall through */ + case J1939_TP_CMD_EOMA: + if (skcb->addr.type != extd) + return; + + if (j1939_tp_im_transmitter(skcb)) + j1939_xtp_rx_eoma(priv, skb, false); + + if (j1939_tp_im_receiver(skcb)) + j1939_xtp_rx_eoma(priv, skb, true); + + break; + + case J1939_ETP_CMD_ABORT: /* && J1939_TP_CMD_ABORT */ + if (j1939_tp_im_transmitter(skcb)) + j1939_xtp_rx_abort(priv, skb, true); + + if (j1939_tp_im_receiver(skcb)) + j1939_xtp_rx_abort(priv, skb, false); + + break; + default: + return; + } +} + +int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb); + + if (!j1939_tp_im_involved_anydir(skcb)) + return 0; + + switch (skcb->addr.pgn) { + case J1939_ETP_PGN_DAT: + skcb->addr.type = J1939_ETP; + /* fall through */ + case J1939_TP_PGN_DAT: + j1939_xtp_rx_dat(priv, skb); + break; + + case J1939_ETP_PGN_CTL: + skcb->addr.type = J1939_ETP; + /* fall through */ + case J1939_TP_PGN_CTL: + if (skb->len < 8) + return 0; /* Don't care. 
Nothing to extract here */ + + j1939_tp_cmd_recv(priv, skb); + break; + default: + return 0; /* no problem */ + } + return 1; /* "I processed the message" */ +} + +void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb) +{ + struct j1939_session *session; + + if (!skb->sk) + return; + + j1939_session_list_lock(priv); + session = j1939_session_get_simple(priv, skb); + j1939_session_list_unlock(priv); + if (!session) { + netdev_warn(priv->ndev, + "%s: Received already invalidated message\n", + __func__); + return; + } + + j1939_session_timers_cancel(session); + j1939_session_deactivate(session); + j1939_session_put(session); +} + +int j1939_cancel_active_session(struct j1939_priv *priv, struct sock *sk) +{ + struct j1939_session *session, *saved; + + netdev_dbg(priv->ndev, "%s, sk: %p\n", __func__, sk); + j1939_session_list_lock(priv); + list_for_each_entry_safe(session, saved, + &priv->active_session_list, + active_session_list_entry) { + if (!sk || sk == session->sk) { + j1939_session_timers_cancel(session); + session->err = ESHUTDOWN; + j1939_session_deactivate_locked(session); + } + } + j1939_session_list_unlock(priv); + return NOTIFY_DONE; +} + +void j1939_tp_init(struct j1939_priv *priv) +{ + spin_lock_init(&priv->active_session_list_lock); + INIT_LIST_HEAD(&priv->active_session_list); + priv->tp_max_packet_size = J1939_MAX_ETP_PACKET_SIZE; +} -- cgit v1.2.3 From f388ec7c16ad8676ee516a735a6b7588252f971d Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Tue, 16 Jul 2019 08:55:04 +0300 Subject: habanalabs: add comments on INFO IOCTL This patch adds some in-code documentation on the different opcodes of the INFO IOCTL. Signed-off-by: Oded Gabbay Reviewed-by: Omer Shpigelman --- include/uapi/misc/habanalabs.h | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 3956c226ca35..a5a1d0e7ec82 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -75,7 +75,19 @@ enum hl_device_status { HL_DEVICE_STATUS_MALFUNCTION }; -/* Opcode for management ioctl */ +/* Opcode for management ioctl + * + * HW_IP_INFO - Receive information about different IP blocks in the + * device. + * HL_INFO_HW_EVENTS - Receive an array describing how many times each event + * occurred since the last hard reset. + * HL_INFO_DRAM_USAGE - Retrieve the dram usage inside the device and of the + * specific context. This is relevant only for GOYA device. + * HL_INFO_HW_IDLE - Retrieve information about the idle status of each + * internal engine. + * HL_INFO_DEVICE_STATUS - Retrieve the device's status. This opcode doesn't + * require an open context. + */ #define HL_INFO_HW_IP_INFO 0 #define HL_INFO_HW_EVENTS 1 #define HL_INFO_DRAM_USAGE 2 -- cgit v1.2.3 From 4fd2cb15cd0894520422b3e7d9cf68b7e6548b13 Mon Sep 17 00:00:00 2001 From: Dotan Barak Date: Mon, 12 Aug 2019 10:23:33 +0300 Subject: habanalabs: explicitly set the queue-id enumerated numbers When looking at kernel log messages and when debugging user applications, we only see the queue id. This patch explicitly sets the queue id in the queue enumeration, which will be helpful for finding the queue name when we have its id. 
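As an illustration of the benefit, a hypothetical lookup helper (a sketch, not part of this patch) can now map an id taken from a log line back to its name, relying on the values staying stable:

	/* Illustrative sketch only: resolve a logged queue id to its name */
	static const char *goya_queue_name(enum goya_queue_id id)
	{
		switch (id) {
		case GOYA_QUEUE_ID_DMA_0:	return "DMA_0";		/* 0 */
		case GOYA_QUEUE_ID_CPU_PQ:	return "CPU_PQ";	/* 5 */
		case GOYA_QUEUE_ID_MME:		return "MME";		/* 6 */
		case GOYA_QUEUE_ID_TPC0:	return "TPC0";		/* 7 */
		case GOYA_QUEUE_ID_TPC7:	return "TPC7";		/* 14 */
		default:			return "unknown";
		}
	}

The explicit values also guard against silently renumbering this uapi enum if an entry is ever added or removed in the middle.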
Signed-off-by: Dotan Barak Reviewed-by: Oded Gabbay Reviewed-by: Greg Kroah-Hartman Signed-off-by: Oded Gabbay --- include/uapi/misc/habanalabs.h | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index a5a1d0e7ec82..6cf50177cd21 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -28,20 +28,20 @@ enum goya_queue_id { GOYA_QUEUE_ID_DMA_0 = 0, - GOYA_QUEUE_ID_DMA_1, - GOYA_QUEUE_ID_DMA_2, - GOYA_QUEUE_ID_DMA_3, - GOYA_QUEUE_ID_DMA_4, - GOYA_QUEUE_ID_CPU_PQ, - GOYA_QUEUE_ID_MME, /* Internal queues start here */ - GOYA_QUEUE_ID_TPC0, - GOYA_QUEUE_ID_TPC1, - GOYA_QUEUE_ID_TPC2, - GOYA_QUEUE_ID_TPC3, - GOYA_QUEUE_ID_TPC4, - GOYA_QUEUE_ID_TPC5, - GOYA_QUEUE_ID_TPC6, - GOYA_QUEUE_ID_TPC7, + GOYA_QUEUE_ID_DMA_1 = 1, + GOYA_QUEUE_ID_DMA_2 = 2, + GOYA_QUEUE_ID_DMA_3 = 3, + GOYA_QUEUE_ID_DMA_4 = 4, + GOYA_QUEUE_ID_CPU_PQ = 5, + GOYA_QUEUE_ID_MME = 6, /* Internal queues start here */ + GOYA_QUEUE_ID_TPC0 = 7, + GOYA_QUEUE_ID_TPC1 = 8, + GOYA_QUEUE_ID_TPC2 = 9, + GOYA_QUEUE_ID_TPC3 = 10, + GOYA_QUEUE_ID_TPC4 = 11, + GOYA_QUEUE_ID_TPC5 = 12, + GOYA_QUEUE_ID_TPC6 = 13, + GOYA_QUEUE_ID_TPC7 = 14, GOYA_QUEUE_ID_SIZE }; -- cgit v1.2.3 From 413cf576fd50297429e967b050d63067c997645c Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Tue, 27 Aug 2019 16:14:18 +0000 Subject: habanalabs: Make the Coresight timestamp perpetual The Coresight timestamp is enabled for a specific debug session using the HL_DEBUG_OP_TIMESTAMP opcode of the debug IOCTL. In order to have a perpetual timestamp that would be comparable between various debug sessions, this patch moves the timestamp enablement to be part of the HW initialization. The HL_DEBUG_OP_TIMESTAMP opcode is now deprecated and shouldn't be used. Old user-space that still calls it won't see any change in the behavior of the debug session. 
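For example, old user-space along these lines (a sketch assuming the usual debug IOCTL flow and the op/enable fields of struct hl_debug_args) still returns success, but the kernel ignores the request because the counter is now armed at HW init and left free-running:

	/* Sketch only: the deprecated opcode is accepted but acts as a no-op */
	struct hl_debug_args args = {
		.op = HL_DEBUG_OP_TIMESTAMP,
		.enable = 1,	/* used to arm the counter per debug session */
	};
	/* ioctl(fd, HL_IOCTL_DEBUG, &args) returns 0 without touching the HW */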
Signed-off-by: Tomer Tayar Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/misc/habanalabs/goya/goya.c | 23 +++++++++++++++++++++++ drivers/misc/habanalabs/goya/goya_coresight.c | 17 ++--------------- include/uapi/misc/habanalabs.h | 2 +- 3 files changed, 26 insertions(+), 16 deletions(-) (limited to 'include/uapi') diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index de275cb3bb98..0dd0b4429fee 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -2062,6 +2062,25 @@ static void goya_disable_msix(struct hl_device *hdev) goya->hw_cap_initialized &= ~HW_CAP_MSIX; } +static void goya_enable_timestamp(struct hl_device *hdev) +{ + /* Disable the timestamp counter */ + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0); + + /* Zero the lower/upper parts of the 64-bit counter */ + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0); + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0); + + /* Enable the counter */ + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1); +} + +static void goya_disable_timestamp(struct hl_device *hdev) +{ + /* Disable the timestamp counter */ + WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0); +} + static void goya_halt_engines(struct hl_device *hdev, bool hard_reset) { u32 wait_timeout_ms, cpu_timeout_ms; @@ -2102,6 +2121,8 @@ static void goya_halt_engines(struct hl_device *hdev, bool hard_reset) goya_disable_external_queues(hdev); goya_disable_internal_queues(hdev); + goya_disable_timestamp(hdev); + if (hard_reset) { goya_disable_msix(hdev); goya_mmu_remove_device_cpu_mappings(hdev); @@ -2504,6 +2525,8 @@ static int goya_hw_init(struct hl_device *hdev) goya_init_tpc_qmans(hdev); + goya_enable_timestamp(hdev); + /* MSI-X must be enabled before CPU queues are initialized */ rc = goya_enable_msix(hdev); if (rc) diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c index 3d77b1c20336..b4d406af1bed 100644 --- a/drivers/misc/habanalabs/goya/goya_coresight.c +++ b/drivers/misc/habanalabs/goya/goya_coresight.c @@ -636,24 +636,11 @@ static int goya_config_spmu(struct hl_device *hdev, return 0; } -static int goya_config_timestamp(struct hl_device *hdev, - struct hl_debug_params *params) -{ - WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0); - if (params->enable) { - WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0); - WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0); - WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1); - } - - return 0; -} - int goya_debug_coresight(struct hl_device *hdev, void *data) { struct hl_debug_params *params = data; u32 val; - int rc; + int rc = 0; switch (params->op) { case HL_DEBUG_OP_STM: @@ -675,7 +662,7 @@ int goya_debug_coresight(struct hl_device *hdev, void *data) rc = goya_config_spmu(hdev, params); break; case HL_DEBUG_OP_TIMESTAMP: - rc = goya_config_timestamp(hdev, params); + /* Do nothing as this opcode is deprecated */ break; default: diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 6cf50177cd21..266bf85056d4 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -451,7 +451,7 @@ struct hl_debug_params_spmu { #define HL_DEBUG_OP_BMON 4 /* Opcode for SPMU component */ #define HL_DEBUG_OP_SPMU 5 -/* Opcode for timestamp */ +/* Opcode for timestamp (deprecated) */ #define HL_DEBUG_OP_TIMESTAMP 6 /* Opcode for setting the device into or out of debug mode. 
The enable * variable should be 1 for enabling debug mode and 0 for disabling it -- cgit v1.2.3 From 75b3cb2bb080372d043e8f0c0aeae8f52461136b Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Wed, 28 Aug 2019 17:32:04 +0300 Subject: habanalabs: add uapi to retrieve device utilization Users and sysadmins usually want to know what the device utilization is, as a level-0 indication of whether they are using the device efficiently. Add a new opcode to the INFO IOCTL that will return the device utilization over the last period of 100-1000ms. The return value is 0-100, representing the total utilization rate as a percentage. Signed-off-by: Oded Gabbay Reviewed-by: Omer Shpigelman --- drivers/misc/habanalabs/command_submission.c | 20 ++++- drivers/misc/habanalabs/device.c | 116 ++++++++++++++++++++++++++- drivers/misc/habanalabs/habanalabs.h | 23 +++++- drivers/misc/habanalabs/habanalabs_ioctl.c | 27 +++++++ drivers/misc/habanalabs/hw_queue.c | 8 +- drivers/misc/habanalabs/include/goya/goya.h | 2 + include/uapi/misc/habanalabs.h | 49 +++++++---- 7 files changed, 222 insertions(+), 23 deletions(-) (limited to 'include/uapi') diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index e4dd3e83df8b..4777ec4c2b55 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c @@ -178,11 +178,23 @@ static void cs_do_release(struct kref *ref) /* We also need to update CI for internal queues */ if (cs->submitted) { - int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt); + hdev->asic_funcs->hw_queues_lock(hdev); - WARN_ONCE((cs_cnt < 0), - "hl%d: error in CS active cnt %d\n", - hdev->id, cs_cnt); + hdev->cs_active_cnt--; + if (!hdev->cs_active_cnt) { + struct hl_device_idle_busy_ts *ts; + + ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx++]; + ts->busy_to_idle_ts = ktime_get(); + + if (hdev->idle_busy_ts_idx == HL_IDLE_BUSY_TS_ARR_SIZE) + hdev->idle_busy_ts_idx = 0; + } else if (hdev->cs_active_cnt < 0) { + dev_crit(hdev->dev, "CS active cnt %d is negative\n", + hdev->cs_active_cnt); + } + + hdev->asic_funcs->hw_queues_unlock(hdev); hl_int_hw_queue_update_ci(cs); diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index 41c3ddbca351..cebdceb72298 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c @@ -293,6 +293,14 @@ static int device_early_init(struct hl_device *hdev) goto free_eq_wq; } + hdev->idle_busy_ts_arr = kmalloc_array(HL_IDLE_BUSY_TS_ARR_SIZE, + sizeof(struct hl_device_idle_busy_ts), + (GFP_KERNEL | __GFP_ZERO)); + if (!hdev->idle_busy_ts_arr) { + rc = -ENOMEM; + goto free_chip_info; + } + hl_cb_mgr_init(&hdev->kernel_cb_mgr); mutex_init(&hdev->send_cpu_message_lock); @@ -303,10 +311,11 @@ static int device_early_init(struct hl_device *hdev) INIT_LIST_HEAD(&hdev->fpriv_list); mutex_init(&hdev->fpriv_list_lock); atomic_set(&hdev->in_reset, 0); - atomic_set(&hdev->cs_active_cnt, 0); return 0; +free_chip_info: + kfree(hdev->hl_chip_info); free_eq_wq: destroy_workqueue(hdev->eq_wq); free_cq_wq: @@ -336,6 +345,7 @@ static void device_early_fini(struct hl_device *hdev) hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr); + kfree(hdev->idle_busy_ts_arr); kfree(hdev->hl_chip_info); destroy_workqueue(hdev->eq_wq); @@ -451,6 +461,102 @@ static void device_late_fini(struct hl_device *hdev) hdev->late_init_done = false; } +uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms) +{ + struct hl_device_idle_busy_ts *ts; + ktime_t zero_ktime, curr = 
ktime_get(); + u32 overlap_cnt = 0, last_index = hdev->idle_busy_ts_idx; + s64 period_us, last_start_us, last_end_us, last_busy_time_us, + total_busy_time_us = 0, total_busy_time_ms; + + zero_ktime = ktime_set(0, 0); + period_us = period_ms * USEC_PER_MSEC; + ts = &hdev->idle_busy_ts_arr[last_index]; + + /* check case that device is currently in idle */ + if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime) && + !ktime_compare(ts->idle_to_busy_ts, zero_ktime)) { + + last_index--; + /* Handle case idle_busy_ts_idx was 0 */ + if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE) + last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1; + + ts = &hdev->idle_busy_ts_arr[last_index]; + } + + while (overlap_cnt < HL_IDLE_BUSY_TS_ARR_SIZE) { + /* Check if we are in last sample case. i.e. if the sample + * begun before the sampling period. This could be a real + * sample or 0 so need to handle both cases + */ + last_start_us = ktime_to_us( + ktime_sub(curr, ts->idle_to_busy_ts)); + + if (last_start_us > period_us) { + + /* First check two cases: + * 1. If the device is currently busy + * 2. If the device was idle during the whole sampling + * period + */ + + if (!ktime_compare(ts->busy_to_idle_ts, zero_ktime)) { + /* Check if the device is currently busy */ + if (ktime_compare(ts->idle_to_busy_ts, + zero_ktime)) + return 100; + + /* We either didn't have any activity or we + * reached an entry which is 0. Either way, + * exit and return what was accumulated so far + */ + break; + } + + /* If sample has finished, check it is relevant */ + last_end_us = ktime_to_us( + ktime_sub(curr, ts->busy_to_idle_ts)); + + if (last_end_us > period_us) + break; + + /* It is relevant so add it but with adjustment */ + last_busy_time_us = ktime_to_us( + ktime_sub(ts->busy_to_idle_ts, + ts->idle_to_busy_ts)); + total_busy_time_us += last_busy_time_us - + (last_start_us - period_us); + break; + } + + /* Check if the sample is finished or still open */ + if (ktime_compare(ts->busy_to_idle_ts, zero_ktime)) + last_busy_time_us = ktime_to_us( + ktime_sub(ts->busy_to_idle_ts, + ts->idle_to_busy_ts)); + else + last_busy_time_us = ktime_to_us( + ktime_sub(curr, ts->idle_to_busy_ts)); + + total_busy_time_us += last_busy_time_us; + + last_index--; + /* Handle case idle_busy_ts_idx was 0 */ + if (last_index > HL_IDLE_BUSY_TS_ARR_SIZE) + last_index = HL_IDLE_BUSY_TS_ARR_SIZE - 1; + + ts = &hdev->idle_busy_ts_arr[last_index]; + + overlap_cnt++; + } + + total_busy_time_ms = DIV_ROUND_UP_ULL(total_busy_time_us, + USEC_PER_MSEC); + + return DIV_ROUND_UP_ULL(total_busy_time_ms * 100, period_ms); +} + /* * hl_device_set_frequency - set the frequency of the device * @@ -808,6 +914,14 @@ again: for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) hl_cq_reset(hdev, &hdev->completion_queue[i]); + hdev->idle_busy_ts_idx = 0; + hdev->idle_busy_ts_arr[0].busy_to_idle_ts = ktime_set(0, 0); + hdev->idle_busy_ts_arr[0].idle_to_busy_ts = ktime_set(0, 0); + + if (hdev->cs_active_cnt) + dev_crit(hdev->dev, "CS active cnt %d is not 0 during reset\n", + hdev->cs_active_cnt); + mutex_lock(&hdev->fpriv_list_lock); /* Make sure the context switch phase will run again */ diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index a4d929f5bad8..23b86b7f9732 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -45,6 +45,8 @@ /* MUST BE POWER OF 2 and larger than 1 */ #define HL_MAX_PENDING_CS 64 +#define HL_IDLE_BUSY_TS_ARR_SIZE 4096 + /* Memory */ #define MEM_HASH_TABLE_BITS 7 /* 1 << 7 
buckets */ @@ -1156,6 +1158,16 @@ struct hl_device_reset_work { struct hl_device *hdev; }; +/** + * struct hl_device_idle_busy_ts - used for calculating device utilization rate. + * @idle_to_busy_ts: timestamp where device changed from idle to busy. + * @busy_to_idle_ts: timestamp where device changed from busy to idle. + */ +struct hl_device_idle_busy_ts { + ktime_t idle_to_busy_ts; + ktime_t busy_to_idle_ts; +}; + /** * struct hl_device - habanalabs device structure. * @pdev: pointer to PCI device, can be NULL in case of simulator device. @@ -1203,19 +1215,22 @@ struct hl_device_reset_work { * when a user opens the device * @fpriv_list_lock: protects the fpriv_list * @compute_ctx: current compute context executing. + * @idle_busy_ts_arr: array to hold time stamps of transitions from idle to busy + * and vice-versa * @dram_used_mem: current DRAM memory consumption. * @timeout_jiffies: device CS timeout value. * @max_power: the max power of the device, as configured by the sysadmin. This * value is saved so in case of hard-reset, KMD will restore this * value and update the F/W after the re-initialization * @in_reset: is device in reset flow. + * @curr_pll_profile: current PLL profile. * @cs_active_cnt: number of active command submissions on this device (active * means already in H/W queues) - * @curr_pll_profile: current PLL profile. * @major: habanalabs KMD major. * @high_pll: high PLL profile frequency. * @soft_reset_cnt: number of soft reset since KMD loading. * @hard_reset_cnt: number of hard reset since KMD loading. + * @idle_busy_ts_idx: index of current entry in idle_busy_ts_arr * @id: device minor. * @id_control: minor of the control device * @disabled: is device disabled. @@ -1285,16 +1300,19 @@ struct hl_device { struct hl_ctx *compute_ctx; + struct hl_device_idle_busy_ts *idle_busy_ts_arr; + atomic64_t dram_used_mem; u64 timeout_jiffies; u64 max_power; atomic_t in_reset; - atomic_t cs_active_cnt; enum hl_pll_frequency curr_pll_profile; + int cs_active_cnt; u32 major; u32 high_pll; u32 soft_reset_cnt; u32 hard_reset_cnt; + u32 idle_busy_ts_idx; u16 id; u16 id_control; u8 disabled; @@ -1457,6 +1475,7 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset, void hl_hpriv_get(struct hl_fpriv *hpriv); void hl_hpriv_put(struct hl_fpriv *hpriv); int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq); +uint32_t hl_device_utilization(struct hl_device *hdev, uint32_t period_ms); int hl_build_hwmon_channel_info(struct hl_device *hdev, struct armcp_sensor *sensors_arr); diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c index 8fbca2c0f44b..f958568f7996 100644 --- a/drivers/misc/habanalabs/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/habanalabs_ioctl.c @@ -197,6 +197,29 @@ out: return rc; } +static int device_utilization(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_device_utilization device_util = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + if ((args->period_ms < 100) || (args->period_ms > 1000) || + (args->period_ms % 100)) { + dev_err(hdev->dev, + "period %u must be between 100 - 1000 and must be divisible by 100\n", + args->period_ms); + return -EINVAL; + } + + device_util.utilization = hl_device_utilization(hdev, args->period_ms); + + return copy_to_user(out, &device_util, + min((size_t) max_size, sizeof(device_util))) ? 
-EFAULT : 0; +} + static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, struct device *dev) { @@ -239,6 +262,10 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, rc = hw_idle(hdev, args); break; + case HL_INFO_DEVICE_UTILIZATION: + rc = device_utilization(hdev, args); + break; + default: dev_err(dev, "Invalid request %d\n", args->op); rc = -ENOTTY; diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c index 696cf7d206c6..55b383b2a116 100644 --- a/drivers/misc/habanalabs/hw_queue.c +++ b/drivers/misc/habanalabs/hw_queue.c @@ -364,7 +364,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs) spin_unlock(&hdev->hw_queues_mirror_lock); } - atomic_inc(&hdev->cs_active_cnt); + if (!hdev->cs_active_cnt++) { + struct hl_device_idle_busy_ts *ts; + + ts = &hdev->idle_busy_ts_arr[hdev->idle_busy_ts_idx]; + ts->busy_to_idle_ts = ktime_set(0, 0); + ts->idle_to_busy_ts = ktime_get(); + } list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) if (job->ext_queue) diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/misc/habanalabs/include/goya/goya.h index 3f02a52ba4ce..43d241891e45 100644 --- a/drivers/misc/habanalabs/include/goya/goya.h +++ b/drivers/misc/habanalabs/include/goya/goya.h @@ -38,4 +38,6 @@ #define TPC_MAX_NUM 8 +#define MME_MAX_NUM 1 + #endif /* GOYA_H */ diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 266bf85056d4..73ee212d7fa6 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -77,22 +77,29 @@ enum hl_device_status { /* Opcode for management ioctl * - * HW_IP_INFO - Receive information about different IP blocks in the - * device. - * HL_INFO_HW_EVENTS - Receive an array describing how many times each event - * occurred since the last hard reset. - * HL_INFO_DRAM_USAGE - Retrieve the dram usage inside the device and of the - * specific context. This is relevant only for GOYA device. - * HL_INFO_HW_IDLE - Retrieve information about the idle status of each - * internal engine. + * HW_IP_INFO - Receive information about different IP blocks in the + * device. + * HL_INFO_HW_EVENTS - Receive an array describing how many times each event + * occurred since the last hard reset. + * HL_INFO_DRAM_USAGE - Retrieve the dram usage inside the device and of the + * specific context. This is relevant only for devices + * where the dram is managed by the kernel driver + * HL_INFO_HW_IDLE - Retrieve information about the idle status of each + * internal engine. * HL_INFO_DEVICE_STATUS - Retrieve the device's status. This opcode doesn't * require an open context. + * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device + * over the last period specified by the user. + * The period can be between 100ms to 1s, in + * resolution of 100ms. The return value is a + * percentage of the utilization rate. 
*/ -#define HL_INFO_HW_IP_INFO 0 -#define HL_INFO_HW_EVENTS 1 -#define HL_INFO_DRAM_USAGE 2 -#define HL_INFO_HW_IDLE 3 -#define HL_INFO_DEVICE_STATUS 4 +#define HL_INFO_HW_IP_INFO 0 +#define HL_INFO_HW_EVENTS 1 +#define HL_INFO_DRAM_USAGE 2 +#define HL_INFO_HW_IDLE 3 +#define HL_INFO_DEVICE_STATUS 4 +#define HL_INFO_DEVICE_UTILIZATION 6 #define HL_INFO_VERSION_MAX_LEN 128 @@ -134,6 +141,11 @@ struct hl_info_device_status { __u32 pad; }; +struct hl_info_device_utilization { + __u32 utilization; + __u32 pad; +}; + struct hl_info_args { /* Location of relevant struct in userspace */ __u64 return_pointer; @@ -149,8 +161,15 @@ struct hl_info_args { /* HL_INFO_* */ __u32 op; - /* Context ID - Currently not in use */ - __u32 ctx_id; + union { + /* Context ID - Currently not in use */ + __u32 ctx_id; + /* Period value for utilization rate (100ms - 1000ms, in 100ms + * resolution. + */ + __u32 period_ms; + }; + __u32 pad; }; -- cgit v1.2.3 From e9730763a21a5441d46511f124d703d76a5ef6e6 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Wed, 28 Aug 2019 21:51:52 +0300 Subject: habanalabs: add uapi to retrieve aggregate H/W events Add a new opcode to INFO IOCTL to retrieve aggregate H/W events. i.e. the events counters are NOT cleared upon device reset, but count from the loading of the driver. Add the code to support it in the device event handling function. Signed-off-by: Oded Gabbay Reviewed-by: Omer Shpigelman --- drivers/misc/habanalabs/goya/goya.c | 9 +++++++-- drivers/misc/habanalabs/goya/goyaP.h | 3 ++- drivers/misc/habanalabs/habanalabs.h | 3 ++- drivers/misc/habanalabs/habanalabs_ioctl.c | 11 ++++++++--- include/uapi/misc/habanalabs.h | 3 +++ 5 files changed, 22 insertions(+), 7 deletions(-) (limited to 'include/uapi') diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 0dd0b4429fee..1267ec75b19f 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -4469,6 +4469,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) struct goya_device *goya = hdev->asic_specific; goya->events_stat[event_type]++; + goya->events_stat_aggregate[event_type]++; switch (event_type) { case GOYA_ASYNC_EVENT_ID_PCIE_IF: @@ -4550,12 +4551,16 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) } } -void *goya_get_events_stat(struct hl_device *hdev, u32 *size) +void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size) { struct goya_device *goya = hdev->asic_specific; - *size = (u32) sizeof(goya->events_stat); + if (aggregate) { + *size = (u32) sizeof(goya->events_stat_aggregate); + return goya->events_stat_aggregate; + } + *size = (u32) sizeof(goya->events_stat); return goya->events_stat; } diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h index d7f48c9c41cd..f830cfd5c04d 100644 --- a/drivers/misc/habanalabs/goya/goyaP.h +++ b/drivers/misc/habanalabs/goya/goyaP.h @@ -162,6 +162,7 @@ struct goya_device { u64 ddr_bar_cur_addr; u32 events_stat[GOYA_ASYNC_EVENT_ID_SIZE]; + u32 events_stat_aggregate[GOYA_ASYNC_EVENT_ID_SIZE]; u32 hw_cap_initialized; u8 device_cpu_mmu_mappings_done; }; @@ -215,7 +216,7 @@ int goya_suspend(struct hl_device *hdev); int goya_resume(struct hl_device *hdev); void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry); -void *goya_get_events_stat(struct hl_device *hdev, u32 *size); +void *goya_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size); void 
goya_add_end_of_cb_packets(struct hl_device *hdev, u64 kernel_address, u32 len, u64 cq_addr, u32 cq_val, u32 msix_vec); diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index 23b86b7f9732..aa7aaa710f12 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -558,7 +558,8 @@ struct hl_asic_funcs { struct hl_eq_entry *eq_entry); void (*set_pll_profile)(struct hl_device *hdev, enum hl_pll_frequency freq); - void* (*get_events_stat)(struct hl_device *hdev, u32 *size); + void* (*get_events_stat)(struct hl_device *hdev, bool aggregate, + u32 *size); u64 (*read_pte)(struct hl_device *hdev, u64 addr); void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val); void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard); diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c index f958568f7996..66d9c710073c 100644 --- a/drivers/misc/habanalabs/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/habanalabs_ioctl.c @@ -75,7 +75,8 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args) min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0; } -static int hw_events_info(struct hl_device *hdev, struct hl_info_args *args) +static int hw_events_info(struct hl_device *hdev, bool aggregate, + struct hl_info_args *args) { u32 size, max_size = args->return_size; void __user *out = (void __user *) (uintptr_t) args->return_pointer; @@ -84,7 +85,7 @@ static int hw_events_info(struct hl_device *hdev, struct hl_info_args *args) if ((!max_size) || (!out)) return -EINVAL; - arr = hdev->asic_funcs->get_events_stat(hdev, &size); + arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size); return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0; } @@ -251,7 +252,7 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, switch (args->op) { case HL_INFO_HW_EVENTS: - rc = hw_events_info(hdev, args); + rc = hw_events_info(hdev, false, args); break; case HL_INFO_DRAM_USAGE: @@ -266,6 +267,10 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data, rc = device_utilization(hdev, args); break; + case HL_INFO_HW_EVENTS_AGGREGATE: + rc = hw_events_info(hdev, true, args); + break; + default: dev_err(dev, "Invalid request %d\n", args->op); rc = -ENOTTY; diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 73ee212d7fa6..19f8039db2ea 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -93,6 +93,8 @@ enum hl_device_status { * The period can be between 100ms to 1s, in * resolution of 100ms. The return value is a * percentage of the utilization rate. + * HL_INFO_HW_EVENTS_AGGREGATE - Receive an array describing how many times each + * event occurred since the driver was loaded. */ #define HL_INFO_HW_IP_INFO 0 #define HL_INFO_HW_EVENTS 1 @@ -100,6 +102,7 @@ enum hl_device_status { #define HL_INFO_HW_IDLE 3 #define HL_INFO_DEVICE_STATUS 4 #define HL_INFO_DEVICE_UTILIZATION 6 +#define HL_INFO_HW_EVENTS_AGGREGATE 7 #define HL_INFO_VERSION_MAX_LEN 128 -- cgit v1.2.3 From 4c172bbfaa4e1aa26dab58781301902c7b3e4ebc Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Fri, 30 Aug 2019 16:59:33 +0300 Subject: habanalabs: stop using the acronym KMD We want to stop using the acronym KMD. Therefore, replace all locations (except for register names we can't modify) where KMD is written to other terms such as "Linux kernel driver" or "Host kernel driver", etc. 
Signed-off-by: Oded Gabbay Reviewed-by: Omer Shpigelman --- drivers/misc/habanalabs/asid.c | 2 +- drivers/misc/habanalabs/command_buffer.c | 3 +- drivers/misc/habanalabs/command_submission.c | 5 +- drivers/misc/habanalabs/context.c | 2 +- drivers/misc/habanalabs/goya/goya.c | 22 ++++----- drivers/misc/habanalabs/goya/goyaP.h | 14 +++--- drivers/misc/habanalabs/habanalabs.h | 32 ++++++------- drivers/misc/habanalabs/include/armcp_if.h | 70 ++++++++++++++-------------- include/uapi/misc/habanalabs.h | 22 ++++----- 9 files changed, 88 insertions(+), 84 deletions(-) (limited to 'include/uapi') diff --git a/drivers/misc/habanalabs/asid.c b/drivers/misc/habanalabs/asid.c index 2c01461701a3..a2fdf31cf27c 100644 --- a/drivers/misc/habanalabs/asid.c +++ b/drivers/misc/habanalabs/asid.c @@ -18,7 +18,7 @@ int hl_asid_init(struct hl_device *hdev) mutex_init(&hdev->asid_mutex); - /* ASID 0 is reserved for KMD and device CPU */ + /* ASID 0 is reserved for the kernel driver and device CPU */ set_bit(0, hdev->asid_bitmap); return 0; diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/command_buffer.c index e495f44064fa..53fddbd8e693 100644 --- a/drivers/misc/habanalabs/command_buffer.c +++ b/drivers/misc/habanalabs/command_buffer.c @@ -397,7 +397,8 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size) rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle, HL_KERNEL_ASID_ID); if (rc) { - dev_err(hdev->dev, "Failed to allocate CB for KMD %d\n", rc); + dev_err(hdev->dev, + "Failed to allocate CB for the kernel driver %d\n", rc); return NULL; } diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c index 4777ec4c2b55..a9ac045dcfde 100644 --- a/drivers/misc/habanalabs/command_submission.c +++ b/drivers/misc/habanalabs/command_submission.c @@ -409,8 +409,9 @@ static struct hl_cb *validate_queue_index(struct hl_device *hdev, return NULL; } - if (hw_queue_prop->kmd_only) { - dev_err(hdev->dev, "Queue index %d is restricted for KMD\n", + if (hw_queue_prop->driver_only) { + dev_err(hdev->dev, + "Queue index %d is restricted for the kernel driver\n", chunk->queue_index); return NULL; } else if (hw_queue_prop->type == QUEUE_TYPE_INT) { diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c index bc0dec57a983..17db7b3dfb4c 100644 --- a/drivers/misc/habanalabs/context.c +++ b/drivers/misc/habanalabs/context.c @@ -128,7 +128,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx) ctx->thread_ctx_switch_wait_token = 0; if (is_kernel_ctx) { - ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */ + ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */ rc = hl_mmu_ctx_init(ctx); if (rc) { dev_err(hdev->dev, "Failed to init mmu ctx module\n"); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index c88c2fea97b9..6fba14b81f90 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -42,8 +42,8 @@ * PQ, CQ and CP are not secured. * PQ, CB and the data are on the SRAM/DRAM. * - * Since QMAN DMA is secured, KMD is parsing the DMA CB: - * - KMD checks DMA pointer + * Since QMAN DMA is secured, the driver is parsing the DMA CB: + * - checks DMA pointer * - WREG, MSG_PROT are not allowed. * - MSG_LONG/SHORT are allowed. * @@ -56,15 +56,15 @@ * QMAN DMA: PQ, CQ and CP are secured. * MMU is set to bypass on the Secure props register of the QMAN. 
* The reasons we don't enable MMU for PQ, CQ and CP are: - * - PQ entry is in kernel address space and KMD doesn't map it. + * - PQ entry is in kernel address space and the driver doesn't map it. * - CP writes to MSIX register and to kernel address space (completion * queue). * - * DMA is not secured but because CP is secured, KMD still needs to parse the - * CB, but doesn't need to check the DMA addresses. + * DMA is not secured but because CP is secured, the driver still needs to parse + * the CB, but doesn't need to check the DMA addresses. * - * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD - * doesn't map memory in MMU. + * For QMAN DMA 0, DMA is also secured because only the driver uses this DMA and + * the driver doesn't map memory in MMU. * * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode) * @@ -336,18 +336,18 @@ void goya_get_fixed_properties(struct hl_device *hdev) for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) { prop->hw_queues_props[i].type = QUEUE_TYPE_EXT; - prop->hw_queues_props[i].kmd_only = 0; + prop->hw_queues_props[i].driver_only = 0; } for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) { prop->hw_queues_props[i].type = QUEUE_TYPE_CPU; - prop->hw_queues_props[i].kmd_only = 1; + prop->hw_queues_props[i].driver_only = 1; } for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES + NUMBER_OF_INT_HW_QUEUES; i++) { prop->hw_queues_props[i].type = QUEUE_TYPE_INT; - prop->hw_queues_props[i].kmd_only = 0; + prop->hw_queues_props[i].driver_only = 0; } for (; i < HL_MAX_QUEUES; i++) @@ -2853,7 +2853,7 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job) if (!hdev->asic_funcs->is_device_idle(hdev, NULL, NULL)) { dev_err_ratelimited(hdev->dev, - "Can't send KMD job on QMAN0 because the device is not idle\n"); + "Can't send driver job on QMAN0 because the device is not idle\n"); return -EBUSY; } diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h index 06da71e8d7ea..89b6574f8e4f 100644 --- a/drivers/misc/habanalabs/goya/goyaP.h +++ b/drivers/misc/habanalabs/goya/goyaP.h @@ -70,19 +70,19 @@ MMU_PAGE_TABLES_SIZE) #define MMU_CACHE_MNG_ADDR (MMU_DRAM_DEFAULT_PAGE_ADDR + \ MMU_DRAM_DEFAULT_PAGE_SIZE) -#define DRAM_KMD_END_ADDR (MMU_CACHE_MNG_ADDR + \ +#define DRAM_DRIVER_END_ADDR (MMU_CACHE_MNG_ADDR + \ MMU_CACHE_MNG_SIZE) #define DRAM_BASE_ADDR_USER 0x20000000 -#if (DRAM_KMD_END_ADDR > DRAM_BASE_ADDR_USER) -#error "KMD must reserve no more than 512MB" +#if (DRAM_DRIVER_END_ADDR > DRAM_BASE_ADDR_USER) +#error "Driver must reserve no more than 512MB" #endif /* - * SRAM Memory Map for KMD + * SRAM Memory Map for Driver * - * KMD occupies KMD_SRAM_SIZE bytes from the start of SRAM. It is used for + * Driver occupies DRIVER_SRAM_SIZE bytes from the start of SRAM. 
It is used for * MME/TPC QMANs * */ @@ -108,10 +108,10 @@ #define TPC7_QMAN_BASE_OFFSET (TPC6_QMAN_BASE_OFFSET + \ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) -#define SRAM_KMD_RES_OFFSET (TPC7_QMAN_BASE_OFFSET + \ +#define SRAM_DRIVER_RES_OFFSET (TPC7_QMAN_BASE_OFFSET + \ (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) -#if (SRAM_KMD_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START) +#if (SRAM_DRIVER_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START) #error "MME/TPC QMANs SRAM space exceeds limit" #endif diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index aa7aaa710f12..c39e07d665c4 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -96,12 +96,12 @@ enum hl_queue_type { /** * struct hw_queue_properties - queue information. * @type: queue type. - * @kmd_only: true if only KMD is allowed to send a job to this queue, false - * otherwise. + * @driver_only: true if only the driver is allowed to send a job to this queue, + * false otherwise. */ struct hw_queue_properties { enum hl_queue_type type; - u8 kmd_only; + u8 driver_only; }; /** @@ -324,7 +324,7 @@ struct hl_cs_job; #define HL_EQ_LENGTH 64 #define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE) -/* KMD <-> ArmCP shared memory size */ +/* Host <-> ArmCP shared memory size */ #define HL_CPU_ACCESSIBLE_MEM_SIZE SZ_2M /** @@ -405,7 +405,7 @@ struct hl_cs_parser; /** * enum hl_pm_mng_profile - power management profile. - * @PM_AUTO: internal clock is set by KMD. + * @PM_AUTO: internal clock is set by the Linux driver. * @PM_MANUAL: internal clock is set by the user. * @PM_LAST: last power management type. */ @@ -613,7 +613,7 @@ struct hl_va_range { * descriptor (hl_vm_phys_pg_list or hl_userptr). * @mmu_phys_hash: holds a mapping from physical address to pgt_info structure. * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure. - * @hpriv: pointer to the private (KMD) data of the process (fd). + * @hpriv: pointer to the private (Kernel Driver) data of the process (fd). * @hdev: pointer to the device structure. * @refcount: reference counter for the context. Context is released only when * this hits 0l. It is incremented on CS and CS_WAIT. @@ -1185,19 +1185,19 @@ struct hl_device_idle_busy_ts { * @completion_queue: array of hl_cq. * @cq_wq: work queue of completion queues for executing work in process context * @eq_wq: work queue of event queue for executing work in process context. - * @kernel_ctx: KMD context structure. + * @kernel_ctx: Kernel driver context structure. * @kernel_queues: array of hl_hw_queue. * @hw_queues_mirror_list: CS mirror list for TDR. * @hw_queues_mirror_lock: protects hw_queues_mirror_list. * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs. * @event_queue: event queue for IRQ from ArmCP. * @dma_pool: DMA pool for small allocations. - * @cpu_accessible_dma_mem: KMD <-> ArmCP shared memory CPU address. - * @cpu_accessible_dma_address: KMD <-> ArmCP shared memory DMA address. - * @cpu_accessible_dma_pool: KMD <-> ArmCP shared memory pool. + * @cpu_accessible_dma_mem: Host <-> ArmCP shared memory CPU address. + * @cpu_accessible_dma_address: Host <-> ArmCP shared memory DMA address. + * @cpu_accessible_dma_pool: Host <-> ArmCP shared memory pool. * @asid_bitmap: holds used/available ASIDs. * @asid_mutex: protects asid_bitmap. - * @send_cpu_message_lock: enforces only one message in KMD <-> ArmCP queue. + * @send_cpu_message_lock: enforces only one message in Host <-> ArmCP queue. 
* @debug_lock: protects critical section of setting debug mode for device * @asic_prop: ASIC specific immutable properties. * @asic_funcs: ASIC specific functions. @@ -1221,16 +1221,16 @@ struct hl_device_idle_busy_ts { * @dram_used_mem: current DRAM memory consumption. * @timeout_jiffies: device CS timeout value. * @max_power: the max power of the device, as configured by the sysadmin. This - * value is saved so in case of hard-reset, KMD will restore this - * value and update the F/W after the re-initialization + * value is saved so in case of hard-reset, the driver will restore + * this value and update the F/W after the re-initialization * @in_reset: is device in reset flow. * @curr_pll_profile: current PLL profile. * @cs_active_cnt: number of active command submissions on this device (active * means already in H/W queues) - * @major: habanalabs KMD major. + * @major: habanalabs kernel driver major. * @high_pll: high PLL profile frequency. - * @soft_reset_cnt: number of soft reset since KMD loading. - * @hard_reset_cnt: number of hard reset since KMD loading. + * @soft_reset_cnt: number of soft reset since the driver was loaded. + * @hard_reset_cnt: number of hard reset since the driver was loaded. * @idle_busy_ts_idx: index of current entry in idle_busy_ts_arr * @id: device minor. * @id_control: minor of the control device diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h index 5565bce60bc9..e4c6699a1868 100644 --- a/drivers/misc/habanalabs/include/armcp_if.h +++ b/drivers/misc/habanalabs/include/armcp_if.h @@ -41,33 +41,34 @@ enum pq_init_status { /* * ArmCP Primary Queue Packets * - * During normal operation, KMD needs to send various messages to ArmCP, - * usually either to SET some value into a H/W periphery or to GET the current - * value of some H/W periphery. For example, SET the frequency of MME/TPC and - * GET the value of the thermal sensor. - * - * These messages can be initiated either by the User application or by KMD - * itself, e.g. power management code. In either case, the communication from - * KMD to ArmCP will *always* be in synchronous mode, meaning that KMD will - * send a single message and poll until the message was acknowledged and the - * results are ready (if results are needed). - * - * This means that only a single message can be sent at a time and KMD must - * wait for its result before sending the next message. Having said that, - * because these are control messages which are sent in a relatively low + * During normal operation, the host's kernel driver needs to send various + * messages to ArmCP, usually either to SET some value into a H/W periphery or + * to GET the current value of some H/W periphery. For example, SET the + * frequency of MME/TPC and GET the value of the thermal sensor. + * + * These messages can be initiated either by the User application or by the + * host's driver itself, e.g. power management code. In either case, the + * communication from the host's driver to ArmCP will *always* be in + * synchronous mode, meaning that the host will send a single message and poll + * until the message was acknowledged and the results are ready (if results are + * needed). + * + * This means that only a single message can be sent at a time and the host's + * driver must wait for its result before sending the next message. Having said + * that, because these are control messages which are sent in a relatively low * frequency, this limitation seems acceptable. 
It's important to note that * in case of multiple devices, messages to different devices *can* be sent * at the same time. * * The message, inputs/outputs (if relevant) and fence object will be located - * on the device DDR at an address that will be determined by KMD. During - * device initialization phase, KMD will pass to ArmCP that address. Most of - * the message types will contain inputs/outputs inside the message itself. - * The common part of each message will contain the opcode of the message (its - * type) and a field representing a fence object. - * - * When KMD wishes to send a message to ArmCP, it will write the message - * contents to the device DDR, clear the fence object and then write the + * on the device DDR at an address that will be determined by the host's driver. + * During device initialization phase, the host will pass to ArmCP that address. + * Most of the message types will contain inputs/outputs inside the message + * itself. The common part of each message will contain the opcode of the + * message (its type) and a field representing a fence object. + * + * When the host's driver wishes to send a message to ArmCP, it will write the + * message contents to the device DDR, clear the fence object and then write the * value 484 to the mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR register to issue * the 484 interrupt-id to the ARM core. * @@ -78,12 +79,13 @@ enum pq_init_status { * device DDR and then write to the fence object. If an error occurred, ArmCP * will fill the rc field with the right error code. * - * In the meantime, KMD will poll on the fence object. Once KMD sees that the - * fence object is signaled, it will read the results from the device DDR - * (if relevant) and resume the code execution in KMD. + * In the meantime, the host's driver will poll on the fence object. Once the + * host sees that the fence object is signaled, it will read the results from + * the device DDR (if relevant) and resume the code execution in the host's + * driver. * * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8 - * so the value being put by the KMD matches the value read by ArmCP + * so the value being put by the host's driver matches the value read by ArmCP * * Non-QMAN packets should be limited to values 1 through (2^8 - 1) * @@ -148,9 +150,9 @@ enum pq_init_status { * * ARMCP_PACKET_INFO_GET - * Fetch information from the device as specified in the packet's - * structure. KMD passes the max size it allows the ArmCP to write to - * the structure, to prevent data corruption in case of mismatched - * KMD/FW versions. + * structure. The host's driver passes the max size it allows the ArmCP to + * write to the structure, to prevent data corruption in case of + * mismatched driver/FW versions. * * ARMCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed * @@ -183,9 +185,9 @@ enum pq_init_status { * ARMCP_PACKET_EEPROM_DATA_GET - * Get EEPROM data from the ArmCP kernel. The buffer is specified in the * addr field. The CPU will put the returned data size in the result - * field. In addition, KMD passes the max size it allows the ArmCP to - * write to the structure, to prevent data corruption in case of - * mismatched KMD/FW versions. + * field. In addition, the host's driver passes the max size it allows the + * ArmCP to write to the structure, to prevent data corruption in case of + * mismatched driver/FW versions. 
* */ @@ -231,7 +233,7 @@ struct armcp_packet { __le32 ctl; - __le32 fence; /* Signal to KMD that message is completed */ + __le32 fence; /* Signal to host that message is completed */ union { struct {/* For temperature/current/voltage/fan/pwm get/set */ @@ -320,7 +322,7 @@ struct armcp_sensor { }; /** - * struct armcp_info - host driver's necessary info from ArmCP. + * struct armcp_info - Info from ArmCP that is necessary to the host's driver * @sensors: available sensors description. * @kernel_version: ArmCP linux kernel version. * @reserved: reserved field. diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 19f8039db2ea..39c4ea51a719 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note * - * Copyright 2016-2018 HabanaLabs, Ltd. + * Copyright 2016-2019 HabanaLabs, Ltd. * All Rights Reserved. * */ @@ -329,12 +329,12 @@ struct hl_mem_in { struct { /* * Requested virtual address of mapped memory. - * KMD will try to map the requested region to this - * hint address, as long as the address is valid and - * not already mapped. The user should check the + * The driver will try to map the requested region to + * this hint address, as long as the address is valid + * and not already mapped. The user should check the * returned address of the IOCTL to make sure he got - * the hint address. Passing 0 here means that KMD - * will choose the address itself. + * the hint address. Passing 0 here means that the + * driver will choose the address itself. */ __u64 hint_addr; /* Handle returned from HL_MEM_OP_ALLOC */ @@ -347,12 +347,12 @@ struct hl_mem_in { __u64 host_virt_addr; /* * Requested virtual address of mapped memory. - * KMD will try to map the requested region to this - * hint address, as long as the address is valid and - * not already mapped. The user should check the + * The driver will try to map the requested region to + * this hint address, as long as the address is valid + * and not already mapped. The user should check the * returned address of the IOCTL to make sure he got - * the hint address. Passing 0 here means that KMD - * will choose the address itself. + * the hint address. Passing 0 here means that the + * driver will choose the address itself. */ __u64 hint_addr; /* Size of allocated host memory */ -- cgit v1.2.3 From 106feb2fdced0c240ca8ecb3d5db91b2b64e9a48 Mon Sep 17 00:00:00 2001 From: Denis Efremov Date: Tue, 3 Sep 2019 14:10:20 +0300 Subject: PCI: pciehp: Remove pciehp_set_attention_status() Remove pciehp_set_attention_status() and use pciehp_set_indicators() instead, since the code is mostly the same. 
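The replacement relies on the new PCI_EXP_SLTCTL_ATTN_IND_SHIFT mapping the hotplug core's 0/1/2 status values straight onto the Slot Control encoding; an illustrative compile-time check (a sketch, not part of this patch):

	/* Illustrative only: 1 (on) and 2 (blink) shift straight into the
	 * register encoding.
	 */
	_Static_assert((1 << PCI_EXP_SLTCTL_ATTN_IND_SHIFT) ==
		       PCI_EXP_SLTCTL_ATTN_IND_ON, "1 (on) -> 0x0040");
	_Static_assert((2 << PCI_EXP_SLTCTL_ATTN_IND_SHIFT) ==
		       PCI_EXP_SLTCTL_ATTN_IND_BLINK, "2 (blink) -> 0x0080");

Status 0 has no shifted form, which is why set_attention_status() maps it explicitly to PCI_EXP_SLTCTL_ATTN_IND_OFF (0x00c0).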
Link: https://lore.kernel.org/r/20190903111021.1559-4-efremov@linux.com Signed-off-by: Denis Efremov Signed-off-by: Bjorn Helgaas Reviewed-by: Kuppuswamy Sathyanarayanan --- drivers/pci/hotplug/pciehp.h | 1 - drivers/pci/hotplug/pciehp_core.c | 9 +++++++-- drivers/pci/hotplug/pciehp_hpc.c | 25 ------------------------- include/uapi/linux/pci_regs.h | 1 + 4 files changed, 8 insertions(+), 28 deletions(-) (limited to 'include/uapi') diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 2ca0f35a6a23..1f69f43a3f29 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -170,7 +170,6 @@ void pciehp_get_power_status(struct controller *ctrl, u8 *status); #define INDICATOR_NOOP -1 /* Leave indicator unchanged */ void pciehp_set_indicators(struct controller *ctrl, int pwr, int attn); -void pciehp_set_attention_status(struct controller *ctrl, u8 status); void pciehp_get_latch_status(struct controller *ctrl, u8 *status); int pciehp_query_power_fault(struct controller *ctrl); void pciehp_green_led_on(struct controller *ctrl); diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 6ad0d86762cb..b3122c151b80 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -95,15 +95,20 @@ static void cleanup_slot(struct controller *ctrl) } /* - * set_attention_status - Turns the Amber LED for a slot on, off or blink + * set_attention_status - Turns the Attention Indicator on, off or blinking */ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl->pcie->port; + if (status) + status <<= PCI_EXP_SLTCTL_ATTN_IND_SHIFT; + else + status = PCI_EXP_SLTCTL_ATTN_IND_OFF; + pci_config_pm_runtime_get(pdev); - pciehp_set_attention_status(ctrl, status); + pciehp_set_indicators(ctrl, INDICATOR_NOOP, status); pci_config_pm_runtime_put(pdev); return 0; } diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index a900bfd22447..6527345ccd8e 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -418,31 +418,6 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot, return 0; } -void pciehp_set_attention_status(struct controller *ctrl, u8 value) -{ - u16 slot_cmd; - - if (!ATTN_LED(ctrl)) - return; - - switch (value) { - case 0: /* turn off */ - slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_OFF; - break; - case 1: /* turn on */ - slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_ON; - break; - case 2: /* turn blink */ - slot_cmd = PCI_EXP_SLTCTL_ATTN_IND_BLINK; - break; - default: - return; - } - pcie_write_cmd_nowait(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC); - ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__, - pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd); -} - /** * pciehp_set_indicators() - set attention indicator, power indicator, or both * @ctrl: PCIe hotplug controller diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index f28e562d7ca8..de3e58afc564 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -591,6 +591,7 @@ #define PCI_EXP_SLTCTL_CCIE 0x0010 /* Command Completed Interrupt Enable */ #define PCI_EXP_SLTCTL_HPIE 0x0020 /* Hot-Plug Interrupt Enable */ #define PCI_EXP_SLTCTL_AIC 0x00c0 /* Attention Indicator Control */ +#define PCI_EXP_SLTCTL_ATTN_IND_SHIFT 6 /* Attention Indicator shift */ #define PCI_EXP_SLTCTL_ATTN_IND_ON 0x0040 /* Attention Indicator on */ #define 
PCI_EXP_SLTCTL_ATTN_IND_BLINK 0x0080 /* Attention Indicator blinking */ #define PCI_EXP_SLTCTL_ATTN_IND_OFF 0x00c0 /* Attention Indicator off */ -- cgit v1.2.3 From 95a7233c452a58a4c2310c456c73997853b2ec46 Mon Sep 17 00:00:00 2001 From: Paul Blakey Date: Wed, 4 Sep 2019 16:56:37 +0300 Subject: net: openvswitch: Set OvS recirc_id from tc chain index Offloaded OvS datapath rules are translated one to one to tc rules, for example the following simplified OvS rule: recirc_id(0),in_port(dev1),eth_type(0x0800),ct_state(-trk) actions:ct(),recirc(2) will be translated to the following tc rule: $ tc filter add dev dev1 ingress \ prio 1 chain 0 proto ip \ flower tcp ct_state -trk \ action ct pipe \ action goto chain 2 Received packets will first travel through tc, and if they aren't stolen by it, like in the above rule, they will continue to the OvS datapath. Since we already did some actions (action ct in this case) which might modify the packets, and updated action stats, we would like to continue the processing with the correct recirc_id in OvS (here recirc_id(2)) where we left off. To support this, introduce a new skb extension for tc, which will be used for translating the tc chain to the OvS recirc_id to handle these miss cases. The last tc chain index will be set by the tc goto chain action and read by the OvS datapath. Signed-off-by: Paul Blakey Signed-off-by: Vlad Buslov Acked-by: Jiri Pirko Acked-by: Pravin B Shelar Signed-off-by: David S. Miller --- include/linux/skbuff.h | 13 +++++++++++++ include/uapi/linux/openvswitch.h | 3 +++ net/core/skbuff.c | 6 ++++++ net/openvswitch/datapath.c | 38 +++++++++++++++++++++++++++++++++----- net/openvswitch/datapath.h | 2 ++ net/openvswitch/flow.c | 13 +++++++++++++ net/sched/Kconfig | 13 +++++++++++++ net/sched/cls_api.c | 12 ++++++++++++ 8 files changed, 95 insertions(+), 5 deletions(-) (limited to 'include/uapi') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 77c6dc88e95d..028e684fa974 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -279,6 +279,16 @@ struct nf_bridge_info { }; #endif +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) +/* Chain in tc_skb_ext will be used to share the tc chain with + * ovs recirc_id. It will be set to the current chain by tc + * and read by ovs to recirc_id. + */ +struct tc_skb_ext { + __u32 chain; +}; +#endif + struct sk_buff_head { /* These two members must be first. */ struct sk_buff *next; @@ -4057,6 +4067,9 @@ enum skb_ext_id { #endif #ifdef CONFIG_XFRM SKB_EXT_SEC_PATH, +#endif +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + TC_SKB_EXT, #endif SKB_EXT_NUM, /* must be last */ }; diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index f271f1ec50ae..1887a451c388 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -123,6 +123,9 @@ struct ovs_vport_stats { /* Allow datapath to associate multiple Netlink PIDs to each vport */ #define OVS_DP_F_VPORT_PIDS (1 << 1) +/* Allow tc offload recirc sharing */ +#define OVS_DP_F_TC_RECIRC_SHARING (1 << 2) + /* Fixed logical ports.
*/ #define OVSP_LOCAL ((__u32)0) diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ea8e8d332d85..2b40b5a9425b 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -4087,6 +4087,9 @@ static const u8 skb_ext_type_len[] = { #ifdef CONFIG_XFRM [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path), #endif +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext), +#endif }; static __always_inline unsigned int skb_ext_total_length(void) @@ -4097,6 +4100,9 @@ static __always_inline unsigned int skb_ext_total_length(void) #endif #ifdef CONFIG_XFRM skb_ext_type_len[SKB_EXT_SEC_PATH] + +#endif +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + skb_ext_type_len[TC_SKB_EXT] + #endif 0; } diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 65122bbccd27..dde9d762edee 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -1545,10 +1545,34 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *in dp->user_features = 0; } -static void ovs_dp_change(struct datapath *dp, struct nlattr *a[]) +DEFINE_STATIC_KEY_FALSE(tc_recirc_sharing_support); + +static int ovs_dp_change(struct datapath *dp, struct nlattr *a[]) { - if (a[OVS_DP_ATTR_USER_FEATURES]) - dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]); + u32 user_features = 0; + + if (a[OVS_DP_ATTR_USER_FEATURES]) { + user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]); + + if (user_features & ~(OVS_DP_F_VPORT_PIDS | + OVS_DP_F_UNALIGNED | + OVS_DP_F_TC_RECIRC_SHARING)) + return -EOPNOTSUPP; + +#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + if (user_features & OVS_DP_F_TC_RECIRC_SHARING) + return -EOPNOTSUPP; +#endif + } + + dp->user_features = user_features; + + if (dp->user_features & OVS_DP_F_TC_RECIRC_SHARING) + static_branch_enable(&tc_recirc_sharing_support); + else + static_branch_disable(&tc_recirc_sharing_support); + + return 0; } static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) @@ -1610,7 +1634,9 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) parms.port_no = OVSP_LOCAL; parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID]; - ovs_dp_change(dp, a); + err = ovs_dp_change(dp, a); + if (err) + goto err_destroy_meters; /* So far only local changes have been made, now need the lock. 
*/ ovs_lock(); @@ -1736,7 +1762,9 @@ static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info) if (IS_ERR(dp)) goto err_unlock_free; - ovs_dp_change(dp, info->attrs); + err = ovs_dp_change(dp, info->attrs); + if (err) + goto err_unlock_free; err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, info->snd_seq, 0, OVS_DP_CMD_SET); diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 751d34accdf9..81e85dde8217 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -218,6 +218,8 @@ static inline struct datapath *get_dp(struct net *net, int dp_ifindex) extern struct notifier_block ovs_dp_device_notifier; extern struct genl_family dp_vport_genl_family; +DECLARE_STATIC_KEY_FALSE(tc_recirc_sharing_support); + void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key); void ovs_dp_detach_port(struct vport *); int ovs_dp_upcall(struct datapath *, struct sk_buff *, diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 9d81d2c7bf82..38147e6a20f5 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -842,6 +842,9 @@ static int key_extract_mac_proto(struct sk_buff *skb) int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, struct sk_buff *skb, struct sw_flow_key *key) { +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + struct tc_skb_ext *tc_ext; +#endif int res, err; /* Extract metadata from packet. */ @@ -874,7 +877,17 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, if (res < 0) return res; key->mac_proto = res; + +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + if (static_branch_unlikely(&tc_recirc_sharing_support)) { + tc_ext = skb_ext_find(skb, TC_SKB_EXT); + key->recirc_id = tc_ext ? tc_ext->chain : 0; + } else { + key->recirc_id = 0; + } +#else key->recirc_id = 0; +#endif err = key_extract(skb, key); if (!err) diff --git a/net/sched/Kconfig b/net/sched/Kconfig index afd2ba157a13..b3faafeafab9 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -963,6 +963,19 @@ config NET_IFE_SKBTCINDEX tristate "Support to encoding decoding skb tcindex on IFE action" depends on NET_ACT_IFE +config NET_TC_SKB_EXT + bool "TC recirculation support" + depends on NET_CLS_ACT + default y if NET_CLS_ACT + select SKB_EXTENSIONS + + help + Say Y here to allow tc chain misses to continue in OvS datapath in + the correct recirc_id, and hardware chain misses to continue in + the correct chain in tc software datapath. + + Say N here if you won't be using tc<->ovs offload or tc chains offload. + endif # NET_SCHED config NET_SCH_FIFO diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 671ca905dbb5..05c4fe1c3ca2 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1514,6 +1514,18 @@ reclassify: goto reset; } else if (unlikely(TC_ACT_EXT_CMP(err, TC_ACT_GOTO_CHAIN))) { first_tp = res->goto_tp; + +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) + { + struct tc_skb_ext *ext; + + ext = skb_ext_add(skb, TC_SKB_EXT); + if (WARN_ON_ONCE(!ext)) + return TC_ACT_SHOT; + + ext->chain = err & TC_ACT_EXT_VAL_MASK; + } +#endif goto reset; } #endif -- cgit v1.2.3 From d1967e495a8d1dd6b8ff2df9d3e045c5d60a37a6 Mon Sep 17 00:00:00 2001 From: David Dai Date: Wed, 4 Sep 2019 10:03:43 -0500 Subject: net_sched: act_police: add 2 new attributes to support police 64bit rate and peakrate High speed adapters like the Mellanox CX-5 card can reach up to 100 Gbits per second of bandwidth. Currently, htb already supports 64bit rates in the tc utility.
However, police action rate and peakrate are still limited to 32bit values (up to 32 Gbits per second). Add 2 new attributes, TCA_POLICE_RATE64 and TCA_POLICE_PEAKRATE64, in the kernel for 64bit support so that the tc utility can use them for 64bit rate and peakrate values to break the 32bit limit, while still keeping backward binary compatibility. Tested-by: David Dai Signed-off-by: David Dai Acked-by: Cong Wang Signed-off-by: David S. Miller --- include/uapi/linux/pkt_cls.h | 2 ++ net/sched/act_police.c | 27 +++++++++++++++++++++++---- 2 files changed, 25 insertions(+), 4 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index b057aeeb6338..a6aa466fac9e 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -160,6 +160,8 @@ enum { TCA_POLICE_RESULT, TCA_POLICE_TM, TCA_POLICE_PAD, + TCA_POLICE_RATE64, + TCA_POLICE_PEAKRATE64, __TCA_POLICE_MAX #define TCA_POLICE_RESULT TCA_POLICE_RESULT }; diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 6315e0f8d26e..89c04c52af3d 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -40,6 +40,8 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = { [TCA_POLICE_PEAKRATE] = { .len = TC_RTAB_SIZE }, [TCA_POLICE_AVRATE] = { .type = NLA_U32 }, [TCA_POLICE_RESULT] = { .type = NLA_U32 }, + [TCA_POLICE_RATE64] = { .type = NLA_U64 }, + [TCA_POLICE_PEAKRATE64] = { .type = NLA_U64 }, }; static int tcf_police_init(struct net *net, struct nlattr *nla, @@ -58,6 +60,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, struct tcf_police_params *new; bool exists = false; u32 index; + u64 rate64, prate64; if (nla == NULL) return -EINVAL; @@ -155,14 +158,18 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, } if (R_tab) { new->rate_present = true; - psched_ratecfg_precompute(&new->rate, &R_tab->rate, 0); + rate64 = tb[TCA_POLICE_RATE64] ? + nla_get_u64(tb[TCA_POLICE_RATE64]) : 0; + psched_ratecfg_precompute(&new->rate, &R_tab->rate, rate64); qdisc_put_rtab(R_tab); } else { new->rate_present = false; } if (P_tab) { new->peak_present = true; - psched_ratecfg_precompute(&new->peak, &P_tab->rate, 0); + prate64 = tb[TCA_POLICE_PEAKRATE64] ?
+ nla_get_u64(tb[TCA_POLICE_PEAKRATE64]) : 0; + psched_ratecfg_precompute(&new->peak, &P_tab->rate, prate64); qdisc_put_rtab(P_tab); } else { new->peak_present = false; @@ -313,10 +320,22 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a, lockdep_is_held(&police->tcf_lock)); opt.mtu = p->tcfp_mtu; opt.burst = PSCHED_NS2TICKS(p->tcfp_burst); - if (p->rate_present) + if (p->rate_present) { psched_ratecfg_getrate(&opt.rate, &p->rate); - if (p->peak_present) + if ((police->params->rate.rate_bytes_ps >= (1ULL << 32)) && + nla_put_u64_64bit(skb, TCA_POLICE_RATE64, + police->params->rate.rate_bytes_ps, + TCA_POLICE_PAD)) + goto nla_put_failure; + } + if (p->peak_present) { psched_ratecfg_getrate(&opt.peakrate, &p->peak); + if ((police->params->peak.rate_bytes_ps >= (1ULL << 32)) && + nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64, + police->params->peak.rate_bytes_ps, + TCA_POLICE_PAD)) + goto nla_put_failure; + } if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt)) goto nla_put_failure; if (p->tcfp_result && -- cgit v1.2.3 From ac90f249e15cd2a850daa9e36e15f81ce1ff6550 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 6 Sep 2019 10:26:21 -0600 Subject: io_uring: expose single mmap capability After commit 75b28affdd6a we can get by with just a single mmap to map both the sq and cq ring. However, userspace doesn't know that. Add a features variable to io_uring_params, and notify userspace that the kernel has this ability. This can then be used in liburing (or in applications directly) to avoid the second mmap. Signed-off-by: Jens Axboe --- fs/io_uring.c | 2 ++ include/uapi/linux/io_uring.h | 8 +++++++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/fs/io_uring.c b/fs/io_uring.c index 17dfe57c57f8..be24596e90d7 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -3391,6 +3391,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries); p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); p->cq_off.cqes = offsetof(struct io_rings, cqes); + + p->features = IORING_FEAT_SINGLE_MMAP; return ret; err: io_ring_ctx_wait_and_kill(ctx); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 1e1652f25cc1..96ee9d94b73e 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -128,11 +128,17 @@ struct io_uring_params { __u32 flags; __u32 sq_thread_cpu; __u32 sq_thread_idle; - __u32 resv[5]; + __u32 features; + __u32 resv[4]; struct io_sqring_offsets sq_off; struct io_cqring_offsets cq_off; }; +/* + * io_uring_params->features flags + */ +#define IORING_FEAT_SINGLE_MMAP (1U << 0) + /* * io_uring_register(2) opcodes and arguments */ -- cgit v1.2.3 From 78e05972c5e6c8e9ca4c00ccc6985409da69f904 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 5 Sep 2019 16:20:09 +0200 Subject: ipc: fix semtimedop for generic 32-bit architectures As Vincent noticed, the y2038 conversion of semtimedop in linux-5.1 broke when commit 00bf25d693e7 ("y2038: use time32 syscall names on 32-bit") changed all system calls on all architectures that take a 32-bit time_t to point to the _time32 implementation, but left out semtimedop in the asm-generic header. This affects all 32-bit architectures using asm-generic/unistd.h: h8300, unicore32, openrisc, nios2, hexagon, c6x, arc, nds32 and csky. The notable exception is riscv32, which has dropped support for the time32 system calls entirely. 
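For readers unfamiliar with the wiring macros: __SC_COMP() always points the native slot at its first argument and only uses the second for compat tasks, while __SC_3264() selects by word size. A rough sketch of the asm-generic definition (a simplified reconstruction, so treat the exact guard as an assumption rather than a verbatim quote):

    #if __BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)
    #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32)   /* 32-bit entry point */
    #else
    #define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)   /* 64-bit entry point */
    #endif

With the fix, a native 32-bit build therefore wires __NR_semtimedop to sys_semtimedop_time32, matching the 32-bit time_t its userspace passes, while 64-bit builds keep sys_semtimedop.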
Reported-by: Vincent Chen Cc: stable@vger.kernel.org Cc: Vincent Chen Cc: Greentime Hu Cc: Yoshinori Sato Cc: Guan Xuetao Cc: Stafford Horne Cc: Jonas Bonn Cc: Stefan Kristiansson Cc: Ley Foon Tan Cc: Richard Kuo Cc: Mark Salter Cc: Aurelien Jacquiot Cc: Guo Ren Fixes: 00bf25d693e7 ("y2038: use time32 syscall names on 32-bit") Signed-off-by: Arnd Bergmann --- include/uapi/asm-generic/unistd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 1be0e798e362..1fc8faa6e973 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -569,7 +569,7 @@ __SYSCALL(__NR_semget, sys_semget) __SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl) #if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32 #define __NR_semtimedop 192 -__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32) +__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop) #endif #define __NR_semop 193 __SYSCALL(__NR_semop, sys_semop) -- cgit v1.2.3 From fe163e534e5eecdfd7b5920b0dfd24c458ee85d6 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 5 Sep 2019 19:36:37 -0700 Subject: isdn/capi: check message length in capi_write() syzbot reported: BUG: KMSAN: uninit-value in capi_write+0x791/0xa90 drivers/isdn/capi/capi.c:700 CPU: 0 PID: 10025 Comm: syz-executor379 Not tainted 4.20.0-rc7+ #2 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x173/0x1d0 lib/dump_stack.c:113 kmsan_report+0x12e/0x2a0 mm/kmsan/kmsan.c:613 __msan_warning+0x82/0xf0 mm/kmsan/kmsan_instr.c:313 capi_write+0x791/0xa90 drivers/isdn/capi/capi.c:700 do_loop_readv_writev fs/read_write.c:703 [inline] do_iter_write+0x83e/0xd80 fs/read_write.c:961 vfs_writev fs/read_write.c:1004 [inline] do_writev+0x397/0x840 fs/read_write.c:1039 __do_sys_writev fs/read_write.c:1112 [inline] __se_sys_writev+0x9b/0xb0 fs/read_write.c:1109 __x64_sys_writev+0x4a/0x70 fs/read_write.c:1109 do_syscall_64+0xbc/0xf0 arch/x86/entry/common.c:291 entry_SYSCALL_64_after_hwframe+0x63/0xe7 [...] The problem is that capi_write() is reading past the end of the message. Fix it by checking the message's length in the needed places. Reported-and-tested-by: syzbot+0849c524d9c634f5ae66@syzkaller.appspotmail.com Signed-off-by: Eric Biggers Signed-off-by: David S. 
Miller --- drivers/isdn/capi/capi.c | 10 +++++++++- include/uapi/linux/isdn/capicmd.h | 1 + 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 3c3ad42f22bf..c92b405b7646 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c @@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos if (!cdev->ap.applid) return -ENODEV; + if (count < CAPIMSG_BASELEN) + return -EINVAL; + skb = alloc_skb(count, GFP_USER); if (!skb) return -ENOMEM; @@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos } mlen = CAPIMSG_LEN(skb->data); if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) { - if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) { + if (count < CAPI_DATA_B3_REQ_LEN || + (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) { kfree_skb(skb); return -EINVAL; } @@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos CAPIMSG_SETAPPID(skb->data, cdev->ap.applid); if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) { + if (count < CAPI_DISCONNECT_B3_RESP_LEN) { + kfree_skb(skb); + return -EINVAL; + } mutex_lock(&cdev->lock); capincci_free(cdev, CAPIMSG_NCCI(skb->data)); mutex_unlock(&cdev->lock); diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h index 4941628a4fb9..5ec88e7548a9 100644 --- a/include/uapi/linux/isdn/capicmd.h +++ b/include/uapi/linux/isdn/capicmd.h @@ -16,6 +16,7 @@ #define CAPI_MSG_BASELEN 8 #define CAPI_DATA_B3_REQ_LEN (CAPI_MSG_BASELEN+4+4+2+2+2) #define CAPI_DATA_B3_RESP_LEN (CAPI_MSG_BASELEN+4+2) +#define CAPI_DISCONNECT_B3_RESP_LEN (CAPI_MSG_BASELEN+4) /*----- CAPI commands -----*/ #define CAPI_ALERT 0x01 -- cgit v1.2.3 From fc697dc0c26a5908d467454e49440862d7fe96d0 Mon Sep 17 00:00:00 2001 From: Sven Schnelle Date: Sun, 8 Sep 2019 11:33:04 +0200 Subject: parisc: add kexec syscall support Signed-off-by: Sven Schnelle Signed-off-by: Helge Deller --- arch/parisc/Kconfig | 13 +++ arch/parisc/include/asm/fixmap.h | 1 + arch/parisc/include/asm/kexec.h | 37 +++++++++ arch/parisc/kernel/Makefile | 1 + arch/parisc/kernel/kexec.c | 105 ++++++++++++++++++++++++ arch/parisc/kernel/relocate_kernel.S | 149 +++++++++++++++++++++++++++++++++++ include/uapi/linux/kexec.h | 1 + 7 files changed, 307 insertions(+) create mode 100644 arch/parisc/include/asm/kexec.h create mode 100644 arch/parisc/kernel/kexec.c create mode 100644 arch/parisc/kernel/relocate_kernel.S (limited to 'include/uapi') diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index ee59171edffe..548c767f4358 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -346,6 +346,19 @@ config NR_CPUS depends on SMP default "4" +config KEXEC + bool "Kexec system call" + select KEXEC_CORE + help + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. It is like a reboot + but it is independent of the system firmware. And like a reboot + you can start any kernel with it, not just Linux. + + It is an ongoing process to be certain the hardware in a machine + shutdown, so do not be surprised if this code does not + initially work for you. 
+ endmenu diff --git a/arch/parisc/include/asm/fixmap.h b/arch/parisc/include/asm/fixmap.h index 288da73d4cc0..e480b2c05407 100644 --- a/arch/parisc/include/asm/fixmap.h +++ b/arch/parisc/include/asm/fixmap.h @@ -30,6 +30,7 @@ enum fixed_addresses { /* Support writing RO kernel text via kprobes, jump labels, etc. */ FIX_TEXT_POKE0, + FIX_TEXT_KEXEC, FIX_BITMAP_COUNT }; diff --git a/arch/parisc/include/asm/kexec.h b/arch/parisc/include/asm/kexec.h new file mode 100644 index 000000000000..a99ea747d7ed --- /dev/null +++ b/arch/parisc/include/asm/kexec.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_PARISC_KEXEC_H +#define _ASM_PARISC_KEXEC_H + +#ifdef CONFIG_KEXEC + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +#define KEXEC_CONTROL_PAGE_SIZE 4096 + +#define KEXEC_ARCH KEXEC_ARCH_PARISC +#define ARCH_HAS_KIMAGE_ARCH + +#ifndef __ASSEMBLY__ + +struct kimage_arch { + unsigned long initrd_start; + unsigned long initrd_end; + unsigned long cmdline; +}; + +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + /* Dummy implementation for now */ +} + +#endif /* __ASSEMBLY__ */ + +#endif /* CONFIG_KEXEC */ + +#endif /* _ASM_PARISC_KEXEC_H */ diff --git a/arch/parisc/kernel/Makefile b/arch/parisc/kernel/Makefile index c232266b517c..487cf88866a8 100644 --- a/arch/parisc/kernel/Makefile +++ b/arch/parisc/kernel/Makefile @@ -37,3 +37,4 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KPROBES) += kprobes.o +obj-$(CONFIG_KEXEC) += kexec.o relocate_kernel.o diff --git a/arch/parisc/kernel/kexec.c b/arch/parisc/kernel/kexec.c new file mode 100644 index 000000000000..a92d265a2261 --- /dev/null +++ b/arch/parisc/kernel/kexec.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +extern void relocate_new_kernel(unsigned long head, + unsigned long start, + unsigned long phys); + +extern const unsigned int relocate_new_kernel_size; +extern unsigned int kexec_initrd_start_offset; +extern unsigned int kexec_initrd_end_offset; +extern unsigned int kexec_cmdline_offset; +extern unsigned int kexec_free_mem_offset; + +static void kexec_show_segment_info(const struct kimage *kimage, + unsigned long n) +{ + pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n", + n, + kimage->segment[n].mem, + kimage->segment[n].mem + kimage->segment[n].memsz, + (unsigned long)kimage->segment[n].memsz, + (unsigned long)kimage->segment[n].memsz / PAGE_SIZE); +} + +static void kexec_image_info(const struct kimage *kimage) +{ + unsigned long i; + + pr_debug("kexec kimage info:\n"); + pr_debug(" type: %d\n", kimage->type); + pr_debug(" start: %lx\n", kimage->start); + pr_debug(" head: %lx\n", kimage->head); + pr_debug(" nr_segments: %lu\n", kimage->nr_segments); + + for (i = 0; i < kimage->nr_segments; i++) + kexec_show_segment_info(kimage, i); +} + +void machine_kexec_cleanup(struct kimage *kimage) +{ +} + +void machine_crash_shutdown(struct pt_regs *regs) +{ +} + +void machine_shutdown(void) +{ + smp_send_stop(); + while (num_online_cpus() > 1) { + cpu_relax(); + mdelay(1); + } +} + +void machine_kexec(struct kimage *image) +{ +#ifdef CONFIG_64BIT + 
Elf64_Fdesc desc; +#endif + void (*reloc)(unsigned long head, + unsigned long start, + unsigned long phys); + + unsigned long phys = page_to_phys(image->control_code_page); + void *virt = (void *)__fix_to_virt(FIX_TEXT_KEXEC); + struct kimage_arch *arch = &image->arch; + + set_fixmap(FIX_TEXT_KEXEC, phys); + + flush_cache_all(); + +#ifdef CONFIG_64BIT + reloc = (void *)&desc; + desc.addr = (long long)virt; +#else + reloc = (void *)virt; +#endif + + memcpy(virt, dereference_function_descriptor(relocate_new_kernel), + relocate_new_kernel_size); + + *(unsigned long *)(virt + kexec_cmdline_offset) = arch->cmdline; + *(unsigned long *)(virt + kexec_initrd_start_offset) = arch->initrd_start; + *(unsigned long *)(virt + kexec_initrd_end_offset) = arch->initrd_end; + *(unsigned long *)(virt + kexec_free_mem_offset) = PAGE0->mem_free; + + flush_cache_all(); + flush_tlb_all(); + local_irq_disable(); + + reloc(image->head & PAGE_MASK, image->start, phys); +} + +int machine_kexec_prepare(struct kimage *image) +{ + kexec_image_info(image); + return 0; +} diff --git a/arch/parisc/kernel/relocate_kernel.S b/arch/parisc/kernel/relocate_kernel.S new file mode 100644 index 000000000000..2561e52b8d9b --- /dev/null +++ b/arch/parisc/kernel/relocate_kernel.S @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include + +#include +#include +#include +#include +#include + +.level PA_ASM_LEVEL + +.macro kexec_param name +.align 8 +ENTRY(kexec\()_\name) +#ifdef CONFIG_64BIT + .dword 0 +#else + .word 0 +#endif + +ENTRY(kexec\()_\name\()_offset) + .word kexec\()_\name - relocate_new_kernel +.endm + +.text + +/* args: + * r26 - kimage->head + * r25 - start address of kernel + * r24 - physical address of relocate code + */ + +ENTRY_CFI(relocate_new_kernel) +0: copy %arg1, %rp + /* disable I and Q bit, so we are allowed to execute RFI */ + rsm PSW_SM_I, %r0 + nop + nop + nop + nop + nop + nop + nop + + rsm PSW_SM_Q, %r0 + nop + nop + nop + nop + nop + nop + nop + + /* + * After return-from-interrupt, we want to run without Code/Data + * translation enabled just like on a normal boot. + */ + + /* calculate new physical execution address */ + ldo 1f-0b(%arg2), %r1 + mtctl %r0, %cr17 /* IIASQ */ + mtctl %r0, %cr17 /* IIASQ */ + mtctl %r1, %cr18 /* IIAOQ */ + ldo 4(%r1),%r1 + mtctl %r1, %cr18 /* IIAOQ */ +#ifdef CONFIG_64BIT + depdi,z 1, PSW_W_BIT, 1, %r1 + mtctl %r1, %cr22 /* IPSW */ +#else + mtctl %r0, %cr22 /* IPSW */ +#endif + /* lets go... 
*/ + rfi +1: nop + nop + +.Lloop: + LDREG,ma REG_SZ(%arg0), %r3 + /* If crash kernel, no copy needed */ + cmpib,COND(=),n 0,%r3,boot + + bb,<,n %r3, 31 - IND_DONE_BIT, boot + bb,>=,n %r3, 31 - IND_INDIRECTION_BIT, .Lnotind + /* indirection, load and restart */ + movb %r3, %arg0, .Lloop + depi 0, 31, PAGE_SHIFT, %arg0 + +.Lnotind: + bb,>=,n %r3, 31 - IND_DESTINATION_BIT, .Lnotdest + b .Lloop + copy %r3, %r20 + +.Lnotdest: + bb,>= %r3, 31 - IND_SOURCE_BIT, .Lloop + depi 0, 31, PAGE_SHIFT, %r3 + copy %r3, %r21 + + /* copy page */ + copy %r0, %r18 + zdepi 1, 31 - PAGE_SHIFT, 1, %r18 + add %r20, %r18, %r17 + + depi 0, 31, PAGE_SHIFT, %r20 +.Lcopy: + copy %r20, %r12 + LDREG,ma REG_SZ(%r21), %r8 + LDREG,ma REG_SZ(%r21), %r9 + LDREG,ma REG_SZ(%r21), %r10 + LDREG,ma REG_SZ(%r21), %r11 + STREG,ma %r8, REG_SZ(%r20) + STREG,ma %r9, REG_SZ(%r20) + STREG,ma %r10, REG_SZ(%r20) + STREG,ma %r11, REG_SZ(%r20) + +#ifndef CONFIG_64BIT + LDREG,ma REG_SZ(%r21), %r8 + LDREG,ma REG_SZ(%r21), %r9 + LDREG,ma REG_SZ(%r21), %r10 + LDREG,ma REG_SZ(%r21), %r11 + STREG,ma %r8, REG_SZ(%r20) + STREG,ma %r9, REG_SZ(%r20) + STREG,ma %r10, REG_SZ(%r20) + STREG,ma %r11, REG_SZ(%r20) +#endif + + fdc %r0(%r12) + cmpb,COND(<<) %r20,%r17,.Lcopy + fic (%sr4, %r12) + b,n .Lloop + +boot: + mtctl %r0, %cr15 + + LDREG kexec_free_mem-0b(%arg2), %arg0 + LDREG kexec_cmdline-0b(%arg2), %arg1 + LDREG kexec_initrd_end-0b(%arg2), %arg3 + LDREG kexec_initrd_start-0b(%arg2), %arg2 + bv,n %r0(%rp) + +ENDPROC_CFI(relocate_new_kernel); + +ENTRY(relocate_new_kernel_size) + .word relocate_new_kernel_size - relocate_new_kernel + +kexec_param cmdline +kexec_param initrd_start +kexec_param initrd_end +kexec_param free_mem diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 6d112868272d..05669c87a0af 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -31,6 +31,7 @@ #define KEXEC_ARCH_DEFAULT ( 0 << 16) #define KEXEC_ARCH_386 ( 3 << 16) #define KEXEC_ARCH_68K ( 4 << 16) +#define KEXEC_ARCH_PARISC (15 << 16) #define KEXEC_ARCH_X86_64 (62 << 16) #define KEXEC_ARCH_PPC (20 << 16) #define KEXEC_ARCH_PPC64 (21 << 16) -- cgit v1.2.3 From 92f35b751c71d14250a401246f2c792e3aa5b386 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 18 Aug 2019 14:09:47 +0100 Subject: KVM: arm/arm64: vgic: Allow more than 256 vcpus for KVM_IRQ_LINE While parts of the VGIC support a large number of vcpus (we bravely allow up to 512), other parts are more limited. One of these limits is visible in the KVM_IRQ_LINE ioctl, which only allows 256 vcpus to be signalled when using the CPU or PPI types. Unfortunately, we've cornered ourselves badly by allocating all the bits in the irq field. Since the irq_type subfield (8 bit wide) is currently only taking the values 0, 1 and 2 (and we have been careful not to allow anything else), let's reduce this field to only 4 bits, and allocate the remaining 4 bits to a vcpu2_index, which acts as a multiplier: vcpu_id = 256 * vcpu2_index + vcpu_index With that, and a new capability (KVM_CAP_ARM_IRQ_LINE_LAYOUT_2) allowing this to be discovered, it becomes possible to inject PPIs to up to 4096 vcpus. But please just don't. Whilst we're there, add a clarification about the use of KVM_IRQ_LINE on arm, which is not completely conditionned by KVM_CAP_IRQCHIP. 
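As an illustration, a userspace VMM could encode the irq field along these lines (a sketch; kvm_arm_encode_irq() is a made-up helper, not a kernel or library API, and the shift values mirror the uapi headers changed below):

    #include <stdint.h>

    #define IRQ_VCPU2_SHIFT 28  /* KVM_ARM_IRQ_VCPU2_SHIFT */
    #define IRQ_TYPE_SHIFT  24  /* KVM_ARM_IRQ_TYPE_SHIFT */
    #define IRQ_VCPU_SHIFT  16  /* KVM_ARM_IRQ_VCPU_SHIFT */

    static uint32_t kvm_arm_encode_irq(uint32_t irq_type, uint32_t vcpu_id,
                                       uint32_t irq_num)
    {
            /* vcpu_id = 256 * vcpu2_index + vcpu_index */
            return ((vcpu_id / 256) << IRQ_VCPU2_SHIFT) |
                   (irq_type << IRQ_TYPE_SHIFT) |
                   ((vcpu_id % 256) << IRQ_VCPU_SHIFT) |
                   irq_num;
    }

On kernels without KVM_CAP_ARM_IRQ_LINE_LAYOUT_2, vcpu_id must stay below 256 so the vcpu2_index bits remain zero.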
Reported-by: Zenghui Yu Reviewed-by: Eric Auger Reviewed-by: Zenghui Yu Signed-off-by: Marc Zyngier --- Documentation/virt/kvm/api.txt | 12 ++++++++++-- arch/arm/include/uapi/asm/kvm.h | 4 +++- arch/arm64/include/uapi/asm/kvm.h | 4 +++- include/uapi/linux/kvm.h | 1 + virt/kvm/arm/arm.c | 2 ++ 5 files changed, 19 insertions(+), 4 deletions(-) (limited to 'include/uapi') diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt index 2d067767b617..25931ca1cb38 100644 --- a/Documentation/virt/kvm/api.txt +++ b/Documentation/virt/kvm/api.txt @@ -753,8 +753,8 @@ in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to use PPIs designated for specific cpus. The irq field is interpreted like this: -  bits: | 31 ... 24 | 23 ... 16 | 15 ... 0 | - field: | irq_type | vcpu_index | irq_id | +  bits: | 31 ... 28 | 27 ... 24 | 23 ... 16 | 15 ... 0 | + field: | vcpu2_index | irq_type | vcpu_index | irq_id | The irq_type field has the following values: - irq_type[0]: out-of-kernel GIC: irq_id 0 is IRQ, irq_id 1 is FIQ @@ -766,6 +766,14 @@ The irq_type field has the following values: In both cases, level is used to assert/deassert the line. +When KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 is supported, the target vcpu is +identified as (256 * vcpu2_index + vcpu_index). Otherwise, vcpu2_index +must be zero. + +Note that on arm/arm64, the KVM_CAP_IRQCHIP capability only conditions +injection of interrupts for the in-kernel irqchip. KVM_IRQ_LINE can always +be used for a userspace interrupt controller. + struct kvm_irq_level { union { __u32 irq; /* GSI */ diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index a4217c1a5d01..2769360f195c 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h @@ -266,8 +266,10 @@ struct kvm_vcpu_events { #define KVM_DEV_ARM_ITS_CTRL_RESET 4 /* KVM_IRQ_LINE irq field index values */ +#define KVM_ARM_IRQ_VCPU2_SHIFT 28 +#define KVM_ARM_IRQ_VCPU2_MASK 0xf #define KVM_ARM_IRQ_TYPE_SHIFT 24 -#define KVM_ARM_IRQ_TYPE_MASK 0xff +#define KVM_ARM_IRQ_TYPE_MASK 0xf #define KVM_ARM_IRQ_VCPU_SHIFT 16 #define KVM_ARM_IRQ_VCPU_MASK 0xff #define KVM_ARM_IRQ_NUM_SHIFT 0 diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 9a507716ae2f..67c21f9bdbad 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -325,8 +325,10 @@ struct kvm_vcpu_events { #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 /* KVM_IRQ_LINE irq field index values */ +#define KVM_ARM_IRQ_VCPU2_SHIFT 28 +#define KVM_ARM_IRQ_VCPU2_MASK 0xf #define KVM_ARM_IRQ_TYPE_SHIFT 24 -#define KVM_ARM_IRQ_TYPE_MASK 0xff +#define KVM_ARM_IRQ_TYPE_MASK 0xf #define KVM_ARM_IRQ_VCPU_SHIFT 16 #define KVM_ARM_IRQ_VCPU_MASK 0xff #define KVM_ARM_IRQ_NUM_SHIFT 0 diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 5e3f12d5359e..5414b6588fbb 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -996,6 +996,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_PTRAUTH_ADDRESS 171 #define KVM_CAP_ARM_PTRAUTH_GENERIC 172 #define KVM_CAP_PMU_EVENT_FILTER 173 +#define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174 #ifdef KVM_CAP_IRQ_ROUTING diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 35a069815baf..86c6aa1cb58e 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -196,6 +196,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MP_STATE: case KVM_CAP_IMMEDIATE_EXIT: case KVM_CAP_VCPU_EVENTS: + case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2: r = 1; break; case 
KVM_CAP_ARM_SET_DEVICE_ADDR: @@ -888,6 +889,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK; vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK; + vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1); irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK; trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level); -- cgit v1.2.3 From 40cf931fa81bedea08823dda9e6e73630db41b70 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Wed, 17 Jul 2019 12:39:20 -0500 Subject: btrfs: use common vfs LABEL ioctl definitions I lifted the btrfs label get/set ioctls to the vfs some time ago, but never followed up to use those common definitions directly in btrfs. This patch does that. Signed-off-by: Eric Sandeen Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/ioctl.c | 8 ++++---- include/uapi/linux/btrfs.h | 6 ++---- 2 files changed, 6 insertions(+), 8 deletions(-) (limited to 'include/uapi') diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 0a0c54e99b22..9eaf78d7b8eb 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -5453,6 +5453,10 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_setflags(file, argp); case FS_IOC_GETVERSION: return btrfs_ioctl_getversion(file, argp); + case FS_IOC_GETFSLABEL: + return btrfs_ioctl_get_fslabel(file, argp); + case FS_IOC_SETFSLABEL: + return btrfs_ioctl_set_fslabel(file, argp); case FITRIM: return btrfs_ioctl_fitrim(file, argp); case BTRFS_IOC_SNAP_CREATE: @@ -5564,10 +5568,6 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_quota_rescan_wait(file, argp); case BTRFS_IOC_DEV_REPLACE: return btrfs_ioctl_dev_replace(fs_info, argp); - case BTRFS_IOC_GET_FSLABEL: - return btrfs_ioctl_get_fslabel(file, argp); - case BTRFS_IOC_SET_FSLABEL: - return btrfs_ioctl_set_fslabel(file, argp); case BTRFS_IOC_GET_SUPPORTED_FEATURES: return btrfs_ioctl_get_supported_features(argp); case BTRFS_IOC_GET_FEATURES: diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index c195896d478f..7885d79f7515 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -917,10 +917,8 @@ enum btrfs_err_code { #define BTRFS_IOC_QUOTA_RESCAN_STATUS _IOR(BTRFS_IOCTL_MAGIC, 45, \ struct btrfs_ioctl_quota_rescan_args) #define BTRFS_IOC_QUOTA_RESCAN_WAIT _IO(BTRFS_IOCTL_MAGIC, 46) -#define BTRFS_IOC_GET_FSLABEL _IOR(BTRFS_IOCTL_MAGIC, 49, \ - char[BTRFS_LABEL_SIZE]) -#define BTRFS_IOC_SET_FSLABEL _IOW(BTRFS_IOCTL_MAGIC, 50, \ - char[BTRFS_LABEL_SIZE]) +#define BTRFS_IOC_GET_FSLABEL FS_IOC_GETFSLABEL +#define BTRFS_IOC_SET_FSLABEL FS_IOC_SETFSLABEL #define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \ struct btrfs_ioctl_get_dev_stats) #define BTRFS_IOC_DEV_REPLACE _IOWR(BTRFS_IOCTL_MAGIC, 53, \ -- cgit v1.2.3 From 73a3ca20934dd02c0912bcf32463fffec6139399 Mon Sep 17 00:00:00 2001 From: Hans van Kranenburg Date: Sat, 3 Aug 2019 23:36:34 +0200 Subject: btrfs: clarify btrfs_ioctl_get_dev_stats padding In commit c11d2c236cc26 ("Btrfs: add ioctl to get and reset the device stats") the get_dev_stats ioctl was added. Shortly thereafter, in commit b27f7c0c150f7 ("btrfs: join DEV_STATS ioctls to one") , the flags field was added. However, the calculation for unused padding space was not updated, which also invalidated the comment. Clarify what happened to reduce confusion and wasted time for anyone implementing this. 
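The arithmetic is easy to check with a sketch (the devid/nr_items field names and BTRFS_DEV_STAT_VALUES_MAX = 5 are taken from the uapi header, which is only partially quoted below, so treat them as assumptions):

    #include <linux/types.h>

    #define DEV_STAT_VALUES_MAX 5   /* assumed current header value */

    struct get_dev_stats_sketch {
            __u64 devid;                            /* in */
            __u64 nr_items;                         /* in/out */
            __u64 flags;                            /* in/out, added later */
            __u64 values[DEV_STAT_VALUES_MAX];      /* out */
            __u64 unused[128 - 2 - DEV_STAT_VALUES_MAX];
    };

    /* 3 + 5 + 121 = 129 __u64 members, i.e. 1032 bytes rather than 1024 */
    _Static_assert(sizeof(struct get_dev_stats_sketch) == 1032, "pads to 1032");

The "- 2" accounts only for devid and nr_items; flags was added without bumping it to "- 3", hence the extra 8 bytes.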
Signed-off-by: Hans van Kranenburg Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/uapi/linux/btrfs.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 7885d79f7515..3ee0678c0a83 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -665,7 +665,12 @@ struct btrfs_ioctl_get_dev_stats { /* out values: */ __u64 values[BTRFS_DEV_STAT_VALUES_MAX]; - __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; /* pad to 1k */ + /* + * This pads the struct to 1032 bytes. It was originally meant to pad to + * 1024 bytes, but when adding the flags field, the padding calculation + * was not adjusted. + */ + __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; }; #define BTRFS_QUOTA_CTL_ENABLE 1 -- cgit v1.2.3 From 27e022a9c6fe97dd80e31c038328d4f79b2080c2 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Thu, 8 Aug 2019 12:32:44 +0800 Subject: btrfs: replace: BTRFS_DEV_REPLACE_ITEM_STATE_x defines should go The BTRFS_DEV_REPLACE_ITEM_STATE_x defines, as shown in [1], are unused in both kernel and btrfs-progs (except for one instance of BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED in kernel). [1] btrfs_tree.h:#define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED 2 btrfs_tree.h:#define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED 3 btrfs_tree.h:#define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED 4 Further, these define values are different from their counterpart BTRFS_IOCTL_DEV_REPLACE_STATE_x series as shown in [2]. [2] btrfs.h:#define BTRFS_IOCTL_DEV_REPLACE_STATE_FINISHED 2 btrfs.h:#define BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED 3 btrfs.h:#define BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED 4 So this patch deletes the BTRFS_DEV_REPLACE_ITEM_STATE_x defines altogether, and one instance of BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED is replaced with BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED in the kernel.
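The value mismatch is what makes the duplication risky: a caller that passed an item state where an ioctl state is expected would silently report the wrong state, since only 0 (never started) and 1 (started) coincide. A hypothetical conversion helper, built purely from the values quoted in [1] and [2], makes this explicit:

    /* Illustrative only; such a mapping exists nowhere in the tree. */
    static int item_state_to_ioctl_state(int item_state)
    {
            switch (item_state) {
            case 2: return 4;       /* SUSPENDED: item 2, ioctl 4 */
            case 3: return 2;       /* FINISHED:  item 3, ioctl 2 */
            case 4: return 3;       /* CANCELED:  item 4, ioctl 3 */
            default: return item_state; /* 0 and 1 match in both series */
            }
    }

Dropping the unused item-state series removes any need for such a mapping.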
Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/dev-replace.c | 2 +- include/uapi/linux/btrfs_tree.h | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) (limited to 'include/uapi') diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 6b2e9aa83ffa..00ea828beb00 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -56,7 +56,7 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info) no_valid_dev_replace_entry_found: ret = 0; dev_replace->replace_state = - BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED; + BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED; dev_replace->cont_reading_from_srcdev_mode = BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS; dev_replace->time_started = 0; diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 34d5b34286fa..71246c1941aa 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -806,11 +806,6 @@ struct btrfs_dev_stats_item { #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS 0 #define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID 1 -#define BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED 0 -#define BTRFS_DEV_REPLACE_ITEM_STATE_STARTED 1 -#define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED 2 -#define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED 3 -#define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED 4 struct btrfs_dev_replace_item { /* -- cgit v1.2.3 From e35b79a1070d681b4842dad27b1edaf9811da7e9 Mon Sep 17 00:00:00 2001 From: Johannes Thumshirn Date: Fri, 30 Aug 2019 13:36:08 +0200 Subject: btrfs: turn checksum type define into an enum Turn the checksum type definition into a enum. This eases later addition of new checksums. Reviewed-by: Nikolay Borisov Signed-off-by: Johannes Thumshirn Reviewed-by: David Sterba Signed-off-by: David Sterba --- include/uapi/linux/btrfs_tree.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h index 71246c1941aa..b65c7ee75bc7 100644 --- a/include/uapi/linux/btrfs_tree.h +++ b/include/uapi/linux/btrfs_tree.h @@ -300,7 +300,9 @@ #define BTRFS_CSUM_SIZE 32 /* csum types */ -#define BTRFS_CSUM_TYPE_CRC32 0 +enum btrfs_csum_type { + BTRFS_CSUM_TYPE_CRC32 = 0, +}; /* * flags definitions for directory entry item type -- cgit v1.2.3 From 11a60d159259dbadf9188534b508e5003769af61 Mon Sep 17 00:00:00 2001 From: Scott Mayhew Date: Mon, 9 Sep 2019 16:10:30 -0400 Subject: nfsd: add a "GetVersion" upcall for nfsdcld Add a "GetVersion" upcall to allow nfsd to determine the maximum upcall version that the nfsdcld userspace daemon supports. If the daemon responds with -EOPNOTSUPP, then we know it only supports v1. Signed-off-by: Scott Mayhew Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4recover.c | 167 +++++++++++++++++++++++++++++------------- include/uapi/linux/nfsd/cld.h | 11 ++- 2 files changed, 127 insertions(+), 51 deletions(-) (limited to 'include/uapi') diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 87679557d0d6..58a61339d40c 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -59,8 +59,12 @@ struct nfsd4_client_tracking_ops { void (*remove)(struct nfs4_client *); int (*check)(struct nfs4_client *); void (*grace_done)(struct nfsd_net *); + uint8_t version; + size_t msglen; }; +static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops; + /* Globals */ static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery"; @@ -718,6 +722,8 @@ static const struct nfsd4_client_tracking_ops nfsd4_legacy_tracking_ops = { .remove = nfsd4_remove_clid_dir, .check = nfsd4_check_legacy_client, .grace_done = nfsd4_recdir_purge_old, + .version = 1, + .msglen = 0, }; /* Globals */ @@ -737,19 +743,24 @@ struct cld_upcall { struct list_head cu_list; struct cld_net *cu_net; struct completion cu_done; - struct cld_msg cu_msg; + union { + struct cld_msg_hdr cu_hdr; + struct cld_msg cu_msg; + } cu_u; }; static int -__cld_pipe_upcall(struct rpc_pipe *pipe, struct cld_msg *cmsg) +__cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg) { int ret; struct rpc_pipe_msg msg; - struct cld_upcall *cup = container_of(cmsg, struct cld_upcall, cu_msg); + struct cld_upcall *cup = container_of(cmsg, struct cld_upcall, cu_u); + struct nfsd_net *nn = net_generic(pipe->dentry->d_sb->s_fs_info, + nfsd_net_id); memset(&msg, 0, sizeof(msg)); msg.data = cmsg; - msg.len = sizeof(*cmsg); + msg.len = nn->client_tracking_ops->msglen; ret = rpc_queue_upcall(pipe, &msg); if (ret < 0) { @@ -765,7 +776,7 @@ out: } static int -cld_pipe_upcall(struct rpc_pipe *pipe, struct cld_msg *cmsg) +cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg) { int ret; @@ -809,7 +820,7 @@ __cld_pipe_inprogress_downcall(const struct cld_msg __user *cmsg, kfree(name.data); return -EFAULT; } - return sizeof(*cmsg); + return nn->client_tracking_ops->msglen; } return -EFAULT; } @@ -818,6 +829,7 @@ static ssize_t cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) { struct cld_upcall *tmp, *cup; + struct cld_msg_hdr __user *hdr = (struct cld_msg_hdr __user *)src; struct cld_msg __user *cmsg = (struct cld_msg __user *)src; uint32_t xid; struct nfsd_net *nn = net_generic(file_inode(filp)->i_sb->s_fs_info, @@ -825,14 +837,14 @@ cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) struct cld_net *cn = nn->cld_net; int16_t status; - if (mlen != sizeof(*cmsg)) { + if (mlen != nn->client_tracking_ops->msglen) { dprintk("%s: got %zu bytes, expected %zu\n", __func__, mlen, - sizeof(*cmsg)); + nn->client_tracking_ops->msglen); return -EINVAL; } /* copy just the xid so we can try to find that */ - if (copy_from_user(&xid, &cmsg->cm_xid, sizeof(xid)) != 0) { + if (copy_from_user(&xid, &hdr->cm_xid, sizeof(xid)) != 0) { dprintk("%s: error when copying xid from userspace", __func__); return -EFAULT; } @@ -842,7 +854,7 @@ cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) * list (for -EINPROGRESS, we just want to make sure the xid is * valid, not remove the upcall from the list) */ - if (get_user(status, &cmsg->cm_status)) { + if (get_user(status, &hdr->cm_status)) { dprintk("%s: error when copying status from userspace", __func__); return -EFAULT; } @@ -851,7 +863,7 @@ cld_pipe_downcall(struct file *filp, const char 
__user *src, size_t mlen) cup = NULL; spin_lock(&cn->cn_lock); list_for_each_entry(tmp, &cn->cn_list, cu_list) { - if (get_unaligned(&tmp->cu_msg.cm_xid) == xid) { + if (get_unaligned(&tmp->cu_u.cu_hdr.cm_xid) == xid) { cup = tmp; if (status != -EINPROGRESS) list_del_init(&cup->cu_list); @@ -869,7 +881,7 @@ cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) if (status == -EINPROGRESS) return __cld_pipe_inprogress_downcall(cmsg, nn); - if (copy_from_user(&cup->cu_msg, src, mlen) != 0) + if (copy_from_user(&cup->cu_u.cu_msg, src, mlen) != 0) return -EFAULT; complete(&cup->cu_done); @@ -881,7 +893,7 @@ cld_pipe_destroy_msg(struct rpc_pipe_msg *msg) { struct cld_msg *cmsg = msg->data; struct cld_upcall *cup = container_of(cmsg, struct cld_upcall, - cu_msg); + cu_u.cu_msg); /* errno >= 0 means we got a downcall */ if (msg->errno >= 0) @@ -1012,9 +1024,10 @@ nfsd4_remove_cld_pipe(struct net *net) } static struct cld_upcall * -alloc_cld_upcall(struct cld_net *cn) +alloc_cld_upcall(struct nfsd_net *nn) { struct cld_upcall *new, *tmp; + struct cld_net *cn = nn->cld_net; new = kzalloc(sizeof(*new), GFP_KERNEL); if (!new) @@ -1024,20 +1037,20 @@ alloc_cld_upcall(struct cld_net *cn) restart_search: spin_lock(&cn->cn_lock); list_for_each_entry(tmp, &cn->cn_list, cu_list) { - if (tmp->cu_msg.cm_xid == cn->cn_xid) { + if (tmp->cu_u.cu_msg.cm_xid == cn->cn_xid) { cn->cn_xid++; spin_unlock(&cn->cn_lock); goto restart_search; } } init_completion(&new->cu_done); - new->cu_msg.cm_vers = CLD_UPCALL_VERSION; - put_unaligned(cn->cn_xid++, &new->cu_msg.cm_xid); + new->cu_u.cu_msg.cm_vers = nn->client_tracking_ops->version; + put_unaligned(cn->cn_xid++, &new->cu_u.cu_msg.cm_xid); new->cu_net = cn; list_add(&new->cu_list, &cn->cn_list); spin_unlock(&cn->cn_lock); - dprintk("%s: allocated xid %u\n", __func__, new->cu_msg.cm_xid); + dprintk("%s: allocated xid %u\n", __func__, new->cu_u.cu_msg.cm_xid); return new; } @@ -1066,20 +1079,20 @@ nfsd4_cld_create(struct nfs4_client *clp) if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags)) return; - cup = alloc_cld_upcall(cn); + cup = alloc_cld_upcall(nn); if (!cup) { ret = -ENOMEM; goto out_err; } - cup->cu_msg.cm_cmd = Cld_Create; - cup->cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len; - memcpy(cup->cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data, + cup->cu_u.cu_msg.cm_cmd = Cld_Create; + cup->cu_u.cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len; + memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data, clp->cl_name.len); - ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg); + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); if (!ret) { - ret = cup->cu_msg.cm_status; + ret = cup->cu_u.cu_msg.cm_status; set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags); } @@ -1103,20 +1116,20 @@ nfsd4_cld_remove(struct nfs4_client *clp) if (!test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags)) return; - cup = alloc_cld_upcall(cn); + cup = alloc_cld_upcall(nn); if (!cup) { ret = -ENOMEM; goto out_err; } - cup->cu_msg.cm_cmd = Cld_Remove; - cup->cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len; - memcpy(cup->cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data, + cup->cu_u.cu_msg.cm_cmd = Cld_Remove; + cup->cu_u.cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len; + memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data, clp->cl_name.len); - ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg); + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); if (!ret) { - ret = cup->cu_msg.cm_status; + ret = cup->cu_u.cu_msg.cm_status; clear_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags); } @@ -1145,21 
+1158,21 @@ nfsd4_cld_check_v0(struct nfs4_client *clp) if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags)) return 0; - cup = alloc_cld_upcall(cn); + cup = alloc_cld_upcall(nn); if (!cup) { printk(KERN_ERR "NFSD: Unable to check client record on " "stable storage: %d\n", -ENOMEM); return -ENOMEM; } - cup->cu_msg.cm_cmd = Cld_Check; - cup->cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len; - memcpy(cup->cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data, + cup->cu_u.cu_msg.cm_cmd = Cld_Check; + cup->cu_u.cu_msg.cm_u.cm_name.cn_len = clp->cl_name.len; + memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data, clp->cl_name.len); - ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg); + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); if (!ret) { - ret = cup->cu_msg.cm_status; + ret = cup->cu_u.cu_msg.cm_status; set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags); } @@ -1223,16 +1236,16 @@ nfsd4_cld_grace_start(struct nfsd_net *nn) struct cld_upcall *cup; struct cld_net *cn = nn->cld_net; - cup = alloc_cld_upcall(cn); + cup = alloc_cld_upcall(nn); if (!cup) { ret = -ENOMEM; goto out_err; } - cup->cu_msg.cm_cmd = Cld_GraceStart; - ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg); + cup->cu_u.cu_msg.cm_cmd = Cld_GraceStart; + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); if (!ret) - ret = cup->cu_msg.cm_status; + ret = cup->cu_u.cu_msg.cm_status; free_cld_upcall(cup); out_err: @@ -1250,17 +1263,17 @@ nfsd4_cld_grace_done_v0(struct nfsd_net *nn) struct cld_upcall *cup; struct cld_net *cn = nn->cld_net; - cup = alloc_cld_upcall(cn); + cup = alloc_cld_upcall(nn); if (!cup) { ret = -ENOMEM; goto out_err; } - cup->cu_msg.cm_cmd = Cld_GraceDone; - cup->cu_msg.cm_u.cm_gracetime = (int64_t)nn->boot_time; - ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg); + cup->cu_u.cu_msg.cm_cmd = Cld_GraceDone; + cup->cu_u.cu_msg.cm_u.cm_gracetime = (int64_t)nn->boot_time; + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); if (!ret) - ret = cup->cu_msg.cm_status; + ret = cup->cu_u.cu_msg.cm_status; free_cld_upcall(cup); out_err: @@ -1279,16 +1292,16 @@ nfsd4_cld_grace_done(struct nfsd_net *nn) struct cld_upcall *cup; struct cld_net *cn = nn->cld_net; - cup = alloc_cld_upcall(cn); + cup = alloc_cld_upcall(nn); if (!cup) { ret = -ENOMEM; goto out_err; } - cup->cu_msg.cm_cmd = Cld_GraceDone; - ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_msg); + cup->cu_u.cu_msg.cm_cmd = Cld_GraceDone; + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); if (!ret) - ret = cup->cu_msg.cm_status; + ret = cup->cu_u.cu_msg.cm_status; free_cld_upcall(cup); out_err: @@ -1336,6 +1349,50 @@ cld_running(struct nfsd_net *nn) return pipe->nreaders || pipe->nwriters; } +static int +nfsd4_cld_get_version(struct nfsd_net *nn) +{ + int ret = 0; + struct cld_upcall *cup; + struct cld_net *cn = nn->cld_net; + uint8_t version; + + cup = alloc_cld_upcall(nn); + if (!cup) { + ret = -ENOMEM; + goto out_err; + } + cup->cu_u.cu_msg.cm_cmd = Cld_GetVersion; + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); + if (!ret) { + ret = cup->cu_u.cu_msg.cm_status; + if (ret) + goto out_free; + version = cup->cu_u.cu_msg.cm_u.cm_version; + dprintk("%s: userspace returned version %u\n", + __func__, version); + if (version < 1) + version = 1; + else if (version > CLD_UPCALL_VERSION) + version = CLD_UPCALL_VERSION; + + switch (version) { + case 1: + nn->client_tracking_ops = &nfsd4_cld_tracking_ops; + break; + default: + break; + } + } +out_free: + free_cld_upcall(cup); +out_err: + if (ret) + dprintk("%s: Unable to get version from userspace: %d\n", + 
__func__, ret); + return ret; +} + static int nfsd4_cld_tracking_init(struct net *net) { @@ -1368,10 +1425,14 @@ nfsd4_cld_tracking_init(struct net *net) goto err_remove; } + status = nfsd4_cld_get_version(nn); + if (status == -EOPNOTSUPP) + pr_warn("NFSD: nfsdcld GetVersion upcall failed. Please upgrade nfsdcld.\n"); + status = nfsd4_cld_grace_start(nn); if (status) { if (status == -EOPNOTSUPP) - printk(KERN_WARNING "NFSD: Please upgrade nfsdcld.\n"); + pr_warn("NFSD: nfsdcld GraceStart upcall failed. Please upgrade nfsdcld.\n"); nfs4_release_reclaim(nn); goto err_remove; } else @@ -1403,6 +1464,8 @@ static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops_v0 = { .remove = nfsd4_cld_remove, .check = nfsd4_cld_check_v0, .grace_done = nfsd4_cld_grace_done_v0, + .version = 1, + .msglen = sizeof(struct cld_msg), }; /* For newer nfsdcld's */ @@ -1413,6 +1476,8 @@ static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops = { .remove = nfsd4_cld_remove, .check = nfsd4_cld_check, .grace_done = nfsd4_cld_grace_done, + .version = 1, + .msglen = sizeof(struct cld_msg), }; /* upcall via usermodehelper */ @@ -1760,6 +1825,8 @@ static const struct nfsd4_client_tracking_ops nfsd4_umh_tracking_ops = { .remove = nfsd4_umh_cltrack_remove, .check = nfsd4_umh_cltrack_check, .grace_done = nfsd4_umh_cltrack_grace_done, + .version = 1, + .msglen = 0, }; int diff --git a/include/uapi/linux/nfsd/cld.h b/include/uapi/linux/nfsd/cld.h index b1e9de4f07d5..c5aad16d10c0 100644 --- a/include/uapi/linux/nfsd/cld.h +++ b/include/uapi/linux/nfsd/cld.h @@ -36,7 +36,8 @@ enum cld_command { Cld_Remove, /* remove record of this cm_id */ Cld_Check, /* is this cm_id allowed? */ Cld_GraceDone, /* grace period is complete */ - Cld_GraceStart, + Cld_GraceStart, /* grace start (upload client records) */ + Cld_GetVersion, /* query max supported upcall version */ }; /* representation of long-form NFSv4 client ID */ @@ -54,7 +55,15 @@ struct cld_msg { union { __s64 cm_gracetime; /* grace period start time */ struct cld_name cm_name; + __u8 cm_version; /* for getting max version */ } __attribute__((packed)) cm_u; } __attribute__((packed)); +struct cld_msg_hdr { + __u8 cm_vers; /* upcall version */ + __u8 cm_cmd; /* upcall command */ + __s16 cm_status; /* return code */ + __u32 cm_xid; /* transaction id */ +} __attribute__((packed)); + #endif /* !_NFSD_CLD_H */ -- cgit v1.2.3 From 6ee95d1c899186c0798cafd25998d436bcdb9618 Mon Sep 17 00:00:00 2001 From: Scott Mayhew Date: Mon, 9 Sep 2019 16:10:31 -0400 Subject: nfsd: add support for upcall version 2 Version 2 upcalls will allow the nfsd to include a hash of the kerberos principal string in the Cld_Create upcall. If a principal is present in the svc_cred, then the hash will be included in the Cld_Create upcall. We attempt to use the svc_cred.cr_raw_principal (which is returned by gssproxy) first, and then fall back to using the svc_cred.cr_principal (which is returned by both gssproxy and rpc.svcgssd). Upon a subsequent restart, the hash will be returned in the Cld_Gracestart downcall and stored in the reclaim_str_hashtbl so it can be used when handling reclaim opens. Signed-off-by: Scott Mayhew Signed-off-by: J. 
Bruce Fields --- fs/nfsd/nfs4recover.c | 223 +++++++++++++++++++++++++++++++++++++++--- fs/nfsd/nfs4state.c | 6 +- fs/nfsd/state.h | 3 +- include/uapi/linux/nfsd/cld.h | 30 +++++- 4 files changed, 245 insertions(+), 17 deletions(-) (limited to 'include/uapi') diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 58a61339d40c..cdc75ad4438b 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -64,6 +64,7 @@ struct nfsd4_client_tracking_ops { }; static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops; +static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops_v2; /* Globals */ static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery"; @@ -177,6 +178,7 @@ __nfsd4_create_reclaim_record_grace(struct nfs4_client *clp, const char *dname, int len, struct nfsd_net *nn) { struct xdr_netobj name; + struct xdr_netobj princhash = { .len = 0, .data = NULL }; struct nfs4_client_reclaim *crp; name.data = kmemdup(dname, len, GFP_KERNEL); @@ -186,7 +188,7 @@ __nfsd4_create_reclaim_record_grace(struct nfs4_client *clp, return; } name.len = len; - crp = nfs4_client_to_reclaim(name, nn); + crp = nfs4_client_to_reclaim(name, princhash, nn); if (!crp) { kfree(name.data); return; @@ -486,6 +488,7 @@ static int load_recdir(struct dentry *parent, struct dentry *child, struct nfsd_net *nn) { struct xdr_netobj name; + struct xdr_netobj princhash = { .len = 0, .data = NULL }; if (child->d_name.len != HEXDIR_LEN - 1) { printk("%s: illegal name %pd in recovery directory\n", @@ -500,7 +503,7 @@ load_recdir(struct dentry *parent, struct dentry *child, struct nfsd_net *nn) goto out; } name.len = HEXDIR_LEN; - if (!nfs4_client_to_reclaim(name, nn)) + if (!nfs4_client_to_reclaim(name, princhash, nn)) kfree(name.data); out: return 0; @@ -737,6 +740,7 @@ struct cld_net { struct list_head cn_list; unsigned int cn_xid; bool cn_has_legacy; + struct crypto_shash *cn_tfm; }; struct cld_upcall { @@ -746,6 +750,7 @@ struct cld_upcall { union { struct cld_msg_hdr cu_hdr; struct cld_msg cu_msg; + struct cld_msg_v2 cu_msg_v2; } cu_u; }; @@ -792,11 +797,11 @@ cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg) } static ssize_t -__cld_pipe_inprogress_downcall(const struct cld_msg __user *cmsg, +__cld_pipe_inprogress_downcall(const struct cld_msg_v2 __user *cmsg, struct nfsd_net *nn) { - uint8_t cmd; - struct xdr_netobj name; + uint8_t cmd, princhashlen; + struct xdr_netobj name, princhash = { .len = 0, .data = NULL }; uint16_t namelen; struct cld_net *cn = nn->cld_net; @@ -805,19 +810,45 @@ __cld_pipe_inprogress_downcall(const struct cld_msg __user *cmsg, return -EFAULT; } if (cmd == Cld_GraceStart) { - if (get_user(namelen, &cmsg->cm_u.cm_name.cn_len)) - return -EFAULT; - name.data = memdup_user(&cmsg->cm_u.cm_name.cn_id, namelen); - if (IS_ERR_OR_NULL(name.data)) - return -EFAULT; - name.len = namelen; + if (nn->client_tracking_ops->version >= 2) { + const struct cld_clntinfo __user *ci; + + ci = &cmsg->cm_u.cm_clntinfo; + if (get_user(namelen, &ci->cc_name.cn_len)) + return -EFAULT; + name.data = memdup_user(&ci->cc_name.cn_id, namelen); + if (IS_ERR_OR_NULL(name.data)) + return -EFAULT; + name.len = namelen; + get_user(princhashlen, &ci->cc_princhash.cp_len); + if (princhashlen > 0) { + princhash.data = memdup_user( + &ci->cc_princhash.cp_data, + princhashlen); + if (IS_ERR_OR_NULL(princhash.data)) + return -EFAULT; + princhash.len = princhashlen; + } else + princhash.len = 0; + } else { + const struct cld_name __user *cnm; + + cnm = &cmsg->cm_u.cm_name; + if 
(get_user(namelen, &cnm->cn_len)) + return -EFAULT; + name.data = memdup_user(&cnm->cn_id, namelen); + if (IS_ERR_OR_NULL(name.data)) + return -EFAULT; + name.len = namelen; + } if (name.len > 5 && memcmp(name.data, "hash:", 5) == 0) { name.len = name.len - 5; memmove(name.data, name.data + 5, name.len); cn->cn_has_legacy = true; } - if (!nfs4_client_to_reclaim(name, nn)) { + if (!nfs4_client_to_reclaim(name, princhash, nn)) { kfree(name.data); + kfree(princhash.data); return -EFAULT; } return nn->client_tracking_ops->msglen; @@ -830,7 +861,7 @@ cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) { struct cld_upcall *tmp, *cup; struct cld_msg_hdr __user *hdr = (struct cld_msg_hdr __user *)src; - struct cld_msg __user *cmsg = (struct cld_msg __user *)src; + struct cld_msg_v2 __user *cmsg = (struct cld_msg_v2 __user *)src; uint32_t xid; struct nfsd_net *nn = net_generic(file_inode(filp)->i_sb->s_fs_info, nfsd_net_id); @@ -881,7 +912,7 @@ cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) if (status == -EINPROGRESS) return __cld_pipe_inprogress_downcall(cmsg, nn); - if (copy_from_user(&cup->cu_u.cu_msg, src, mlen) != 0) + if (copy_from_user(&cup->cu_u.cu_msg_v2, src, mlen) != 0) return -EFAULT; complete(&cup->cu_done); @@ -1019,6 +1050,8 @@ nfsd4_remove_cld_pipe(struct net *net) nfsd4_cld_unregister_net(net, cn->cn_pipe); rpc_destroy_pipe_data(cn->cn_pipe); + if (cn->cn_tfm) + crypto_free_shash(cn->cn_tfm); kfree(nn->cld_net); nn->cld_net = NULL; } @@ -1103,6 +1136,75 @@ out_err: "record on stable storage: %d\n", ret); } +/* Ask daemon to create a new record */ +static void +nfsd4_cld_create_v2(struct nfs4_client *clp) +{ + int ret; + struct cld_upcall *cup; + struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); + struct cld_net *cn = nn->cld_net; + struct cld_msg_v2 *cmsg; + struct crypto_shash *tfm = cn->cn_tfm; + struct xdr_netobj cksum; + char *principal = NULL; + SHASH_DESC_ON_STACK(desc, tfm); + + /* Don't upcall if it's already stored */ + if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags)) + return; + + cup = alloc_cld_upcall(nn); + if (!cup) { + ret = -ENOMEM; + goto out_err; + } + + cmsg = &cup->cu_u.cu_msg_v2; + cmsg->cm_cmd = Cld_Create; + cmsg->cm_u.cm_clntinfo.cc_name.cn_len = clp->cl_name.len; + memcpy(cmsg->cm_u.cm_clntinfo.cc_name.cn_id, clp->cl_name.data, + clp->cl_name.len); + if (clp->cl_cred.cr_raw_principal) + principal = clp->cl_cred.cr_raw_principal; + else if (clp->cl_cred.cr_principal) + principal = clp->cl_cred.cr_principal; + if (principal) { + desc->tfm = tfm; + cksum.len = crypto_shash_digestsize(tfm); + cksum.data = kmalloc(cksum.len, GFP_KERNEL); + if (cksum.data == NULL) { + ret = -ENOMEM; + goto out; + } + ret = crypto_shash_digest(desc, principal, strlen(principal), + cksum.data); + shash_desc_zero(desc); + if (ret) { + kfree(cksum.data); + goto out; + } + cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = cksum.len; + memcpy(cmsg->cm_u.cm_clntinfo.cc_princhash.cp_data, + cksum.data, cksum.len); + kfree(cksum.data); + } else + cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = 0; + + ret = cld_pipe_upcall(cn->cn_pipe, cmsg); + if (!ret) { + ret = cmsg->cm_status; + set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags); + } + +out: + free_cld_upcall(cup); +out_err: + if (ret) + pr_err("NFSD: Unable to create client record on stable storage: %d\n", + ret); +} + /* Ask daemon to create a new record */ static void nfsd4_cld_remove(struct nfs4_client *clp) @@ -1229,6 +1331,79 @@ found: return 0; } +static int +nfsd4_cld_check_v2(struct 
nfs4_client *clp) +{ + struct nfs4_client_reclaim *crp; + struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); + struct cld_net *cn = nn->cld_net; + int status; + char dname[HEXDIR_LEN]; + struct xdr_netobj name; + struct crypto_shash *tfm = cn->cn_tfm; + struct xdr_netobj cksum; + char *principal = NULL; + SHASH_DESC_ON_STACK(desc, tfm); + + /* did we already find that this client is stable? */ + if (test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags)) + return 0; + + /* look for it in the reclaim hashtable otherwise */ + crp = nfsd4_find_reclaim_client(clp->cl_name, nn); + if (crp) + goto found; + + if (cn->cn_has_legacy) { + status = nfs4_make_rec_clidname(dname, &clp->cl_name); + if (status) + return -ENOENT; + + name.data = kmemdup(dname, HEXDIR_LEN, GFP_KERNEL); + if (!name.data) { + dprintk("%s: failed to allocate memory for name.data\n", + __func__); + return -ENOENT; + } + name.len = HEXDIR_LEN; + crp = nfsd4_find_reclaim_client(name, nn); + kfree(name.data); + if (crp) + goto found; + + } + return -ENOENT; +found: + if (crp->cr_princhash.len) { + if (clp->cl_cred.cr_raw_principal) + principal = clp->cl_cred.cr_raw_principal; + else if (clp->cl_cred.cr_principal) + principal = clp->cl_cred.cr_principal; + if (principal == NULL) + return -ENOENT; + desc->tfm = tfm; + cksum.len = crypto_shash_digestsize(tfm); + cksum.data = kmalloc(cksum.len, GFP_KERNEL); + if (cksum.data == NULL) + return -ENOENT; + status = crypto_shash_digest(desc, principal, strlen(principal), + cksum.data); + shash_desc_zero(desc); + if (status) { + kfree(cksum.data); + return -ENOENT; + } + if (memcmp(crp->cr_princhash.data, cksum.data, + crp->cr_princhash.len)) { + kfree(cksum.data); + return -ENOENT; + } + kfree(cksum.data); + } + crp->cr_clp = clp; + return 0; +} + static int nfsd4_cld_grace_start(struct nfsd_net *nn) { @@ -1380,6 +1555,9 @@ nfsd4_cld_get_version(struct nfsd_net *nn) case 1: nn->client_tracking_ops = &nfsd4_cld_tracking_ops; break; + case 2: + nn->client_tracking_ops = &nfsd4_cld_tracking_ops_v2; + break; default: break; } @@ -1408,6 +1586,11 @@ nfsd4_cld_tracking_init(struct net *net) status = __nfsd4_init_cld_pipe(net); if (status) goto err_shutdown; + nn->cld_net->cn_tfm = crypto_alloc_shash("sha256", 0, 0); + if (IS_ERR(nn->cld_net->cn_tfm)) { + status = PTR_ERR(nn->cld_net->cn_tfm); + goto err_remove; + } /* * rpc pipe upcalls take 30 seconds to time out, so we don't want to @@ -1480,6 +1663,18 @@ static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops = { .msglen = sizeof(struct cld_msg), }; +/* v2 create/check ops include the principal, if available */ +static const struct nfsd4_client_tracking_ops nfsd4_cld_tracking_ops_v2 = { + .init = nfsd4_cld_tracking_init, + .exit = nfsd4_cld_tracking_exit, + .create = nfsd4_cld_create_v2, + .remove = nfsd4_cld_remove, + .check = nfsd4_cld_check_v2, + .grace_done = nfsd4_cld_grace_done, + .version = 2, + .msglen = sizeof(struct cld_msg_v2), +}; + /* upcall via usermodehelper */ static char cltrack_prog[PATH_MAX] = "/sbin/nfsdcltrack"; module_param_string(cltrack_prog, cltrack_prog, sizeof(cltrack_prog), diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 906e214dcce8..da1093dd5016 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -6887,7 +6887,8 @@ nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn) * will be freed in nfs4_remove_reclaim_record in the normal case). 
*/ struct nfs4_client_reclaim * -nfs4_client_to_reclaim(struct xdr_netobj name, struct nfsd_net *nn) +nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash, + struct nfsd_net *nn) { unsigned int strhashval; struct nfs4_client_reclaim *crp; @@ -6900,6 +6901,8 @@ nfs4_client_to_reclaim(struct xdr_netobj name, struct nfsd_net *nn) list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); crp->cr_name.data = name.data; crp->cr_name.len = name.len; + crp->cr_princhash.data = princhash.data; + crp->cr_princhash.len = princhash.len; crp->cr_clp = NULL; nn->reclaim_str_hashtbl_size++; } @@ -6911,6 +6914,7 @@ nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn) { list_del(&crp->cr_strhash); kfree(crp->cr_name.data); + kfree(crp->cr_princhash.data); kfree(crp); nn->reclaim_str_hashtbl_size--; } diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index d00c86d05beb..46f56afb6cb8 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -378,6 +378,7 @@ struct nfs4_client_reclaim { struct list_head cr_strhash; /* hash by cr_name */ struct nfs4_client *cr_clp; /* pointer to associated clp */ struct xdr_netobj cr_name; /* recovery dir name */ + struct xdr_netobj cr_princhash; }; /* A reasonable value for REPLAY_ISIZE was estimated as follows: @@ -645,7 +646,7 @@ extern void nfsd4_shutdown_callback(struct nfs4_client *); extern void nfsd4_shutdown_copy(struct nfs4_client *clp); extern void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp); extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(struct xdr_netobj name, - struct nfsd_net *nn); + struct xdr_netobj princhash, struct nfsd_net *nn); extern bool nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn); struct nfs4_file *find_file(struct knfsd_fh *fh); diff --git a/include/uapi/linux/nfsd/cld.h b/include/uapi/linux/nfsd/cld.h index c5aad16d10c0..a519313af953 100644 --- a/include/uapi/linux/nfsd/cld.h +++ b/include/uapi/linux/nfsd/cld.h @@ -26,11 +26,15 @@ #include /* latest upcall version available */ -#define CLD_UPCALL_VERSION 1 +#define CLD_UPCALL_VERSION 2 /* defined by RFC3530 */ #define NFS4_OPAQUE_LIMIT 1024 +#ifndef SHA256_DIGEST_SIZE +#define SHA256_DIGEST_SIZE 32 +#endif + enum cld_command { Cld_Create, /* create a record for this cm_id */ Cld_Remove, /* remove record of this cm_id */ @@ -46,6 +50,17 @@ struct cld_name { unsigned char cn_id[NFS4_OPAQUE_LIMIT]; /* client-provided */ } __attribute__((packed)); +/* sha256 hash of the kerberos principal */ +struct cld_princhash { + __u8 cp_len; /* length of cp_data */ + unsigned char cp_data[SHA256_DIGEST_SIZE]; /* hash of principal */ +} __attribute__((packed)); + +struct cld_clntinfo { + struct cld_name cc_name; + struct cld_princhash cc_princhash; +} __attribute__((packed)); + /* message struct for communication with userspace */ struct cld_msg { __u8 cm_vers; /* upcall version */ @@ -59,6 +74,19 @@ struct cld_msg { } __attribute__((packed)) cm_u; } __attribute__((packed)); +/* version 2 message can include hash of kerberos principal */ +struct cld_msg_v2 { + __u8 cm_vers; /* upcall version */ + __u8 cm_cmd; /* upcall command */ + __s16 cm_status; /* return code */ + __u32 cm_xid; /* transaction id */ + union { + struct cld_name cm_name; + __u8 cm_version; /* for getting max version */ + struct cld_clntinfo cm_clntinfo; /* name & princ hash */ + } __attribute__((packed)) cm_u; +} __attribute__((packed)); + struct cld_msg_hdr { __u8 cm_vers; /* upcall version */ __u8 cm_cmd; /* upcall command */ -- cgit v1.2.3 From 
e019a3b17f0d79894917e6d83b17e87e8f809deb Mon Sep 17 00:00:00 2001 From: Dirk van der Merwe Date: Mon, 9 Sep 2019 00:54:17 +0100 Subject: devlink: extend 'fw_load_policy' values Add the 'disk' value to the generic 'fw_load_policy' devlink parameter. This value indicates that firmware should always be loaded from disk only. Signed-off-by: Dirk van der Merwe Signed-off-by: Jakub Kicinski Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- Documentation/networking/devlink-params.txt | 2 ++ include/uapi/linux/devlink.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include/uapi') diff --git a/Documentation/networking/devlink-params.txt b/Documentation/networking/devlink-params.txt index 2d26434ddcf8..fadb5436188d 100644 --- a/Documentation/networking/devlink-params.txt +++ b/Documentation/networking/devlink-params.txt @@ -48,4 +48,6 @@ fw_load_policy [DEVICE, GENERIC] Load firmware version preferred by the driver. * DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH (1) Load firmware currently stored in flash. + * DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK (2) + Load firmware currently available on host's disk. Type: u8 diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 546e75dd74ac..c25cc29a6647 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -202,6 +202,7 @@ enum devlink_param_cmode { enum devlink_param_fw_load_policy_value { DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER, DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH, + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK, }; enum { -- cgit v1.2.3 From 5bbd21df5a075a59ab53030d25f9848ccca93d73 Mon Sep 17 00:00:00 2001 From: Dirk van der Merwe Date: Mon, 9 Sep 2019 00:54:18 +0100 Subject: devlink: add 'reset_dev_on_drv_probe' param Add the 'reset_dev_on_drv_probe' devlink parameter, controlling the device reset policy on driver probe. This parameter is useful in conjunction with the existing 'fw_load_policy' parameter. Signed-off-by: Dirk van der Merwe Signed-off-by: Jakub Kicinski Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- Documentation/networking/devlink-params.txt | 14 ++++++++++++++ include/net/devlink.h | 5 +++++ include/uapi/linux/devlink.h | 7 +++++++ net/core/devlink.c | 5 +++++ 4 files changed, 31 insertions(+) (limited to 'include/uapi') diff --git a/Documentation/networking/devlink-params.txt b/Documentation/networking/devlink-params.txt index fadb5436188d..ddba3e9b55b1 100644 --- a/Documentation/networking/devlink-params.txt +++ b/Documentation/networking/devlink-params.txt @@ -51,3 +51,17 @@ fw_load_policy [DEVICE, GENERIC] * DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK (2) Load firmware currently available on host's disk. Type: u8 + +reset_dev_on_drv_probe [DEVICE, GENERIC] + Controls the device's reset policy on driver probe. + Valid values: + * DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_UNKNOWN (0) + Unknown or invalid value. + * DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_ALWAYS (1) + Always reset device on driver probe. + * DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_NEVER (2) + Never reset device on driver probe. + * DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_DISK (3) + Reset only if device firmware can be found in the + filesystem. 
+ Type: u8 diff --git a/include/net/devlink.h b/include/net/devlink.h index 460bc629d1a4..03e4d9244ff3 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -398,6 +398,7 @@ enum devlink_param_generic_id { DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, + DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE, /* add new param generic ids above here*/ __DEVLINK_PARAM_GENERIC_ID_MAX, @@ -428,6 +429,10 @@ enum devlink_param_generic_id { #define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME "fw_load_policy" #define DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE DEVLINK_PARAM_TYPE_U8 +#define DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME \ + "reset_dev_on_drv_probe" +#define DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE DEVLINK_PARAM_TYPE_U8 + #define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \ { \ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \ diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index c25cc29a6647..1da3e83f1fd4 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -205,6 +205,13 @@ enum devlink_param_fw_load_policy_value { DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK, }; +enum devlink_param_reset_dev_on_drv_probe_value { + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_UNKNOWN, + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_ALWAYS, + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_NEVER, + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_DISK, +}; + enum { DEVLINK_ATTR_STATS_RX_PACKETS, /* u64 */ DEVLINK_ATTR_STATS_RX_BYTES, /* u64 */ diff --git a/net/core/devlink.c b/net/core/devlink.c index 6e52d639dac6..4a2fb94c44cf 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -2852,6 +2852,11 @@ static const struct devlink_param devlink_param_generic[] = { .name = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_NAME, .type = DEVLINK_PARAM_GENERIC_FW_LOAD_POLICY_TYPE, }, + { + .id = DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE, + .name = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME, + .type = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE, + }, }; static int devlink_param_generic_verify(const struct devlink_param *param) -- cgit v1.2.3 From ee394f96ad7517fbc0de9106dcc7ce9efb14f264 Mon Sep 17 00:00:00 2001 From: Fernando Fernandez Mancera Date: Sat, 7 Sep 2019 18:04:26 +0200 Subject: netfilter: nft_synproxy: add synproxy stateful object support Register a new synproxy stateful object type into the stateful object infrastructure. 
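A named synproxy object defined through this infrastructure can be shared by several rules and updated in place without rewriting them. A minimal sketch in nft(8) ruleset syntax (assumes a userspace nft built with synproxy object support; table, chain, and object names are illustrative, not part of this patch):

table ip filter {
	synproxy web-synproxy {
		mss 1460
		wscale 7
		timestamp sack-perm
	}

	chain prerouting {
		type filter hook prerouting priority -300; policy accept;
		tcp dport 80 tcp flags syn notrack
	}

	chain input {
		type filter hook input priority 0; policy accept;
		ct state invalid,untracked synproxy name web-synproxy
		ct state invalid drop
	}
}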
Signed-off-by: Fernando Fernandez Mancera Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter/nf_tables.h | 3 +- net/netfilter/nft_synproxy.c | 143 ++++++++++++++++++++++++++----- 2 files changed, 124 insertions(+), 22 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 0ff932dadc8e..ed8881ad18ed 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -1481,7 +1481,8 @@ enum nft_ct_expectation_attributes { #define NFT_OBJECT_CT_TIMEOUT 7 #define NFT_OBJECT_SECMARK 8 #define NFT_OBJECT_CT_EXPECT 9 -#define __NFT_OBJECT_MAX 10 +#define NFT_OBJECT_SYNPROXY 10 +#define __NFT_OBJECT_MAX 11 #define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1) /** diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c index db4c23f5dfcb..e2c1fc608841 100644 --- a/net/netfilter/nft_synproxy.c +++ b/net/netfilter/nft_synproxy.c @@ -24,7 +24,7 @@ static void nft_synproxy_tcp_options(struct synproxy_options *opts, const struct tcphdr *tcp, struct synproxy_net *snet, struct nf_synproxy_info *info, - struct nft_synproxy *priv) + const struct nft_synproxy *priv) { this_cpu_inc(snet->stats->syn_received); if (tcp->ece && tcp->cwr) @@ -41,14 +41,13 @@ static void nft_synproxy_tcp_options(struct synproxy_options *opts, NF_SYNPROXY_OPT_ECN); } -static void nft_synproxy_eval_v4(const struct nft_expr *expr, +static void nft_synproxy_eval_v4(const struct nft_synproxy *priv, struct nft_regs *regs, const struct nft_pktinfo *pkt, const struct tcphdr *tcp, struct tcphdr *_tcph, struct synproxy_options *opts) { - struct nft_synproxy *priv = nft_expr_priv(expr); struct nf_synproxy_info info = priv->info; struct net *net = nft_net(pkt); struct synproxy_net *snet = synproxy_pernet(net); @@ -73,14 +72,13 @@ static void nft_synproxy_eval_v4(const struct nft_expr *expr, } #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) -static void nft_synproxy_eval_v6(const struct nft_expr *expr, +static void nft_synproxy_eval_v6(const struct nft_synproxy *priv, struct nft_regs *regs, const struct nft_pktinfo *pkt, const struct tcphdr *tcp, struct tcphdr *_tcph, struct synproxy_options *opts) { - struct nft_synproxy *priv = nft_expr_priv(expr); struct nf_synproxy_info info = priv->info; struct net *net = nft_net(pkt); struct synproxy_net *snet = synproxy_pernet(net); @@ -105,9 +103,9 @@ static void nft_synproxy_eval_v6(const struct nft_expr *expr, } #endif /* CONFIG_NF_TABLES_IPV6*/ -static void nft_synproxy_eval(const struct nft_expr *expr, - struct nft_regs *regs, - const struct nft_pktinfo *pkt) +static void nft_synproxy_do_eval(const struct nft_synproxy *priv, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) { struct synproxy_options opts = {}; struct sk_buff *skb = pkt->skb; @@ -140,23 +138,22 @@ static void nft_synproxy_eval(const struct nft_expr *expr, switch (skb->protocol) { case htons(ETH_P_IP): - nft_synproxy_eval_v4(expr, regs, pkt, tcp, &_tcph, &opts); + nft_synproxy_eval_v4(priv, regs, pkt, tcp, &_tcph, &opts); return; #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) case htons(ETH_P_IPV6): - nft_synproxy_eval_v6(expr, regs, pkt, tcp, &_tcph, &opts); + nft_synproxy_eval_v6(priv, regs, pkt, tcp, &_tcph, &opts); return; #endif } regs->verdict.code = NFT_BREAK; } -static int nft_synproxy_init(const struct nft_ctx *ctx, - const struct nft_expr *expr, - const struct nlattr * const tb[]) +static int nft_synproxy_do_init(const struct nft_ctx *ctx, + const struct nlattr * const tb[], + struct 
nft_synproxy *priv) { struct synproxy_net *snet = synproxy_pernet(ctx->net); - struct nft_synproxy *priv = nft_expr_priv(expr); u32 flags; int err; @@ -206,8 +203,7 @@ nf_ct_failure: return err; } -static void nft_synproxy_destroy(const struct nft_ctx *ctx, - const struct nft_expr *expr) +static void nft_synproxy_do_destroy(const struct nft_ctx *ctx) { struct synproxy_net *snet = synproxy_pernet(ctx->net); @@ -229,10 +225,8 @@ static void nft_synproxy_destroy(const struct nft_ctx *ctx, nf_ct_netns_put(ctx->net, ctx->family); } -static int nft_synproxy_dump(struct sk_buff *skb, const struct nft_expr *expr) +static int nft_synproxy_do_dump(struct sk_buff *skb, struct nft_synproxy *priv) { - const struct nft_synproxy *priv = nft_expr_priv(expr); - if (nla_put_be16(skb, NFTA_SYNPROXY_MSS, htons(priv->info.mss)) || nla_put_u8(skb, NFTA_SYNPROXY_WSCALE, priv->info.wscale) || nla_put_be32(skb, NFTA_SYNPROXY_FLAGS, htonl(priv->info.options))) @@ -244,6 +238,15 @@ nla_put_failure: return -1; } +static void nft_synproxy_eval(const struct nft_expr *expr, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct nft_synproxy *priv = nft_expr_priv(expr); + + nft_synproxy_do_eval(priv, regs, pkt); +} + static int nft_synproxy_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nft_data **data) @@ -252,6 +255,28 @@ static int nft_synproxy_validate(const struct nft_ctx *ctx, (1 << NF_INET_FORWARD)); } +static int nft_synproxy_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +{ + struct nft_synproxy *priv = nft_expr_priv(expr); + + return nft_synproxy_do_init(ctx, tb, priv); +} + +static void nft_synproxy_destroy(const struct nft_ctx *ctx, + const struct nft_expr *expr) +{ + nft_synproxy_do_destroy(ctx); +} + +static int nft_synproxy_dump(struct sk_buff *skb, const struct nft_expr *expr) +{ + struct nft_synproxy *priv = nft_expr_priv(expr); + + return nft_synproxy_do_dump(skb, priv); +} + static struct nft_expr_type nft_synproxy_type; static const struct nft_expr_ops nft_synproxy_ops = { .eval = nft_synproxy_eval, @@ -271,14 +296,89 @@ static struct nft_expr_type nft_synproxy_type __read_mostly = { .maxattr = NFTA_SYNPROXY_MAX, }; +static int nft_synproxy_obj_init(const struct nft_ctx *ctx, + const struct nlattr * const tb[], + struct nft_object *obj) +{ + struct nft_synproxy *priv = nft_obj_data(obj); + + return nft_synproxy_do_init(ctx, tb, priv); +} + +static void nft_synproxy_obj_destroy(const struct nft_ctx *ctx, + struct nft_object *obj) +{ + nft_synproxy_do_destroy(ctx); +} + +static int nft_synproxy_obj_dump(struct sk_buff *skb, + struct nft_object *obj, bool reset) +{ + struct nft_synproxy *priv = nft_obj_data(obj); + + return nft_synproxy_do_dump(skb, priv); +} + +static void nft_synproxy_obj_eval(struct nft_object *obj, + struct nft_regs *regs, + const struct nft_pktinfo *pkt) +{ + const struct nft_synproxy *priv = nft_obj_data(obj); + + nft_synproxy_do_eval(priv, regs, pkt); +} + +static void nft_synproxy_obj_update(struct nft_object *obj, + struct nft_object *newobj) +{ + struct nft_synproxy *newpriv = nft_obj_data(newobj); + struct nft_synproxy *priv = nft_obj_data(obj); + + priv->info = newpriv->info; +} + +static struct nft_object_type nft_synproxy_obj_type; +static const struct nft_object_ops nft_synproxy_obj_ops = { + .type = &nft_synproxy_obj_type, + .size = sizeof(struct nft_synproxy), + .init = nft_synproxy_obj_init, + .destroy = nft_synproxy_obj_destroy, + .dump = nft_synproxy_obj_dump, + 
.eval = nft_synproxy_obj_eval, + .update = nft_synproxy_obj_update, +}; + +static struct nft_object_type nft_synproxy_obj_type __read_mostly = { + .type = NFT_OBJECT_SYNPROXY, + .ops = &nft_synproxy_obj_ops, + .maxattr = NFTA_SYNPROXY_MAX, + .policy = nft_synproxy_policy, + .owner = THIS_MODULE, +}; + static int __init nft_synproxy_module_init(void) { - return nft_register_expr(&nft_synproxy_type); + int err; + + err = nft_register_obj(&nft_synproxy_obj_type); + if (err < 0) + return err; + + err = nft_register_expr(&nft_synproxy_type); + if (err < 0) + goto err; + + return 0; + +err: + nft_unregister_obj(&nft_synproxy_obj_type); + return err; } static void __exit nft_synproxy_module_exit(void) { - return nft_unregister_expr(&nft_synproxy_type); + nft_unregister_expr(&nft_synproxy_type); + nft_unregister_obj(&nft_synproxy_obj_type); } module_init(nft_synproxy_module_init); @@ -287,3 +387,4 @@ module_exit(nft_synproxy_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Fernando Fernandez "); MODULE_ALIAS_NFT_EXPR("synproxy"); +MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_SYNPROXY); -- cgit v1.2.3 From 7396d337cfadc7c0b32dfd46581e9daff6666e84 Mon Sep 17 00:00:00 2001 From: Liran Alon Date: Mon, 26 Aug 2019 13:16:43 +0300 Subject: KVM: x86: Return to userspace with internal error on unexpected exit reason Receiving an unexpected exit reason from hardware should be considered a severe bug in KVM. Therefore, instead of just injecting #UD into the guest and ignoring it, exit to userspace with an internal error so that userspace can handle it properly (probably by terminating the guest). In addition, prefer vcpu_unimpl() over WARN_ONCE(), as handling an unexpected exit reason should be a rare event (one that was expected never to happen), and we prefer to print a message every time it occurs for a guest. Furthermore, dump the VMCS/VMCB to dmesg to assist in diagnosing such cases.
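A minimal sketch of how a userspace VMM might consume the new suberror from its run loop (assumes vcpu_fd is an open KVM vCPU fd and run points at its mmap'ed struct kvm_run; setup and other error handling elided):

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Returns 0 to keep running the vCPU, -1 to stop it. */
static int vcpu_run_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR) {
		/* data[0] carries the raw hardware exit reason */
		if (run->internal.suberror ==
		    KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON &&
		    run->internal.ndata >= 1)
			fprintf(stderr, "unexpected hw exit reason 0x%llx\n",
				(unsigned long long)run->internal.data[0]);
		return -1;	/* most VMMs would terminate the guest */
	}
	return 0;		/* dispatch other exit reasons as usual */
}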
Reviewed-by: Mihai Carabas Reviewed-by: Nikita Leshenko Reviewed-by: Joao Martins Signed-off-by: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 11 ++++++++--- arch/x86/kvm/vmx/vmx.c | 9 +++++++-- include/uapi/linux/kvm.h | 2 ++ 3 files changed, 17 insertions(+), 5 deletions(-) (limited to 'include/uapi') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 890b1bffcf7c..fdeaf8f44949 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -4981,9 +4981,14 @@ static int handle_exit(struct kvm_vcpu *vcpu) if (exit_code >= ARRAY_SIZE(svm_exit_handlers) || !svm_exit_handlers[exit_code]) { - WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code); - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; + vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code); + dump_vmcb(vcpu); + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = + KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; + vcpu->run->internal.ndata = 1; + vcpu->run->internal.data[0] = exit_code; + return 0; } return svm_exit_handlers[exit_code](svm); diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 024cb9bf5ad3..d95c2f3c027c 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -5870,8 +5870,13 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) else { vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", exit_reason); - kvm_queue_exception(vcpu, UD_VECTOR); - return 1; + dump_vmcs(); + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = + KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON; + vcpu->run->internal.ndata = 1; + vcpu->run->internal.data[0] = exit_reason; + return 0; } } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 5414b6588fbb..233efbb1c81c 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -243,6 +243,8 @@ struct kvm_hyperv_exit { #define KVM_INTERNAL_ERROR_SIMUL_EX 2 /* Encounter unexpected vm-exit due to delivery event. */ #define KVM_INTERNAL_ERROR_DELIVERY_EV 3 +/* Encounter unexpected vm-exit reason */ +#define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ struct kvm_run { -- cgit v1.2.3 From 64f658ded48eccd23d429d636d49a03b3f53d741 Mon Sep 17 00:00:00 2001 From: Dirk van der Merwe Date: Wed, 11 Sep 2019 12:08:32 +0100 Subject: devlink: add unknown 'fw_load_policy' value Similar to the 'reset_dev_on_drv_probe' devlink parameter, it is useful to have an unknown value which can be used by drivers to report that the hardware value isn't recognized or is otherwise invalid instead of failing the operation. This is especially useful for u8/enum parameters. Suggested-by: Jakub Kicinski Signed-off-by: Dirk van der Merwe Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- include/uapi/linux/devlink.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 1da3e83f1fd4..8da5365850cd 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -203,6 +203,7 @@ enum devlink_param_fw_load_policy_value { DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER, DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH, DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK, + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_UNKNOWN, }; enum devlink_param_reset_dev_on_drv_probe_value { -- cgit v1.2.3 From c4bb667eaf520f21b3a3db0489682becc9c49bcc Mon Sep 17 00:00:00 2001 From: "Dr. 
David Alan Gilbert" Date: Fri, 2 Aug 2019 18:15:19 +0100 Subject: fuse: reserve values for mapping protocol SETUPMAPPING is a command for use with 'virtiofsd', a fuse-over-virtio implementation; it may find use in other fuse impelementations as well in which the kernel does not have access to the address space of the daemon directly. A SETUPMAPPING operation causes a section of a file to be mapped into a memory window visible to the kernel. The offsets in the file and the window are defined by the kernel performing the operation. The daemon may reject the request, for reasons including permissions and limited resources. When a request perfectly overlaps a previous mapping, the previous mapping is replaced. When a mapping partially overlaps a previous mapping, the previous mapping is split into one or two smaller mappings. REMOVEMAPPING is the complement to SETUPMAPPING; it unmaps a range of mapped files from the window visible to the kernel. The map_alignment field communicates the alignment constraint for FUSE_SETUPMAPPING/FUSE_REMOVEMAPPING and allows the daemon to constrain the addresses and file offsets chosen by the kernel. Signed-off-by: Dr. David Alan Gilbert Signed-off-by: Vivek Goyal Signed-off-by: Stefan Hajnoczi Signed-off-by: Miklos Szeredi --- include/uapi/linux/fuse.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index df2e12fb3381..802b0377a49e 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -133,6 +133,8 @@ * * 7.31 * - add FUSE_WRITE_KILL_PRIV flag + * - add FUSE_SETUPMAPPING and FUSE_REMOVEMAPPING + * - add map_alignment to fuse_init_out, add FUSE_MAP_ALIGNMENT flag */ #ifndef _LINUX_FUSE_H @@ -274,6 +276,7 @@ struct fuse_file_lock { * FUSE_CACHE_SYMLINKS: cache READLINK responses * FUSE_NO_OPENDIR_SUPPORT: kernel supports zero-message opendir * FUSE_EXPLICIT_INVAL_DATA: only invalidate cached pages on explicit request + * FUSE_MAP_ALIGNMENT: map_alignment field is valid */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -301,6 +304,7 @@ struct fuse_file_lock { #define FUSE_CACHE_SYMLINKS (1 << 23) #define FUSE_NO_OPENDIR_SUPPORT (1 << 24) #define FUSE_EXPLICIT_INVAL_DATA (1 << 25) +#define FUSE_MAP_ALIGNMENT (1 << 26) /** * CUSE INIT request/reply flags @@ -422,6 +426,8 @@ enum fuse_opcode { FUSE_RENAME2 = 45, FUSE_LSEEK = 46, FUSE_COPY_FILE_RANGE = 47, + FUSE_SETUPMAPPING = 48, + FUSE_REMOVEMAPPING = 49, /* CUSE specific operations */ CUSE_INIT = 4096, @@ -656,7 +662,7 @@ struct fuse_init_out { uint32_t max_write; uint32_t time_gran; uint16_t max_pages; - uint16_t padding; + uint16_t map_alignment; uint32_t unused[8]; }; -- cgit v1.2.3 From 501ae8ecae2ba5122774dee4445003505a7fd01b Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 4 Sep 2019 08:36:33 -0400 Subject: fuse: reserve byteswapped init opcodes virtio fs tunnels fuse over a virtio channel. One issue is two sides might be speaking different endian-ness. To detects this, host side looks at the opcode value in the FUSE_INIT command. Works fine at the moment but might fail if a future version of fuse will use such an opcode for initialization. Let's reserve this opcode so we remember and don't do this. Same for CUSE_INIT. Signed-off-by: Michael S. 
Tsirkin Signed-off-by: Miklos Szeredi --- include/uapi/linux/fuse.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 2971d29a42e4..df2e12fb3381 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -425,6 +425,10 @@ enum fuse_opcode { /* CUSE specific operations */ CUSE_INIT = 4096, + + /* Reserved opcodes: helpful to detect structure endian-ness */ + CUSE_INIT_BSWAP_RESERVED = 1048576, /* CUSE_INIT << 8 */ + FUSE_INIT_BSWAP_RESERVED = 436207616, /* FUSE_INIT << 24 */ }; enum fuse_notify_code { -- cgit v1.2.3 From 415606588c61230b7b4f0118fc2d64a0c1c4d102 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Wed, 11 Sep 2019 09:16:21 +0300 Subject: PTP: introduce new versions of IOCTLs The current version of the IOCTLs has a small problem which prevents us from extending the API by making use of the reserved fields. In these new IOCTLs, we are now making sure that flags and rsv fields are zero, which will allow us to extend the API in the future. Reviewed-by: Richard Cochran Signed-off-by: Felipe Balbi Signed-off-by: David S. Miller --- drivers/ptp/ptp_chardev.c | 63 ++++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/ptp_clock.h | 24 +++++++++++++++- 2 files changed, 86 insertions(+), 1 deletion(-) (limited to 'include/uapi') diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 18ffe449efdf..9c18476d8d10 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -126,7 +126,9 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) switch (cmd) { case PTP_CLOCK_GETCAPS: + case PTP_CLOCK_GETCAPS2: memset(&caps, 0, sizeof(caps)); + caps.max_adj = ptp->info->max_adj; caps.n_alarm = ptp->info->n_alarm; caps.n_ext_ts = ptp->info->n_ext_ts; @@ -139,11 +141,24 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_EXTTS_REQUEST: + case PTP_EXTTS_REQUEST2: + memset(&req, 0, sizeof(req)); + if (copy_from_user(&req.extts, (void __user *)arg, sizeof(req.extts))) { err = -EFAULT; break; } + if (((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) || + req.extts.rsv[0] || req.extts.rsv[1]) && + cmd == PTP_EXTTS_REQUEST2) { + err = -EINVAL; + break; + } else if (cmd == PTP_EXTTS_REQUEST) { + req.extts.flags &= ~PTP_EXTTS_VALID_FLAGS; + req.extts.rsv[0] = 0; + req.extts.rsv[1] = 0; + } if (req.extts.index >= ops->n_ext_ts) { err = -EINVAL; break; @@ -154,11 +169,27 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_PEROUT_REQUEST: + case PTP_PEROUT_REQUEST2: + memset(&req, 0, sizeof(req)); + if (copy_from_user(&req.perout, (void __user *)arg, sizeof(req.perout))) { err = -EFAULT; break; } + if (((req.perout.flags & ~PTP_PEROUT_VALID_FLAGS) || + req.perout.rsv[0] || req.perout.rsv[1] || + req.perout.rsv[2] || req.perout.rsv[3]) && + cmd == PTP_PEROUT_REQUEST2) { + err = -EINVAL; + break; + } else if (cmd == PTP_PEROUT_REQUEST) { + req.perout.flags &= ~PTP_PEROUT_VALID_FLAGS; + req.perout.rsv[0] = 0; + req.perout.rsv[1] = 0; + req.perout.rsv[2] = 0; + req.perout.rsv[3] = 0; + } if (req.perout.index >= ops->n_per_out) { err = -EINVAL; break; @@ -169,6 +200,9 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_ENABLE_PPS: + case PTP_ENABLE_PPS2: + memset(&req, 0, sizeof(req)); + if (!capable(CAP_SYS_TIME)) return -EPERM; req.type = PTP_CLK_REQ_PPS; @@ -177,6 +211,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, 
unsigned long arg) break; case PTP_SYS_OFFSET_PRECISE: + case PTP_SYS_OFFSET_PRECISE2: if (!ptp->info->getcrosststamp) { err = -EOPNOTSUPP; break; @@ -201,6 +236,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_SYS_OFFSET_EXTENDED: + case PTP_SYS_OFFSET_EXTENDED2: if (!ptp->info->gettimex64) { err = -EOPNOTSUPP; break; @@ -232,6 +268,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_SYS_OFFSET: + case PTP_SYS_OFFSET2: sysoff = memdup_user((void __user *)arg, sizeof(*sysoff)); if (IS_ERR(sysoff)) { err = PTR_ERR(sysoff); @@ -266,10 +303,23 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_PIN_GETFUNC: + case PTP_PIN_GETFUNC2: if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) { err = -EFAULT; break; } + if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2] + || pd.rsv[3] || pd.rsv[4]) + && cmd == PTP_PIN_GETFUNC2) { + err = -EINVAL; + break; + } else if (cmd == PTP_PIN_GETFUNC) { + pd.rsv[0] = 0; + pd.rsv[1] = 0; + pd.rsv[2] = 0; + pd.rsv[3] = 0; + pd.rsv[4] = 0; + } pin_index = pd.index; if (pin_index >= ops->n_pins) { err = -EINVAL; @@ -285,10 +335,23 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) break; case PTP_PIN_SETFUNC: + case PTP_PIN_SETFUNC2: if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) { err = -EFAULT; break; } + if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2] + || pd.rsv[3] || pd.rsv[4]) + && cmd == PTP_PIN_SETFUNC2) { + err = -EINVAL; + break; + } else if (cmd == PTP_PIN_SETFUNC) { + pd.rsv[0] = 0; + pd.rsv[1] = 0; + pd.rsv[2] = 0; + pd.rsv[3] = 0; + pd.rsv[4] = 0; + } pin_index = pd.index; if (pin_index >= ops->n_pins) { err = -EINVAL; diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index 1bc794ad957a..9a0af3511b68 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -25,10 +25,20 @@ #include #include -/* PTP_xxx bits, for the flags field within the request structures. */ +/* + * Bits of the ptp_extts_request.flags field: + */ #define PTP_ENABLE_FEATURE (1<<0) #define PTP_RISING_EDGE (1<<1) #define PTP_FALLING_EDGE (1<<2) +#define PTP_EXTTS_VALID_FLAGS (PTP_ENABLE_FEATURE | \ + PTP_RISING_EDGE | \ + PTP_FALLING_EDGE) + +/* + * Bits of the ptp_perout_request.flags field: + */ +#define PTP_PEROUT_VALID_FLAGS (0) /* * struct ptp_clock_time - represents a time value @@ -149,6 +159,18 @@ struct ptp_pin_desc { #define PTP_SYS_OFFSET_EXTENDED \ _IOWR(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended) +#define PTP_CLOCK_GETCAPS2 _IOR(PTP_CLK_MAGIC, 10, struct ptp_clock_caps) +#define PTP_EXTTS_REQUEST2 _IOW(PTP_CLK_MAGIC, 11, struct ptp_extts_request) +#define PTP_PEROUT_REQUEST2 _IOW(PTP_CLK_MAGIC, 12, struct ptp_perout_request) +#define PTP_ENABLE_PPS2 _IOW(PTP_CLK_MAGIC, 13, int) +#define PTP_SYS_OFFSET2 _IOW(PTP_CLK_MAGIC, 14, struct ptp_sys_offset) +#define PTP_PIN_GETFUNC2 _IOWR(PTP_CLK_MAGIC, 15, struct ptp_pin_desc) +#define PTP_PIN_SETFUNC2 _IOW(PTP_CLK_MAGIC, 16, struct ptp_pin_desc) +#define PTP_SYS_OFFSET_PRECISE2 \ + _IOWR(PTP_CLK_MAGIC, 17, struct ptp_sys_offset_precise) +#define PTP_SYS_OFFSET_EXTENDED2 \ + _IOWR(PTP_CLK_MAGIC, 18, struct ptp_sys_offset_extended) + struct ptp_extts_event { struct ptp_clock_time t; /* Time event occured. */ unsigned int index; /* Which channel produced the event. 
*/ -- cgit v1.2.3 From 823eb2a3c4c7f1b3e749f0dddb70bf8b09a76a10 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Wed, 11 Sep 2019 09:16:22 +0300 Subject: PTP: add support for one-shot output Some controllers allow for a one-shot output pulse, in contrast to periodic output. Now that we have extensible versions of our IOCTLs, we can finally make use of the 'flags' field to pass a bit telling the driver that we want one-shot pulse output. Signed-off-by: Felipe Balbi Reviewed-by: Richard Cochran Signed-off-by: David S. Miller --- include/uapi/linux/ptp_clock.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index 9a0af3511b68..f16301015949 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -38,8 +38,8 @@ /* * Bits of the ptp_perout_request.flags field: */ -#define PTP_PEROUT_VALID_FLAGS (0) - +#define PTP_PEROUT_ONE_SHOT (1<<0) +#define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT) /* * struct ptp_clock_time - represents a time value * @@ -77,7 +77,7 @@ struct ptp_perout_request { struct ptp_clock_time start; /* Absolute start time. */ struct ptp_clock_time period; /* Desired period, zero means disable. */ unsigned int index; /* Which channel to configure. */ - unsigned int flags; /* Reserved for future use. */ + unsigned int flags; unsigned int rsv[4]; /* Reserved for future use. */ }; -- cgit v1.2.3 From 33f2c35a54dfd75ad0e7e86918dcbe4de799a56c Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Mon, 9 Sep 2019 16:52:29 +1000 Subject: md: add feature flag MD_FEATURE_RAID0_LAYOUT Due to a bug introduced in Linux 3.14 we cannot determine the correct layout for a multi-zone RAID0 array - there are two possibilities. It is possible to tell the kernel which to choose using a module parameter, but this can be clumsy to use. It would be best if the choice were recorded in the metadata. So add a feature flag for this purpose. If it is set, then the 'layout' field of the superblock is used to determine which layout to use. If this flag is not set, then mddev->layout gets set to -1, which causes the module parameter to be required.
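For illustration, a userspace tool inspecting a v1.x superblock could apply the same test using the UAPI header touched here (a sketch; assumes sb has already been read from disk into host memory):

#include <endian.h>
#include <stdbool.h>
#include <linux/raid/md_p.h>

/* The 'layout' field is only trustworthy for RAID0 when the new
 * feature flag is set; otherwise the kernel forces mddev->layout to -1
 * and the raid0 default_layout module parameter must decide. */
static bool raid0_layout_is_meaningful(const struct mdp_superblock_1 *sb)
{
	if (le32toh(sb->level) != 0)
		return true;	/* not RAID0: 'layout' keeps its usual meaning */
	return le32toh(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT;
}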
Acked-by: Guoqing Jiang Signed-off-by: NeilBrown Signed-off-by: Song Liu --- drivers/md/md.c | 13 +++++++++++++ drivers/md/raid0.c | 3 +++ include/uapi/linux/raid/md_p.h | 2 ++ 3 files changed, 18 insertions(+) (limited to 'include/uapi') diff --git a/drivers/md/md.c b/drivers/md/md.c index 73d5a1b04022..1be7abeb24fd 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -1237,6 +1237,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) mddev->new_layout = mddev->layout; mddev->new_chunk_sectors = mddev->chunk_sectors; } + if (mddev->level == 0) + mddev->layout = -1; if (sb->state & (1<recovery_cp = MaxSector; @@ -1652,6 +1654,10 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_ rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset; } + if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) && + sb->level != 0) + return -EINVAL; + if (!refdev) { ret = 1; } else { @@ -1762,6 +1768,10 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) mddev->new_chunk_sectors = mddev->chunk_sectors; } + if (mddev->level == 0 && + !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT)) + mddev->layout = -1; + if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) set_bit(MD_HAS_JOURNAL, &mddev->flags); @@ -6898,6 +6908,9 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) mddev->external = 0; mddev->layout = info->layout; + if (mddev->level == 0) + /* Cannot trust RAID0 layout info here */ + mddev->layout = -1; mddev->chunk_sectors = info->chunk_size >> 9; if (mddev->persistent) { diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index ec611abda835..f61693e59684 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -145,6 +145,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) if (conf->nr_strip_zones == 1) { conf->layout = RAID0_ORIG_LAYOUT; + } else if (mddev->layout == RAID0_ORIG_LAYOUT || + mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) { + conf->layout = mddev->layout; } else if (default_layout == RAID0_ORIG_LAYOUT || default_layout == RAID0_ALT_MULTIZONE_LAYOUT) { conf->layout = default_layout; diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index b0d15c73f6d7..1f2d8c81f0e0 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -329,6 +329,7 @@ struct mdp_superblock_1 { #define MD_FEATURE_JOURNAL 512 /* support write cache */ #define MD_FEATURE_PPL 1024 /* support PPL */ #define MD_FEATURE_MULTIPLE_PPLS 2048 /* support for multiple PPLs */ +#define MD_FEATURE_RAID0_LAYOUT 4096 /* layout is meaningful for RAID0 */ #define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \ |MD_FEATURE_RECOVERY_OFFSET \ |MD_FEATURE_RESHAPE_ACTIVE \ @@ -341,6 +342,7 @@ struct mdp_superblock_1 { |MD_FEATURE_JOURNAL \ |MD_FEATURE_PPL \ |MD_FEATURE_MULTIPLE_PPLS \ + |MD_FEATURE_RAID0_LAYOUT \ ) struct r5l_payload_header { -- cgit v1.2.3 From 2670ac2625f98557fd7e083f8aa22c297e49039e Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 12 Sep 2019 10:49:46 +0200 Subject: net: devlink: move reload fail indication to devlink core and expose to user Currently the fact that devlink reload failed is stored in drivers. Move this flag into devlink core. Also, expose it to the user. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 15 +++++---------- include/net/devlink.h | 3 +++ include/uapi/linux/devlink.h | 2 ++ net/core/devlink.c | 21 ++++++++++++++++++++- 4 files changed, 30 insertions(+), 11 deletions(-) (limited to 'include/uapi') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index c71a1d9ea17b..3fa96076e8a5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -80,7 +80,6 @@ struct mlxsw_core { struct mlxsw_thermal *thermal; struct mlxsw_core_port *ports; unsigned int max_ports; - bool reload_fail; bool fw_flash_in_progress; unsigned long driver_priv[0]; /* driver_priv has to be always the last item */ @@ -1002,15 +1001,11 @@ mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); - int err; - - err = mlxsw_core_bus_device_register(mlxsw_core->bus_info, - mlxsw_core->bus, - mlxsw_core->bus_priv, true, - devlink); - mlxsw_core->reload_fail = !!err; - return err; + return mlxsw_core_bus_device_register(mlxsw_core->bus_info, + mlxsw_core->bus, + mlxsw_core->bus_priv, true, + devlink); } static int mlxsw_devlink_flash_update(struct devlink *devlink, @@ -1254,7 +1249,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, { struct devlink *devlink = priv_to_devlink(mlxsw_core); - if (mlxsw_core->reload_fail) { + if (devlink_is_reload_failed(devlink)) { if (!reload) /* Only the parts that were not de-initialized in the * failed reload attempt need to be de-initialized. diff --git a/include/net/devlink.h b/include/net/devlink.h index ab8d56d12ffd..23e4b65ec9df 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -38,6 +38,7 @@ struct devlink { struct device *dev; possible_net_t _net; struct mutex lock; + bool reload_failed; char priv[0] __aligned(NETDEV_ALIGN); }; @@ -945,6 +946,8 @@ void devlink_health_reporter_state_update(struct devlink_health_reporter *reporter, enum devlink_health_reporter_state state); +bool devlink_is_reload_failed(const struct devlink *devlink); + void devlink_flash_update_begin_notify(struct devlink *devlink); void devlink_flash_update_end_notify(struct devlink *devlink); void devlink_flash_update_status_notify(struct devlink *devlink, diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h index 8da5365850cd..580b7a2e40e1 100644 --- a/include/uapi/linux/devlink.h +++ b/include/uapi/linux/devlink.h @@ -419,6 +419,8 @@ enum devlink_attr { DEVLINK_ATTR_TRAP_METADATA, /* nested */ DEVLINK_ATTR_TRAP_GROUP_NAME, /* string */ + DEVLINK_ATTR_RELOAD_FAILED, /* u8 0 or 1 */ + /* add new attributes above here, update the policy in devlink.c */ __DEVLINK_ATTR_MAX, diff --git a/net/core/devlink.c b/net/core/devlink.c index 9e522639693d..e48680efe54a 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c @@ -471,6 +471,8 @@ static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink, if (devlink_nl_put_handle(msg, devlink)) goto nla_put_failure; + if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_FAILED, devlink->reload_failed)) + goto nla_put_failure; genlmsg_end(msg, hdr); return 0; @@ -2677,6 +2679,21 @@ static bool devlink_reload_supported(struct devlink *devlink) return devlink->ops->reload_down && devlink->ops->reload_up; } +static void devlink_reload_failed_set(struct devlink *devlink, + bool reload_failed) +{ + if (devlink->reload_failed == reload_failed) + return; + 
devlink->reload_failed = reload_failed; + devlink_notify(devlink, DEVLINK_CMD_NEW); +} + +bool devlink_is_reload_failed(const struct devlink *devlink) +{ + return devlink->reload_failed; +} +EXPORT_SYMBOL_GPL(devlink_is_reload_failed); + static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) { struct devlink *devlink = info->user_ptr[0]; @@ -2693,7 +2710,9 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info) err = devlink->ops->reload_down(devlink, info->extack); if (err) return err; - return devlink->ops->reload_up(devlink, info->extack); + err = devlink->ops->reload_up(devlink, info->extack); + devlink_reload_failed_set(devlink, !!err); + return err; } static int devlink_nl_flash_update_fill(struct sk_buff *msg, -- cgit v1.2.3 From afa179eb603847494aa5061d4f501224a30dd187 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Mon, 16 Sep 2019 05:55:42 -0400 Subject: dm: introduce DM_GET_TARGET_VERSION This commit introduces a new ioctl DM_GET_TARGET_VERSION. It will load a target that is specified in the "name" entry in the parameter structure and return its version. This functionality is intended to be used by cryptsetup, so that it can query kernel capabilities before activating the device. Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer --- drivers/md/dm-ioctl.c | 32 +++++++++++++++++++++++++++++--- include/uapi/linux/dm-ioctl.h | 6 ++++-- 2 files changed, 33 insertions(+), 5 deletions(-) (limited to 'include/uapi') diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index fb6f8fb1f13d..ac83f5002ce5 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -601,17 +601,27 @@ static void list_version_get_info(struct target_type *tt, void *param) info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1); } -static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param_size) +static int __list_versions(struct dm_ioctl *param, size_t param_size, const char *name) { size_t len, needed = 0; struct dm_target_versions *vers; struct vers_iter iter_info; + struct target_type *tt = NULL; + + if (name) { + tt = dm_get_target_type(name); + if (!tt) + return -EINVAL; + } /* * Loop through all the devices working out how much * space we need. */ - dm_target_iterate(list_version_get_needed, &needed); + if (!tt) + dm_target_iterate(list_version_get_needed, &needed); + else + list_version_get_needed(tt, &needed); /* * Grab our output buffer. @@ -632,13 +642,28 @@ static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param /* * Now loop through filling out the names & versions. 
*/ - dm_target_iterate(list_version_get_info, &iter_info); + if (!tt) + dm_target_iterate(list_version_get_info, &iter_info); + else + list_version_get_info(tt, &iter_info); param->flags |= iter_info.flags; out: + if (tt) + dm_put_target_type(tt); return 0; } +static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param_size) +{ + return __list_versions(param, param_size, NULL); +} + +static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t param_size) +{ + return __list_versions(param, param_size, param->name); +} + static int check_name(const char *name) { if (strchr(name, '/')) { @@ -1664,6 +1689,7 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) {DM_TARGET_MSG_CMD, 0, target_message}, {DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry}, {DM_DEV_ARM_POLL, IOCTL_FLAGS_NO_PARAMS, dev_arm_poll}, + {DM_GET_TARGET_VERSION, 0, get_target_version}, }; if (unlikely(cmd >= ARRAY_SIZE(_ioctls))) diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index f396a82dfd3e..2df8ceca1f9b 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h @@ -243,6 +243,7 @@ enum { DM_TARGET_MSG_CMD, DM_DEV_SET_GEOMETRY_CMD, DM_DEV_ARM_POLL_CMD, + DM_GET_TARGET_VERSION_CMD, }; #define DM_IOCTL 0xfd @@ -265,14 +266,15 @@ enum { #define DM_TABLE_STATUS _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, struct dm_ioctl) #define DM_LIST_VERSIONS _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, struct dm_ioctl) +#define DM_GET_TARGET_VERSION _IOWR(DM_IOCTL, DM_GET_TARGET_VERSION_CMD, struct dm_ioctl) #define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl) #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) #define DM_VERSION_MAJOR 4 -#define DM_VERSION_MINOR 40 +#define DM_VERSION_MINOR 41 #define DM_VERSION_PATCHLEVEL 0 -#define DM_VERSION_EXTRA "-ioctl (2019-01-18)" +#define DM_VERSION_EXTRA "-ioctl (2019-09-16)" /* Status bits */ #define DM_READONLY_FLAG (1 << 0) /* In/Out */ -- cgit v1.2.3 From f9af2dbbfe01def62765a58af7fbc488351893c3 Mon Sep 17 00:00:00 2001 From: Thomas Higdon Date: Fri, 13 Sep 2019 23:23:34 +0000 Subject: tcp: Add TCP_INFO counter for packets received out-of-order For receive-heavy cases on the server-side, we want to track the connection quality for individual client IPs. This counter, similar to the existing system-wide TCPOFOQueue counter in /proc/net/netstat, tracks out-of-order packet reception. By providing this counter in TCP_INFO, it will allow understanding to what degree receive-heavy sockets are experiencing out-of-order delivery and packet drops indicating congestion. Please note that this is similar to the counter in NetBSD TCP_INFO, and has the same name. Also note that we avoid increasing the size of the tcp_sock struct by taking advantage of a hole. Signed-off-by: Thomas Higdon Acked-by: Neal Cardwell Signed-off-by: David S. 
--- include/linux/tcp.h | 2 ++ include/uapi/linux/tcp.h | 2 ++ net/ipv4/tcp.c | 2 ++ net/ipv4/tcp_input.c | 1 + 4 files changed, 7 insertions(+) (limited to 'include/uapi') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index f3a85a7fb4b1..99617e528ea2 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -354,6 +354,8 @@ struct tcp_sock { #define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0 #endif + u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */ + /* Receiver side RTT estimation */ u32 rcv_rtt_last_tsecr; struct { diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index b3564f85a762..20237987ccc8 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -270,6 +270,8 @@ struct tcp_info { __u64 tcpi_bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans */ __u32 tcpi_dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups */ __u32 tcpi_reord_seen; /* reordering events seen */ + + __u32 tcpi_rcv_ooopack; /* Out-of-order packets received */ }; /* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 94df48bcecc2..4cf58208270e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2653,6 +2653,7 @@ int tcp_disconnect(struct sock *sk, int flags) tp->rx_opt.saw_tstamp = 0; tp->rx_opt.dsack = 0; tp->rx_opt.num_sacks = 0; + tp->rcv_ooopack = 0; /* Clean up fastopen related fields */ @@ -3295,6 +3296,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_bytes_retrans = tp->bytes_retrans; info->tcpi_dsack_dups = tp->dsack_dups; info->tcpi_reord_seen = tp->reord_seen; + info->tcpi_rcv_ooopack = tp->rcv_ooopack; unlock_sock_fast(sk, slow); } EXPORT_SYMBOL_GPL(tcp_get_info); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 7e94223fdb2b..3578357abe30 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4555,6 +4555,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) tp->pred_flags = 0; inet_csk_schedule_ack(sk); + tp->rcv_ooopack += max_t(u16, 1, skb_shinfo(skb)->gso_segs); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE); seq = TCP_SKB_CB(skb)->seq; end_seq = TCP_SKB_CB(skb)->end_seq; -- cgit v1.2.3 From 8f7baad7f03543451af27f5380fc816b008aa1f2 Mon Sep 17 00:00:00 2001 From: Thomas Higdon Date: Fri, 13 Sep 2019 23:23:35 +0000 Subject: tcp: Add snd_wnd to TCP_INFO Neal Cardwell mentioned that snd_wnd would be useful for diagnosing TCP performance problems -- > (1) Usually when we're diagnosing TCP performance problems, we do so > from the sender, since the sender makes most of the > performance-critical decisions (cwnd, pacing, TSO size, TSQ, etc). > From the sender-side the thing that would be most useful is to see > tp->snd_wnd, the receive window that the receiver has advertised to > the sender. This serves the purpose of adding an additional __u32 to avoid the would-be hole caused by the addition of the tcpi_rcv_ooopack field. Signed-off-by: Thomas Higdon Acked-by: Yuchung Cheng Acked-by: Neal Cardwell Acked-by: Soheil Hassas Yeganeh Signed-off-by: David S. Miller
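[Editor's illustration, continuing the TCP_INFO sketch from the previous patch (same includes). The threshold heuristic is mine; only tcpi_snd_wnd comes from this change. A sender-side tool can use it to spot connections limited by the peer's receive window:]

/* Illustrative check: is the sender bounded by the advertised window?
 * tcpi_snd_cwnd is in segments, so scale by the sender MSS first.
 */
static int rwnd_limited(const struct tcp_info *info)
{
	unsigned long long cwnd_bytes =
		(unsigned long long)info->tcpi_snd_cwnd * info->tcpi_snd_mss;

	/* tcpi_snd_wnd is the peer's advertised window after scaling, in bytes */
	return info->tcpi_snd_wnd < cwnd_bytes;
}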
--- include/uapi/linux/tcp.h | 4 ++++ net/ipv4/tcp.c | 1 + 2 files changed, 5 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h index 20237987ccc8..81e697978e8b 100644 --- a/include/uapi/linux/tcp.h +++ b/include/uapi/linux/tcp.h @@ -272,6 +272,10 @@ struct tcp_info { __u32 tcpi_reord_seen; /* reordering events seen */ __u32 tcpi_rcv_ooopack; /* Out-of-order packets received */ + + __u32 tcpi_snd_wnd; /* peer's advertised receive window after + * scaling (bytes) + */ }; /* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4cf58208270e..79c325a07ba5 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3297,6 +3297,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_dsack_dups = tp->dsack_dups; info->tcpi_reord_seen = tp->reord_seen; info->tcpi_rcv_ooopack = tp->rcv_ooopack; + info->tcpi_snd_wnd = tp->snd_wnd; unlock_sock_fast(sk, slow); } EXPORT_SYMBOL_GPL(tcp_get_info); -- cgit v1.2.3 From 9c66d15646760eb8982242b4531c4d4fd36118fd Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Sun, 15 Sep 2019 04:59:58 +0300 Subject: taprio: Add support for hardware offloading This allows taprio to offload the schedule enforcement to capable network cards, resulting in more precise windows and less CPU usage. The gate mask acts on traffic classes (groups of queues of the same priority), as specified in IEEE 802.1Q-2018, and following the existing taprio and mqprio semantics. It is up to the driver to perform conversion between tc and individual netdev queues if for some reason it needs to make that distinction. Full offload is requested from the network interface by specifying "flags 2" in the tc qdisc creation command, which in turn corresponds to the TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD bit. The important detail here is the clockid, which is implicitly /dev/ptpN for full offload and hence not configurable. A reference counting API is added to support the use case where Ethernet drivers need to keep the taprio offload structure locally (i.e. they are a multi-port switch driver, and configuring a port depends on the settings of other ports as well). The refcount_t variable is kept in a private structure (__tc_taprio_qopt_offload) and not exposed to drivers. In the future, the private structure might also be expanded with a backpointer to taprio_sched *q, to implement the notification system described in the patch (of when admin became oper, or an error occurred, etc., so the offload can be monitored with 'tc qdisc show'). Signed-off-by: Vinicius Costa Gomes Signed-off-by: Voon Weifeng Signed-off-by: Vladimir Oltean Signed-off-by: David S. Miller
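[Editor's illustration of the consumer side, not from the patch: my_port and the hardware-programming step are hypothetical; only taprio_offload_get()/taprio_offload_free() and the structure fields come from this change. A driver's TC_SETUP_QDISC_TAPRIO handler could keep the schedule beyond the callback like this:]

#include <net/pkt_sched.h>

/* Hypothetical per-port driver state holding a reference to the schedule. */
struct my_port {
	struct tc_taprio_qopt_offload *taprio;
};

static int my_port_setup_taprio(struct my_port *port,
				struct tc_taprio_qopt_offload *offload)
{
	size_t i;

	if (!offload->enable) {
		if (port->taprio)
			taprio_offload_free(port->taprio); /* drop our reference */
		port->taprio = NULL;
		return 0; /* a real driver would also disable the hardware here */
	}

	/* Take a reference of our own so entries[] stays valid after return. */
	port->taprio = taprio_offload_get(offload);

	for (i = 0; i < offload->num_entries; i++) {
		/* Program entries[i].command, .interval and .gate_mask (which
		 * acts on traffic classes) into the hardware here.
		 */
	}

	return 0;
}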
--- include/linux/netdevice.h | 1 + include/net/pkt_sched.h | 23 +++ include/uapi/linux/pkt_sched.h | 3 +- net/sched/sch_taprio.c | 409 ++++++++++++++++++++++++++++++++++++----- 4 files changed, 392 insertions(+), 44 deletions(-) (limited to 'include/uapi') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d7d5626002e9..9eda1c31d1f7 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -847,6 +847,7 @@ enum tc_setup_type { TC_SETUP_QDISC_ETF, TC_SETUP_ROOT_QDISC, TC_SETUP_QDISC_GRED, + TC_SETUP_QDISC_TAPRIO, }; /* These structures hold the attributes of bpf state that are being passed diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index a16fbe9a2a67..d1632979622e 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -161,4 +161,27 @@ struct tc_etf_qopt_offload { s32 queue; }; +struct tc_taprio_sched_entry { + u8 command; /* TC_TAPRIO_CMD_* */ + + /* The gate_mask in the offloading side refers to traffic classes */ + u32 gate_mask; + u32 interval; +}; + +struct tc_taprio_qopt_offload { + u8 enable; + ktime_t base_time; + u64 cycle_time; + u64 cycle_time_extension; + + size_t num_entries; + struct tc_taprio_sched_entry entries[0]; +}; + +/* Reference counting */ +struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload + *offload); +void taprio_offload_free(struct tc_taprio_qopt_offload *offload); + #endif diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index 18f185299f47..5011259b8f67 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -1160,7 +1160,8 @@ enum { * [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL] */ -#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST 0x1 +#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST BIT(0) +#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD BIT(1) enum { TCA_TAPRIO_ATTR_UNSPEC, diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index 84b863e2bdbd..2f7b34205c82 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -29,8 +29,8 @@ static DEFINE_SPINLOCK(taprio_list_lock); #define TAPRIO_ALL_GATES_OPEN -1 -#define FLAGS_VALID(flags) (!((flags) & ~TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)) #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) +#define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD) struct sched_entry { struct list_head list; @@ -75,9 +75,16 @@ struct taprio_sched { struct sched_gate_list __rcu *admin_sched; struct hrtimer advance_timer; struct list_head taprio_list; + struct sk_buff *(*dequeue)(struct Qdisc *sch); + struct sk_buff *(*peek)(struct Qdisc *sch); u32 txtime_delay; }; +struct __tc_taprio_qopt_offload { + refcount_t users; + struct tc_taprio_qopt_offload offload; +}; + static ktime_t sched_base_time(const struct sched_gate_list *sched) { if (!sched) @@ -268,6 +275,19 @@ static bool is_valid_interval(struct sk_buff *skb, struct Qdisc *sch) return entry; } +static bool taprio_flags_valid(u32 flags) +{ + /* Make sure no other flag bits are set. */ + if (flags & ~(TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | + TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)) + return false; + /* txtime-assist and full offload are mutually exclusive */ + if ((flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) && + (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)) + return false; + return true; +} + /* This returns the tstamp value set by TCP in terms of the set clock. 
*/ static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb) { @@ -417,7 +437,7 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, return qdisc_enqueue(skb, child, to_free); } -static struct sk_buff *taprio_peek(struct Qdisc *sch) +static struct sk_buff *taprio_peek_soft(struct Qdisc *sch) { struct taprio_sched *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); @@ -461,6 +481,36 @@ static struct sk_buff *taprio_peek(struct Qdisc *sch) return NULL; } +static struct sk_buff *taprio_peek_offload(struct Qdisc *sch) +{ + struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + struct sk_buff *skb; + int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct Qdisc *child = q->qdiscs[i]; + + if (unlikely(!child)) + continue; + + skb = child->ops->peek(child); + if (!skb) + continue; + + return skb; + } + + return NULL; +} + +static struct sk_buff *taprio_peek(struct Qdisc *sch) +{ + struct taprio_sched *q = qdisc_priv(sch); + + return q->peek(sch); +} + static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry) { atomic_set(&entry->budget, @@ -468,7 +518,7 @@ static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry) atomic64_read(&q->picos_per_byte))); } -static struct sk_buff *taprio_dequeue(struct Qdisc *sch) +static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch) { struct taprio_sched *q = qdisc_priv(sch); struct net_device *dev = qdisc_dev(sch); @@ -550,6 +600,40 @@ done: return skb; } +static struct sk_buff *taprio_dequeue_offload(struct Qdisc *sch) +{ + struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + struct sk_buff *skb; + int i; + + for (i = 0; i < dev->num_tx_queues; i++) { + struct Qdisc *child = q->qdiscs[i]; + + if (unlikely(!child)) + continue; + + skb = child->ops->dequeue(child); + if (unlikely(!skb)) + continue; + + qdisc_bstats_update(sch, skb); + qdisc_qstats_backlog_dec(sch, skb); + sch->q.qlen--; + + return skb; + } + + return NULL; +} + +static struct sk_buff *taprio_dequeue(struct Qdisc *sch) +{ + struct taprio_sched *q = qdisc_priv(sch); + + return q->dequeue(sch); +} + static bool should_restart_cycle(const struct sched_gate_list *oper, const struct sched_entry *entry) { @@ -932,6 +1016,9 @@ static void taprio_start_sched(struct Qdisc *sch, struct taprio_sched *q = qdisc_priv(sch); ktime_t expires; + if (FULL_OFFLOAD_IS_ENABLED(q->flags)) + return; + expires = hrtimer_get_expires(&q->advance_timer); if (expires == 0) expires = KTIME_MAX; @@ -1011,6 +1098,254 @@ static void setup_txtime(struct taprio_sched *q, } } +static struct tc_taprio_qopt_offload *taprio_offload_alloc(int num_entries) +{ + size_t size = sizeof(struct tc_taprio_sched_entry) * num_entries + + sizeof(struct __tc_taprio_qopt_offload); + struct __tc_taprio_qopt_offload *__offload; + + __offload = kzalloc(size, GFP_KERNEL); + if (!__offload) + return NULL; + + refcount_set(&__offload->users, 1); + + return &__offload->offload; +} + +struct tc_taprio_qopt_offload *taprio_offload_get(struct tc_taprio_qopt_offload + *offload) +{ + struct __tc_taprio_qopt_offload *__offload; + + __offload = container_of(offload, struct __tc_taprio_qopt_offload, + offload); + + refcount_inc(&__offload->users); + + return offload; +} +EXPORT_SYMBOL_GPL(taprio_offload_get); + +void taprio_offload_free(struct tc_taprio_qopt_offload *offload) +{ + struct __tc_taprio_qopt_offload *__offload; + + __offload = container_of(offload, struct __tc_taprio_qopt_offload, + 
offload); + + if (!refcount_dec_and_test(&__offload->users)) + return; + + kfree(__offload); +} +EXPORT_SYMBOL_GPL(taprio_offload_free); + +/* The function will only serve to keep the pointers to the "oper" and "admin" + * schedules valid in relation to their base times, so when calling dump() the + * users looks at the right schedules. + * When using full offload, the admin configuration is promoted to oper at the + * base_time in the PHC time domain. But because the system time is not + * necessarily in sync with that, we can't just trigger a hrtimer to call + * switch_schedules at the right hardware time. + * At the moment we call this by hand right away from taprio, but in the future + * it will be useful to create a mechanism for drivers to notify taprio of the + * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump(). + * This is left as TODO. + */ +void taprio_offload_config_changed(struct taprio_sched *q) +{ + struct sched_gate_list *oper, *admin; + + spin_lock(&q->current_entry_lock); + + oper = rcu_dereference_protected(q->oper_sched, + lockdep_is_held(&q->current_entry_lock)); + admin = rcu_dereference_protected(q->admin_sched, + lockdep_is_held(&q->current_entry_lock)); + + switch_schedules(q, &admin, &oper); + + spin_unlock(&q->current_entry_lock); +} + +static void taprio_sched_to_offload(struct taprio_sched *q, + struct sched_gate_list *sched, + const struct tc_mqprio_qopt *mqprio, + struct tc_taprio_qopt_offload *offload) +{ + struct sched_entry *entry; + int i = 0; + + offload->base_time = sched->base_time; + offload->cycle_time = sched->cycle_time; + offload->cycle_time_extension = sched->cycle_time_extension; + + list_for_each_entry(entry, &sched->entries, list) { + struct tc_taprio_sched_entry *e = &offload->entries[i]; + + e->command = entry->command; + e->interval = entry->interval; + e->gate_mask = entry->gate_mask; + i++; + } + + offload->num_entries = i; +} + +static int taprio_enable_offload(struct net_device *dev, + struct tc_mqprio_qopt *mqprio, + struct taprio_sched *q, + struct sched_gate_list *sched, + struct netlink_ext_ack *extack) +{ + const struct net_device_ops *ops = dev->netdev_ops; + struct tc_taprio_qopt_offload *offload; + int err = 0; + + if (!ops->ndo_setup_tc) { + NL_SET_ERR_MSG(extack, + "Device does not support taprio offload"); + return -EOPNOTSUPP; + } + + offload = taprio_offload_alloc(sched->num_entries); + if (!offload) { + NL_SET_ERR_MSG(extack, + "Not enough memory for enabling offload mode"); + return -ENOMEM; + } + offload->enable = 1; + taprio_sched_to_offload(q, sched, mqprio, offload); + + err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); + if (err < 0) { + NL_SET_ERR_MSG(extack, + "Device failed to setup taprio offload"); + goto done; + } + + taprio_offload_config_changed(q); + +done: + taprio_offload_free(offload); + + return err; +} + +static int taprio_disable_offload(struct net_device *dev, + struct taprio_sched *q, + struct netlink_ext_ack *extack) +{ + const struct net_device_ops *ops = dev->netdev_ops; + struct tc_taprio_qopt_offload *offload; + int err; + + if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) + return 0; + + if (!ops->ndo_setup_tc) + return -EOPNOTSUPP; + + offload = taprio_offload_alloc(0); + if (!offload) { + NL_SET_ERR_MSG(extack, + "Not enough memory to disable offload mode"); + return -ENOMEM; + } + offload->enable = 0; + + err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload); + if (err < 0) { + NL_SET_ERR_MSG(extack, + "Device failed to disable offload"); + goto out; + } + 
+out: + taprio_offload_free(offload); + + return err; +} + +/* If full offload is enabled, the only possible clockid is the net device's + * PHC. For that reason, specifying a clockid through netlink is incorrect. + * For txtime-assist, it is implicitly assumed that the device's PHC is kept + * in sync with the specified clockid via a user space daemon such as phc2sys. + * For both software taprio and txtime-assist, the clockid is used for the + * hrtimer that advances the schedule and hence mandatory. + */ +static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb, + struct netlink_ext_ack *extack) +{ + struct taprio_sched *q = qdisc_priv(sch); + struct net_device *dev = qdisc_dev(sch); + int err = -EINVAL; + + if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_ts_info info = { + .cmd = ETHTOOL_GET_TS_INFO, + .phc_index = -1, + }; + + if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) { + NL_SET_ERR_MSG(extack, + "The 'clockid' cannot be specified for full offload"); + goto out; + } + + if (ops && ops->get_ts_info) + err = ops->get_ts_info(dev, &info); + + if (err || info.phc_index < 0) { + NL_SET_ERR_MSG(extack, + "Device does not have a PTP clock"); + err = -ENOTSUPP; + goto out; + } + } else if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) { + int clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]); + + /* We only support static clockids and we don't allow + * for it to be modified after the first init. + */ + if (clockid < 0 || + (q->clockid != -1 && q->clockid != clockid)) { + NL_SET_ERR_MSG(extack, + "Changing the 'clockid' of a running schedule is not supported"); + err = -ENOTSUPP; + goto out; + } + + switch (clockid) { + case CLOCK_REALTIME: + q->tk_offset = TK_OFFS_REAL; + break; + case CLOCK_MONOTONIC: + q->tk_offset = TK_OFFS_MAX; + break; + case CLOCK_BOOTTIME: + q->tk_offset = TK_OFFS_BOOT; + break; + case CLOCK_TAI: + q->tk_offset = TK_OFFS_TAI; + break; + default: + NL_SET_ERR_MSG(extack, "Invalid 'clockid'"); + err = -EINVAL; + goto out; + } + + q->clockid = clockid; + } else { + NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory"); + goto out; + } +out: + return err; +} + static int taprio_change(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { @@ -1020,9 +1355,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, struct net_device *dev = qdisc_dev(sch); struct tc_mqprio_qopt *mqprio = NULL; u32 taprio_flags = 0; - int i, err, clockid; unsigned long flags; ktime_t start; + int i, err; err = nla_parse_nested_deprecated(tb, TCA_TAPRIO_ATTR_MAX, opt, taprio_policy, extack); @@ -1038,7 +1373,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, if (q->flags != 0 && q->flags != taprio_flags) { NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported"); return -EOPNOTSUPP; - } else if (!FLAGS_VALID(taprio_flags)) { + } else if (!taprio_flags_valid(taprio_flags)) { NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid"); return -EINVAL; } @@ -1078,30 +1413,19 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, goto free_sched; } - if (tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) { - clockid = nla_get_s32(tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]); - - /* We only support static clockids and we don't allow - * for it to be modified after the first init. 
- */ - if (clockid < 0 || - (q->clockid != -1 && q->clockid != clockid)) { - NL_SET_ERR_MSG(extack, "Changing the 'clockid' of a running schedule is not supported"); - err = -ENOTSUPP; - goto free_sched; - } - - q->clockid = clockid; - } - - if (q->clockid == -1 && !tb[TCA_TAPRIO_ATTR_SCHED_CLOCKID]) { - NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory"); - err = -EINVAL; + err = taprio_parse_clockid(sch, tb, extack); + if (err < 0) goto free_sched; - } taprio_set_picos_per_byte(dev, q); + if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) + err = taprio_enable_offload(dev, mqprio, q, new_admin, extack); + else + err = taprio_disable_offload(dev, q, extack); + if (err) + goto free_sched; + /* Protects against enqueue()/dequeue() */ spin_lock_bh(qdisc_lock(sch)); @@ -1116,6 +1440,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, } if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) && + !FULL_OFFLOAD_IS_ENABLED(taprio_flags) && !hrtimer_active(&q->advance_timer)) { hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS); q->advance_timer.function = advance_sched; @@ -1134,23 +1459,15 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, mqprio->prio_tc_map[i]); } - switch (q->clockid) { - case CLOCK_REALTIME: - q->tk_offset = TK_OFFS_REAL; - break; - case CLOCK_MONOTONIC: - q->tk_offset = TK_OFFS_MAX; - break; - case CLOCK_BOOTTIME: - q->tk_offset = TK_OFFS_BOOT; - break; - case CLOCK_TAI: - q->tk_offset = TK_OFFS_TAI; - break; - default: - NL_SET_ERR_MSG(extack, "Invalid 'clockid'"); - err = -EINVAL; - goto unlock; + if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) { + q->dequeue = taprio_dequeue_offload; + q->peek = taprio_peek_offload; + } else { + /* Be sure to always keep the function pointers + * in a consistent state. + */ + q->dequeue = taprio_dequeue_soft; + q->peek = taprio_peek_soft; } err = taprio_get_start_time(sch, new_admin, &start); @@ -1212,6 +1529,8 @@ static void taprio_destroy(struct Qdisc *sch) hrtimer_cancel(&q->advance_timer); + taprio_disable_offload(dev, q, NULL); + if (q->qdiscs) { for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++) qdisc_put(q->qdiscs[i]); @@ -1241,6 +1560,9 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt, hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS); q->advance_timer.function = advance_sched; + q->dequeue = taprio_dequeue_soft; + q->peek = taprio_peek_soft; + q->root = sch; /* We only support static clockids. Use an invalid value as default @@ -1423,7 +1745,8 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb) if (nla_put(skb, TCA_TAPRIO_ATTR_PRIOMAP, sizeof(opt), &opt)) goto options_error; - if (nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid)) + if (!FULL_OFFLOAD_IS_ENABLED(q->flags) && + nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid)) goto options_error; if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags)) -- cgit v1.2.3 From 9f2f13f4ffb13ee914105dfc9f860d68cae98108 Mon Sep 17 00:00:00 2001 From: Alexandru Ardelean Date: Mon, 16 Sep 2019 10:35:25 +0300 Subject: ethtool: implement Energy Detect Powerdown support via phy-tunable The `phy_tunable_id` has been named `ETHTOOL_PHY_EDPD` since it looks like this feature is common across other PHYs (like EEE), and defining `ETHTOOL_PHY_ENERGY_DETECT_POWER_DOWN` seems too long. The way EDPD works, is that the RX block is put to a lower power mode, except for link-pulse detection circuits. 
The TX block is also put to low power mode, but the PHY wakes-up periodically to send link pulses, to avoid lock-ups in case the other side is also in EDPD mode. Currently, there are 2 PHY drivers that look like they could use this new PHY tunable feature: the `adin` && `micrel` PHYs. The ADIN's datasheet mentions that TX pulses are at intervals of 1 second default each, and they can be disabled. For the Micrel KSZ9031 PHY, the datasheet does not mention whether they can be disabled, but mentions that they can modified. The way this change is structured, is similar to the PHY tunable downshift control: * a `ETHTOOL_PHY_EDPD_DFLT_TX_MSECS` value is exposed to cover a default TX interval; some PHYs could specify a certain value that makes sense * `ETHTOOL_PHY_EDPD_NO_TX` would disable TX when EDPD is enabled * `ETHTOOL_PHY_EDPD_DISABLE` will disable EDPD As noted by the `ETHTOOL_PHY_EDPD_DFLT_TX_MSECS` the interval unit is 1 millisecond, which should cover a reasonable range of intervals: - from 1 millisecond, which does not sound like much of a power-saver - to ~65 seconds which is quite a lot to wait for a link to come up when plugging a cable Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: Alexandru Ardelean Signed-off-by: David S. Miller --- include/uapi/linux/ethtool.h | 22 ++++++++++++++++++++++ net/core/ethtool.c | 6 ++++++ 2 files changed, 28 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index dd06302aa93e..8938b76c4ee3 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -259,10 +259,32 @@ struct ethtool_tunable { #define ETHTOOL_PHY_FAST_LINK_DOWN_ON 0 #define ETHTOOL_PHY_FAST_LINK_DOWN_OFF 0xff +/* Energy Detect Power Down (EDPD) is a feature supported by some PHYs, where + * the PHY's RX & TX blocks are put into a low-power mode when there is no + * link detected (typically cable is un-plugged). For RX, only a minimal + * link-detection is available, and for TX the PHY wakes up to send link pulses + * to avoid any lock-ups in case the peer PHY may also be running in EDPD mode. + * + * Some PHYs may support configuration of the wake-up interval for TX pulses, + * and some PHYs may support only disabling TX pulses entirely. For the latter + * a special value is required (ETHTOOL_PHY_EDPD_NO_TX) so that this can be + * configured from userspace (should the user want it). 
+ * + * The interval units for TX wake-up are in milliseconds, since this should + * cover a reasonable range of intervals: + * - from 1 millisecond, which does not sound like much of a power-saver + * - to ~65 seconds which is quite a lot to wait for a link to come up when + * plugging a cable + */ +#define ETHTOOL_PHY_EDPD_DFLT_TX_MSECS 0xffff +#define ETHTOOL_PHY_EDPD_NO_TX 0xfffe +#define ETHTOOL_PHY_EDPD_DISABLE 0 + enum phy_tunable_id { ETHTOOL_PHY_ID_UNSPEC, ETHTOOL_PHY_DOWNSHIFT, ETHTOOL_PHY_FAST_LINK_DOWN, + ETHTOOL_PHY_EDPD, /* * Add your fresh new phy tunable attribute above and remember to update * phy_tunable_strings[] in net/core/ethtool.c diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 6288e69e94fc..c763106c73fc 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -133,6 +133,7 @@ phy_tunable_strings[__ETHTOOL_PHY_TUNABLE_COUNT][ETH_GSTRING_LEN] = { [ETHTOOL_ID_UNSPEC] = "Unspec", [ETHTOOL_PHY_DOWNSHIFT] = "phy-downshift", [ETHTOOL_PHY_FAST_LINK_DOWN] = "phy-fast-link-down", + [ETHTOOL_PHY_EDPD] = "phy-energy-detect-power-down", }; static int ethtool_get_features(struct net_device *dev, void __user *useraddr) @@ -2451,6 +2452,11 @@ static int ethtool_phy_tunable_valid(const struct ethtool_tunable *tuna) tuna->type_id != ETHTOOL_TUNABLE_U8) return -EINVAL; break; + case ETHTOOL_PHY_EDPD: + if (tuna->len != sizeof(u16) || + tuna->type_id != ETHTOOL_TUNABLE_U16) + return -EINVAL; + break; default: return -EINVAL; } -- cgit v1.2.3 From 5262f567987d3c30052b22e78c35c2313d07b230 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 17 Sep 2019 12:26:57 -0600 Subject: io_uring: IORING_OP_TIMEOUT support There's been a few requests for functionality similar to io_getevents() and epoll_wait(), where the user can specify a timeout for waiting on events. I deliberately did not add support for this through the system call initially to avoid overloading the args, but I can see that the use cases for this are valid. This adds support for IORING_OP_TIMEOUT. If a user wants to get woken when waiting for events, simply submit one of these timeout commands with your wait call (or before). This ensures that the application sleeping on the CQ ring waiting for events will get woken. The timeout command is passed in as a pointer to a struct timespec. Timeouts are relative. The timeout command also includes a way to auto-cancel after N events has passed. Signed-off-by: Jens Axboe --- fs/io_uring.c | 149 ++++++++++++++++++++++++++++++++++++++++-- include/uapi/linux/io_uring.h | 2 + 2 files changed, 146 insertions(+), 5 deletions(-) (limited to 'include/uapi') diff --git a/fs/io_uring.c b/fs/io_uring.c index 05a299e80159..9d8e703bc851 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -200,6 +200,7 @@ struct io_ring_ctx { struct io_uring_sqe *sq_sqes; struct list_head defer_list; + struct list_head timeout_list; } ____cacheline_aligned_in_smp; /* IO offload */ @@ -216,6 +217,7 @@ struct io_ring_ctx { struct wait_queue_head cq_wait; struct fasync_struct *cq_fasync; struct eventfd_ctx *cq_ev_fd; + atomic_t cq_timeouts; } ____cacheline_aligned_in_smp; struct io_rings *rings; @@ -283,6 +285,11 @@ struct io_poll_iocb { struct wait_queue_entry wait; }; +struct io_timeout { + struct file *file; + struct hrtimer timer; +}; + /* * NOTE! Each of the iocb union members has the file pointer * as the first entry in their struct definition. 
So you can @@ -294,6 +301,7 @@ struct io_kiocb { struct file *file; struct kiocb rw; struct io_poll_iocb poll; + struct io_timeout timeout; }; struct sqe_submit submit; @@ -313,6 +321,7 @@ struct io_kiocb { #define REQ_F_LINK_DONE 128 /* linked sqes done */ #define REQ_F_FAIL_LINK 256 /* fail rest of links */ #define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */ +#define REQ_F_TIMEOUT 1024 /* timeout request */ u64 user_data; u32 result; u32 sequence; @@ -344,6 +353,8 @@ struct io_submit_state { }; static void io_sq_wq_submit_work(struct work_struct *work); +static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data, + long res); static void __io_free_req(struct io_kiocb *req); static struct kmem_cache *req_cachep; @@ -400,26 +411,30 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) INIT_LIST_HEAD(&ctx->poll_list); INIT_LIST_HEAD(&ctx->cancel_list); INIT_LIST_HEAD(&ctx->defer_list); + INIT_LIST_HEAD(&ctx->timeout_list); return ctx; } static inline bool io_sequence_defer(struct io_ring_ctx *ctx, struct io_kiocb *req) { - if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN) + /* timeout requests always honor sequence */ + if (!(req->flags & REQ_F_TIMEOUT) && + (req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN) return false; return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped; } -static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) +static struct io_kiocb *__io_get_deferred_req(struct io_ring_ctx *ctx, + struct list_head *list) { struct io_kiocb *req; - if (list_empty(&ctx->defer_list)) + if (list_empty(list)) return NULL; - req = list_first_entry(&ctx->defer_list, struct io_kiocb, list); + req = list_first_entry(list, struct io_kiocb, list); if (!io_sequence_defer(ctx, req)) { list_del_init(&req->list); return req; @@ -428,6 +443,16 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) return NULL; } +static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) +{ + return __io_get_deferred_req(ctx, &ctx->defer_list); +} + +static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx) +{ + return __io_get_deferred_req(ctx, &ctx->timeout_list); +} + static void __io_commit_cqring(struct io_ring_ctx *ctx) { struct io_rings *rings = ctx->rings; @@ -460,10 +485,36 @@ static inline void io_queue_async_work(struct io_ring_ctx *ctx, queue_work(ctx->sqo_wq[rw], &req->work); } +static void io_kill_timeout(struct io_kiocb *req) +{ + int ret; + + ret = hrtimer_try_to_cancel(&req->timeout.timer); + if (ret != -1) { + atomic_inc(&req->ctx->cq_timeouts); + list_del(&req->list); + io_cqring_fill_event(req->ctx, req->user_data, 0); + __io_free_req(req); + } +} + +static void io_kill_timeouts(struct io_ring_ctx *ctx) +{ + struct io_kiocb *req, *tmp; + + spin_lock_irq(&ctx->completion_lock); + list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list) + io_kill_timeout(req); + spin_unlock_irq(&ctx->completion_lock); +} + static void io_commit_cqring(struct io_ring_ctx *ctx) { struct io_kiocb *req; + while ((req = io_get_timeout_req(ctx)) != NULL) + io_kill_timeout(req); + __io_commit_cqring(ctx); while ((req = io_get_deferred_req(ctx)) != NULL) { @@ -1765,6 +1816,81 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) return ipt.error; } +static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) +{ + struct io_ring_ctx *ctx; + struct io_kiocb *req; + unsigned long flags; + + req = container_of(timer, struct io_kiocb, 
timeout.timer); + ctx = req->ctx; + atomic_inc(&ctx->cq_timeouts); + + spin_lock_irqsave(&ctx->completion_lock, flags); + list_del(&req->list); + + io_cqring_fill_event(ctx, req->user_data, -ETIME); + io_commit_cqring(ctx); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + + io_cqring_ev_posted(ctx); + + io_put_req(req); + return HRTIMER_NORESTART; +} + +static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + unsigned count, req_dist, tail_index; + struct io_ring_ctx *ctx = req->ctx; + struct list_head *entry; + struct timespec ts; + + if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) + return -EINVAL; + if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags || + sqe->len != 1) + return -EINVAL; + if (copy_from_user(&ts, (void __user *) (unsigned long) sqe->addr, + sizeof(ts))) + return -EFAULT; + + /* + * sqe->off holds how many events that need to occur for this + * timeout event to be satisfied. + */ + count = READ_ONCE(sqe->off); + if (!count) + count = 1; + + req->sequence = ctx->cached_sq_head + count - 1; + req->flags |= REQ_F_TIMEOUT; + + /* + * Insertion sort, ensuring the first entry in the list is always + * the one we need first. + */ + tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped; + req_dist = req->sequence - tail_index; + spin_lock_irq(&ctx->completion_lock); + list_for_each_prev(entry, &ctx->timeout_list) { + struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list); + unsigned dist; + + dist = nxt->sequence - tail_index; + if (req_dist >= dist) + break; + } + list_add(&req->list, entry); + spin_unlock_irq(&ctx->completion_lock); + + hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + req->timeout.timer.function = io_timeout_fn; + hrtimer_start(&req->timeout.timer, timespec_to_ktime(ts), + HRTIMER_MODE_REL); + return 0; +} + static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req, const struct io_uring_sqe *sqe) { @@ -1842,6 +1968,9 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, case IORING_OP_RECVMSG: ret = io_recvmsg(req, s->sqe, force_nonblock); break; + case IORING_OP_TIMEOUT: + ret = io_timeout(req, s->sqe); + break; default: ret = -EINVAL; break; @@ -2599,6 +2728,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, const sigset_t __user *sig, size_t sigsz) { struct io_rings *rings = ctx->rings; + unsigned nr_timeouts; int ret; if (io_cqring_events(rings) >= min_events) @@ -2617,7 +2747,15 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, return ret; } - ret = wait_event_interruptible(ctx->wait, io_cqring_events(rings) >= min_events); + nr_timeouts = atomic_read(&ctx->cq_timeouts); + /* + * Return if we have enough events, or if a timeout occured since + * we started waiting. For timeouts, we always want to return to + * userspace. 
+ */ + ret = wait_event_interruptible(ctx->wait, + io_cqring_events(rings) >= min_events || + atomic_read(&ctx->cq_timeouts) != nr_timeouts); restore_saved_sigmask_unless(ret == -ERESTARTSYS); if (ret == -ERESTARTSYS) ret = -EINTR; @@ -3288,6 +3426,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) percpu_ref_kill(&ctx->refs); mutex_unlock(&ctx->uring_lock); + io_kill_timeouts(ctx); io_poll_remove_all(ctx); io_iopoll_reap_events(ctx); wait_for_completion(&ctx->ctx_done); diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 96ee9d94b73e..ea57526a5b89 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -28,6 +28,7 @@ struct io_uring_sqe { __u16 poll_events; __u32 sync_range_flags; __u32 msg_flags; + __u32 timeout_flags; }; __u64 user_data; /* data to be passed back at completion time */ union { @@ -61,6 +62,7 @@ struct io_uring_sqe { #define IORING_OP_SYNC_FILE_RANGE 8 #define IORING_OP_SENDMSG 9 #define IORING_OP_RECVMSG 10 +#define IORING_OP_TIMEOUT 11 /* * sqe->fsync_flags -- cgit v1.2.3 From a62a8ef9d97da23762a588592c8b8eb50a8deb6a Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Tue, 12 Jun 2018 09:41:17 +0100 Subject: virtio-fs: add virtiofs filesystem MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a basic file system module for virtio-fs. This does not yet contain shared data support between host and guest or metadata coherency speedups. However it is already significantly faster than virtio-9p. Design Overview =============== With the goal of designing something with better performance and local file system semantics, a bunch of ideas were proposed. - Use fuse protocol (instead of 9p) for communication between guest and host. Guest kernel will be fuse client and a fuse server will run on host to serve the requests. - For data access inside guest, mmap portion of file in QEMU address space and guest accesses this memory using dax. That way guest page cache is bypassed and there is only one copy of data (on host). This will also enable mmap(MAP_SHARED) between guests. - For metadata coherency, there is a shared memory region which contains version number associated with metadata and any guest changing metadata updates version number and other guests refresh metadata on next access. This is yet to be implemented. How virtio-fs differs from existing approaches ============================================== The unique idea behind virtio-fs is to take advantage of the co-location of the virtual machine and hypervisor to avoid communication (vmexits). DAX allows file contents to be accessed without communication with the hypervisor. The shared memory region for metadata avoids communication in the common case where metadata is unchanged. By replacing expensive communication with cheaper shared memory accesses, we expect to achieve better performance than approaches based on network file system protocols. In addition, this also makes it easier to achieve local file system semantics (coherency). These techniques are not applicable to network file system protocols since the communications channel is bypassed by taking advantage of shared memory on a local machine. This is why we decided to build virtio-fs rather than focus on 9P or NFS. Caching Modes ============= Like virtio-9p, different caching modes are supported which determine the coherency level as well. The “cache=FOO” and “writeback” options control the level of coherence between the guest and host filesystems. 
- cache=none metadata, data and pathname lookup are not cached in guest. They are always fetched from host and any changes are immediately pushed to host. - cache=always metadata, data and pathname lookup are cached in guest and never expire. - cache=auto metadata and pathname lookup cache expires after a configured amount of time (default is 1 second). Data is cached while the file is open (close to open consistency). - writeback/no_writeback These options control the writeback strategy. If writeback is disabled, then normal writes will immediately be synchronized with the host fs. If writeback is enabled, then writes may be cached in the guest until the file is closed or an fsync(2) performed. This option has no effect on mmap-ed writes or writes going through the DAX mechanism. Signed-off-by: Stefan Hajnoczi Signed-off-by: Vivek Goyal Acked-by: Michael S. Tsirkin Signed-off-by: Miklos Szeredi --- fs/fuse/Kconfig | 11 + fs/fuse/Makefile | 1 + fs/fuse/fuse_i.h | 9 + fs/fuse/inode.c | 4 + fs/fuse/virtio_fs.c | 1195 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/virtio_fs.h | 19 + include/uapi/linux/virtio_ids.h | 1 + 7 files changed, 1240 insertions(+) create mode 100644 fs/fuse/virtio_fs.c create mode 100644 include/uapi/linux/virtio_fs.h (limited to 'include/uapi') diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig index 24fc5a5c1b97..0635cba19971 100644 --- a/fs/fuse/Kconfig +++ b/fs/fuse/Kconfig @@ -27,3 +27,14 @@ config CUSE If you want to develop or use a userspace character device based on CUSE, answer Y or M. + +config VIRTIO_FS + tristate "Virtio Filesystem" + depends on FUSE_FS + select VIRTIO + help + The Virtio Filesystem allows guests to mount file systems from the + host. + + If you want to share files between guests or with the host, answer Y + or M. diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile index 9485019c2a14..6419a2b3510d 100644 --- a/fs/fuse/Makefile +++ b/fs/fuse/Makefile @@ -5,5 +5,6 @@ obj-$(CONFIG_FUSE_FS) += fuse.o obj-$(CONFIG_CUSE) += cuse.o +obj-$(CONFIG_VIRTIO_FS) += virtio_fs.o fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index fc89cb40e874..956aeaf961ae 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -353,6 +353,10 @@ struct fuse_req { /** Used to wake up the task waiting for completion of request*/ wait_queue_head_t waitq; +#if IS_ENABLED(CONFIG_VIRTIO_FS) + /** virtio-fs's physically contiguous buffer for in and out args */ + void *argbuf; +#endif }; struct fuse_iqueue; @@ -383,6 +387,11 @@ struct fuse_iqueue_ops { */ void (*wake_pending_and_unlock)(struct fuse_iqueue *fiq) __releases(fiq->lock); + + /** + * Clean up when fuse_iqueue is destroyed + */ + void (*release)(struct fuse_iqueue *fiq); }; /** /dev/fuse input queue operations */ diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 10d193b24fb8..3d598a5bb5b5 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -630,6 +630,10 @@ EXPORT_SYMBOL_GPL(fuse_conn_init); void fuse_conn_put(struct fuse_conn *fc) { if (refcount_dec_and_test(&fc->count)) { + struct fuse_iqueue *fiq = &fc->iq; + + if (fiq->ops->release) + fiq->ops->release(fiq); put_pid_ns(fc->pid_ns); put_user_ns(fc->user_ns); fc->release(fc); diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c new file mode 100644 index 000000000000..6af3f131e468 --- /dev/null +++ b/fs/fuse/virtio_fs.c @@ -0,0 +1,1195 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * virtio-fs: Virtio Filesystem + * Copyright (C) 2018 Red Hat, Inc. 
+ */ + +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/virtio.h> +#include <linux/virtio_fs.h> +#include <linux/delay.h> +#include <linux/fs_context.h> +#include <linux/highmem.h> +#include "fuse_i.h" + +/* List of virtio-fs device instances and a lock for the list. Also provides + * mutual exclusion in device removal and mounting path + */ +static DEFINE_MUTEX(virtio_fs_mutex); +static LIST_HEAD(virtio_fs_instances); + +enum { + VQ_HIPRIO, + VQ_REQUEST +}; + +/* Per-virtqueue state */ +struct virtio_fs_vq { + spinlock_t lock; + struct virtqueue *vq; /* protected by ->lock */ + struct work_struct done_work; + struct list_head queued_reqs; + struct delayed_work dispatch_work; + struct fuse_dev *fud; + bool connected; + long in_flight; + char name[24]; +} ____cacheline_aligned_in_smp; + +/* A virtio-fs device instance */ +struct virtio_fs { + struct kref refcount; + struct list_head list; /* on virtio_fs_instances */ + char *tag; + struct virtio_fs_vq *vqs; + unsigned int nvqs; /* number of virtqueues */ + unsigned int num_request_queues; /* number of request queues */ +}; + +struct virtio_fs_forget { + struct fuse_in_header ih; + struct fuse_forget_in arg; + /* This request can be temporarily queued on virt queue */ + struct list_head list; +}; + +static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq) +{ + struct virtio_fs *fs = vq->vdev->priv; + + return &fs->vqs[vq->index]; +} + +static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq) +{ + return &vq_to_fsvq(vq)->fud->pq; +} + +static void release_virtio_fs_obj(struct kref *ref) +{ + struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount); + + kfree(vfs->vqs); + kfree(vfs); +} + +/* Make sure virtiofs_mutex is held */ +static void virtio_fs_put(struct virtio_fs *fs) +{ + kref_put(&fs->refcount, release_virtio_fs_obj); +} + +static void virtio_fs_fiq_release(struct fuse_iqueue *fiq) +{ + struct virtio_fs *vfs = fiq->priv; + + mutex_lock(&virtio_fs_mutex); + virtio_fs_put(vfs); + mutex_unlock(&virtio_fs_mutex); +} + +static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq) +{ + WARN_ON(fsvq->in_flight < 0); + + /* Wait for in flight requests to finish. */ + while (1) { + spin_lock(&fsvq->lock); + if (!fsvq->in_flight) { + spin_unlock(&fsvq->lock); + break; + } + spin_unlock(&fsvq->lock); + /* TODO use completion instead of timeout */ + usleep_range(1000, 2000); + } + + flush_work(&fsvq->done_work); + flush_delayed_work(&fsvq->dispatch_work); +} + +static inline void drain_hiprio_queued_reqs(struct virtio_fs_vq *fsvq) +{ + struct virtio_fs_forget *forget; + + spin_lock(&fsvq->lock); + while (1) { + forget = list_first_entry_or_null(&fsvq->queued_reqs, + struct virtio_fs_forget, list); + if (!forget) + break; + list_del(&forget->list); + kfree(forget); + } + spin_unlock(&fsvq->lock); +} + +static void virtio_fs_drain_all_queues(struct virtio_fs *fs) +{ + struct virtio_fs_vq *fsvq; + int i; + + for (i = 0; i < fs->nvqs; i++) { + fsvq = &fs->vqs[i]; + if (i == VQ_HIPRIO) + drain_hiprio_queued_reqs(fsvq); + + virtio_fs_drain_queue(fsvq); + } +} + +static void virtio_fs_start_all_queues(struct virtio_fs *fs) +{ + struct virtio_fs_vq *fsvq; + int i; + + for (i = 0; i < fs->nvqs; i++) { + fsvq = &fs->vqs[i]; + spin_lock(&fsvq->lock); + fsvq->connected = true; + spin_unlock(&fsvq->lock); + } +} + +/* Add a new instance to the list or return -EEXIST if tag name exists */ +static int virtio_fs_add_instance(struct virtio_fs *fs) +{ + struct virtio_fs *fs2; + bool duplicate = false; + + mutex_lock(&virtio_fs_mutex); + + list_for_each_entry(fs2, &virtio_fs_instances, list) { + if (strcmp(fs->tag, fs2->tag) == 0) + 
duplicate = true; + } + + if (!duplicate) + list_add_tail(&fs->list, &virtio_fs_instances); + + mutex_unlock(&virtio_fs_mutex); + + if (duplicate) + return -EEXIST; + return 0; +} + +/* Return the virtio_fs with a given tag, or NULL */ +static struct virtio_fs *virtio_fs_find_instance(const char *tag) +{ + struct virtio_fs *fs; + + mutex_lock(&virtio_fs_mutex); + + list_for_each_entry(fs, &virtio_fs_instances, list) { + if (strcmp(fs->tag, tag) == 0) { + kref_get(&fs->refcount); + goto found; + } + } + + fs = NULL; /* not found */ + +found: + mutex_unlock(&virtio_fs_mutex); + + return fs; +} + +static void virtio_fs_free_devs(struct virtio_fs *fs) +{ + unsigned int i; + + for (i = 0; i < fs->nvqs; i++) { + struct virtio_fs_vq *fsvq = &fs->vqs[i]; + + if (!fsvq->fud) + continue; + + fuse_dev_free(fsvq->fud); + fsvq->fud = NULL; + } +} + +/* Read filesystem name from virtio config into fs->tag (must kfree()). */ +static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs) +{ + char tag_buf[sizeof_field(struct virtio_fs_config, tag)]; + char *end; + size_t len; + + virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag), + &tag_buf, sizeof(tag_buf)); + end = memchr(tag_buf, '\0', sizeof(tag_buf)); + if (end == tag_buf) + return -EINVAL; /* empty tag */ + if (!end) + end = &tag_buf[sizeof(tag_buf)]; + + len = end - tag_buf; + fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL); + if (!fs->tag) + return -ENOMEM; + memcpy(fs->tag, tag_buf, len); + fs->tag[len] = '\0'; + return 0; +} + +/* Work function for hiprio completion */ +static void virtio_fs_hiprio_done_work(struct work_struct *work) +{ + struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, + done_work); + struct virtqueue *vq = fsvq->vq; + + /* Free completed FUSE_FORGET requests */ + spin_lock(&fsvq->lock); + do { + unsigned int len; + void *req; + + virtqueue_disable_cb(vq); + + while ((req = virtqueue_get_buf(vq, &len)) != NULL) { + kfree(req); + fsvq->in_flight--; + } + } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq))); + spin_unlock(&fsvq->lock); +} + +static void virtio_fs_dummy_dispatch_work(struct work_struct *work) +{ +} + +static void virtio_fs_hiprio_dispatch_work(struct work_struct *work) +{ + struct virtio_fs_forget *forget; + struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, + dispatch_work.work); + struct virtqueue *vq = fsvq->vq; + struct scatterlist sg; + struct scatterlist *sgs[] = {&sg}; + bool notify; + int ret; + + pr_debug("virtio-fs: worker %s called.\n", __func__); + while (1) { + spin_lock(&fsvq->lock); + forget = list_first_entry_or_null(&fsvq->queued_reqs, + struct virtio_fs_forget, list); + if (!forget) { + spin_unlock(&fsvq->lock); + return; + } + + list_del(&forget->list); + if (!fsvq->connected) { + spin_unlock(&fsvq->lock); + kfree(forget); + continue; + } + + sg_init_one(&sg, forget, sizeof(*forget)); + + /* Enqueue the request */ + dev_dbg(&vq->vdev->dev, "%s\n", __func__); + ret = virtqueue_add_sgs(vq, sgs, 1, 0, forget, GFP_ATOMIC); + if (ret < 0) { + if (ret == -ENOMEM || ret == -ENOSPC) { + pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n", + ret); + list_add_tail(&forget->list, + &fsvq->queued_reqs); + schedule_delayed_work(&fsvq->dispatch_work, + msecs_to_jiffies(1)); + } else { + pr_debug("virtio-fs: Could not queue FORGET: err=%d. 
Dropping it.\n", + ret); + kfree(forget); + } + spin_unlock(&fsvq->lock); + return; + } + + fsvq->in_flight++; + notify = virtqueue_kick_prepare(vq); + spin_unlock(&fsvq->lock); + + if (notify) + virtqueue_notify(vq); + pr_debug("virtio-fs: worker %s dispatched one forget request.\n", + __func__); + } +} + +/* Allocate and copy args into req->argbuf */ +static int copy_args_to_argbuf(struct fuse_req *req) +{ + struct fuse_args *args = req->args; + unsigned int offset = 0; + unsigned int num_in; + unsigned int num_out; + unsigned int len; + unsigned int i; + + num_in = args->in_numargs - args->in_pages; + num_out = args->out_numargs - args->out_pages; + len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) + + fuse_len_args(num_out, args->out_args); + + req->argbuf = kmalloc(len, GFP_ATOMIC); + if (!req->argbuf) + return -ENOMEM; + + for (i = 0; i < num_in; i++) { + memcpy(req->argbuf + offset, + args->in_args[i].value, + args->in_args[i].size); + offset += args->in_args[i].size; + } + + return 0; +} + +/* Copy args out of and free req->argbuf */ +static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req) +{ + unsigned int remaining; + unsigned int offset; + unsigned int num_in; + unsigned int num_out; + unsigned int i; + + remaining = req->out.h.len - sizeof(req->out.h); + num_in = args->in_numargs - args->in_pages; + num_out = args->out_numargs - args->out_pages; + offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args); + + for (i = 0; i < num_out; i++) { + unsigned int argsize = args->out_args[i].size; + + if (args->out_argvar && + i == args->out_numargs - 1 && + argsize > remaining) { + argsize = remaining; + } + + memcpy(args->out_args[i].value, req->argbuf + offset, argsize); + offset += argsize; + + if (i != args->out_numargs - 1) + remaining -= argsize; + } + + /* Store the actual size of the variable-length arg */ + if (args->out_argvar) + args->out_args[args->out_numargs - 1].size = remaining; + + kfree(req->argbuf); + req->argbuf = NULL; +} + +/* Work function for request completion */ +static void virtio_fs_requests_done_work(struct work_struct *work) +{ + struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq, + done_work); + struct fuse_pqueue *fpq = &fsvq->fud->pq; + struct fuse_conn *fc = fsvq->fud->fc; + struct virtqueue *vq = fsvq->vq; + struct fuse_req *req; + struct fuse_args_pages *ap; + struct fuse_req *next; + struct fuse_args *args; + unsigned int len, i, thislen; + struct page *page; + LIST_HEAD(reqs); + + /* Collect completed requests off the virtqueue */ + spin_lock(&fsvq->lock); + do { + virtqueue_disable_cb(vq); + + while ((req = virtqueue_get_buf(vq, &len)) != NULL) { + spin_lock(&fpq->lock); + list_move_tail(&req->list, &reqs); + spin_unlock(&fpq->lock); + } + } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq))); + spin_unlock(&fsvq->lock); + + /* End requests */ + list_for_each_entry_safe(req, next, &reqs, list) { + /* + * TODO verify that server properly follows FUSE protocol + * (oh.uniq, oh.len) + */ + args = req->args; + copy_args_from_argbuf(args, req); + + if (args->out_pages && args->page_zeroing) { + len = args->out_args[args->out_numargs - 1].size; + ap = container_of(args, typeof(*ap), args); + for (i = 0; i < ap->num_pages; i++) { + thislen = ap->descs[i].length; + if (len < thislen) { + WARN_ON(ap->descs[i].offset); + page = ap->pages[i]; + zero_user_segment(page, len, thislen); + len = 0; + } else { + len -= thislen; + } + } + } + + spin_lock(&fpq->lock); + clear_bit(FR_SENT, 
&req->flags); + list_del_init(&req->list); + spin_unlock(&fpq->lock); + + fuse_request_end(fc, req); + spin_lock(&fsvq->lock); + fsvq->in_flight--; + spin_unlock(&fsvq->lock); + } +} + +/* Virtqueue interrupt handler */ +static void virtio_fs_vq_done(struct virtqueue *vq) +{ + struct virtio_fs_vq *fsvq = vq_to_fsvq(vq); + + dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name); + + schedule_work(&fsvq->done_work); +} + +/* Initialize virtqueues */ +static int virtio_fs_setup_vqs(struct virtio_device *vdev, + struct virtio_fs *fs) +{ + struct virtqueue **vqs; + vq_callback_t **callbacks; + const char **names; + unsigned int i; + int ret = 0; + + virtio_cread(vdev, struct virtio_fs_config, num_request_queues, + &fs->num_request_queues); + if (fs->num_request_queues == 0) + return -EINVAL; + + fs->nvqs = 1 + fs->num_request_queues; + fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL); + if (!fs->vqs) + return -ENOMEM; + + vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL); + callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]), + GFP_KERNEL); + names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL); + if (!vqs || !callbacks || !names) { + ret = -ENOMEM; + goto out; + } + + callbacks[VQ_HIPRIO] = virtio_fs_vq_done; + snprintf(fs->vqs[VQ_HIPRIO].name, sizeof(fs->vqs[VQ_HIPRIO].name), + "hiprio"); + names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name; + INIT_WORK(&fs->vqs[VQ_HIPRIO].done_work, virtio_fs_hiprio_done_work); + INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].queued_reqs); + INIT_DELAYED_WORK(&fs->vqs[VQ_HIPRIO].dispatch_work, + virtio_fs_hiprio_dispatch_work); + spin_lock_init(&fs->vqs[VQ_HIPRIO].lock); + + /* Initialize the requests virtqueues */ + for (i = VQ_REQUEST; i < fs->nvqs; i++) { + spin_lock_init(&fs->vqs[i].lock); + INIT_WORK(&fs->vqs[i].done_work, virtio_fs_requests_done_work); + INIT_DELAYED_WORK(&fs->vqs[i].dispatch_work, + virtio_fs_dummy_dispatch_work); + INIT_LIST_HEAD(&fs->vqs[i].queued_reqs); + snprintf(fs->vqs[i].name, sizeof(fs->vqs[i].name), + "requests.%u", i - VQ_REQUEST); + callbacks[i] = virtio_fs_vq_done; + names[i] = fs->vqs[i].name; + } + + ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL); + if (ret < 0) + goto out; + + for (i = 0; i < fs->nvqs; i++) + fs->vqs[i].vq = vqs[i]; + + virtio_fs_start_all_queues(fs); +out: + kfree(names); + kfree(callbacks); + kfree(vqs); + if (ret) + kfree(fs->vqs); + return ret; +} + +/* Free virtqueues (device must already be reset) */ +static void virtio_fs_cleanup_vqs(struct virtio_device *vdev, + struct virtio_fs *fs) +{ + vdev->config->del_vqs(vdev); +} + +static int virtio_fs_probe(struct virtio_device *vdev) +{ + struct virtio_fs *fs; + int ret; + + fs = kzalloc(sizeof(*fs), GFP_KERNEL); + if (!fs) + return -ENOMEM; + kref_init(&fs->refcount); + vdev->priv = fs; + + ret = virtio_fs_read_tag(vdev, fs); + if (ret < 0) + goto out; + + ret = virtio_fs_setup_vqs(vdev, fs); + if (ret < 0) + goto out; + + /* TODO vq affinity */ + + /* Bring the device online in case the filesystem is mounted and + * requests need to be sent before we return. 
+ */ + virtio_device_ready(vdev); + + ret = virtio_fs_add_instance(fs); + if (ret < 0) + goto out_vqs; + + return 0; + +out_vqs: + vdev->config->reset(vdev); + virtio_fs_cleanup_vqs(vdev, fs); + +out: + vdev->priv = NULL; + kfree(fs); + return ret; +} + +static void virtio_fs_stop_all_queues(struct virtio_fs *fs) +{ + struct virtio_fs_vq *fsvq; + int i; + + for (i = 0; i < fs->nvqs; i++) { + fsvq = &fs->vqs[i]; + spin_lock(&fsvq->lock); + fsvq->connected = false; + spin_unlock(&fsvq->lock); + } +} + +static void virtio_fs_remove(struct virtio_device *vdev) +{ + struct virtio_fs *fs = vdev->priv; + + mutex_lock(&virtio_fs_mutex); + /* This device is going away. No one should get new reference */ + list_del_init(&fs->list); + virtio_fs_stop_all_queues(fs); + virtio_fs_drain_all_queues(fs); + vdev->config->reset(vdev); + virtio_fs_cleanup_vqs(vdev, fs); + + vdev->priv = NULL; + /* Put device reference on virtio_fs object */ + virtio_fs_put(fs); + mutex_unlock(&virtio_fs_mutex); +} + +#ifdef CONFIG_PM_SLEEP +static int virtio_fs_freeze(struct virtio_device *vdev) +{ + /* TODO need to save state here */ + pr_warn("virtio-fs: suspend/resume not yet supported\n"); + return -EOPNOTSUPP; +} + +static int virtio_fs_restore(struct virtio_device *vdev) +{ + /* TODO need to restore state here */ + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +const static struct virtio_device_id id_table[] = { + { VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID }, + {}, +}; + +const static unsigned int feature_table[] = {}; + +static struct virtio_driver virtio_fs_driver = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .feature_table = feature_table, + .feature_table_size = ARRAY_SIZE(feature_table), + .probe = virtio_fs_probe, + .remove = virtio_fs_remove, +#ifdef CONFIG_PM_SLEEP + .freeze = virtio_fs_freeze, + .restore = virtio_fs_restore, +#endif +}; + +static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq) +__releases(fiq->lock) +{ + struct fuse_forget_link *link; + struct virtio_fs_forget *forget; + struct scatterlist sg; + struct scatterlist *sgs[] = {&sg}; + struct virtio_fs *fs; + struct virtqueue *vq; + struct virtio_fs_vq *fsvq; + bool notify; + u64 unique; + int ret; + + link = fuse_dequeue_forget(fiq, 1, NULL); + unique = fuse_get_unique(fiq); + + fs = fiq->priv; + fsvq = &fs->vqs[VQ_HIPRIO]; + spin_unlock(&fiq->lock); + + /* Allocate a buffer for the request */ + forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL); + + forget->ih = (struct fuse_in_header){ + .opcode = FUSE_FORGET, + .nodeid = link->forget_one.nodeid, + .unique = unique, + .len = sizeof(*forget), + }; + forget->arg = (struct fuse_forget_in){ + .nlookup = link->forget_one.nlookup, + }; + + sg_init_one(&sg, forget, sizeof(*forget)); + + /* Enqueue the request */ + spin_lock(&fsvq->lock); + + if (!fsvq->connected) { + kfree(forget); + spin_unlock(&fsvq->lock); + goto out; + } + + vq = fsvq->vq; + dev_dbg(&vq->vdev->dev, "%s\n", __func__); + + ret = virtqueue_add_sgs(vq, sgs, 1, 0, forget, GFP_ATOMIC); + if (ret < 0) { + if (ret == -ENOMEM || ret == -ENOSPC) { + pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later.\n", + ret); + list_add_tail(&forget->list, &fsvq->queued_reqs); + schedule_delayed_work(&fsvq->dispatch_work, + msecs_to_jiffies(1)); + } else { + pr_debug("virtio-fs: Could not queue FORGET: err=%d. 
Dropping it.\n", + ret); + kfree(forget); + } + spin_unlock(&fsvq->lock); + goto out; + } + + fsvq->in_flight++; + notify = virtqueue_kick_prepare(vq); + + spin_unlock(&fsvq->lock); + + if (notify) + virtqueue_notify(vq); +out: + kfree(link); +} + +static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq) +__releases(fiq->lock) +{ + /* + * TODO interrupts. + * + * Normal fs operations on a local filesystems aren't interruptible. + * Exceptions are blocking lock operations; for example fcntl(F_SETLKW) + * with shared lock between host and guest. + */ + spin_unlock(&fiq->lock); +} + +/* Return the number of scatter-gather list elements required */ +static unsigned int sg_count_fuse_req(struct fuse_req *req) +{ + struct fuse_args *args = req->args; + struct fuse_args_pages *ap = container_of(args, typeof(*ap), args); + unsigned int total_sgs = 1 /* fuse_in_header */; + + if (args->in_numargs - args->in_pages) + total_sgs += 1; + + if (args->in_pages) + total_sgs += ap->num_pages; + + if (!test_bit(FR_ISREPLY, &req->flags)) + return total_sgs; + + total_sgs += 1 /* fuse_out_header */; + + if (args->out_numargs - args->out_pages) + total_sgs += 1; + + if (args->out_pages) + total_sgs += ap->num_pages; + + return total_sgs; +} + +/* Add pages to scatter-gather list and return number of elements used */ +static unsigned int sg_init_fuse_pages(struct scatterlist *sg, + struct page **pages, + struct fuse_page_desc *page_descs, + unsigned int num_pages, + unsigned int total_len) +{ + unsigned int i; + unsigned int this_len; + + for (i = 0; i < num_pages && total_len; i++) { + sg_init_table(&sg[i], 1); + this_len = min(page_descs[i].length, total_len); + sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset); + total_len -= this_len; + } + + return i; +} + +/* Add args to scatter-gather list and return number of elements used */ +static unsigned int sg_init_fuse_args(struct scatterlist *sg, + struct fuse_req *req, + struct fuse_arg *args, + unsigned int numargs, + bool argpages, + void *argbuf, + unsigned int *len_used) +{ + struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args); + unsigned int total_sgs = 0; + unsigned int len; + + len = fuse_len_args(numargs - argpages, args); + if (len) + sg_init_one(&sg[total_sgs++], argbuf, len); + + if (argpages) + total_sgs += sg_init_fuse_pages(&sg[total_sgs], + ap->pages, ap->descs, + ap->num_pages, + args[numargs - 1].size); + + if (len_used) + *len_used = len; + + return total_sgs; +} + +/* Add a request to a virtqueue and kick the device */ +static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq, + struct fuse_req *req) +{ + /* requests need at least 4 elements */ + struct scatterlist *stack_sgs[6]; + struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)]; + struct scatterlist **sgs = stack_sgs; + struct scatterlist *sg = stack_sg; + struct virtqueue *vq; + struct fuse_args *args = req->args; + unsigned int argbuf_used = 0; + unsigned int out_sgs = 0; + unsigned int in_sgs = 0; + unsigned int total_sgs; + unsigned int i; + int ret; + bool notify; + + /* Does the sglist fit on the stack? 
*/ + total_sgs = sg_count_fuse_req(req); + if (total_sgs > ARRAY_SIZE(stack_sgs)) { + sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC); + sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC); + if (!sgs || !sg) { + ret = -ENOMEM; + goto out; + } + } + + /* Use a bounce buffer since stack args cannot be mapped */ + ret = copy_args_to_argbuf(req); + if (ret < 0) + goto out; + + /* Request elements */ + sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h)); + out_sgs += sg_init_fuse_args(&sg[out_sgs], req, + (struct fuse_arg *)args->in_args, + args->in_numargs, args->in_pages, + req->argbuf, &argbuf_used); + + /* Reply elements */ + if (test_bit(FR_ISREPLY, &req->flags)) { + sg_init_one(&sg[out_sgs + in_sgs++], + &req->out.h, sizeof(req->out.h)); + in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req, + args->out_args, args->out_numargs, + args->out_pages, + req->argbuf + argbuf_used, NULL); + } + + WARN_ON(out_sgs + in_sgs != total_sgs); + + for (i = 0; i < total_sgs; i++) + sgs[i] = &sg[i]; + + spin_lock(&fsvq->lock); + + if (!fsvq->connected) { + spin_unlock(&fsvq->lock); + ret = -ENOTCONN; + goto out; + } + + vq = fsvq->vq; + ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC); + if (ret < 0) { + spin_unlock(&fsvq->lock); + goto out; + } + + fsvq->in_flight++; + notify = virtqueue_kick_prepare(vq); + + spin_unlock(&fsvq->lock); + + if (notify) + virtqueue_notify(vq); + +out: + if (ret < 0 && req->argbuf) { + kfree(req->argbuf); + req->argbuf = NULL; + } + if (sgs != stack_sgs) { + kfree(sgs); + kfree(sg); + } + + return ret; +} + +static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq) +__releases(fiq->lock) +{ + unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */ + struct virtio_fs *fs; + struct fuse_conn *fc; + struct fuse_req *req; + struct fuse_pqueue *fpq; + int ret; + + WARN_ON(list_empty(&fiq->pending)); + req = list_last_entry(&fiq->pending, struct fuse_req, list); + clear_bit(FR_PENDING, &req->flags); + list_del_init(&req->list); + WARN_ON(!list_empty(&fiq->pending)); + spin_unlock(&fiq->lock); + + fs = fiq->priv; + fc = fs->vqs[queue_id].fud->fc; + + pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n", + __func__, req->in.h.opcode, req->in.h.unique, + req->in.h.nodeid, req->in.h.len, + fuse_len_args(req->args->out_numargs, req->args->out_args)); + + fpq = &fs->vqs[queue_id].fud->pq; + spin_lock(&fpq->lock); + if (!fpq->connected) { + spin_unlock(&fpq->lock); + req->out.h.error = -ENODEV; + pr_err("virtio-fs: %s disconnected\n", __func__); + fuse_request_end(fc, req); + return; + } + list_add_tail(&req->list, fpq->processing); + spin_unlock(&fpq->lock); + set_bit(FR_SENT, &req->flags); + /* matches barrier in request_wait_answer() */ + smp_mb__after_atomic(); + +retry: + ret = virtio_fs_enqueue_req(&fs->vqs[queue_id], req); + if (ret < 0) { + if (ret == -ENOMEM || ret == -ENOSPC) { + /* Virtqueue full. 
Retry submission */ + /* TODO use completion instead of timeout */ + usleep_range(20, 30); + goto retry; + } + req->out.h.error = ret; + pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret); + spin_lock(&fpq->lock); + clear_bit(FR_SENT, &req->flags); + list_del_init(&req->list); + spin_unlock(&fpq->lock); + fuse_request_end(fc, req); + return; + } +} + +static const struct fuse_iqueue_ops virtio_fs_fiq_ops = { + .wake_forget_and_unlock = virtio_fs_wake_forget_and_unlock, + .wake_interrupt_and_unlock = virtio_fs_wake_interrupt_and_unlock, + .wake_pending_and_unlock = virtio_fs_wake_pending_and_unlock, + .release = virtio_fs_fiq_release, +}; + +static int virtio_fs_fill_super(struct super_block *sb) +{ + struct fuse_conn *fc = get_fuse_conn_super(sb); + struct virtio_fs *fs = fc->iq.priv; + unsigned int i; + int err; + struct fuse_fs_context ctx = { + .rootmode = S_IFDIR, + .default_permissions = 1, + .allow_other = 1, + .max_read = UINT_MAX, + .blksize = 512, + .destroy = true, + .no_control = true, + .no_force_umount = true, + }; + + mutex_lock(&virtio_fs_mutex); + + /* After holding mutex, make sure virtiofs device is still there. + * Though we are holding a reference to it, driver ->remove might + * still have cleaned up virtual queues. In that case bail out. + */ + err = -EINVAL; + if (list_empty(&fs->list)) { + pr_info("virtio-fs: tag <%s> not found\n", fs->tag); + goto err; + } + + err = -ENOMEM; + /* Allocate fuse_dev for hiprio and notification queues */ + for (i = 0; i < VQ_REQUEST; i++) { + struct virtio_fs_vq *fsvq = &fs->vqs[i]; + + fsvq->fud = fuse_dev_alloc(); + if (!fsvq->fud) + goto err_free_fuse_devs; + } + + ctx.fudptr = (void **)&fs->vqs[VQ_REQUEST].fud; + err = fuse_fill_super_common(sb, &ctx); + if (err < 0) + goto err_free_fuse_devs; + + fc = fs->vqs[VQ_REQUEST].fud->fc; + + for (i = 0; i < fs->nvqs; i++) { + struct virtio_fs_vq *fsvq = &fs->vqs[i]; + + if (i == VQ_REQUEST) + continue; /* already initialized */ + fuse_dev_install(fsvq->fud, fc); + } + + /* Previous unmount will stop all queues. Start these again */ + virtio_fs_start_all_queues(fs); + fuse_send_init(fc); + mutex_unlock(&virtio_fs_mutex); + return 0; + +err_free_fuse_devs: + virtio_fs_free_devs(fs); +err: + mutex_unlock(&virtio_fs_mutex); + return err; +} + +static void virtio_kill_sb(struct super_block *sb) +{ + struct fuse_conn *fc = get_fuse_conn_super(sb); + struct virtio_fs *vfs; + struct virtio_fs_vq *fsvq; + + /* If mount failed, we can still be called without any fc */ + if (!fc) + return fuse_kill_sb_anon(sb); + + vfs = fc->iq.priv; + fsvq = &vfs->vqs[VQ_HIPRIO]; + + /* Stop forget queue. Soon destroy will be sent */ + spin_lock(&fsvq->lock); + fsvq->connected = false; + spin_unlock(&fsvq->lock); + virtio_fs_drain_all_queues(vfs); + + fuse_kill_sb_anon(sb); + + /* fuse_kill_sb_anon() must have sent destroy. Stop all queues + * and drain one more time and free fuse devices. Freeing fuse + * devices will drop their reference on fuse_conn and that in + * turn will drop its reference on virtio_fs object.
+ */ + virtio_fs_stop_all_queues(vfs); + virtio_fs_drain_all_queues(vfs); + virtio_fs_free_devs(vfs); +} + +static int virtio_fs_test_super(struct super_block *sb, + struct fs_context *fsc) +{ + struct fuse_conn *fc = fsc->s_fs_info; + + return fc->iq.priv == get_fuse_conn_super(sb)->iq.priv; +} + +static int virtio_fs_set_super(struct super_block *sb, + struct fs_context *fsc) +{ + int err; + + err = get_anon_bdev(&sb->s_dev); + if (!err) + fuse_conn_get(fsc->s_fs_info); + + return err; +} + +static int virtio_fs_get_tree(struct fs_context *fsc) +{ + struct virtio_fs *fs; + struct super_block *sb; + struct fuse_conn *fc; + int err; + + /* This gets a reference on virtio_fs object. This ptr gets installed + * in fc->iq->priv. Once fuse_conn is going away, it calls ->put() + * to drop the reference to this object. + */ + fs = virtio_fs_find_instance(fsc->source); + if (!fs) { + pr_info("virtio-fs: tag <%s> not found\n", fsc->source); + return -EINVAL; + } + + fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL); + if (!fc) { + mutex_lock(&virtio_fs_mutex); + virtio_fs_put(fs); + mutex_unlock(&virtio_fs_mutex); + return -ENOMEM; + } + + fuse_conn_init(fc, get_user_ns(current_user_ns()), &virtio_fs_fiq_ops, + fs); + fc->release = fuse_free_conn; + fc->delete_stale = true; + + fsc->s_fs_info = fc; + sb = sget_fc(fsc, virtio_fs_test_super, virtio_fs_set_super); + fuse_conn_put(fc); + if (IS_ERR(sb)) + return PTR_ERR(sb); + + if (!sb->s_root) { + err = virtio_fs_fill_super(sb); + if (err) { + deactivate_locked_super(sb); + return err; + } + + sb->s_flags |= SB_ACTIVE; + } + + WARN_ON(fsc->root); + fsc->root = dget(sb->s_root); + return 0; +} + +static const struct fs_context_operations virtio_fs_context_ops = { + .get_tree = virtio_fs_get_tree, +}; + +static int virtio_fs_init_fs_context(struct fs_context *fsc) +{ + fsc->ops = &virtio_fs_context_ops; + return 0; +} + +static struct file_system_type virtio_fs_type = { + .owner = THIS_MODULE, + .name = "virtiofs", + .init_fs_context = virtio_fs_init_fs_context, + .kill_sb = virtio_kill_sb, +}; + +static int __init virtio_fs_init(void) +{ + int ret; + + ret = register_virtio_driver(&virtio_fs_driver); + if (ret < 0) + return ret; + + ret = register_filesystem(&virtio_fs_type); + if (ret < 0) { + unregister_virtio_driver(&virtio_fs_driver); + return ret; + } + + return 0; +} +module_init(virtio_fs_init); + +static void __exit virtio_fs_exit(void) +{ + unregister_filesystem(&virtio_fs_type); + unregister_virtio_driver(&virtio_fs_driver); +} +module_exit(virtio_fs_exit); + +MODULE_AUTHOR("Stefan Hajnoczi "); +MODULE_DESCRIPTION("Virtio Filesystem"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_FS(KBUILD_MODNAME); +MODULE_DEVICE_TABLE(virtio, id_table); diff --git a/include/uapi/linux/virtio_fs.h b/include/uapi/linux/virtio_fs.h new file mode 100644 index 000000000000..b02eb2ac3d99 --- /dev/null +++ b/include/uapi/linux/virtio_fs.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ + +#ifndef _UAPI_LINUX_VIRTIO_FS_H +#define _UAPI_LINUX_VIRTIO_FS_H + +#include <linux/types.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_config.h> +#include <linux/virtio_types.h> + +struct virtio_fs_config { + /* Filesystem name (UTF-8, not NUL-terminated, padded with NULs) */ + __u8 tag[36]; + + /* Number of request queues */ + __u32 num_request_queues; +} __attribute__((packed)); + +#endif /* _UAPI_LINUX_VIRTIO_FS_H */ diff --git a/include/uapi/linux/virtio_ids.h b/include/uapi/linux/virtio_ids.h index 348fd0176f75..585e07b27333 --- a/include/uapi/linux/virtio_ids.h +++
b/include/uapi/linux/virtio_ids.h @@ -44,6 +44,7 @@ #define VIRTIO_ID_VSOCK 19 /* virtio vsock transport */ #define VIRTIO_ID_CRYPTO 20 /* virtio crypto */ #define VIRTIO_ID_IOMMU 23 /* virtio IOMMU */ +#define VIRTIO_ID_FS 26 /* virtio filesystem */ #define VIRTIO_ID_PMEM 27 /* virtio pmem */ #endif /* _LINUX_VIRTIO_IDS_H */ -- cgit v1.2.3 From a0791f0df7d212c245761538b17a9ea93607b667 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 17 Sep 2019 10:45:38 -0700 Subject: bpf: fix BTF limits vmlinux BTF has more than 64k types. Its string section is also at an offset larger than 64k. Adjust both limits so the in-kernel BTF verifier can successfully parse in-kernel BTF. Fixes: 69b693f0aefa ("bpf: btf: Introduce BPF Type Format (BTF)") Signed-off-by: Alexei Starovoitov Acked-by: Martin KaFai Lau Signed-off-by: Daniel Borkmann --- include/uapi/linux/btf.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h index 63ae4a39e58b..c02dec97e1ce 100644 --- a/include/uapi/linux/btf.h +++ b/include/uapi/linux/btf.h @@ -22,9 +22,9 @@ struct btf_header { }; /* Max # of type identifier */ -#define BTF_MAX_TYPE 0x0000ffff +#define BTF_MAX_TYPE 0x000fffff /* Max offset into the string section */ -#define BTF_MAX_NAME_OFFSET 0x0000ffff +#define BTF_MAX_NAME_OFFSET 0x00ffffff /* Max # of struct/union/enum members or func args */ #define BTF_MAX_VLEN 0xffff -- cgit v1.2.3 From dee04eee9182dae91801d0db5bb2acfd5365a749 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Wed, 4 Sep 2019 16:13:26 +0000 Subject: KVM: RISC-V: Add KVM_REG_RISCV for ONE_REG interface We will be using the ONE_REG interface to access VCPU registers from user space, hence we add KVM_REG_RISCV for RISC-V VCPU registers. Signed-off-by: Anup Patel Acked-by: Paolo Bonzini Reviewed-by: Paolo Bonzini Reviewed-by: Alexander Graf Signed-off-by: Paul Walmsley --- include/uapi/linux/kvm.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 233efbb1c81c..18a2b43097f8 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1145,6 +1145,7 @@ struct kvm_dirty_tlb { #define KVM_REG_S390 0x5000000000000000ULL #define KVM_REG_ARM64 0x6000000000000000ULL #define KVM_REG_MIPS 0x7000000000000000ULL +#define KVM_REG_RISCV 0x8000000000000000ULL #define KVM_REG_SIZE_SHIFT 52 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL -- cgit v1.2.3 From 344c6c804703841d2bff4d68d7390ba726053874 Mon Sep 17 00:00:00 2001 From: Tianyu Lan Date: Thu, 22 Aug 2019 22:30:20 +0800 Subject: KVM/Hyper-V: Add new KVM capability KVM_CAP_HYPERV_DIRECT_TLBFLUSH The Hyper-V direct TLB flush function should be enabled for guests that use only Hyper-V hypercalls. A user-space hypervisor (e.g. QEMU) can disable KVM identification in CPUID and expose only the Hyper-V identification to ensure this precondition. Add the new KVM capability KVM_CAP_HYPERV_DIRECT_TLBFLUSH so user space can enable the Hyper-V direct TLB flush function; it is disabled in KVM by default.
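For illustration (not part of the original patch), a minimal user-space sketch of enabling the capability on a vCPU; the vcpu_fd wiring via KVM_CREATE_VCPU is assumed and error handling is trimmed:

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Hedged sketch: vcpu_fd is assumed to come from KVM_CREATE_VCPU. */
  static int enable_hv_direct_tlbflush(int vcpu_fd)
  {
          struct kvm_enable_cap cap;

          memset(&cap, 0, sizeof(cap));
          cap.cap = KVM_CAP_HYPERV_DIRECT_TLBFLUSH;

          /* Fails with ENOTTY unless KVM itself runs on top of Hyper-V. */
          return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
  }

Availability can be probed beforehand with KVM_CHECK_EXTENSION on the same constant.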
Signed-off-by: Tianyu Lan Signed-off-by: Paolo Bonzini --- Documentation/virt/kvm/api.txt | 13 +++++++++++++ arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/x86.c | 8 ++++++++ include/uapi/linux/kvm.h | 1 + 4 files changed, 23 insertions(+) (limited to 'include/uapi') diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.txt index 136f1eef3712..4833904d32a5 100644 --- a/Documentation/virt/kvm/api.txt +++ b/Documentation/virt/kvm/api.txt @@ -5309,3 +5309,16 @@ Architectures: x86 This capability indicates that KVM supports paravirtualized Hyper-V IPI send hypercalls: HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx. +8.21 KVM_CAP_HYPERV_DIRECT_TLBFLUSH + +Architecture: x86 + +This capability indicates that KVM running on top of the Hyper-V hypervisor +enables Direct TLB flush for its guests, meaning that TLB flush +hypercalls are handled by the Level 0 hypervisor (Hyper-V), bypassing KVM. +Due to the different ABI for hypercall parameters between Hyper-V and +KVM, enabling this capability effectively disables all hypercall +handling by KVM (as some KVM hypercalls may be mistakenly treated as TLB +flush hypercalls by Hyper-V), so userspace should disable KVM identification +in CPUID and expose only the Hyper-V identification. In this case, the guest +thinks it's running on Hyper-V and uses only Hyper-V hypercalls. diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index a3a3ec73fa2f..4765ae0fc506 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1213,6 +1213,7 @@ struct kvm_x86_ops { bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu); bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu); + int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu); }; struct kvm_arch_async_pf { diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index bb93771e4170..5becc6753280 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3282,6 +3282,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = kvm_x86_ops->get_nested_state ? kvm_x86_ops->get_nested_state(NULL, NULL, 0) : 0; break; + case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: + r = kvm_x86_ops->enable_direct_tlbflush ? 1 : 0; + break; default: break; } @@ -4055,6 +4058,11 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, r = -EFAULT; } return r; + case KVM_CAP_HYPERV_DIRECT_TLBFLUSH: + if (!kvm_x86_ops->enable_direct_tlbflush) + return -ENOTTY; + + return kvm_x86_ops->enable_direct_tlbflush(vcpu); default: return -EINVAL; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 233efbb1c81c..c73aead0c0a8 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -999,6 +999,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_PTRAUTH_GENERIC 172 #define KVM_CAP_PMU_EVENT_FILTER 173 #define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174 +#define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175 #ifdef KVM_CAP_IRQ_ROUTING -- cgit v1.2.3 From 20ff1cb506727f81acba59acab8a0f37e1a13e43 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 24 Sep 2019 07:40:06 +0900 Subject: netfilter: ebtables: use __u8 instead of uint8_t in uapi header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When CONFIG_UAPI_HEADER_TEST=y, exported headers are compile-tested to make sure they can be included from user-space. Currently, linux/netfilter_bridge/ebtables.h is excluded from the test coverage. To make it join the compile-test, we need to fix the build errors attached below.
For a case like this, we decided to use __u{8,16,32,64} variable types in this discussion: https://lkml.org/lkml/2019/6/5/18 Build log: CC usr/include/linux/netfilter_bridge/ebtables.h.s In file included from :32:0: ./usr/include/linux/netfilter_bridge/ebtables.h:126:4: error: unknown type name ‘uint8_t’ uint8_t revision; ^~~~~~~ ./usr/include/linux/netfilter_bridge/ebtables.h:139:4: error: unknown type name ‘uint8_t’ uint8_t revision; ^~~~~~~ ./usr/include/linux/netfilter_bridge/ebtables.h:152:4: error: unknown type name ‘uint8_t’ uint8_t revision; ^~~~~~~ Signed-off-by: Masahiro Yamada Signed-off-by: Pablo Neira Ayuso --- include/uapi/linux/netfilter_bridge/ebtables.h | 6 +++--- usr/include/Makefile | 1 - 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'include/uapi') diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h index 3b86c14ea49d..8076c940ffeb 100644 --- a/include/uapi/linux/netfilter_bridge/ebtables.h +++ b/include/uapi/linux/netfilter_bridge/ebtables.h @@ -123,7 +123,7 @@ struct ebt_entry_match { union { struct { char name[EBT_EXTENSION_MAXNAMELEN]; - uint8_t revision; + __u8 revision; }; struct xt_match *match; } u; @@ -136,7 +136,7 @@ struct ebt_entry_watcher { union { struct { char name[EBT_EXTENSION_MAXNAMELEN]; - uint8_t revision; + __u8 revision; }; struct xt_target *watcher; } u; @@ -149,7 +149,7 @@ struct ebt_entry_target { union { struct { char name[EBT_EXTENSION_MAXNAMELEN]; - uint8_t revision; + __u8 revision; }; struct xt_target *target; } u; diff --git a/usr/include/Makefile b/usr/include/Makefile index 1fb6abe29b2f..379cc5abc162 100644 --- a/usr/include/Makefile +++ b/usr/include/Makefile @@ -38,7 +38,6 @@ header-test- += linux/ivtv.h header-test- += linux/jffs2.h header-test- += linux/kexec.h header-test- += linux/matroxfb.h -header-test- += linux/netfilter_bridge/ebtables.h header-test- += linux/netfilter_ipv4/ipt_LOG.h header-test- += linux/netfilter_ipv6/ip6t_LOG.h header-test- += linux/nfc.h -- cgit v1.2.3 From 541be05095437a7e5e08e7d13a13e03ec0994ae7 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 25 Sep 2019 16:45:56 -0700 Subject: linux/coff.h: add include guard Add a header include guard just in case. My motivation is to allow Kbuild to detect missing include guard: https://patchwork.kernel.org/patch/11063011/ Before I enable this checker I want to fix as many headers as possible. Link: http://lkml.kernel.org/r/20190728154728.11126-1-yamada.masahiro@socionext.com Signed-off-by: Masahiro Yamada Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/uapi/linux/coff.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/uapi') diff --git a/include/uapi/linux/coff.h b/include/uapi/linux/coff.h index e4a79f80b9a0..ab5c7e847eed 100644 --- a/include/uapi/linux/coff.h +++ b/include/uapi/linux/coff.h @@ -11,6 +11,9 @@ more information about COFF, then O'Reilly has a very excellent book. 
*/ +#ifndef _UAPI_LINUX_COFF_H +#define _UAPI_LINUX_COFF_H + #define E_SYMNMLEN 8 /* Number of characters in a symbol name */ #define E_FILNMLEN 14 /* Number of characters in a file name */ #define E_DIMNUM 4 /* Number of array dimensions in auxiliary entry */ @@ -350,3 +353,5 @@ struct COFF_reloc { /* For new sections we haven't heard of before */ #define COFF_DEF_SECTION_ALIGNMENT 4 + +#endif /* _UAPI_LINUX_COFF_H */ -- cgit v1.2.3 From 9c276cc65a58faf98be8e56962745ec99ab87636 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Wed, 25 Sep 2019 16:49:08 -0700 Subject: mm: introduce MADV_COLD MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "Introduce MADV_COLD and MADV_PAGEOUT", v7. - Background The Android terminology used for forking a new process and starting an app from scratch is a cold start, while resuming an existing app is a hot start. While we continually try to improve the performance of cold starts, hot starts will always be significantly less power hungry as well as faster, so we are trying to make hot starts more likely than cold starts. To increase hot starts, Android userspace manages the order in which apps should be killed in a process called ActivityManagerService. ActivityManagerService tracks every Android app or service that the user could be interacting with at any time and translates that into a ranked list for lmkd (low memory killer daemon). They are likely to be killed by lmkd if the system has to reclaim memory. In that sense they are similar to entries in any other cache. Those apps are kept alive for opportunistic performance improvements, but those performance improvements will vary based on the memory requirements of individual workloads. - Problem Naturally, cached apps were dominant consumers of memory on the system. However, they were not significant consumers of swap even though they are good candidates for swap. Under investigation, swapping out only begins once the low zone watermark is hit and kswapd wakes up, but the overall allocation rate in the system might trip lmkd thresholds and cause a cached process to be killed (we measured the performance of swapping out vs. zapping the memory by killing a process; unsurprisingly, zapping is 10x faster even though we use zram, which is much faster than real storage), so kills from lmkd will often satisfy the high zone watermark, resulting in very few pages actually being moved to swap. - Approach The approach we chose was to use a new interface to allow userspace to proactively reclaim entire processes by leveraging platform information. This allowed us to bypass the inaccuracy of the kernel’s LRUs for pages that are known to be cold from userspace and to avoid races with lmkd by reclaiming apps as soon as they entered the cached state. Additionally, it gives the platform many more chances to use its own information to optimize memory efficiency. To achieve the goal, the patchset introduces two new options for madvise. One is MADV_COLD, which will deactivate activated pages, and the other is MADV_PAGEOUT, which will reclaim private pages instantly. These new options complement MADV_DONTNEED and MADV_FREE by adding non-destructive ways to gain some free memory space.
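For illustration (not part of the original submission), a minimal user-space sketch of both hints against an anonymous mapping; the constants are spelled out in case the libc headers predate them, and error handling is elided:

  #include <stddef.h>
  #include <sys/mman.h>

  #ifndef MADV_COLD
  #define MADV_COLD 20     /* deactivate these pages */
  #endif
  #ifndef MADV_PAGEOUT
  #define MADV_PAGEOUT 21  /* reclaim these pages */
  #endif

  int main(void)
  {
          size_t len = 64UL << 20;  /* illustrative 64 MiB scratch buffer */
          char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

          if (buf == MAP_FAILED)
                  return 1;
          buf[0] = 1;  /* touch it so the page lands on an LRU list */

          madvise(buf, len, MADV_COLD);     /* keep data, age it out early */
          madvise(buf, len, MADV_PAGEOUT);  /* reclaim now, swap out if dirty */

          munmap(buf, len);
          return 0;
  }

The design distinction the sketch illustrates: MADV_COLD only ages the pages while preserving their contents, whereas MADV_PAGEOUT reclaims them immediately, writing dirty private pages to swap if necessary.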
MADV_PAGEOUT is similar to MADV_DONTNEED in that it hints to the kernel that the memory region is not currently needed and should be reclaimed immediately; MADV_COLD is similar to MADV_FREE in that it hints to the kernel that the memory region is not currently needed and should be reclaimed when memory pressure rises. This patch (of 5): When a process expects no accesses to a certain memory range, it could give a hint to the kernel that the pages can be reclaimed when memory pressure happens but the data should be preserved for future use. This could reduce workingset eviction, so it ends up increasing performance. This patch introduces the new MADV_COLD hint to the madvise(2) syscall. MADV_COLD can be used by a process to mark a memory range as not expected to be used in the near future. The hint can help the kernel in deciding which pages to evict early during memory pressure. It works for every LRU page like MADV_[DONTNEED|FREE]. IOW, it moves active file page -> inactive file LRU active anon page -> inactive anon LRU Unlike MADV_FREE, it doesn't move active anonymous pages to the inactive file LRU's head, because MADV_COLD has slightly different semantics. MADV_FREE means it's okay to discard under memory pressure because the content of the page is *garbage*, so freeing such pages has almost zero overhead, since we don't need to swap out, and an access afterward causes just a minor fault. Thus, it makes sense to put those freeable pages on the inactive file LRU to compete with other used-once pages. It makes sense from an implementation point of view, too, because it's no longer swap-backed memory until it is re-dirtied. It could even give a bonus: such pages can be reclaimed on a swapless system. However, MADV_COLD doesn't mean garbage, so reclaiming those pages requires swap-out and swap-in in the end, which is a bigger cost. Since we have designed VM LRU aging based on a cost model, anonymous cold pages are better positioned on the inactive anon LRU list, not the file LRU. Furthermore, it helps to avoid unnecessary scanning if the system doesn't have a swap device. Let's start with the simpler way, without adding complexity at this moment. However, keep in mind the caveat that workloads with a lot of page cache are likely to see MADV_COLD on anonymous memory ignored, because we rarely age anonymous LRU lists. * man-page material MADV_COLD (since Linux x.x) Pages in the specified regions will be treated as less-recently-accessed compared to pages in the system with similar access frequencies. In contrast to MADV_FREE, the contents of the region are preserved regardless of subsequent writes to pages. MADV_COLD cannot be applied to locked pages, Huge TLB pages, or VM_PFNMAP pages. [akpm@linux-foundation.org: resolve conflicts with hmm.git] Link: http://lkml.kernel.org/r/20190726023435.214162-2-minchan@kernel.org Signed-off-by: Minchan Kim Reported-by: kbuild test robot Acked-by: Michal Hocko Acked-by: Johannes Weiner Cc: James E.J. Bottomley Cc: Richard Henderson Cc: Ralf Baechle Cc: Chris Zankel Cc: Johannes Weiner Cc: Daniel Colascione Cc: Dave Hansen Cc: Hillf Danton Cc: Joel Fernandes (Google) Cc: Kirill A.
Shutemov Cc: Oleksandr Natalenko Cc: Shakeel Butt Cc: Sonny Rao Cc: Suren Baghdasaryan Cc: Tim Murray Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/uapi/asm/mman.h | 2 + arch/mips/include/uapi/asm/mman.h | 2 + arch/parisc/include/uapi/asm/mman.h | 2 + arch/xtensa/include/uapi/asm/mman.h | 2 + include/linux/swap.h | 1 + include/uapi/asm-generic/mman-common.h | 2 + mm/internal.h | 2 +- mm/madvise.c | 179 ++++++++++++++++++++++++++++++++- mm/oom_kill.c | 2 +- mm/swap.c | 42 ++++++++ 10 files changed, 232 insertions(+), 4 deletions(-) (limited to 'include/uapi') diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h index ac23379b7a87..f3258fbf03d0 100644 --- a/arch/alpha/include/uapi/asm/mman.h +++ b/arch/alpha/include/uapi/asm/mman.h @@ -68,6 +68,8 @@ #define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ #define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ +#define MADV_COLD 20 /* deactivate these pages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h index c2b40969eb1f..00ad09fc5eb1 100644 --- a/arch/mips/include/uapi/asm/mman.h +++ b/arch/mips/include/uapi/asm/mman.h @@ -95,6 +95,8 @@ #define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ #define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ +#define MADV_COLD 20 /* deactivate these pages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h index c98162f494db..eb14e3a7b8f3 100644 --- a/arch/parisc/include/uapi/asm/mman.h +++ b/arch/parisc/include/uapi/asm/mman.h @@ -48,6 +48,8 @@ #define MADV_DONTFORK 10 /* don't inherit across fork */ #define MADV_DOFORK 11 /* do inherit across fork */ +#define MADV_COLD 20 /* deactivate these pages */ + #define MADV_MERGEABLE 65 /* KSM may merge identical pages */ #define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h index ebbb48842190..f926b00ff11f 100644 --- a/arch/xtensa/include/uapi/asm/mman.h +++ b/arch/xtensa/include/uapi/asm/mman.h @@ -103,6 +103,8 @@ #define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ #define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ +#define MADV_COLD 20 /* deactivate these pages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/include/linux/swap.h b/include/linux/swap.h index de2c67a33b7e..0ce997edb8bb 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -340,6 +340,7 @@ extern void lru_add_drain_cpu(int cpu); extern void lru_add_drain_all(void); extern void rotate_reclaimable_page(struct page *page); extern void deactivate_file_page(struct page *page); +extern void deactivate_page(struct page *page); extern void mark_page_lazyfree(struct page *page); extern void swap_setup(void); diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index 63b1f506ea67..23431faf0eb6 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -67,6 +67,8 @@ #define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */ #define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ +#define MADV_COLD 20 /* deactivate these pages */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/mm/internal.h b/mm/internal.h index e32390802fd3..0d5f720c75ab 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -39,7 +39,7 @@ vm_fault_t 
do_swap_page(struct vm_fault *vmf); void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling); -static inline bool can_madv_dontneed_vma(struct vm_area_struct *vma) +static inline bool can_madv_lru_vma(struct vm_area_struct *vma) { return !(vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)); } diff --git a/mm/madvise.c b/mm/madvise.c index 1f8a6fdc6878..e1aee62967c3 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -42,6 +43,7 @@ static int madvise_need_mmap_write(int behavior) case MADV_REMOVE: case MADV_WILLNEED: case MADV_DONTNEED: + case MADV_COLD: case MADV_FREE: return 0; default: @@ -289,6 +291,176 @@ static long madvise_willneed(struct vm_area_struct *vma, return 0; } +static int madvise_cold_pte_range(pmd_t *pmd, unsigned long addr, + unsigned long end, struct mm_walk *walk) +{ + struct mmu_gather *tlb = walk->private; + struct mm_struct *mm = tlb->mm; + struct vm_area_struct *vma = walk->vma; + pte_t *orig_pte, *pte, ptent; + spinlock_t *ptl; + struct page *page; + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (pmd_trans_huge(*pmd)) { + pmd_t orig_pmd; + unsigned long next = pmd_addr_end(addr, end); + + tlb_change_page_size(tlb, HPAGE_PMD_SIZE); + ptl = pmd_trans_huge_lock(pmd, vma); + if (!ptl) + return 0; + + orig_pmd = *pmd; + if (is_huge_zero_pmd(orig_pmd)) + goto huge_unlock; + + if (unlikely(!pmd_present(orig_pmd))) { + VM_BUG_ON(thp_migration_supported() && + !is_pmd_migration_entry(orig_pmd)); + goto huge_unlock; + } + + page = pmd_page(orig_pmd); + if (next - addr != HPAGE_PMD_SIZE) { + int err; + + if (page_mapcount(page) != 1) + goto huge_unlock; + + get_page(page); + spin_unlock(ptl); + lock_page(page); + err = split_huge_page(page); + unlock_page(page); + put_page(page); + if (!err) + goto regular_page; + return 0; + } + + if (pmd_young(orig_pmd)) { + pmdp_invalidate(vma, addr, pmd); + orig_pmd = pmd_mkold(orig_pmd); + + set_pmd_at(mm, addr, pmd, orig_pmd); + tlb_remove_pmd_tlb_entry(tlb, pmd, addr); + } + + test_and_clear_page_young(page); + deactivate_page(page); +huge_unlock: + spin_unlock(ptl); + return 0; + } + + if (pmd_trans_unstable(pmd)) + return 0; +regular_page: +#endif + tlb_change_page_size(tlb, PAGE_SIZE); + orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + flush_tlb_batched_pending(mm); + arch_enter_lazy_mmu_mode(); + for (; addr < end; pte++, addr += PAGE_SIZE) { + ptent = *pte; + + if (pte_none(ptent)) + continue; + + if (!pte_present(ptent)) + continue; + + page = vm_normal_page(vma, addr, ptent); + if (!page) + continue; + + /* + * Creating a THP page is expensive so split it only if we + * are sure it's worth. Split it if we are only owner. 
+ */ + if (PageTransCompound(page)) { + if (page_mapcount(page) != 1) + break; + get_page(page); + if (!trylock_page(page)) { + put_page(page); + break; + } + pte_unmap_unlock(orig_pte, ptl); + if (split_huge_page(page)) { + unlock_page(page); + put_page(page); + pte_offset_map_lock(mm, pmd, addr, &ptl); + break; + } + unlock_page(page); + put_page(page); + pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + pte--; + addr -= PAGE_SIZE; + continue; + } + + VM_BUG_ON_PAGE(PageTransCompound(page), page); + + if (pte_young(ptent)) { + ptent = ptep_get_and_clear_full(mm, addr, pte, + tlb->fullmm); + ptent = pte_mkold(ptent); + set_pte_at(mm, addr, pte, ptent); + tlb_remove_tlb_entry(tlb, pte, addr); + } + + /* + * We are deactivating a page to accelerate its reclaim. + * The VM couldn't reclaim the page unless we clear PG_young. + * As a side effect, this confuses idle-page tracking, + * which will miss recent reference history. + */ + test_and_clear_page_young(page); + deactivate_page(page); + } + + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(orig_pte, ptl); + cond_resched(); + + return 0; +} + +static const struct mm_walk_ops cold_walk_ops = { + .pmd_entry = madvise_cold_pte_range, +}; + +static void madvise_cold_page_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, + unsigned long addr, unsigned long end) +{ + tlb_start_vma(tlb, vma); + walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, tlb); + tlb_end_vma(tlb, vma); +} + +static long madvise_cold(struct vm_area_struct *vma, + struct vm_area_struct **prev, + unsigned long start_addr, unsigned long end_addr) +{ + struct mm_struct *mm = vma->vm_mm; + struct mmu_gather tlb; + + *prev = vma; + if (!can_madv_lru_vma(vma)) + return -EINVAL; + + lru_add_drain(); + tlb_gather_mmu(&tlb, mm, start_addr, end_addr); + madvise_cold_page_range(&tlb, vma, start_addr, end_addr); + tlb_finish_mmu(&tlb, start_addr, end_addr); + + return 0; +} + static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) @@ -493,7 +665,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma, int behavior) { *prev = vma; - if (!can_madv_dontneed_vma(vma)) + if (!can_madv_lru_vma(vma)) return -EINVAL; if (!userfaultfd_remove(vma, start, end)) { @@ -515,7 +687,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma, */ return -ENOMEM; } - if (!can_madv_dontneed_vma(vma)) + if (!can_madv_lru_vma(vma)) return -EINVAL; if (end > vma->vm_end) { /* @@ -669,6 +841,8 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, return madvise_remove(vma, prev, start, end); case MADV_WILLNEED: return madvise_willneed(vma, prev, start, end); + case MADV_COLD: + return madvise_cold(vma, prev, start, end); case MADV_FREE: case MADV_DONTNEED: return madvise_dontneed_free(vma, prev, start, end, behavior); @@ -690,6 +864,7 @@ madvise_behavior_valid(int behavior) case MADV_WILLNEED: case MADV_DONTNEED: case MADV_FREE: + case MADV_COLD: #ifdef CONFIG_KSM case MADV_MERGEABLE: case MADV_UNMERGEABLE: diff --git a/mm/oom_kill.c b/mm/oom_kill.c index c1d9496b4c43..71e3acea7817 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -523,7 +523,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm) set_bit(MMF_UNSTABLE, &mm->flags); for (vma = mm->mmap ; vma; vma = vma->vm_next) { - if (!can_madv_dontneed_vma(vma)) + if (!can_madv_lru_vma(vma)) continue; /* diff --git a/mm/swap.c b/mm/swap.c index 784dc1620620..38c3fa4308e2 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -47,6 +47,7 @@ int page_cluster; static
DEFINE_PER_CPU(struct pagevec, lru_add_pvec); static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs); +static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); static DEFINE_PER_CPU(struct pagevec, lru_lazyfree_pvecs); #ifdef CONFIG_SMP static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs); @@ -538,6 +539,22 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, update_page_reclaim_stat(lruvec, file, 0); } +static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec, + void *arg) +{ + if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { + int file = page_is_file_cache(page); + int lru = page_lru_base_type(page); + + del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE); + ClearPageActive(page); + ClearPageReferenced(page); + add_page_to_lru_list(page, lruvec, lru); + + __count_vm_events(PGDEACTIVATE, hpage_nr_pages(page)); + update_page_reclaim_stat(lruvec, file, 0); + } +} static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, void *arg) @@ -590,6 +607,10 @@ void lru_add_drain_cpu(int cpu) if (pagevec_count(pvec)) pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); + pvec = &per_cpu(lru_deactivate_pvecs, cpu); + if (pagevec_count(pvec)) + pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); + pvec = &per_cpu(lru_lazyfree_pvecs, cpu); if (pagevec_count(pvec)) pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL); @@ -623,6 +644,26 @@ void deactivate_file_page(struct page *page) } } +/* + * deactivate_page - deactivate a page + * @page: page to deactivate + * + * deactivate_page() moves @page to the inactive list if @page was on the active + * list and was not an unevictable page. This is done to accelerate the reclaim + * of @page. + */ +void deactivate_page(struct page *page) +{ + if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { + struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); + + get_page(page); + if (!pagevec_add(pvec, page) || PageCompound(page)) + pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); + put_cpu_var(lru_deactivate_pvecs); + } +} + /** * mark_page_lazyfree - make an anon page lazyfree * @page: page to deactivate @@ -687,6 +728,7 @@ void lru_add_drain_all(void) if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) || pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) || pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) || + pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) || pagevec_count(&per_cpu(lru_lazyfree_pvecs, cpu)) || need_activate_page_drain(cpu)) { INIT_WORK(work, lru_add_drain_per_cpu); -- cgit v1.2.3 From 1a4e58cce84ee88129d5d49c064bd2852b481357 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Wed, 25 Sep 2019 16:49:15 -0700 Subject: mm: introduce MADV_PAGEOUT When a process expects no accesses to a certain memory range for a long time, it could hint to the kernel that the pages can be reclaimed instantly but the data should be preserved for future use. This could reduce workingset eviction, so it ends up increasing performance. This patch introduces the new MADV_PAGEOUT hint to the madvise(2) syscall. MADV_PAGEOUT can be used by a process to mark a memory range as not expected to be used for a long time, so that the kernel reclaims *any LRU* pages instantly. The hint can help the kernel in deciding which pages to evict proactively. A note: it intentionally doesn't apply the SWAP_CLUSTER_MAX LRU page isolation limit because it's automatically bounded by the PMD size.
If the PMD size (e.g., 256 pages) causes trouble, we could fix it later by limiting it to SWAP_CLUSTER_MAX [1]. - man-page material MADV_PAGEOUT (since Linux x.x) Do not expect access in the near future, so pages in the specified regions could be reclaimed instantly regardless of memory pressure. Thus, an access in the range after a successful operation could cause a major page fault but will never lose the up-to-date contents, unlike MADV_DONTNEED. Pages belonging to a shared mapping are only processed if a write access is allowed for the calling process. MADV_PAGEOUT cannot be applied to locked pages, Huge TLB pages, or VM_PFNMAP pages. [1] https://lore.kernel.org/lkml/20190710194719.GS29695@dhcp22.suse.cz/ [minchan@kernel.org: clear PG_active on MADV_PAGEOUT] Link: http://lkml.kernel.org/r/20190802200643.GA181880@google.com [akpm@linux-foundation.org: resolve conflicts with hmm.git] Link: http://lkml.kernel.org/r/20190726023435.214162-5-minchan@kernel.org Signed-off-by: Minchan Kim Reported-by: kbuild test robot Acked-by: Michal Hocko Cc: James E.J. Bottomley Cc: Richard Henderson Cc: Ralf Baechle Cc: Chris Zankel Cc: Daniel Colascione Cc: Dave Hansen Cc: Hillf Danton Cc: Joel Fernandes (Google) Cc: Johannes Weiner Cc: Kirill A. Shutemov Cc: Oleksandr Natalenko Cc: Shakeel Butt Cc: Sonny Rao Cc: Suren Baghdasaryan Cc: Tim Murray Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/include/uapi/asm/mman.h | 1 + arch/mips/include/uapi/asm/mman.h | 1 + arch/parisc/include/uapi/asm/mman.h | 1 + arch/xtensa/include/uapi/asm/mman.h | 1 + include/linux/swap.h | 1 + include/uapi/asm-generic/mman-common.h | 1 + mm/madvise.c | 189 +++++++++++++++++++++++++++++++++ mm/vmscan.c | 56 ++++++++++ 8 files changed, 251 insertions(+) (limited to 'include/uapi') diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h index f3258fbf03d0..a18ec7f63888 100644 --- a/arch/alpha/include/uapi/asm/mman.h +++ b/arch/alpha/include/uapi/asm/mman.h @@ -69,6 +69,7 @@ #define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ #define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h index 00ad09fc5eb1..57dc2ac4f8bd 100644 --- a/arch/mips/include/uapi/asm/mman.h +++ b/arch/mips/include/uapi/asm/mman.h @@ -96,6 +96,7 @@ #define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ #define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ /* compatibility flags */ #define MAP_FILE 0 diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h index eb14e3a7b8f3..6fd8871e4081 100644 --- a/arch/parisc/include/uapi/asm/mman.h +++ b/arch/parisc/include/uapi/asm/mman.h @@ -49,6 +49,7 @@ #define MADV_DOFORK 11 /* do inherit across fork */ #define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ #define MADV_MERGEABLE 65 /* KSM may merge identical pages */ #define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */ diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h index f926b00ff11f..e5e643752947 100644 --- a/arch/xtensa/include/uapi/asm/mman.h +++ b/arch/xtensa/include/uapi/asm/mman.h @@ -104,6 +104,7 @@ #define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ #define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ /* compatibility flags */ #define
MAP_FILE 0 diff --git a/include/linux/swap.h b/include/linux/swap.h index 0ce997edb8bb..063c0c1e112b 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -365,6 +365,7 @@ extern int vm_swappiness; extern int remove_mapping(struct address_space *mapping, struct page *page); extern unsigned long vm_total_pages; +extern unsigned long reclaim_pages(struct list_head *page_list); #ifdef CONFIG_NUMA extern int node_reclaim_mode; extern int sysctl_min_unmapped_ratio; diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index 23431faf0eb6..c160a5354eb6 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -68,6 +68,7 @@ #define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */ #define MADV_COLD 20 /* deactivate these pages */ +#define MADV_PAGEOUT 21 /* reclaim these pages */ /* compatibility flags */ #define MAP_FILE 0 diff --git a/mm/madvise.c b/mm/madvise.c index e1aee62967c3..54c5639774b6 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -44,6 +44,7 @@ static int madvise_need_mmap_write(int behavior) case MADV_WILLNEED: case MADV_DONTNEED: case MADV_COLD: + case MADV_PAGEOUT: case MADV_FREE: return 0; default: @@ -461,6 +462,191 @@ static long madvise_cold(struct vm_area_struct *vma, return 0; } +static int madvise_pageout_pte_range(pmd_t *pmd, unsigned long addr, + unsigned long end, struct mm_walk *walk) +{ + struct mmu_gather *tlb = walk->private; + struct mm_struct *mm = tlb->mm; + struct vm_area_struct *vma = walk->vma; + pte_t *orig_pte, *pte, ptent; + spinlock_t *ptl; + LIST_HEAD(page_list); + struct page *page; + + if (fatal_signal_pending(current)) + return -EINTR; + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (pmd_trans_huge(*pmd)) { + pmd_t orig_pmd; + unsigned long next = pmd_addr_end(addr, end); + + tlb_change_page_size(tlb, HPAGE_PMD_SIZE); + ptl = pmd_trans_huge_lock(pmd, vma); + if (!ptl) + return 0; + + orig_pmd = *pmd; + if (is_huge_zero_pmd(orig_pmd)) + goto huge_unlock; + + if (unlikely(!pmd_present(orig_pmd))) { + VM_BUG_ON(thp_migration_supported() && + !is_pmd_migration_entry(orig_pmd)); + goto huge_unlock; + } + + page = pmd_page(orig_pmd); + if (next - addr != HPAGE_PMD_SIZE) { + int err; + + if (page_mapcount(page) != 1) + goto huge_unlock; + get_page(page); + spin_unlock(ptl); + lock_page(page); + err = split_huge_page(page); + unlock_page(page); + put_page(page); + if (!err) + goto regular_page; + return 0; + } + + if (pmd_young(orig_pmd)) { + pmdp_invalidate(vma, addr, pmd); + orig_pmd = pmd_mkold(orig_pmd); + + set_pmd_at(mm, addr, pmd, orig_pmd); + tlb_remove_tlb_entry(tlb, pmd, addr); + } + + ClearPageReferenced(page); + test_and_clear_page_young(page); + + if (!isolate_lru_page(page)) + list_add(&page->lru, &page_list); +huge_unlock: + spin_unlock(ptl); + reclaim_pages(&page_list); + return 0; + } + + if (pmd_trans_unstable(pmd)) + return 0; +regular_page: +#endif + tlb_change_page_size(tlb, PAGE_SIZE); + orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + flush_tlb_batched_pending(mm); + arch_enter_lazy_mmu_mode(); + for (; addr < end; pte++, addr += PAGE_SIZE) { + ptent = *pte; + if (!pte_present(ptent)) + continue; + + page = vm_normal_page(vma, addr, ptent); + if (!page) + continue; + + /* + * creating a THP page is expensive so split it only if we + * are sure it's worth. Split it if we are only owner. 
+ */ + if (PageTransCompound(page)) { + if (page_mapcount(page) != 1) + break; + get_page(page); + if (!trylock_page(page)) { + put_page(page); + break; + } + pte_unmap_unlock(orig_pte, ptl); + if (split_huge_page(page)) { + unlock_page(page); + put_page(page); + pte_offset_map_lock(mm, pmd, addr, &ptl); + break; + } + unlock_page(page); + put_page(page); + pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + pte--; + addr -= PAGE_SIZE; + continue; + } + + VM_BUG_ON_PAGE(PageTransCompound(page), page); + + if (pte_young(ptent)) { + ptent = ptep_get_and_clear_full(mm, addr, pte, + tlb->fullmm); + ptent = pte_mkold(ptent); + set_pte_at(mm, addr, pte, ptent); + tlb_remove_tlb_entry(tlb, pte, addr); + } + ClearPageReferenced(page); + test_and_clear_page_young(page); + + if (!isolate_lru_page(page)) + list_add(&page->lru, &page_list); + } + + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(orig_pte, ptl); + reclaim_pages(&page_list); + cond_resched(); + + return 0; +} + +static const struct mm_walk_ops pageout_walk_ops = { + .pmd_entry = madvise_pageout_pte_range, +}; + +static void madvise_pageout_page_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, + unsigned long addr, unsigned long end) +{ + tlb_start_vma(tlb, vma); + walk_page_range(vma->vm_mm, addr, end, &pageout_walk_ops, tlb); + tlb_end_vma(tlb, vma); +} + +static inline bool can_do_pageout(struct vm_area_struct *vma) +{ + if (vma_is_anonymous(vma)) + return true; + if (!vma->vm_file) + return false; + /* + * paging out pagecache only for non-anonymous mappings that correspond + * to the files the calling process could (if tried) open for writing; + * otherwise we'd be including shared non-exclusive mappings, which + * opens a side channel. + */ + return inode_owner_or_capable(file_inode(vma->vm_file)) || + inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0; +} + +static long madvise_pageout(struct vm_area_struct *vma, + struct vm_area_struct **prev, + unsigned long start_addr, unsigned long end_addr) +{ + struct mm_struct *mm = vma->vm_mm; + struct mmu_gather tlb; + + *prev = vma; + if (!can_madv_lru_vma(vma)) + return -EINVAL; + + if (!can_do_pageout(vma)) + return 0; + + lru_add_drain(); + tlb_gather_mmu(&tlb, mm, start_addr, end_addr); + madvise_pageout_page_range(&tlb, vma, start_addr, end_addr); + tlb_finish_mmu(&tlb, start_addr, end_addr); + + return 0; +} + static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) @@ -843,6 +1029,8 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, return madvise_willneed(vma, prev, start, end); case MADV_COLD: return madvise_cold(vma, prev, start, end); + case MADV_PAGEOUT: + return madvise_pageout(vma, prev, start, end); case MADV_FREE: case MADV_DONTNEED: return madvise_dontneed_free(vma, prev, start, end, behavior); @@ -865,6 +1053,7 @@ madvise_behavior_valid(int behavior) case MADV_DONTNEED: case MADV_FREE: case MADV_COLD: + case MADV_PAGEOUT: #ifdef CONFIG_KSM case MADV_MERGEABLE: case MADV_UNMERGEABLE: diff --git a/mm/vmscan.c b/mm/vmscan.c index d8bbaf068c35..e5d52d6a24af 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2145,6 +2145,62 @@ static void shrink_active_list(unsigned long nr_to_scan, nr_deactivate, nr_rotated, sc->priority, file); } +unsigned long reclaim_pages(struct list_head *page_list) +{ + int nid = -1; + unsigned long nr_reclaimed = 0; + LIST_HEAD(node_page_list); + struct reclaim_stat dummy_stat; + struct page *page; + struct scan_control sc = { + .gfp_mask = GFP_KERNEL, + .priority = DEF_PRIORITY, + .may_writepage = 1, + .may_unmap = 1, + .may_swap = 1, + }; + + while
(!list_empty(page_list)) { + page = lru_to_page(page_list); + if (nid == -1) { + nid = page_to_nid(page); + INIT_LIST_HEAD(&node_page_list); + } + + if (nid == page_to_nid(page)) { + ClearPageActive(page); + list_move(&page->lru, &node_page_list); + continue; + } + + nr_reclaimed += shrink_page_list(&node_page_list, + NODE_DATA(nid), + &sc, 0, + &dummy_stat, false); + while (!list_empty(&node_page_list)) { + page = lru_to_page(&node_page_list); + list_del(&page->lru); + putback_lru_page(page); + } + + nid = -1; + } + + if (!list_empty(&node_page_list)) { + nr_reclaimed += shrink_page_list(&node_page_list, + NODE_DATA(nid), + &sc, 0, + &dummy_stat, false); + while (!list_empty(&node_page_list)) { + page = lru_to_page(&node_page_list); + list_del(&page->lru); + putback_lru_page(page); + } + } + + return nr_reclaimed; +} + /* * The inactive anon list should be small enough that the VM never has * to do too much work. -- cgit v1.2.3 From 2df4de1681767df900e15e34195bbf7dc1b23e06 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Wed, 25 Sep 2019 19:28:19 -0700 Subject: ptp: correctly disable flags on old ioctls Commit 415606588c61 ("PTP: introduce new versions of IOCTLs", 2019-09-13) introduced new versions of the PTP ioctls which actually validate that the flags are acceptable values. As part of this, it cleared the flags value using a bitwise and+negation, in an attempt to prevent the old ioctl from accidentally enabling new features. This is incorrect for a couple of reasons. First, it results in accidentally preventing previously working flags on the request ioctl. By clearing the "valid" flags, we now no longer allow setting the enable, rising edge, or falling edge flags. Second, if we add additional flags in the future, they must not be set by the old ioctl. (Since the flags weren't checked before, we could potentially break userspace programs which sent garbage flag data.) The correct way to resolve this is to check for and clear all but the originally valid flags. Create defines indicating which flags are correctly checked and interpreted by the original ioctls. Use these to clear any bits which will not be correctly interpreted by the original ioctls. In the future, new flags must be added to the VALID_FLAGS macros, but *not* to the V1_VALID_FLAGS macros. In this way, new features may be exposed over the v2 ioctls, but without breaking previous userspace which happened to not clear the flags value properly. The old ioctl will continue to behave the same way, while the new ioctl gains the benefit of using the flags fields. Cc: Richard Cochran Cc: Felipe Balbi Cc: David S. Miller Cc: Christopher Hall Signed-off-by: Jacob Keller Acked-by: Richard Cochran Signed-off-by: David S.
Miller --- drivers/ptp/ptp_chardev.c | 4 ++-- include/uapi/linux/ptp_clock.h | 22 ++++++++++++++++++++++ 2 files changed, 24 insertions(+), 2 deletions(-) (limited to 'include/uapi') diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 9c18476d8d10..67d0199840fd 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c @@ -155,7 +155,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) err = -EINVAL; break; } else if (cmd == PTP_EXTTS_REQUEST) { - req.extts.flags &= ~PTP_EXTTS_VALID_FLAGS; + req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS; req.extts.rsv[0] = 0; req.extts.rsv[1] = 0; } @@ -184,7 +184,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) err = -EINVAL; break; } else if (cmd == PTP_PEROUT_REQUEST) { - req.perout.flags &= ~PTP_PEROUT_VALID_FLAGS; + req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS; req.perout.rsv[0] = 0; req.perout.rsv[1] = 0; req.perout.rsv[2] = 0; diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h index f16301015949..59e89a1bc3bb 100644 --- a/include/uapi/linux/ptp_clock.h +++ b/include/uapi/linux/ptp_clock.h @@ -31,15 +31,37 @@ #define PTP_ENABLE_FEATURE (1<<0) #define PTP_RISING_EDGE (1<<1) #define PTP_FALLING_EDGE (1<<2) + +/* + * flag fields valid for the new PTP_EXTTS_REQUEST2 ioctl. + */ #define PTP_EXTTS_VALID_FLAGS (PTP_ENABLE_FEATURE | \ PTP_RISING_EDGE | \ PTP_FALLING_EDGE) +/* + * flag fields valid for the original PTP_EXTTS_REQUEST ioctl. + * DO NOT ADD NEW FLAGS HERE. + */ +#define PTP_EXTTS_V1_VALID_FLAGS (PTP_ENABLE_FEATURE | \ + PTP_RISING_EDGE | \ + PTP_FALLING_EDGE) + /* * Bits of the ptp_perout_request.flags field: */ #define PTP_PEROUT_ONE_SHOT (1<<0) + +/* + * flag fields valid for the new PTP_PEROUT_REQUEST2 ioctl. + */ #define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT) + +/* + * No flags are valid for the original PTP_PEROUT_REQUEST ioctl + */ +#define PTP_PEROUT_V1_VALID_FLAGS (0) + /* * struct ptp_clock_time - represents a time value * -- cgit v1.2.3
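For reference (not part of the patch), a hedged user-space sketch of the original PTP_EXTTS_REQUEST ioctl whose flag bits the kernel now masks with PTP_EXTTS_V1_VALID_FLAGS; the /dev/ptp0 path is an assumption:

  #include <fcntl.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/ptp_clock.h>

  int main(void)
  {
          struct ptp_extts_request req;
          int fd = open("/dev/ptp0", O_RDWR);  /* assumed device node */

          if (fd < 0)
                  return 1;

          memset(&req, 0, sizeof(req));
          req.index = 0;
          /* Only these V1 bits survive the kernel-side mask. */
          req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

          return ioctl(fd, PTP_EXTTS_REQUEST, &req) ? 1 : 0;
  }

On kernels carrying only the earlier change, the same request would have had these working bits stripped by the inverted mask, which is the regression described above.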