From 0434ccdcf883e53ec7156a6843943e940dc1feb8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 24 Aug 2018 08:43:36 +0200 Subject: netfilter: nf_tables: rework ct timeout set support Using a private template is problematic: 1. We can't assign both a zone and a timeout policy (zone assigns a conntrack template, so we hit problem 1) 2. Using a template needs to take care of ct refcount, else we'll eventually free the private template due to ->use underflow. This patch reworks template policy to instead work with existing conntrack. As long as such conntrack has not yet been placed into the hash table (unconfirmed) we can still add the timeout extension. The only caveat is that we now need to update/correct ct->timeout to reflect the initial/new state, otherwise the conntrack entry retains the default 'new' timeout. Side effect of this change is that setting the policy must now occur from chains that are evaluated *after* the conntrack lookup has taken place. No released kernel contains the timeout policy feature yet, so this change should be ok. Changes since v2: - don't handle 'ct is confirmed case' - after previous patch, no need to special-case tcp/dccp/sctp timeout anymore Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- include/net/netfilter/nf_conntrack_timeout.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h index d5f62cc6c2ae..3394d75e1c80 100644 --- a/include/net/netfilter/nf_conntrack_timeout.h +++ b/include/net/netfilter/nf_conntrack_timeout.h @@ -30,7 +30,7 @@ struct nf_conn_timeout { }; static inline unsigned int * -nf_ct_timeout_data(struct nf_conn_timeout *t) +nf_ct_timeout_data(const struct nf_conn_timeout *t) { struct nf_ct_timeout *timeout; -- cgit v1.2.3 From 45cd0faae3715e305bc46e23b34c5ed4d185ceb8 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Mon, 27 Aug 2018 15:56:02 +0300 Subject: vfs: add the fadvise() file operation This is going to be used by overlayfs and possibly useful for other filesystems. Signed-off-by: Amir Goldstein Signed-off-by: Miklos Szeredi --- Documentation/filesystems/vfs.txt | 3 ++ include/linux/fs.h | 5 +++ mm/fadvise.c | 78 ++++++++++++++++++++++----------------- 3 files changed, 53 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index ec2142c8dbd3..a6c6a8af48a2 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -885,6 +885,7 @@ struct file_operations { ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int); int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64); int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64); + int (*fadvise)(struct file *, loff_t, loff_t, int); }; Again, all methods are called without any locks being held, unless @@ -965,6 +966,8 @@ otherwise noted. dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE command. + fadvise: possibly called by the fadvise64() system call. + Note that the file operations are implemented by the specific filesystem in which the inode resides. 
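As an editor's illustration (not part of this patch), a stacking filesystem (overlayfs is the intended user) could wire up the new hook roughly as follows; the demo_* names and the way the underlying file is obtained are assumptions, and vfs_fadvise() falls back to the generic page-cache handling whenever a filesystem leaves ->fadvise unset:

	static int demo_fadvise(struct file *file, loff_t offset, loff_t len,
				int advice)
	{
		/* assumption: the real underlying file was stashed at open time */
		struct file *realfile = file->private_data;

		/* forward the advice; vfs_fadvise() picks ->fadvise or the generic path */
		return vfs_fadvise(realfile, offset, len, advice);
	}

	static const struct file_operations demo_fops = {
		/* ... other methods ... */
		.fadvise	= demo_fadvise,
	};
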
When opening a device node (character or block special) most filesystems will call special diff --git a/include/linux/fs.h b/include/linux/fs.h index 33322702c910..6c0b4a1c22ff 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1763,6 +1763,7 @@ struct file_operations { u64); int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64); + int (*fadvise)(struct file *, loff_t, loff_t, int); } __randomize_layout; struct inode_operations { @@ -3459,4 +3460,8 @@ static inline bool dir_relax_shared(struct inode *inode) extern bool path_noexec(const struct path *path); extern void inode_nohighmem(struct inode *inode); +/* mm/fadvise.c */ +extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, + int advice); + #endif /* _LINUX_FS_H */ diff --git a/mm/fadvise.c b/mm/fadvise.c index 2d8376e3c640..2f59bac1cb77 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -27,9 +27,9 @@ * deactivate the pages and clear PG_Referenced. */ -int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) +static int generic_fadvise(struct file *file, loff_t offset, loff_t len, + int advice) { - struct fd f = fdget(fd); struct inode *inode; struct address_space *mapping; struct backing_dev_info *bdi; @@ -37,22 +37,14 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) pgoff_t start_index; pgoff_t end_index; unsigned long nrpages; - int ret = 0; - if (!f.file) - return -EBADF; + inode = file_inode(file); + if (S_ISFIFO(inode->i_mode)) + return -ESPIPE; - inode = file_inode(f.file); - if (S_ISFIFO(inode->i_mode)) { - ret = -ESPIPE; - goto out; - } - - mapping = f.file->f_mapping; - if (!mapping || len < 0) { - ret = -EINVAL; - goto out; - } + mapping = file->f_mapping; + if (!mapping || len < 0) + return -EINVAL; bdi = inode_to_bdi(mapping->host); @@ -67,9 +59,9 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) /* no bad return value, but ignore advice */ break; default: - ret = -EINVAL; + return -EINVAL; } - goto out; + return 0; } /* @@ -85,21 +77,21 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) switch (advice) { case POSIX_FADV_NORMAL: - f.file->f_ra.ra_pages = bdi->ra_pages; - spin_lock(&f.file->f_lock); - f.file->f_mode &= ~FMODE_RANDOM; - spin_unlock(&f.file->f_lock); + file->f_ra.ra_pages = bdi->ra_pages; + spin_lock(&file->f_lock); + file->f_mode &= ~FMODE_RANDOM; + spin_unlock(&file->f_lock); break; case POSIX_FADV_RANDOM: - spin_lock(&f.file->f_lock); - f.file->f_mode |= FMODE_RANDOM; - spin_unlock(&f.file->f_lock); + spin_lock(&file->f_lock); + file->f_mode |= FMODE_RANDOM; + spin_unlock(&file->f_lock); break; case POSIX_FADV_SEQUENTIAL: - f.file->f_ra.ra_pages = bdi->ra_pages * 2; - spin_lock(&f.file->f_lock); - f.file->f_mode &= ~FMODE_RANDOM; - spin_unlock(&f.file->f_lock); + file->f_ra.ra_pages = bdi->ra_pages * 2; + spin_lock(&file->f_lock); + file->f_mode &= ~FMODE_RANDOM; + spin_unlock(&file->f_lock); break; case POSIX_FADV_WILLNEED: /* First and last PARTIAL page! 
*/ @@ -115,8 +107,7 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) * Ignore return value because fadvise() shall return * success even if filesystem can't retrieve a hint, */ - force_page_cache_readahead(mapping, f.file, start_index, - nrpages); + force_page_cache_readahead(mapping, file, start_index, nrpages); break; case POSIX_FADV_NOREUSE: break; @@ -183,9 +174,30 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) } break; default: - ret = -EINVAL; + return -EINVAL; } -out: + return 0; +} + +int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice) +{ + if (file->f_op->fadvise) + return file->f_op->fadvise(file, offset, len, advice); + + return generic_fadvise(file, offset, len, advice); +} +EXPORT_SYMBOL(vfs_fadvise); + +int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice) +{ + struct fd f = fdget(fd); + int ret; + + if (!f.file) + return -EBADF; + + ret = vfs_fadvise(f.file, offset, len, advice); + fdput(f); return ret; } -- cgit v1.2.3 From 0d6c3011409135ea84e2a231b013a22017ff999a Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Tue, 4 Sep 2018 15:31:14 +0200 Subject: HID: core: fix grouping by application commit f07b3c1da92d ("HID: generic: create one input report per application type") was effectively the same as MULTI_INPUT: hidinput->report was never set, so hidinput_match_application() always returned null. Fix that by testing against the real application. Note that this breaks some old eGalax touchscreens that expect MULTI_INPUT instead of HID_QUIRK_INPUT_PER_APP. Enable this quirk for backward compatibility on all non-Win8 touchscreens. link: https://bugzilla.kernel.org/show_bug.cgi?id=200847 link: https://bugzilla.kernel.org/show_bug.cgi?id=200849 link: https://bugs.archlinux.org/task/59699 link: https://github.com/NixOS/nixpkgs/issues/45165 Cc: stable@vger.kernel.org # v4.18+ Signed-off-by: Benjamin Tissoires Signed-off-by: Jiri Kosina --- drivers/hid/hid-input.c | 4 ++-- drivers/hid/hid-multitouch.c | 3 +++ include/linux/hid.h | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index ac201817a2dd..a481eaf39e88 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1582,6 +1582,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid, input_dev->dev.parent = &hid->dev; hidinput->input = input_dev; + hidinput->application = application; list_add_tail(&hidinput->list, &hid->inputs); INIT_LIST_HEAD(&hidinput->reports); @@ -1677,8 +1678,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report) struct hid_input *hidinput; list_for_each_entry(hidinput, &hid->inputs, list) { - if (hidinput->report && - hidinput->report->application == report->application) + if (hidinput->application == report->application) return hidinput; } diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 88da991ef256..da954f3f4da7 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1697,6 +1697,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) */ hdev->quirks |= HID_QUIRK_INPUT_PER_APP; + if (id->group != HID_GROUP_MULTITOUCH_WIN_8) + hdev->quirks |= HID_QUIRK_MULTI_INPUT; + timer_setup(&td->release_timer, mt_expired_timeout, 0); ret = hid_parse(hdev); diff --git a/include/linux/hid.h b/include/linux/hid.h index 834e6461a690..d44a78362942 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h 
@@ -526,6 +526,7 @@ struct hid_input { const char *name; bool registered; struct list_head reports; /* the list of reports */ + unsigned int application; /* application usage for this input */ }; enum hid_type { -- cgit v1.2.3 From d23df2dc56325c72b51670b1fb400ddd23dc17cd Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 3 Sep 2018 12:51:59 -0700 Subject: linux/mod_devicetable.h: fix kernel-doc missing notation for typec_device_id Fix kernel-doc warning for missing struct member description: ../include/linux/mod_devicetable.h:763: warning: Function parameter or member 'driver_data' not described in 'typec_device_id' Fixes: 8a37d87d72f0c ("usb: typec: Bus type for alternate modes") Signed-off-by: Randy Dunlap Cc: Heikki Krogerus Reviewed-by: Heikki Krogerus Signed-off-by: Greg Kroah-Hartman --- include/linux/mod_devicetable.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 1298a7daa57d..01797cb4587e 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -754,6 +754,7 @@ struct tb_service_id { * struct typec_device_id - USB Type-C alternate mode identifiers * @svid: Standard or Vendor ID * @mode: Mode index + * @driver_data: Driver specific data */ struct typec_device_id { __u16 svid; -- cgit v1.2.3 From 76d5581c870454be5f1f1a106c57985902e7ea20 Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Sun, 5 Aug 2018 09:19:33 +0300 Subject: net/mlx5: Fix use-after-free in self-healing flow When the mlx5 health mechanism detects a problem while the driver is in the middle of init_one or remove_one, the driver needs to prevent the health mechanism from scheduling future work; if future work is scheduled, there is a problem with use-after-free: the system WQ tries to run the work item (which has been freed) at the scheduled future time. Prevent this by disabling work item scheduling in the health mechanism when the driver is in the middle of init_one() or remove_one(). 
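To make the mechanism concrete, here is a simplified sketch of the pattern (editor's illustration, not verbatim driver code; the demo_ names are invented): the teardown path sets a drop flag under wq_lock, and the scheduling path tests it under the same lock, so no new work item can be queued once init_one()/remove_one() has decided to stop the health machinery:

	/* teardown side, cf. the disable_health branch added below */
	static void demo_health_block_new_work(struct mlx5_core_health *health)
	{
		unsigned long flags;

		spin_lock_irqsave(&health->wq_lock, flags);
		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
		spin_unlock_irqrestore(&health->wq_lock, flags);
	}

	/* scheduling side: honours the flag under the same lock */
	static void demo_health_trigger_work(struct mlx5_core_health *health)
	{
		unsigned long flags;

		spin_lock_irqsave(&health->wq_lock, flags);
		if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
			queue_work(health->wq, &health->work);
		spin_unlock_irqrestore(&health->wq_lock, flags);
	}
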
Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") Signed-off-by: Jack Morgenstein Reviewed-by: Feras Daoud Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/health.c | 10 +++++++++- drivers/net/ethernet/mellanox/mlx5/core/main.c | 6 +++--- include/linux/mlx5/driver.h | 2 +- 3 files changed, 13 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index d39b0b7011b2..9f39aeca863f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -331,9 +331,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev) add_timer(&health->timer); } -void mlx5_stop_health_poll(struct mlx5_core_dev *dev) +void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health) { struct mlx5_core_health *health = &dev->priv.health; + unsigned long flags; + + if (disable_health) { + spin_lock_irqsave(&health->wq_lock, flags); + set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags); + set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); + spin_unlock_irqrestore(&health->wq_lock, flags); + } del_timer_sync(&health->timer); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index cf3e4a659052..739aad0a0b35 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1286,7 +1286,7 @@ err_cleanup_once: mlx5_cleanup_once(dev); err_stop_poll: - mlx5_stop_health_poll(dev); + mlx5_stop_health_poll(dev, boot); if (mlx5_cmd_teardown_hca(dev)) { dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); goto out_err; @@ -1346,7 +1346,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_free_irq_vectors(dev); if (cleanup) mlx5_cleanup_once(dev); - mlx5_stop_health_poll(dev); + mlx5_stop_health_poll(dev, cleanup); err = mlx5_cmd_teardown_hca(dev); if (err) { dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); @@ -1608,7 +1608,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev) * with the HCA, so the health polll is no longer needed. */ mlx5_drain_health_wq(dev); - mlx5_stop_health_poll(dev); + mlx5_stop_health_poll(dev, false); ret = mlx5_cmd_force_teardown_hca(dev); if (ret) { diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 7a452716de4b..aa65f58c6610 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -1052,7 +1052,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); void mlx5_start_health_poll(struct mlx5_core_dev *dev); -void mlx5_stop_health_poll(struct mlx5_core_dev *dev); +void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health); void mlx5_drain_health_wq(struct mlx5_core_dev *dev); void mlx5_trigger_health_work(struct mlx5_core_dev *dev); void mlx5_drain_health_recovery(struct mlx5_core_dev *dev); -- cgit v1.2.3 From 8d71e818506718e8d7032ce824b5c74a17d4f7a5 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 21 Aug 2018 16:04:41 +0300 Subject: net/mlx5: Use u16 for Work Queue buffer fragment size Minimal stride size is 16. Hence, the number of strides in a fragment (of PAGE_SIZE) is <= PAGE_SIZE / 16 <= 4K. u16 is sufficient to represent this. 
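Spelling the bound out (editor's arithmetic, not from the patch): even with 64KB pages, a fragment holds at most 65536 / 16 = 4096 strides, so frag_sz_m1 is at most 4095, far below U16_MAX (65535). A compile-time restatement of that argument could look like the following, where DEMO_MIN_STRIDE_SZ is an invented name for the minimal stride size cited in the changelog:

	#include <linux/kernel.h>
	#include <linux/build_bug.h>

	#define DEMO_MIN_STRIDE_SZ	16

	static inline void demo_check_frag_count_fits_u16(void)
	{
		/* strides per PAGE_SIZE fragment, minus one, must fit in a u16 */
		BUILD_BUG_ON(PAGE_SIZE / DEMO_MIN_STRIDE_SZ - 1 > U16_MAX);
	}
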
Fixes: 388ca8be0037 ("IB/mlx5: Implement fragmented completion queue (CQ)") Signed-off-by: Tariq Toukan Reviewed-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/wq.h | 2 +- include/linux/mlx5/driver.h | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index c8c315eb5128..d838af9539b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -39,9 +39,9 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) return (u32)wq->fbc.sz_m1 + 1; } -u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) +u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) { - return (u32)wq->fbc.frag_sz_m1 + 1; + return wq->fbc.frag_sz_m1 + 1; } u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 2bd4c3184eba..3a1a170bb2d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -80,7 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl); u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); -u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); +u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index aa65f58c6610..3a1258fd8ac3 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -362,7 +362,7 @@ struct mlx5_frag_buf { struct mlx5_frag_buf_ctrl { struct mlx5_frag_buf frag_buf; u32 sz_m1; - u32 frag_sz_m1; + u16 frag_sz_m1; u32 strides_offset; u8 log_sz; u8 log_stride; -- cgit v1.2.3 From a09036221092989b88c55d24d1f12ceb1d7d361f Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Tue, 21 Aug 2018 16:07:58 +0300 Subject: net/mlx5: Use u16 for Work Queue buffer strides offset Minimal stride size is 16. Hence, the number of strides in a fragment (of PAGE_SIZE) is <= PAGE_SIZE / 16 <= 4K. u16 is sufficient to represent this. 
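The same bound covers the offset. As a worked example with invented numbers: if the RQ occupies the first 1024 bytes of a fragment and the SQ stride is 64 bytes, the SQ begins at stride offset 1024 / 64 = 16; in the worst case the offset still cannot exceed PAGE_SIZE / 16 <= 4096 strides, so u16 is safe here for the same reason as in the previous patch.
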
Fixes: d7037ad73daa ("net/mlx5: Fix QP fragmented buffer allocation") Signed-off-by: Tariq Toukan Reviewed-by: Eran Ben Elisha Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 2 +- include/linux/mlx5/driver.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index d838af9539b1..68e7f8df2a6d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -138,7 +138,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *qpc, struct mlx5_wq_qp *wq, struct mlx5_wq_ctrl *wq_ctrl) { - u32 sq_strides_offset; + u16 sq_strides_offset; u32 rq_pg_remainder; int err; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 3a1258fd8ac3..66d94b4557cf 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -363,7 +363,7 @@ struct mlx5_frag_buf_ctrl { struct mlx5_frag_buf frag_buf; u32 sz_m1; u16 frag_sz_m1; - u32 strides_offset; + u16 strides_offset; u8 log_sz; u8 log_stride; u8 log_frag_strides; @@ -995,7 +995,7 @@ static inline u32 mlx5_base_mkey(const u32 key) } static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz, - u32 strides_offset, + u16 strides_offset, struct mlx5_frag_buf_ctrl *fbc) { fbc->log_stride = log_stride; -- cgit v1.2.3 From 09121255c784fd36ad6237a4e239c634b0209de0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 29 Aug 2018 14:13:13 +0200 Subject: perf/UAPI: Clearly mark __PERF_SAMPLE_CALLCHAIN_EARLY as internal use Vince noted that commit: 6cbc304f2f36 ("perf/x86/intel: Fix unwind errors from PEBS entries (mk-II)") 'leaked' __PERF_SAMPLE_CALLCHAIN_EARLY into the UAPI namespace. And while sys_perf_event_open() will error out if you try to use it, it is exposed. Clearly mark it for internal use only to avoid any confusion. Requested-by: Vince Weaver Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- include/uapi/linux/perf_event.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index eeb787b1c53c..f35eb72739c0 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -144,7 +144,7 @@ enum perf_event_sample_format { PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */ - __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, + __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */ }; /* -- cgit v1.2.3 From 01c5f85aebaaddfd7e6051fb2ec80c1d4b463554 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 11 Sep 2018 10:59:53 -0600 Subject: blk-cgroup: increase number of supported policies After merging the iolatency policy, we potentially now have 4 policies being registered, but only support 3. This causes one of them to fail loading. Takashi reports that BFQ no longer works for him, because it fails to load due to policy registration failure. Bump to 5 policies, and also add a warning for when we have exceeded the global amount. If we have to touch this again, we should switch to a dynamic scheme instead. 
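For context, blkcg_policy_register() hands out slots from a fixed-size table; a minimal sketch of that pattern (editor's illustration with demo_ names, not the full blkcg code, and the error value here is illustrative) shows why the new warning matters: without it, exhausting the table makes a policy silently fail to load, which is exactly what happened to BFQ:

	#define DEMO_MAX_POLS	5	/* mirrors BLKCG_MAX_POLS after this patch */

	static struct blkcg_policy *demo_policy[DEMO_MAX_POLS];

	static int demo_policy_register(struct blkcg_policy *pol)
	{
		int i;

		/* find an empty slot in the fixed-size table */
		for (i = 0; i < DEMO_MAX_POLS; i++)
			if (!demo_policy[i])
				break;
		if (i >= DEMO_MAX_POLS) {
			pr_warn("demo_policy_register: DEMO_MAX_POLS too small\n");
			return -ENOSPC;
		}

		pol->plid = i;
		demo_policy[i] = pol;
		return 0;
	}
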
Reported-by: Takashi Iwai Reviewed-by: Jeff Moyer Tested-by: Takashi Iwai Signed-off-by: Jens Axboe --- block/blk-cgroup.c | 4 +++- include/linux/blkdev.h | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index c19f9078da1e..c630e02836a8 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c @@ -1510,8 +1510,10 @@ int blkcg_policy_register(struct blkcg_policy *pol) for (i = 0; i < BLKCG_MAX_POLS; i++) if (!blkcg_policy[i]) break; - if (i >= BLKCG_MAX_POLS) + if (i >= BLKCG_MAX_POLS) { + pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n"); goto err_unlock; + } /* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */ if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) || diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d6869e0e2b64..6980014357d4 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -54,7 +54,7 @@ struct blk_stat_callback; * Maximum number of blkcg policies allowed to be registered concurrently. * Defined here to simplify include dependency. */ -#define BLKCG_MAX_POLS 3 +#define BLKCG_MAX_POLS 5 typedef void (rq_end_io_fn)(struct request *, blk_status_t); -- cgit v1.2.3 From bfc456060d0cbcf6902a436d358b60cb1534668c Mon Sep 17 00:00:00 2001 From: Dennis Dalessandro Date: Fri, 31 Aug 2018 10:34:14 -0700 Subject: IB/hfi1,PCI: Allow bus reset while probing Calling into the new API to reset the secondary bus results in a deadlock. This occurs because the device/bus is already locked at probe time. Reverting back to the old behavior while the API is improved. Link: https://bugzilla.kernel.org/show_bug.cgi?id=200985 Fixes: c6a44ba950d1 ("PCI: Rename pci_try_reset_bus() to pci_reset_bus()") Fixes: 409888e0966e ("IB/hfi1: Use pci_try_reset_bus() for initiating PCI Secondary Bus Reset") Signed-off-by: Dennis Dalessandro Signed-off-by: Bjorn Helgaas Reviewed-by: Michael J. Ruhl Cc: Sinan Kaya --- drivers/infiniband/hw/hfi1/pcie.c | 11 ++++------- drivers/pci/pci.c | 1 + include/linux/pci.h | 3 +++ 3 files changed, 8 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index eec83757d55f..6c967dde58e7 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c @@ -893,14 +893,11 @@ static int trigger_sbr(struct hfi1_devdata *dd) } /* - * A secondary bus reset (SBR) issues a hot reset to our device. - * The following routine does a 1s wait after the reset is dropped - * per PCI Trhfa (recovery time). PCIe 3.0 section 6.6.1 - - * Conventional Reset, paragraph 3, line 35 also says that a 1s - * delay after a reset is required. Per spec requirements, - * the link is either working or not after that point. + * This is an end around to do an SBR during probe time. 
A new API needs + * to be implemented to have cleaner interface but this fixes the + * current brokenness */ - return pci_reset_bus(dev); + return pci_bridge_secondary_bus_reset(dev->bus->self); } /* diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 30b260332a10..1835f3a7aa8d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -4547,6 +4547,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev) return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS); } +EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset); static int pci_parent_bus_reset(struct pci_dev *dev, int probe) { diff --git a/include/linux/pci.h b/include/linux/pci.h index e72ca8dd6241..6925828f9f25 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1235,6 +1235,9 @@ void pci_bus_remove_resources(struct pci_bus *bus); int devm_request_pci_bus_resources(struct device *dev, struct list_head *resources); +/* Temporary until new and working PCI SBR API in place */ +int pci_bridge_secondary_bus_reset(struct pci_dev *dev); + #define pci_bus_for_each_resource(bus, res, i) \ for (i = 0; \ (res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \ -- cgit v1.2.3 From 7a9cdebdcc17e426fb5287e4a82db1dfe86339b2 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 12 Sep 2018 23:57:48 -1000 Subject: mm: get rid of vmacache_flush_all() entirely Jann Horn points out that the vmacache_flush_all() function is not only potentially expensive, it's buggy too. It also happens to be entirely unnecessary, because the sequence number overflow case can be avoided by simply making the sequence number be 64-bit. That doesn't even grow the data structures in question, because the other adjacent fields are already 64-bit. So simplify the whole thing by just making the sequence number overflow case go away entirely, which gets rid of all the complications and makes the code faster too. Win-win. 
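To see why widening the counter suffices (editor's sketch, simplified from the surrounding mm code): invalidation becomes an unconditional increment, and a lookup only trusts the per-thread cache when its snapshot of the sequence number matches the mm's. A u64 incremented once per nanosecond would take about 2^64 ns, roughly 584 years, to wrap, so the overflow path can simply be deleted:

	/* invalidation: no overflow case left to handle */
	static inline void vmacache_invalidate(struct mm_struct *mm)
	{
		mm->vmacache_seqnum++;
	}

	/* lookup side (simplified): a stale cache can never match */
	static bool demo_vmacache_valid(struct mm_struct *mm)
	{
		if (current->vmacache.seqnum != mm->vmacache_seqnum) {
			/* first lookup after an invalidate: resync and report a miss */
			current->vmacache.seqnum = mm->vmacache_seqnum;
			vmacache_flush(current);
			return false;
		}
		return true;
	}
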
[ Oleg Nesterov points out that the VMACACHE_FULL_FLUSHES statistics also just goes away entirely with this ] Reported-by: Jann Horn Suggested-by: Will Deacon Acked-by: Davidlohr Bueso Cc: Oleg Nesterov Cc: stable@kernel.org Signed-off-by: Linus Torvalds --- include/linux/mm_types.h | 2 +- include/linux/mm_types_task.h | 2 +- include/linux/vm_event_item.h | 1 - include/linux/vmacache.h | 5 ----- mm/debug.c | 4 ++-- mm/vmacache.c | 38 -------------------------------------- 6 files changed, 4 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index cd2bc939efd0..5ed8f6292a53 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -341,7 +341,7 @@ struct mm_struct { struct { struct vm_area_struct *mmap; /* list of VMAs */ struct rb_root mm_rb; - u32 vmacache_seqnum; /* per-thread vmacache */ + u64 vmacache_seqnum; /* per-thread vmacache */ #ifdef CONFIG_MMU unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h index 5fe87687664c..d7016dcb245e 100644 --- a/include/linux/mm_types_task.h +++ b/include/linux/mm_types_task.h @@ -32,7 +32,7 @@ #define VMACACHE_MASK (VMACACHE_SIZE - 1) struct vmacache { - u32 seqnum; + u64 seqnum; struct vm_area_struct *vmas[VMACACHE_SIZE]; }; diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 5c7f010676a7..47a3441cf4c4 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, #ifdef CONFIG_DEBUG_VM_VMACACHE VMACACHE_FIND_CALLS, VMACACHE_FIND_HITS, - VMACACHE_FULL_FLUSHES, #endif #ifdef CONFIG_SWAP SWAP_RA, diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h index 3e9a963edd6a..6fce268a4588 100644 --- a/include/linux/vmacache.h +++ b/include/linux/vmacache.h @@ -10,7 +10,6 @@ static inline void vmacache_flush(struct task_struct *tsk) memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas)); } -extern void vmacache_flush_all(struct mm_struct *mm); extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma); extern struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr); @@ -24,10 +23,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm, static inline void vmacache_invalidate(struct mm_struct *mm) { mm->vmacache_seqnum++; - - /* deal with overflows */ - if (unlikely(mm->vmacache_seqnum == 0)) - vmacache_flush_all(mm); } #endif /* __LINUX_VMACACHE_H */ diff --git a/mm/debug.c b/mm/debug.c index 38c926520c97..bd10aad8539a 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -114,7 +114,7 @@ EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { - pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n" + pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n" #ifdef CONFIG_MMU "get_unmapped_area %px\n" #endif @@ -142,7 +142,7 @@ void dump_mm(const struct mm_struct *mm) "tlb_flush_pending %d\n" "def_flags: %#lx(%pGv)\n", - mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, + mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size, #ifdef CONFIG_MMU mm->get_unmapped_area, #endif diff --git a/mm/vmacache.c b/mm/vmacache.c index ea517bef7dc5..cdc32a3b02fa 100644 --- a/mm/vmacache.c +++ b/mm/vmacache.c @@ -19,44 +19,6 @@ #endif #define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK) -/* - * Flush vma caches for threads that share a given mm. 
- * - * The operation is safe because the caller holds the mmap_sem - * exclusively and other threads accessing the vma cache will - * have mmap_sem held at least for read, so no extra locking - * is required to maintain the vma cache. - */ -void vmacache_flush_all(struct mm_struct *mm) -{ - struct task_struct *g, *p; - - count_vm_vmacache_event(VMACACHE_FULL_FLUSHES); - - /* - * Single threaded tasks need not iterate the entire - * list of process. We can avoid the flushing as well - * since the mm's seqnum was increased and don't have - * to worry about other threads' seqnum. Current's - * flush will occur upon the next lookup. - */ - if (atomic_read(&mm->mm_users) == 1) - return; - - rcu_read_lock(); - for_each_process_thread(g, p) { - /* - * Only flush the vmacache pointers as the - * mm seqnum is already set and curr's will - * be set upon invalidation when the next - * lookup is done. - */ - if (mm == p->mm) - vmacache_flush(p); - } - rcu_read_unlock(); -} - /* * This task may be accessing a foreign mm via (for example) * get_user_pages()->find_vma(). The vmacache is task-local and this -- cgit v1.2.3 From 500dd232449e7c07500e713dc6970aa713f8e4f1 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Thu, 13 Sep 2018 13:48:27 +0100 Subject: asm-generic: io: Fix ioport_map() for !CONFIG_GENERIC_IOMAP && CONFIG_INDIRECT_PIO The !CONFIG_GENERIC_IOMAP version of ioport_map uses MMIO_UPPER_LIMIT to prevent users from making I/O accesses outside the expected I/O range - however it erroneously treats MMIO_UPPER_LIMIT as a mask which is contradictory to its other users. The introduction of CONFIG_INDIRECT_PIO, which subtracts an arbitrary amount from IO_SPACE_LIMIT to form MMIO_UPPER_LIMIT, results in ioport_map mangling the given port rather than capping it. We address this by aligning more closely with the CONFIG_GENERIC_IOMAP implementation of ioport_map by using the comparison operator and returning NULL where the port exceeds MMIO_UPPER_LIMIT. Though note that we preserve the existing behavior of masking with IO_SPACE_LIMIT such that we don't break existing buggy drivers that somehow rely on this masking. Fixes: 5745392e0c2b ("PCI: Apply the new generic I/O management on PCI IO hosts") Reported-by: Will Deacon Reviewed-by: Arnd Bergmann Signed-off-by: Andrew Murray Signed-off-by: Will Deacon --- include/asm-generic/io.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 66d1d45fa2e1..d356f802945a 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -1026,7 +1026,8 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size) #define ioport_map ioport_map static inline void __iomem *ioport_map(unsigned long port, unsigned int nr) { - return PCI_IOBASE + (port & MMIO_UPPER_LIMIT); + port &= IO_SPACE_LIMIT; + return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port; } #endif -- cgit v1.2.3 From 197ecb3802c04499d8ff4f8cb28f6efa008067db Mon Sep 17 00:00:00 2001 From: Marek Marczykowski-Górecki Date: Fri, 7 Sep 2018 18:49:08 +0200 Subject: xen/balloon: add runtime control for scrubbing ballooned out pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Scrubbing pages on initial balloon down can take some time, especially in nested virtualization case (nested EPT is slow). When HVM/PVH guest is started with memory= significantly lower than maxmem=, all the extra pages will be scrubbed before returning to Xen. 
But since most of them weren't used at all at that point, Xen needs to populate them first (from populate-on-demand pool). In nested virt case (Xen inside KVM) this slows down the guest boot by 15-30s with just 1.5GB needed to be returned to Xen. Add runtime parameter to enable/disable it, to allow initially disabling scrubbing, then enable it back during boot (for example in initramfs). Such usage relies on assumption that a) most pages ballooned out during initial boot weren't used at all, and b) even if they were, very few secrets are in the guest at that time (before any serious userspace kicks in). Convert CONFIG_XEN_SCRUB_PAGES to CONFIG_XEN_SCRUB_PAGES_DEFAULT (also enabled by default), controlling default value for the new runtime switch. Signed-off-by: Marek Marczykowski-Górecki Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky --- Documentation/ABI/stable/sysfs-devices-system-xen_memory | 9 +++++++++ Documentation/admin-guide/kernel-parameters.txt | 6 ++++++ drivers/xen/Kconfig | 10 +++++++--- drivers/xen/mem-reservation.c | 4 ++++ drivers/xen/xen-balloon.c | 3 +++ include/xen/mem-reservation.h | 7 ++++--- 6 files changed, 33 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/Documentation/ABI/stable/sysfs-devices-system-xen_memory b/Documentation/ABI/stable/sysfs-devices-system-xen_memory index caa311d59ac1..6d83f95a8a8e 100644 --- a/Documentation/ABI/stable/sysfs-devices-system-xen_memory +++ b/Documentation/ABI/stable/sysfs-devices-system-xen_memory @@ -75,3 +75,12 @@ Contact: Konrad Rzeszutek Wilk Description: Amount (in KiB) of low (or normal) memory in the balloon. + +What: /sys/devices/system/xen_memory/xen_memory0/scrub_pages +Date: September 2018 +KernelVersion: 4.20 +Contact: xen-devel@lists.xenproject.org +Description: + Control scrubbing pages before returning them to Xen for others domains + use. Can be set with xen_scrub_pages cmdline + parameter. Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT. diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 64a3bf54b974..92eb1f42240d 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -5000,6 +5000,12 @@ Disables the PV optimizations forcing the HVM guest to run as generic HVM guest with no PV drivers. + xen_scrub_pages= [XEN] + Boolean option to control scrubbing pages before giving them back + to Xen, for use by other domains. Can be also changed at runtime + with /sys/devices/system/xen_memory/xen_memory0/scrub_pages. + Default value controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT. + xirc2ps_cs= [NET,PCMCIA] Format: ,,,,,[,[,[,]]] diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index b459edfacff3..90d387b50ab7 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT This value is used to allocate enough space in internal tables needed for physical memory administration. -config XEN_SCRUB_PAGES - bool "Scrub pages before returning them to system" +config XEN_SCRUB_PAGES_DEFAULT + bool "Scrub pages before returning them to system by default" depends on XEN_BALLOON default y help Scrub pages before returning them to the system for reuse by other domains. This makes sure that any confidential data is not accidentally visible to other domains. Is it more - secure, but slightly less efficient. + secure, but slightly less efficient. 
This can be controlled with + xen_scrub_pages=0 parameter and + /sys/devices/system/xen_memory/xen_memory0/scrub_pages. + This option only sets the default value. + If in doubt, say yes. config XEN_DEV_EVTCHN diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c index 084799c6180e..3782cf070338 100644 --- a/drivers/xen/mem-reservation.c +++ b/drivers/xen/mem-reservation.c @@ -14,6 +14,10 @@ #include #include +#include + +bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT); +core_param(xen_scrub_pages, xen_scrub_pages, bool, 0); /* * Use one extent per PAGE_SIZE to avoid to break down the page into diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c index 294f35ce9e46..63c1494a8d73 100644 --- a/drivers/xen/xen-balloon.c +++ b/drivers/xen/xen-balloon.c @@ -44,6 +44,7 @@ #include #include #include +#include #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) @@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay); static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay); static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count); static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count); +static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages); static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr, char *buf) @@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = { &dev_attr_max_schedule_delay.attr.attr, &dev_attr_retry_count.attr.attr, &dev_attr_max_retry_count.attr.attr, + &dev_attr_scrub_pages.attr.attr, NULL }; diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h index 80b52b4945e9..a2ab516fcd2c 100644 --- a/include/xen/mem-reservation.h +++ b/include/xen/mem-reservation.h @@ -17,11 +17,12 @@ #include +extern bool xen_scrub_pages; + static inline void xenmem_reservation_scrub_page(struct page *page) { -#ifdef CONFIG_XEN_SCRUB_PAGES - clear_highpage(page); -#endif + if (xen_scrub_pages) + clear_highpage(page); } #ifdef CONFIG_XEN_HAVE_PVMMU -- cgit v1.2.3
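As a closing note on the last patch: the workflow the changelog describes amounts to booting with xen_scrub_pages=0 so the initial balloon-down skips scrubbing, then writing 1 to /sys/devices/system/xen_memory/xen_memory0/scrub_pages (from an initramfs hook, for instance) before meaningful secrets enter the guest. The sysfs side is standard bool-attribute plumbing; a minimal sketch of that pattern, with invented demo_ names, looks like:

	static bool demo_flag = true;
	static DEVICE_BOOL_ATTR(demo_flag, 0644, demo_flag);

	static struct attribute *demo_attrs[] = {
		/* DEVICE_BOOL_ATTR() wraps the bool in a dev_ext_attribute */
		&dev_attr_demo_flag.attr.attr,
		NULL
	};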