Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/damon.h            |  2
-rw-r--r--  include/linux/firmware/imx/sm.h  | 47
-rw-r--r--  include/linux/io_uring_types.h   |  3
-rw-r--r--  include/linux/mlx5/driver.h      |  1
-rw-r--r--  include/linux/mlx5/fs.h          |  2
-rw-r--r--  include/linux/pm_domain.h        |  7
-rw-r--r--  include/linux/rv.h               |  6
-rw-r--r--  include/linux/swap.h             | 10
-rw-r--r--  include/linux/virtio_config.h    | 11
9 files changed, 77 insertions, 12 deletions
diff --git a/include/linux/damon.h b/include/linux/damon.h
index f13664c62ddd..9e62b2a85538 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -636,6 +636,7 @@ struct damon_operations {
* @data: Data that will be passed to @fn.
* @repeat: Repeat invocations.
* @return_code: Return code from @fn invocation.
+ * @dealloc_on_cancel: De-allocate when canceled.
*
* Control damon_call(), which requests specific kdamond to invoke a given
* function. Refer to damon_call() for more details.
@@ -645,6 +646,7 @@ struct damon_call_control {
void *data;
bool repeat;
int return_code;
+ bool dealloc_on_cancel;
/* private: internal use only */
/* informs if the kdamond finished handling of the request */
struct completion completion;
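
The new dealloc_on_cancel field suggests a use where the caller hands kdamond a
heap-allocated control structure and wants it freed if the request is canceled.
A minimal sketch of that usage follows; my_fn() and the exact damon_call()
calling convention are assumptions, not part of this patch.

	struct damon_call_control *control;

	control = kzalloc(sizeof(*control), GFP_KERNEL);
	if (!control)
		return -ENOMEM;
	control->fn = my_fn;			/* caller-supplied callback (assumed) */
	control->repeat = false;
	control->dealloc_on_cancel = true;	/* kdamond frees control if canceled */
	damon_call(ctx, control);		/* ctx: the target kdamond's damon_ctx */
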
diff --git a/include/linux/firmware/imx/sm.h b/include/linux/firmware/imx/sm.h
index d4212bc42b2c..a33b45027356 100644
--- a/include/linux/firmware/imx/sm.h
+++ b/include/linux/firmware/imx/sm.h
@@ -26,13 +26,43 @@
#define SCMI_IMX94_CTRL_SAI3_MCLK 5U /*!< WAKE SAI3 MCLK */
#define SCMI_IMX94_CTRL_SAI4_MCLK 6U /*!< WAKE SAI4 MCLK */
+#if IS_ENABLED(CONFIG_IMX_SCMI_MISC_DRV)
int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val);
int scmi_imx_misc_ctrl_set(u32 id, u32 val);
+#else
+static inline int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int scmi_imx_misc_ctrl_set(u32 id, u32 val)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IMX_SCMI_CPU_DRV)
int scmi_imx_cpu_start(u32 cpuid, bool start);
int scmi_imx_cpu_started(u32 cpuid, bool *started);
int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start, bool boot,
bool resume);
+#else
+static inline int scmi_imx_cpu_start(u32 cpuid, bool start)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_cpu_started(u32 cpuid, bool *started)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_cpu_reset_vector_set(u32 cpuid, u64 vector, bool start,
+ bool boot, bool resume)
+{
+ return -EOPNOTSUPP;
+}
+#endif
enum scmi_imx_lmm_op {
SCMI_IMX_LMM_BOOT,
@@ -44,7 +74,24 @@ enum scmi_imx_lmm_op {
#define SCMI_IMX_LMM_OP_FORCEFUL 0
#define SCMI_IMX_LMM_OP_GRACEFUL BIT(0)
+#if IS_ENABLED(CONFIG_IMX_SCMI_LMM_DRV)
int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags);
int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info);
int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector);
+#else
+static inline int scmi_imx_lmm_operation(u32 lmid, enum scmi_imx_lmm_op op, u32 flags)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_lmm_info(u32 lmid, struct scmi_imx_lmm_info *info)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int scmi_imx_lmm_reset_vector_set(u32 lmid, u32 cpuid, u32 flags, u64 vector)
+{
+ return -EOPNOTSUPP;
+}
+#endif
#endif
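
The #else branches above follow the usual conditional-stub idiom: consumers can
call the SCMI i.MX helpers unconditionally, and when the corresponding driver is
disabled in Kconfig the static inline stub returns -EOPNOTSUPP instead of causing
a build failure. A consumer sketch (the error handling is illustrative only):

	int ret = scmi_imx_cpu_start(cpuid, true);

	if (ret == -EOPNOTSUPP)
		dev_warn(dev, "SCMI i.MX CPU support not built in\n");
	else if (ret)
		return ret;
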
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 80a178f3d896..12f5ee43850e 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -420,9 +420,6 @@ struct io_ring_ctx {
struct list_head defer_list;
unsigned nr_drained;
- struct io_alloc_cache msg_cache;
- spinlock_t msg_lock;
-
#ifdef CONFIG_NET_RX_BUSY_POLL
struct list_head napi_list; /* track busy poll napi_id */
spinlock_t napi_lock; /* napi_list lock */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 8c5fbfb85749..10fe492e1fed 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -663,6 +663,7 @@ struct mlx5e_resources {
bool tisn_valid;
} hw_objs;
struct net_device *uplink_netdev;
+ netdevice_tracker tracker;
struct mutex uplink_netdev_lock;
struct mlx5_crypto_dek_priv *dek_priv;
};
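
The added netdevice_tracker pairs with the existing uplink_netdev pointer so the
reference on that netdev can be taken and dropped through the ref-tracking
helpers. A sketch of the expected pairing, assuming the resources are reached as
mdev->mlx5e_res (the surrounding code is illustrative, not taken from this patch):

	mutex_lock(&mdev->mlx5e_res.uplink_netdev_lock);
	netdev_hold(netdev, &mdev->mlx5e_res.tracker, GFP_KERNEL);
	mdev->mlx5e_res.uplink_netdev = netdev;
	mutex_unlock(&mdev->mlx5e_res.uplink_netdev_lock);

	/* ... later, on release ... */
	netdev_put(netdev, &mdev->mlx5e_res.tracker);
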
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 86055d55836d..6ac76a0c3827 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -308,6 +308,8 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
struct mlx5_fc *mlx5_fc_local_create(u32 counter_id, u32 offset, u32 bulk_size);
void mlx5_fc_local_destroy(struct mlx5_fc *counter);
+void mlx5_fc_local_get(struct mlx5_fc *counter);
+void mlx5_fc_local_put(struct mlx5_fc *counter);
u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
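
Assuming the new get/put pair implements plain reference counting on a locally
created flow counter, a usage sketch might look like the following; the error
handling and the interaction with mlx5_fc_local_destroy() are assumptions:

	struct mlx5_fc *fc = mlx5_fc_local_create(counter_id, offset, bulk_size);

	if (IS_ERR(fc))
		return PTR_ERR(fc);
	mlx5_fc_local_get(fc);		/* a second user takes a reference */
	/* ... both users operate on the counter ... */
	mlx5_fc_local_put(fc);		/* second user drops its reference */
	mlx5_fc_local_destroy(fc);	/* original owner releases the counter */
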
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index c84edf217819..f67a2cb7d781 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -115,6 +115,12 @@ struct dev_pm_domain_list {
* genpd provider specific way, likely through a
* parent device node. This flag makes genpd to
* skip its internal support for this.
+ *
+ * GENPD_FLAG_NO_STAY_ON: For genpd OF providers a powered-on PM domain at
+ * initialization is prevented from being
+ * powered-off until the ->sync_state() callback is
+ * invoked. This flag informs genpd to allow a
+ * power-off without waiting for ->sync_state().
*/
#define GENPD_FLAG_PM_CLK (1U << 0)
#define GENPD_FLAG_IRQ_SAFE (1U << 1)
@@ -126,6 +132,7 @@ struct dev_pm_domain_list {
#define GENPD_FLAG_OPP_TABLE_FW (1U << 7)
#define GENPD_FLAG_DEV_NAME_FW (1U << 8)
#define GENPD_FLAG_NO_SYNC_STATE (1U << 9)
+#define GENPD_FLAG_NO_STAY_ON (1U << 10)
enum gpd_status {
GENPD_STATE_ON = 0, /* PM domain is on */
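
A genpd OF provider that must stay free to power off its domain before
->sync_state() runs would set the new flag before registering. A minimal sketch,
with pd standing in for an illustrative provider-private structure:

	pd->genpd.flags |= GENPD_FLAG_NO_STAY_ON;
	ret = pm_genpd_init(&pd->genpd, NULL, false);
	if (ret)
		return ret;
	ret = of_genpd_add_provider_simple(np, &pd->genpd);
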
diff --git a/include/linux/rv.h b/include/linux/rv.h
index 14410a42faef..9520aab34bcb 100644
--- a/include/linux/rv.h
+++ b/include/linux/rv.h
@@ -7,16 +7,14 @@
#ifndef _LINUX_RV_H
#define _LINUX_RV_H
-#include <linux/types.h>
-#include <linux/list.h>
-
#define MAX_DA_NAME_LEN 32
#define MAX_DA_RETRY_RACING_EVENTS 3
#ifdef CONFIG_RV
+#include <linux/array_size.h>
#include <linux/bitops.h>
+#include <linux/list.h>
#include <linux/types.h>
-#include <linux/array_size.h>
/*
* Deterministic automaton per-object variables.
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 2fe6ed2cc3fd..7012a0f758d8 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -385,6 +385,16 @@ void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
void mark_page_accessed(struct page *);
void folio_mark_accessed(struct folio *);
+static inline bool folio_may_be_lru_cached(struct folio *folio)
+{
+ /*
+ * Holding PMD-sized folios in per-CPU LRU cache unbalances accounting.
+ * Holding small numbers of low-order mTHP folios in per-CPU LRU cache
+ * will be sensible, but nobody has implemented and tested that yet.
+ */
+ return !folio_test_large(folio);
+}
+
extern atomic_t lru_disable_count;
static inline bool lru_cache_disabled(void)
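
The helper gives LRU batching code one predicate for deciding whether a folio may
sit in the per-CPU cache: only small (non-large) folios qualify for now. A sketch
of the intended call pattern; both helpers below are hypothetical names, not part
of this patch:

	if (folio_may_be_lru_cached(folio))
		add_to_percpu_lru_batch(folio);		/* hypothetical batching path */
	else
		add_folio_to_lru_directly(folio);	/* hypothetical direct-add path */
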
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 8bf156dde554..7427b79d6f3d 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -193,14 +193,15 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
}
static inline void virtio_get_features(struct virtio_device *vdev,
- u64 *features)
+ u64 *features_out)
{
if (vdev->config->get_extended_features) {
- vdev->config->get_extended_features(vdev, features);
+ vdev->config->get_extended_features(vdev, features_out);
return;
}
- virtio_features_from_u64(features, vdev->config->get_features(vdev));
+ virtio_features_from_u64(features_out,
+ vdev->config->get_features(vdev));
}
/**
@@ -326,11 +327,11 @@ int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
static inline
bool virtio_get_shm_region(struct virtio_device *vdev,
- struct virtio_shm_region *region, u8 id)
+ struct virtio_shm_region *region_out, u8 id)
{
if (!vdev->config->get_shm_region)
return false;
- return vdev->config->get_shm_region(vdev, region, id);
+ return vdev->config->get_shm_region(vdev, region_out, id);
}
static inline bool virtio_is_little_endian(struct virtio_device *vdev)
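
The virtio renames only make explicit that features_out and region_out are output
parameters; call sites are unchanged. A typical caller of the shared-memory
lookup, using the virtio-fs cache region ID purely as an example:

	struct virtio_shm_region region;

	if (!virtio_get_shm_region(vdev, &region, VIRTIO_FS_SHMCAP_ID_CACHE))
		return -ENODEV;
	/* region.addr and region.len now describe the shared memory window */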