Diffstat (limited to 'include')
 include/linux/genalloc.h       |  1 +
 include/linux/kasan.h          | 16 ++++++++++++++++
 include/linux/kexec.h          |  4 ++--
 include/linux/leafops.h        |  4 ++--
 include/linux/memory-failure.h |  2 ++
 include/linux/mm.h             |  8 ++++----
 6 files changed, 27 insertions(+), 8 deletions(-)
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 0bd581003cd5..60de63e46b33 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -44,6 +44,7 @@ struct gen_pool;
* @nr: The number of zeroed bits we're looking for
* @data: optional additional data used by the callback
* @pool: the pool being allocated from
+ * @start_addr: start address of memory chunk
*/
typedef unsigned long (*genpool_algo_t)(unsigned long *map,
unsigned long size,
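For context, @start_addr is what lets an algorithm callback reason about the chunk's base address. A minimal sketch of such a callback, loosely modelled on the in-tree gen_pool_first_fit_align() (the function name below is illustrative, not part of this change):

static unsigned long my_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	struct genpool_data_align *alignment = data;
	int order = pool->min_alloc_order;
	unsigned long align_mask, align_off;

	/* Convert the requested byte alignment into bitmap units. */
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	/* The chunk may not start on an aligned boundary; compensate. */
	align_off = (start_addr & (alignment->align - 1)) >> order;

	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, align_off);
}

Such a callback would be installed on a pool with gen_pool_set_algo().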
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index f335c1d7b61d..9c6ac4b62eb9 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -28,6 +28,7 @@ typedef unsigned int __bitwise kasan_vmalloc_flags_t;
#define KASAN_VMALLOC_INIT ((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u)
+#define KASAN_VMALLOC_KEEP_TAG ((__force kasan_vmalloc_flags_t)0x08u)
#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */
#define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */
@@ -630,6 +631,16 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
__kasan_poison_vmalloc(start, size);
}
+void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+ kasan_vmalloc_flags_t flags);
+static __always_inline void
+kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+ kasan_vmalloc_flags_t flags)
+{
+ if (kasan_enabled())
+ __kasan_unpoison_vmap_areas(vms, nr_vms, flags);
+}
+
#else /* CONFIG_KASAN_VMALLOC */
static inline void kasan_populate_early_vm_area_shadow(void *start,
@@ -654,6 +665,11 @@ static inline void *kasan_unpoison_vmalloc(const void *start,
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
+static __always_inline void
+kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+ kasan_vmalloc_flags_t flags)
+{ }
+
#endif /* CONFIG_KASAN_VMALLOC */
#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
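The new kasan_unpoison_vmap_areas() helper batches what callers previously did with a per-area kasan_unpoison_vmalloc() loop. A hypothetical caller on a pcpu_get_vm_areas()-style path (the function name and flag choice are assumptions, not taken from this patch):

static void unpoison_pcpu_areas(struct vm_struct **vms, int nr_vms)
{
	/* One call covers all nr_vms areas; a no-op when KASAN is off. */
	kasan_unpoison_vmap_areas(vms, nr_vms, KASAN_VMALLOC_PROT_NORMAL);
}

KASAN_VMALLOC_KEEP_TAG, also added here, reads as a request to keep an already-assigned tag rather than generate a fresh one; treat that as an inference from the flag name.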
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index ff7e231b0485..8a22bc9b8c6c 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -530,7 +530,7 @@ extern bool kexec_file_dbg_print;
#define kexec_dprintk(fmt, arg...) \
do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
-extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
+extern void *kimage_map_segment(struct kimage *image, int idx);
extern void kimage_unmap_segment(void *buffer);
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
@@ -540,7 +540,7 @@ static inline void __crash_kexec(struct pt_regs *regs) { }
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
static inline int kexec_crash_loaded(void) { return 0; }
-static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
+static inline void *kimage_map_segment(struct kimage *image, int idx)
{ return NULL; }
static inline void kimage_unmap_segment(void *buffer) { }
#define kexec_in_progress false
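With the signature change, callers identify a segment by index instead of passing its (addr, size) pair. A sketch of a caller walking all segments (loop and body are illustrative only):

	unsigned long i;
	void *buf;

	for (i = 0; i < image->nr_segments; i++) {
		buf = kimage_map_segment(image, i);
		if (!buf)
			continue;
		/* ... read or update the segment contents through buf ... */
		kimage_unmap_segment(buf);
	}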
diff --git a/include/linux/leafops.h b/include/linux/leafops.h
index cfafe7a5e7b1..a9ff94b744f2 100644
--- a/include/linux/leafops.h
+++ b/include/linux/leafops.h
@@ -133,7 +133,7 @@ static inline bool softleaf_is_none(softleaf_t entry)
/**
* softleaf_type() - Identify the type of leaf entry.
- * @enntry: Leaf entry.
+ * @entry: Leaf entry.
*
* Returns: the leaf entry type associated with @entry.
*/
@@ -534,7 +534,7 @@ static inline bool pte_is_uffd_wp_marker(pte_t pte)
/**
* pte_is_uffd_marker() - Does this PTE entry encode a userfault-specific marker
* leaf entry?
- * @entry: Leaf entry.
+ * @pte: PTE entry.
*
* It's useful to be able to determine which leaf entries encode UFFD-specific
* markers so we can handle these correctly.
diff --git a/include/linux/memory-failure.h b/include/linux/memory-failure.h
index bc326503d2d2..7b5e11cf905f 100644
--- a/include/linux/memory-failure.h
+++ b/include/linux/memory-failure.h
@@ -9,6 +9,8 @@ struct pfn_address_space;
struct pfn_address_space {
struct interval_tree_node node;
struct address_space *mapping;
+ int (*pfn_to_vma_pgoff)(struct vm_area_struct *vma,
+ unsigned long pfn, pgoff_t *pgoff);
};
int register_pfn_address_space(struct pfn_address_space *pfn_space);
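The new pfn_to_vma_pgoff callback gives memory-failure a way to ask the owner of a registered PFN range how a poisoned PFN maps to a page offset within a given VMA. A hypothetical driver-side sketch (every identifier outside struct pfn_address_space is made up, and the pgoff convention is an assumption):

static unsigned long mydev_first_pfn;	/* start of the device PFN range */

static int mydev_pfn_to_vma_pgoff(struct vm_area_struct *vma,
				  unsigned long pfn, pgoff_t *pgoff)
{
	if (pfn < mydev_first_pfn)
		return -EINVAL;
	/* Assumes the VMA maps the device range from file offset 0. */
	*pgoff = pfn - mydev_first_pfn;
	return 0;
}

static struct pfn_address_space mydev_pfn_space = {
	.pfn_to_vma_pgoff = mydev_pfn_to_vma_pgoff,
};

The structure would then be registered, with .node and .mapping filled in, via register_pfn_address_space().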
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 15076261d0c2..6f959d8ca4b4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2459,10 +2459,10 @@ static inline int folio_expected_ref_count(const struct folio *folio)
if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio)))
return 0;
- if (folio_test_anon(folio)) {
- /* One reference per page from the swapcache. */
- ref_count += folio_test_swapcache(folio) << order;
- } else {
+ /* One reference per page from the swapcache. */
+ ref_count += folio_test_swapcache(folio) << order;
+
+ if (!folio_test_anon(folio)) {
/* One reference per page from the pagecache. */
ref_count += !!folio->mapping << order;
/* One reference from PG_private. */
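The mm.h hunk counts the swap cache reference for non-anonymous folios as well, rather than only for anonymous ones. A worked example, assuming an order-0 shmem folio that has been moved to the swap cache (folio->mapping already cleared), with PG_private clear and no page table mappings:

	swap cache:   folio_test_swapcache() << 0 = 1  (previously skipped: the folio is not anon)
	page cache:   !!folio->mapping << 0       = 0
	PG_private:   folio_test_private()        = 0
	mappings:     folio_mapcount()            = 0

so folio_expected_ref_count() now returns 1 for such a folio, where it used to return 0.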