Diffstat (limited to 'include/linux/memremap.h')
-rw-r--r--  include/linux/memremap.h | 58
1 file changed, 53 insertions(+), 5 deletions(-)
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index e5951ba12a28..713ec0435b48 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -25,7 +25,6 @@ struct vmem_altmap {
unsigned long free;
unsigned long align;
unsigned long alloc;
- bool inaccessible;
};
/*
@@ -77,11 +76,11 @@ enum memory_type {
struct dev_pagemap_ops {
/*
- * Called once the page refcount reaches 0. The reference count will be
+ * Called once the folio refcount reaches 0. The reference count will be
* reset to one by the core code after the method is called to prepare
- * for handing out the page again.
+ * for handing out the folio again.
*/
- void (*page_free)(struct page *page);
+ void (*folio_free)(struct folio *folio);
/*
* Used for private (un-addressable) device memory only. Must migrate
@@ -100,6 +99,13 @@ struct dev_pagemap_ops {
*/
int (*memory_failure)(struct dev_pagemap *pgmap, unsigned long pfn,
unsigned long nr_pages, int mf_flags);
+
+ /*
+ * Used for private (un-addressable) device memory only.
+ * This callback is used when a folio is split into
+ * smaller folios.
+ */
+ void (*folio_split)(struct folio *head, struct folio *tail);
};
#define PGMAP_ALTMAP_VALID (1 << 0)
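
For orientation, here is a minimal driver-side sketch of how the renamed folio_free callback and the new folio_split callback might be wired up. Everything prefixed my_drv_ is hypothetical and not part of this patch; the folio_zone_device_data()/folio_set_zone_device_data() accessors it uses are the helpers added further down in this diff.

        /* Hypothetical driver-side sketch; my_drv_* names are illustrative only. */
        static vm_fault_t my_drv_migrate_to_ram(struct vm_fault *vmf)
        {
                /* A real driver would migrate the device-private folio back to RAM. */
                return VM_FAULT_SIGBUS;
        }

        static void my_drv_folio_free(struct folio *folio)
        {
                /* Called once the folio refcount reaches 0; drop driver bookkeeping. */
                kfree(folio_zone_device_data(folio));
        }

        static void my_drv_folio_split(struct folio *head, struct folio *tail)
        {
                /*
                 * A real driver would split its own bookkeeping here; this sketch
                 * only gives the new tail folio its own (empty) zone_device_data.
                 */
                if (tail)
                        folio_set_zone_device_data(tail, NULL);
        }

        static const struct dev_pagemap_ops my_drv_pagemap_ops = {
                .folio_free     = my_drv_folio_free,
                .folio_split    = my_drv_folio_split,
                .migrate_to_ram = my_drv_migrate_to_ram,
        };
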
@@ -177,6 +183,18 @@ static inline bool folio_is_pci_p2pdma(const struct folio *folio)
folio->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
}
+static inline void *folio_zone_device_data(const struct folio *folio)
+{
+ VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio);
+ return folio->page.zone_device_data;
+}
+
+static inline void folio_set_zone_device_data(struct folio *folio, void *data)
+{
+ VM_WARN_ON_FOLIO(!folio_is_device_private(folio), folio);
+ folio->page.zone_device_data = data;
+}
+
static inline bool is_pci_p2pdma_page(const struct page *page)
{
return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
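
A hedged usage sketch for the two new accessors follows. The my_drv_state structure and my_drv_attach_state() helper are hypothetical; the relevant point is that both accessors go through VM_WARN_ON_FOLIO() and expect a device-private folio.

        struct my_drv_state {
                unsigned long dev_pfn;  /* hypothetical: device-side backing location */
        };

        static int my_drv_attach_state(struct folio *folio, unsigned long dev_pfn)
        {
                struct my_drv_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

                if (!state)
                        return -ENOMEM;
                state->dev_pfn = dev_pfn;
                /* Warns if @folio is not device-private, then stores the pointer. */
                folio_set_zone_device_data(folio, state);
                return 0;
        }
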
@@ -206,7 +224,7 @@ static inline bool is_fsdax_page(const struct page *page)
}
#ifdef CONFIG_ZONE_DEVICE
-void zone_device_page_init(struct page *page);
+void zone_device_page_init(struct page *page, unsigned int order);
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
@@ -215,6 +233,31 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn);
bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn);
unsigned long memremap_compat_align(void);
+
+static inline void zone_device_folio_init(struct folio *folio, unsigned int order)
+{
+ zone_device_page_init(&folio->page, order);
+ if (order)
+ folio_set_large_rmappable(folio);
+}
+
+static inline void zone_device_private_split_cb(struct folio *original_folio,
+ struct folio *new_folio)
+{
+ if (folio_is_device_private(original_folio)) {
+ if (!original_folio->pgmap->ops->folio_split) {
+ if (new_folio) {
+ new_folio->pgmap = original_folio->pgmap;
+ new_folio->page.mapping =
+ original_folio->page.mapping;
+ }
+ } else {
+ original_folio->pgmap->ops->folio_split(original_folio,
+ new_folio);
+ }
+ }
+}
+
#else
static inline void *devm_memremap_pages(struct device *dev,
struct dev_pagemap *pgmap)
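
A hedged sketch of handing out a high-order device-private folio through the new order-aware init path. my_drv_alloc_pfn() stands in for however a driver picks a free range in its pagemap and is not a real API.

        unsigned long my_drv_alloc_pfn(unsigned int order);     /* hypothetical allocator */

        static struct folio *my_drv_alloc_folio(unsigned int order)
        {
                unsigned long pfn = my_drv_alloc_pfn(order);
                struct folio *folio = page_folio(pfn_to_page(pfn));

                /*
                 * Re-initializes the folio for handout and, for order > 0,
                 * marks it large-rmappable via zone_device_folio_init() above.
                 */
                zone_device_folio_init(folio, order);
                return folio;
        }
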
@@ -248,6 +291,11 @@ static inline unsigned long memremap_compat_align(void)
{
return PAGE_SIZE;
}
+
+static inline void zone_device_private_split_cb(struct folio *original_folio,
+ struct folio *new_folio)
+{
+}
#endif /* CONFIG_ZONE_DEVICE */
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
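
To illustrate where zone_device_private_split_cb() would sit, here is a caller-side sketch (not the actual mm split code) that notifies the owning driver about every tail folio produced when a large folio of old_order is split into new_order pieces; for folios that are not device-private the helper is a no-op.

        static void my_notify_split(struct folio *original, unsigned int old_order,
                                    unsigned int new_order)
        {
                long nr = 1L << old_order;
                long step = 1L << new_order;
                long i;

                for (i = step; i < nr; i += step)
                        zone_device_private_split_cb(original,
                                        page_folio(folio_page(original, i)));
        }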