Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c    |  2
-rw-r--r--  mm/filemap.c        | 34
-rw-r--r--  mm/hmm.c            | 19
-rw-r--r--  mm/kmsan/hooks.c    | 13
-rw-r--r--  mm/page-writeback.c | 45
5 files changed, 64 insertions, 49 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 128b525b8811..41b6c9386b69 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -1031,7 +1031,7 @@ struct backing_dev_info *bdi_alloc(int node_id)
 		kfree(bdi);
 		return NULL;
 	}
-	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
+	bdi->capabilities = BDI_CAP_WRITEBACK;
 	bdi->ra_pages = VM_READAHEAD_PAGES;
 	bdi->io_pages = VM_READAHEAD_PAGES;
 	timer_setup(&bdi->laptop_mode_wb_timer, laptop_mode_timer_fn, 0);
diff --git a/mm/filemap.c b/mm/filemap.c
index a52dd38d2b4a..13f0259d993c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1621,7 +1621,7 @@ static void filemap_end_dropbehind(struct folio *folio)
  * completes. Do that now. If we fail, it's likely because of a big folio -
  * just reset dropbehind for that case and latter completions should invalidate.
  */
-static void filemap_end_dropbehind_write(struct folio *folio)
+void folio_end_dropbehind(struct folio *folio)
 {
 	if (!folio_test_dropbehind(folio))
 		return;
@@ -1638,16 +1638,18 @@ static void filemap_end_dropbehind_write(struct folio *folio)
 		folio_unlock(folio);
 	}
 }
+EXPORT_SYMBOL_GPL(folio_end_dropbehind);
 
 /**
- * folio_end_writeback - End writeback against a folio.
+ * folio_end_writeback_no_dropbehind - End writeback against a folio.
  * @folio: The folio.
  *
  * The folio must actually be under writeback.
+ * This call is intended for filesystems that need to defer dropbehind.
  *
  * Context: May be called from process or interrupt context.
  */
-void folio_end_writeback(struct folio *folio)
+void folio_end_writeback_no_dropbehind(struct folio *folio)
 {
 	VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
 
@@ -1663,6 +1665,25 @@ void folio_end_writeback(struct folio *folio)
 		folio_rotate_reclaimable(folio);
 	}
 
+	if (__folio_end_writeback(folio))
+		folio_wake_bit(folio, PG_writeback);
+
+	acct_reclaim_writeback(folio);
+}
+EXPORT_SYMBOL_GPL(folio_end_writeback_no_dropbehind);
+
+/**
+ * folio_end_writeback - End writeback against a folio.
+ * @folio: The folio.
+ *
+ * The folio must actually be under writeback.
+ *
+ * Context: May be called from process or interrupt context.
+ */
+void folio_end_writeback(struct folio *folio)
+{
+	VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
+
 	/*
 	 * Writeback does not hold a folio reference of its own, relying
 	 * on truncation to wait for the clearing of PG_writeback.
@@ -1670,11 +1691,8 @@ void folio_end_writeback(struct folio *folio)
 	 * reused before the folio_wake_bit().
 	 */
 	folio_get(folio);
-	if (__folio_end_writeback(folio))
-		folio_wake_bit(folio, PG_writeback);
-
-	filemap_end_dropbehind_write(folio);
-	acct_reclaim_writeback(folio);
+	folio_end_writeback_no_dropbehind(folio);
+	folio_end_dropbehind(folio);
 	folio_put(folio);
 }
 EXPORT_SYMBOL(folio_end_writeback);
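The filemap.c hunks split the tail of folio_end_writeback() into an exported pair: folio_end_writeback_no_dropbehind() clears the writeback state, and folio_end_dropbehind() performs the (possibly deferred) invalidation. A minimal sketch of how a filesystem completion path might consume the pair, assuming a hypothetical my_io_end structure and workqueue deferral; only the two folio_* exports come from this patch:

```c
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_io_end {			/* hypothetical completion cookie */
	struct work_struct work;
	struct folio *folio;
};

/* Process context: safe to take the folio lock and invalidate. */
static void my_dropbehind_work(struct work_struct *work)
{
	struct my_io_end *end = container_of(work, struct my_io_end, work);

	folio_end_dropbehind(end->folio);
	folio_put(end->folio);		/* drop the ref taken at completion */
	kfree(end);
}

/* Possibly called from irq context when the write I/O completes. */
static void my_writeback_done(struct my_io_end *end)
{
	/*
	 * Writeback holds no folio reference of its own; pin the folio
	 * so it cannot be freed before the deferred dropbehind runs.
	 */
	folio_get(end->folio);
	folio_end_writeback_no_dropbehind(end->folio);

	INIT_WORK(&end->work, my_dropbehind_work);
	schedule_work(&end->work);
}
```

The folio_get()/folio_put() pair mirrors what folio_end_writeback() itself does around the combined call, since writeback does not pin the folio.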
diff --git a/mm/hmm.c b/mm/hmm.c
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -806,7 +806,7 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
 	case PCI_P2PDMA_MAP_NONE:
 		break;
 	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
-		attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+		attrs |= DMA_ATTR_MMIO;
 		pfns[idx] |= HMM_PFN_P2PDMA;
 		break;
 	case PCI_P2PDMA_MAP_BUS_ADDR:
@@ -835,8 +835,8 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
 	if (WARN_ON_ONCE(dma_need_unmap(dev) && !dma_addrs))
 		goto error;
 
-	dma_addr = dma_map_page(dev, page, 0, map->dma_entry_size,
-				DMA_BIDIRECTIONAL);
+	dma_addr = dma_map_phys(dev, paddr, map->dma_entry_size,
+				DMA_BIDIRECTIONAL, attrs);
 	if (dma_mapping_error(dev, dma_addr))
 		goto error;
 
@@ -871,16 +871,17 @@ bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx)
 	if ((pfns[idx] & valid_dma) != valid_dma)
 		return false;
 
+	if (pfns[idx] & HMM_PFN_P2PDMA)
+		attrs |= DMA_ATTR_MMIO;
+
 	if (pfns[idx] & HMM_PFN_P2PDMA_BUS)
 		; /* no need to unmap bus address P2P mappings */
-	else if (dma_use_iova(state)) {
-		if (pfns[idx] & HMM_PFN_P2PDMA)
-			attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+	else if (dma_use_iova(state))
 		dma_iova_unlink(dev, state, idx * map->dma_entry_size,
 				map->dma_entry_size, DMA_BIDIRECTIONAL, attrs);
-	} else if (dma_need_unmap(dev))
-		dma_unmap_page(dev, dma_addrs[idx], map->dma_entry_size,
-			       DMA_BIDIRECTIONAL);
+	else if (dma_need_unmap(dev))
+		dma_unmap_phys(dev, dma_addrs[idx], map->dma_entry_size,
+			       DMA_BIDIRECTIONAL, attrs);
 
 	pfns[idx] &= ~(HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA |
 			HMM_PFN_P2PDMA_BUS);
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 97de3d6194f0..90bee565b9bc 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -336,14 +336,16 @@ static void kmsan_handle_dma_page(const void *addr, size_t size,
 }
 
 /* Helper function to handle DMA data transfers. */
-void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
+void kmsan_handle_dma(phys_addr_t phys, size_t size,
 		      enum dma_data_direction dir)
 {
-	u64 page_offset, to_go, addr;
+	struct page *page = phys_to_page(phys);
+	u64 page_offset, to_go;
+	void *addr;
 
-	if (PageHighMem(page))
+	if (PhysHighMem(phys))
 		return;
-	addr = (u64)page_address(page) + offset;
+	addr = page_to_virt(page);
 	/*
 	 * The kernel may occasionally give us adjacent DMA pages not belonging
 	 * to the same allocation. Process them separately to avoid triggering
@@ -366,8 +368,7 @@ void kmsan_handle_dma_sg(struct scatterlist *sg, int nents,
 	int i;
 
 	for_each_sg(sg, item, nents, i)
-		kmsan_handle_dma(sg_page(item), item->offset, item->length,
-				 dir);
+		kmsan_handle_dma(sg_phys(item), item->length, dir);
 }
 
 /* Functions from kmsan-checks.h follow. */
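Both mm/hmm.c and mm/kmsan/hooks.c move from page-plus-offset interfaces to physical-address ones (dma_map_phys()/dma_unmap_phys(), kmsan_handle_dma(phys, ...)), and host-bridge P2PDMA mappings now carry DMA_ATTR_MMIO instead of DMA_ATTR_SKIP_CPU_SYNC. A minimal sketch of the resulting map/unmap pattern, with the dma_*_phys signatures as used in the hunks above; the helper names and error handling here are hypothetical:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int my_map_entry(struct device *dev, phys_addr_t paddr,
			size_t size, bool is_p2p, dma_addr_t *out)
{
	unsigned long attrs = 0;
	dma_addr_t dma_addr;

	/* P2P MMIO space must not be treated as cacheable RAM. */
	if (is_p2p)
		attrs |= DMA_ATTR_MMIO;

	dma_addr = dma_map_phys(dev, paddr, size, DMA_BIDIRECTIONAL, attrs);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	*out = dma_addr;
	return 0;
}

static void my_unmap_entry(struct device *dev, dma_addr_t dma_addr,
			   size_t size, bool is_p2p)
{
	/* attrs must match the ones passed at map time. */
	unsigned long attrs = is_p2p ? DMA_ATTR_MMIO : 0;

	dma_unmap_phys(dev, dma_addr, size, DMA_BIDIRECTIONAL, attrs);
}
```

Carrying identical attrs on both sides matters: the unmap hunk reconstructs DMA_ATTR_MMIO from the stored HMM_PFN_P2PDMA flag rather than persisting the attrs themselves.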
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 5f90fd6a7137..757bc4d3b5b5 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2990,26 +2990,23 @@ bool __folio_end_writeback(struct folio *folio)
 
 	if (mapping && mapping_use_writeback_tags(mapping)) {
 		struct inode *inode = mapping->host;
-		struct backing_dev_info *bdi = inode_to_bdi(inode);
+		struct bdi_writeback *wb;
 		unsigned long flags;
 
 		xa_lock_irqsave(&mapping->i_pages, flags);
 		ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
 		__xa_clear_mark(&mapping->i_pages, folio->index,
 					PAGECACHE_TAG_WRITEBACK);
-		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
-			struct bdi_writeback *wb = inode_to_wb(inode);
-
-			wb_stat_mod(wb, WB_WRITEBACK, -nr);
-			__wb_writeout_add(wb, nr);
-			if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
-				wb_inode_writeback_end(wb);
+		wb = inode_to_wb(inode);
+		wb_stat_mod(wb, WB_WRITEBACK, -nr);
+		__wb_writeout_add(wb, nr);
+
+		if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
+			wb_inode_writeback_end(wb);
+			if (mapping->host)
+				sb_clear_inode_writeback(mapping->host);
 		}
 
-		if (mapping->host && !mapping_tagged(mapping,
-						     PAGECACHE_TAG_WRITEBACK))
-			sb_clear_inode_writeback(mapping->host);
-
 		xa_unlock_irqrestore(&mapping->i_pages, flags);
 	} else {
 		ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
@@ -3034,7 +3031,7 @@ void __folio_start_writeback(struct folio *folio, bool keep_write)
 	if (mapping && mapping_use_writeback_tags(mapping)) {
 		XA_STATE(xas, &mapping->i_pages, folio->index);
 		struct inode *inode = mapping->host;
-		struct backing_dev_info *bdi = inode_to_bdi(inode);
+		struct bdi_writeback *wb;
 		unsigned long flags;
 		bool on_wblist;
 
@@ -3045,21 +3042,19 @@ void __folio_start_writeback(struct folio *folio, bool keep_write)
 		on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
 
 		xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
-		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
-			struct bdi_writeback *wb = inode_to_wb(inode);
-
-			wb_stat_mod(wb, WB_WRITEBACK, nr);
-			if (!on_wblist)
-				wb_inode_writeback_start(wb);
+		wb = inode_to_wb(inode);
+		wb_stat_mod(wb, WB_WRITEBACK, nr);
+		if (!on_wblist) {
+			wb_inode_writeback_start(wb);
+			/*
+			 * We can come through here when swapping anonymous
+			 * folios, so we don't necessarily have an inode to
+			 * track for sync.
+			 */
+			if (mapping->host)
+				sb_mark_inode_writeback(mapping->host);
 		}
 
-		/*
-		 * We can come through here when swapping anonymous
-		 * folios, so we don't necessarily have an inode to
-		 * track for sync.
-		 */
-		if (mapping->host && !on_wblist)
-			sb_mark_inode_writeback(mapping->host);
 		if (!folio_test_dirty(folio))
 			xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
 		if (!keep_write)
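With BDI_CAP_WRITEBACK_ACCT gone, per-wb writeback accounting is keyed solely off mapping_use_writeback_tags(), and a filesystem that allocates its own bdi no longer has an opt-out bit to manage. A minimal sketch of the allocation side under that assumption (the my_fs_setup_bdi() helper is hypothetical, and registration via bdi_register() is omitted):

```c
#include <linux/backing-dev.h>
#include <linux/fs.h>

static int my_fs_setup_bdi(struct super_block *sb)
{
	struct backing_dev_info *bdi;

	/*
	 * After this patch bdi_alloc() sets BDI_CAP_WRITEBACK only;
	 * WB_WRITEBACK stats, __wb_writeout_add() and
	 * sb_{mark,clear}_inode_writeback() now run for every mapping
	 * that uses writeback tags, with no per-bdi opt-out.
	 */
	bdi = bdi_alloc(NUMA_NO_NODE);
	if (!bdi)
		return -ENOMEM;

	/* Tune readahead if needed; no capability bits to adjust. */
	bdi->ra_pages = VM_READAHEAD_PAGES;
	sb->s_bdi = bdi;
	return 0;
}
```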