Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/ata/libata-sff.c | 6
-rw-r--r-- | drivers/base/node.c | 4
-rw-r--r-- | drivers/block/aoe/aoecmd.c | 2
-rw-r--r-- | drivers/block/zram/zram_drv.c | 23
-rw-r--r-- | drivers/char/mem.c | 21
-rw-r--r-- | drivers/firmware/efi/efi-init.c | 29
-rw-r--r-- | drivers/gpu/drm/drm_gem.c | 4
-rw-r--r-- | drivers/gpu/drm/i915/gem/i915_gem_pages.c | 2
-rw-r--r-- | drivers/memstick/core/mspro_block.c | 3
-rw-r--r-- | drivers/memstick/host/jmb38x_ms.c | 3
-rw-r--r-- | drivers/memstick/host/tifm_ms.c | 3
-rw-r--r-- | drivers/misc/vmw_balloon.c | 4
-rw-r--r-- | drivers/mmc/host/tifm_sd.c | 4
-rw-r--r-- | drivers/mmc/host/usdhi6rol0.c | 4
-rw-r--r-- | drivers/scsi/scsi_lib.c | 3
-rw-r--r-- | drivers/scsi/sg.c | 3
-rw-r--r-- | drivers/vfio/pci/pds/lm.c | 3
-rw-r--r-- | drivers/vfio/pci/virtio/migrate.c | 3
-rw-r--r-- | drivers/video/fbdev/core/fbcon.c | 9
-rw-r--r-- | drivers/virtio/virtio_balloon.c | 10
-rw-r--r-- | drivers/xen/Kconfig | 1
-rw-r--r-- | drivers/xen/gntdev.c | 5
22 files changed, 83 insertions, 66 deletions
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 7fc407255eb4..1e2a2c33cdc8 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -614,7 +614,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 	offset = qc->cursg->offset + qc->cursg_ofs;
 
 	/* get the current page and offset */
-	page = nth_page(page, (offset >> PAGE_SHIFT));
+	page += offset >> PAGE_SHIFT;
 	offset %= PAGE_SIZE;
 
 	/* don't overrun current sg */
@@ -631,7 +631,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 		unsigned int split_len = PAGE_SIZE - offset;
 
 		ata_pio_xfer(qc, page, offset, split_len);
-		ata_pio_xfer(qc, nth_page(page, 1), 0, count - split_len);
+		ata_pio_xfer(qc, page + 1, 0, count - split_len);
 	} else {
 		ata_pio_xfer(qc, page, offset, count);
 	}
@@ -751,7 +751,7 @@ next_sg:
 	offset = sg->offset + qc->cursg_ofs;
 
 	/* get the current page and offset */
-	page = nth_page(page, (offset >> PAGE_SHIFT));
+	page += offset >> PAGE_SHIFT;
 	offset %= PAGE_SIZE;
 
 	/* don't overrun current sg */
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 3399594136b2..67b01d579737 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -885,6 +885,10 @@ int register_one_node(int nid)
 
 	node_devices[nid] = node;
 	error = register_node(node_devices[nid], nid);
+	if (error) {
+		node_devices[nid] = NULL;
+		return error;
+	}
 
 	/* link cpu under this node */
 	for_each_present_cpu(cpu) {
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 6298f8e271e3..a9affb7c264d 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1761,6 +1761,6 @@ aoecmd_exit(void)
 	kfree(kts);
 	kfree(ktiowq);
 
-	free_page((unsigned long) page_address(empty_page));
+	__free_page(empty_page);
 	empty_page = NULL;
 }
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 42809cf6dd82..a43074657531 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1225,18 +1225,6 @@ static void comp_algorithm_set(struct zram *zram, u32 prio, const char *alg)
 	zram->comp_algs[prio] = alg;
 }
 
-static ssize_t __comp_algorithm_show(struct zram *zram, u32 prio,
-				     char *buf, ssize_t at)
-{
-	ssize_t sz;
-
-	down_read(&zram->init_lock);
-	sz = zcomp_available_show(zram->comp_algs[prio], buf, at);
-	up_read(&zram->init_lock);
-
-	return sz;
-}
-
 static int __comp_algorithm_store(struct zram *zram, u32 prio, const char *buf)
 {
 	char *compressor;
@@ -1387,8 +1375,12 @@ static ssize_t comp_algorithm_show(struct device *dev,
 				   char *buf)
 {
 	struct zram *zram = dev_to_zram(dev);
+	ssize_t sz;
 
-	return __comp_algorithm_show(zram, ZRAM_PRIMARY_COMP, buf, 0);
+	down_read(&zram->init_lock);
+	sz = zcomp_available_show(zram->comp_algs[ZRAM_PRIMARY_COMP], buf, 0);
+	up_read(&zram->init_lock);
+	return sz;
 }
 
 static ssize_t comp_algorithm_store(struct device *dev,
@@ -1412,14 +1404,15 @@ static ssize_t recomp_algorithm_show(struct device *dev,
 	ssize_t sz = 0;
 	u32 prio;
 
+	down_read(&zram->init_lock);
 	for (prio = ZRAM_SECONDARY_COMP; prio < ZRAM_MAX_COMPS; prio++) {
 		if (!zram->comp_algs[prio])
 			continue;
 
 		sz += sysfs_emit_at(buf, sz, "#%d: ", prio);
-		sz += __comp_algorithm_show(zram, prio, buf, sz);
+		sz += zcomp_available_show(zram->comp_algs[prio], buf, sz);
 	}
-
+	up_read(&zram->init_lock);
 	return sz;
 }
 
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 48839958b0b1..34b815901b20 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -512,11 +512,18 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
+#ifndef CONFIG_MMU
+static unsigned long get_unmapped_area_zero(struct file *file,
+				unsigned long addr, unsigned long len,
+				unsigned long pgoff, unsigned long flags)
+{
+	return -ENOSYS;
+}
+#else
 static unsigned long get_unmapped_area_zero(struct file *file,
 				unsigned long addr, unsigned long len,
 				unsigned long pgoff, unsigned long flags)
 {
-#ifdef CONFIG_MMU
 	if (flags & MAP_SHARED) {
 		/*
 		 * mmap_zero() will call shmem_zero_setup() to create a file,
@@ -527,12 +534,18 @@ static unsigned long get_unmapped_area_zero(struct file *file,
 		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
 	}
 
-	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
-	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
+	/*
+	 * Otherwise flags & MAP_PRIVATE: with no shmem object beneath it,
+	 * attempt to map aligned to huge page size if possible, otherwise we
+	 * fall back to system page size mappings.
+	 */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	return thp_get_unmapped_area(file, addr, len, pgoff, flags);
 #else
-	return -ENOSYS;
+	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
 #endif
 }
+#endif /* CONFIG_MMU */
 
 static ssize_t write_full(struct file *file, const char __user *buf,
 			  size_t count, loff_t *ppos)
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index a00e07b853f2..a65c2d5b9e7b 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -12,6 +12,7 @@
 #include <linux/efi.h>
 #include <linux/fwnode.h>
 #include <linux/init.h>
+#include <linux/kexec_handover.h>
 #include <linux/memblock.h>
 #include <linux/mm_types.h>
 #include <linux/of.h>
@@ -164,12 +165,32 @@ static __init void reserve_regions(void)
 	pr_info("Processing EFI memory map:\n");
 
 	/*
-	 * Discard memblocks discovered so far: if there are any at this
-	 * point, they originate from memory nodes in the DT, and UEFI
-	 * uses its own memory map instead.
+	 * Discard memblocks discovered so far except for KHO scratch
+	 * regions. Most memblocks at this point originate from memory nodes
+	 * in the DT and UEFI uses its own memory map instead. However, if
+	 * KHO is enabled, scratch regions, which are good known memory
+	 * must be preserved.
 	 */
 	memblock_dump_all();
-	memblock_remove(0, PHYS_ADDR_MAX);
+
+	if (is_kho_boot()) {
+		struct memblock_region *r;
+
+		/* Remove all non-KHO regions */
+		for_each_mem_region(r) {
+			if (!memblock_is_kho_scratch(r)) {
+				memblock_remove(r->base, r->size);
+				r--;
+			}
+		}
+	} else {
+		/*
+		 * KHO is disabled. Discard memblocks discovered so far:
+		 * if there are any at this point, they originate from memory
+		 * nodes in the DT, and UEFI uses its own memory map instead.
+		 */
+		memblock_remove(0, PHYS_ADDR_MAX);
+	}
 
 	for_each_efi_memory_desc(md) {
 		paddr = md->phys_addr;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8d25cc65707d..f884d155a832 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -628,7 +628,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 	struct page **pages;
 	struct folio *folio;
 	struct folio_batch fbatch;
-	long i, j, npages;
+	unsigned long i, j, npages;
 
 	if (WARN_ON(!obj->filp))
 		return ERR_PTR(-EINVAL);
@@ -652,7 +652,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 
 	i = 0;
 	while (i < npages) {
-		long nr;
+		unsigned long nr;
 		folio = shmem_read_folio_gfp(mapping, i,
 				mapping_gfp_mask(mapping));
 		if (IS_ERR(folio))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 76d2178572b6..3f09cbce05bb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -765,7 +765,7 @@ __i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
 	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
 
 	sg = i915_gem_object_get_sg(obj, n, &offset);
-	return nth_page(sg_page(sg), offset);
+	return sg_page(sg) + offset;
 }
 
 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 075519caa547..e507bb11c802 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -560,8 +560,7 @@ has_int_reg:
 			t_offset += msb->current_page * msb->page_size;
 
 			sg_set_page(&t_sg,
-				    nth_page(sg_page(&(msb->req_sg[msb->current_seg])),
-					     t_offset >> PAGE_SHIFT),
+				    sg_page(&(msb->req_sg[msb->current_seg])) + (t_offset >> PAGE_SHIFT),
 				    msb->page_size, offset_in_page(t_offset));
 
 			memstick_init_req_sg(*mrq, msb->data_dir == READ
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index cddddb3a5a27..79e66e30417c 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -317,8 +317,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
 		unsigned int p_off;
 
 		if (host->req->long_data) {
-			pg = nth_page(sg_page(&host->req->sg),
-				      off >> PAGE_SHIFT);
+			pg = sg_page(&host->req->sg) + (off >> PAGE_SHIFT);
 			p_off = offset_in_page(off);
 			p_cnt = PAGE_SIZE - p_off;
 			p_cnt = min(p_cnt, length);
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index db7f3a088fb0..0b6a90661eee 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -201,8 +201,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
 		unsigned int p_off;
 
 		if (host->req->long_data) {
-			pg = nth_page(sg_page(&host->req->sg),
-				      off >> PAGE_SHIFT);
+			pg = sg_page(&host->req->sg) + (off >> PAGE_SHIFT);
 			p_off = offset_in_page(off);
 			p_cnt = PAGE_SIZE - p_off;
 			p_cnt = min(p_cnt, length);
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 6653fc53c951..6df51ee8db62 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1806,7 +1806,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 		 * the list after acquiring the lock.
 		 */
 		get_page(newpage);
-		ret = MIGRATEPAGE_SUCCESS;
+		ret = 0;
 	}
 
 	/* Update the balloon list under the @pages_lock */
@@ -1817,7 +1817,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 	 * If we succeed just insert it to the list and update the statistics
 	 * under the lock.
 	 */
-	if (ret == MIGRATEPAGE_SUCCESS) {
+	if (!ret) {
 		balloon_page_insert(&b->b_dev_info, newpage);
 		__count_vm_event(BALLOON_MIGRATE);
 	}
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index ac636efd911d..2cd69c9e9571 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -191,7 +191,7 @@ static void tifm_sd_transfer_data(struct tifm_sd *host)
 		}
 		off = sg[host->sg_pos].offset + host->block_pos;
 
-		pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
+		pg = sg_page(&sg[host->sg_pos]) + (off >> PAGE_SHIFT);
 		p_off = offset_in_page(off);
 		p_cnt = PAGE_SIZE - p_off;
 		p_cnt = min(p_cnt, cnt);
@@ -240,7 +240,7 @@ static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
 		}
 		off = sg[host->sg_pos].offset + host->block_pos;
 
-		pg = nth_page(sg_page(&sg[host->sg_pos]), off >> PAGE_SHIFT);
+		pg = sg_page(&sg[host->sg_pos]) + (off >> PAGE_SHIFT);
 		p_off = offset_in_page(off);
 		p_cnt = PAGE_SIZE - p_off;
 		p_cnt = min(p_cnt, cnt);
diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
index 85b49c07918b..3bccf800339b 100644
--- a/drivers/mmc/host/usdhi6rol0.c
+++ b/drivers/mmc/host/usdhi6rol0.c
@@ -323,7 +323,7 @@ static void usdhi6_blk_bounce(struct usdhi6_host *host,
 
 	host->head_pg.page = host->pg.page;
 	host->head_pg.mapped = host->pg.mapped;
-	host->pg.page = nth_page(host->pg.page, 1);
+	host->pg.page = host->pg.page + 1;
 	host->pg.mapped = kmap(host->pg.page);
 
 	host->blk_page = host->bounce_buf;
@@ -503,7 +503,7 @@ static void usdhi6_sg_advance(struct usdhi6_host *host)
 	/* We cannot get here after crossing a page border */
 
 	/* Next page in the same SG */
-	host->pg.page = nth_page(sg_page(host->sg), host->page_idx);
+	host->pg.page = sg_page(host->sg) + host->page_idx;
 	host->pg.mapped = kmap(host->pg.page);
 	host->blk_page = host->pg.mapped;
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 0c65ecfedfbd..d7e42293b864 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -3148,8 +3148,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
 	/* Offset starting from the beginning of first page in this sg-entry */
 	*offset = *offset - len_complete + sg->offset;
 
-	/* Assumption: contiguous pages can be accessed as "page + i" */
-	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
+	page = sg_page(sg) + (*offset >> PAGE_SHIFT);
 	*offset &= ~PAGE_MASK;
 
 	/* Bytes in this sg-entry from *offset to the end of the page */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 3c02a5f7b5f3..4c62c597c7be 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1235,8 +1235,7 @@ sg_vma_fault(struct vm_fault *vmf)
 		len = vma->vm_end - sa;
 		len = (len < length) ? len : length;
 		if (offset < len) {
-			struct page *page = nth_page(rsv_schp->pages[k],
-						     offset >> PAGE_SHIFT);
+			struct page *page = rsv_schp->pages[k] + (offset >> PAGE_SHIFT);
 			get_page(page);	/* increment page count */
 			vmf->page = page;
 			return 0; /* success */
diff --git a/drivers/vfio/pci/pds/lm.c b/drivers/vfio/pci/pds/lm.c
index f2673d395236..4d70c833fa32 100644
--- a/drivers/vfio/pci/pds/lm.c
+++ b/drivers/vfio/pci/pds/lm.c
@@ -151,8 +151,7 @@ static struct page *pds_vfio_get_file_page(struct pds_vfio_lm_file *lm_file,
 			lm_file->last_offset_sg = sg;
 			lm_file->sg_last_entry += i;
 			lm_file->last_offset = cur_offset;
-			return nth_page(sg_page(sg),
-					(offset - cur_offset) / PAGE_SIZE);
+			return sg_page(sg) + (offset - cur_offset) / PAGE_SIZE;
 		}
 		cur_offset += sg->length;
 	}
diff --git a/drivers/vfio/pci/virtio/migrate.c b/drivers/vfio/pci/virtio/migrate.c
index ba92bb4e9af9..7dd0ac866461 100644
--- a/drivers/vfio/pci/virtio/migrate.c
+++ b/drivers/vfio/pci/virtio/migrate.c
@@ -53,8 +53,7 @@ virtiovf_get_migration_page(struct virtiovf_data_buffer *buf,
 			buf->last_offset_sg = sg;
 			buf->sg_last_entry += i;
 			buf->last_offset = cur_offset;
-			return nth_page(sg_page(sg),
-					(offset - cur_offset) / PAGE_SIZE);
+			return sg_page(sg) + (offset - cur_offset) / PAGE_SIZE;
 		}
 		cur_offset += sg->length;
 	}
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 5940e2eb9231..96cc9b389246 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -279,14 +279,7 @@ static int fbcon_get_rotate(struct fb_info *info)
 
 static bool fbcon_skip_panic(struct fb_info *info)
 {
-/* panic_cpu is not exported, and can't be used if built as module. Use
- * oops_in_progress instead, but non-fatal oops won't be printed.
- */
-#if defined(MODULE)
-	return (info->skip_panic && unlikely(oops_in_progress));
-#else
-	return (info->skip_panic && unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID));
-#endif
+	return (info->skip_panic && unlikely(panic_in_progress()));
 }
 
 static inline bool fbcon_is_active(struct vc_data *vc, struct fb_info *info)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index e299e18346a3..7f3fd72678eb 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -488,8 +488,7 @@ static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
 		page = balloon_page_pop(&vb->free_page_list);
 		if (!page)
 			break;
-		free_pages((unsigned long)page_address(page),
-			   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
+		__free_pages(page, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
 	}
 	vb->num_free_page_blocks -= num_returned;
 	spin_unlock_irq(&vb->free_page_list_lock);
@@ -719,8 +718,7 @@ static int get_free_page_and_send(struct virtio_balloon *vb)
 	if (vq->num_free > 1) {
 		err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
 		if (unlikely(err)) {
-			free_pages((unsigned long)p,
-				   VIRTIO_BALLOON_HINT_BLOCK_ORDER);
+			__free_pages(page, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
 			return err;
 		}
 		virtqueue_kick(vq);
@@ -733,7 +731,7 @@ static int get_free_page_and_send(struct virtio_balloon *vb)
 		 * The vq has no available entry to add this page block, so
 		 * just free it.
 		 */
-		free_pages((unsigned long)p, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
+		__free_pages(page, VIRTIO_BALLOON_HINT_BLOCK_ORDER);
 	}
 
 	return 0;
@@ -875,7 +873,7 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
 	balloon_page_finalize(page);
 	put_page(page); /* balloon reference */
 
-	return MIGRATEPAGE_SUCCESS;
+	return 0;
 }
 #endif /* CONFIG_BALLOON_COMPACTION */
 
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 24f485827e03..f9a35ed266ec 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -138,6 +138,7 @@ config XEN_GNTDEV
 	depends on XEN
 	default m
 	select MMU_NOTIFIER
+	select FIND_NORMAL_PAGE
 	help
 	  Allows userspace processes to use grants.
 
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 91ba5078c9d9..2c960f187f7c 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -318,6 +318,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
 	BUG_ON(pgnr >= map->count);
 	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
 
+	/* Note: this will perform a pte_mkspecial() through the hypercall. */
 	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
 			  map->grants[pgnr].ref,
 			  map->grants[pgnr].domid);
@@ -525,7 +526,7 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
 	gntdev_put_map(priv, map);
 }
 
-static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
+static struct page *gntdev_vma_find_normal_page(struct vm_area_struct *vma,
 						 unsigned long addr)
 {
 	struct gntdev_grant_map *map = vma->vm_private_data;
@@ -536,7 +537,7 @@ static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
 static const struct vm_operations_struct gntdev_vmops = {
 	.open = gntdev_vma_open,
 	.close = gntdev_vma_close,
-	.find_special_page = gntdev_vma_find_special_page,
+	.find_normal_page = gntdev_vma_find_normal_page,
 };
 
 /* ------------------------------------------------------------------ */