Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/cmm.c             |  4
-rw-r--r--  arch/s390/mm/dump_pagetables.c | 23
-rw-r--r--  arch/s390/mm/extmem.c          | 17
-rw-r--r--  arch/s390/mm/fault.c           | 29
-rw-r--r--  arch/s390/mm/gmap.c            | 28
-rw-r--r--  arch/s390/mm/hugetlbpage.c     |  3
-rw-r--r--  arch/s390/mm/mmap.c            |  1
-rw-r--r--  arch/s390/mm/pageattr.c        |  4
-rw-r--r--  arch/s390/mm/pfault.c          |  3
-rw-r--r--  arch/s390/mm/pgalloc.c         |  2
-rw-r--r--  arch/s390/mm/pgtable.c         | 18
-rw-r--r--  arch/s390/mm/vmem.c            | 21
12 files changed, 63 insertions(+), 90 deletions(-)
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index e2a6eb92420f..eb7ef63fab1e 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -321,8 +321,8 @@ static int cmm_timeout_handler(const struct ctl_table *ctl, int write,
                 cmm_set_timeout(nr, seconds);
                 *ppos += *lenp;
         } else {
-                len = sprintf(buf, "%ld %ld\n",
-                              cmm_timeout_pages, cmm_timeout_seconds);
+                len = scnprintf(buf, sizeof(buf), "%ld %ld\n",
+                                cmm_timeout_pages, cmm_timeout_seconds);
                 if (len > *lenp)
                         len = *lenp;
                 memcpy(buffer, buf, len);
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 9af2aae0a515..89badbe72ae7 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -51,7 +51,7 @@ struct pg_state {
         struct seq_file *__m = (m);                     \
                                                         \
         if (__m)                                        \
-                seq_printf(__m, fmt);                   \
+                seq_puts(__m, fmt);                     \
 })
 
 static void print_prot(struct seq_file *m, unsigned int pr, int level)
@@ -291,16 +291,14 @@ static int ptdump_cmp(const void *a, const void *b)
 
 static int add_marker(unsigned long start, unsigned long end, const char *name)
 {
-        size_t oldsize, newsize;
-
-        oldsize = markers_cnt * sizeof(*markers);
-        newsize = oldsize + 2 * sizeof(*markers);
-        if (!oldsize)
-                markers = kvmalloc(newsize, GFP_KERNEL);
-        else
-                markers = kvrealloc(markers, newsize, GFP_KERNEL);
-        if (!markers)
-                goto error;
+        struct addr_marker *new;
+        size_t newsize;
+
+        newsize = (markers_cnt + 2) * sizeof(*markers);
+        new = kvrealloc(markers, newsize, GFP_KERNEL);
+        if (!new)
+                return -ENOMEM;
+        markers = new;
         markers[markers_cnt].is_start = 1;
         markers[markers_cnt].start_address = start;
         markers[markers_cnt].size = end - start;
@@ -312,9 +310,6 @@ static int add_marker(unsigned long start, unsigned long end, const char *name)
         markers[markers_cnt].name = name;
         markers_cnt++;
         return 0;
-error:
-        markers_cnt = 0;
-        return -ENOMEM;
 }
 
 static int pt_dump_init(void)
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index f7da53e212f5..6cc33c705de2 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -7,8 +7,7 @@
  * Copyright IBM Corp. 2002, 2004
  */
 
-#define KMSG_COMPONENT "extmem"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "extmem: " fmt
 
 #include <linux/kernel.h>
 #include <linux/string.h>
@@ -598,14 +597,16 @@ segment_save(char *name)
                 goto out;
         }
 
-        sprintf(cmd1, "DEFSEG %s", name);
+        snprintf(cmd1, sizeof(cmd1), "DEFSEG %s", name);
         for (i=0; i<seg->segcnt; i++) {
-                sprintf(cmd1+strlen(cmd1), " %lX-%lX %s",
-                        seg->range[i].start >> PAGE_SHIFT,
-                        seg->range[i].end >> PAGE_SHIFT,
-                        segtype_string[seg->range[i].start & 0xff]);
+                size_t len = strlen(cmd1);
+
+                snprintf(cmd1 + len, sizeof(cmd1) - len, " %lX-%lX %s",
+                         seg->range[i].start >> PAGE_SHIFT,
+                         seg->range[i].end >> PAGE_SHIFT,
+                         segtype_string[seg->range[i].start & 0xff]);
         }
-        sprintf(cmd2, "SAVESEG %s", name);
+        snprintf(cmd2, sizeof(cmd2), "SAVESEG %s", name);
         response = 0;
         cpcmd(cmd1, NULL, 0, &response);
         if (response) {
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index e1ad05bfd28a..e2e13778c36a 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -23,7 +23,6 @@
 #include <linux/ptrace.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
-#include <linux/compat.h>
 #include <linux/smp.h>
 #include <linux/kdebug.h>
 #include <linux/init.h>
@@ -133,8 +132,17 @@ static void dump_fault_info(struct pt_regs *regs)
         union teid teid = { .val = regs->int_parm_long };
         unsigned long asce;
 
-        pr_alert("Failing address: %016lx TEID: %016lx\n",
+        pr_alert("Failing address: %016lx TEID: %016lx",
                  get_fault_address(regs), teid.val);
+        if (test_facility(131))
+                pr_cont(" ESOP-2");
+        else if (machine_has_esop())
+                pr_cont(" ESOP-1");
+        else
+                pr_cont(" SOP");
+        if (test_facility(75))
+                pr_cont(" FSI");
+        pr_cont("\n");
         pr_alert("Fault in ");
         switch (teid.as) {
         case PSW_BITS_AS_HOME:
@@ -365,23 +373,20 @@ void do_protection_exception(struct pt_regs *regs)
          * The exception to this rule are aborted transactions, for these
          * the PSW already points to the correct location.
          */
-        if (!(regs->int_code & 0x200))
+        if (!(regs->int_code & 0x200)) {
                 regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
+                set_pt_regs_flag(regs, PIF_PSW_ADDR_ADJUSTED);
+        }
         /*
-         * Check for low-address protection. This needs to be treated
-         * as a special case because the translation exception code
-         * field is not guaranteed to contain valid data in this case.
+         * If bit 61 if the TEID is not set, the remainder of the
+         * TEID is unpredictable. Special handling is required.
          */
         if (unlikely(!teid.b61)) {
                 if (user_mode(regs)) {
-                        /* Low-address protection in user mode: cannot happen */
                         dump_fault_info(regs);
-                        die(regs, "Low-address protection");
+                        die(regs, "Unexpected TEID");
                 }
-                /*
-                 * Low-address protection in kernel mode means
-                 * NULL pointer write access in kernel mode.
-                 */
+                /* Assume low-address protection in kernel mode. */
                 return handle_fault_error_nolock(regs, 0);
         }
         if (unlikely(cpu_has_nx() && teid.b56)) {
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 8ff6bba107e8..603d9e5febb5 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -138,10 +138,7 @@ EXPORT_SYMBOL_GPL(gmap_create);
 
 static void gmap_flush_tlb(struct gmap *gmap)
 {
-        if (cpu_has_idte())
-                __tlb_flush_idte(gmap->asce);
-        else
-                __tlb_flush_global();
+        __tlb_flush_idte(gmap->asce);
 }
 
 static void gmap_radix_tree_free(struct radix_tree_root *root)
@@ -1988,10 +1985,8 @@ static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
         if (machine_has_tlb_guest())
                 __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE,
                             gmap->asce, IDTE_GLOBAL);
-        else if (cpu_has_idte())
-                __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
         else
-                __pmdp_csp(pmdp);
+                __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
         set_pmd(pmdp, new);
 }
 
@@ -2012,7 +2007,7 @@ static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
                                                    _SEGMENT_ENTRY_GMAP_UC |
                                                    _SEGMENT_ENTRY));
                         if (purge)
-                                __pmdp_csp(pmdp);
+                                __pmdp_cspg(pmdp);
                         set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
                 }
                 spin_unlock(&gmap->guest_table_lock);
@@ -2033,17 +2028,6 @@ void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
 EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
 
 /**
- * gmap_pmdp_csp - csp all affected guest pmd entries
- * @mm: pointer to the process mm_struct
- * @vmaddr: virtual address in the process address space
- */
-void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
-{
-        gmap_pmdp_clear(mm, vmaddr, 1);
-}
-EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
-
-/**
  * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
  * @mm: pointer to the process mm_struct
  * @vmaddr: virtual address in the process address space
@@ -2066,7 +2050,7 @@ void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
                         if (machine_has_tlb_guest())
                                 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
                                             gmap->asce, IDTE_LOCAL);
-                        else if (cpu_has_idte())
+                        else
                                 __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
                         *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
                 }
@@ -2099,10 +2083,8 @@ void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
                         if (machine_has_tlb_guest())
                                 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
                                             gmap->asce, IDTE_GLOBAL);
-                        else if (cpu_has_idte())
-                                __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
                         else
-                                __pmdp_csp(pmdp);
+                                __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
                         *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
                 }
                 spin_unlock(&gmap->guest_table_lock);
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 72e8fa136af5..d42e61c7594e 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -6,8 +6,7 @@
  * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
  */
 
-#define KMSG_COMPONENT "hugetlb"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "hugetlb: " fmt
 
 #include <linux/cpufeature.h>
 #include <linux/mm.h>
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 197c1d9497a7..2a222a7e14f4 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -15,7 +15,6 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
 #include <linux/random.h>
-#include <linux/compat.h>
 #include <linux/security.h>
 #include <linux/hugetlb.h>
 #include <asm/elf.h>
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 348e759840e7..3042647c9dbf 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -78,10 +78,8 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
                 }
                 table = (unsigned long *)((unsigned long)old & mask);
                 crdte(*old, new, table, dtt, addr, get_lowcore()->kernel_asce.val);
-        } else if (cpu_has_idte()) {
-                cspg(old, *old, new);
         } else {
-                csp((unsigned int *)old + 1, *old, new);
+                cspg(old, *old, new);
         }
 }
 
diff --git a/arch/s390/mm/pfault.c b/arch/s390/mm/pfault.c
index e6175d75e4b0..2f829448c719 100644
--- a/arch/s390/mm/pfault.c
+++ b/arch/s390/mm/pfault.c
@@ -199,8 +199,7 @@ block:
                  * return to userspace schedule() to block.
                  */
                 __set_current_state(TASK_UNINTERRUPTIBLE);
-                set_tsk_need_resched(tsk);
-                set_preempt_need_resched();
+                set_need_resched_current();
         }
 }
 out:
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 626fca116cd7..7df23528c01b 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -164,6 +164,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 {
         struct ptdesc *ptdesc = virt_to_ptdesc(table);
 
+        if (pagetable_is_reserved(ptdesc))
+                return free_reserved_ptdesc(ptdesc);
         pagetable_dtor_free(ptdesc);
 }
 
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0fde20bbc50b..7ae77df276b5 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -274,9 +274,9 @@ void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
         preempt_disable();
         atomic_inc(&mm->context.flush_count);
         if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
-                __ptep_rdp(addr, ptep, 0, 0, 1);
+                __ptep_rdp(addr, ptep, 1);
         else
-                __ptep_rdp(addr, ptep, 0, 0, 0);
+                __ptep_rdp(addr, ptep, 0);
         /*
          * PTE is not invalidated by RDP, only _PAGE_PROTECT is cleared. That
          * means it is still valid and active, and must not be changed according
@@ -360,14 +360,10 @@ static inline void pmdp_idte_global(struct mm_struct *mm,
                             mm->context.asce, IDTE_GLOBAL);
                 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
                         gmap_pmdp_idte_global(mm, addr);
-        } else if (cpu_has_idte()) {
+        } else {
                 __pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
                 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
                         gmap_pmdp_idte_global(mm, addr);
-        } else {
-                __pmdp_csp(pmdp);
-                if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
-                        gmap_pmdp_csp(mm, addr);
         }
 }
 
@@ -487,14 +483,8 @@ static inline void pudp_idte_global(struct mm_struct *mm,
         if (machine_has_tlb_guest())
                 __pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
                             mm->context.asce, IDTE_GLOBAL);
-        else if (cpu_has_idte())
-                __pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
         else
-                /*
-                 * Invalid bit position is the same for pmd and pud, so we can
-                 * reuse _pmd_csp() here
-                 */
-                __pmdp_csp((pmd_t *) pudp);
+                __pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
 }
 
 static inline pud_t pudp_flush_direct(struct mm_struct *mm,
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index f48ef361bc83..d96587b84e81 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/memory_hotplug.h>
+#include <linux/bootmem_info.h>
 #include <linux/cpufeature.h>
 #include <linux/memblock.h>
 #include <linux/pfn.h>
@@ -39,15 +40,21 @@ static void __ref *vmem_alloc_pages(unsigned int order)
 
 static void vmem_free_pages(unsigned long addr, int order, struct vmem_altmap *altmap)
 {
+        unsigned int nr_pages = 1 << order;
+        struct page *page;
+
         if (altmap) {
                 vmem_altmap_free(altmap, 1 << order);
                 return;
         }
-        /* We don't expect boot memory to be removed ever. */
-        if (!slab_is_available() ||
-            WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
-                return;
-        free_pages(addr, order);
+        page = virt_to_page((void *)addr);
+        if (PageReserved(page)) {
+                /* allocated from memblock */
+                while (nr_pages--)
+                        free_bootmem_page(page++);
+        } else {
+                free_pages(addr, order);
+        }
 }
 
 void *vmem_crst_alloc(unsigned long val)
@@ -79,10 +86,6 @@ pte_t __ref *vmem_pte_alloc(void)
 
 static void vmem_pte_free(unsigned long *table)
 {
-        /* We don't expect boot memory to be removed ever. */
-        if (!slab_is_available() ||
-            WARN_ON_ONCE(PageReserved(virt_to_page(table))))
-                return;
         page_table_free(&init_mm, table);
 }