Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                    1
-rw-r--r--  arch/s390/configs/debug_defconfig    4
-rw-r--r--  arch/s390/configs/defconfig          4
-rw-r--r--  arch/s390/crypto/sha.h               8
-rw-r--r--  arch/s390/include/asm/hugetlb.h      2
-rw-r--r--  arch/s390/include/asm/kvm_host.h     2
-rw-r--r--  arch/s390/include/asm/pgtable.h     22
-rw-r--r--  arch/s390/kernel/early.c             3
-rw-r--r--  arch/s390/kernel/uv.c               12
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S      10
-rw-r--r--  arch/s390/kvm/interrupt.c           20
-rw-r--r--  arch/s390/mm/gmap.c                  2
-rw-r--r--  arch/s390/mm/gmap_helpers.c         12
-rw-r--r--  arch/s390/mm/hugetlbpage.c           2
-rw-r--r--  arch/s390/mm/mmap.c                 10
-rw-r--r--  arch/s390/mm/pgalloc.c               6
-rw-r--r--  arch/s390/mm/pgtable.c              23
17 files changed, 81 insertions, 62 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2414ee3ff002..c4145672ca34 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -712,7 +712,6 @@ menu "Memory setup"
config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_VMEMMAP_ENABLE
- select SPARSEMEM_VMEMMAP
config ARCH_SPARSEMEM_DEFAULT
def_bool y
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 99467f2dc018..b31c1df90257 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -125,8 +125,10 @@ CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
+CONFIG_DIBS=y
+CONFIG_DIBS_LO=y
+CONFIG_SMC=m
CONFIG_SMC_DIAG=m
-CONFIG_SMC_LO=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index a8573807e0c0..161dad7ef211 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -116,8 +116,10 @@ CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
+CONFIG_DIBS=y
+CONFIG_DIBS_LO=y
+CONFIG_SMC=m
CONFIG_SMC_DIAG=m
-CONFIG_SMC_LO=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
diff --git a/arch/s390/crypto/sha.h b/arch/s390/crypto/sha.h
index cadb4b13622a..b9cd9572dd35 100644
--- a/arch/s390/crypto/sha.h
+++ b/arch/s390/crypto/sha.h
@@ -10,14 +10,15 @@
#ifndef _CRYPTO_ARCH_S390_SHA_H
#define _CRYPTO_ARCH_S390_SHA_H
+#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <crypto/sha3.h>
+#include <linux/build_bug.h>
#include <linux/types.h>
/* must be big enough for the largest SHA variant */
#define CPACF_MAX_PARMBLOCK_SIZE SHA3_STATE_SIZE
#define SHA_MAX_BLOCK_SIZE SHA3_224_BLOCK_SIZE
-#define S390_SHA_CTX_SIZE sizeof(struct s390_sha_ctx)
struct s390_sha_ctx {
u64 count; /* message length in bytes */
@@ -42,4 +43,9 @@ int s390_sha_update_blocks(struct shash_desc *desc, const u8 *data,
int s390_sha_finup(struct shash_desc *desc, const u8 *src, unsigned int len,
u8 *out);
+static inline void __check_s390_sha_ctx_size(void)
+{
+ BUILD_BUG_ON(S390_SHA_CTX_SIZE != sizeof(struct s390_sha_ctx));
+}
+
#endif
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index 931fcc413598..69131736daaa 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -39,7 +39,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline void arch_clear_hugetlb_flags(struct folio *folio)
{
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
}
#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index f870d09515cc..95d15416c39d 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -356,7 +356,7 @@ struct kvm_s390_float_interrupt {
int counters[FIRQ_MAX_COUNT];
struct kvm_s390_mchk_info mchk;
struct kvm_s390_ext_info srv_signal;
- int next_rr_cpu;
+ int last_sleep_cpu;
struct mutex ais_lock;
u8 simm;
u8 nimm;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index c1a7a92f0575..b7100c6a4054 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -2055,4 +2055,26 @@ static inline unsigned long gmap_pgste_get_pgt_addr(unsigned long *pgt)
return res;
}
+static inline pgste_t pgste_get_lock(pte_t *ptep)
+{
+ unsigned long value = 0;
+#ifdef CONFIG_PGSTE
+ unsigned long *ptr = (unsigned long *)(ptep + PTRS_PER_PTE);
+
+ do {
+ value = __atomic64_or_barrier(PGSTE_PCL_BIT, ptr);
+ } while (value & PGSTE_PCL_BIT);
+ value |= PGSTE_PCL_BIT;
+#endif
+ return __pgste(value);
+}
+
+static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
+{
+#ifdef CONFIG_PGSTE
+ barrier();
+ WRITE_ONCE(*(unsigned long *)(ptep + PTRS_PER_PTE), pgste_val(pgste) & ~PGSTE_PCL_BIT);
+#endif
+}
+
#endif /* _S390_PAGE_H */
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 9adfbdd377dc..544e5403dd91 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <asm/asm-extable.h>
#include <linux/memblock.h>
+#include <linux/kasan.h>
#include <asm/access-regs.h>
#include <asm/asm-offsets.h>
#include <asm/machine.h>
@@ -65,7 +66,7 @@ static void __init kasan_early_init(void)
{
#ifdef CONFIG_KASAN
init_task.kasan_depth = 0;
- pr_info("KernelAddressSanitizer initialized\n");
+ kasan_init_generic();
#endif
}
diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c
index 47f574cd1728..93b2a01bae40 100644
--- a/arch/s390/kernel/uv.c
+++ b/arch/s390/kernel/uv.c
@@ -144,7 +144,7 @@ int uv_destroy_folio(struct folio *folio)
folio_get(folio);
rc = uv_destroy(folio_to_phys(folio));
if (!rc)
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
folio_put(folio);
return rc;
}
@@ -193,7 +193,7 @@ int uv_convert_from_secure_folio(struct folio *folio)
folio_get(folio);
rc = uv_convert_from_secure(folio_to_phys(folio));
if (!rc)
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
folio_put(folio);
return rc;
}
@@ -289,7 +289,7 @@ static int __make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
expected = expected_folio_refs(folio) + 1;
if (!folio_ref_freeze(folio, expected))
return -EBUSY;
- set_bit(PG_arch_1, &folio->flags);
+ set_bit(PG_arch_1, &folio->flags.f);
/*
* If the UVC does not succeed or fail immediately, we don't want to
* loop for long, or we might get stall notifications.
@@ -483,18 +483,18 @@ int arch_make_folio_accessible(struct folio *folio)
* convert_to_secure.
* As secure pages are never large folios, both variants can co-exists.
*/
- if (!test_bit(PG_arch_1, &folio->flags))
+ if (!test_bit(PG_arch_1, &folio->flags.f))
return 0;
rc = uv_pin_shared(folio_to_phys(folio));
if (!rc) {
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
return 0;
}
rc = uv_convert_from_secure(folio_to_phys(folio));
if (!rc) {
- clear_bit(PG_arch_1, &folio->flags);
+ clear_bit(PG_arch_1, &folio->flags.f);
return 0;
}
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 1c606dfa595d..feecf1a6ddb4 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -209,6 +209,11 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_end = . ;
+ /* Debugging sections. */
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
+
/*
* uncompressed image info used by the decompressor
* it should match struct vmlinux_info
@@ -239,11 +244,6 @@ SECTIONS
#endif
} :NONE
- /* Debugging sections. */
- STABS_DEBUG
- DWARF_DEBUG
- ELF_DETAILS
-
/*
* Make sure that the .got.plt is either completely empty or it
* contains only the three reserved double words.
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 9384572ffa7b..c62a868cf2b6 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1323,6 +1323,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
kvm_vcpu_srcu_read_unlock(vcpu);
+ vcpu->kvm->arch.float_int.last_sleep_cpu = vcpu->vcpu_idx;
kvm_vcpu_halt(vcpu);
vcpu->valid_wakeup = false;
__unset_cpu_idle(vcpu);
@@ -1949,18 +1950,15 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
if (!online_vcpus)
return;
- /* find idle VCPUs first, then round robin */
- sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
- if (sigcpu == online_vcpus) {
- do {
- sigcpu = kvm->arch.float_int.next_rr_cpu++;
- kvm->arch.float_int.next_rr_cpu %= online_vcpus;
- /* avoid endless loops if all vcpus are stopped */
- if (nr_tries++ >= online_vcpus)
- return;
- } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
+ for (sigcpu = kvm->arch.float_int.last_sleep_cpu; ; sigcpu++) {
+ sigcpu %= online_vcpus;
+ dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
+ if (!is_vcpu_stopped(dst_vcpu))
+ break;
+ /* avoid endless loops if all vcpus are stopped */
+ if (nr_tries++ >= online_vcpus)
+ return;
}
- dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
/* make the VCPU drop out of the SIE, or wake it up if sleeping */
switch (type) {
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index c7defe4ed1f6..8ff6bba107e8 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2272,7 +2272,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
start = pmd_val(*pmd) & HPAGE_MASK;
end = start + HPAGE_SIZE;
__storage_key_init_range(start, end);
- set_bit(PG_arch_1, &folio->flags);
+ set_bit(PG_arch_1, &folio->flags.f);
cond_resched();
return 0;
}
diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c
index b63f427e7289..d4c3c36855e2 100644
--- a/arch/s390/mm/gmap_helpers.c
+++ b/arch/s390/mm/gmap_helpers.c
@@ -15,6 +15,7 @@
#include <linux/pagewalk.h>
#include <linux/ksm.h>
#include <asm/gmap_helpers.h>
+#include <asm/pgtable.h>
/**
* ptep_zap_swap_entry() - discard a swap entry.
@@ -47,6 +48,7 @@ void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr)
{
struct vm_area_struct *vma;
spinlock_t *ptl;
+ pgste_t pgste;
pte_t *ptep;
mmap_assert_locked(mm);
@@ -60,8 +62,16 @@ void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr)
ptep = get_locked_pte(mm, vmaddr, &ptl);
if (unlikely(!ptep))
return;
- if (pte_swap(*ptep))
+ if (pte_swap(*ptep)) {
+ preempt_disable();
+ pgste = pgste_get_lock(ptep);
+
ptep_zap_swap_entry(mm, pte_to_swp_entry(*ptep));
+ pte_clear(mm, vmaddr, ptep);
+
+ pgste_set_unlock(ptep, pgste);
+ preempt_enable();
+ }
pte_unmap_unlock(ptep, ptl);
}
EXPORT_SYMBOL_GPL(gmap_helper_zap_one_page);
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index e88c02c9e642..72e8fa136af5 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -155,7 +155,7 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
paddr = rste & PMD_MASK;
}
- if (!test_and_set_bit(PG_arch_1, &folio->flags))
+ if (!test_and_set_bit(PG_arch_1, &folio->flags.f))
__storage_key_init_range(paddr, paddr + size);
}
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 40a526d28184..197c1d9497a7 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -27,7 +27,7 @@ static unsigned long stack_maxrandom_size(void)
return STACK_RND_MASK << PAGE_SHIFT;
}
-static inline int mmap_is_legacy(struct rlimit *rlim_stack)
+static inline int mmap_is_legacy(const struct rlimit *rlim_stack)
{
if (current->personality & ADDR_COMPAT_LAYOUT)
return 1;
@@ -47,7 +47,7 @@ static unsigned long mmap_base_legacy(unsigned long rnd)
}
static inline unsigned long mmap_base(unsigned long rnd,
- struct rlimit *rlim_stack)
+ const struct rlimit *rlim_stack)
{
unsigned long gap = rlim_stack->rlim_cur;
unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
@@ -169,7 +169,7 @@ check_asce_limit:
* This function, called very early during the creation of a new
* process VM image, sets up which VM layout function to use:
*/
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
+void arch_pick_mmap_layout(struct mm_struct *mm, const struct rlimit *rlim_stack)
{
unsigned long random_factor = 0UL;
@@ -182,10 +182,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
*/
if (mmap_is_legacy(rlim_stack)) {
mm->mmap_base = mmap_base_legacy(random_factor);
- clear_bit(MMF_TOPDOWN, &mm->flags);
+ mm_flags_clear(MMF_TOPDOWN, mm);
} else {
mm->mmap_base = mmap_base(random_factor, rlim_stack);
- set_bit(MMF_TOPDOWN, &mm->flags);
+ mm_flags_set(MMF_TOPDOWN, mm);
}
}
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 36700384fe6b..76d92069799f 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -25,7 +25,7 @@ unsigned long *crst_table_alloc_noprof(struct mm_struct *mm)
ptdesc = pagetable_alloc_noprof(gfp, CRST_ALLOC_ORDER);
if (!ptdesc)
return NULL;
- table = ptdesc_to_virt(ptdesc);
+ table = ptdesc_address(ptdesc);
__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
return table;
}
@@ -123,7 +123,7 @@ struct ptdesc *page_table_alloc_pgste_noprof(struct mm_struct *mm)
ptdesc = pagetable_alloc_noprof(GFP_KERNEL_ACCOUNT, 0);
if (ptdesc) {
- table = (u64 *)ptdesc_to_virt(ptdesc);
+ table = (u64 *)ptdesc_address(ptdesc);
__arch_set_page_dat(table, 1);
memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
@@ -153,7 +153,7 @@ unsigned long *page_table_alloc_noprof(struct mm_struct *mm)
pagetable_free(ptdesc);
return NULL;
}
- table = ptdesc_to_virt(ptdesc);
+ table = ptdesc_address(ptdesc);
__arch_set_page_dat(table, 1);
memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 50eb57c976bc..0fde20bbc50b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -24,6 +24,7 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
+#include <asm/pgtable.h>
#include <asm/machine.h>
pgprot_t pgprot_writecombine(pgprot_t prot)
@@ -115,28 +116,6 @@ static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
return old;
}
-static inline pgste_t pgste_get_lock(pte_t *ptep)
-{
- unsigned long value = 0;
-#ifdef CONFIG_PGSTE
- unsigned long *ptr = (unsigned long *)(ptep + PTRS_PER_PTE);
-
- do {
- value = __atomic64_or_barrier(PGSTE_PCL_BIT, ptr);
- } while (value & PGSTE_PCL_BIT);
- value |= PGSTE_PCL_BIT;
-#endif
- return __pgste(value);
-}
-
-static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
-{
-#ifdef CONFIG_PGSTE
- barrier();
- WRITE_ONCE(*(unsigned long *)(ptep + PTRS_PER_PTE), pgste_val(pgste) & ~PGSTE_PCL_BIT);
-#endif
-}
-
static inline pgste_t pgste_get(pte_t *ptep)
{
unsigned long pgste = 0;