Diffstat (limited to 'include')

 include/kvm/arm_vgic.h         | 16 ++++++++--------
 include/linux/kvm_host.h       |  4 ----
 include/linux/kvm_types.h      |  1 +
 include/linux/mmu_notifier.h   | 44 --------------------------------------------
 include/linux/perf/riscv_pmu.h |  8 ++++++++
 include/trace/events/kvm.h     | 15 ---------------
 6 files changed, 17 insertions(+), 71 deletions(-)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 47035946648e..f5172549f9ba 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -210,6 +210,12 @@ struct vgic_its {
 	struct mutex		its_lock;
 	struct list_head	device_list;
 	struct list_head	collection_list;
+
+	/*
+	 * Caches the (device_id, event_id) -> vgic_irq translation for
+	 * LPIs that are mapped and enabled.
+	 */
+	struct xarray		translation_cache;
 };
 
 struct vgic_state_iter;
@@ -274,13 +280,8 @@ struct vgic_dist {
 	 */
 	u64			propbaser;
 
-	/* Protects the lpi_list. */
-	raw_spinlock_t		lpi_list_lock;
+#define LPI_XA_MARK_DEBUG_ITER	XA_MARK_0
 	struct xarray		lpi_xa;
-	atomic_t		lpi_count;
-
-	/* LPI translation cache */
-	struct list_head	lpi_translation_cache;
 
 	/* used by vgic-debug */
 	struct vgic_state_iter *iter;
@@ -330,7 +331,7 @@ struct vgic_cpu {
 		struct vgic_v3_cpu_if	vgic_v3;
 	};
 
-	struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
+	struct vgic_irq *private_irqs;
 
 	raw_spinlock_t ap_list_lock;	/* Protects the ap_list */
 
@@ -388,7 +389,6 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 
 void kvm_vgic_load(struct kvm_vcpu *vcpu);
 void kvm_vgic_put(struct kvm_vcpu *vcpu);
-void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	((k)->arch.vgic.initialized)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 48f31dcd318a..692c01e41a18 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -193,8 +193,6 @@ static inline bool is_error_page(struct page *page)
 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
 				 unsigned long *vcpu_bitmap);
 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
-bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
-				      struct kvm_vcpu *except);
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID		0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1
@@ -259,7 +257,6 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
 
 #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER
 union kvm_mmu_notifier_arg {
-	pte_t pte;
 	unsigned long attributes;
 };
 
@@ -273,7 +270,6 @@ struct kvm_gfn_range {
 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
 #endif
 
 enum {
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index d93f6522b2c3..827ecc0b7e10 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -86,6 +86,7 @@ struct gfn_to_pfn_cache {
 struct kvm_mmu_memory_cache {
 	gfp_t gfp_zero;
 	gfp_t gfp_custom;
+	u64 init_value;
 	struct kmem_cache *kmem_cache;
 	int capacity;
 	int nobjs;
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index f349e08a9dfe..d39ebb10caeb 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -123,15 +123,6 @@ struct mmu_notifier_ops {
 			     unsigned long address);
 
 	/*
-	 * change_pte is called in cases that pte mapping to page is changed:
-	 * for example, when ksm remaps pte to point to a new shared page.
-	 */
-	void (*change_pte)(struct mmu_notifier *subscription,
-			   struct mm_struct *mm,
-			   unsigned long address,
-			   pte_t pte);
-
-	/*
 	 * invalidate_range_start() and invalidate_range_end() must be
 	 * paired and are called only when the mmap_lock and/or the
 	 * locks protecting the reverse maps are held. If the subsystem
@@ -392,8 +383,6 @@ extern int __mmu_notifier_clear_young(struct mm_struct *mm,
 				      unsigned long end);
 extern int __mmu_notifier_test_young(struct mm_struct *mm,
 				     unsigned long address);
-extern void __mmu_notifier_change_pte(struct mm_struct *mm,
-				      unsigned long address, pte_t pte);
 extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
 extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
 extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
@@ -439,13 +428,6 @@ static inline int mmu_notifier_test_young(struct mm_struct *mm,
 	return 0;
 }
 
-static inline void mmu_notifier_change_pte(struct mm_struct *mm,
-					   unsigned long address, pte_t pte)
-{
-	if (mm_has_notifiers(mm))
-		__mmu_notifier_change_pte(mm, address, pte);
-}
-
 static inline void
 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 {
@@ -581,26 +563,6 @@ static inline void mmu_notifier_range_init_owner(
 	__young;							\
 })
 
-/*
- * set_pte_at_notify() sets the pte _after_ running the notifier.
- * This is safe to start by updating the secondary MMUs, because the primary MMU
- * pte invalidate must have already happened with a ptep_clear_flush() before
- * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is
- * required when we change both the protection of the mapping from read-only to
- * read-write and the pfn (like during copy on write page faults). Otherwise the
- * old page would remain mapped readonly in the secondary MMUs after the new
- * page is already writable by some CPU through the primary MMU.
- */
-#define set_pte_at_notify(__mm, __address, __ptep, __pte)		\
-({									\
-	struct mm_struct *___mm = __mm;					\
-	unsigned long ___address = __address;				\
-	pte_t ___pte = __pte;						\
-									\
-	mmu_notifier_change_pte(___mm, ___address, ___pte);		\
-	set_pte_at(___mm, ___address, __ptep, ___pte);			\
-})
-
 #else /* CONFIG_MMU_NOTIFIER */
 
 struct mmu_notifier_range {
@@ -650,11 +612,6 @@ static inline int mmu_notifier_test_young(struct mm_struct *mm,
 	return 0;
 }
 
-static inline void mmu_notifier_change_pte(struct mm_struct *mm,
-					   unsigned long address, pte_t pte)
-{
-}
-
 static inline void
 mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
 {
@@ -693,7 +650,6 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
 #define ptep_clear_flush_notify ptep_clear_flush
 #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
 #define pudp_huge_clear_flush_notify pudp_huge_clear_flush
-#define set_pte_at_notify set_pte_at
 
 static inline void mmu_notifier_synchronize(void)
 {
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h
index 43282e22ebe1..701974639ff2 100644
--- a/include/linux/perf/riscv_pmu.h
+++ b/include/linux/perf/riscv_pmu.h
@@ -39,6 +39,14 @@ struct cpu_hw_events {
 	DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS);
 	/* currently enabled firmware counters */
 	DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS);
+	/* The virtual address of the shared memory where counter snapshot will be taken */
+	void *snapshot_addr;
+	/* The physical address of the shared memory where counter snapshot will be taken */
+	phys_addr_t snapshot_addr_phys;
+	/* Boolean flag to indicate setup is already done */
+	bool snapshot_set_done;
+	/* A shadow copy of the counter values to avoid clobbering during multiple SBI calls */
+	u64 snapshot_cval_shcopy[RISCV_MAX_COUNTERS];
 };
 
 struct riscv_pmu {
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 011fba6b5552..74e40d5d4af4 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -456,21 +456,6 @@ TRACE_EVENT(kvm_unmap_hva_range,
 		  __entry->start, __entry->end)
 );
 
-TRACE_EVENT(kvm_set_spte_hva,
-	TP_PROTO(unsigned long hva),
-	TP_ARGS(hva),
-
-	TP_STRUCT__entry(
-		__field(	unsigned long,	hva		)
-	),
-
-	TP_fast_assign(
-		__entry->hva		= hva;
-	),
-
-	TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
-);
-
 TRACE_EVENT(kvm_age_hva,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end),
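
Note on the arm_vgic.h hunks above: the per-ITS translation_cache xarray replaces the old list-based LPI translation cache, keyed by the (device_id, event_id) pair named in the new comment. Below is a minimal sketch of how such a cache could be keyed and probed; the index packing and the helper names are illustrative assumptions, not the kernel's actual vgic-its code.

#include <linux/xarray.h>
#include <kvm/arm_vgic.h>

/*
 * Illustrative sketch only (assumes a 64-bit unsigned long, as on
 * arm64): pack the 32-bit device and event IDs into one xarray index.
 */
static unsigned long cache_index(u32 device_id, u32 event_id)
{
	return ((unsigned long)device_id << 32) | event_id;
}

static struct vgic_irq *cache_lookup(struct vgic_its *its,
				     u32 device_id, u32 event_id)
{
	/* xa_load() is safe for lockless readers under RCU. */
	return xa_load(&its->translation_cache,
		       cache_index(device_id, event_id));
}

static int cache_insert(struct vgic_its *its, u32 device_id,
			u32 event_id, struct vgic_irq *irq)
{
	/* xa_store() returns the displaced entry or an xa_err() pointer. */
	return xa_err(xa_store(&its->translation_cache,
			       cache_index(device_id, event_id),
			       irq, GFP_KERNEL_ACCOUNT));
}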
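Note on the kvm_types.h hunk: the new init_value field lets a kvm_mmu_memory_cache hand out objects pre-filled with a pattern instead of zeroes (useful when a zeroed object is not the desired initial state). A hedged sketch of an allocation path honoring it follows, assuming page-sized objects; this stand-in is not KVM's actual helper.

#include <linux/kvm_types.h>
#include <linux/string.h>
#include <linux/bug.h>

static void *memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc->nobjs);		/* cache must have been topped up */
	p = mc->objects[--mc->nobjs];

	/* Pattern-fill instead of relying on __GFP_ZERO. */
	if (mc->init_value)
		memset64(p, mc->init_value, PAGE_SIZE / sizeof(u64));

	return p;
}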
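Note on the riscv_pmu.h hunk: the new fields wire up per-CPU SBI PMU snapshot shared memory, and snapshot_cval_shcopy exists because a later SBI call can rewrite that shared region before the driver has consumed it. The sketch below shows the staging step under that assumption; the snapshot struct layout is illustrative, not quoted from the kernel.

/* Assumed layout: the firmware writes one u64 per counter. */
struct pmu_snapshot_data {
	u64 ctr_overflow_mask;
	u64 ctr_values[RISCV_MAX_COUNTERS];
};

static void pmu_stage_snapshot(struct cpu_hw_events *cpu_hw_evt)
{
	struct pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr;
	int idx;

	/* Copy out before the next SBI call clobbers the shared page. */
	for (idx = 0; idx < RISCV_MAX_COUNTERS; idx++)
		cpu_hw_evt->snapshot_cval_shcopy[idx] = sdata->ctr_values[idx];
}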
