Diffstat (limited to 'mm/hugetlb.c')
 mm/hugetlb.c | 51 ++++++++++++++++++++++++++-------------------------
 1 file changed, 26 insertions(+), 25 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 218679138255..3612fbb32e9d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2798,7 +2798,8 @@ static int __init hugetlb_init(void)
 	num_fault_mutexes = 1;
 #endif
 	hugetlb_fault_mutex_table =
-		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
+		kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
+			      GFP_KERNEL);
 	BUG_ON(!hugetlb_fault_mutex_table);
 
 	for (i = 0; i < num_fault_mutexes; i++)
@@ -3159,7 +3160,7 @@ static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
  * hugegpage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
  * this far.
  */
-static int hugetlb_vm_op_fault(struct vm_fault *vmf)
+static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
 {
 	BUG();
 	return 0;
@@ -3291,7 +3292,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 				 * table protection not changing it to point
 				 * to a new page.
 				 *
-				 * See Documentation/vm/mmu_notifier.txt
+				 * See Documentation/vm/mmu_notifier.rst
 				 */
 				huge_ptep_set_wrprotect(src, addr, src_pte);
 			}
@@ -3686,6 +3687,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page;
 	pte_t new_pte;
 	spinlock_t *ptl;
+	unsigned long haddr = address & huge_page_mask(h);
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -3716,7 +3718,7 @@ retry:
 			u32 hash;
 			struct vm_fault vmf = {
 				.vma = vma,
-				.address = address,
+				.address = haddr,
 				.flags = flags,
 				/*
 				 * Hard to debug if it ends up being
@@ -3733,14 +3735,14 @@ retry:
 			 * fault to make calling code simpler.
 			 */
 			hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
-							idx, address);
+							idx, haddr);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 			ret = handle_userfault(&vmf, VM_UFFD_MISSING);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 			goto out;
 		}
 
-		page = alloc_huge_page(vma, address, 0);
+		page = alloc_huge_page(vma, haddr, 0);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			if (ret == -ENOMEM)
@@ -3789,12 +3791,12 @@ retry:
 	 * the spinlock.
 	 */
 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
-		if (vma_needs_reservation(h, vma, address) < 0) {
+		if (vma_needs_reservation(h, vma, haddr) < 0) {
 			ret = VM_FAULT_OOM;
 			goto backout_unlocked;
 		}
 		/* Just decrements count, does not deallocate */
-		vma_end_reservation(h, vma, address);
+		vma_end_reservation(h, vma, haddr);
 	}
 
 	ptl = huge_pte_lock(h, mm, ptep);
@@ -3808,17 +3810,17 @@ retry:
 
 	if (anon_rmap) {
 		ClearPagePrivate(page);
-		hugepage_add_new_anon_rmap(page, vma, address);
+		hugepage_add_new_anon_rmap(page, vma, haddr);
 	} else
 		page_dup_rmap(page, true);
 	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
 				&& (vma->vm_flags & VM_SHARED)));
-	set_huge_pte_at(mm, address, ptep, new_pte);
+	set_huge_pte_at(mm, haddr, ptep, new_pte);
 
 	hugetlb_count_add(pages_per_huge_page(h), mm);
 	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
-		ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
+		ret = hugetlb_cow(mm, vma, haddr, ptep, page, ptl);
 	}
 
 	spin_unlock(ptl);
@@ -3830,7 +3832,7 @@ backout:
 	spin_unlock(ptl);
 backout_unlocked:
 	unlock_page(page);
-	restore_reserve_on_error(h, vma, address, page);
+	restore_reserve_on_error(h, vma, haddr, page);
 	put_page(page);
 	goto out;
 }
@@ -3883,10 +3885,9 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct hstate *h = hstate_vma(vma);
 	struct address_space *mapping;
 	int need_wait_lock = 0;
+	unsigned long haddr = address & huge_page_mask(h);
 
-	address &= huge_page_mask(h);
-
-	ptep = huge_pte_offset(mm, address, huge_page_size(h));
+	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 	if (ptep) {
 		entry = huge_ptep_get(ptep);
 		if (unlikely(is_hugetlb_entry_migration(entry))) {
@@ -3896,20 +3897,20 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			return VM_FAULT_HWPOISON_LARGE |
 				VM_FAULT_SET_HINDEX(hstate_index(h));
 	} else {
-		ptep = huge_pte_alloc(mm, address, huge_page_size(h));
+		ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
 		if (!ptep)
 			return VM_FAULT_OOM;
 	}
 
 	mapping = vma->vm_file->f_mapping;
-	idx = vma_hugecache_offset(h, vma, address);
+	idx = vma_hugecache_offset(h, vma, haddr);
 
 	/*
 	 * Serialize hugepage allocation and instantiation, so that we don't
 	 * get spurious allocation failures if two CPUs race to instantiate
 	 * the same page in the page cache.
 	 */
-	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
+	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 	entry = huge_ptep_get(ptep);
@@ -3939,16 +3940,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * consumed.
 	 */
 	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
-		if (vma_needs_reservation(h, vma, address) < 0) {
+		if (vma_needs_reservation(h, vma, haddr) < 0) {
 			ret = VM_FAULT_OOM;
 			goto out_mutex;
 		}
 		/* Just decrements count, does not deallocate */
-		vma_end_reservation(h, vma, address);
+		vma_end_reservation(h, vma, haddr);
 
 		if (!(vma->vm_flags & VM_MAYSHARE))
 			pagecache_page = hugetlbfs_pagecache_page(h,
-								vma, address);
+								vma, haddr);
 	}
 
 	ptl = huge_pte_lock(h, mm, ptep);
@@ -3973,16 +3974,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (flags & FAULT_FLAG_WRITE) {
 		if (!huge_pte_write(entry)) {
-			ret = hugetlb_cow(mm, vma, address, ptep,
+			ret = hugetlb_cow(mm, vma, haddr, ptep,
 					  pagecache_page, ptl);
 			goto out_put_page;
 		}
 		entry = huge_pte_mkdirty(entry);
 	}
 	entry = pte_mkyoung(entry);
-	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
+	if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
 						flags & FAULT_FLAG_WRITE))
-		update_mmu_cache(vma, address, ptep);
+		update_mmu_cache(vma, haddr, ptep);
 out_put_page:
 	if (page != pagecache_page)
 		unlock_page(page);
@@ -4357,7 +4358,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * No need to call mmu_notifier_invalidate_range() we are downgrading
 	 * page table protection not changing it to point to a new page.
 	 *
-	 * See Documentation/vm/mmu_notifier.txt
+	 * See Documentation/vm/mmu_notifier.rst
	 */
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	mmu_notifier_invalidate_range_end(mm, start, end);
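The kmalloc() -> kmalloc_array() conversion in hugetlb_init() replaces an open-coded sizeof(struct mutex) * num_fault_mutexes multiplication with an allocator that fails cleanly if the product would overflow, rather than silently under-allocating. A minimal userspace sketch of the same guard (alloc_array() is an illustrative name, not a kernel or libc API):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative analogue of kmalloc_array(): return NULL instead of
 * under-allocating when n * size would overflow size_t. */
static void *alloc_array(size_t n, size_t size)
{
	if (size != 0 && n > (size_t)-1 / size)
		return NULL;	/* multiplication would overflow */
	return malloc(n * size);
}

int main(void)
{
	void *p = alloc_array((size_t)-1, 8);	/* would overflow: NULL */

	printf("oversized request: %p\n", p);
	free(p);
	return 0;
}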
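Most of the remaining hunks introduce a local haddr (the fault address rounded down to the start of its huge page) instead of masking address in place, so the original, unmasked fault address remains available in hugetlb_fault() and hugetlb_no_page() while reservation, page-cache, and page-table helpers receive the aligned address. A standalone sketch of the masking, assuming a 2 MiB huge page size for illustration:

#include <stdio.h>

int main(void)
{
	/* huge_page_mask(h) in the kernel is ~(huge_page_size(h) - 1);
	 * assume 2 MiB huge pages here for illustration. */
	unsigned long hpage_size = 2UL << 20;
	unsigned long hpage_mask = ~(hpage_size - 1);
	unsigned long address = 0x7f3a00212345UL;	/* arbitrary fault address */
	unsigned long haddr = address & hpage_mask;	/* hugepage-aligned */

	printf("address = %#lx\n", address);	/* 0x7f3a00212345 */
	printf("haddr   = %#lx\n", haddr);	/* 0x7f3a00200000 */
	return 0;
}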
