arm64: Fix mmu notifiers for range-based invalidates
commit f7edb07ad7c66eab3dce57384f33b9799d579133 upstream.

Update the __flush_tlb_range_op macro not to modify its parameters, as
these are unexpected semantics. In practice, this fixes the call to
mmu_notifier_arch_invalidate_secondary_tlbs() in
__flush_tlb_range_nosync() to use the correct range instead of an empty
range with start == end. The empty range was (un)lucky, as it results in
taking the invalidate-all path, which does not cause correctness issues
but can certainly result in suboptimal performance.

This has been broken since commit 6bbd42e2df ("mmu_notifiers: call
invalidate_range() when invalidating TLBs"), when the call to the
notifiers was added to __flush_tlb_range(). It predates the addition of
the __flush_tlb_range_op() macro in commit 360839027a ("arm64: tlb:
Refactor the core flush algorithm of __flush_tlb_range") that made the
bug hard to spot.

Fixes: 6bbd42e2df ("mmu_notifiers: call invalidate_range() when invalidating TLBs")
Signed-off-by: Piotr Jaroszynski <pjaroszynski@nvidia.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Raghavendra Rao Ananta <rananta@google.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Nicolin Chen <nicolinc@nvidia.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: iommu@lists.linux.dev
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
Cc: stable@vger.kernel.org
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Alistair Popple <apopple@nvidia.com>
Link: https://lore.kernel.org/r/20250304085127.2238030-1-pjaroszynski@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
[will: Resolve conflicts due to lack of LPA2 support]
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
committed by Greg Kroah-Hartman
parent da210d4f88
commit 4f687721a9
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -369,31 +369,33 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
 #define __flush_tlb_range_op(op, start, pages, stride,			\
 				asid, tlb_level, tlbi_user)		\
 do {									\
+	typeof(start) __flush_start = start;				\
+	typeof(pages) __flush_pages = pages;				\
 	int num = 0;							\
 	int scale = 3;							\
 	unsigned long addr;						\
 									\
-	while (pages > 0) {						\
+	while (__flush_pages > 0) {					\
 		if (!system_supports_tlb_range() ||			\
-		    pages == 1) {					\
-			addr = __TLBI_VADDR(start, asid);		\
+		    __flush_pages == 1) {				\
+			addr = __TLBI_VADDR(__flush_start, asid);	\
 			__tlbi_level(op, addr, tlb_level);		\
 			if (tlbi_user)					\
 				__tlbi_user_level(op, addr, tlb_level);	\
-			start += stride;				\
-			pages -= stride >> PAGE_SHIFT;			\
+			__flush_start += stride;			\
+			__flush_pages -= stride >> PAGE_SHIFT;		\
 			continue;					\
 		}							\
 									\
-		num = __TLBI_RANGE_NUM(pages, scale);			\
+		num = __TLBI_RANGE_NUM(__flush_pages, scale);		\
 		if (num >= 0) {						\
-			addr = __TLBI_VADDR_RANGE(start, asid, scale,	\
-						  num, tlb_level);	\
+			addr = __TLBI_VADDR_RANGE(__flush_start, asid,	\
+						  scale, num, tlb_level); \
 			__tlbi(r##op, addr);				\
 			if (tlbi_user)					\
 				__tlbi_user(r##op, addr);		\
-			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT;	\
-			pages -= __TLBI_RANGE_PAGES(num, scale);	\
+			__flush_start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
+			__flush_pages -= __TLBI_RANGE_PAGES(num, scale);\
 		}							\
 		scale--;						\
 	}								\
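The fix itself is the standard local-copy idiom for statement macros: bind the
arguments to macro-private variables once, then loop on the copies. Applied to
the hypothetical FLUSH_RANGE_OP sketch above (typeof() is the same GNU C
extension the kernel relies on), it looks like this:

/*
 * FIXED: loop state lives in macro-private copies, so the caller's
 * 'start' and 'pages' are left untouched.
 */
#define FLUSH_RANGE_OP(start, pages, stride)				\
	do {								\
		typeof(start) __flush_start = (start);			\
		typeof(pages) __flush_pages = (pages);			\
		while (__flush_pages > 0) {				\
			/* ... issue a TLBI for '__flush_start' ... */	\
			__flush_start += (stride);			\
			__flush_pages -= 1;				\
		}							\
	} while (0)

The double-underscore prefix follows kernel convention for macro-local names
and keeps the copies from colliding with identifiers at the expansion site.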