From 398eba1ea6f15c980fbccf026b733ceb03d05141 Mon Sep 17 00:00:00 2001
From: Xin Jiang
Date: Fri, 15 Mar 2024 13:25:38 +0800
Subject: [PATCH 1/2] mm/cma: add API to enable concurrent allocation from the CMA

hygon inclusion
category: feature
CVE: NA

---------------------------

The mutex prevents allocating CMA memory concurrently, and it has been
removed and reverted back and forth; see commit 60a60e32cf91 ("Revert
"mm/cma.c: remove redundant cma_mutex lock"") and commit a4efc174b382
("mm/cma.c: remove redundant cma_mutex lock") upstream. To resolve this
dilemma, add an API to enable concurrency; it is up to the user to
decide whether their CMA area can handle concurrent allocations.

Signed-off-by: Yangwencheng
Signed-off-by: Xin Jiang
Signed-off-by: hanliyang
---
 include/linux/cma.h |  1 +
 mm/cma.c            | 14 ++++++++++++--
 mm/cma.h            |  1 +
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/include/linux/cma.h b/include/linux/cma.h
index 18c8d6495f089..010c89f4b7727 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -58,4 +58,5 @@ extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
 
 extern void cma_reserve_pages_on_error(struct cma *cma);
 extern int __init cma_alloc_areas(unsigned int max_cma_size);
+extern void cma_enable_concurrency(struct cma *cma);
 #endif
diff --git a/mm/cma.c b/mm/cma.c
index 5af7642e607b4..304a4e69180c9 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -492,10 +492,12 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
 		spin_unlock_irq(&cma->lock);
 
 		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
-		mutex_lock(&cma_mutex);
+		if (!cma->no_mutex)
+			mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA,
 				     GFP_KERNEL | (no_warn ? __GFP_NOWARN : 0));
-		mutex_unlock(&cma_mutex);
+		if (!cma->no_mutex)
+			mutex_unlock(&cma_mutex);
 		if (ret == 0) {
 			page = pfn_to_page(pfn);
 			break;
@@ -609,3 +611,11 @@ int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
 
 	return 0;
 }
+
+void cma_enable_concurrency(struct cma *cma)
+{
+	if (!cma)
+		return;
+
+	cma->no_mutex = true;
+}
diff --git a/mm/cma.h b/mm/cma.h
index 12aba820969c2..50275c1d98cc6 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,6 +16,7 @@ struct cma {
 	unsigned long   *bitmap;
 	unsigned int order_per_bit; /* Order of pages represented by one bit */
 	spinlock_t	lock;
+	bool no_mutex;
 #ifdef CONFIG_CMA_DEBUGFS
 	struct hlist_head mem_head;
 	spinlock_t mem_head_lock;

From 21cecdf043647c6ac307a95eaaf64a495add00fc Mon Sep 17 00:00:00 2001
From: Xin Jiang
Date: Fri, 15 Mar 2024 13:28:39 +0800
Subject: [PATCH 2/2] x86/mm: CSV allows CMA allocation concurrently

hygon inclusion
category: feature
CVE: NA

---------------------------

The CMA areas reserved for CSV can handle concurrent allocations, so
enable concurrency on them with the new cma_enable_concurrency() API.

Signed-off-by: Yangwencheng
Signed-off-by: Xin Jiang
Signed-off-by: hanliyang
---
 arch/x86/mm/mem_encrypt_hygon.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c
index e0acb34e16af2..1871850cbb604 100644
--- a/arch/x86/mm/mem_encrypt_hygon.c
+++ b/arch/x86/mm/mem_encrypt_hygon.c
@@ -282,6 +282,7 @@ static void __init csv_cma_reserve_mem(void)
 					1 << CSV_CMA_SHIFT, node);
 			break;
 		}
+		cma_enable_concurrency(csv_cma->cma);
 
 		if (start > cma_get_base(csv_cma->cma) || !start)
 			start = cma_get_base(csv_cma->cma);
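
For reference, a minimal sketch of how a caller might adopt the new API: reserve a
CMA area, then opt it into concurrent allocation. The function name, area name, and
size below are hypothetical and only illustrate the intended call order; they are not
part of the patches above.

/*
 * Hypothetical usage sketch for cma_enable_concurrency(); names and sizes
 * are illustrative, not taken from the patches above.
 */
#include <linux/cma.h>
#include <linux/sizes.h>

static struct cma *example_cma;	/* hypothetical driver-owned CMA area */

static int __init example_cma_reserve(void)
{
	int ret;

	/* Reserve a 64 MiB CMA area during early boot (illustrative size). */
	ret = cma_declare_contiguous(0, SZ_64M, 0, 0, 0, false,
				     "example", &example_cma);
	if (ret)
		return ret;

	/*
	 * Skip the global cma_mutex for this area: the owner asserts that
	 * concurrent alloc_contig_range() calls on this area are safe.
	 */
	cma_enable_concurrency(example_cma);
	return 0;
}

Patch 2 above applies the same pattern to the CSV CMA areas in csv_cma_reserve_mem().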