[RHEL7,COMMIT] kvm: unlock kvm_lock in case no VMs to shrink

Submitted by Konstantin Khorenko on Aug. 5, 2019, 2:41 p.m.

Details

Message ID 201908051441.x75EfSwj003143@finist-ce7.sw.ru
State New
Series "kvm: unlock kvm_lock in case no VMs to shrink"
Headers show

Commit Message

Konstantin Khorenko Aug. 5, 2019, 2:41 p.m.
The commit is pushed to "branch-rh7-3.10.0-957.21.3.vz7.106.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-957.21.3.vz7.106.7
------>
commit 0a6a07c576c28f23ecc7ef06407706d5a6c37963
Author: Konstantin Khorenko <khorenko@virtuozzo.com>
Date:   Tue Jul 23 16:42:49 2019 +0300

    kvm: unlock kvm_lock in case no VMs to shrink
    
    If vm_list is empty, kvm_lock is acquired and never released in
    mmu_shrink_scan(); fix this.
    
    Fixes: bbacd5e44b5b ("kvm: move actual VM memory shrink out of
    kvm_lock")
    https://jira.sw.ru/browse/PSBM-96262
    
    Signed-off-by: Konstantin Khorenko <khorenko@virtuozzo.com>
    Reviewed-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 arch/x86/kvm/mmu.c | 51 ++++++++++++++++++++++++++++++---------------------
 1 file changed, 30 insertions(+), 21 deletions(-)

Patch hide | download patch | download mbox

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7d18cda1e2db..2ca7c39ec5ea 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5346,13 +5346,12 @@  mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	struct kvm *kvm, *tmp;
 	int nr_to_scan = sc->nr_to_scan;
 	unsigned long freed = 0;
+	int idx, found = 0;
+	LIST_HEAD(invalid_list);
 
 	spin_lock(&kvm_lock);
 
 	list_for_each_entry_safe(kvm, tmp, &vm_list, vm_list) {
-		int idx;
-		LIST_HEAD(invalid_list);
-
 		/*
 		 * Never scan more than sc->nr_to_scan VM instances.
 		 * Will not hit this condition practically since we do not try
@@ -5360,7 +5359,6 @@  mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		 * !n_used_mmu_pages so many times.
 		 */
 		if (!nr_to_scan--) {
-			spin_unlock(&kvm_lock);
 			break;
 		}
 
@@ -5389,30 +5387,41 @@  mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		 */
 		if (!kvm_try_get_kvm(kvm))
 			continue;
-		spin_unlock(&kvm_lock);
-
-		idx = srcu_read_lock(&kvm->srcu);
-		spin_lock(&kvm->mmu_lock);
 
-		if (kvm_has_zapped_obsolete_pages(kvm)) {
-			kvm_mmu_commit_zap_page(kvm,
-			      &kvm->arch.zapped_obsolete_pages);
-			goto unlock;
-		}
+		/*
+		 * We found a VM to shrink, and as we shrink only one VM per
+		 * function call, break out of the loop and do the actual
+		 * shrink outside of it.
+		 */
+		found = 1;
+		break;
+	}
 
-		if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
-			freed++;
-		kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	spin_unlock(&kvm_lock);
 
-unlock:
-		spin_unlock(&kvm->mmu_lock);
-		srcu_read_unlock(&kvm->srcu, idx);
+	/* If no VM to shrink was found, just exit. */
+	if (!found)
+		return freed;
 
-		kvm_put_kvm(kvm);
+	idx = srcu_read_lock(&kvm->srcu);
+	spin_lock(&kvm->mmu_lock);
 
-		break;
+	if (kvm_has_zapped_obsolete_pages(kvm)) {
+		kvm_mmu_commit_zap_page(kvm,
+					&kvm->arch.zapped_obsolete_pages);
+		goto unlock;
 	}
 
+	if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
+		freed++;
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+
+unlock:
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	kvm_put_kvm(kvm);
+
 	return freed;
 
 }