ploop: Kill unneeded PLOOP_S_WAIT_PROCESS bit

Submitted by Kirill Tkhai on Feb. 15, 2019, 1:48 p.m.

Details

Message ID 155023841083.2547.16327987288449691466.stgit@localhost.localdomain
State New
Series "ploop: Kill unneeded PLOOP_S_WAIT_PROCESS bit"
Headers show

Commit Message

Kirill Tkhai Feb. 15, 2019, 1:48 p.m.
This bit just duplicates the information that waitqueue_active()
already provides. We should not use two lines of code to check
both PLOOP_S_WAIT_PROCESS and waitqueue_active() where a single
line is enough. So, kill the bit.

Also, note that using both plo->lock and plo->waitq->lock
is overkill, and this may be simplified as well. But in this
patch I only care about reducing code to improve readability.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 drivers/block/ploop/dev.c         |   28 ++++++++++------------------
 drivers/block/ploop/io_direct.c   |    2 +-
 drivers/block/ploop/io_kaio.c     |    2 +-
 drivers/block/ploop/push_backup.c |    4 ++--
 include/linux/ploop/ploop.h       |    1 -
 5 files changed, 14 insertions(+), 23 deletions(-)

Patch hide | download patch | download mbox

diff --git a/drivers/block/ploop/dev.c b/drivers/block/ploop/dev.c
index 231f8cf336d1..de6990fc0a6e 100644
--- a/drivers/block/ploop/dev.c
+++ b/drivers/block/ploop/dev.c
@@ -126,11 +126,10 @@  static void mitigation_timeout(unsigned long data)
 		return;
 
 	spin_lock_irq(&plo->lock);
-	if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state) &&
+	if (waitqueue_active(&plo->waitq) &&
 	    (!list_empty(&plo->entry_queue) ||
 	     ((plo->bio_head || !bio_list_empty(&plo->bio_discard_list)) &&
-	      !list_empty(&plo->free_list))) &&
-	      waitqueue_active(&plo->waitq))
+	      !list_empty(&plo->free_list))))
 		wake_up_interruptible(&plo->waitq);
 	spin_unlock_irq(&plo->lock);
 }
@@ -258,8 +257,7 @@  void ploop_preq_drop(struct ploop_device * plo, struct list_head *drop_list)
 	plo->free_qlen += drop_qlen;
 	if (waitqueue_active(&plo->req_waitq))
 		wake_up(&plo->req_waitq);
-	else if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state) &&
-		waitqueue_active(&plo->waitq) &&
+	else if (waitqueue_active(&plo->waitq) &&
 		(plo->bio_head || !bio_list_empty(&plo->bio_discard_list)))
 		wake_up_interruptible(&plo->waitq);
 
@@ -649,7 +647,6 @@  DEFINE_BIO_CB(ploop_fast_end_io)
 	plo->bio_total--;
 
 	if (plo->active_reqs == 0 &&
-	    test_bit(PLOOP_S_WAIT_PROCESS, &plo->state) &&
 	    waitqueue_active(&plo->waitq) &&
 	    (test_bit(PLOOP_S_EXITING, &plo->state) ||
 	     !list_empty(&plo->entry_queue)))
@@ -823,7 +820,6 @@  static void ploop_unplug(struct blk_plug_cb *cb, bool from_schedule)
 
 	if ((!list_empty(&plo->entry_queue) ||
 	     (plo->bio_head && !list_empty(&plo->free_list))) &&
-	    test_bit(PLOOP_S_WAIT_PROCESS, &plo->state) &&
 	    waitqueue_active(&plo->waitq))
 		wake_up_interruptible(&plo->waitq);
 	spin_unlock_irq(&plo->lock);
@@ -1071,7 +1067,7 @@  static void ploop_make_request(struct request_queue *q, struct bio *bio)
 	 * But try to mitigate wakeups, delaying wakeup for some short
 	 * time.
 	 */
-	if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state)) {
+	if (waitqueue_active(&plo->waitq)) {
 		/* Synchronous requests are not batched. */
 		if (plo->entry_qlen > plo->tune.batch_entry_qlen ||
 			(bio->bi_rw & (REQ_FLUSH|REQ_FUA)) ||
@@ -1455,8 +1451,7 @@  static void ploop_complete_request(struct ploop_request * preq)
 		plo->free_qlen++;
 		if (waitqueue_active(&plo->req_waitq))
 			wake_up(&plo->req_waitq);
-		else if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state) &&
-			 waitqueue_active(&plo->waitq) &&
+		else if (waitqueue_active(&plo->waitq) &&
 			 (plo->bio_head ||
 			  !bio_list_empty(&plo->bio_discard_list)))
 			wake_up_interruptible(&plo->waitq);
@@ -1531,8 +1526,7 @@  void ploop_complete_io_state(struct ploop_request * preq)
 		set_bit(PLOOP_S_ABORT, &plo->state);
 
 	list_add_tail(&preq->list, &plo->ready_queue);
-	if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state) &&
-	    waitqueue_active(&plo->waitq))
+	if (waitqueue_active(&plo->waitq))
 		wake_up_interruptible(&plo->waitq);
 	spin_unlock_irqrestore(&plo->lock, flags);
 }
@@ -2947,7 +2941,6 @@  static void ploop_wait(struct ploop_device * plo, int once, struct blk_plug *plu
 		if (kthread_should_stop() && !plo->active_reqs)
 			break;
 
-		set_bit(PLOOP_S_WAIT_PROCESS, &plo->state);
 		if (kthread_should_stop())
 			set_bit(PLOOP_S_EXITING, &plo->state);
 		once = 0;
@@ -2956,7 +2949,6 @@  static void ploop_wait(struct ploop_device * plo, int once, struct blk_plug *plu
 		schedule();
 		blk_start_plug(plug);
 		spin_lock_irq(&plo->lock);
-		clear_bit(PLOOP_S_WAIT_PROCESS, &plo->state);
 	}
 	finish_wait(&plo->waitq, &_wait);
 }
@@ -3426,7 +3418,7 @@  void ploop_quiesce(struct ploop_device * plo)
 	ploop_entry_add(plo, preq);
 	plo->barrier_reqs++;
 
-	if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
+	if (waitqueue_active(&plo->waitq))
 		wake_up_interruptible(&plo->waitq);
 	spin_unlock_irq(&plo->lock);
 
@@ -3720,7 +3712,7 @@  static void ploop_merge_process(struct ploop_device * plo)
 
 		ploop_entry_add(plo, preq);
 
-		if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
+		if (waitqueue_active(&plo->waitq))
 			wake_up_interruptible(&plo->waitq);
 	}
 
@@ -4345,7 +4337,7 @@  static void ploop_relocate(struct ploop_device * plo, int grow_stage)
 
 	ploop_entry_add(plo, preq);
 
-	if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
+	if (waitqueue_active(&plo->waitq))
 		wake_up_interruptible(&plo->waitq);
 
 	if (atomic_dec_and_test(&plo->maintenance_cnt))
@@ -4671,7 +4663,7 @@  static void ploop_relocblks_process(struct ploop_device *plo)
 
 		ploop_entry_add(plo, preq);
 
-		if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
+		if (waitqueue_active(&plo->waitq))
 			wake_up_interruptible(&plo->waitq);
 	}
 
diff --git a/drivers/block/ploop/io_direct.c b/drivers/block/ploop/io_direct.c
index 5388169c19de..41aacaf6760f 100644
--- a/drivers/block/ploop/io_direct.c
+++ b/drivers/block/ploop/io_direct.c
@@ -841,7 +841,7 @@  static int dio_fsync_thread(void * data)
 		}
 		plo->st.bio_fsync++;
 
-		if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
+		if (waitqueue_active(&plo->waitq))
 			wake_up_interruptible(&plo->waitq);
 	}
 	spin_unlock_irq(&plo->lock);
diff --git a/drivers/block/ploop/io_kaio.c b/drivers/block/ploop/io_kaio.c
index 6089522f8f5a..81b42fd254a0 100644
--- a/drivers/block/ploop/io_kaio.c
+++ b/drivers/block/ploop/io_kaio.c
@@ -539,7 +539,7 @@  static int kaio_fsync_thread(void * data)
 		spin_lock_irq(&plo->lock);
 		list_add_tail(&preq->list, &plo->ready_queue);
 
-		if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
+		if (waitqueue_active(&plo->waitq))
 			wake_up_interruptible(&plo->waitq);
 	}
 	spin_unlock_irq(&plo->lock);
diff --git a/drivers/block/ploop/push_backup.c b/drivers/block/ploop/push_backup.c
index 1f00e24aab1c..746037976e6c 100644
--- a/drivers/block/ploop/push_backup.c
+++ b/drivers/block/ploop/push_backup.c
@@ -753,7 +753,7 @@  unsigned long ploop_pb_stop(struct ploop_pushbackup_desc *pbd, bool do_merge)
 		spin_lock_irq(&plo->lock);
 		list_splice_init(&drop_list, plo->ready_queue.prev);
 		return_bios_back_to_plo(plo, &pbd->bio_pending_list);
-		if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
+		if (waitqueue_active(&plo->waitq))
 			wake_up_interruptible(&plo->waitq);
 		spin_unlock_irq(&plo->lock);
 	}
@@ -980,7 +980,7 @@  void ploop_pb_put_reported(struct ploop_pushbackup_desc *pbd,
 
 		spin_lock_irq(&plo->lock);
 		list_splice(&ready_list, plo->ready_queue.prev);
-		if (test_bit(PLOOP_S_WAIT_PROCESS, &plo->state))
+		if (waitqueue_active(&plo->waitq))
 			wake_up_interruptible(&plo->waitq);
 		spin_unlock_irq(&plo->lock);
 	}
diff --git a/include/linux/ploop/ploop.h b/include/linux/ploop/ploop.h
index b5645910c01e..3cba60b8d7bc 100644
--- a/include/linux/ploop/ploop.h
+++ b/include/linux/ploop/ploop.h
@@ -40,7 +40,6 @@  enum {
 	PLOOP_S_RUNNING,	/* Device is active */
 	PLOOP_S_ATTENTION,	/* Device is processing a barrier, everything
 				 * is queued to be totally serialized */
-	PLOOP_S_WAIT_PROCESS,	/* Main thread is waiting for requests */
 	PLOOP_S_EXITING,	/* Exiting */
 	PLOOP_S_ABORT,		/* Device is aborted due to unrecoverable
 				 * error. Reads are still allowed. */