[RHEL7,COMMIT] fs/fuse kio: fix problem with simultaneous map resolving

Submitted by Vasily Averin on Dec. 24, 2020, 6:42 p.m.

Details

Message ID 202012241842.0BOIg2mE024269@vz7build.vvs.sw.ru
State New
Series "fs/fuse kio: fix problem with simultaneous map resolving"
Headers show

Commit Message

Vasily Averin Dec. 24, 2020, 6:42 p.m.
The commit is pushed to "branch-rh7-3.10.0-1160.11.1.vz7.172.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1160.11.1.vz7.172.4
------>
commit b32712e7deb17776e6df360e511a05d1d9a347e0
Author: Ildar Ismagilov <ildar.ismagilov@virtuozzo.com>
Date:   Thu Dec 24 21:42:02 2020 +0300

    fs/fuse kio: fix problem with simultaneous map resolving
    
    Simultaneous map resolving is possible, because during the
    resolving of the map, it may go into an error state (explicit
    assign m->state = PCS_MAP_ERROR) and after that we can try to
    resolve it again. For example, while map state is being updated
    from RO to RW, the new READ request may call __map_error()
    due to the fact that all CS are blacklisted.
    This may cause kernel panic:
    
    kernel BUG at fs/fuse/kio/pcs/pcs_fuse_kdirect.c:543!
    Call Trace:
      pcs_map_queue_resolve+0x12e/0x370 [fuse_kio_pcs]
      map_submit+0x228/0x4b0 [fuse_kio_pcs]
      pcs_cs_wakeup+0x114/0x280 [fuse_kio_pcs]
      pcs_deaccount_ireq+0x37f/0x4c0 [fuse_kio_pcs]
      map_notify_soft_error+0xdb/0x440 [fuse_kio_pcs]
      pcs_sreq_complete+0x1f9/0x270 [fuse_kio_pcs]
      cs_response_done+0x1cb/0x2c0 [fuse_kio_pcs]
      cs_sent+0x2d/0x40 [fuse_kio_pcs]
      rpc_abort+0x2d3/0x420 [fuse_kio_pcs]
      pcs_rpc_reset+0x1c/0x40 [fuse_kio_pcs]
      pcs_rdmaconnect_start+0x106/0x3a0 [fuse_kio_pcs]
      cs_connect+0x126/0x290 [fuse_kio_pcs]
      pcs_rpc_connect+0x40/0x70 [fuse_kio_pcs]
      pcs_rpc_send+0x97/0x1c0 [fuse_kio_pcs]
      rpc_queue_work+0x12c/0x380 [fuse_kio_pcs]
      process_one_work+0x185/0x440
      worker_thread+0x126/0x3c0
      kthread+0xd1/0xe0
      ret_from_fork_nospec_begin+0x7/0x21
    
    To resolve this problem, we postpone all requests that try to
    move the map into an error state until the map state is resolved.
    
    https://pmc.acronis.com/browse/VSTOR-39656
    
    Signed-off-by: Ildar Ismagilov <ildar.ismagilov@virtuozzo.com>
    Reviewed-by: Alexey Kuznetsov <kuznet@acronis.com>
---
 fs/fuse/kio/pcs/pcs_map.c | 34 +++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)

Patch hide | download patch | download mbox

diff --git a/fs/fuse/kio/pcs/pcs_map.c b/fs/fuse/kio/pcs/pcs_map.c
index 8f2ca5c..0a787f0 100644
--- a/fs/fuse/kio/pcs/pcs_map.c
+++ b/fs/fuse/kio/pcs/pcs_map.c
@@ -741,12 +741,6 @@  static inline void map_remote_error_nolock(struct pcs_map_entry *m , int error,
 {
 	__map_error(m, 1 , error, offender);
 }
-static void map_remote_error(struct pcs_map_entry *m , int error, u64 offender)
-{
-	spin_lock(&m->lock);
-	map_remote_error_nolock(m, error, offender);
-	spin_unlock(&m->lock);
-}
 
 void pcs_map_notify_addr_change(struct pcs_cs * cs)
 {
@@ -1097,6 +1091,8 @@  void pcs_map_complete(struct pcs_map_entry *m, struct pcs_ioc_getmap *omap)
 		   m->state = PCS_MAP_ERROR;
 		   If m->state becomes atomic bit fields this will be impossible.
 		 */
+		TRACE("skip getmap resp: m:%p, state:%x resp{ st:%d, err:%d, v:" VER_FMT "}\n",
+		      m, m->state, omap->state, omap->error.value, VER_ARGS(omap->version));
 		spin_unlock(&m->lock);
 		goto out_ignore;
 	}
@@ -1902,6 +1898,23 @@  pcs_ireq_split(struct pcs_int_request *ireq, unsigned int iochunk, int noalign)
 	return sreq;
 }
 
+static inline bool ireq_remote_error(struct pcs_int_request *ireq,
+				     struct pcs_map_entry *m,
+				     int error, u64 offender)
+{
+	spin_lock(&m->lock);
+	if (m->state & PCS_MAP_RESOLVING) {
+		/* Defer request until the map is resolved */
+		list_add_tail(&ireq->list, &m->queue);
+		spin_unlock(&m->lock);
+		return false;
+	}
+	map_remote_error_nolock(m, error, offender);
+	spin_unlock(&m->lock);
+
+	return true;
+}
+
 static int pcs_cslist_submit_read(struct pcs_int_request *ireq, struct pcs_cs_list * csl)
 {
 	struct pcs_cluster_core *cc = ireq->cc;
@@ -1947,7 +1960,8 @@  static int pcs_cslist_submit_read(struct pcs_int_request *ireq, struct pcs_cs_li
 			 * and let MDS to figure what heppened with the rest.
 			 */
 			cs = csl->cs[0].cslink.cs;
-			map_remote_error(ireq->iochunk.map, cs->blacklist_reason, cs->id.val);
+			if (!ireq_remote_error(ireq, ireq->iochunk.map, cs->blacklist_reason, cs->id.val))
+				return 0;
 
 			FUSE_KTRACE(ireq->cc->fc, "Read from " MAP_FMT " blocked by blacklist error %d, CS" NODE_FMT,
 			      MAP_ARGS(ireq->iochunk.map), cs->blacklist_reason, NODE_ARGS(cs->id));
@@ -2112,7 +2126,8 @@  restart:
 	for (i = 0; i < csl->nsrv; i++) {
 		cs = csl->cs[i].cslink.cs;
 		if (cs_is_blacklisted(cs)) {
-			map_remote_error(ireq->iochunk.map, cs->blacklist_reason, cs->id.val);
+			if (!ireq_remote_error(ireq, ireq->iochunk.map, cs->blacklist_reason, cs->id.val))
+				return 0;
 			FUSE_KTRACE(cc_from_csset(cs->css)->fc, "Write to " MAP_FMT " blocked by blacklist error %d, CS" NODE_FMT,
 			      MAP_ARGS(ireq->iochunk.map), cs->blacklist_reason, NODE_ARGS(cs->id));
 			spin_lock(&ireq->completion_data.child_lock);
@@ -2219,7 +2234,8 @@  static int pcs_cslist_submit_flush(struct pcs_int_request *ireq, struct pcs_cs_l
 		cs = csl->cs[i].cslink.cs;
 
 		if (cs_is_blacklisted(cs)) {
-			map_remote_error(ireq->flushreq.map, cs->blacklist_reason, cs->id.val);
+			if (!ireq_remote_error(ireq, ireq->flushreq.map, cs->blacklist_reason, cs->id.val))
+				return 0;
 			FUSE_KTRACE(cc_from_csset(cs->css)->fc, "Flush to " MAP_FMT " blocked by blacklist error %d, CS" NODE_FMT,
 			      MAP_ARGS(ireq->flushreq.map), cs->blacklist_reason, NODE_ARGS(cs->id));
 			spin_lock(&ireq->completion_data.child_lock);