[Devel,vz7,18/46] fuse: separate out input queue

Submitted by Maxim Patlasov on March 25, 2017, 2:19 a.m.

Details

Message ID 149040836344.25341.10386440159315718072.stgit@maxim-thinkpad
State New
Series "fuse: add multi-threading support"
Headers show

Commit Message

Maxim Patlasov March 25, 2017, 2:19 a.m.
Backport from ml:

commit f88996a93324483ff3ec027312bbacacf97a555b
Author: Miklos Szeredi <mszeredi@suse.cz>
Date:   Wed Jul 1 16:26:01 2015 +0200

    fuse: separate out input queue

    The input queue contains normal requests (fc->pending), forgets
    (fc->forget_*) and interrupts (fc->interrupts).  There's also fc->waitq and
    fc->fasync for waking up the readers of the fuse device when a request is
    available.

    The fc->reqctr is also moved to the input queue (assigned to the request
    when the request is added to the input queue).

    This patch just rearranges the fields, no functional change.

    Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
    Reviewed-by: Ashish Samant <ashish.samant@oracle.com>

Signed-off-by: Maxim Patlasov <mpatlasov@virtuozzo.com>
---
 fs/fuse/control.c |    4 +-
 fs/fuse/dev.c     |  134 ++++++++++++++++++++++++++++++-----------------------
 fs/fuse/fuse_i.h  |   47 ++++++++++---------
 fs/fuse/inode.c   |   20 +++++---
 4 files changed, 116 insertions(+), 89 deletions(-)

Patch hide | download patch | download mbox

diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 889c907..61a6a48 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -284,7 +284,7 @@  static int fuse_conn_seq_open(struct file *filp, int list_id)
 		fcp->req_list = &conn->processing;
 		break;
 	case FUSE_PENDING_REQ:
-		fcp->req_list = &conn->pending;
+		fcp->req_list = &conn->iq.pending;
 		break;
 	case FUSE_IO_REQ:
 		fcp->req_list = &conn->io;
@@ -399,7 +399,7 @@  static int fuse_conn_show(struct seq_file *sf, void *v)
 	seq_printf(sf, "Connected: %d\n", fc->connected);
 	seq_printf(sf, "Initialized: %d\n", fc->initialized);
 	seq_printf(sf, "Blocked: %d\n", fc->blocked);
-	seq_printf(sf, "WQ active: %d\n", waitqueue_active(&fc->waitq));
+	seq_printf(sf, "WQ active: %d\n", waitqueue_active(&fc->iq.waitq));
 	seq_printf(sf, "Blocked_wq active: %d\n", waitqueue_active(&fc->blocked_waitq));
 	seq_printf(sf, "num_background: %d\n", fc->num_background);
 	seq_printf(sf, "num_waiting: %d\n", atomic_read(&fc->num_waiting));
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 943e050..8b18c19 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -301,32 +301,34 @@  static unsigned len_args(unsigned numargs, struct fuse_arg *args)
 	return nbytes;
 }
 
-static u64 fuse_get_unique(struct fuse_conn *fc)
+static u64 fuse_get_unique(struct fuse_iqueue *fiq)
 {
-	return ++fc->reqctr;
+	return ++fiq->reqctr;
 }
 
-static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
+static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
 	req->in.h.len = sizeof(struct fuse_in_header) +
 		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
-	list_add_tail(&req->list, &fc->pending);
-	wake_up(&fc->waitq);
-	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+	list_add_tail(&req->list, &fiq->pending);
+	wake_up(&fiq->waitq);
+	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
 
 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
 		       u64 nodeid, u64 nlookup)
 {
+	struct fuse_iqueue *fiq = &fc->iq;
+
 	forget->forget_one.nodeid = nodeid;
 	forget->forget_one.nlookup = nlookup;
 
 	spin_lock(&fc->lock);
 	if (fc->connected) {
-		fc->forget_list_tail->next = forget;
-		fc->forget_list_tail = forget;
-		wake_up(&fc->waitq);
-		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+		fiq->forget_list_tail->next = forget;
+		fiq->forget_list_tail = forget;
+		wake_up(&fiq->waitq);
+		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 	} else {
 		kfree(forget);
 	}
@@ -338,12 +340,13 @@  static void flush_bg_queue(struct fuse_conn *fc)
 	while (fc->active_background < fc->max_background &&
 	       !list_empty(&fc->bg_queue)) {
 		struct fuse_req *req;
+		struct fuse_iqueue *fiq = &fc->iq;
 
 		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
 		list_del(&req->list);
 		fc->active_background++;
-		req->in.h.unique = fuse_get_unique(fc);
-		queue_request(fc, req);
+		req->in.h.unique = fuse_get_unique(fiq);
+		queue_request(fiq, req);
 	}
 }
 
@@ -393,11 +396,11 @@  __releases(fc->lock)
 	fuse_put_request(fc, req);
 }
 
-static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
+static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
-	list_add_tail(&req->intr_entry, &fc->interrupts);
-	wake_up(&fc->waitq);
-	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+	list_add_tail(&req->intr_entry, &fiq->interrupts);
+	wake_up(&fiq->waitq);
+	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
 
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
@@ -414,7 +417,7 @@  static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
 		spin_lock(&fc->lock);
 		set_bit(FR_INTERRUPTED, &req->flags);
 		if (test_bit(FR_SENT, &req->flags))
-			queue_interrupt(fc, req);
+			queue_interrupt(&fc->iq, req);
 		spin_unlock(&fc->lock);
 	}
 
@@ -457,8 +460,10 @@  static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req,
 		spin_unlock(&fc->lock);
 		req->out.h.error = -EIO;
 	} else {
-		req->in.h.unique = fuse_get_unique(fc);
-		queue_request(fc, req);
+		struct fuse_iqueue *fiq = &fc->iq;
+
+		req->in.h.unique = fuse_get_unique(fiq);
+		queue_request(fiq, req);
 		/* acquire extra reference, since request is still needed
 		   after request_end() */
 		__fuse_get_request(req);
@@ -539,12 +544,13 @@  static int fuse_request_send_notify_reply(struct fuse_conn *fc,
 					  struct fuse_req *req, u64 unique)
 {
 	int err = -ENODEV;
+	struct fuse_iqueue *fiq = &fc->iq;
 
 	__clear_bit(FR_ISREPLY, &req->flags);
 	req->in.h.unique = unique;
 	spin_lock(&fc->lock);
 	if (fc->connected) {
-		queue_request(fc, req);
+		queue_request(fiq, req);
 		err = 0;
 	}
 	spin_unlock(&fc->lock);
@@ -1009,15 +1015,15 @@  static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
 	return err;
 }
 
-static int forget_pending(struct fuse_conn *fc)
+static int forget_pending(struct fuse_iqueue *fiq)
 {
-	return fc->forget_list_head.next != NULL;
+	return fiq->forget_list_head.next != NULL;
 }
 
-static int request_pending(struct fuse_conn *fc)
+static int request_pending(struct fuse_iqueue *fiq)
 {
-	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
-		forget_pending(fc);
+	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
+		forget_pending(fiq);
 }
 
 /* Wait until a request is available on the pending list */
@@ -1025,10 +1031,11 @@  static void request_wait(struct fuse_conn *fc)
 __releases(fc->lock)
 __acquires(fc->lock)
 {
+	struct fuse_iqueue *fiq = &fc->iq;
 	DECLARE_WAITQUEUE(wait, current);
 
-	add_wait_queue_exclusive(&fc->waitq, &wait);
-	while (fc->connected && !request_pending(fc)) {
+	add_wait_queue_exclusive(&fiq->waitq, &wait);
+	while (fc->connected && !request_pending(fiq)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (signal_pending(current))
 			break;
@@ -1038,7 +1045,7 @@  __acquires(fc->lock)
 		spin_lock(&fc->lock);
 	}
 	set_current_state(TASK_RUNNING);
-	remove_wait_queue(&fc->waitq, &wait);
+	remove_wait_queue(&fiq->waitq, &wait);
 }
 
 /*
@@ -1059,7 +1066,7 @@  __releases(fc->lock)
 	int err;
 
 	list_del_init(&req->intr_entry);
-	req->intr_unique = fuse_get_unique(fc);
+	req->intr_unique = fuse_get_unique(&fc->iq);
 	memset(&ih, 0, sizeof(ih));
 	memset(&arg, 0, sizeof(arg));
 	ih.len = reqsize;
@@ -1079,21 +1086,21 @@  __releases(fc->lock)
 	return err ? err : reqsize;
 }
 
-static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
+static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
 					       unsigned max,
 					       unsigned *countp)
 {
-	struct fuse_forget_link *head = fc->forget_list_head.next;
+	struct fuse_forget_link *head = fiq->forget_list_head.next;
 	struct fuse_forget_link **newhead = &head;
 	unsigned count;
 
 	for (count = 0; *newhead != NULL && count < max; count++)
 		newhead = &(*newhead)->next;
 
-	fc->forget_list_head.next = *newhead;
+	fiq->forget_list_head.next = *newhead;
 	*newhead = NULL;
-	if (fc->forget_list_head.next == NULL)
-		fc->forget_list_tail = &fc->forget_list_head;
+	if (fiq->forget_list_head.next == NULL)
+		fiq->forget_list_tail = &fiq->forget_list_head;
 
 	if (countp != NULL)
 		*countp = count;
@@ -1107,14 +1114,15 @@  static int fuse_read_single_forget(struct fuse_conn *fc,
 __releases(fc->lock)
 {
 	int err;
-	struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
+	struct fuse_iqueue *fiq = &fc->iq;
+	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
 	struct fuse_forget_in arg = {
 		.nlookup = forget->forget_one.nlookup,
 	};
 	struct fuse_in_header ih = {
 		.opcode = FUSE_FORGET,
 		.nodeid = forget->forget_one.nodeid,
-		.unique = fuse_get_unique(fc),
+		.unique = fuse_get_unique(fiq),
 		.len = sizeof(ih) + sizeof(arg),
 	};
 
@@ -1142,10 +1150,11 @@  __releases(fc->lock)
 	unsigned max_forgets;
 	unsigned count;
 	struct fuse_forget_link *head;
+	struct fuse_iqueue *fiq = &fc->iq;
 	struct fuse_batch_forget_in arg = { .count = 0 };
 	struct fuse_in_header ih = {
 		.opcode = FUSE_BATCH_FORGET,
-		.unique = fuse_get_unique(fc),
+		.unique = fuse_get_unique(fiq),
 		.len = sizeof(ih) + sizeof(arg),
 	};
 
@@ -1155,7 +1164,7 @@  __releases(fc->lock)
 	}
 
 	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
-	head = dequeue_forget(fc, max_forgets, &count);
+	head = dequeue_forget(fiq, max_forgets, &count);
 	spin_unlock(&fc->lock);
 
 	arg.count = count;
@@ -1187,7 +1196,9 @@  static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
 			    size_t nbytes)
 __releases(fc->lock)
 {
-	if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
+	struct fuse_iqueue *fiq = &fc->iq;
+
+	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
 		return fuse_read_single_forget(fc, cs, nbytes);
 	else
 		return fuse_read_batch_forget(fc, cs, nbytes);
@@ -1206,6 +1217,7 @@  static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 				struct fuse_copy_state *cs, size_t nbytes)
 {
 	int err;
+	struct fuse_iqueue *fiq = &fc->iq;
 	struct fuse_req *req;
 	struct fuse_in *in;
 	unsigned reqsize;
@@ -1214,7 +1226,7 @@  static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 	spin_lock(&fc->lock);
 	err = -EAGAIN;
 	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
-	    !request_pending(fc))
+	    !request_pending(fiq))
 		goto err_unlock;
 
 	request_wait(fc);
@@ -1222,24 +1234,24 @@  static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 	if (!fc->connected)
 		goto err_unlock;
 	err = -ERESTARTSYS;
-	if (!request_pending(fc))
+	if (!request_pending(fiq))
 		goto err_unlock;
 
-	if (!list_empty(&fc->interrupts)) {
-		req = list_entry(fc->interrupts.next, struct fuse_req,
+	if (!list_empty(&fiq->interrupts)) {
+		req = list_entry(fiq->interrupts.next, struct fuse_req,
 				 intr_entry);
 		return fuse_read_interrupt(fc, cs, nbytes, req);
 	}
 
-	if (forget_pending(fc)) {
-		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
+	if (forget_pending(fiq)) {
+		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
 			return fuse_read_forget(fc, cs, nbytes);
 
-		if (fc->forget_batch <= -8)
-			fc->forget_batch = 16;
+		if (fiq->forget_batch <= -8)
+			fiq->forget_batch = 16;
 	}
 
-	req = list_entry(fc->pending.next, struct fuse_req, list);
+	req = list_entry(fiq->pending.next, struct fuse_req, list);
 	clear_bit(FR_PENDING, &req->flags);
 	list_move(&req->list, &fc->io);
 
@@ -1278,7 +1290,7 @@  static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
 		set_bit(FR_SENT, &req->flags);
 		list_move_tail(&req->list, &fc->processing);
 		if (test_bit(FR_INTERRUPTED, &req->flags))
-			queue_interrupt(fc, req);
+			queue_interrupt(fiq, req);
 		spin_unlock(&fc->lock);
 	}
 	return reqsize;
@@ -1898,7 +1910,7 @@  static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
 		if (oh.error == -ENOSYS)
 			fc->no_interrupt = 1;
 		else if (oh.error == -EAGAIN)
-			queue_interrupt(fc, req);
+			queue_interrupt(&fc->iq, req);
 
 		spin_unlock(&fc->lock);
 		fuse_copy_finish(cs);
@@ -2028,16 +2040,18 @@  out:
 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
 {
 	unsigned mask = POLLOUT | POLLWRNORM;
+	struct fuse_iqueue *fiq;
 	struct fuse_conn *fc = fuse_get_conn(file);
 	if (!fc)
 		return POLLERR;
 
-	poll_wait(file, &fc->waitq, wait);
+	fiq = &fc->iq;
+	poll_wait(file, &fiq->waitq, wait);
 
 	spin_lock(&fc->lock);
 	if (!fc->connected)
 		mask = POLLERR;
-	else if (request_pending(fc))
+	else if (request_pending(fiq))
 		mask |= POLLIN | POLLRDNORM;
 	spin_unlock(&fc->lock);
 
@@ -2099,6 +2113,8 @@  static void end_polls(struct fuse_conn *fc)
  */
 void fuse_abort_conn(struct fuse_conn *fc)
 {
+	struct fuse_iqueue *fiq = &fc->iq;
+
 	spin_lock(&fc->lock);
 	if (fc->connected) {
 		struct fuse_req *req, *next;
@@ -2118,7 +2134,7 @@  void fuse_abort_conn(struct fuse_conn *fc)
 		}
 		fc->max_background = UINT_MAX;
 		flush_bg_queue(fc);
-		list_splice_init(&fc->pending, &to_end2);
+		list_splice_init(&fiq->pending, &to_end2);
 		list_splice_init(&fc->processing, &to_end2);
 		while (!list_empty(&to_end1)) {
 			req = list_first_entry(&to_end1, struct fuse_req, list);
@@ -2127,12 +2143,12 @@  void fuse_abort_conn(struct fuse_conn *fc)
 			spin_lock(&fc->lock);
 		}
 		end_requests(fc, &to_end2);
-		while (forget_pending(fc))
-			kfree(dequeue_forget(fc, 1, NULL));
+		while (forget_pending(fiq))
+			kfree(dequeue_forget(fiq, 1, NULL));
 		end_polls(fc);
-		wake_up_all(&fc->waitq);
+		wake_up_all(&fiq->waitq);
 		wake_up_all(&fc->blocked_waitq);
-		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
+		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 	}
 	spin_unlock(&fc->lock);
 }
@@ -2143,7 +2159,7 @@  int fuse_dev_release(struct inode *inode, struct file *file)
 	struct fuse_conn *fc = fuse_get_conn(file);
 	if (fc) {
 		WARN_ON(!list_empty(&fc->io));
-		WARN_ON(fc->fasync != NULL);
+		WARN_ON(fc->iq.fasync != NULL);
 		fuse_abort_conn(fc);
 		fuse_conn_put(fc);
 	}
@@ -2159,7 +2175,7 @@  static int fuse_dev_fasync(int fd, struct file *file, int on)
 		return -EPERM;
 
 	/* No locking - fasync_helper does its own locking */
-	return fasync_helper(fd, file, on, &fc->fasync);
+	return fasync_helper(fd, file, on, &fc->iq.fasync);
 }
 
 const struct file_operations fuse_dev_operations = {
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index eddc995..7f9280b 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -407,6 +407,30 @@  struct fuse_req {
 	struct file *stolen_file;
 };
 
+struct fuse_iqueue {
+	/** Readers of the connection are waiting on this */
+	wait_queue_head_t waitq;
+
+	/** The next unique request id */
+	u64 reqctr;
+
+	/** The list of pending requests */
+	struct list_head pending;
+
+	/** Pending interrupts */
+	struct list_head interrupts;
+
+	/** Queue of pending forgets */
+	struct fuse_forget_link forget_list_head;
+	struct fuse_forget_link *forget_list_tail;
+
+	/** Batching of FORGET requests (positive indicates FORGET batch) */
+	int forget_batch;
+
+	/** O_ASYNC requests */
+	struct fasync_struct *fasync;
+};
+
 /**
  * A Fuse connection.
  *
@@ -439,11 +463,8 @@  struct fuse_conn {
 	/** Maximum write size */
 	unsigned max_write;
 
-	/** Readers of the connection are waiting on this */
-	wait_queue_head_t waitq;
-
-	/** The list of pending requests */
-	struct list_head pending;
+	/** Input queue */
+	struct fuse_iqueue iq;
 
 	/** The list of requests being processed */
 	struct list_head processing;
@@ -472,16 +493,6 @@  struct fuse_conn {
 	/** The list of background requests set aside for later queuing */
 	struct list_head bg_queue;
 
-	/** Pending interrupts */
-	struct list_head interrupts;
-
-	/** Queue of pending forgets */
-	struct fuse_forget_link forget_list_head;
-	struct fuse_forget_link *forget_list_tail;
-
-	/** Batching of FORGET requests (positive indicates FORGET batch) */
-	int forget_batch;
-
 	/** Flag indicating that INIT reply has been received. Allocating
 	 * any fuse request will be suspended until the flag is set */
 	int initialized;
@@ -497,9 +508,6 @@  struct fuse_conn {
 	/** waitq for reserved requests */
 	wait_queue_head_t reserved_req_waitq;
 
-	/** The next unique request id */
-	u64 reqctr;
-
 	/** Connection established, cleared on umount, connection
 	    abort and device release */
 	unsigned connected;
@@ -625,9 +633,6 @@  struct fuse_conn {
 	/** number of dentries used in the above array */
 	int ctl_ndents;
 
-	/** O_ASYNC requests */
-	struct fasync_struct *fasync;
-
 	/** Key for lock owner ID scrambling */
 	u32 scramble_key[4];
 
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index bf7f411..f3610d6 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -422,7 +422,7 @@  int fuse_invalidate_files(struct fuse_conn *fc, u64 nodeid)
 	if (!err || err == -EIO) { /* AS_EIO might trigger -EIO */
 		spin_lock(&fc->lock);
 		fuse_kill_requests(fc, inode, &fc->processing);
-		fuse_kill_requests(fc, inode, &fc->pending);
+		fuse_kill_requests(fc, inode, &fc->iq.pending);
 		fuse_kill_requests(fc, inode, &fc->bg_queue);
 		fuse_kill_requests(fc, inode, &fc->io);
 		wake_up(&fi->page_waitq); /* readpage[s] can wait on fuse wb */
@@ -470,8 +470,8 @@  void fuse_conn_kill(struct fuse_conn *fc)
 	fc->initialized = 1;
 	spin_unlock(&fc->lock);
 	/* Flush all readers on this fs */
-	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
-	wake_up_all(&fc->waitq);
+	kill_fasync(&fc->iq.fasync, SIGIO, POLL_IN);
+	wake_up_all(&fc->iq.waitq);
 	wake_up_all(&fc->blocked_waitq);
 	wake_up_all(&fc->reserved_req_waitq);
 }
@@ -702,6 +702,15 @@  static int fuse_show_options(struct seq_file *m, struct dentry *root)
 	return 0;
 }
 
+static void fuse_iqueue_init(struct fuse_iqueue *fiq)
+{
+	memset(fiq, 0, sizeof(struct fuse_iqueue));
+	init_waitqueue_head(&fiq->waitq);
+	INIT_LIST_HEAD(&fiq->pending);
+	INIT_LIST_HEAD(&fiq->interrupts);
+	fiq->forget_list_tail = &fiq->forget_list_head;
+}
+
 void fuse_conn_init(struct fuse_conn *fc)
 {
 	memset(fc, 0, sizeof(*fc));
@@ -709,17 +718,14 @@  void fuse_conn_init(struct fuse_conn *fc)
 	mutex_init(&fc->inst_mutex);
 	init_rwsem(&fc->killsb);
 	atomic_set(&fc->count, 1);
-	init_waitqueue_head(&fc->waitq);
 	init_waitqueue_head(&fc->blocked_waitq);
 	init_waitqueue_head(&fc->reserved_req_waitq);
-	INIT_LIST_HEAD(&fc->pending);
+	fuse_iqueue_init(&fc->iq);
 	INIT_LIST_HEAD(&fc->processing);
 	INIT_LIST_HEAD(&fc->io);
-	INIT_LIST_HEAD(&fc->interrupts);
 	INIT_LIST_HEAD(&fc->bg_queue);
 	INIT_LIST_HEAD(&fc->entry);
 	INIT_LIST_HEAD(&fc->conn_files);
-	fc->forget_list_tail = &fc->forget_list_head;
 	atomic_set(&fc->num_waiting, 0);
 	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
 	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;