[RHEL7,COMMIT] fuse kio: logging improvements

Submitted by Konstantin Khorenko on July 19, 2018, 10:19 a.m.

Details

Message ID 201807191019.w6JAJRwo015320@finist_ce7.work
State New
Series "fuse kio: logging improvements"
Headers show

Commit Message

Konstantin Khorenko July 19, 2018, 10:19 a.m.
The commit is pushed to "branch-rh7-3.10.0-862.6.3.vz7.62.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-862.6.3.vz7.62.4
------>
commit 470dad75dfed84b1eed26253fcbc7635b969012f
Author: Alexey Kuznetsov <kuznet@virtuozzo.com>
Date:   Thu Jul 19 13:19:27 2018 +0300

    fuse kio: logging improvements
    
    Recent problems showed we have severe flaws in logging which
    make working on problems in kio/pcs very difficult, if possible
    at all.
    
    This patch relays all exceptional trace messages to user space,
    where they are saved to the vstorage-mount log. The broken
    pcs_log() macro is dropped.
    
    Messages with level <= LOG_TRACE will not affect performance
    because they are rare.
    
    Current TRACE/DTRACE macros are preserved as is, they still do internal
    kernel tracing via trace_printk.
    
    Signed-off-by: Alexey Kuznetsov <kuznet@virtuozzo.com>
---
 fs/fuse/kio/pcs/fuse_io.c          |   2 +
 fs/fuse/kio/pcs/fuse_ktrace.h      |   9 ++++
 fs/fuse/kio/pcs/log.h              |  11 ++--
 fs/fuse/kio/pcs/pcs_client_types.h |   7 +--
 fs/fuse/kio/pcs/pcs_cluster.c      |   5 +-
 fs/fuse/kio/pcs/pcs_cs.c           |  22 ++++----
 fs/fuse/kio/pcs/pcs_fuse_kdirect.c |  34 ++++++++++--
 fs/fuse/kio/pcs/pcs_map.c          | 106 ++++++++++++++++---------------------
 fs/fuse/kio/pcs/pcs_req.c          |   9 ++--
 fs/fuse/kio/pcs/pcs_req.h          |  10 ----
 fs/fuse/kio/pcs/pcs_rpc.c          |  36 ++++++-------
 11 files changed, 124 insertions(+), 127 deletions(-)

Patch hide | download patch | download mbox

diff --git a/fs/fuse/kio/pcs/fuse_io.c b/fs/fuse/kio/pcs/fuse_io.c
index 65c4a1881d89..61a2e7f1eac6 100644
--- a/fs/fuse/kio/pcs/fuse_io.c
+++ b/fs/fuse/kio/pcs/fuse_io.c
@@ -14,6 +14,7 @@ 
 #include "pcs_cluster.h"
 #include "log.h"
 #include "fuse_prometheus.h"
+#include "fuse_ktrace.h"
 
 #include "../../fuse_i.h"
 
@@ -159,6 +160,7 @@  static void prepare_io_(struct pcs_fuse_req *r, unsigned short type, off_t offse
 	struct pcs_int_request *ireq = &r->exec.ireq;
 
 	TRACE("INIT r(%p) ireq:%p {%ld, %ld}\n", r, ireq, offset, size);
+	FUSE_KDTRACE(ireq->cc->fc, "req %d {%p} " DENTRY_FMT " %ld+%ld", type, ireq, DENTRY_ARGS(ireq->dentry), offset, size);
 
 	/* Initialize IO request */
 	switch (type)
diff --git a/fs/fuse/kio/pcs/fuse_ktrace.h b/fs/fuse/kio/pcs/fuse_ktrace.h
index 7f8a50a9cc1f..1950b872ef4a 100644
--- a/fs/fuse/kio/pcs/fuse_ktrace.h
+++ b/fs/fuse/kio/pcs/fuse_ktrace.h
@@ -4,6 +4,8 @@ 
 #include "fuse_ktrace_prot.h"
 #include <linux/relay.h>
 
+#define KTRACE_LOG_BUF_SIZE	256
+
 struct fuse_ktrace
 {
 	atomic_t				refcnt;
@@ -12,6 +14,7 @@  struct fuse_ktrace
 	unsigned long __percpu			*ovfl;
 	struct dentry				*prometheus_dentry;
 	struct kfuse_histogram * __percpu	*prometheus_hist;
+	u8 * __percpu				buf;
 };
 
 static inline void * fuse_trace_prepare(struct fuse_ktrace * tr, int type, int len)
@@ -42,4 +45,10 @@  static inline void * fuse_trace_prepare(struct fuse_ktrace * tr, int type, int l
 #define FUSE_TRACE_PREPARE(tr, type, len) fuse_trace_prepare((tr), (type), (len))
 #define FUSE_TRACE_COMMIT(tr)       preempt_enable()
 
+void __kfuse_trace(struct fuse_conn * fc, unsigned long ip, const char * fmt, ...);
+
+#define FUSE_KTRACE(fc, fmt, args...) do { struct fuse_conn * __fc = (fc); if (__fc->ktrace_level >= LOG_TRACE) __kfuse_trace(__fc, _THIS_IP_, "%s: " fmt, __FUNCTION__, ## args); } while (0)
+#define FUSE_KDTRACE(fc, fmt, args...) do { struct fuse_conn * __fc = (fc); if (__fc->ktrace_level >= LOG_DTRACE) __kfuse_trace(__fc, _THIS_IP_, "%s: " fmt, __FUNCTION__, ## args); } while (0)
+#define FUSE_KLOG(fc, level, fmt, args...) do { struct fuse_conn * __fc = (fc); if (__fc->ktrace_level >= (level)) __kfuse_trace(__fc, 0, "%s: " fmt, __FUNCTION__, ## args); } while (0)
+
 #endif /* _FUSE_KTRACE_H_ */
diff --git a/fs/fuse/kio/pcs/log.h b/fs/fuse/kio/pcs/log.h
index ee524a8b7a34..5127470d6f7e 100644
--- a/fs/fuse/kio/pcs/log.h
+++ b/fs/fuse/kio/pcs/log.h
@@ -19,6 +19,7 @@ 
 #define LOG_DEBUG5	9
 #define LOG_LEVEL_MAX	LOG_DEBUG5
 
+#define LOG_DTRACE LOG_DEBUG4
 
 #define __PCS_DEBUG__ 1
 #define __PCS_DTRACE__ 1
@@ -28,18 +29,12 @@ 
 #define TRACE(fmt, ...) do {} while (0)
 #define DTRACE(fmt, ...) do {} while (0)
 #else
-static int pcs_loglevel __attribute__ ((unused)) = LOG_DEBUG;
-#define pcs_log(level, fmt, args...) do					\
-	{								\
-		if (level <= pcs_loglevel)				\
-			pr_debug(fmt , ##args);				\
-	} while (0)
 #define TRACE(fmt, args...)	trace_printk("%d: " fmt "\n", __LINE__, ## args)
 
 #ifndef __PCS_DTRACE__
 #define DTRACE(fmt, ...) do {} while (0)
 #else
 #define DTRACE(fmt, args...)	trace_printk("%d: " fmt "\n", __LINE__, ## args)
-#endif
-#endif
+#endif /* __PCS_DTRACE__ */
+#endif /* __PCS_DEBUG__ */
 #endif /* __PCSLOG_H__ */
diff --git a/fs/fuse/kio/pcs/pcs_client_types.h b/fs/fuse/kio/pcs/pcs_client_types.h
index 089817b4af9c..9ddce5cff3f5 100644
--- a/fs/fuse/kio/pcs/pcs_client_types.h
+++ b/fs/fuse/kio/pcs/pcs_client_types.h
@@ -164,11 +164,8 @@  typedef struct _pcs_api_csconnreq_t {
 
 #define PCS_FILE_ID_FMT       "[%08llx]"
 #define PCS_FILE_ID_ARGS(id)  (unsigned long long)(id)
-#define DENTRY_NAME_FMT       "%*.*s"
-#define DENTRY_FMT            PCS_FILE_ID_FMT "/" DENTRY_NAME_FMT
-#define DENTRY_NAME_ARGS(n)   (n).len, (n).len, (n).data
-#define DENTRY_ID_ARGS(id)    PCS_FILE_ID_ARGS((id).parent), DENTRY_NAME_ARGS((id).name)
-#define DENTRY_ARGS(de)	      DENTRY_ID_ARGS(((struct pcs_dentry_info *)(de))->id)
+#define DENTRY_FMT            PCS_FILE_ID_FMT "/" PCS_FILE_ID_FMT
+#define DENTRY_ARGS(de)	      PCS_FILE_ID_ARGS((de)->id.parent), PCS_FILE_ID_ARGS((de)->fileinfo.attr.id)
 
 #define DENTRY_SIZE(de)       ((de)->fileinfo.attr.size)
 #define DENTRY_CHUNK_SIZE(de) ((de)->fileinfo.sys.chunk_size)
diff --git a/fs/fuse/kio/pcs/pcs_cluster.c b/fs/fuse/kio/pcs/pcs_cluster.c
index b3d481287362..e243b11d0695 100644
--- a/fs/fuse/kio/pcs/pcs_cluster.c
+++ b/fs/fuse/kio/pcs/pcs_cluster.c
@@ -38,10 +38,6 @@  void pcs_sreq_complete(struct pcs_int_request *sreq)
 			 * and, most likely, resubmit request.
 			 */
 			if (ireq_check_redo(sreq)) {
-				if (ireq_is_timed_out(sreq)) {
-					DTRACE("timeout while IO request on \"" DENTRY_FMT "\" last_err=%u",
-						DENTRY_ARGS(sreq->dentry), sreq->error.value);
-				}
 				if (sreq->type != PCS_IREQ_CUSTOM) {
 					map_notify_soft_error(sreq);
 
@@ -567,6 +563,7 @@  int pcs_cluster_init(struct pcs_fuse_cluster *pfc, struct workqueue_struct *wq,
 	/* core init */
 	if (pcs_cc_init(&pfc->cc, wq, NULL, &attr))
 		return -1;
+	pfc->cc.fc = fc;
 	pfc->cc.op.ireq_process	   = ireq_process_;
 	pfc->cc.op.ireq_on_error   = ireq_on_error_;
 	pfc->cc.op.ireq_check_redo = ireq_check_redo_;
diff --git a/fs/fuse/kio/pcs/pcs_cs.c b/fs/fuse/kio/pcs/pcs_cs.c
index bea8e5c16f5c..56ab3adb36db 100644
--- a/fs/fuse/kio/pcs/pcs_cs.c
+++ b/fs/fuse/kio/pcs/pcs_cs.c
@@ -194,7 +194,7 @@  struct pcs_cs *pcs_cs_find_create(struct pcs_cs_set *csset, PCS_NODE_ID_T *id, P
 				cs->addr = *addr;
 				cs->addr_serno++;
 
-				TRACE("Port change CS" NODE_FMT " seq=%d", NODE_ARGS(*id), cs->addr_serno);
+				FUSE_KTRACE(cc_from_csset(csset)->fc, "Port change CS" NODE_FMT " seq=%d", NODE_ARGS(*id), cs->addr_serno);
 				pcs_rpc_set_address(cs->rpc, addr);
 
 				if (!(flags & CS_FL_INACTIVE)) {
@@ -337,7 +337,7 @@  static void cs_response_done(struct pcs_msg *msg)
 
 		pcs_map_verify_sync_state(ireq->dentry, ireq, msg);
 	} else {
-		TRACE(XID_FMT " IO error %d %lu, ireq:%p : %llu:%u+%u\n",
+		FUSE_KTRACE(ireq->cc->fc, XID_FMT " IO error %d %lu, ireq:%p : %llu:%u+%u",
 		      XID_ARGS(ireq->iochunk.hbuf.hdr.xid), msg->error.value,
 		      msg->error.remote ? (unsigned long)msg->error.offender.val : 0UL,
 		      ireq, (unsigned long long)ireq->iochunk.chunk,
@@ -580,7 +580,7 @@  static void handle_congestion(struct pcs_cs *cs, struct pcs_rpc_hdr *h)
 {
 	struct pcs_cs *who;
 
-	TRACE("Received congestion notification from CS" NODE_FMT, NODE_ARGS(h->xid.origin));
+	FUSE_KTRACE(cc_from_csset(cs->css)->fc, "Received congestion notification from CS" NODE_FMT, NODE_ARGS(h->xid.origin));
 
 	if (cs->id.val == h->xid.origin.val) {
 		who = cs;
@@ -649,8 +649,8 @@  static void cs_keep_waiting(struct pcs_rpc *ep, struct pcs_msg *req, struct pcs_
 		}
 
 		if (!who->cwr_state) {
-			DTRACE("Congestion window on CS" NODE_FMT " reducing %d/%d/%d", NODE_ARGS(h->xid.origin),
-			       who->in_flight, who->eff_cwnd, who->cwnd);
+			FUSE_KDTRACE(cc_from_csset(cs->css)->fc, "Congestion window on CS" NODE_FMT " reducing %d/%d/%d", NODE_ARGS(h->xid.origin),
+				     who->in_flight, who->eff_cwnd, who->cwnd);
 			if (who->in_flight < who->cwnd)
 				who->cwnd = who->in_flight;
 			who->cwnd /= 2;
@@ -671,9 +671,9 @@  static void cs_keep_waiting(struct pcs_rpc *ep, struct pcs_msg *req, struct pcs_
 			 */
 			if (ireq->iochunk.banned_cs.val == 0 && lat >= PCS_MAX_READ_IO_LATENCY*1000
 			    && may_reroute(ireq->iochunk.csl, h->xid.origin)) {
-				TRACE("Canceling read on CS" NODE_FMT, NODE_ARGS(h->xid.origin));
 				ireq->iochunk.banned_cs = h->xid.origin;
 				spin_unlock(&who->lock);
+				FUSE_KTRACE(ireq->cc->fc, "Canceling read on CS" NODE_FMT, NODE_ARGS(h->xid.origin));
 				pcs_rpc_cancel_request(req);
 				return;
 			}
@@ -694,7 +694,7 @@  static int cs_input(struct pcs_rpc *ep, struct pcs_msg *msg)
 		msg->done(msg);
 		return 0;
 	default:
-		pcs_log(0, "Unsupported message type %u\n", h->type);
+		FUSE_KLOG(cc_from_rpc(ep->eng)->fc, LOG_ERR, "Unsupported message type %u\n", h->type);
 		return PCS_ERR_PROTOCOL;
 	}
 }
@@ -913,7 +913,7 @@  void cs_increment_in_flight(struct pcs_cs *cs, unsigned int to_add)
 	if (cs->in_flight > cs->in_flight_hwm) {
 		cs->in_flight_hwm = cs->in_flight;
 		cs->in_flight_hwm_stamp = jiffies;
-		DTRACE("HWM on CS" NODE_FMT " is %u\n", NODE_ARGS(cs->id), cs->in_flight);
+		FUSE_KDTRACE(cc_from_csset(cs->css)->fc, "HWM on CS" NODE_FMT " is %u", NODE_ARGS(cs->id), cs->in_flight);
 	}
 	spin_unlock(&cs->lock);
 }
@@ -950,8 +950,8 @@  void cs_cwnd_use_or_lose(struct pcs_cs *cs)
 			if (cwnd < PCS_CS_INIT_CWND)
 				cwnd = PCS_CS_INIT_CWND;
 
-			TRACE("Congestion window on CS#" NODE_FMT " was not used, shrink %u -> %u", NODE_ARGS(cs->id),
-			      cs->cwnd, cwnd);
+			FUSE_KTRACE(cc_from_csset(cs->css)->fc, "Congestion window on CS#" NODE_FMT " was not used, shrink %u -> %u", NODE_ARGS(cs->id),
+				    cs->cwnd, cwnd);
 			cs->cwnd = cwnd;
 			if (cs->eff_cwnd > cwnd)
 				cs->eff_cwnd = cwnd;
@@ -972,7 +972,7 @@  static void cs_probe_done(struct pcs_msg *msg)
 		if (!pcs_if_error(&msg->error)) {
 			cs_whitelist(cs, "probe");
 		} else {
-			TRACE("probe error %d", msg->error.value);
+			FUSE_KTRACE(cc_from_csset(css)->fc, "probe error %d", msg->error.value);
 			cs_blacklist(cs, msg->error.value, "probe");
 		}
 		cs->use_count--;
diff --git a/fs/fuse/kio/pcs/pcs_fuse_kdirect.c b/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
index 118b75bf920c..9d81792c5807 100644
--- a/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
+++ b/fs/fuse/kio/pcs/pcs_fuse_kdirect.c
@@ -849,9 +849,7 @@  static void pcs_fuse_submit(struct pcs_fuse_cluster *pfc, struct fuse_req *req,
 	switch (r->req.in.h.opcode) {
 	case FUSE_WRITE:
 	case FUSE_READ: {
-		int ret;
-
-		ret = pcs_fuse_prep_rw(r);
+		int ret;		ret = pcs_fuse_prep_rw(r);
 		if (!ret)
 			goto submit;
 		if (ret > 0)
@@ -1097,6 +1095,7 @@  static void fuse_trace_free(struct fuse_ktrace *tr)
 		}
 		free_percpu(tr->prometheus_hist);
 	}
+	free_percpu(tr->buf);
 	debugfs_remove(tr->dir);
 	kfree(tr);
 }
@@ -1303,6 +1302,8 @@  static int fuse_ktrace_setup(struct fuse_conn * fc)
 		tr->prometheus_hist = hist;
 	}
 
+	tr->buf = __alloc_percpu(KTRACE_LOG_BUF_SIZE, 16);
+
 	atomic_set(&tr->refcnt, 1);
 
 	ret = -EBUSY;
@@ -1320,6 +1321,33 @@  static int fuse_ktrace_setup(struct fuse_conn * fc)
 	return ret;
 }
 
+void __kfuse_trace(struct fuse_conn * fc, unsigned long ip, const char * fmt, ...)
+{
+	struct fuse_ktrace * tr;
+        va_list va;
+	int cpu;
+
+	cpu = get_cpu();
+	tr = fc->ktrace;
+	if (tr) {
+		u8 * buf = per_cpu_ptr(tr->buf, cpu);
+		struct fuse_trace_hdr * t;
+		int len;
+
+		va_start(va, fmt);
+		len = vsnprintf(buf, KTRACE_LOG_BUF_SIZE, fmt, va);
+		va_end(va);
+		t = fuse_trace_prepare(tr, FUSE_KTRACE_STRING, len + 1);
+		if (t)
+			memcpy(t + 1, buf, len + 1);
+		FUSE_TRACE_COMMIT(tr);
+		if (ip)
+			__trace_puts(ip, buf, len);
+		else
+			pr_debug("%s", buf);
+	}
+	put_cpu();
+}
 
 static struct fuse_kio_ops kio_pcs_ops = {
 	.name		= "pcs",
diff --git a/fs/fuse/kio/pcs/pcs_map.c b/fs/fuse/kio/pcs/pcs_map.c
index fa4edd37d0e1..c07adb3b26d0 100644
--- a/fs/fuse/kio/pcs/pcs_map.c
+++ b/fs/fuse/kio/pcs/pcs_map.c
@@ -14,7 +14,9 @@ 
 #include "pcs_map.h"
 #include "pcs_cs.h"
 #include "pcs_ioctl.h"
+#include "pcs_cluster.h"
 #include "log.h"
+#include "fuse_ktrace.h"
 
 /*  Lock order
    ->map->lock	: Motivated by truncate
@@ -456,13 +458,7 @@  static void map_queue_on_limit(struct pcs_int_request *ireq)
 {
 	struct pcs_map_set * maps = &ireq->dentry->cluster->maps;
 
-	TRACE("queueing due to dirty limit\n");
-
-	if (ireq_is_timed_out(ireq)) {
-		pcs_log(LOG_ERR, "timeout while map get on \"" DENTRY_FMT "\" last_err=%u",
-			DENTRY_ARGS(ireq->dentry), ireq->error.value);
-		BUG();
-	}
+	FUSE_KTRACE(ireq->cc->fc, "queueing due to dirty limit");
 
 	if (ireq->type == PCS_IREQ_IOCHUNK && ireq->iochunk.map) {
 		pcs_map_put(ireq->iochunk.map);
@@ -698,7 +694,7 @@  void cs_blacklist(struct pcs_cs * cs, int error, char * reason)
 		spin_lock(&cs->css->lock);
 		set_bit(CS_SF_BLACKLISTED, &cs->state);
 		cs->blacklist_reason = error;
-		TRACE("Blacklisting CS" NODE_FMT " by %s, err=%d", NODE_ARGS(cs->id), reason, error);
+		FUSE_KTRACE(cc_from_csset(cs->css)->fc, "Blacklisting CS" NODE_FMT " by %s, err=%d", NODE_ARGS(cs->id), reason, error);
 		if (list_empty(&cs->css->bl_list)) {
 			struct pcs_cluster_core *cc = cc_from_csset(cs->css);
 
@@ -723,7 +719,7 @@  void cs_whitelist(struct pcs_cs * cs, char * reason)
 
 	if (cs_is_blacklisted(cs)) {
 		clear_bit(CS_SF_BLACKLISTED, &cs->state);
-		TRACE("Whitelisting CS" NODE_FMT " by %s", NODE_ARGS(cs->id), reason);
+		FUSE_KTRACE(cc_from_csset(cs->css)->fc, "Whitelisting CS" NODE_FMT " by %s", NODE_ARGS(cs->id), reason);
 
 		map_recalc_maps(cs);
 
@@ -793,7 +789,7 @@  void pcs_map_notify_addr_change(struct pcs_cs * cs)
 		if (m->state & (PCS_MAP_ERROR|PCS_MAP_RESOLVING|PCS_MAP_NEW))
 			goto unlock;
 
-		TRACE(MAP_FMT " invalidating due to address change of CS#"NODE_FMT,
+		FUSE_KTRACE(cc_from_csset(cs->css)->fc, MAP_FMT " invalidating due to address change of CS#"NODE_FMT,
 		      MAP_ARGS(m), NODE_ARGS(cs->id));
 
 		map_remote_error_nolock(m, PCS_ERR_CSD_STALE_MAP, cs->id.val);
@@ -869,7 +865,7 @@  static void evaluate_dirty_status(struct pcs_map_entry * m)
 					atomic_inc(&m->maps->dirty_count);
 				}
 			} else {
-				TRACE(MAP_FMT " integrity seq advanced on CS#"NODE_FMT,
+				FUSE_KTRACE(cc_from_maps(m->maps)->fc, MAP_FMT " integrity seq advanced on CS#"NODE_FMT,
 				      MAP_ARGS(m), NODE_ARGS(rec->info.id));
 
 				rec->sync.dirty_integrity = 0;
@@ -882,9 +878,9 @@  static void evaluate_dirty_status(struct pcs_map_entry * m)
 
 	if (!(m->flags & PCS_MAP_DIRTY)) {
 		map_sync_work_del(m);
-		pcs_log(LOG_DEBUG5, "map %p is clean", m);
+		FUSE_KLOG(cc_from_maps(m->maps)->fc, LOG_DEBUG5, "map %p is clean", m);
 	} else {
-		pcs_log(LOG_DEBUG5, "map %p is dirty", m);
+		FUSE_KLOG(cc_from_maps(m->maps)->fc, LOG_DEBUG5, "map %p is dirty", m);
 		if (!timer_pending(&m->sync_work.timer) && !(m->flags & PCS_MAP_FLUSHING))
 			map_sync_work_add(m, pcs_sync_timeout(cc_from_map(m)));
 	}
@@ -1147,19 +1143,20 @@  void pcs_map_complete(struct pcs_map_entry *m, struct pcs_ioc_getmap *omap)
 
 	evaluate_dirty_status(m);
 #ifdef __PCS_DEBUG
-	if (1) {
+	/* The output is too ugly and it is unnecessary. The information is in user space log */
+	if (0) {
 		int i;
 		TRACE(MAP_FMT " -> " CUID_FMT " psize=%u %d node map { ",
 			MAP_ARGS(m), CUID_ARGS(m->id),
 		      m->chunk_psize, m->cs_list ? m->cs_list->nsrv : 0);
 		if (m->cs_list) {
 			for (i = 0; i < m->cs_list->nsrv; i++)
-				printk( NODE_FMT ":%x:%u ",
+				trace_printk( NODE_FMT ":%x:%u ",
 				       NODE_ARGS(m->cs_list->cs[i].info.id),
 				       m->cs_list->cs[i].info.flags,
 				       CS_FL_ROLE_GET(m->cs_list->cs[i].info.flags));
 		}
-		printk("}\n");
+		trace_puts("}\n");
 	}
 #endif
 	m->error_tstamp = 0;
@@ -1264,7 +1261,7 @@  static void map_notify_error(struct pcs_map_entry * m, struct pcs_int_request *
 			for (i = 0; i < csl->nsrv; i++) {
 				if (csl->cs[i].info.id.val == sreq->error.offender.val) {
 					if (csl->cs[i].cslink.cs->addr_serno != csl->cs[i].cslink.addr_serno) {
-						TRACE("error for CS"NODE_FMT " has been suppressed", NODE_ARGS(sreq->error.offender));
+						FUSE_KTRACE(cc_from_maps(m->maps)->fc, "error for CS"NODE_FMT " has been suppressed", NODE_ARGS(sreq->error.offender));
 						suppress_error = 1;
 					}
 					break;
@@ -1310,7 +1307,7 @@  static void map_replicating(struct pcs_int_request *ireq)
 
 	read_idx = READ_ONCE(csl->read_index);
 
-	TRACE("reading unfinished replica %lx %d", csl->blacklist, read_idx);
+	FUSE_KTRACE(ireq->cc->fc, "reading unfinished replica %lx %d", csl->blacklist, read_idx);
 
 	if (ireq->iochunk.cs_index != read_idx)
 		return;
@@ -1319,7 +1316,7 @@  static void map_replicating(struct pcs_int_request *ireq)
 
 	if (!ireq->error.remote ||
 	    csl->cs[read_idx].cslink.cs->id.val != ireq->error.offender.val) {
-		TRACE("wrong cs id " NODE_FMT " " NODE_FMT, NODE_ARGS(csl->cs[read_idx].cslink.cs->id), NODE_ARGS(ireq->error.offender));
+		FUSE_KTRACE(ireq->cc->fc, "wrong cs id " NODE_FMT " " NODE_FMT, NODE_ARGS(csl->cs[read_idx].cslink.cs->id), NODE_ARGS(ireq->error.offender));
 		return;
 	}
 
@@ -1360,7 +1357,7 @@  static void map_read_error(struct pcs_int_request *ireq)
 
 	/* If this CS is already blacklisted, select another CS, we have spare ones */
 	if (cs_is_blacklisted(cs)) {
-		TRACE("Skipping CS" NODE_FMT, NODE_ARGS(cs->id));
+		FUSE_KTRACE(ireq->cc->fc, "Skipping CS" NODE_FMT, NODE_ARGS(cs->id));
 		WRITE_ONCE(csl->read_index, -1);
 		pcs_clear_error(&ireq->error);
 		return;
@@ -1418,7 +1415,7 @@  static void pcs_cs_deaccount(struct pcs_int_request *ireq, struct pcs_cs * cs, i
 			else if (cs->last_latency > iolat_cutoff*2)
 				clamp = PCS_CS_INIT_CWND/2;
 
-			TRACE("IO latency on CS" NODE_FMT " is %u, cwnd %u, clamp %u", NODE_ARGS(cs->id), cs->last_latency, cs->cwnd, clamp);
+			FUSE_KTRACE(cc_from_csset(cs->css)->fc, "IO latency on CS" NODE_FMT " is %u, cwnd %u, clamp %u", NODE_ARGS(cs->id), cs->last_latency, cs->cwnd, clamp);
 
 			if (cs->cwnd > clamp)
 				cs->cwnd = clamp;
@@ -1434,7 +1431,7 @@  static void pcs_cs_deaccount(struct pcs_int_request *ireq, struct pcs_cs * cs, i
 				cwnd = PCS_CS_MAX_CWND;
 			if (cwnd != cs->cwnd) {
 				cs->cwnd = cwnd;
-				DTRACE("Congestion window on CS" NODE_FMT " UP %u", NODE_ARGS(cs->id), cwnd);
+				FUSE_KDTRACE(cc_from_csset(cs->css)->fc, "Congestion window on CS" NODE_FMT " UP %u", NODE_ARGS(cs->id), cwnd);
 			}
 		}
 		cs->eff_cwnd = cs->cwnd;
@@ -1444,7 +1441,7 @@  static void pcs_cs_deaccount(struct pcs_int_request *ireq, struct pcs_cs * cs, i
 		 * window to minimum to allow one request in flight. It will come back to normal
 		 * as soon as CS is probed for aliveness.
 		 */
-		TRACE("Congestion window on CS" NODE_FMT " is closed (%u)", NODE_ARGS(cs->id), cs->cwnd);
+		FUSE_KTRACE(cc_from_csset(cs->css)->fc, "Congestion window on CS" NODE_FMT " is closed (%u)", NODE_ARGS(cs->id), cs->cwnd);
 		cs->eff_cwnd = 1;
 	}
 	cs_decrement_in_flight(cs, cost);
@@ -1744,7 +1741,7 @@  static int select_cs_for_read(struct pcs_cluster_core *cc, struct pcs_cs_list *
 		if (test_bit(i, &csl->blacklist)) {
 			if (jiffies < READ_ONCE(csl->blacklist_expires))
 				continue;
-			TRACE("expire replication blacklist");
+			FUSE_KTRACE(cc_from_csset(cs->css)->fc, "expire replication blacklist");
 			clear_bit(i, &csl->blacklist);
 		}
 
@@ -1934,7 +1931,7 @@  static int pcs_cslist_submit_read(struct pcs_int_request *ireq, struct pcs_cs_li
 			cs = csl->cs[0].cslink.cs;
 			map_remote_error(ireq->iochunk.map, cs->blacklist_reason, cs->id.val);
 
-			TRACE("Read from " MAP_FMT " blocked by blacklist error %d, CS" NODE_FMT,
+			FUSE_KTRACE(ireq->cc->fc, "Read from " MAP_FMT " blocked by blacklist error %d, CS" NODE_FMT,
 			      MAP_ARGS(ireq->iochunk.map), cs->blacklist_reason, NODE_ARGS(cs->id));
 			return -1;
 		}
@@ -1942,7 +1939,7 @@  static int pcs_cslist_submit_read(struct pcs_int_request *ireq, struct pcs_cs_li
 		WRITE_ONCE(csl->read_index, i);
 		WRITE_ONCE(csl->select_stamp, jiffies);
 
-		TRACE("Selected read map " MAP_FMT " to CS" NODE_FMT "; is_seq=%d\n", MAP_ARGS(ireq->iochunk.map),
+		FUSE_KTRACE(ireq->cc->fc, "Selected read map " MAP_FMT " to CS" NODE_FMT "; is_seq=%d\n", MAP_ARGS(ireq->iochunk.map),
 		      NODE_ARGS(csl->cs[i].cslink.cs->id), is_seq);
 		pcs_flow_bind_cs(ireq->iochunk.flow, csl->cs[i].cslink.cs);
 	}
@@ -2098,7 +2095,7 @@  static int pcs_cslist_submit_write(struct pcs_int_request *ireq, struct pcs_cs_l
 		cs = csl->cs[i].cslink.cs;
 		if (cs_is_blacklisted(cs)) {
 			map_remote_error(ireq->iochunk.map, cs->blacklist_reason, cs->id.val);
-			TRACE("Write to " MAP_FMT " blocked by blacklist error %d, CS" NODE_FMT,
+			FUSE_KTRACE(cc_from_csset(cs->css)->fc, "Write to " MAP_FMT " blocked by blacklist error %d, CS" NODE_FMT,
 			      MAP_ARGS(ireq->iochunk.map), cs->blacklist_reason, NODE_ARGS(cs->id));
 			spin_lock(&ireq->completion_data.child_lock);
 			ireq_drop_tokens(ireq);
@@ -2205,7 +2202,7 @@  static int pcs_cslist_submit_flush(struct pcs_int_request *ireq, struct pcs_cs_l
 
 		if (cs_is_blacklisted(cs)) {
 			map_remote_error(ireq->flushreq.map, cs->blacklist_reason, cs->id.val);
-			TRACE("Flush to " MAP_FMT " blocked by blacklist error %d, CS" NODE_FMT,
+			FUSE_KTRACE(cc_from_csset(cs->css)->fc, "Flush to " MAP_FMT " blocked by blacklist error %d, CS" NODE_FMT,
 			      MAP_ARGS(ireq->flushreq.map), cs->blacklist_reason, NODE_ARGS(cs->id));
 			spin_lock(&ireq->completion_data.child_lock);
 			ireq_drop_tokens(ireq);
@@ -2294,16 +2291,8 @@  void map_submit(struct pcs_map_entry * m, struct pcs_int_request *ireq)
 
 	DTRACE("enter m: " MAP_FMT ", ireq:%p \n", MAP_ARGS(m),	 ireq);
 	BUG_ON(ireq->type != PCS_IREQ_IOCHUNK && ireq->type != PCS_IREQ_FLUSH);
-
-	if (ireq_is_timed_out(ireq)) {
-		pcs_log(LOG_ERR, "timeout while getting map \"" MAP_FMT "\", last err=%d",
-			MAP_ARGS(m), ireq->error.value);
-		BUG();
-	}
-
 	BUG_ON(pcs_if_error(&ireq->error));
 
-
 	direction = (ireq->type != PCS_IREQ_FLUSH ? pcs_req_direction(ireq->iochunk.cmd) : 1);
 
 	do {
@@ -2409,7 +2398,7 @@  void process_ireq_truncate(struct pcs_int_request *ireq)
 
 	m = pcs_find_get_map(di, ireq->truncreq.offset - 1);
 
-	TRACE("process TRUNCATE %llu@" DENTRY_FMT " %x",
+	FUSE_KTRACE(ireq->cc->fc, "process TRUNCATE %llu@" DENTRY_FMT " %x",
 	      (unsigned long long)ireq->truncreq.offset, DENTRY_ARGS(di), m ? m->state : -1);
 
 	if (m == NULL) {
@@ -2438,7 +2427,7 @@  void process_ireq_truncate(struct pcs_int_request *ireq)
 		    (PCS_MAP_NEW|PCS_MAP_READABLE)) {
 
 			spin_unlock(&m->lock);
-			pcs_log(LOG_INFO, "map " MAP_FMT " unexpectedly converted to hole", MAP_ARGS(m));
+			FUSE_KLOG(cc_from_maps(m->maps)->fc, LOG_INFO, "map " MAP_FMT " unexpectedly converted to hole", MAP_ARGS(m));
 			map_truncate_tail(&di->mapping, end);
 			ireq_complete(ireq);
 			return;
@@ -2458,7 +2447,7 @@  void process_ireq_truncate(struct pcs_int_request *ireq)
 				return;
 			}
 
-			TRACE("map " MAP_FMT " is not updated yet", MAP_ARGS(m));
+			FUSE_KTRACE(ireq->cc->fc, "map " MAP_FMT " is not updated yet", MAP_ARGS(m));
 			map_remote_error_nolock(m, PCS_ERR_CSD_STALE_MAP, m->cs_list ? m->cs_list->cs[0].info.id.val : 0);
 
 		}
@@ -2500,7 +2489,7 @@  noinline void pcs_mapping_truncate(struct pcs_int_request *ireq, u64 old_size)
 	m = pcs_find_get_map(di, offset - 1);
 
 	if (m) {
-		TRACE("mapping truncate %llu->%llu " DENTRY_FMT " %x", (unsigned long long)old_size,
+		FUSE_KTRACE(ireq->cc->fc, "mapping truncate %llu->%llu " DENTRY_FMT " %x", (unsigned long long)old_size,
 		      (unsigned long long)new_size, DENTRY_ARGS(ireq->dentry), m ? m->state : -1);
 	}
 	if (m && map_chunk_end(m) == offset) {
@@ -2553,7 +2542,7 @@  static int commit_cs_record(struct pcs_map_entry * m, struct pcs_cs_record * rec
 		 *
 		 * The request is restarted with new map.
 		 */
-		TRACE(MAP_FMT " integrity seq mismatch CS" NODE_FMT " %d != %d, %d",
+		FUSE_KTRACE(cc_from_maps(m->maps)->fc, MAP_FMT " integrity seq mismatch CS" NODE_FMT " %d != %d, %d",
 			MAP_ARGS(m),
 			NODE_ARGS(rec->info.id),
 			rec->info.integrity_seq, sync->integrity_seq, srec->dirty_integrity);
@@ -2608,14 +2597,14 @@  static int commit_one_record(struct pcs_map_entry * m, PCS_NODE_ID_T cs_id,
 	if (m->cs_list == NULL)
 		return 0;
 
-	TRACE("sync ["NODE_FMT",%u,%u,%u,%u]", NODE_ARGS(cs_id),
+	FUSE_KDTRACE(cc_from_maps(m->maps)->fc, "sync ["NODE_FMT",%u,%u,%u,%u]", NODE_ARGS(cs_id),
 	      sync->integrity_seq, sync->sync_epoch, sync->sync_dirty, sync->sync_current);
 
 	for (i = 0; i < m->cs_list->nsrv; i++) {
 		if (m->cs_list->cs[i].info.id.val == cs_id.val) {
 			err = commit_cs_record(m, &m->cs_list->cs[i], sync, lat, op_type);
 
-			TRACE("commited ["NODE_FMT",%u/%u,%u/%u,%u/%u]", NODE_ARGS(cs_id),
+			FUSE_KDTRACE(cc_from_maps(m->maps)->fc, "commited ["NODE_FMT",%u/%u,%u/%u,%u/%u]", NODE_ARGS(cs_id),
 			      m->cs_list->cs[i].info.integrity_seq,
 			      m->cs_list->cs[i].sync.dirty_integrity,
 			      m->cs_list->cs[i].sync.dirty_epoch,
@@ -2719,7 +2708,7 @@  void pcs_map_verify_sync_state(struct pcs_dentry_info *di, struct pcs_int_reques
 		return;
 	}
 	if (commit_sync_info(ireq, m, ireq->iochunk.csl, resp)) {
-		TRACE(MAP_FMT " sync integrity error: map retry follows", MAP_ARGS(m));
+		FUSE_KTRACE(cc_from_maps(m->maps)->fc, MAP_FMT " sync integrity error: map retry follows", MAP_ARGS(m));
 
 		msg->error.value = PCS_ERR_CSD_STALE_MAP;
 		msg->error.remote = 1;
@@ -2756,7 +2745,7 @@  void sync_done(struct pcs_msg * msg)
 	}
 
 	if (commit_sync_info(sreq, m, sreq->flushreq.csl, resp)) {
-		TRACE(MAP_FMT " sync integrity error: sync retry follows", MAP_ARGS(m));
+		FUSE_KTRACE(cc_from_maps(m->maps)->fc, MAP_FMT " sync integrity error: sync retry follows", MAP_ARGS(m));
 
 		sreq->error.remote = 1;
 		sreq->error.value = PCS_ERR_CSD_STALE_MAP;
@@ -2782,13 +2771,13 @@  static int sync_is_finished(struct pcs_msg * msg, struct pcs_map_entry * m)
 	     srec++) {
 		int i;
 
-		TRACE("Checking cs="NODE_FMT" sync=[%d,%d,%d,%d]", NODE_ARGS(srec->cs_id), srec->sync.integrity_seq,
+		FUSE_KDTRACE(cc_from_maps(m->maps)->fc, "Checking cs="NODE_FMT" sync=[%d,%d,%d,%d]", NODE_ARGS(srec->cs_id), srec->sync.integrity_seq,
 		      srec->sync.sync_epoch,
 		      srec->sync.sync_dirty, srec->sync.sync_current);
 
 		for (i = 0; i < m->cs_list->nsrv; i++) {
 			if (m->cs_list->cs[i].info.id.val == srec->cs_id.val) {
-				TRACE("Checking against sync=[%d,%d,%d,%d,%d]",
+				FUSE_KDTRACE(cc_from_maps(m->maps)->fc, "Checking against sync=[%d,%d,%d,%d,%d]",
 				      m->cs_list->cs[i].sync.dirty_integrity,
 				      m->cs_list->cs[i].sync.dirty_epoch,
 				      m->cs_list->cs[i].sync.dirty_seq,
@@ -2813,12 +2802,12 @@  void process_flush_req(struct pcs_int_request *ireq)
 	if (m->state & PCS_MAP_DEAD)
 		goto done;
 
-	TRACE("process FLUSH " MAP_FMT, MAP_ARGS(m));
+	FUSE_KTRACE(ireq->cc->fc, "process FLUSH " MAP_FMT, MAP_ARGS(m));
 
 	if (!(m->flags & PCS_MAP_DIRTY))
 		goto done;
 	if (sync_is_finished(ireq->flushreq.msg, m)) {
-		TRACE("finished");
+		FUSE_KTRACE(ireq->cc->fc, "finished");
 		goto done;
 	}
 	spin_unlock(&m->lock);
@@ -2828,7 +2817,7 @@  void process_flush_req(struct pcs_int_request *ireq)
 done:
 	spin_unlock(&m->lock);
 	if (pcs_if_error(&ireq->error)) {
-		TRACE("oops, delete me %d", ireq->error.value);
+		FUSE_KTRACE(ireq->cc->fc, "oops, delete me %d", ireq->error.value);
 		pcs_clear_error(&ireq->error);
 	}
 	ireq_complete(ireq);
@@ -2853,7 +2842,7 @@  static void pcs_flushreq_complete(struct pcs_int_request * sreq)
 
 	if (!pcs_if_error(&sreq->error)) {
 		if (sync_is_finished(sreq->flushreq.msg, m)) {
-			TRACE("finished");
+			FUSE_KTRACE(ireq->cc->fc, "finished");
 			goto done_dirty;
 		}
 		sreq->error.value = PCS_ERR_CSD_STALE_MAP;
@@ -2863,12 +2852,7 @@  static void pcs_flushreq_complete(struct pcs_int_request * sreq)
 
 	if (ireq && !pcs_if_error(&ireq->error)) {
 		if (ireq_check_redo(sreq)) {
-			if (ireq_is_timed_out(sreq)) {
-				pcs_log(LOG_ERR, "timeout while flush request on \"" DENTRY_FMT "\" last_err=%u",
-					DENTRY_ARGS(sreq->dentry), sreq->error.value);
-				BUG();
-			}
-			TRACE("restart after flush error %d", sreq->error.value);
+			FUSE_KTRACE(ireq->cc->fc, "restart after flush error %d", sreq->error.value);
 			if (map_version_compare(&ioh->map_version, &m->version) < 0)
 				sreq->flags &= ~IREQ_F_ONCE;
 			spin_unlock(&m->lock);
@@ -2884,7 +2868,7 @@  static void pcs_flushreq_complete(struct pcs_int_request * sreq)
 				ireq_delay(sreq);
 			return;
 		}
-		TRACE("flush error %d", sreq->error.value);
+		FUSE_KTRACE(ireq->cc->fc, "flush error %d", sreq->error.value);
 		pcs_copy_error(&ireq->error, &sreq->error);
 		notify_error = 1;
 	}
@@ -2951,7 +2935,7 @@  static void prepare_map_flush_msg(struct pcs_map_entry * m, struct pcs_int_reque
 				arr->sync.ts_net = 0;
 				arr->sync._reserved = 0;
 				ioh->hdr.len += sizeof(struct pcs_cs_sync_resp);
-				pcs_log(LOG_DEBUG5, "fill sync "NODE_FMT" [%d,%d,%d,%d]", NODE_ARGS(arr->cs_id),
+				FUSE_KLOG(cc_from_maps(m->maps)->fc, LOG_DEBUG5, "fill sync "NODE_FMT" [%d,%d,%d,%d]", NODE_ARGS(arr->cs_id),
 					arr->sync.integrity_seq, arr->sync.sync_epoch,
 					arr->sync.sync_dirty, arr->sync.sync_current);
 				arr++;
@@ -3028,7 +3012,7 @@  static int prepare_map_flush_ireq(struct pcs_map_entry *m, struct pcs_int_reques
 	sreq->flushreq.csl = NULL;
 	sreq->complete_cb = pcs_flushreq_complete;
 	sreq->flushreq.msg = msg;
-	TRACE("timed FLUSH " MAP_FMT, MAP_ARGS(m));
+	FUSE_KTRACE(sreq->cc->fc, "timed FLUSH " MAP_FMT, MAP_ARGS(m));
 	m->flags |= PCS_MAP_FLUSHING;
 	__pcs_map_get(m);
 	spin_unlock(&m->lock);
@@ -3084,7 +3068,7 @@  void map_inject_flush_req(struct pcs_int_request *ireq)
 
 	if (di->fileinfo.sys.map_type != PCS_MAP_PLAIN ||
 	    di->fileinfo.sys.stripe_depth != 1) {
-		pcs_log(LOG_ERR, "bad map_type");
+		FUSE_KLOG(ireq->cc->fc, LOG_ERR, "bad map_type");
 		pcs_set_local_error(&ireq->error, PCS_ERR_PROTOCOL);
 		ireq_complete(ireq);
 		return;
diff --git a/fs/fuse/kio/pcs/pcs_req.c b/fs/fuse/kio/pcs/pcs_req.c
index 9516768c47d4..f60ad025af59 100644
--- a/fs/fuse/kio/pcs/pcs_req.c
+++ b/fs/fuse/kio/pcs/pcs_req.c
@@ -9,7 +9,9 @@ 
 #include "pcs_sock_io.h"
 #include "pcs_rpc.h"
 #include "pcs_req.h"
+#include "pcs_cluster.h"
 #include "log.h"
+#include "fuse_ktrace.h"
 
 static void ireq_timer_handler(unsigned long arg)
 {
@@ -135,14 +137,9 @@  noinline void pcs_ireq_queue_fail(struct list_head *queue, int error)
 			ireq_on_error(ireq);
 
 			if (!(ireq->flags & IREQ_F_FATAL)) {
-				if (ireq_is_timed_out(ireq)) {
-					pcs_log(LOG_ERR, "timeout while truncate(%d) request on \"" DENTRY_FMT "\" last err=%u",
-						ireq->type, DENTRY_ARGS(ireq->dentry), ireq->error.value);
-					BUG();
-				}
 				pcs_clear_error(&ireq->error);
 
-				TRACE("requeue truncate(%d) %llu@" DENTRY_FMT "\n", ireq->type,
+				FUSE_KTRACE(ireq->cc->fc, "requeue truncate(%d) %llu@" DENTRY_FMT "\n", ireq->type,
 				      (unsigned long long)ireq->truncreq.offset, DENTRY_ARGS(ireq->dentry));
 
 				ireq_delay(ireq);
diff --git a/fs/fuse/kio/pcs/pcs_req.h b/fs/fuse/kio/pcs/pcs_req.h
index 557e8e476856..fec6c1e1575c 100644
--- a/fs/fuse/kio/pcs/pcs_req.h
+++ b/fs/fuse/kio/pcs/pcs_req.h
@@ -310,16 +310,6 @@  static inline int ireq_check_redo(struct pcs_int_request *ireq)
 	return 1;
 }
 
-static inline int ireq_is_timed_out(struct pcs_int_request *ireq)
-{
-	int timed_out;
-	timed_out = ireq->cc->cfg.curr.abort_timeout &&
-		ireq->create_ts + ireq->cc->cfg.curr.abort_timeout < jiffies;
-	if (timed_out && ireq->cc->abort_callback)
-		return ireq->cc->abort_callback(ireq->cc, ireq);
-	return timed_out;
-}
-
 struct pcs_int_request * __ireq_alloc(void);
 struct pcs_int_request *ireq_alloc(struct pcs_dentry_info *di);
 struct pcs_int_request *ireq_alloc_by_cluster(struct pcs_cluster_core *cc);
diff --git a/fs/fuse/kio/pcs/pcs_rpc.c b/fs/fuse/kio/pcs/pcs_rpc.c
index 1faa99ec0426..c8737b96d932 100644
--- a/fs/fuse/kio/pcs/pcs_rpc.c
+++ b/fs/fuse/kio/pcs/pcs_rpc.c
@@ -26,6 +26,7 @@ 
 #include "pcs_rpc.h"
 #include "pcs_cluster.h"
 #include "log.h"
+#include "fuse_ktrace.h"
 
 static void timer_work(struct work_struct *w);
 static int rpc_gc_classify(struct pcs_rpc * ep);
@@ -163,8 +164,8 @@  void rpc_abort(struct pcs_rpc * ep, int fatal, int error)
 	while (!list_empty(&ep->pending_queue)) {
 		struct pcs_msg * msg = list_first_entry(&ep->pending_queue, struct pcs_msg, list);
 		list_move_tail(&msg->list, &failed_list);
-		TRACE("aborted msg to " PEER_FMT ", tmo=%d, err=%d, %ld", PEER_ARGS(ep),
-		      msg->timeout, error, (long)(msg->start_time + msg->timeout - jiffies));
+		FUSE_KTRACE(cc_from_rpc(ep->eng)->fc, "aborted msg to " PEER_FMT ", tmo=%d, err=%d, %ld", PEER_ARGS(ep),
+			    msg->timeout, error, (long)(msg->start_time + msg->timeout - jiffies));
 		pcs_msg_del_calendar(msg);
 		msg->stage = PCS_MSG_STAGE_NONE;
 	}
@@ -172,8 +173,8 @@  void rpc_abort(struct pcs_rpc * ep, int fatal, int error)
 		while (!list_empty(&ep->state_queue)) {
 			struct pcs_msg * msg = list_first_entry(&ep->state_queue, struct pcs_msg, list);
 			list_move_tail(&msg->list, &failed_list);
-			TRACE("aborted unsent msg to " PEER_FMT ", tmo=%d, err=%d", PEER_ARGS(ep),
-			      msg->timeout, error);
+			FUSE_KTRACE(cc_from_rpc(ep->eng)->fc, "aborted unsent msg to " PEER_FMT ", tmo=%d, err=%d", PEER_ARGS(ep),
+				    msg->timeout, error);
 			pcs_msg_del_calendar(msg);
 			msg->stage = PCS_MSG_STAGE_NONE;
 		}
@@ -432,7 +433,7 @@  static void handle_keep_waiting(struct pcs_rpc * ep, struct pcs_msg * msg)
 	if (h->hdr.len < sizeof(struct pcs_rpc_keep_waiting))
 		return;
 
-	TRACE("Received keep wait from " NODE_FMT " for request " XID_FMT,
+	FUSE_KTRACE(cc_from_rpc(ep->eng)->fc, "Received keep wait from " NODE_FMT " for request " XID_FMT,
 	      NODE_ARGS(h->hdr.xid.origin), XID_ARGS(h->xid));
 
 	req = pcs_rpc_lookup_xid(ep, &h->xid);
@@ -507,7 +508,7 @@  struct pcs_msg *rpc_get_hdr(struct pcs_sockio * sio, u32 *msg_size)
 
 	/* Fatal stream format error */
 	if (h->len < sizeof(struct pcs_rpc_hdr) || h->len > ep->params.max_msg_size) {
-		pcs_log(0, "Bad message header %u %u\n", h->len, h->type);
+		FUSE_KLOG(cc_from_rpc(ep->eng)->fc, LOG_ERR, "Bad message header %u %u\n", h->len, h->type);
 		return NULL;
 	}
 
@@ -522,14 +523,12 @@  struct pcs_msg *rpc_get_hdr(struct pcs_sockio * sio, u32 *msg_size)
 		next_input = rpc_work_input;
 		break;
 	default:
-		pcs_log(0, "Received msg in bad state %u\n", ep->state);
-		BUG();
+		FUSE_KLOG(cc_from_rpc(ep->eng)->fc, LOG_ERR, "Received msg in bad state %u\n", ep->state);
 		return NULL;
-
 	}
 
 	if (h->len > PAGE_SIZE) {
-		pcs_log(0, "Received too big msg  %u\n", h->len);
+		FUSE_KLOG(cc_from_rpc(ep->eng)->fc, LOG_ERR, "Received too big msg  %u\n", h->len);
 		return PCS_TRASH_MSG;
 	}
 
@@ -653,7 +652,7 @@  static void calendar_work(struct work_struct *w)
 		struct pcs_rpc_hdr * h = (struct pcs_rpc_hdr *)msg_inline_head(msg);
 
 		(void)h;
-		TRACE("killing msg to " PEER_FMT " type=%u xid=" XID_FMT " stage=%d tmo=%d exp=%ld rem=%ld\n",
+		FUSE_KTRACE(cc->fc, "killing msg to " PEER_FMT " type=%u xid=" XID_FMT " stage=%d tmo=%d exp=%ld rem=%ld\n",
 		      PEER_ARGS(msg->rpc), h->type, XID_ARGS(h->xid),
 		      msg->stage, msg->timeout,
 		      (long)(msg->start_time + msg->timeout - jiffies),
@@ -693,8 +692,8 @@  static void calendar_work(struct work_struct *w)
 		count++;
 	}
 	if (count)
-		printk("%s %d messages to "PEER_FMT" destroyed\n", __FUNCTION__,
-		       count, PEER_ARGS(ep));
+		trace_printk("%s %d messages to "PEER_FMT" destroyed\n", __FUNCTION__,
+			     count, PEER_ARGS(ep));
 
 	for (i=0; i < RPC_MAX_CALENDAR-1; i++) {
 		kill_slot = (ep->kill_arrow  + i) & (RPC_MAX_CALENDAR - 1);
@@ -969,18 +968,18 @@  static int rpc_check_memlimit(struct pcs_rpc * ep)
 		 * However, if this happens we must do something.
 		 */
 		if (eng->msg_allocated > eng->mem_limit) {
-			pcs_log(LOG_ERR, "Hard memory limit exceeded");
+			FUSE_KLOG(cc_from_rpc(ep->eng)->fc, LOG_ERR, "Hard memory limit exceeded");
 			return 1;
 		}
 		if (ep->peer_role == PCS_NODE_ROLE_CN) {
 			/* CN contributes 3 (repl.norm) times of memory pressure on cluster */
 			if (3 * ep->accounted * eng->accounted_rpcs >= eng->msg_allocated) {
-				TRACE("Soft memory limit exceeded " PEER_FMT, PEER_ARGS(ep));
+				FUSE_KTRACE(cc_from_rpc(eng)->fc, "Soft memory limit exceeded " PEER_FMT, PEER_ARGS(ep));
 				return 1;
 			}
 		} else {
 			if (ep->accounted * eng->accounted_rpcs >= eng->msg_allocated) {
-				TRACE("Soft memory limit exceeded " PEER_FMT, PEER_ARGS(ep));
+				FUSE_KTRACE(cc_from_rpc(eng)->fc, "Soft memory limit exceeded " PEER_FMT, PEER_ARGS(ep));
 				return 1;
 			}
 		}
@@ -1169,7 +1168,7 @@  static void timer_work(struct work_struct *w)
 	case PCS_RPC_WORK: {
 		int err = list_empty(&ep->pending_queue) ? PCS_ERR_RESPONSE_TIMEOUT : PCS_ERR_WRITE_TIMEOUT;
 
-		TRACE("rpc timer expired, killing connection to " PEER_FMT ", %d",
+		FUSE_KTRACE(cc_from_rpc(ep->eng)->fc, "rpc timer expired, killing connection to " PEER_FMT ", %d",
 		      PEER_ARGS(ep), err);
 		rpc_abort(ep, 0, err);
 		break;
@@ -1189,7 +1188,6 @@  static void connstat_work(struct work_struct *w)
 	struct pcs_rpc_engine * eng = container_of(w, struct pcs_rpc_engine, stat_work.work);
 	struct pcs_cluster_core *cc = cc_from_rpc(eng);
 
-	pcs_log(LOG_INFO, "TODO send connstat-s\n");
 	(void)eng;
 	/* account_connstat(eng); */
 	mod_delayed_work(cc->wq, &eng->stat_work, PCS_MSG_MAX_CALENDAR * HZ);
@@ -1299,7 +1297,7 @@  void rpc_connect_done(struct pcs_rpc *ep, struct socket *sock)
 	ep->retries++;
 
 	if (ep->state != PCS_RPC_CONNECT) {
-		pcs_log(LOG_ERR, "Invalid state: %u", ep->state);
+		FUSE_KLOG(cc_from_rpc(ep->eng)->fc, LOG_ERR, "Invalid state: %u", ep->state);
 		BUG();
 	}