[Devel,RHEL7,COMMIT] net: Allow pass cpu mask into snmp_fold_field{, 64}()

Submitted by Konstantin Khorenko on Sept. 13, 2016, 11:39 a.m.

Details

Message ID 201609131139.u8DBdYeF024273@finist_cl7.x64_64.work.ct
State New
Series "Series without cover letter"
Headers show

Commit Message

Konstantin Khorenko Sept. 13, 2016, 11:39 a.m.
The commit is pushed to "branch-rh7-3.10.0-327.28.2.vz7.17.x-ovz" and will appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-327.28.2.vz7.17.6
------>
commit bd3502d0dd697a8c42460dc75469b86daedc5ef1
Author: Kirill Tkhai <ktkhai@virtuozzo.com>
Date:   Tue Sep 13 15:39:34 2016 +0400

    net: Allow pass cpu mask into snmp_fold_field{, 64}()
    
    This allows to pass cpu_online_mask instead of cpu_possible_mask.
    
    Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
    Reviewed-by: Andrei Vagin <avagin@virtuozzo.com>
---
 include/net/ip.h   | 17 +++++++++++++++--
 net/ipv4/af_inet.c | 12 +++++++-----
 2 files changed, 22 insertions(+), 7 deletions(-)

Patch hide | download patch | download mbox

diff --git a/include/net/ip.h b/include/net/ip.h
index 07eac7a..4852746 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -200,14 +200,27 @@  void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 #define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
 #define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 
-unsigned long snmp_fold_field(void __percpu *mib[], int offt);
+unsigned long __snmp_fold_field(void __percpu *mib[], int offt, const struct cpumask *mask);
+static inline unsigned long snmp_fold_field(void __percpu *mib[], int offt)
+{
+	return __snmp_fold_field(mib, offt, cpu_possible_mask);
+}
 #if BITS_PER_LONG==32
-u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off);
+u64 __snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off,
+			const struct cpumask *mask);
+static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t sync_off)
+{
+	return __snmp_fold_field64(mib, offt, sync_off, cpu_possible_mask);
+}
 #else
 static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off)
 {
 	return snmp_fold_field(mib, offt);
 }
+static inline u64 __snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_off, const struct cpumask *mask)
+{
+	return __snmp_fold_field(mib, offt, mask);
+}
 #endif
 int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 3d89861..91cb5f1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1443,27 +1443,29 @@  int inet_ctl_sock_create(struct sock **sk, unsigned short family,
 }
 EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
 
-unsigned long snmp_fold_field(void __percpu *mib[], int offt)
+unsigned long __snmp_fold_field(void __percpu *mib[], int offt,
+				const struct cpumask *mask)
 {
 	unsigned long res = 0;
 	int i, j;
 
-	for_each_possible_cpu(i) {
+	for_each_cpu(i, mask) {
 		for (j = 0; j < SNMP_ARRAY_SZ; j++)
 			res += *(((unsigned long *) per_cpu_ptr(mib[j], i)) + offt);
 	}
 	return res;
 }
-EXPORT_SYMBOL_GPL(snmp_fold_field);
+EXPORT_SYMBOL_GPL(__snmp_fold_field);
 
 #if BITS_PER_LONG==32
 
-u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
+u64 __snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset,
+			const struct cpumask *mask)
 {
 	u64 res = 0;
 	int cpu;
 
-	for_each_possible_cpu(cpu) {
+	for_each_cpu(cpu, mask) {
 		void *bhptr;
 		struct u64_stats_sync *syncp;
 		u64 v;