xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c (revision 58f6259b7a08f8d47d4629609703d358b042f0fd)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3 
4 #include "lag.h"
5 
6 static char *get_str_mode_type(struct mlx5_lag *ldev)
7 {
8 	switch (ldev->mode) {
9 	case MLX5_LAG_MODE_ROCE: return "roce";
10 	case MLX5_LAG_MODE_SRIOV: return "switchdev";
11 	case MLX5_LAG_MODE_MULTIPATH: return "multipath";
12 	case MLX5_LAG_MODE_MPESW: return "multiport_eswitch";
13 	default: return "invalid";
14 	}
15 
16 	return NULL;
17 }
18 
19 static int type_show(struct seq_file *file, void *priv)
20 {
21 	struct mlx5_core_dev *dev = file->private;
22 	struct mlx5_lag *ldev;
23 	char *mode = NULL;
24 
25 	ldev = mlx5_lag_dev(dev);
26 	mutex_lock(&ldev->lock);
27 	if (__mlx5_lag_is_active(ldev))
28 		mode = get_str_mode_type(ldev);
29 	mutex_unlock(&ldev->lock);
30 	if (!mode)
31 		return -EINVAL;
32 	seq_printf(file, "%s\n", mode);
33 
34 	return 0;
35 }
36 
37 static int port_sel_mode_show(struct seq_file *file, void *priv)
38 {
39 	struct mlx5_core_dev *dev = file->private;
40 	struct mlx5_lag *ldev;
41 	int ret = 0;
42 	char *mode;
43 
44 	ldev = mlx5_lag_dev(dev);
45 	mutex_lock(&ldev->lock);
46 	if (__mlx5_lag_is_active(ldev))
47 		mode = mlx5_get_str_port_sel_mode(ldev->mode, ldev->mode_flags);
48 	else
49 		ret = -EINVAL;
50 	mutex_unlock(&ldev->lock);
51 	if (ret)
52 		return ret;
53 
54 	seq_printf(file, "%s\n", mode);
55 	return 0;
56 }
57 
58 static int state_show(struct seq_file *file, void *priv)
59 {
60 	struct mlx5_core_dev *dev = file->private;
61 	struct mlx5_lag *ldev;
62 	bool active;
63 
64 	ldev = mlx5_lag_dev(dev);
65 	mutex_lock(&ldev->lock);
66 	active = __mlx5_lag_is_active(ldev);
67 	mutex_unlock(&ldev->lock);
68 	seq_printf(file, "%s\n", active ? "active" : "disabled");
69 	return 0;
70 }
71 
72 static int flags_show(struct seq_file *file, void *priv)
73 {
74 	struct mlx5_core_dev *dev = file->private;
75 	bool fdb_sel_mode_native;
76 	struct mlx5_lag *ldev;
77 	bool shared_fdb;
78 	bool lag_active;
79 
80 	ldev = mlx5_lag_dev(dev);
81 	mutex_lock(&ldev->lock);
82 	lag_active = __mlx5_lag_is_active(ldev);
83 	if (!lag_active)
84 		goto unlock;
85 
86 	shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
87 	fdb_sel_mode_native = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,
88 				       &ldev->mode_flags);
89 
90 unlock:
91 	mutex_unlock(&ldev->lock);
92 	if (!lag_active)
93 		return -EINVAL;
94 
95 	seq_printf(file, "%s:%s\n", "shared_fdb", shared_fdb ? "on" : "off");
96 	seq_printf(file, "%s:%s\n", "fdb_selection_mode",
97 		   fdb_sel_mode_native ? "native" : "affinity");
98 	return 0;
99 }
100 
101 static int mapping_show(struct seq_file *file, void *priv)
102 {
103 	struct mlx5_core_dev *dev = file->private;
104 	u8 ports[MLX5_MAX_PORTS] = {};
105 	struct mlx5_lag *ldev;
106 	bool hash = false;
107 	bool lag_active;
108 	int num_ports;
109 	int i;
110 
111 	ldev = mlx5_lag_dev(dev);
112 	mutex_lock(&ldev->lock);
113 	lag_active = __mlx5_lag_is_active(ldev);
114 	if (lag_active) {
115 		if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) {
116 			mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, ports,
117 					      &num_ports);
118 			hash = true;
119 		} else {
120 			for (i = 0; i < ldev->ports; i++)
121 				ports[i] = ldev->v2p_map[i];
122 			num_ports = ldev->ports;
123 		}
124 	}
125 	mutex_unlock(&ldev->lock);
126 	if (!lag_active)
127 		return -EINVAL;
128 
129 	for (i = 0; i < num_ports; i++) {
130 		if (hash)
131 			seq_printf(file, "%d\n", ports[i] + 1);
132 		else
133 			seq_printf(file, "%d:%d\n", i + 1, ports[i]);
134 	}
135 
136 	return 0;
137 }
138 
139 static int members_show(struct seq_file *file, void *priv)
140 {
141 	struct mlx5_core_dev *dev = file->private;
142 	struct mlx5_lag *ldev;
143 	int i;
144 
145 	ldev = mlx5_lag_dev(dev);
146 	mutex_lock(&ldev->lock);
147 	for (i = 0; i < ldev->ports; i++) {
148 		if (!ldev->pf[i].dev)
149 			continue;
150 		seq_printf(file, "%s\n", dev_name(ldev->pf[i].dev->device));
151 	}
152 	mutex_unlock(&ldev->lock);
153 
154 	return 0;
155 }
156 
/* Generate the read-only single-open file_operations (<name>_fops) for each
 * of the *_show() handlers above.
 */
DEFINE_SHOW_ATTRIBUTE(type);
DEFINE_SHOW_ATTRIBUTE(port_sel_mode);
DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(flags);
DEFINE_SHOW_ATTRIBUTE(mapping);
DEFINE_SHOW_ATTRIBUTE(members);
163 
164 void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev)
165 {
166 	struct dentry *dbg;
167 
168 	dbg = debugfs_create_dir("lag", mlx5_debugfs_get_dev_root(dev));
169 	dev->priv.dbg.lag_debugfs = dbg;
170 
171 	debugfs_create_file("type", 0444, dbg, dev, &type_fops);
172 	debugfs_create_file("port_sel_mode", 0444, dbg, dev, &port_sel_mode_fops);
173 	debugfs_create_file("state", 0444, dbg, dev, &state_fops);
174 	debugfs_create_file("flags", 0444, dbg, dev, &flags_fops);
175 	debugfs_create_file("mapping", 0444, dbg, dev, &mapping_fops);
176 	debugfs_create_file("members", 0444, dbg, dev, &members_fops);
177 }
178 
/* Remove the "lag" debugfs directory and every file created under it.
 * debugfs_remove_recursive() is safe to call with a NULL/error dentry.
 */
void mlx5_ldev_remove_debugfs(struct dentry *dbg)
{
	debugfs_remove_recursive(dbg);
}
183