xref: /linux/drivers/net/ethernet/mellanox/mlx4/port.c (revision 72503791edffe516848d0f01d377fa9cd0711970)
1 /*
2  * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/errno.h>
34 #include <linux/if_ether.h>
35 #include <linux/export.h>
36 
37 #include <linux/mlx4/cmd.h>
38 
39 #include "mlx4.h"
40 
41 #define MLX4_MAC_VALID		(1ull << 63)
42 
43 #define MLX4_VLAN_VALID		(1u << 31)
44 #define MLX4_VLAN_MASK		0xfff
45 
46 #define MLX4_STATS_TRAFFIC_COUNTERS_MASK	0xfULL
47 #define MLX4_STATS_TRAFFIC_DROPS_MASK		0xc0ULL
48 #define MLX4_STATS_ERROR_COUNTERS_MASK		0x1ffc30ULL
49 #define MLX4_STATS_PORT_COUNTERS_MASK		0x1fe00000ULL
50 
51 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
52 {
53 	int i;
54 
55 	mutex_init(&table->mutex);
56 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
57 		table->entries[i] = 0;
58 		table->refs[i]	 = 0;
59 	}
60 	table->max   = 1 << dev->caps.log_num_macs;
61 	table->total = 0;
62 }
63 
64 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
65 {
66 	int i;
67 
68 	mutex_init(&table->mutex);
69 	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
70 		table->entries[i] = 0;
71 		table->refs[i]	 = 0;
72 	}
73 	table->max   = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
74 	table->total = 0;
75 }
76 
/*
 * Attach a unicast steering rule directing traffic for @mac on @port
 * to the QP number in *@qpn.
 *
 * MLX4_STEERING_MODE_B0: the MAC and port are encoded into a 16-byte
 * gid (MAC bytes at offsets 10..15, port at offset 5) and attached
 * with mlx4_unicast_attach(); *@reg_id is not written in this mode.
 * MLX4_STEERING_MODE_DEVICE_MANAGED: a network flow rule matching the
 * full destination MAC is attached with mlx4_flow_attach(), which
 * stores a registration id in *@reg_id for later detach.
 *
 * Returns 0 on success, -EINVAL for an unknown steering mode, or the
 * error from the underlying attach call (also logged as a warning).
 */
static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
			     u64 mac, int *qpn, u64 *reg_id)
{
	__be64 be_mac;
	int err;

	mac &= MLX4_MAC_MASK;
	/* Shift by 16 so the 6 MAC bytes occupy the leading bytes of the
	 * big-endian value; ETH_ALEN bytes are then memcpy'd out of it. */
	be_mac = cpu_to_be64(mac << 16);

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], &be_mac, ETH_ALEN);
		gid[5] = port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		/* Exact match on all 48 MAC bits */
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_PROMISC_NONE,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		mlx4_warn(dev, "Failed Attaching Unicast\n");

	return err;
}
130 
131 static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
132 				  u64 mac, int qpn, u64 reg_id)
133 {
134 	switch (dev->caps.steering_mode) {
135 	case MLX4_STEERING_MODE_B0: {
136 		struct mlx4_qp qp;
137 		u8 gid[16] = {0};
138 		__be64 be_mac;
139 
140 		qp.qpn = qpn;
141 		mac &= MLX4_MAC_MASK;
142 		be_mac = cpu_to_be64(mac << 16);
143 		memcpy(&gid[10], &be_mac, ETH_ALEN);
144 		gid[5] = port;
145 
146 		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
147 		break;
148 	}
149 	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
150 		mlx4_flow_detach(dev, reg_id);
151 		break;
152 	}
153 	default:
154 		mlx4_err(dev, "Invalid steering mode.\n");
155 	}
156 }
157 
158 static int validate_index(struct mlx4_dev *dev,
159 			  struct mlx4_mac_table *table, int index)
160 {
161 	int err = 0;
162 
163 	if (index < 0 || index >= table->max || !table->entries[index]) {
164 		mlx4_warn(dev, "No valid Mac entry for the given index\n");
165 		err = -EINVAL;
166 	}
167 	return err;
168 }
169 
170 static int find_index(struct mlx4_dev *dev,
171 		      struct mlx4_mac_table *table, u64 mac)
172 {
173 	int i;
174 
175 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
176 		if ((mac & MLX4_MAC_MASK) ==
177 		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
178 			return i;
179 	}
180 	/* Mac not found */
181 	return -EINVAL;
182 }
183 
/*
 * Register @mac on @port and return a QP number for it in *@qpn.
 *
 * The MAC is written to the port MAC table first.  In A0 steering
 * mode the QP number is implied by the table index (base_qpn + index)
 * and no steering entry is needed.  In the other modes a 1-QP range
 * is reserved, a unicast steering rule is attached, and the mapping
 * is remembered in the per-port mac_tree keyed by QP number so that
 * mlx4_put_eth_qp() can release it later.
 *
 * Returns 0 on success or a negative errno; partial state is unwound
 * via the goto chain on failure.
 */
int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	/* NOTE(review): in B0 mode mlx4_uc_steer_add() never writes
	 * reg_id, so entry->reg_id below may hold stack garbage.  This
	 * looks harmless only because the B0 release path ignores
	 * reg_id — consider initializing reg_id to 0 here. */
	u64 reg_id;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
			(unsigned long long) mac);
	index = mlx4_register_mac(dev, port, mac);
	if (index < 0) {
		err = index;
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		/* A0: QP number derives directly from the MAC table slot */
		*qpn = info->base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
	if (err) {
		mlx4_err(dev, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
	if (err)
		goto steer_err;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	entry->mac = mac;
	entry->reg_id = reg_id;
	err = radix_tree_insert(&info->mac_tree, *qpn, entry);
	if (err)
		goto insert_err;
	return 0;

insert_err:
	kfree(entry);

alloc_err:
	mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, port, mac);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
244 
245 void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
246 {
247 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
248 	struct mlx4_mac_entry *entry;
249 
250 	mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
251 		 (unsigned long long) mac);
252 	mlx4_unregister_mac(dev, port, mac);
253 
254 	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
255 		entry = radix_tree_lookup(&info->mac_tree, qpn);
256 		if (entry) {
257 			mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
258 				 " qpn %d\n", port,
259 				 (unsigned long long) mac, qpn);
260 			mlx4_uc_steer_release(dev, port, entry->mac,
261 					      qpn, entry->reg_id);
262 			mlx4_qp_release_range(dev, qpn, 1);
263 			radix_tree_delete(&info->mac_tree, qpn);
264 			kfree(entry);
265 		}
266 	}
267 }
268 EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
269 
270 static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
271 				   __be64 *entries)
272 {
273 	struct mlx4_cmd_mailbox *mailbox;
274 	u32 in_mod;
275 	int err;
276 
277 	mailbox = mlx4_alloc_cmd_mailbox(dev);
278 	if (IS_ERR(mailbox))
279 		return PTR_ERR(mailbox);
280 
281 	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
282 
283 	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
284 
285 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
286 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
287 
288 	mlx4_free_cmd_mailbox(dev, mailbox);
289 	return err;
290 }
291 
292 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
293 {
294 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
295 	struct mlx4_mac_table *table = &info->mac_table;
296 	int i, err = 0;
297 	int free = -1;
298 
299 	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
300 		 (unsigned long long) mac, port);
301 
302 	mutex_lock(&table->mutex);
303 	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
304 		if (free < 0 && !table->entries[i]) {
305 			free = i;
306 			continue;
307 		}
308 
309 		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
310 			/* MAC already registered, Must not have duplicates */
311 			err = -EEXIST;
312 			goto out;
313 		}
314 	}
315 
316 	mlx4_dbg(dev, "Free MAC index is %d\n", free);
317 
318 	if (table->total == table->max) {
319 		/* No free mac entries */
320 		err = -ENOSPC;
321 		goto out;
322 	}
323 
324 	/* Register new MAC */
325 	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
326 
327 	err = mlx4_set_port_mac_table(dev, port, table->entries);
328 	if (unlikely(err)) {
329 		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
330 			 (unsigned long long) mac);
331 		table->entries[free] = 0;
332 		goto out;
333 	}
334 
335 	err = free;
336 	++table->total;
337 out:
338 	mutex_unlock(&table->mutex);
339 	return err;
340 }
341 EXPORT_SYMBOL_GPL(__mlx4_register_mac);
342 
343 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
344 {
345 	u64 out_param;
346 	int err;
347 
348 	if (mlx4_is_mfunc(dev)) {
349 		set_param_l(&out_param, port);
350 		err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
351 				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
352 				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
353 		if (err)
354 			return err;
355 
356 		return get_param_l(&out_param);
357 	}
358 	return __mlx4_register_mac(dev, port, mac);
359 }
360 EXPORT_SYMBOL_GPL(mlx4_register_mac);
361 
362 
363 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
364 {
365 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
366 	struct mlx4_mac_table *table = &info->mac_table;
367 	int index;
368 
369 	index = find_index(dev, table, mac);
370 
371 	mutex_lock(&table->mutex);
372 
373 	if (validate_index(dev, table, index))
374 		goto out;
375 
376 	table->entries[index] = 0;
377 	mlx4_set_port_mac_table(dev, port, table->entries);
378 	--table->total;
379 out:
380 	mutex_unlock(&table->mutex);
381 }
382 EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
383 
384 void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
385 {
386 	u64 out_param;
387 
388 	if (mlx4_is_mfunc(dev)) {
389 		set_param_l(&out_param, port);
390 		(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
391 				    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
392 				    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
393 		return;
394 	}
395 	__mlx4_unregister_mac(dev, port, mac);
396 	return;
397 }
398 EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
399 
/*
 * Replace the MAC currently bound to @qpn on @port with @new_mac.
 *
 * In non-A0 steering modes the old steering entry and MAC
 * registration are torn down and new ones are created.  In A0 mode
 * the MAC table slot implied by the QP number is overwritten in
 * place and pushed to firmware.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	struct mlx4_mac_entry *entry;
	int index = qpn - info->base_qpn;	/* only meaningful in A0 mode */
	int err = 0;

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		entry = radix_tree_lookup(&info->mac_tree, qpn);
		if (!entry)
			return -EINVAL;
		mlx4_uc_steer_release(dev, port, entry->mac,
				      qpn, entry->reg_id);
		mlx4_unregister_mac(dev, port, entry->mac);
		entry->mac = new_mac;
		entry->reg_id = 0;
		/* NOTE(review): the return value of mlx4_register_mac()
		 * is ignored; if registration fails, the steer add below
		 * targets an unregistered MAC — consider checking it. */
		mlx4_register_mac(dev, port, new_mac);
		err = mlx4_uc_steer_add(dev, port, entry->mac,
					&qpn, &entry->reg_id);
		return err;
	}

	/* CX1 doesn't support multi-functions */
	mutex_lock(&table->mutex);

	err = validate_index(dev, table, index);
	if (err)
		goto out;

	/* Overwrite the slot in place, then sync the table to firmware */
	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		/* Roll back on firmware failure */
		table->entries[index] = 0;
	}
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_replace_mac);
443 
444 static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
445 				    __be32 *entries)
446 {
447 	struct mlx4_cmd_mailbox *mailbox;
448 	u32 in_mod;
449 	int err;
450 
451 	mailbox = mlx4_alloc_cmd_mailbox(dev);
452 	if (IS_ERR(mailbox))
453 		return PTR_ERR(mailbox);
454 
455 	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
456 	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
457 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
458 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
459 
460 	mlx4_free_cmd_mailbox(dev, mailbox);
461 
462 	return err;
463 }
464 
465 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
466 {
467 	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
468 	int i;
469 
470 	for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
471 		if (table->refs[i] &&
472 		    (vid == (MLX4_VLAN_MASK &
473 			      be32_to_cpu(table->entries[i])))) {
474 			/* VLAN already registered, increase reference count */
475 			*idx = i;
476 			return 0;
477 		}
478 	}
479 
480 	return -ENOENT;
481 }
482 EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
483 
/*
 * Add @vlan to @port's VLAN table (or bump its refcount if already
 * present) and sync the table to firmware.
 *
 * The scan starts at MLX4_VLAN_REGULAR because the first slots are
 * reserved.  On success the slot index is stored in *@index and 0 is
 * returned; otherwise a negative errno.
 */
static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
				int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;

	mutex_lock(&table->mutex);

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (free < 0 && (table->refs[i] == 0)) {
			free = i;
			continue;
		}

		if (table->refs[i] &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* Vlan already registered, increase references count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}

	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Register new VLAN */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		/* Roll back the in-memory slot on firmware failure */
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
538 
539 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
540 {
541 	u64 out_param;
542 	int err;
543 
544 	if (mlx4_is_mfunc(dev)) {
545 		set_param_l(&out_param, port);
546 		err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
547 				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
548 				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
549 		if (!err)
550 			*index = get_param_l(&out_param);
551 
552 		return err;
553 	}
554 	return __mlx4_register_vlan(dev, port, vlan, index);
555 }
556 EXPORT_SYMBOL_GPL(mlx4_register_vlan);
557 
/*
 * Drop one reference on VLAN table slot @index of @port; when the
 * count reaches zero the entry is cleared and the table is synced to
 * firmware.  Indices below MLX4_VLAN_REGULAR are reserved and are
 * refused with a warning.
 */
static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		return;
	}

	mutex_lock(&table->mutex);
	if (!table->refs[index]) {
		mlx4_warn(dev, "No vlan entry for index %d\n", index);
		goto out;
	}
	if (--table->refs[index]) {
		/* Other users still hold this slot; leave the HW table */
		mlx4_dbg(dev, "Have more references for index %d,"
			 "no need to modify vlan table\n", index);
		goto out;
	}
	table->entries[index] = 0;
	mlx4_set_port_vlan_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
583 
584 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
585 {
586 	u64 in_param;
587 	int err;
588 
589 	if (mlx4_is_mfunc(dev)) {
590 		set_param_l(&in_param, port);
591 		err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
592 			       MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
593 			       MLX4_CMD_WRAPPED);
594 		if (!err)
595 			mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
596 					index);
597 
598 		return;
599 	}
600 	__mlx4_unregister_vlan(dev, port, index);
601 }
602 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
603 
/*
 * Query the IB capability mask of @port via a MAD_IFC command and
 * store it (big-endian, as received) in *@caps.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	memset(inbuf, 0, 256);
	memset(outbuf, 0, 256);
	/* Hand-built MAD header; bytes 0-3 and attribute id 0x0015 with
	 * attribute modifier = port look like a GET(PortInfo) request —
	 * NOTE(review): confirm field meanings against the IB spec. */
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		/* Capability mask sits at byte offset 84 of the response */
		*caps = *(__be32 *) (outbuf + 84);
	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}
640 
/*
 * Master-side handling of a SET_PORT request issued on behalf of
 * @slave.
 *
 * For Ethernet (op_mod != 0): slaves may only change the MTU; the
 * RQP_CALC and GENERAL contexts in the inbox are rewritten with
 * master-owned values (base QPN, aggregated max MTU) before the
 * command is forwarded to firmware.
 *
 * For IB (op_mod == 0): only the capability mask (aggregated across
 * all slaves) and the QKey-violation-counter reset flag are taken
 * from the request; on command failure the slave's stored cap mask
 * is rolled back.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	int reset_qkey_viols;
	int port;
	int is_eth;
	u32 in_modifier;
	u32 promisc;
	u16 mtu, prev_mtu;
	int err;
	int i;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	/* in_mod packs the port in the low byte and the context selector
	 * in the remaining bits */
	port = in_mod & 0xff;
	in_modifier = in_mod >> 8;
	is_eth = op_mod;
	port_info = &priv->port[port];

	/* Slaves cannot perform SET_PORT operations except changing MTU */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
					slave);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			/* Force the master's base QPN into the context while
			 * preserving the requested promisc mode bits */
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			gen_context = inbox->buf;
			/* Mtu is configured as the max MTU among all
			 * the functions on the port. */
			mtu = be16_to_cpu(gen_context->mtu);
			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
			prev_mtu = slave_st->mtu[port];
			slave_st->mtu[port] = mtu;
			if (mtu > master->max_mtu[port])
				master->max_mtu[port] = mtu;
			if (mtu < prev_mtu && prev_mtu ==
						master->max_mtu[port]) {
				/* The previous maximum may have belonged to
				 * this slave; recompute over all slaves */
				slave_st->mtu[port] = mtu;
				master->max_mtu[port] = mtu;
				for (i = 0; i < dev->num_slaves; i++) {
					master->max_mtu[port] =
					max(master->max_mtu[port],
					    master->slave_state[i].mtu[port]);
				}
			}

			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
			break;
		}
		return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violation counter - reset according to each request.
	 */

	/* Old and new SET_PORT layouts keep these fields at different
	 * offsets */
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	/* slave may not set the IS_SM capability for the port */
	if (slave != mlx4_master_func_num(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
		return -EINVAL;

	/* No DEV_MGMT in multifunc mode */
	if (mlx4_is_mfunc(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
		return -EINVAL;

	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* only clear mailbox for guests.  Master may be setting
	* MTU or PKEY table size
	*/
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf	   |= !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3]     |= !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		/* Roll back this slave's stored capability mask */
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}
773 
774 int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
775 			  struct mlx4_vhcr *vhcr,
776 			  struct mlx4_cmd_mailbox *inbox,
777 			  struct mlx4_cmd_mailbox *outbox,
778 			  struct mlx4_cmd_info *cmd)
779 {
780 	return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
781 				    vhcr->op_modifier, inbox);
782 }
783 
/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
	/* single-bit "change" flags enabling the fields above */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
	MLX4_CHANGE_PORT_VL_CAP	 = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};
792 
/*
 * Configure an IB port's capability mask, MTU cap, VL cap and —
 * on the master, when @pkey_tbl_sz >= 0 — the PKey table size, via
 * SET_PORT with op_mod 0.
 *
 * Returns 0 on success (or for Ethernet ports, which are configured
 * elsewhere) or a negative errno.
 */
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err, vl_cap, pkey_tbl_flag = 0;

	/* Ethernet ports use the other SET_PORT variants */
	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];

	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
		pkey_tbl_flag = 1;
		/* __be16 index 20 == byte offset 40 in the mailbox */
		((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
	}

	/* IB VL CAP enum isn't used by the firmware, just numerical values */
	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
			(1 << MLX4_CHANGE_PORT_VL_CAP)  |
			(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
			(vl_cap << MLX4_SET_PORT_VL_CAP));
		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
				MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
		/* Retry with a smaller VL cap only while FW says -ENOMEM */
		if (err != -ENOMEM)
			break;
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
831 
832 int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
833 			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
834 {
835 	struct mlx4_cmd_mailbox *mailbox;
836 	struct mlx4_set_port_general_context *context;
837 	int err;
838 	u32 in_mod;
839 
840 	mailbox = mlx4_alloc_cmd_mailbox(dev);
841 	if (IS_ERR(mailbox))
842 		return PTR_ERR(mailbox);
843 	context = mailbox->buf;
844 	memset(context, 0, sizeof *context);
845 
846 	context->flags = SET_PORT_GEN_ALL_VALID;
847 	context->mtu = cpu_to_be16(mtu);
848 	context->pptx = (pptx * (!pfctx)) << 7;
849 	context->pfctx = pfctx;
850 	context->pprx = (pprx * (!pfcrx)) << 7;
851 	context->pfcrx = pfcrx;
852 
853 	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
854 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
855 		       MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
856 
857 	mlx4_free_cmd_mailbox(dev, mailbox);
858 	return err;
859 }
860 EXPORT_SYMBOL(mlx4_SET_PORT_general);
861 
862 int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
863 			   u8 promisc)
864 {
865 	struct mlx4_cmd_mailbox *mailbox;
866 	struct mlx4_set_port_rqp_calc_context *context;
867 	int err;
868 	u32 in_mod;
869 	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
870 		MCAST_DIRECT : MCAST_DEFAULT;
871 
872 	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
873 		return 0;
874 
875 	mailbox = mlx4_alloc_cmd_mailbox(dev);
876 	if (IS_ERR(mailbox))
877 		return PTR_ERR(mailbox);
878 	context = mailbox->buf;
879 	memset(context, 0, sizeof *context);
880 
881 	context->base_qpn = cpu_to_be32(base_qpn);
882 	context->n_mac = dev->caps.log_num_macs;
883 	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
884 				       base_qpn);
885 	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
886 				     base_qpn);
887 	context->intra_no_vlan = 0;
888 	context->no_vlan = MLX4_NO_VLAN_IDX;
889 	context->intra_vlan_miss = 0;
890 	context->vlan_miss = MLX4_VLAN_MISS_IDX;
891 
892 	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
893 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
894 		       MLX4_CMD_TIME_CLASS_B,  MLX4_CMD_WRAPPED);
895 
896 	mlx4_free_cmd_mailbox(dev, mailbox);
897 	return err;
898 }
899 EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
900 
901 int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
902 {
903 	struct mlx4_cmd_mailbox *mailbox;
904 	struct mlx4_set_port_prio2tc_context *context;
905 	int err;
906 	u32 in_mod;
907 	int i;
908 
909 	mailbox = mlx4_alloc_cmd_mailbox(dev);
910 	if (IS_ERR(mailbox))
911 		return PTR_ERR(mailbox);
912 	context = mailbox->buf;
913 	memset(context, 0, sizeof *context);
914 
915 	for (i = 0; i < MLX4_NUM_UP; i += 2)
916 		context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
917 
918 	in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
919 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
920 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
921 
922 	mlx4_free_cmd_mailbox(dev, mailbox);
923 	return err;
924 }
925 EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
926 
927 int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
928 		u8 *pg, u16 *ratelimit)
929 {
930 	struct mlx4_cmd_mailbox *mailbox;
931 	struct mlx4_set_port_scheduler_context *context;
932 	int err;
933 	u32 in_mod;
934 	int i;
935 
936 	mailbox = mlx4_alloc_cmd_mailbox(dev);
937 	if (IS_ERR(mailbox))
938 		return PTR_ERR(mailbox);
939 	context = mailbox->buf;
940 	memset(context, 0, sizeof *context);
941 
942 	for (i = 0; i < MLX4_NUM_TC; i++) {
943 		struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
944 		u16 r = ratelimit && ratelimit[i] ? ratelimit[i] :
945 			MLX4_RATELIMIT_DEFAULT;
946 
947 		tc->pg = htons(pg[i]);
948 		tc->bw_precentage = htons(tc_tx_bw[i]);
949 
950 		tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
951 		tc->max_bw_value = htons(r);
952 	}
953 
954 	in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
955 	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
956 		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
957 
958 	mlx4_free_cmd_mailbox(dev, mailbox);
959 	return err;
960 }
961 EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
962 
/* Command-table wrapper for SET_MCAST_FLTR from a slave: nothing to
 * do on the master side; always report success.
 */
int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	return 0;
}
973 
974 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
975 			u64 mac, u64 clear, u8 mode)
976 {
977 	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
978 			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
979 			MLX4_CMD_WRAPPED);
980 }
981 EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
982 
/* Command-table wrapper for SET_VLAN_FLTR from a slave: nothing to
 * do on the master side; always report success.
 */
int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	return 0;
}
993 
/*
 * Issue DUMP_ETH_STATS with modifier @in_mod; the firmware writes
 * the statistics into @outbox.  @slave is unused here and kept for
 * signature symmetry with the other common helpers.
 */
int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
			       u32 in_mod, struct mlx4_cmd_mailbox *outbox)
{
	return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
			    MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
			    MLX4_CMD_NATIVE);
}
1001 
1002 int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
1003 				struct mlx4_vhcr *vhcr,
1004 				struct mlx4_cmd_mailbox *inbox,
1005 				struct mlx4_cmd_mailbox *outbox,
1006 				struct mlx4_cmd_info *cmd)
1007 {
1008 	if (slave != dev->caps.function)
1009 		return 0;
1010 	return mlx4_common_dump_eth_stats(dev, slave,
1011 					  vhcr->in_modifier, outbox);
1012 }
1013 
1014 void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
1015 {
1016 	if (!mlx4_is_mfunc(dev)) {
1017 		*stats_bitmap = 0;
1018 		return;
1019 	}
1020 
1021 	*stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
1022 			 MLX4_STATS_TRAFFIC_DROPS_MASK |
1023 			 MLX4_STATS_PORT_COUNTERS_MASK);
1024 
1025 	if (mlx4_is_master(dev))
1026 		*stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
1027 }
1028 EXPORT_SYMBOL(mlx4_set_stats_bitmap);
1029