// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2018 Mellanox Technologies */

/*
 * devcom: device-communication infrastructure.
 *
 * Lets multiple mlx5 core devices that share a logical "component"
 * (identified by an mlx5_devcom_component id plus a u64 key) discover each
 * other, exchange events, and iterate over each other's per-component data.
 *
 * Lifetime model (as implemented below):
 *  - mlx5_devcom_dev:      one per registered mlx5_core_dev, kref-counted,
 *                          kept on the global devcom_dev_list.
 *  - mlx5_devcom_comp:     one per unique (id, key) pair, kref-counted,
 *                          kept on the global devcom_comp_list; owns the
 *                          rwsem that serializes its member list.
 *  - mlx5_devcom_comp_dev: the membership of one device in one component;
 *                          holds a reference on both of the above.
 */

#include <linux/mlx5/vport.h>
#include <linux/list.h>
#include "lib/devcom.h"
#include "mlx5_core.h"

static LIST_HEAD(devcom_dev_list);
static LIST_HEAD(devcom_comp_list);
/* protect device list */
static DEFINE_MUTEX(dev_list_lock);
/* protect component list */
static DEFINE_MUTEX(comp_list_lock);

/* Iterate all registered components; caller must hold comp_list_lock. */
#define devcom_for_each_component(iter) \
	list_for_each_entry(iter, &devcom_comp_list, comp_list)

struct mlx5_devcom_dev {
	struct list_head list;		/* entry on devcom_dev_list */
	struct mlx5_core_dev *dev;	/* the core device this wraps */
	struct kref ref;		/* released via mlx5_devcom_dev_release */
};

struct mlx5_devcom_comp {
	struct list_head comp_list;	/* entry on devcom_comp_list */
	enum mlx5_devcom_component id;	/* component type */
	u64 key;			/* disambiguates instances of same id */
	struct list_head comp_dev_list_head; /* member mlx5_devcom_comp_dev's */
	mlx5_devcom_event_handler_t handler; /* shared by all members */
	struct kref ref;		/* released via mlx5_devcom_comp_release */
	bool ready;			/* set/read with WRITE_ONCE/READ_ONCE */
	struct rw_semaphore sem;	/* protects comp_dev_list_head and events */
	struct lock_class_key lock_key;	/* per-component lockdep class for sem */
};

struct mlx5_devcom_comp_dev {
	struct list_head list;		/* entry on comp->comp_dev_list_head */
	struct mlx5_devcom_comp *comp;	/* component we belong to (refed) */
	struct mlx5_devcom_dev *devc;	/* owning device (refed) */
	void __rcu *data;		/* caller data; RCU-published, may be NULL */
};

/*
 * Return true if @dev is already registered.
 * Caller must hold dev_list_lock (see mlx5_devcom_register_device()).
 */
static bool devcom_dev_exists(struct mlx5_core_dev *dev)
{
	struct mlx5_devcom_dev *iter;

	list_for_each_entry(iter, &devcom_dev_list, list)
		if (iter->dev == dev)
			return true;

	return false;
}

/* Allocate a devcom device wrapper for @dev with an initial reference. */
static struct mlx5_devcom_dev *
mlx5_devcom_dev_alloc(struct mlx5_core_dev *dev)
{
	struct mlx5_devcom_dev *devc;

	devc = kzalloc(sizeof(*devc), GFP_KERNEL);
	if (!devc)
		return NULL;

	devc->dev = dev;
	kref_init(&devc->ref);
	return devc;
}

/*
 * Register @dev with devcom.
 *
 * Returns the new mlx5_devcom_dev, ERR_PTR(-EEXIST) if @dev is already
 * registered, or ERR_PTR(-ENOMEM) on allocation failure.
 */
struct mlx5_devcom_dev *
mlx5_devcom_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_devcom_dev *devc;

	mutex_lock(&dev_list_lock);

	if (devcom_dev_exists(dev)) {
		devc = ERR_PTR(-EEXIST);
		goto out;
	}

	devc = mlx5_devcom_dev_alloc(dev);
	if (!devc) {
		devc = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_add_tail(&devc->list, &devcom_dev_list);
out:
	mutex_unlock(&dev_list_lock);
	return devc;
}

/* kref release callback: unlink from the global list and free. */
static void
mlx5_devcom_dev_release(struct kref *ref)
{
	struct mlx5_devcom_dev *devc = container_of(ref, struct mlx5_devcom_dev, ref);

	mutex_lock(&dev_list_lock);
	list_del(&devc->list);
	mutex_unlock(&dev_list_lock);
	kfree(devc);
}

/* Drop the registration reference taken by mlx5_devcom_register_device(). */
void mlx5_devcom_unregister_device(struct mlx5_devcom_dev *devc)
{
	if (!IS_ERR_OR_NULL(devc))
		kref_put(&devc->ref, mlx5_devcom_dev_release);
}

/*
 * Allocate a component for (@id, @key) with an initial reference.
 * Each component gets its own lockdep class for its rwsem, so holding
 * one component's sem while taking another's does not trip lockdep.
 */
static struct mlx5_devcom_comp *
mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
{
	struct mlx5_devcom_comp *comp;

	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	comp->id = id;
	comp->key = key;
	comp->handler = handler;
	init_rwsem(&comp->sem);
	lockdep_register_key(&comp->lock_key);
	lockdep_set_class(&comp->sem, &comp->lock_key);
	kref_init(&comp->ref);
	INIT_LIST_HEAD(&comp->comp_dev_list_head);

	return comp;
}

/* kref release callback: unlink, drop the lockdep key, free. */
static void
mlx5_devcom_comp_release(struct kref *ref)
{
	struct mlx5_devcom_comp *comp = container_of(ref, struct mlx5_devcom_comp, ref);

	mutex_lock(&comp_list_lock);
	list_del(&comp->comp_list);
	mutex_unlock(&comp_list_lock);
	lockdep_unregister_key(&comp->lock_key);
	kfree(comp);
}

/*
 * Create the membership object tying @devc to @comp and publish it on the
 * component's member list.  Takes a reference on @devc; the caller's
 * reference on @comp is consumed by the membership (it is dropped in
 * devcom_free_comp_dev()).
 */
static struct mlx5_devcom_comp_dev *
devcom_alloc_comp_dev(struct mlx5_devcom_dev *devc,
		      struct mlx5_devcom_comp *comp,
		      void *data)
{
	struct mlx5_devcom_comp_dev *devcom;

	devcom = kzalloc(sizeof(*devcom), GFP_KERNEL);
	if (!devcom)
		return ERR_PTR(-ENOMEM);

	kref_get(&devc->ref);
	devcom->devc = devc;
	devcom->comp = comp;
	rcu_assign_pointer(devcom->data, data);

	down_write(&comp->sem);
	list_add_tail(&devcom->list, &comp->comp_dev_list_head);
	up_write(&comp->sem);

	return devcom;
}

/*
 * Undo devcom_alloc_comp_dev(): unlink under the component sem, then drop
 * the device and component references.  The comp ref is dropped last so
 * @comp (and its sem) stays valid while we unlink.
 */
static void
devcom_free_comp_dev(struct mlx5_devcom_comp_dev *devcom)
{
	struct mlx5_devcom_comp *comp = devcom->comp;

	down_write(&comp->sem);
	list_del(&devcom->list);
	up_write(&comp->sem);

	kref_put(&devcom->devc->ref, mlx5_devcom_dev_release);
	kfree(devcom);
	kref_put(&comp->ref, mlx5_devcom_comp_release);
}

/* Two components match when both id and key are equal. */
static bool
devcom_component_equal(struct mlx5_devcom_comp *devcom,
		       enum mlx5_devcom_component id,
		       u64 key)
{
	return devcom->id == id && devcom->key == key;
}

/*
 * Look up an existing component for (@id, @key).
 * Caller must hold comp_list_lock (see mlx5_devcom_register_component()).
 *
 * Returns the component with an extra reference, NULL if none exists, or
 * ERR_PTR(-EINVAL) if one exists but was registered with a different
 * @handler (all members of a component must share one handler).
 */
static struct mlx5_devcom_comp *
devcom_component_get(struct mlx5_devcom_dev *devc,
		     enum mlx5_devcom_component id,
		     u64 key,
		     mlx5_devcom_event_handler_t handler)
{
	struct mlx5_devcom_comp *comp;

	devcom_for_each_component(comp) {
		if (devcom_component_equal(comp, id, key)) {
			if (handler == comp->handler) {
				kref_get(&comp->ref);
				return comp;
			}

			mlx5_core_err(devc->dev,
				      "Cannot register existing devcom component with different handler\n");
			return ERR_PTR(-EINVAL);
		}
	}

	return NULL;
}

/*
 * Join @devc to the component identified by (@id, @key), creating the
 * component if it does not exist yet.  @data is published (RCU) for peers
 * to read via the iteration helpers below.
 *
 * Returns the membership handle, NULL if @devc itself is an error/NULL,
 * or an ERR_PTR on failure.  On failure the component reference obtained
 * here (fresh kref_init or kref_get) is dropped again.
 */
struct mlx5_devcom_comp_dev *
mlx5_devcom_register_component(struct mlx5_devcom_dev *devc,
			       enum mlx5_devcom_component id,
			       u64 key,
			       mlx5_devcom_event_handler_t handler,
			       void *data)
{
	struct mlx5_devcom_comp_dev *devcom;
	struct mlx5_devcom_comp *comp;

	if (IS_ERR_OR_NULL(devc))
		return NULL;

	mutex_lock(&comp_list_lock);
	comp = devcom_component_get(devc, id, key, handler);
	if (IS_ERR(comp)) {
		devcom = ERR_PTR(-EINVAL);
		goto out_unlock;
	}

	if (!comp) {
		comp = mlx5_devcom_comp_alloc(id, key, handler);
		if (IS_ERR(comp)) {
			devcom = ERR_CAST(comp);
			goto out_unlock;
		}
		list_add_tail(&comp->comp_list, &devcom_comp_list);
	}
	mutex_unlock(&comp_list_lock);

	devcom = devcom_alloc_comp_dev(devc, comp, data);
	if (IS_ERR(devcom))
		kref_put(&comp->ref, mlx5_devcom_comp_release);

	return devcom;

out_unlock:
	mutex_unlock(&comp_list_lock);
	return devcom;
}

/* Leave the component; tolerate ERR_PTR/NULL handles from registration. */
void mlx5_devcom_unregister_component(struct mlx5_devcom_comp_dev *devcom)
{
	if (!IS_ERR_OR_NULL(devcom))
		devcom_free_comp_dev(devcom);
}

/*
 * Invoke the component handler with @event on every OTHER member that has
 * published non-NULL data, under the component's write sem.
 *
 * If a handler returns an error, previously-notified members are walked in
 * reverse and given @rollback_event (rollback handler errors are ignored),
 * and that first error is returned.  Returns 0 on success, -ENODEV for an
 * invalid handle.
 */
int mlx5_devcom_send_event(struct mlx5_devcom_comp_dev *devcom,
			   int event, int rollback_event,
			   void *event_data)
{
	struct mlx5_devcom_comp_dev *pos;
	struct mlx5_devcom_comp *comp;
	int err = 0;
	void *data;

	if (IS_ERR_OR_NULL(devcom))
		return -ENODEV;

	comp = devcom->comp;
	down_write(&comp->sem);
	list_for_each_entry(pos, &comp->comp_dev_list_head, list) {
		data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));

		if (pos != devcom && data) {
			err = comp->handler(event, data, event_data);
			if (err)
				goto rollback;
		}
	}

	up_write(&comp->sem);
	return 0;

rollback:
	/* If the very first entry failed there is nothing to roll back. */
	if (list_entry_is_head(pos, &comp->comp_dev_list_head, list))
		goto out;
	pos = list_prev_entry(pos, list);
	list_for_each_entry_from_reverse(pos, &comp->comp_dev_list_head, list) {
		data = rcu_dereference_protected(pos->data, lockdep_is_held(&comp->sem));

		if (pos != devcom && data)
			comp->handler(rollback_event, data, event_data);
	}
out:
	up_write(&comp->sem);
	return err;
}

/*
 * Mark the component ready/not-ready.  Caller must hold the component sem
 * (read or write — only rwsem_is_locked() is asserted).
 */
void mlx5_devcom_comp_set_ready(struct mlx5_devcom_comp_dev *devcom, bool ready)
{
	WARN_ON(!rwsem_is_locked(&devcom->comp->sem));

	WRITE_ONCE(devcom->comp->ready, ready);
}

/* Lockless ready check; false for an invalid handle. */
bool mlx5_devcom_comp_is_ready(struct mlx5_devcom_comp_dev *devcom)
{
	if (IS_ERR_OR_NULL(devcom))
		return false;

	return READ_ONCE(devcom->comp->ready);
}

/*
 * Begin a peer-iteration critical section: take the component sem for read
 * if the component is ready.  Returns true with the sem held (caller must
 * call mlx5_devcom_for_each_peer_end()), false without it.
 */
bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom_comp_dev *devcom)
{
	struct mlx5_devcom_comp *comp;

	if (IS_ERR_OR_NULL(devcom))
		return false;

	comp = devcom->comp;
	down_read(&comp->sem);
	if (!READ_ONCE(comp->ready)) {
		up_read(&comp->sem);
		return false;
	}

	return true;
}

/* End a peer-iteration section begun by mlx5_devcom_for_each_peer_begin(). */
void mlx5_devcom_for_each_peer_end(struct mlx5_devcom_comp_dev *devcom)
{
	up_read(&devcom->comp->sem);
}

/*
 * Advance *@pos to the next peer (member != @devcom) with non-NULL data
 * and return that data, or NULL when the list is exhausted.  Start with
 * *@pos == NULL / list head; caller must hold the component sem (the
 * rcu_dereference_protected() below asserts it).
 */
void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom_comp_dev *devcom,
				     struct mlx5_devcom_comp_dev **pos)
{
	struct mlx5_devcom_comp *comp = devcom->comp;
	struct mlx5_devcom_comp_dev *tmp;
	void *data;

	tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);

	list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
		if (tmp != devcom) {
			data = rcu_dereference_protected(tmp->data, lockdep_is_held(&comp->sem));
			if (data)
				break;
		}
	}

	if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
		return NULL;

	*pos = tmp;
	return data;
}

/*
 * RCU variant of mlx5_devcom_get_next_peer_data(): caller is expected to be
 * in an RCU read section instead of holding the component sem.  Bails out
 * with NULL if the component drops out of the ready state mid-walk.
 */
void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
					 struct mlx5_devcom_comp_dev **pos)
{
	struct mlx5_devcom_comp *comp = devcom->comp;
	struct mlx5_devcom_comp_dev *tmp;
	void *data;

	tmp = list_prepare_entry(*pos, &comp->comp_dev_list_head, list);

	list_for_each_entry_continue(tmp, &comp->comp_dev_list_head, list) {
		if (tmp != devcom) {
			/* This can change concurrently, however 'data' pointer will remain
			 * valid for the duration of RCU read section.
			 */
			if (!READ_ONCE(comp->ready))
				return NULL;
			data = rcu_dereference(tmp->data);
			if (data)
				break;
		}
	}

	if (list_entry_is_head(tmp, &comp->comp_dev_list_head, list))
		return NULL;

	*pos = tmp;
	return data;
}

/* Take the component sem for write; no-op on an invalid handle. */
void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom)
{
	if (IS_ERR_OR_NULL(devcom))
		return;
	down_write(&devcom->comp->sem);
}

/* Release the component write sem; no-op on an invalid handle. */
void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom)
{
	if (IS_ERR_OR_NULL(devcom))
		return;
	up_write(&devcom->comp->sem);
}

/*
 * Try to take the component sem for write.  Returns non-zero on success,
 * 0 on contention or on an invalid handle (down_write_trylock() semantics).
 */
int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
{
	if (IS_ERR_OR_NULL(devcom))
		return 0;
	return down_write_trylock(&devcom->comp->sem);
}