1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cadence CDNSP DRD Driver.
4  *
5  * Copyright (C) 2020 Cadence.
6  *
7  * Author: Pawel Laszczak <pawell@cadence.com>
8  *
9  * Code based on Linux XHCI driver.
10  * Origin: Copyright (C) 2008 Intel Corp.
11  */
12 
13 #include <linux/dma-mapping.h>
14 #include <linux/dmapool.h>
15 #include <linux/slab.h>
16 #include <linux/usb.h>
17 
18 #include "cdnsp-gadget.h"
19 #include "cdnsp-trace.h"
20 
21 static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
22 				   struct cdnsp_ep *pep);
23 /*
24  * Allocates a generic ring segment from the ring pool, sets the dma address,
25  * initializes the segment to zero, and sets the private next pointer to NULL.
26  *
27  * "All components of all Command and Transfer TRBs shall be initialized to '0'"
28  */
29 static struct cdnsp_segment *cdnsp_segment_alloc(struct cdnsp_device *pdev,
30 						 unsigned int cycle_state,
31 						 unsigned int max_packet,
32 						 gfp_t flags)
33 {
34 	struct cdnsp_segment *seg;
35 	dma_addr_t dma;
36 	int i;
37 
38 	seg = kzalloc(sizeof(*seg), flags);
39 	if (!seg)
40 		return NULL;
41 
42 	seg->trbs = dma_pool_zalloc(pdev->segment_pool, flags, &dma);
43 	if (!seg->trbs) {
44 		kfree(seg);
45 		return NULL;
46 	}
47 
48 	if (max_packet) {
49 		seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
50 		if (!seg->bounce_buf)
51 			goto free_dma;
52 	}
53 
54 	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs. */
55 	if (cycle_state == 0) {
56 		for (i = 0; i < TRBS_PER_SEGMENT; i++)
57 			seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
58 	}
59 	seg->dma = dma;
60 	seg->next = NULL;
61 
62 	return seg;
63 
64 free_dma:
65 	dma_pool_free(pdev->segment_pool, seg->trbs, dma);
66 	kfree(seg);
67 
68 	return NULL;
69 }
70 
71 static void cdnsp_segment_free(struct cdnsp_device *pdev,
72 			       struct cdnsp_segment *seg)
73 {
74 	if (seg->trbs)
75 		dma_pool_free(pdev->segment_pool, seg->trbs, seg->dma);
76 
77 	kfree(seg->bounce_buf);
78 	kfree(seg);
79 }
80 
81 static void cdnsp_free_segments_for_ring(struct cdnsp_device *pdev,
82 					 struct cdnsp_segment *first)
83 {
84 	struct cdnsp_segment *seg;
85 
86 	seg = first->next;
87 
88 	while (seg != first) {
89 		struct cdnsp_segment *next = seg->next;
90 
91 		cdnsp_segment_free(pdev, seg);
92 		seg = next;
93 	}
94 
95 	cdnsp_segment_free(pdev, first);
96 }
97 
98 /*
99  * Make the prev segment point to the next segment.
100  *
101  * Change the last TRB in the prev segment to be a Link TRB which points to the
102  * DMA address of the next segment. The caller needs to set any Link TRB
103  * related flags, such as End TRB, Toggle Cycle, and no snoop.
104  */
105 static void cdnsp_link_segments(struct cdnsp_device *pdev,
106 				struct cdnsp_segment *prev,
107 				struct cdnsp_segment *next,
108 				enum cdnsp_ring_type type)
109 {
110 	struct cdnsp_link_trb *link;
111 	u32 val;
112 
113 	if (!prev || !next)
114 		return;
115 
116 	prev->next = next;
117 	if (type != TYPE_EVENT) {
118 		link = &prev->trbs[TRBS_PER_SEGMENT - 1].link;
119 		link->segment_ptr = cpu_to_le64(next->dma);
120 
121 		/*
122 		 * Set the last TRB in the segment to have a TRB type ID
123 		 * of Link TRB
124 		 */
125 		val = le32_to_cpu(link->control);
126 		val &= ~TRB_TYPE_BITMASK;
127 		val |= TRB_TYPE(TRB_LINK);
128 		link->control = cpu_to_le32(val);
129 	}
130 }
131 
132 /*
133  * Link the ring to the new segments.
134  * Set Toggle Cycle for the new ring if needed.
135  */
136 static void cdnsp_link_rings(struct cdnsp_device *pdev,
137 			     struct cdnsp_ring *ring,
138 			     struct cdnsp_segment *first,
139 			     struct cdnsp_segment *last,
140 			     unsigned int num_segs)
141 {
142 	struct cdnsp_segment *next;
143 
144 	if (!ring || !first || !last)
145 		return;
146 
147 	next = ring->enq_seg->next;
148 	cdnsp_link_segments(pdev, ring->enq_seg, first, ring->type);
149 	cdnsp_link_segments(pdev, last, next, ring->type);
150 	ring->num_segs += num_segs;
151 	ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
152 
153 	if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
154 		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
155 			~cpu_to_le32(LINK_TOGGLE);
156 		last->trbs[TRBS_PER_SEGMENT - 1].link.control |=
157 			cpu_to_le32(LINK_TOGGLE);
158 		ring->last_seg = last;
159 	}
160 }
161 
162 /*
163  * We need a radix tree for mapping physical addresses of TRBs to the stream ID
164  * they belong to. We need to do this because the device controller won't tell
165  * us which stream ring the TRB came from. We could store the stream ID in an
166  * event data TRB, but that doesn't help us for the cancellation case, since the
167  * endpoint may stop before it reaches that event data TRB.
168  *
169  * The radix tree maps the upper portion of the TRB DMA address to a ring
170  * segment that has the same upper portion of DMA addresses. For example,
171  * say I have segments of size 1KB, which are always 1KB aligned. A segment may
172  * start at 0x10c91000 and end at 0x10c913f0. If I shift the DMA address right
173  * by 10 bits, the key for the stream ID is 0x43244. I can use the DMA address
174  * of a TRB to pass the radix tree a key to get the right stream ID:
175  *
176  *	0x10c90fff >> 10 = 0x43243
177  *	0x10c912c0 >> 10 = 0x43244
178  *	0x10c91400 >> 10 = 0x43245
179  *
180  * Obviously, only those TRBs with DMA addresses that are within the segment
181  * will make the radix tree return the stream ID for that ring.
182  *
183  * Caveats for the radix tree:
184  *
185  * The radix tree uses an unsigned long as its key. On 32-bit systems, an
186  * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
187  * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
188  * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
189  * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
190  * extended systems (where the DMA address can be bigger than 32-bits),
191  * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
192  */
193 static int cdnsp_insert_segment_mapping(struct radix_tree_root *trb_address_map,
194 					struct cdnsp_ring *ring,
195 					struct cdnsp_segment *seg,
196 					gfp_t mem_flags)
197 {
198 	unsigned long key;
199 	int ret;
200 
201 	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
202 
203 	/* Skip any segments that were already added. */
204 	if (radix_tree_lookup(trb_address_map, key))
205 		return 0;
206 
207 	ret = radix_tree_maybe_preload(mem_flags);
208 	if (ret)
209 		return ret;
210 
211 	ret = radix_tree_insert(trb_address_map, key, ring);
212 	radix_tree_preload_end();
213 
214 	return ret;
215 }
216 
217 static void cdnsp_remove_segment_mapping(struct radix_tree_root *trb_address_map,
218 					 struct cdnsp_segment *seg)
219 {
220 	unsigned long key;
221 
222 	key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
223 	if (radix_tree_lookup(trb_address_map, key))
224 		radix_tree_delete(trb_address_map, key);
225 }
226 
227 static int cdnsp_update_stream_segment_mapping(struct radix_tree_root *trb_address_map,
228 					       struct cdnsp_ring *ring,
229 					       struct cdnsp_segment *first_seg,
230 					       struct cdnsp_segment *last_seg,
231 					       gfp_t mem_flags)
232 {
233 	struct cdnsp_segment *failed_seg;
234 	struct cdnsp_segment *seg;
235 	int ret;
236 
237 	seg = first_seg;
238 	do {
239 		ret = cdnsp_insert_segment_mapping(trb_address_map, ring, seg,
240 						   mem_flags);
241 		if (ret)
242 			goto remove_streams;
243 		if (seg == last_seg)
244 			return 0;
245 		seg = seg->next;
246 	} while (seg != first_seg);
247 
248 	return 0;
249 
250 remove_streams:
251 	failed_seg = seg;
252 	seg = first_seg;
253 	do {
254 		cdnsp_remove_segment_mapping(trb_address_map, seg);
255 		if (seg == failed_seg)
256 			return ret;
257 		seg = seg->next;
258 	} while (seg != first_seg);
259 
260 	return ret;
261 }
262 
263 static void cdnsp_remove_stream_mapping(struct cdnsp_ring *ring)
264 {
265 	struct cdnsp_segment *seg;
266 
267 	seg = ring->first_seg;
268 	do {
269 		cdnsp_remove_segment_mapping(ring->trb_address_map, seg);
270 		seg = seg->next;
271 	} while (seg != ring->first_seg);
272 }
273 
274 static int cdnsp_update_stream_mapping(struct cdnsp_ring *ring)
275 {
276 	return cdnsp_update_stream_segment_mapping(ring->trb_address_map, ring,
277 			ring->first_seg, ring->last_seg, GFP_ATOMIC);
278 }
279 
280 static void cdnsp_ring_free(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
281 {
282 	if (!ring)
283 		return;
284 
285 	trace_cdnsp_ring_free(ring);
286 
287 	if (ring->first_seg) {
288 		if (ring->type == TYPE_STREAM)
289 			cdnsp_remove_stream_mapping(ring);
290 
291 		cdnsp_free_segments_for_ring(pdev, ring->first_seg);
292 	}
293 
294 	kfree(ring);
295 }
296 
297 void cdnsp_initialize_ring_info(struct cdnsp_ring *ring)
298 {
299 	ring->enqueue = ring->first_seg->trbs;
300 	ring->enq_seg = ring->first_seg;
301 	ring->dequeue = ring->enqueue;
302 	ring->deq_seg = ring->first_seg;
303 
304 	/*
305 	 * The ring is initialized to 0. The producer must write 1 to the cycle
306 	 * bit to hand over ownership of the TRB, so PCS = 1. The consumer must
307 	 * compare CCS to the cycle bit to check ownership, so CCS = 1.
308 	 *
309 	 * New rings are initialized with cycle state equal to 1; if we are
310 	 * handling ring expansion, set the cycle state equal to the old ring.
311 	 */
312 	ring->cycle_state = 1;
313 
314 	/*
315 	 * Each segment has a link TRB, and we leave an extra TRB for SW
316 	 * accounting purposes.
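	 *
	 * For example, the two-segment rings allocated for endpoints start
	 * with 2 * (TRBS_PER_SEGMENT - 1) - 1 free TRBs.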
317 	 */
318 	ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
319 }
320 
321 /* Allocate segments and link them for a ring. */
322 static int cdnsp_alloc_segments_for_ring(struct cdnsp_device *pdev,
323 					 struct cdnsp_segment **first,
324 					 struct cdnsp_segment **last,
325 					 unsigned int num_segs,
326 					 unsigned int cycle_state,
327 					 enum cdnsp_ring_type type,
328 					 unsigned int max_packet,
329 					 gfp_t flags)
330 {
331 	struct cdnsp_segment *prev;
332 
333 	/* Allocate first segment. */
334 	prev = cdnsp_segment_alloc(pdev, cycle_state, max_packet, flags);
335 	if (!prev)
336 		return -ENOMEM;
337 
338 	num_segs--;
339 	*first = prev;
340 
341 	/* Allocate all other segments. */
342 	while (num_segs > 0) {
343 		struct cdnsp_segment	*next;
344 
345 		next = cdnsp_segment_alloc(pdev, cycle_state,
346 					   max_packet, flags);
347 		if (!next) {
348 			cdnsp_free_segments_for_ring(pdev, *first);
349 			return -ENOMEM;
350 		}
351 
352 		cdnsp_link_segments(pdev, prev, next, type);
353 
354 		prev = next;
355 		num_segs--;
356 	}
357 
358 	cdnsp_link_segments(pdev, prev, *first, type);
359 	*last = prev;
360 
361 	return 0;
362 }
363 
364 /*
365  * Create a new ring with zero or more segments.
366  *
367  * Link the segments together into a ring.
368  * Set the end flag and the cycle toggle bit on the last segment.
369  */
370 static struct cdnsp_ring *cdnsp_ring_alloc(struct cdnsp_device *pdev,
371 					   unsigned int num_segs,
372 					   enum cdnsp_ring_type type,
373 					   unsigned int max_packet,
374 					   gfp_t flags)
375 {
376 	struct cdnsp_ring *ring;
377 	int ret;
378 
379 	ring = kzalloc(sizeof(*ring), flags);
380 	if (!ring)
381 		return NULL;
382 
383 	ring->num_segs = num_segs;
384 	ring->bounce_buf_len = max_packet;
385 	INIT_LIST_HEAD(&ring->td_list);
386 	ring->type = type;
387 
388 	if (num_segs == 0)
389 		return ring;
390 
391 	ret = cdnsp_alloc_segments_for_ring(pdev, &ring->first_seg,
392 					    &ring->last_seg, num_segs,
393 					    1, type, max_packet, flags);
394 	if (ret)
395 		goto fail;
396 
397 	/* Only the event ring does not use a link TRB. */
398 	if (type != TYPE_EVENT)
399 		ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
400 			cpu_to_le32(LINK_TOGGLE);
401 
402 	cdnsp_initialize_ring_info(ring);
403 	trace_cdnsp_ring_alloc(ring);
404 	return ring;
405 fail:
406 	kfree(ring);
407 	return NULL;
408 }
409 
410 void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
411 {
412 	cdnsp_ring_free(pdev, pep->ring);
413 	pep->ring = NULL;
414 	cdnsp_free_stream_info(pdev, pep);
415 }
416 
417 /*
418  * Expand an existing ring.
419  * Allocate new segments and link them into the existing ring.
420  */
421 int cdnsp_ring_expansion(struct cdnsp_device *pdev,
422 			 struct cdnsp_ring *ring,
423 			 unsigned int num_trbs,
424 			 gfp_t flags)
425 {
426 	unsigned int num_segs_needed;
427 	struct cdnsp_segment *first;
428 	struct cdnsp_segment *last;
429 	unsigned int num_segs;
430 	int ret;
431 
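	/*
	 * Each segment contributes TRBS_PER_SEGMENT - 1 usable TRBs (the last
	 * slot holds the link TRB), so round num_trbs up to that granularity.
	 */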
432 	num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
433 			(TRBS_PER_SEGMENT - 1);
434 
435 	/* Allocate the segments needed, or enough to double the ring size. */
436 	num_segs = max(ring->num_segs, num_segs_needed);
437 
438 	ret = cdnsp_alloc_segments_for_ring(pdev, &first, &last, num_segs,
439 					    ring->cycle_state, ring->type,
440 					    ring->bounce_buf_len, flags);
441 	if (ret)
442 		return -ENOMEM;
443 
444 	if (ring->type == TYPE_STREAM)
445 		ret = cdnsp_update_stream_segment_mapping(ring->trb_address_map,
446 							  ring, first,
447 							  last, flags);
448 
449 	if (ret) {
450 		cdnsp_free_segments_for_ring(pdev, first);
451 
452 		return ret;
453 	}
454 
455 	cdnsp_link_rings(pdev, ring, first, last, num_segs);
456 	trace_cdnsp_ring_expansion(ring);
457 
458 	return 0;
459 }
460 
461 static int cdnsp_init_device_ctx(struct cdnsp_device *pdev)
462 {
463 	int size = HCC_64BYTE_CONTEXT(pdev->hcc_params) ? 2048 : 1024;
464 
465 	pdev->out_ctx.type = CDNSP_CTX_TYPE_DEVICE;
466 	pdev->out_ctx.size = size;
467 	pdev->out_ctx.ctx_size = CTX_SIZE(pdev->hcc_params);
468 	pdev->out_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
469 					      &pdev->out_ctx.dma);
470 
471 	if (!pdev->out_ctx.bytes)
472 		return -ENOMEM;
473 
474 	pdev->in_ctx.type = CDNSP_CTX_TYPE_INPUT;
475 	pdev->in_ctx.ctx_size = pdev->out_ctx.ctx_size;
476 	pdev->in_ctx.size = size + pdev->out_ctx.ctx_size;
477 	pdev->in_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
478 					     &pdev->in_ctx.dma);
479 
480 	if (!pdev->in_ctx.bytes) {
481 		dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
482 			      pdev->out_ctx.dma);
483 		return -ENOMEM;
484 	}
485 
486 	return 0;
487 }
488 
489 struct cdnsp_input_control_ctx
490 	*cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx)
491 {
492 	if (ctx->type != CDNSP_CTX_TYPE_INPUT)
493 		return NULL;
494 
495 	return (struct cdnsp_input_control_ctx *)ctx->bytes;
496 }
497 
498 struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx)
499 {
500 	if (ctx->type == CDNSP_CTX_TYPE_DEVICE)
501 		return (struct cdnsp_slot_ctx *)ctx->bytes;
502 
503 	return (struct cdnsp_slot_ctx *)(ctx->bytes + ctx->ctx_size);
504 }
505 
506 struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx,
507 				      unsigned int ep_index)
508 {
509 	/* Increment ep index by offset of start of ep ctx array. */
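	/*
	 * In a device context the slot context occupies entry 0; an input
	 * context additionally has an input control context in front of it,
	 * hence the extra increment below.
	 */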
510 	ep_index++;
511 	if (ctx->type == CDNSP_CTX_TYPE_INPUT)
512 		ep_index++;
513 
514 	return (struct cdnsp_ep_ctx *)(ctx->bytes + (ep_index * ctx->ctx_size));
515 }
516 
517 static void cdnsp_free_stream_ctx(struct cdnsp_device *pdev,
518 				  struct cdnsp_ep *pep)
519 {
520 	dma_pool_free(pdev->device_pool, pep->stream_info.stream_ctx_array,
521 		      pep->stream_info.ctx_array_dma);
522 }
523 
524 /* The number of entries in the stream context array must be a power of 2. */
525 static struct cdnsp_stream_ctx
526 	*cdnsp_alloc_stream_ctx(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
527 {
528 	size_t size = sizeof(struct cdnsp_stream_ctx) *
529 		      pep->stream_info.num_stream_ctxs;
530 
531 	if (size > CDNSP_CTX_SIZE)
532 		return NULL;
533 
534 	/*
535 	 * The driver intentionally uses the device_pool to allocate the
536 	 * stream context array. A device pool entry is 2048 bytes, which
537 	 * gives room for 128 entries.
538 	 */
539 	return dma_pool_zalloc(pdev->device_pool, GFP_DMA32 | GFP_ATOMIC,
540 			       &pep->stream_info.ctx_array_dma);
541 }
542 
543 struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *pep, u64 address)
544 {
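	/*
	 * The radix tree is keyed by the upper bits of the TRB DMA address;
	 * see the comment above cdnsp_insert_segment_mapping().
	 */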
545 	if (pep->ep_state & EP_HAS_STREAMS)
546 		return radix_tree_lookup(&pep->stream_info.trb_address_map,
547 					 address >> TRB_SEGMENT_SHIFT);
548 
549 	return pep->ring;
550 }
551 
552 /*
553  * Change an endpoint's internal structure so it supports stream IDs.
554  * The number of requested streams includes stream 0, which cannot be used by
555  * the driver.
556  *
557  * The number of stream contexts in the stream context array may be bigger than
558  * the number of streams the driver wants to use. This is because the number of
559  * stream context array entries must be a power of two.
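 *
 * For example, if the driver can use 6 streams, 7 stream IDs are needed
 * (stream 0 included), so the context array is rounded up to 8 entries.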
560  */
561 int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
562 			    struct cdnsp_ep *pep,
563 			    unsigned int num_stream_ctxs,
564 			    unsigned int num_streams)
565 {
566 	struct cdnsp_stream_info *stream_info;
567 	struct cdnsp_ring *cur_ring;
568 	u32 cur_stream;
569 	u64 addr;
570 	int ret;
571 	int mps;
572 
573 	stream_info = &pep->stream_info;
574 	stream_info->num_streams = num_streams;
575 	stream_info->num_stream_ctxs = num_stream_ctxs;
576 
577 	/* Initialize the array of virtual pointers to stream rings. */
578 	stream_info->stream_rings = kcalloc(num_streams,
579 					    sizeof(struct cdnsp_ring *),
580 					    GFP_ATOMIC);
581 	if (!stream_info->stream_rings)
582 		return -ENOMEM;
583 
584 	/* Initialize the array of DMA addresses for stream rings for the HW. */
585 	stream_info->stream_ctx_array = cdnsp_alloc_stream_ctx(pdev, pep);
586 	if (!stream_info->stream_ctx_array)
587 		goto cleanup_stream_rings;
588 
589 	memset(stream_info->stream_ctx_array, 0,
590 	       sizeof(struct cdnsp_stream_ctx) * num_stream_ctxs);
591 	INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
592 	mps = usb_endpoint_maxp(pep->endpoint.desc);
593 
594 	/*
595 	 * Allocate rings for all the streams that the driver will use,
596 	 * and add their segment DMA addresses to the radix tree.
597 	 * Stream 0 is reserved.
598 	 */
599 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
600 		cur_ring = cdnsp_ring_alloc(pdev, 2, TYPE_STREAM, mps,
601 					    GFP_ATOMIC);
602 		stream_info->stream_rings[cur_stream] = cur_ring;
603 
604 		if (!cur_ring)
605 			goto cleanup_rings;
606 
607 		cur_ring->stream_id = cur_stream;
608 		cur_ring->trb_address_map = &stream_info->trb_address_map;
609 
610 		/* Set deq ptr, cycle bit, and stream context type. */
611 		addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) |
612 		       cur_ring->cycle_state;
613 
614 		stream_info->stream_ctx_array[cur_stream].stream_ring =
615 			cpu_to_le64(addr);
616 
617 		trace_cdnsp_set_stream_ring(cur_ring);
618 
619 		ret = cdnsp_update_stream_mapping(cur_ring);
620 		if (ret)
621 			goto cleanup_rings;
622 	}
623 
624 	return 0;
625 
626 cleanup_rings:
627 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
628 		cur_ring = stream_info->stream_rings[cur_stream];
629 		if (cur_ring) {
630 			cdnsp_ring_free(pdev, cur_ring);
631 			stream_info->stream_rings[cur_stream] = NULL;
632 		}
633 	}
634 
635 cleanup_stream_rings:
636 	kfree(pep->stream_info.stream_rings);
637 
638 	return -ENOMEM;
639 }
640 
641 /* Frees all stream contexts associated with the endpoint. */
642 static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
643 				   struct cdnsp_ep *pep)
644 {
645 	struct cdnsp_stream_info *stream_info = &pep->stream_info;
646 	struct cdnsp_ring *cur_ring;
647 	int cur_stream;
648 
649 	if (!(pep->ep_state & EP_HAS_STREAMS))
650 		return;
651 
652 	for (cur_stream = 1; cur_stream < stream_info->num_streams;
653 	     cur_stream++) {
654 		cur_ring = stream_info->stream_rings[cur_stream];
655 		if (cur_ring) {
656 			cdnsp_ring_free(pdev, cur_ring);
657 			stream_info->stream_rings[cur_stream] = NULL;
658 		}
659 	}
660 
661 	if (stream_info->stream_ctx_array)
662 		cdnsp_free_stream_ctx(pdev, pep);
663 
664 	kfree(stream_info->stream_rings);
665 	pep->ep_state &= ~EP_HAS_STREAMS;
666 }
667 
668 /* All the cdnsp_tds in the ring's TD list should be freed at this point. */
669 static void cdnsp_free_priv_device(struct cdnsp_device *pdev)
670 {
671 	pdev->dcbaa->dev_context_ptrs[1] = 0;
672 
673 	cdnsp_free_endpoint_rings(pdev, &pdev->eps[0]);
674 
675 	if (pdev->in_ctx.bytes)
676 		dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
677 			      pdev->in_ctx.dma);
678 
679 	if (pdev->out_ctx.bytes)
680 		dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
681 			      pdev->out_ctx.dma);
682 
683 	pdev->in_ctx.bytes = NULL;
684 	pdev->out_ctx.bytes = NULL;
685 }
686 
687 static int cdnsp_alloc_priv_device(struct cdnsp_device *pdev)
688 {
689 	int ret = -ENOMEM;
690 
691 	ret = cdnsp_init_device_ctx(pdev);
692 	if (ret)
693 		return ret;
694 
695 	/* Allocate endpoint 0 ring. */
696 	pdev->eps[0].ring = cdnsp_ring_alloc(pdev, 2, TYPE_CTRL, 0, GFP_ATOMIC);
697 	if (!pdev->eps[0].ring)
698 		goto fail;
699 
700 	/* Point to output device context in dcbaa. */
701 	pdev->dcbaa->dev_context_ptrs[1] = cpu_to_le64(pdev->out_ctx.dma);
702 	pdev->cmd.in_ctx = &pdev->in_ctx;
703 
704 	trace_cdnsp_alloc_priv_device(pdev);
705 	return 0;
706 fail:
707 	dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
708 		      pdev->out_ctx.dma);
709 	dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
710 		      pdev->in_ctx.dma);
711 
712 	return ret;
713 }
714 
715 void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev)
716 {
717 	struct cdnsp_ep_ctx *ep0_ctx = pdev->eps[0].in_ctx;
718 	struct cdnsp_ring *ep_ring = pdev->eps[0].ring;
719 	dma_addr_t dma;
720 
721 	dma = cdnsp_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
722 	ep0_ctx->deq = cpu_to_le64(dma | ep_ring->cycle_state);
723 }
724 
725 /* Set up a controller private device for a Set Address command. */
726 int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev)
727 {
728 	struct cdnsp_slot_ctx *slot_ctx;
729 	struct cdnsp_ep_ctx *ep0_ctx;
730 	u32 max_packets, port;
731 
732 	ep0_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, 0);
733 	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
734 
735 	/* Only the control endpoint is valid - one endpoint context. */
736 	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
737 
738 	switch (pdev->gadget.speed) {
739 	case USB_SPEED_SUPER_PLUS:
740 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
741 		max_packets = MAX_PACKET(512);
742 		break;
743 	case USB_SPEED_SUPER:
744 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
745 		max_packets = MAX_PACKET(512);
746 		break;
747 	case USB_SPEED_HIGH:
748 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
749 		max_packets = MAX_PACKET(64);
750 		break;
751 	case USB_SPEED_FULL:
752 		slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
753 		max_packets = MAX_PACKET(64);
754 		break;
755 	default:
756 		/* Speed was not set; this shouldn't happen. */
757 		return -EINVAL;
758 	}
759 
760 	port = DEV_PORT(pdev->active_port->port_num);
761 	slot_ctx->dev_port |= cpu_to_le32(port);
762 	slot_ctx->dev_state = cpu_to_le32((pdev->device_address &
763 					   DEV_ADDR_MASK));
764 	ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(0x8));
765 	ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
766 	ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
767 					 max_packets);
768 
769 	ep0_ctx->deq = cpu_to_le64(pdev->eps[0].ring->first_seg->dma |
770 				   pdev->eps[0].ring->cycle_state);
771 
772 	trace_cdnsp_setup_addressable_priv_device(pdev);
773 
774 	return 0;
775 }
776 
777 /*
778  * Convert interval expressed as 2^(bInterval - 1) == interval into
779  * straight exponent value 2^n == interval.
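 *
 * For example, a high-speed interrupt endpoint with bInterval == 4 is
 * serviced every 2^(4 - 1) == 8 microframes, so the exponent returned is 3.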
780  */
781 static unsigned int cdnsp_parse_exponent_interval(struct usb_gadget *g,
782 						  struct cdnsp_ep *pep)
783 {
784 	unsigned int interval;
785 
786 	interval = clamp_val(pep->endpoint.desc->bInterval, 1, 16) - 1;
787 	if (interval != pep->endpoint.desc->bInterval - 1)
788 		dev_warn(&g->dev, "ep %s - rounding interval to %d %sframes\n",
789 			 pep->name, 1 << interval,
790 			 g->speed == USB_SPEED_FULL ? "" : "micro");
791 
792 	/*
793 	 * Full speed isoc endpoints specify interval in frames,
794 	 * not microframes. We are using microframes everywhere,
795 	 * so adjust accordingly.
796 	 */
797 	if (g->speed == USB_SPEED_FULL)
798 		interval += 3;	/* 1 frame = 2^3 uframes */
799 
800 	/* Controller handles only up to 512ms (2^12). */
801 	if (interval > 12)
802 		interval = 12;
803 
804 	return interval;
805 }
806 
807 /*
808  * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
809  * microframes, rounded down to nearest power of 2.
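 *
 * For example, a full-speed interrupt endpoint with bInterval == 3 frames
 * gives 24 microframes; fls(24) - 1 == 4, so the endpoint ends up being
 * polled every 2^4 == 16 microframes.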
810  */
811 static unsigned int cdnsp_microframes_to_exponent(struct usb_gadget *g,
812 						  struct cdnsp_ep *pep,
813 						  unsigned int desc_interval,
814 						  unsigned int min_exponent,
815 						  unsigned int max_exponent)
816 {
817 	unsigned int interval;
818 
819 	interval = fls(desc_interval) - 1;
820 	return clamp_val(interval, min_exponent, max_exponent);
821 }
822 
823 /*
824  * Return the polling interval.
825  *
826  * The polling interval is expressed in "microframes". If the controller's Interval
827  * field is set to N, it will service the endpoint every 2^(Interval)*125us.
828  */
829 static unsigned int cdnsp_get_endpoint_interval(struct usb_gadget *g,
830 						struct cdnsp_ep *pep)
831 {
832 	unsigned int interval = 0;
833 
834 	switch (g->speed) {
835 	case USB_SPEED_HIGH:
836 	case USB_SPEED_SUPER_PLUS:
837 	case USB_SPEED_SUPER:
838 		if (usb_endpoint_xfer_int(pep->endpoint.desc) ||
839 		    usb_endpoint_xfer_isoc(pep->endpoint.desc))
840 			interval = cdnsp_parse_exponent_interval(g, pep);
841 		break;
842 	case USB_SPEED_FULL:
843 		if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
844 			interval = cdnsp_parse_exponent_interval(g, pep);
845 		} else if (usb_endpoint_xfer_int(pep->endpoint.desc)) {
846 			interval = pep->endpoint.desc->bInterval << 3;
847 			interval = cdnsp_microframes_to_exponent(g, pep,
848 								 interval,
849 								 3, 10);
850 		}
851 
852 		break;
853 	default:
854 		WARN_ON(1);
855 	}
856 
857 	return interval;
858 }
859 
860 /*
861  * The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
862  * High speed endpoint descriptors can define "the number of additional
863  * transaction opportunities per microframe", but that goes in the Max Burst
864  * endpoint context field.
865  */
866 static u32 cdnsp_get_endpoint_mult(struct usb_gadget *g, struct cdnsp_ep *pep)
867 {
868 	if (g->speed < USB_SPEED_SUPER ||
869 	    !usb_endpoint_xfer_isoc(pep->endpoint.desc))
870 		return 0;
871 
872 	return pep->endpoint.comp_desc->bmAttributes;
873 }
874 
875 static u32 cdnsp_get_endpoint_max_burst(struct usb_gadget *g,
876 					struct cdnsp_ep *pep)
877 {
878 	/* SuperSpeed and SuperSpeedPlus have max burst in the ep companion desc. */
879 	if (g->speed >= USB_SPEED_SUPER)
880 		return pep->endpoint.comp_desc->bMaxBurst;
881 
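	/*
	 * For high-speed periodic endpoints, bits 12:11 of wMaxPacketSize
	 * encode the number of additional transactions per microframe (0-2).
	 */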
882 	if (g->speed == USB_SPEED_HIGH &&
883 	    (usb_endpoint_xfer_isoc(pep->endpoint.desc) ||
884 	     usb_endpoint_xfer_int(pep->endpoint.desc)))
885 		return (usb_endpoint_maxp(pep->endpoint.desc) & 0x1800) >> 11;
886 
887 	return 0;
888 }
889 
890 static u32 cdnsp_get_endpoint_type(const struct usb_endpoint_descriptor *desc)
891 {
892 	int in;
893 
894 	in = usb_endpoint_dir_in(desc);
895 
896 	switch (usb_endpoint_type(desc)) {
897 	case USB_ENDPOINT_XFER_CONTROL:
898 		return CTRL_EP;
899 	case USB_ENDPOINT_XFER_BULK:
900 		return in ? BULK_IN_EP : BULK_OUT_EP;
901 	case USB_ENDPOINT_XFER_ISOC:
902 		return in ? ISOC_IN_EP : ISOC_OUT_EP;
903 	case USB_ENDPOINT_XFER_INT:
904 		return in ? INT_IN_EP : INT_OUT_EP;
905 	}
906 
907 	return 0;
908 }
909 
910 /*
911  * Return the maximum endpoint service interval time (ESIT) payload.
912  * Basically, this is the maxpacket size, multiplied by the burst size
913  * and mult size.
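 *
 * For example, a high-speed isochronous endpoint with a 1024-byte max packet
 * and three transactions per microframe can move up to 3 * 1024 == 3072 bytes
 * per ESIT.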
914  */
915 static u32 cdnsp_get_max_esit_payload(struct usb_gadget *g,
916 				      struct cdnsp_ep *pep)
917 {
918 	int max_packet;
919 	int max_burst;
920 
921 	/* Only applies to interrupt or isochronous endpoints. */
922 	if (usb_endpoint_xfer_control(pep->endpoint.desc) ||
923 	    usb_endpoint_xfer_bulk(pep->endpoint.desc))
924 		return 0;
925 
926 	/* SuperSpeedPlus Isoc ep sending over 48k per ESIT. */
927 	if (g->speed >= USB_SPEED_SUPER_PLUS &&
928 	    USB_SS_SSP_ISOC_COMP(pep->endpoint.desc->bmAttributes))
929 		return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
930 	/* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per ESIT. */
931 	else if (g->speed >= USB_SPEED_SUPER)
932 		return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
933 
934 	max_packet = usb_endpoint_maxp(pep->endpoint.desc);
935 	max_burst = usb_endpoint_maxp_mult(pep->endpoint.desc);
936 
937 	/* usb_endpoint_maxp_mult() returns at least 1, one transfer per interval. */
938 	return max_packet * max_burst;
939 }
940 
941 int cdnsp_endpoint_init(struct cdnsp_device *pdev,
942 			struct cdnsp_ep *pep,
943 			gfp_t mem_flags)
944 {
945 	enum cdnsp_ring_type ring_type;
946 	struct cdnsp_ep_ctx *ep_ctx;
947 	unsigned int err_count = 0;
948 	unsigned int avg_trb_len;
949 	unsigned int max_packet;
950 	unsigned int max_burst;
951 	unsigned int interval;
952 	u32 max_esit_payload;
953 	unsigned int mult;
954 	u32 endpoint_type;
955 	int ret;
956 
957 	ep_ctx = pep->in_ctx;
958 
959 	endpoint_type = cdnsp_get_endpoint_type(pep->endpoint.desc);
960 	if (!endpoint_type)
961 		return -EINVAL;
962 
963 	ring_type = usb_endpoint_type(pep->endpoint.desc);
964 
965 	/*
966 	 * Get values to fill the endpoint context, mostly from ep descriptor.
967 	 * The average TRB buffer length for bulk endpoints is unclear as we
968 	 * have no clue about the scatter-gather list entry size. For Isoc and Int,
969 	 * set it to max available.
970 	 */
971 	max_esit_payload = cdnsp_get_max_esit_payload(&pdev->gadget, pep);
972 	interval = cdnsp_get_endpoint_interval(&pdev->gadget, pep);
973 	mult = cdnsp_get_endpoint_mult(&pdev->gadget, pep);
974 	max_packet = usb_endpoint_maxp(pep->endpoint.desc);
975 	max_burst = cdnsp_get_endpoint_max_burst(&pdev->gadget, pep);
976 	avg_trb_len = max_esit_payload;
977 
978 	/* Allow 3 retries for everything but isoc, set CErr = 3. */
979 	if (!usb_endpoint_xfer_isoc(pep->endpoint.desc))
980 		err_count = 3;
981 	if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
982 	    pdev->gadget.speed == USB_SPEED_HIGH)
983 		max_packet = 512;
984 	/* Controller spec indicates that ctrl ep avg TRB Length should be 8. */
985 	if (usb_endpoint_xfer_control(pep->endpoint.desc))
986 		avg_trb_len = 8;
987 
988 	/* Set up the endpoint ring. */
989 	pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags);
990 	pep->skip = false;
991 
992 	/* Fill the endpoint context */
993 	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
994 				EP_INTERVAL(interval) | EP_MULT(mult));
995 	ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
996 				MAX_PACKET(max_packet) | MAX_BURST(max_burst) |
997 				ERROR_COUNT(err_count));
998 	ep_ctx->deq = cpu_to_le64(pep->ring->first_seg->dma |
999 				  pep->ring->cycle_state);
1000 
1001 	ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
1002 				EP_AVG_TRB_LENGTH(avg_trb_len));
1003 
1004 	if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
1005 	    pdev->gadget.speed > USB_SPEED_HIGH) {
1006 		ret = cdnsp_alloc_streams(pdev, pep);
1007 		if (ret < 0)
1008 			return ret;
1009 	}
1010 
1011 	return 0;
1012 }
1013 
1014 void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
1015 {
1016 	pep->in_ctx->ep_info = 0;
1017 	pep->in_ctx->ep_info2 = 0;
1018 	pep->in_ctx->deq = 0;
1019 	pep->in_ctx->tx_info = 0;
1020 }
1021 
1022 static int cdnsp_alloc_erst(struct cdnsp_device *pdev,
1023 			    struct cdnsp_ring *evt_ring,
1024 			    struct cdnsp_erst *erst)
1025 {
1026 	struct cdnsp_erst_entry *entry;
1027 	struct cdnsp_segment *seg;
1028 	unsigned int val;
1029 	size_t size;
1030 
1031 	size = sizeof(struct cdnsp_erst_entry) * evt_ring->num_segs;
1032 	erst->entries = dma_alloc_coherent(pdev->dev, size,
1033 					   &erst->erst_dma_addr, GFP_KERNEL);
1034 	if (!erst->entries)
1035 		return -ENOMEM;
1036 
1037 	erst->num_entries = evt_ring->num_segs;
1038 
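	/* Each ERST entry describes one event ring segment: base and size. */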
1039 	seg = evt_ring->first_seg;
1040 	for (val = 0; val < evt_ring->num_segs; val++) {
1041 		entry = &erst->entries[val];
1042 		entry->seg_addr = cpu_to_le64(seg->dma);
1043 		entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
1044 		entry->rsvd = 0;
1045 		seg = seg->next;
1046 	}
1047 
1048 	return 0;
1049 }
1050 
1051 static void cdnsp_free_erst(struct cdnsp_device *pdev, struct cdnsp_erst *erst)
1052 {
1053 	size_t size = sizeof(struct cdnsp_erst_entry) * (erst->num_entries);
1054 	struct device *dev = pdev->dev;
1055 
1056 	if (erst->entries)
1057 		dma_free_coherent(dev, size, erst->entries,
1058 				  erst->erst_dma_addr);
1059 
1060 	erst->entries = NULL;
1061 }
1062 
1063 void cdnsp_mem_cleanup(struct cdnsp_device *pdev)
1064 {
1065 	struct device *dev = pdev->dev;
1066 
1067 	cdnsp_free_priv_device(pdev);
1068 	cdnsp_free_erst(pdev, &pdev->erst);
1069 
1070 	if (pdev->event_ring)
1071 		cdnsp_ring_free(pdev, pdev->event_ring);
1072 
1073 	pdev->event_ring = NULL;
1074 
1075 	if (pdev->cmd_ring)
1076 		cdnsp_ring_free(pdev, pdev->cmd_ring);
1077 
1078 	pdev->cmd_ring = NULL;
1079 
1080 	dma_pool_destroy(pdev->segment_pool);
1081 	pdev->segment_pool = NULL;
1082 	dma_pool_destroy(pdev->device_pool);
1083 	pdev->device_pool = NULL;
1084 
1085 	if (pdev->dcbaa)
1086 		dma_free_coherent(dev, sizeof(*pdev->dcbaa),
1087 				  pdev->dcbaa, pdev->dcbaa->dma);
1088 
1089 	pdev->dcbaa = NULL;
1090 
1091 	pdev->usb2_port.exist = 0;
1092 	pdev->usb3_port.exist = 0;
1093 	pdev->usb2_port.port_num = 0;
1094 	pdev->usb3_port.port_num = 0;
1095 	pdev->active_port = NULL;
1096 }
1097 
1098 static void cdnsp_set_event_deq(struct cdnsp_device *pdev)
1099 {
1100 	dma_addr_t deq;
1101 	u64 temp;
1102 
1103 	deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
1104 				    pdev->event_ring->dequeue);
1105 
1106 	/* Update controller event ring dequeue pointer */
1107 	temp = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
1108 	temp &= ERST_PTR_MASK;
1109 
1110 	/*
1111 	 * Don't clear the EHB bit (which is RW1C) because
1112 	 * there might be more events to service.
1113 	 */
1114 	temp &= ~ERST_EHB;
1115 
1116 	cdnsp_write_64(((u64)deq & (u64)~ERST_PTR_MASK) | temp,
1117 		       &pdev->ir_set->erst_dequeue);
1118 }
1119 
1120 static void cdnsp_add_in_port(struct cdnsp_device *pdev,
1121 			      struct cdnsp_port *port,
1122 			      __le32 __iomem *addr)
1123 {
1124 	u32 temp, port_offset, port_count;
1125 
1126 	temp = readl(addr);
1127 	port->maj_rev = CDNSP_EXT_PORT_MAJOR(temp);
1128 	port->min_rev = CDNSP_EXT_PORT_MINOR(temp);
1129 
1130 	/* Port offset and count are in the third dword. */
1131 	temp = readl(addr + 2);
1132 	port_offset = CDNSP_EXT_PORT_OFF(temp);
1133 	port_count = CDNSP_EXT_PORT_COUNT(temp);
1134 
1135 	trace_cdnsp_port_info(addr, port_offset, port_count, port->maj_rev);
1136 
1137 	port->port_num = port_offset;
1138 	port->exist = 1;
1139 }
1140 
1141 /*
1142  * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
1143  * specify which speeds each port supports.
1144  */
1145 static int cdnsp_setup_port_arrays(struct cdnsp_device *pdev)
1146 {
1147 	void __iomem *base;
1148 	u32 offset;
1149 	int i;
1150 
1151 	base = &pdev->cap_regs->hc_capbase;
1152 	offset = cdnsp_find_next_ext_cap(base, 0,
1153 					 EXT_CAP_CFG_DEV_20PORT_CAP_ID);
1154 	pdev->port20_regs = base + offset;
1155 
1156 	offset = cdnsp_find_next_ext_cap(base, 0, D_XEC_CFG_3XPORT_CAP);
1157 	pdev->port3x_regs = base + offset;
1158 
1159 	offset = 0;
1160 	base = &pdev->cap_regs->hc_capbase;
1161 
1162 	/* The driver expects at most 2 extended protocol capabilities. */
1163 	for (i = 0; i < 2; i++) {
1164 		u32 temp;
1165 
1166 		offset = cdnsp_find_next_ext_cap(base, offset,
1167 						 EXT_CAPS_PROTOCOL);
1168 		temp = readl(base + offset);
1169 
1170 		if (CDNSP_EXT_PORT_MAJOR(temp) == 0x03 &&
1171 		    !pdev->usb3_port.port_num)
1172 			cdnsp_add_in_port(pdev, &pdev->usb3_port,
1173 					  base + offset);
1174 
1175 		if (CDNSP_EXT_PORT_MAJOR(temp) == 0x02 &&
1176 		    !pdev->usb2_port.port_num)
1177 			cdnsp_add_in_port(pdev, &pdev->usb2_port,
1178 					  base + offset);
1179 	}
1180 
1181 	if (!pdev->usb2_port.exist || !pdev->usb3_port.exist) {
1182 		dev_err(pdev->dev, "Error: Only one port detected\n");
1183 		return -ENODEV;
1184 	}
1185 
1186 	trace_cdnsp_init("Found USB 2.0 ports and USB 3.0 ports.");
1187 
1188 	pdev->usb2_port.regs = (struct cdnsp_port_regs __iomem *)
1189 			       (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
1190 				(pdev->usb2_port.port_num - 1));
1191 
1192 	pdev->usb3_port.regs = (struct cdnsp_port_regs __iomem *)
1193 			       (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
1194 				(pdev->usb3_port.port_num - 1));
1195 
1196 	return 0;
1197 }
1198 
1199 /*
1200  * Initialize memory for CDNSP (one-time init).
1201  *
1202  * Program the PAGESIZE register, initialize the device context array, create
1203  * device contexts, set up a command ring segment, and create the event
1204  * ring (one for now).
1205  */
1206 int cdnsp_mem_init(struct cdnsp_device *pdev)
1207 {
1208 	struct device *dev = pdev->dev;
1209 	int ret = -ENOMEM;
1210 	unsigned int val;
1211 	dma_addr_t dma;
1212 	u32 page_size;
1213 	u64 val_64;
1214 
1215 	/*
1216 	 * Use 4K pages, since that's common and the minimum the
1217 	 * controller supports
1218 	 */
1219 	page_size = 1 << 12;
1220 
1221 	val = readl(&pdev->op_regs->config_reg);
1222 	val |= ((val & ~MAX_DEVS) | CDNSP_DEV_MAX_SLOTS) | CONFIG_U3E;
1223 	writel(val, &pdev->op_regs->config_reg);
1224 
1225 	/*
1226 	 * The device context base address array must be physically
1227 	 * contiguous and 64-byte (cache line) aligned.
1228 	 */
1229 	pdev->dcbaa = dma_alloc_coherent(dev, sizeof(*pdev->dcbaa),
1230 					 &dma, GFP_KERNEL);
1231 	if (!pdev->dcbaa)
1232 		return -ENOMEM;
1233 
1234 	memset(pdev->dcbaa, 0, sizeof(*pdev->dcbaa));
1235 	pdev->dcbaa->dma = dma;
1236 
1237 	cdnsp_write_64(dma, &pdev->op_regs->dcbaa_ptr);
1238 
1239 	/*
1240 	 * Initialize the ring segment pool. The ring must be a contiguous
1241 	 * structure comprised of TRBs. The TRBs must be 16-byte aligned;
1242 	 * however, the command ring segment needs 64-byte aligned segments
1243 	 * and our use of DMA addresses in the trb_address_map radix tree needs
1244 	 * TRB_SEGMENT_SIZE alignment, so the driver picks the greater
1245 	 * alignment requirement.
1246 	 */
1247 	pdev->segment_pool = dma_pool_create("CDNSP ring segments", dev,
1248 					     TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE,
1249 					     page_size);
1250 	if (!pdev->segment_pool)
1251 		goto release_dcbaa;
1252 
1253 	pdev->device_pool = dma_pool_create("CDNSP input/output contexts", dev,
1254 					    CDNSP_CTX_SIZE, 64, page_size);
1255 	if (!pdev->device_pool)
1256 		goto destroy_segment_pool;
1257 
1258 
1259 	/* Set up the command ring to have one segment for now. */
1260 	pdev->cmd_ring = cdnsp_ring_alloc(pdev, 1, TYPE_COMMAND, 0, GFP_KERNEL);
1261 	if (!pdev->cmd_ring)
1262 		goto destroy_device_pool;
1263 
1264 	/* Set the address in the Command Ring Control register */
1265 	val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
1266 	val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
1267 		 (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
1268 		 pdev->cmd_ring->cycle_state;
1269 	cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
1270 
1271 	val = readl(&pdev->cap_regs->db_off);
1272 	val &= DBOFF_MASK;
1273 	pdev->dba = (void __iomem *)pdev->cap_regs + val;
1274 
1275 	/* Set ir_set to interrupt register set 0 */
1276 	pdev->ir_set = &pdev->run_regs->ir_set[0];
1277 
1278 	/*
1279 	 * Event ring setup: Allocate a normal ring, but also set up
1280 	 * the event ring segment table (ERST).
1281 	 */
1282 	pdev->event_ring = cdnsp_ring_alloc(pdev, ERST_NUM_SEGS, TYPE_EVENT,
1283 					    0, GFP_KERNEL);
1284 	if (!pdev->event_ring)
1285 		goto free_cmd_ring;
1286 
1287 	ret = cdnsp_alloc_erst(pdev, pdev->event_ring, &pdev->erst);
1288 	if (ret)
1289 		goto free_event_ring;
1290 
1291 	/* Set ERST count with the number of entries in the segment table. */
1292 	val = readl(&pdev->ir_set->erst_size);
1293 	val &= ERST_SIZE_MASK;
1294 	val |= ERST_NUM_SEGS;
1295 	writel(val, &pdev->ir_set->erst_size);
1296 
1297 	/* Set the segment table base address. */
1298 	val_64 = cdnsp_read_64(&pdev->ir_set->erst_base);
1299 	val_64 &= ERST_PTR_MASK;
1300 	val_64 |= (pdev->erst.erst_dma_addr & (u64)~ERST_PTR_MASK);
1301 	cdnsp_write_64(val_64, &pdev->ir_set->erst_base);
1302 
1303 	/* Set the event ring dequeue address. */
1304 	cdnsp_set_event_deq(pdev);
1305 
1306 	ret = cdnsp_setup_port_arrays(pdev);
1307 	if (ret)
1308 		goto free_erst;
1309 
1310 	ret = cdnsp_alloc_priv_device(pdev);
1311 	if (ret) {
1312 		dev_err(pdev->dev,
1313 			"Could not allocate cdnsp_device data structures\n");
1314 		goto free_erst;
1315 	}
1316 
1317 	return 0;
1318 
1319 free_erst:
1320 	cdnsp_free_erst(pdev, &pdev->erst);
1321 free_event_ring:
1322 	cdnsp_ring_free(pdev, pdev->event_ring);
1323 free_cmd_ring:
1324 	cdnsp_ring_free(pdev, pdev->cmd_ring);
1325 destroy_device_pool:
1326 	dma_pool_destroy(pdev->device_pool);
1327 destroy_segment_pool:
1328 	dma_pool_destroy(pdev->segment_pool);
1329 release_dcbaa:
1330 	dma_free_coherent(dev, sizeof(*pdev->dcbaa), pdev->dcbaa,
1331 			  pdev->dcbaa->dma);
1332 
1333 	cdnsp_reset(pdev);
1334 
1335 	return ret;
1336 }
1337