/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

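/*
 * Small index helpers: frame list and descriptor list indices wrap at
 * FRLISTEN_64_SIZE and at the (power-of-two) descriptor list length, so
 * rollover is done with a simple mask.
 */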
static u16 dwc2_frame_list_idx(u16 frame)
{
        return frame & (FRLISTEN_64_SIZE - 1);
}

static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
        return (idx + inc) &
                ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
                  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
        return (idx - inc) &
                ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
                  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
        return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
                qh->dev_speed == USB_SPEED_HIGH) ?
                MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
        return qh->dev_speed == USB_SPEED_HIGH ?
               (qh->interval + 8 - 1) / 8 : qh->interval;
}

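/*
 * Allocate the coherent DMA descriptor list and the per-descriptor byte
 * count array for a QH. Undone by dwc2_desc_list_free().
 */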
static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                                gfp_t flags)
{
        qh->desc_list = dma_alloc_coherent(hsotg->dev,
                                sizeof(struct dwc2_hcd_dma_desc) *
                                dwc2_max_desc_num(qh), &qh->desc_list_dma,
                                flags);

        if (!qh->desc_list)
                return -ENOMEM;

        memset(qh->desc_list, 0,
               sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh));

        qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags);
        if (!qh->n_bytes) {
                dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
                                  * dwc2_max_desc_num(qh), qh->desc_list,
                                  qh->desc_list_dma);
                qh->desc_list = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        if (qh->desc_list) {
                dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
                                  * dwc2_max_desc_num(qh), qh->desc_list,
                                  qh->desc_list_dma);
                qh->desc_list = NULL;
        }

        kfree(qh->n_bytes);
        qh->n_bytes = NULL;
}

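/*
 * Allocate/free the 64-entry frame list shared by all periodic QHs. It is
 * allocated once, for the first periodic QH, and programmed into HFLBADDR
 * by dwc2_per_sched_enable().
 */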
static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
        if (hsotg->frame_list)
                return 0;

        hsotg->frame_list = dma_alloc_coherent(hsotg->dev,
                                               4 * FRLISTEN_64_SIZE,
                                               &hsotg->frame_list_dma,
                                               mem_flags);
        if (!hsotg->frame_list)
                return -ENOMEM;

        memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE);
        return 0;
}

static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
        u32 *frame_list;
        dma_addr_t frame_list_dma;
        unsigned long flags;

        spin_lock_irqsave(&hsotg->lock, flags);

        if (!hsotg->frame_list) {
                spin_unlock_irqrestore(&hsotg->lock, flags);
                return;
        }

        frame_list = hsotg->frame_list;
        frame_list_dma = hsotg->frame_list_dma;
        hsotg->frame_list = NULL;

        spin_unlock_irqrestore(&hsotg->lock, flags);

        dma_free_coherent(hsotg->dev, 4 * FRLISTEN_64_SIZE, frame_list,
                          frame_list_dma);
}

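/*
 * Enable/disable the periodic schedule in HCFG. Enabling also programs the
 * frame list base address (HFLBADDR) and the frame list length field.
 */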
static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
        u32 hcfg;
        unsigned long flags;

        spin_lock_irqsave(&hsotg->lock, flags);

        hcfg = readl(hsotg->regs + HCFG);
        if (hcfg & HCFG_PERSCHEDENA) {
                /* already enabled */
                spin_unlock_irqrestore(&hsotg->lock, flags);
                return;
        }

        writel(hsotg->frame_list_dma, hsotg->regs + HFLBADDR);

        hcfg &= ~HCFG_FRLISTEN_MASK;
        hcfg |= fr_list_en | HCFG_PERSCHEDENA;
        dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
        writel(hcfg, hsotg->regs + HCFG);

        spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
        u32 hcfg;
        unsigned long flags;

        spin_lock_irqsave(&hsotg->lock, flags);

        hcfg = readl(hsotg->regs + HCFG);
        if (!(hcfg & HCFG_PERSCHEDENA)) {
                /* already disabled */
                spin_unlock_irqrestore(&hsotg->lock, flags);
                return;
        }

        hcfg &= ~HCFG_PERSCHEDENA;
        dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
        writel(hcfg, hsotg->regs + HCFG);

        spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                                   int enable)
{
        struct dwc2_host_chan *chan;
        u16 i, j, inc;

        if (!hsotg) {
                pr_err("hsotg = %p\n", hsotg);
                return;
        }

        if (!qh->channel) {
                dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
                return;
        }

        if (!hsotg->frame_list) {
                dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
                        hsotg->frame_list);
                return;
        }

        chan = qh->channel;
        inc = dwc2_frame_incr_val(qh);
        if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
                i = dwc2_frame_list_idx(qh->sched_frame);
        else
                i = 0;

        j = i;
        do {
                if (enable)
                        hsotg->frame_list[j] |= 1 << chan->hc_num;
                else
                        hsotg->frame_list[j] &= ~(1 << chan->hc_num);
                j = (j + inc) & (FRLISTEN_64_SIZE - 1);
        } while (j != i);

        if (!enable)
                return;

        chan->schinfo = 0;
        if (chan->speed == USB_SPEED_HIGH && qh->interval) {
                j = 1;
                /* TODO - check this */
                inc = (8 + qh->interval - 1) / qh->interval;
                for (i = 0; i < inc; i++) {
                        chan->schinfo |= j;
                        j = j << qh->interval;
                }
        } else {
                chan->schinfo = 0xff;
        }
}

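/*
 * Return the host channel assigned to a QH to the free list, update the
 * channel accounting (or clear the FrameList entries for periodic QHs) and
 * wipe the QH's descriptor list.
 */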
static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
                                      struct dwc2_qh *qh)
{
        struct dwc2_host_chan *chan = qh->channel;

        if (dwc2_qh_is_non_per(qh)) {
                if (hsotg->core_params->uframe_sched > 0)
                        hsotg->available_host_channels++;
                else
                        hsotg->non_periodic_channels--;
        } else {
                dwc2_update_frame_list(hsotg, qh, 0);
        }

        /*
         * The condition is added to prevent a double cleanup attempt in case
         * of device disconnect. See channel cleanup in dwc2_hcd_disconnect().
         */
        if (chan->qh) {
                if (!list_empty(&chan->hc_list_entry))
                        list_del(&chan->hc_list_entry);
                dwc2_hc_cleanup(hsotg, chan);
                list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
                chan->qh = NULL;
        }

        qh->channel = NULL;
        qh->ntd = 0;

        if (qh->desc_list)
                memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
                       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
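 * @mem_flags: Allocation flags (gfp_t) for the descriptor and frame lists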
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                          gfp_t mem_flags)
{
        int retval;

        if (qh->do_split) {
                dev_err(hsotg->dev,
                        "SPLIT Transfers are not supported in Descriptor DMA mode.\n");
                retval = -EINVAL;
                goto err0;
        }

        retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
        if (retval)
                goto err0;

        if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
            qh->ep_type == USB_ENDPOINT_XFER_INT) {
                if (!hsotg->frame_list) {
                        retval = dwc2_frame_list_alloc(hsotg, mem_flags);
                        if (retval)
                                goto err1;
                        /* Enable periodic schedule on first periodic QH */
                        dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
                }
        }

        qh->ntd = 0;
        return 0;

err1:
        dwc2_desc_list_free(hsotg, qh);
err0:
        return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to free
 *
 * Frees the descriptor list memory associated with the QH. If the QH is the
 * last periodic QH, also frees the FrameList memory and disables periodic
 * scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        dwc2_desc_list_free(hsotg, qh);

        /*
         * The channel may still be assigned here. This is seen on Isoc URB
         * dequeue: the channel is halted but no subsequent ChHalted interrupt
         * arrives to release it, so when this function is reached from the
         * endpoint disable routine the channel is still assigned.
         */
        if (qh->channel)
                dwc2_release_channel_ddma(hsotg, qh);

        if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
             qh->ep_type == USB_ENDPOINT_XFER_INT) &&
            (hsotg->core_params->uframe_sched > 0 ||
             !hsotg->periodic_channels) && hsotg->frame_list) {
                dwc2_per_sched_disable(hsotg);
                dwc2_frame_list_free(hsotg);
        }
}

static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
        if (qh->dev_speed == USB_SPEED_HIGH)
                /* Descriptor set (8 descriptors) index which is 8-aligned */
                return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
        else
                return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}

/*
 * Determine starting frame for Isochronous transfer.
 * Few frames skipped to prevent race condition with HC.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
                                    struct dwc2_qh *qh, u16 *skip_frames)
{
        u16 frame;

        hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

        /* sched_frame is always frame number (not uFrame) both in FS and HS! */

        /*
         * skip_frames is used to limit the number of activated descriptors,
         * to avoid the situation where the HC services the last activated
         * descriptor first.
         * Example for FS:
         * Current frame is 1, scheduled frame is 3. Since the HC always
         * fetches the descriptor corresponding to curr_frame+1, the
         * descriptor corresponding to frame 2 will be fetched. If the number
         * of descriptors is max=64 (or greater) the list will be fully
         * programmed with Active descriptors and it is possible (though rare)
         * that the latest descriptor (considering rollback) corresponding to
         * frame 2 will be serviced first. The HS case is more probable
         * because, in fact, up to 11 uframes (16 in the code) may be skipped.
         */
        if (qh->dev_speed == USB_SPEED_HIGH) {
                /*
                 * Consider uframe counter also, to start xfer asap. If half of
                 * the frame elapsed skip 2 frames otherwise just 1 frame.
                 * Starting descriptor index must be 8-aligned, so if the
                 * current frame is near to complete the next one is skipped as
                 * well.
                 */
                if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
                        *skip_frames = 2 * 8;
                        frame = dwc2_frame_num_inc(hsotg->frame_number,
                                                   *skip_frames);
                } else {
                        *skip_frames = 1 * 8;
                        frame = dwc2_frame_num_inc(hsotg->frame_number,
                                                   *skip_frames);
                }

                frame = dwc2_full_frame_num(frame);
        } else {
                /*
                 * Two frames are skipped for FS - the current and the next.
                 * But for descriptor programming, 1 frame (descriptor) is
                 * enough, see example above.
                 */
                *skip_frames = 1;
                frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
        }

        return frame;
}

/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
                                        struct dwc2_qh *qh)
{
        u16 frame, fr_idx, fr_idx_tmp, skip_frames;

        /*
         * With the current ISOC processing algorithm the channel is released
         * when there are no more QTDs in the list (qh->ntd == 0). Thus this
         * function is called only when qh->ntd == 0 and qh->channel == 0.
         *
         * So the qh->channel != NULL branch is not used; it is kept in the
         * source file for another possible approach: do not disable and
         * release the channel when the ISOC session completes, just move the
         * QH to the inactive schedule until a new QTD arrives. On a new QTD,
         * the QH is moved back to the 'ready' schedule, and the starting
         * frame and therefore the starting desc_index are recalculated. In
         * that case the channel is released only on ep_disable.
         */

        /*
         * Calculate starting descriptor index. For INTERRUPT endpoint it is
         * always 0.
         */
        if (qh->channel) {
                frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
                /*
                 * Calculate initial descriptor index based on FrameList current
                 * bitmap and servicing period
                 */
                fr_idx_tmp = dwc2_frame_list_idx(frame);
                fr_idx = (FRLISTEN_64_SIZE +
                          dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
                         % dwc2_frame_incr_val(qh);
                fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
        } else {
                qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
                                                           &skip_frames);
                fr_idx = dwc2_frame_list_idx(qh->sched_frame);
        }

        qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

        return skip_frames;
}

#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS   1023
#define MAX_ISOC_XFER_SIZE_HS   3072
#define DESCNUM_THRESHOLD       4

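/*
 * Fill one isochronous DMA descriptor from the QTD's current frame
 * descriptor, clamping the transfer size to max_xfer_size, and advance the
 * QTD's frame index.
 */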
static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
                                         struct dwc2_qtd *qtd,
                                         struct dwc2_qh *qh, u32 max_xfer_size,
                                         u16 idx)
{
        struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
        struct dwc2_hcd_iso_packet_desc *frame_desc;

        memset(dma_desc, 0, sizeof(*dma_desc));
        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

        if (frame_desc->length > max_xfer_size)
                qh->n_bytes[idx] = max_xfer_size;
        else
                qh->n_bytes[idx] = frame_desc->length;

        dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
        dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
                           HOST_DMA_ISOC_NBYTES_MASK;

#ifdef ISOC_URB_GIVEBACK_ASAP
        /* Set IOC for each descriptor corresponding to last frame of URB */
        if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
                dma_desc->status |= HOST_DMA_IOC;
#endif

        qh->ntd++;
        qtd->isoc_frame_index_last++;
}

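/*
 * Program isochronous DMA descriptors for all queued QTDs, spaced by the
 * endpoint interval, and set the Active/IOC bits as appropriate.
 */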
static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
                                    struct dwc2_qh *qh, u16 skip_frames)
{
        struct dwc2_qtd *qtd;
        u32 max_xfer_size;
        u16 idx, inc, n_desc, ntd_max = 0;

        idx = qh->td_last;
        inc = qh->interval;
        n_desc = 0;

        if (qh->interval) {
                ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
                                qh->interval;
                if (skip_frames && !qh->channel)
                        ntd_max -= skip_frames / qh->interval;
        }

        max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
                        MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

        list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
                while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
                                                qtd->urb->packet_count) {
                        if (n_desc > 1)
                                qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
                        dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
                                                     max_xfer_size, idx);
                        idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
                        n_desc++;
                }
                qtd->in_process = 1;
        }

        qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
        /* Set IOC for last descriptor if descriptor list is full */
        if (qh->ntd == ntd_max) {
                idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
                qh->desc_list[idx].status |= HOST_DMA_IOC;
        }
#else
        /*
         * Set IOC bit only for one descriptor. Always try to be ahead of HW
         * processing, i.e. on IOC generation the driver activates the next
         * descriptor but the core continues to process descriptors following
         * the one with IOC set.
         */

        if (n_desc > DESCNUM_THRESHOLD)
                /*
                 * Move IOC "up". Required even if there is only one QTD
                 * in the list, because QTDs might continue to be queued,
                 * but at activation time only one was queued. Actually more
                 * than one QTD might be in the list if this function is
                 * called from XferCompletion - QTDs were queued during HW
                 * processing of the previous descriptor chunk.
                 */
                idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
                                            qh->dev_speed);
        else
                /*
                 * Set the IOC for the latest descriptor if either the number
                 * of descriptors is not greater than the threshold or no more
                 * new descriptors were activated
                 */
                idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

        qh->desc_list[idx].status |= HOST_DMA_IOC;
#endif

        if (n_desc) {
                qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
                if (n_desc > 1)
                        qh->desc_list[0].status |= HOST_DMA_A;
        }
}

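/*
 * Fill one non-isochronous DMA descriptor for the channel's current transfer
 * position. IN transfers are always programmed with an integral number of
 * max-packet-size packets.
 */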
static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
                                    struct dwc2_host_chan *chan,
                                    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
                                    int n_desc)
{
        struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
        int len = chan->xfer_len;

        if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
                len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);

        if (chan->ep_is_in) {
                int num_packets;

                if (len > 0 && chan->max_packet)
                        num_packets = (len + chan->max_packet - 1)
                                        / chan->max_packet;
                else
                        /* Need 1 packet for transfer length of 0 */
                        num_packets = 1;

                /* Always program an integral # of packets for IN transfers */
                len = num_packets * chan->max_packet;
        }

        dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
        qh->n_bytes[n_desc] = len;

        if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
            qtd->control_phase == DWC2_CONTROL_SETUP)
                dma_desc->status |= HOST_DMA_SUP;

        dma_desc->buf = (u32)chan->xfer_dma;

        /*
         * Last (or only) descriptor of IN transfer with actual size less
         * than MaxPacket
         */
        if (len > chan->xfer_len) {
                chan->xfer_len = 0;
        } else {
                chan->xfer_dma += len;
                chan->xfer_len -= len;
        }
}

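/*
 * Build the descriptor chain for a Control/Bulk/Interrupt transfer from the
 * QH's QTD list and mark the first/last descriptors (A, IOC, EOL bits).
 */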
static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
                                        struct dwc2_qh *qh)
{
        struct dwc2_qtd *qtd;
        struct dwc2_host_chan *chan = qh->channel;
        int n_desc = 0;

        dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
                 (unsigned long)chan->xfer_dma, chan->xfer_len);

        /*
         * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
         * if SG transfer consists of multiple URBs, this pointer is re-assigned
         * to the buffer of the currently processed QTD. For non-SG request
         * there is always one QTD active.
         */

        list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
                dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

                if (n_desc) {
                        /* SG request - more than 1 QTD */
                        chan->xfer_dma = qtd->urb->dma +
                                        qtd->urb->actual_length;
                        chan->xfer_len = qtd->urb->length -
                                        qtd->urb->actual_length;
                        dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
                                 (unsigned long)chan->xfer_dma, chan->xfer_len);
                }

                qtd->n_desc = 0;
                do {
                        if (n_desc > 1) {
                                qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
                                dev_vdbg(hsotg->dev,
                                         "set A bit in desc %d (%p)\n",
                                         n_desc - 1,
                                         &qh->desc_list[n_desc - 1]);
                        }
                        dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
                        dev_vdbg(hsotg->dev,
                                 "desc %d (%p) buf=%08x status=%08x\n",
                                 n_desc, &qh->desc_list[n_desc],
                                 qh->desc_list[n_desc].buf,
                                 qh->desc_list[n_desc].status);
                        qtd->n_desc++;
                        n_desc++;
                } while (chan->xfer_len > 0 &&
                         n_desc != MAX_DMA_DESC_NUM_GENERIC);

                dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
                qtd->in_process = 1;
                if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
                        break;
                if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
                        break;
        }

        if (n_desc) {
                qh->desc_list[n_desc - 1].status |=
                                HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
                dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
                         n_desc - 1, &qh->desc_list[n_desc - 1]);
                if (n_desc > 1) {
                        qh->desc_list[0].status |= HOST_DMA_A;
                        dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
                                 &qh->desc_list[0]);
                }
                chan->ntd = n_desc;
        }
}

/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to start the transfer on
 *
 * For Control and Bulk endpoints, initializes the descriptor list and starts
 * the transfer. For Interrupt and Isochronous endpoints, initializes the
 * descriptor list then updates the FrameList, marking appropriate entries as
 * active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on the XferComplete
 * interrupt, so it remains assigned to the endpoint (QH) until the session is
 * done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        /* Channel is already assigned */
        struct dwc2_host_chan *chan = qh->channel;
        u16 skip_frames = 0;

        switch (chan->ep_type) {
        case USB_ENDPOINT_XFER_CONTROL:
        case USB_ENDPOINT_XFER_BULK:
                dwc2_init_non_isoc_dma_desc(hsotg, qh);
                dwc2_hc_start_transfer_ddma(hsotg, chan);
                break;
        case USB_ENDPOINT_XFER_INT:
                dwc2_init_non_isoc_dma_desc(hsotg, qh);
                dwc2_update_frame_list(hsotg, qh, 1);
                dwc2_hc_start_transfer_ddma(hsotg, chan);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                if (!qh->ntd)
                        skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
                dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

                if (!chan->xfer_started) {
                        dwc2_update_frame_list(hsotg, qh, 1);

                        /*
                         * Always set to max, instead of actual size. Otherwise
                         * ntd will be changed with channel being enabled. Not
                         * recommended.
                         */
                        chan->ntd = dwc2_max_desc_num(qh);

                        /* Enable channel only once for ISOC */
                        dwc2_hc_start_transfer_ddma(hsotg, chan);
                }

                break;
        default:
                break;
        }
}

#define DWC2_CMPL_DONE          1
#define DWC2_CMPL_STOP          2

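/*
 * Complete one isochronous descriptor: update the frame descriptor's status
 * and actual length, and give back the URB once all of its packets are done.
 * The DWC2_CMPL_* return codes tell the caller whether to keep scanning the
 * descriptor list.
 */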
static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
                                        struct dwc2_host_chan *chan,
                                        struct dwc2_qtd *qtd,
                                        struct dwc2_qh *qh, u16 idx)
{
        struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
        struct dwc2_hcd_iso_packet_desc *frame_desc;
        u16 remain = 0;
        int rc = 0;

        if (!qtd->urb)
                return -EINVAL;

        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
        dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
        if (chan->ep_is_in)
                remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
                         HOST_DMA_ISOC_NBYTES_SHIFT;

        if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
                /*
                 * XactError, or unable to complete all the transactions
                 * in the scheduled micro-frame/frame, both indicated by
                 * HOST_DMA_STS_PKTERR
                 */
                qtd->urb->error_count++;
                frame_desc->actual_length = qh->n_bytes[idx] - remain;
                frame_desc->status = -EPROTO;
        } else {
                /* Success */
                frame_desc->actual_length = qh->n_bytes[idx] - remain;
                frame_desc->status = 0;
        }

        if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
                /*
                 * urb->status is not used for isoc transfers here. The
                 * individual frame_desc statuses are used instead.
                 */
                dwc2_host_complete(hsotg, qtd, 0);
                dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

                /*
                 * This check is necessary because urb_dequeue can be called
                 * from urb complete callback (sound driver for example). All
                 * pending URBs are dequeued there, so no need for further
                 * processing.
                 */
                if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
                        return -1;
                rc = DWC2_CMPL_DONE;
        }

        qh->ntd--;

        /* Stop if IOC requested descriptor reached */
        if (dma_desc->status & HOST_DMA_IOC)
                rc = DWC2_CMPL_STOP;

        return rc;
}

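/*
 * Scan the isochronous descriptor list after a channel interrupt and
 * complete the descriptors serviced by the hardware. AHB and babble errors
 * fail all queued URBs immediately.
 */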
static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
                                         struct dwc2_host_chan *chan,
                                         enum dwc2_halt_status halt_status)
{
        struct dwc2_hcd_iso_packet_desc *frame_desc;
        struct dwc2_qtd *qtd, *qtd_tmp;
        struct dwc2_qh *qh;
        u16 idx;
        int rc;

        qh = chan->qh;
        idx = qh->td_first;

        if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
                list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
                        qtd->in_process = 0;
                return;
        }

        if (halt_status == DWC2_HC_XFER_AHB_ERR ||
            halt_status == DWC2_HC_XFER_BABBLE_ERR) {
                /*
                 * Channel is halted in these error cases, considered as serious
                 * issues.
                 * Complete all URBs marking all frames as failed, irrespective
                 * of whether some of the descriptors (frames) succeeded or not.
                 * Pass the error code to the completion routine as well, to
                 * update urb->status; some class drivers might use it to stop
                 * queuing transfer requests.
                 */
                int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
                          -EIO : -EOVERFLOW;

                list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
                                         qtd_list_entry) {
                        if (qtd->urb) {
                                for (idx = 0; idx < qtd->urb->packet_count;
                                     idx++) {
                                        frame_desc = &qtd->urb->iso_descs[idx];
                                        frame_desc->status = err;
                                }

                                dwc2_host_complete(hsotg, qtd, err);
                        }

                        dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
                }

                return;
        }

        list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
                if (!qtd->in_process)
                        break;
                do {
                        rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
                                                          idx);
                        if (rc < 0)
                                return;
                        idx = dwc2_desclist_idx_inc(idx, qh->interval,
                                                    chan->speed);
                        if (rc == DWC2_CMPL_STOP)
                                goto stop_scan;
                        if (rc == DWC2_CMPL_DONE)
                                break;
                } while (idx != qh->td_first);
        }

stop_scan:
        qh->td_first = idx;
}

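/*
 * Update a non-isochronous URB from one completed descriptor. Returns
 * nonzero if the transfer failed, and sets *xfer_done when the URB has been
 * fully transferred.
 */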
static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
                                        struct dwc2_host_chan *chan,
                                        struct dwc2_qtd *qtd,
                                        struct dwc2_hcd_dma_desc *dma_desc,
                                        enum dwc2_halt_status halt_status,
                                        u32 n_bytes, int *xfer_done)
{
        struct dwc2_hcd_urb *urb = qtd->urb;
        u16 remain = 0;

        if (chan->ep_is_in)
                remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
                         HOST_DMA_NBYTES_SHIFT;

        dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

        if (halt_status == DWC2_HC_XFER_AHB_ERR) {
                dev_err(hsotg->dev, "EIO\n");
                urb->status = -EIO;
                return 1;
        }

        if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
                switch (halt_status) {
                case DWC2_HC_XFER_STALL:
                        dev_vdbg(hsotg->dev, "Stall\n");
                        urb->status = -EPIPE;
                        break;
                case DWC2_HC_XFER_BABBLE_ERR:
                        dev_err(hsotg->dev, "Babble\n");
                        urb->status = -EOVERFLOW;
                        break;
                case DWC2_HC_XFER_XACT_ERR:
                        dev_err(hsotg->dev, "XactErr\n");
                        urb->status = -EPROTO;
                        break;
                default:
                        dev_err(hsotg->dev,
                                "%s: Unhandled descriptor error status (%d)\n",
                                __func__, halt_status);
                        break;
                }
                return 1;
        }

        if (dma_desc->status & HOST_DMA_A) {
                dev_vdbg(hsotg->dev,
                         "Active descriptor encountered on channel %d\n",
                         chan->hc_num);
                return 0;
        }

        if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
                if (qtd->control_phase == DWC2_CONTROL_DATA) {
                        urb->actual_length += n_bytes - remain;
                        if (remain || urb->actual_length >= urb->length) {
                                /*
                                 * For Control Data stage do not set urb->status
                                 * to 0, to prevent URB callback. Set it when
                                 * Status phase is done. See below.
                                 */
                                *xfer_done = 1;
                        }
                } else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
                        urb->status = 0;
                        *xfer_done = 1;
                }
                /* No handling for SETUP stage */
        } else {
                /* BULK and INTR */
                urb->actual_length += n_bytes - remain;
                dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
                         urb->actual_length);
                if (remain || urb->actual_length >= urb->length) {
                        urb->status = 0;
                        *xfer_done = 1;
                }
        }

        return 0;
}

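/*
 * Process a single non-isochronous descriptor: update the URB, give it back
 * if it is complete or failed, and advance the control transfer phase.
 */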
static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
                                      struct dwc2_host_chan *chan,
                                      int chnum, struct dwc2_qtd *qtd,
                                      int desc_num,
                                      enum dwc2_halt_status halt_status,
                                      int *xfer_done)
{
        struct dwc2_qh *qh = chan->qh;
        struct dwc2_hcd_urb *urb = qtd->urb;
        struct dwc2_hcd_dma_desc *dma_desc;
        u32 n_bytes;
        int failed;

        dev_vdbg(hsotg->dev, "%s()\n", __func__);

        if (!urb)
                return -EINVAL;

        dma_desc = &qh->desc_list[desc_num];
        n_bytes = qh->n_bytes[desc_num];
        dev_vdbg(hsotg->dev,
                 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
                 qtd, urb, desc_num, dma_desc, n_bytes);
        failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
                                                     halt_status, n_bytes,
                                                     xfer_done);
        if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
                dwc2_host_complete(hsotg, qtd, urb->status);
                dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
                dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
                         failed, *xfer_done, urb->status);
                return failed;
        }

        if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
                switch (qtd->control_phase) {
                case DWC2_CONTROL_SETUP:
                        if (urb->length > 0)
                                qtd->control_phase = DWC2_CONTROL_DATA;
                        else
                                qtd->control_phase = DWC2_CONTROL_STATUS;
                        dev_vdbg(hsotg->dev,
                                 "  Control setup transaction done\n");
                        break;
                case DWC2_CONTROL_DATA:
                        if (*xfer_done) {
                                qtd->control_phase = DWC2_CONTROL_STATUS;
                                dev_vdbg(hsotg->dev,
                                         "  Control data transfer done\n");
                        } else if (desc_num + 1 == qtd->n_desc) {
                                /*
                                 * Last descriptor for Control data stage which
                                 * is not completed yet
                                 */
                                dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
                                                          qtd);
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}

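/*
 * Walk all QTDs of a halted non-isochronous channel, completing their
 * descriptors, then update the data toggle and PING state for the QH.
 */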
static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
                                             struct dwc2_host_chan *chan,
                                             int chnum,
                                             enum dwc2_halt_status halt_status)
{
        struct list_head *qtd_item, *qtd_tmp;
        struct dwc2_qh *qh = chan->qh;
        struct dwc2_qtd *qtd = NULL;
        int xfer_done;
        int desc_num = 0;

        if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
                list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
                        qtd->in_process = 0;
                return;
        }

        list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
                int i;

                qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
                xfer_done = 0;

                for (i = 0; i < qtd->n_desc; i++) {
                        if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
                                                       desc_num, halt_status,
                                                       &xfer_done)) {
                                qtd = NULL;
                                break;
                        }
                        desc_num++;
                }
        }

        if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
                /*
                 * Resetting the data toggle for bulk and interrupt endpoints
                 * in case of stall. See handle_hc_stall_intr().
                 */
                if (halt_status == DWC2_HC_XFER_STALL)
                        qh->data_toggle = DWC2_HC_PID_DATA0;
                else if (qtd)
                        dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
        }

        if (halt_status == DWC2_HC_XFER_COMPLETE) {
                if (chan->hcint & HCINTMSK_NYET) {
                        /*
                         * Got a NYET on the last transaction of the transfer.
                         * It means that the endpoint should be in the PING
                         * state at the beginning of the next transfer.
                         */
                        qh->ping_state = 1;
                }
        }
}

/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
 * status and calls completion routine for the URB if it's done. Called from
 * interrupt handlers.
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In the case of an Isochronous endpoint the channel is not halted until the
 * end of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
                                 struct dwc2_host_chan *chan, int chnum,
                                 enum dwc2_halt_status halt_status)
{
        struct dwc2_qh *qh = chan->qh;
        int continue_isoc_xfer = 0;
        enum dwc2_transaction_type tr_type;

        if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
                dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

                /* Release the channel if halted or session completed */
                if (halt_status != DWC2_HC_XFER_COMPLETE ||
                    list_empty(&qh->qtd_list)) {
                        /* Halt the channel if session completed */
                        if (halt_status == DWC2_HC_XFER_COMPLETE)
                                dwc2_hc_halt(hsotg, chan, halt_status);
                        dwc2_release_channel_ddma(hsotg, qh);
                        dwc2_hcd_qh_unlink(hsotg, qh);
                } else {
                        /* Keep in assigned schedule to continue transfer */
                        list_move(&qh->qh_list_entry,
                                  &hsotg->periodic_sched_assigned);
                        continue_isoc_xfer = 1;
                }
                /*
                 * Todo: Consider the case when period exceeds FrameList size.
                 * Frame Rollover interrupt should be used.
                 */
        } else {
                /*
                 * Scan descriptor list to complete the URB(s), then release
                 * the channel
                 */
                dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
                                                 halt_status);
                dwc2_release_channel_ddma(hsotg, qh);
                dwc2_hcd_qh_unlink(hsotg, qh);

                if (!list_empty(&qh->qtd_list)) {
                        /*
                         * Add back to inactive non-periodic schedule on normal
                         * completion
                         */
                        dwc2_hcd_qh_add(hsotg, qh);
                }
        }

        tr_type = dwc2_hcd_select_transactions(hsotg);
        if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
                if (continue_isoc_xfer) {
                        if (tr_type == DWC2_TRANSACTION_NONE)
                                tr_type = DWC2_TRANSACTION_PERIODIC;
                        else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
                                tr_type = DWC2_TRANSACTION_ALL;
                }
                dwc2_hcd_queue_transactions(hsotg, tr_type);
        }
}