These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / drivers / usb / host / xhci-ring.c
index b3a0a22..db0f083 100644 (file)
@@ -302,6 +302,15 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
        ret = xhci_handshake(&xhci->op_regs->cmd_ring,
                        CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
        if (ret < 0) {
+               /* we are about to kill xhci, give it one more chance */
+               xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+                             &xhci->op_regs->cmd_ring);
+               udelay(1000);
+               ret = xhci_handshake(&xhci->op_regs->cmd_ring,
+                                    CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
+               if (ret == 0)
+                       return 0;
+
                xhci_err(xhci, "Stopped the command ring failed, "
                                "maybe the host is dead\n");
                xhci->xhc_state |= XHCI_STATE_DYING;
@@ -1444,7 +1453,7 @@ static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
                 * 1.1 ports are under the USB 2.0 hub.  If the port speed
                 * matches the device speed, it's a similar speed port.
                 */
-               if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
+               if ((port_speed == 0x03) == (hcd->speed >= HCD_USB3))
                        num_similar_speed_ports++;
        }
        return num_similar_speed_ports;
@@ -1506,7 +1515,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 
        /* Find the right roothub. */
        hcd = xhci_to_hcd(xhci);
-       if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
+       if ((major_revision == 0x03) != (hcd->speed >= HCD_USB3))
                hcd = xhci->shared_hcd;
 
        if (major_revision == 0) {
@@ -1532,7 +1541,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
         * correct bus_state structure.
         */
        bus_state = &xhci->bus_state[hcd_index(hcd)];
-       if (hcd->speed == HCD_USB3)
+       if (hcd->speed >= HCD_USB3)
                port_array = xhci->usb3_ports;
        else
                port_array = xhci->usb2_ports;
@@ -1546,7 +1555,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
                usb_hcd_resume_root_hub(hcd);
        }
 
-       if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
+       if (hcd->speed >= HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
                bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
 
        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
@@ -1558,7 +1567,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
                        goto cleanup;
                }
 
-               if (DEV_SUPERSPEED(temp)) {
+               if (DEV_SUPERSPEED_ANY(temp)) {
                        xhci_dbg(xhci, "remote wake SS port %d\n", port_id);
                        /* Set a flag to say the port signaled remote wakeup,
                         * so we can tell the difference between the end of
@@ -1574,7 +1583,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
                         */
                        bogus_port_status = true;
                        goto cleanup;
-               } else {
+               } else if (!test_bit(faked_port_index,
+                                    &bus_state->resuming_ports)) {
                        xhci_dbg(xhci, "resume HS port %d\n", port_id);
                        bus_state->resume_done[faked_port_index] = jiffies +
                                msecs_to_jiffies(USB_RESUME_TIMEOUT);
@@ -1586,7 +1596,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
        }
 
        if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
-                       DEV_SUPERSPEED(temp)) {
+                       DEV_SUPERSPEED_ANY(temp)) {
                xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
                /* We've just brought the device into U0 through either the
                 * Resume state after a device remote wakeup, or through the
@@ -1616,7 +1626,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
         * RExit to a disconnect state).  If so, let the the driver know it's
         * out of the RExit state.
         */
-       if (!DEV_SUPERSPEED(temp) &&
+       if (!DEV_SUPERSPEED_ANY(temp) &&
                        test_and_clear_bit(faked_port_index,
                                &bus_state->rexit_ports)) {
                complete(&bus_state->rexit_done[faked_port_index]);
@@ -1624,7 +1634,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
                goto cleanup;
        }
 
-       if (hcd->speed != HCD_USB3)
+       if (hcd->speed < HCD_USB3)
                xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
                                        PORT_PLC);
 
@@ -1812,7 +1822,9 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
        if (skip)
                goto td_cleanup;
 
-       if (trb_comp_code == COMP_STOP_INVAL || trb_comp_code == COMP_STOP) {
+       if (trb_comp_code == COMP_STOP_INVAL ||
+                       trb_comp_code == COMP_STOP ||
+                       trb_comp_code == COMP_STOP_SHORT) {
                /* The Endpoint Stop Command completion will take care of any
                 * stopped TDs.  A stopped TD may be restarted, so don't update
                 * the ring dequeue pointer or take this TD off any lists yet.
@@ -1919,8 +1931,22 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                else
                        *status = 0;
                break;
-       case COMP_STOP_INVAL:
+       case COMP_STOP_SHORT:
+               if (event_trb == ep_ring->dequeue || event_trb == td->last_trb)
+                       xhci_warn(xhci, "WARN: Stopped Short Packet on ctrl setup or status TRB\n");
+               else
+                       td->urb->actual_length =
+                               EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+               return finish_td(xhci, td, event_trb, event, ep, status, false);
        case COMP_STOP:
+               /* Did we stop at data stage? */
+               if (event_trb != ep_ring->dequeue && event_trb != td->last_trb)
+                       td->urb->actual_length =
+                               td->urb->transfer_buffer_length -
+                               EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+               /* fall through */
+       case COMP_STOP_INVAL:
                return finish_td(xhci, td, event_trb, event, ep, status, false);
        default:
                if (!xhci_requires_manual_halt_cleanup(xhci,
@@ -1937,7 +1963,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
                        td->urb->actual_length =
                                td->urb->transfer_buffer_length -
                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-               else
+               else if (!td->urb_length_set)
                        td->urb->actual_length = 0;
 
                return finish_td(xhci, td, event_trb, event, ep, status, false);
@@ -2014,6 +2040,8 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
                }
                if ((xhci->quirks & XHCI_TRUST_TX_LENGTH))
                        trb_comp_code = COMP_SHORT_TX;
+       /* fallthrough */
+       case COMP_STOP_SHORT:
        case COMP_SHORT_TX:
                frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
                                -EREMOTEIO : 0;
@@ -2049,6 +2077,10 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
        if (trb_comp_code == COMP_SUCCESS || skip_td) {
                frame->actual_length = frame->length;
                td->urb->actual_length += frame->length;
+       } else if (trb_comp_code == COMP_STOP_SHORT) {
+               frame->actual_length =
+                       EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+               td->urb->actual_length += frame->actual_length;
        } else {
                for (cur_trb = ep_ring->dequeue,
                     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
@@ -2129,6 +2161,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                        *status = 0;
                }
                break;
+       case COMP_STOP_SHORT:
        case COMP_SHORT_TX:
                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
                        *status = -EREMOTEIO;
@@ -2145,8 +2178,20 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                                td->urb->ep->desc.bEndpointAddress,
                                td->urb->transfer_buffer_length,
                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+       /* Stopped - short packet completion */
+       if (trb_comp_code == COMP_STOP_SHORT) {
+               td->urb->actual_length =
+                       EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+               if (td->urb->transfer_buffer_length <
+                               td->urb->actual_length) {
+                       xhci_warn(xhci, "HC gave bad length of %d bytes txed\n",
+                               EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)));
+                       td->urb->actual_length = 0;
+                        /* status will be set by usb core for canceled urbs */
+               }
        /* Fast path - was this the last TRB in the TD for this URB? */
-       if (event_trb == td->last_trb) {
+       } else if (event_trb == td->last_trb) {
                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
                        td->urb->actual_length =
                                td->urb->transfer_buffer_length -
@@ -2230,6 +2275,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
        u32 trb_comp_code;
        int ret = 0;
        int td_num = 0;
+       bool handling_skipped_tds = false;
 
        slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
        xdev = xhci->devs[slot_id];
@@ -2300,6 +2346,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
        case COMP_STOP_INVAL:
                xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
                break;
+       case COMP_STOP_SHORT:
+               xhci_dbg(xhci, "Stopped with short packet transfer detected\n");
+               break;
        case COMP_STALL:
                xhci_dbg(xhci, "Stalled endpoint\n");
                ep->ep_state |= EP_HALTED;
@@ -2363,6 +2412,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                ep->skip = true;
                xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
                goto cleanup;
+       case COMP_PING_ERR:
+               ep->skip = true;
+               xhci_dbg(xhci, "No Ping response error, Skip one Isoc TD\n");
+               goto cleanup;
        default:
                if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
                        status = 0;
@@ -2499,13 +2552,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                                 ep, &status);
 
 cleanup:
+
+
+               handling_skipped_tds = ep->skip &&
+                       trb_comp_code != COMP_MISSED_INT &&
+                       trb_comp_code != COMP_PING_ERR;
+
                /*
-                * Do not update event ring dequeue pointer if ep->skip is set.
-                * Will roll back to continue process missed tds.
+                * Do not update event ring dequeue pointer if we're in a loop
+                * processing missed tds.
                 */
-               if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
+               if (!handling_skipped_tds)
                        inc_deq(xhci, xhci->event_ring);
-               }
 
                if (ret) {
                        urb = td->urb;
@@ -2540,7 +2598,7 @@ cleanup:
         * Process them as short transfer until reach the td pointed by
         * the event.
         */
-       } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
+       } while (handling_skipped_tds);
 
        return 0;
 }
@@ -2981,21 +3039,6 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
 }
 
-/*
- * The TD size is the number of bytes remaining in the TD (including this TRB),
- * right shifted by 10.
- * It must fit in bits 21:17, so it can't be bigger than 31.
- */
-static u32 xhci_td_remainder(unsigned int remainder)
-{
-       u32 max = (1 << (21 - 17 + 1)) - 1;
-
-       if ((remainder >> 10) >= max)
-               return max << 17;
-       else
-               return (remainder >> 10) << 17;
-}
-
 /*
  * For xHCI 1.0 host controllers, TD size is the number of max packet sized
  * packets remaining in the TD (*not* including this TRB).
@@ -3008,30 +3051,36 @@ static u32 xhci_td_remainder(unsigned int remainder)
  *
  * TD size = total_packet_count - packets_transferred
  *
- * It must fit in bits 21:17, so it can't be bigger than 31.
+ * For xHCI 0.96 and older, TD size field should be the remaining bytes
+ * including this TRB, right shifted by 10
+ *
+ * For all hosts it must fit in bits 21:17, so it can't be bigger than 31.
+ * This is taken care of in the TRB_TD_SIZE() macro
+ *
  * The last TRB in a TD must have the TD size set to zero.
  */
-static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
-               unsigned int total_packet_count, struct urb *urb,
-               unsigned int num_trbs_left)
+static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
+                             int trb_buff_len, unsigned int td_total_len,
+                             struct urb *urb, unsigned int num_trbs_left)
 {
-       int packets_transferred;
+       u32 maxp, total_packet_count;
+
+       if (xhci->hci_version < 0x100)
+               return ((td_total_len - transferred) >> 10);
+
+       maxp = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+       total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
 
        /* One TRB with a zero-length data packet. */
-       if (num_trbs_left == 0 || (running_total == 0 && trb_buff_len == 0))
+       if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) ||
+           trb_buff_len == td_total_len)
                return 0;
 
-       /* All the TRB queueing functions don't count the current TRB in
-        * running_total.
-        */
-       packets_transferred = (running_total + trb_buff_len) /
-               GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
-
-       if ((total_packet_count - packets_transferred) > 31)
-               return 31 << 17;
-       return (total_packet_count - packets_transferred) << 17;
+       /* Queueing functions don't count the current TRB into transferred */
+       return (total_packet_count - ((transferred + trb_buff_len) / maxp));
 }
 
+
 static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)
 {
@@ -3041,9 +3090,11 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        struct xhci_td *td;
        struct scatterlist *sg;
        int num_sgs;
-       int trb_buff_len, this_sg_len, running_total;
+       int trb_buff_len, this_sg_len, running_total, ret;
        unsigned int total_packet_count;
+       bool zero_length_needed;
        bool first_trb;
+       int last_trb_num;
        u64 addr;
        bool more_trbs_coming;
 
@@ -3059,13 +3110,27 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
                        usb_endpoint_maxp(&urb->ep->desc));
 
-       trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
+       ret = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
                        num_trbs, urb, 0, mem_flags);
-       if (trb_buff_len < 0)
-               return trb_buff_len;
+       if (ret < 0)
+               return ret;
 
        urb_priv = urb->hcpriv;
+
+       /* Deal with URB_ZERO_PACKET - need one more td/trb */
+       zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
+               urb_priv->length == 2;
+       if (zero_length_needed) {
+               num_trbs++;
+               xhci_dbg(xhci, "Creating zero length td.\n");
+               ret = prepare_transfer(xhci, xhci->devs[slot_id],
+                               ep_index, urb->stream_id,
+                               1, urb, 1, mem_flags);
+               if (ret < 0)
+                       return ret;
+       }
+
        td = urb_priv->td[0];
 
        /*
@@ -3095,6 +3160,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                trb_buff_len = urb->transfer_buffer_length;
 
        first_trb = true;
+       last_trb_num = zero_length_needed ? 2 : 1;
        /* Queue the first TRB, even if it's zero-length */
        do {
                u32 field = 0;
@@ -3112,12 +3178,15 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                /* Chain all the TRBs together; clear the chain bit in the last
                 * TRB to indicate it's the last TRB in the chain.
                 */
-               if (num_trbs > 1) {
+               if (num_trbs > last_trb_num) {
                        field |= TRB_CHAIN;
-               } else {
-                       /* FIXME - add check for ZERO_PACKET flag before this */
+               } else if (num_trbs == last_trb_num) {
                        td->last_trb = ep_ring->enqueue;
                        field |= TRB_IOC;
+               } else if (zero_length_needed && num_trbs == 1) {
+                       trb_buff_len = 0;
+                       urb_priv->td[1]->last_trb = ep_ring->enqueue;
+                       field |= TRB_IOC;
                }
 
                /* Only set interrupt on short packet for IN endpoints */
@@ -3133,17 +3202,12 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                }
 
                /* Set the TRB length, TD size, and interrupter fields. */
-               if (xhci->hci_version < 0x100) {
-                       remainder = xhci_td_remainder(
-                                       urb->transfer_buffer_length -
-                                       running_total);
-               } else {
-                       remainder = xhci_v1_0_td_remainder(running_total,
-                                       trb_buff_len, total_packet_count, urb,
-                                       num_trbs - 1);
-               }
+               remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
+                                          urb->transfer_buffer_length,
+                                          urb, num_trbs - 1);
+
                length_field = TRB_LEN(trb_buff_len) |
-                       remainder |
+                       TRB_TD_SIZE(remainder) |
                        TRB_INTR_TARGET(0);
 
                if (num_trbs > 1)
@@ -3179,7 +3243,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                if (running_total + trb_buff_len > urb->transfer_buffer_length)
                        trb_buff_len =
                                urb->transfer_buffer_length - running_total;
-       } while (running_total < urb->transfer_buffer_length);
+       } while (num_trbs > 0);
 
        check_trb_math(urb, num_trbs, running_total);
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
@@ -3197,7 +3261,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        int num_trbs;
        struct xhci_generic_trb *start_trb;
        bool first_trb;
+       int last_trb_num;
        bool more_trbs_coming;
+       bool zero_length_needed;
        int start_cycle;
        u32 field, length_field;
 
@@ -3228,7 +3294,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                num_trbs++;
                running_total += TRB_MAX_BUFF_SIZE;
        }
-       /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
 
        ret = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
@@ -3237,6 +3302,20 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                return ret;
 
        urb_priv = urb->hcpriv;
+
+       /* Deal with URB_ZERO_PACKET - need one more td/trb */
+       zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
+               urb_priv->length == 2;
+       if (zero_length_needed) {
+               num_trbs++;
+               xhci_dbg(xhci, "Creating zero length td.\n");
+               ret = prepare_transfer(xhci, xhci->devs[slot_id],
+                               ep_index, urb->stream_id,
+                               1, urb, 1, mem_flags);
+               if (ret < 0)
+                       return ret;
+       }
+
        td = urb_priv->td[0];
 
        /*
@@ -3258,7 +3337,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                trb_buff_len = urb->transfer_buffer_length;
 
        first_trb = true;
-
+       last_trb_num = zero_length_needed ? 2 : 1;
        /* Queue the first TRB, even if it's zero-length */
        do {
                u32 remainder = 0;
@@ -3275,12 +3354,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                /* Chain all the TRBs together; clear the chain bit in the last
                 * TRB to indicate it's the last TRB in the chain.
                 */
-               if (num_trbs > 1) {
+               if (num_trbs > last_trb_num) {
                        field |= TRB_CHAIN;
-               } else {
-                       /* FIXME - add check for ZERO_PACKET flag before this */
+               } else if (num_trbs == last_trb_num) {
                        td->last_trb = ep_ring->enqueue;
                        field |= TRB_IOC;
+               } else if (zero_length_needed && num_trbs == 1) {
+                       trb_buff_len = 0;
+                       urb_priv->td[1]->last_trb = ep_ring->enqueue;
+                       field |= TRB_IOC;
                }
 
                /* Only set interrupt on short packet for IN endpoints */
@@ -3288,17 +3370,12 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        field |= TRB_ISP;
 
                /* Set the TRB length, TD size, and interrupter fields. */
-               if (xhci->hci_version < 0x100) {
-                       remainder = xhci_td_remainder(
-                                       urb->transfer_buffer_length -
-                                       running_total);
-               } else {
-                       remainder = xhci_v1_0_td_remainder(running_total,
-                                       trb_buff_len, total_packet_count, urb,
-                                       num_trbs - 1);
-               }
+               remainder = xhci_td_remainder(xhci, running_total, trb_buff_len,
+                                          urb->transfer_buffer_length,
+                                          urb, num_trbs - 1);
+
                length_field = TRB_LEN(trb_buff_len) |
-                       remainder |
+                       TRB_TD_SIZE(remainder) |
                        TRB_INTR_TARGET(0);
 
                if (num_trbs > 1)
@@ -3318,7 +3395,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                trb_buff_len = urb->transfer_buffer_length - running_total;
                if (trb_buff_len > TRB_MAX_BUFF_SIZE)
                        trb_buff_len = TRB_MAX_BUFF_SIZE;
-       } while (running_total < urb->transfer_buffer_length);
+       } while (num_trbs > 0);
 
        check_trb_math(urb, num_trbs, running_total);
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
@@ -3336,7 +3413,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        struct usb_ctrlrequest *setup;
        struct xhci_generic_trb *start_trb;
        int start_cycle;
-       u32 field, length_field;
+       u32 field, length_field, remainder;
        struct urb_priv *urb_priv;
        struct xhci_td *td;
 
@@ -3385,8 +3462,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        if (start_cycle == 0)
                field |= 0x1;
 
-       /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
-       if (xhci->hci_version == 0x100) {
+       /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
+       if (xhci->hci_version >= 0x100) {
                if (urb->transfer_buffer_length > 0) {
                        if (setup->bRequestType & USB_DIR_IN)
                                field |= TRB_TX_TYPE(TRB_DATA_IN);
@@ -3409,9 +3486,15 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        else
                field = TRB_TYPE(TRB_DATA);
 
+       remainder = xhci_td_remainder(xhci, 0,
+                                  urb->transfer_buffer_length,
+                                  urb->transfer_buffer_length,
+                                  urb, 1);
+
        length_field = TRB_LEN(urb->transfer_buffer_length) |
-               xhci_td_remainder(urb->transfer_buffer_length) |
+               TRB_TD_SIZE(remainder) |
                TRB_INTR_TARGET(0);
+
        if (urb->transfer_buffer_length > 0) {
                if (setup->bRequestType & USB_DIR_IN)
                        field |= TRB_DIR_IN;
@@ -3517,6 +3600,97 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
        }
 }
 
+/*
+ * Calculates Frame ID field of the isochronous TRB identifies the
+ * target frame that the Interval associated with this Isochronous
+ * Transfer Descriptor will start on. Refer to 4.11.2.5 in 1.1 spec.
+ *
+ * Returns actual frame id on success, negative value on error.
+ */
+static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
+               struct urb *urb, int index)
+{
+       int start_frame, ist, ret = 0;
+       int start_frame_id, end_frame_id, current_frame_id;
+
+       if (urb->dev->speed == USB_SPEED_LOW ||
+                       urb->dev->speed == USB_SPEED_FULL)
+               start_frame = urb->start_frame + index * urb->interval;
+       else
+               start_frame = (urb->start_frame + index * urb->interval) >> 3;
+
+       /* Isochronous Scheduling Threshold (IST, bits 0~3 in HCSPARAMS2):
+        *
+        * If bit [3] of IST is cleared to '0', software can add a TRB no
+        * later than IST[2:0] Microframes before that TRB is scheduled to
+        * be executed.
+        * If bit [3] of IST is set to '1', software can add a TRB no later
+        * than IST[2:0] Frames before that TRB is scheduled to be executed.
+        */
+       ist = HCS_IST(xhci->hcs_params2) & 0x7;
+       if (HCS_IST(xhci->hcs_params2) & (1 << 3))
+               ist <<= 3;
+
+       /* Software shall not schedule an Isoch TD with a Frame ID value that
+        * is less than the Start Frame ID or greater than the End Frame ID,
+        * where:
+        *
+        * End Frame ID = (Current MFINDEX register value + 895 ms.) MOD 2048
+        * Start Frame ID = (Current MFINDEX register value + IST + 1) MOD 2048
+        *
+        * Both the End Frame ID and Start Frame ID values are calculated
+        * in microframes. When software determines the valid Frame ID value;
+        * The End Frame ID value should be rounded down to the nearest Frame
+        * boundary, and the Start Frame ID value should be rounded up to the
+        * nearest Frame boundary.
+        */
+       current_frame_id = readl(&xhci->run_regs->microframe_index);
+       start_frame_id = roundup(current_frame_id + ist + 1, 8);
+       end_frame_id = rounddown(current_frame_id + 895 * 8, 8);
+
+       start_frame &= 0x7ff;
+       start_frame_id = (start_frame_id >> 3) & 0x7ff;
+       end_frame_id = (end_frame_id >> 3) & 0x7ff;
+
+       xhci_dbg(xhci, "%s: index %d, reg 0x%x start_frame_id 0x%x, end_frame_id 0x%x, start_frame 0x%x\n",
+                __func__, index, readl(&xhci->run_regs->microframe_index),
+                start_frame_id, end_frame_id, start_frame);
+
+       if (start_frame_id < end_frame_id) {
+               if (start_frame > end_frame_id ||
+                               start_frame < start_frame_id)
+                       ret = -EINVAL;
+       } else if (start_frame_id > end_frame_id) {
+               if ((start_frame > end_frame_id &&
+                               start_frame < start_frame_id))
+                       ret = -EINVAL;
+       } else {
+                       ret = -EINVAL;
+       }
+
+       if (index == 0) {
+               if (ret == -EINVAL || start_frame == start_frame_id) {
+                       start_frame = start_frame_id + 1;
+                       if (urb->dev->speed == USB_SPEED_LOW ||
+                                       urb->dev->speed == USB_SPEED_FULL)
+                               urb->start_frame = start_frame;
+                       else
+                               urb->start_frame = start_frame << 3;
+                       ret = 0;
+               }
+       }
+
+       if (ret) {
+               xhci_warn(xhci, "Frame ID %d (reg %d, index %d) beyond range (%d, %d)\n",
+                               start_frame, current_frame_id, index,
+                               start_frame_id, end_frame_id);
+               xhci_warn(xhci, "Ignore frame ID field, use SIA bit instead\n");
+               return ret;
+       }
+
+       return start_frame;
+}
+
 /* This is for isoc transfer */
 static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)
@@ -3533,7 +3707,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        u64 start_addr, addr;
        int i, j;
        bool more_trbs_coming;
+       struct xhci_virt_ep *xep;
 
+       xep = &xhci->devs[slot_id]->eps[ep_index];
        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
 
        num_tds = urb->number_of_packets;
@@ -3581,6 +3757,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
                td = urb_priv->td[i];
                for (j = 0; j < trbs_per_td; j++) {
+                       int frame_id = 0;
                        u32 remainder = 0;
                        field = 0;
 
@@ -3589,8 +3766,20 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                        TRB_TLBPC(residue);
                                /* Queue the isoc TRB */
                                field |= TRB_TYPE(TRB_ISOC);
-                               /* Assume URB_ISO_ASAP is set */
-                               field |= TRB_SIA;
+
+                               /* Calculate Frame ID and SIA fields */
+                               if (!(urb->transfer_flags & URB_ISO_ASAP) &&
+                                               HCC_CFC(xhci->hcc_params)) {
+                                       frame_id = xhci_get_isoc_frame_id(xhci,
+                                                                         urb,
+                                                                         i);
+                                       if (frame_id >= 0)
+                                               field |= TRB_FRAME_ID(frame_id);
+                                       else
+                                               field |= TRB_SIA;
+                               } else
+                                       field |= TRB_SIA;
+
                                if (i == 0) {
                                        if (start_cycle == 0)
                                                field |= 0x1;
@@ -3634,17 +3823,12 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                                trb_buff_len = td_remain_len;
 
                        /* Set the TRB length, TD size, & interrupter fields. */
-                       if (xhci->hci_version < 0x100) {
-                               remainder = xhci_td_remainder(
-                                               td_len - running_total);
-                       } else {
-                               remainder = xhci_v1_0_td_remainder(
-                                               running_total, trb_buff_len,
-                                               total_packet_count, urb,
-                                               (trbs_per_td - j - 1));
-                       }
+                       remainder = xhci_td_remainder(xhci, running_total,
+                                                  trb_buff_len, td_len,
+                                                  urb, trbs_per_td - j - 1);
+
                        length_field = TRB_LEN(trb_buff_len) |
-                               remainder |
+                               TRB_TD_SIZE(remainder) |
                                TRB_INTR_TARGET(0);
 
                        queue_trb(xhci, ep_ring, more_trbs_coming,
@@ -3666,6 +3850,10 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                }
        }
 
+       /* store the next frame id */
+       if (HCC_CFC(xhci->hcc_params))
+               xep->next_frame_id = urb->start_frame + num_tds * urb->interval;
+
        if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
                if (xhci->quirks & XHCI_AMD_PLL_FIX)
                        usb_amd_quirk_pll_disable();
@@ -3702,9 +3890,9 @@ cleanup:
 /*
  * Check transfer ring to guarantee there is enough room for the urb.
  * Update ISO URB start_frame and interval.
- * Update interval as xhci_queue_intr_tx does. Just use xhci frame_index to
- * update the urb->start_frame by now.
- * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input.
+ * Update interval as xhci_queue_intr_tx does. Use xhci frame_index to
+ * update urb->start_frame if URB_ISO_ASAP is set in transfer_flags or
+ * Contiguous Frame ID is not supported by HC.
  */
 int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)
@@ -3717,8 +3905,11 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
        int ep_interval;
        int num_tds, num_trbs, i;
        int ret;
+       struct xhci_virt_ep *xep;
+       int ist;
 
        xdev = xhci->devs[slot_id];
+       xep = &xhci->devs[slot_id]->eps[ep_index];
        ep_ring = xdev->eps[ep_index].ring;
        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
 
@@ -3735,14 +3926,10 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
        if (ret)
                return ret;
 
-       start_frame = readl(&xhci->run_regs->microframe_index);
-       start_frame &= 0x3fff;
-
-       urb->start_frame = start_frame;
-       if (urb->dev->speed == USB_SPEED_LOW ||
-                       urb->dev->speed == USB_SPEED_FULL)
-               urb->start_frame >>= 3;
-
+       /*
+        * Check interval value. This should be done before we start to
+        * calculate the start frame value.
+        */
        xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
        ep_interval = urb->interval;
        /* Convert to microframes */
@@ -3763,6 +3950,42 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
                                urb->dev->speed == USB_SPEED_FULL)
                        urb->interval /= 8;
        }
+
+       /* Calculate the start frame and put it in urb->start_frame. */
+       if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
+               if ((le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
+                               EP_STATE_RUNNING) {
+                       urb->start_frame = xep->next_frame_id;
+                       goto skip_start_over;
+               }
+       }
+
+       start_frame = readl(&xhci->run_regs->microframe_index);
+       start_frame &= 0x3fff;
+       /*
+        * Round up to the next frame and consider the time before trb really
+        * gets scheduled by hardware.
+        */
+       ist = HCS_IST(xhci->hcs_params2) & 0x7;
+       if (HCS_IST(xhci->hcs_params2) & (1 << 3))
+               ist <<= 3;
+       start_frame += ist + XHCI_CFC_DELAY;
+       start_frame = roundup(start_frame, 8);
+
+       /*
+        * Round up to the next ESIT (Endpoint Service Interval Time) if ESIT
+        * is greater than 8 microframes.
+        */
+       if (urb->dev->speed == USB_SPEED_LOW ||
+                       urb->dev->speed == USB_SPEED_FULL) {
+               start_frame = roundup(start_frame, urb->interval << 3);
+               urb->start_frame = start_frame >> 3;
+       } else {
+               start_frame = roundup(start_frame, urb->interval);
+               urb->start_frame = start_frame;
+       }
+
+skip_start_over:
        ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
 
        return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
@@ -3784,8 +4007,11 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
 {
        int reserved_trbs = xhci->cmd_ring_reserved_trbs;
        int ret;
-       if (xhci->xhc_state & XHCI_STATE_DYING)
+
+       if (xhci->xhc_state) {
+               xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
                return -ESHUTDOWN;
+       }
 
        if (!command_must_succeed)
                reserved_trbs++;