2 * Copyright (C) 2015 Michael Brown <mbrown@fensystems.co.uk>.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
30 #include <ipxe/malloc.h>
38 * USB Universal Host Controller Interface (UHCI) driver
42 /******************************************************************************
46 ******************************************************************************
50 * Check that address is reachable
54 * @ret rc Return status code
56 static inline __attribute__ (( always_inline)) int
57 uhci_reachable ( void *addr, size_t len ) {
58 physaddr_t phys = virt_to_phys ( addr );
60 /* Always reachable in a 32-bit build */
61 if ( sizeof ( physaddr_t ) <= sizeof ( uint32_t ) )
64 /* Reachable if below 4GB */
65 if ( ( ( phys + len - 1 ) & ~0xffffffffULL ) == 0 )
71 /******************************************************************************
75 ******************************************************************************
83 static void uhci_run ( struct uhci_device *uhci ) {
86 /* Set run/stop bit */
87 usbcmd = inw ( uhci->regs + UHCI_USBCMD );
88 usbcmd |= ( UHCI_USBCMD_RUN | UHCI_USBCMD_MAX64 );
89 outw ( usbcmd, uhci->regs + UHCI_USBCMD );
96 * @ret rc Return status code
98 static int uhci_stop ( struct uhci_device *uhci ) {
103 /* Clear run/stop bit */
104 usbcmd = inw ( uhci->regs + UHCI_USBCMD );
105 usbcmd &= ~UHCI_USBCMD_RUN;
106 outw ( usbcmd, uhci->regs + UHCI_USBCMD );
108 /* Wait for device to stop */
109 for ( i = 0 ; i < UHCI_STOP_MAX_WAIT_MS ; i++ ) {
111 /* Check if device is stopped */
112 usbsts = inw ( uhci->regs + UHCI_USBSTS );
113 if ( usbsts & UHCI_USBSTS_HCHALTED )
120 DBGC ( uhci, "UHCI %s timed out waiting for stop\n", uhci->name );
127 * @v uhci UHCI device
128 * @ret rc Return status code
130 static int uhci_reset ( struct uhci_device *uhci ) {
135 /* The UHCI specification states that resetting a running
136 * device may result in undefined behaviour, so try stopping
139 if ( ( rc = uhci_stop ( uhci ) ) != 0 ) {
140 /* Ignore errors and attempt to reset the device anyway */
144 outw ( UHCI_USBCMD_HCRESET, uhci->regs + UHCI_USBCMD );
146 /* Wait for reset to complete */
147 for ( i = 0 ; i < UHCI_RESET_MAX_WAIT_MS ; i++ ) {
149 /* Check if reset is complete */
150 usbcmd = inw ( uhci->regs + UHCI_USBCMD );
151 if ( ! ( usbcmd & UHCI_USBCMD_HCRESET ) )
158 DBGC ( uhci, "UHCI %s timed out waiting for reset\n", uhci->name );
162 /******************************************************************************
164 * Transfer descriptor rings
166 ******************************************************************************
170 * Allocate transfer ring
172 * @v ring Transfer ring
173 * @ret rc Return status code
175 static int uhci_ring_alloc ( struct uhci_ring *ring ) {
178 /* Initialise structure */
179 memset ( ring, 0, sizeof ( *ring ) );
181 /* Allocate queue head */
182 ring->head = malloc_dma ( sizeof ( *ring->head ), UHCI_ALIGN );
183 if ( ! ring->head ) {
187 if ( ( rc = uhci_reachable ( ring->head,
188 sizeof ( *ring->head ) ) ) != 0 )
189 goto err_unreachable;
191 /* Initialise queue head */
192 ring->head->current = cpu_to_le32 ( UHCI_LINK_TERMINATE );
197 free_dma ( ring->head, sizeof ( *ring->head ) );
205 * @v ring Transfer ring
207 static void uhci_ring_free ( struct uhci_ring *ring ) {
211 assert ( uhci_ring_fill ( ring ) == 0 );
212 for ( i = 0 ; i < UHCI_RING_COUNT ; i++ )
213 assert ( ring->xfer[i] == NULL );
215 /* Free queue head */
216 free_dma ( ring->head, sizeof ( *ring->head ) );
220 * Enqueue new transfer
222 * @v ring Transfer ring
223 * @v iobuf I/O buffer
224 * @v count Number of descriptors
225 * @ret rc Return status code
227 static int uhci_enqueue ( struct uhci_ring *ring, struct io_buffer *iobuf,
228 unsigned int count ) {
229 struct uhci_transfer *xfer;
230 struct uhci_transfer *end;
231 struct uhci_transfer_descriptor *desc;
232 unsigned int index = ( ring->prod % UHCI_RING_COUNT );
238 assert ( count > 0 );
239 assert ( iobuf != NULL );
241 /* Check for space in ring */
242 if ( ! uhci_ring_remaining ( ring ) ) {
247 /* Check for reachability of I/O buffer */
248 if ( ( rc = uhci_reachable ( iobuf->data, iob_len ( iobuf ) ) ) != 0 )
249 goto err_unreachable_iobuf;
251 /* Allocate transfer */
252 xfer = malloc ( sizeof ( *xfer ) );
258 /* Initialise transfer */
264 /* Allocate transfer descriptors */
265 len = ( count * sizeof ( xfer->desc[0] ) );
266 xfer->desc = malloc_dma ( len, UHCI_ALIGN );
267 if ( ! xfer->desc ) {
271 if ( ( rc = uhci_reachable ( xfer->desc, len ) ) != 0 )
272 goto err_unreachable_desc;
274 /* Initialise transfer descriptors */
275 memset ( xfer->desc, 0, len );
277 for ( ; --count ; desc++ ) {
278 link = ( virt_to_phys ( desc + 1 ) | UHCI_LINK_DEPTH_FIRST );
279 desc->link = cpu_to_le32 ( link );
280 desc->flags = ring->flags;
282 desc->link = cpu_to_le32 ( UHCI_LINK_TERMINATE );
283 desc->flags = ( ring->flags | UHCI_FL_IOC );
287 link = virt_to_phys ( xfer->desc );
288 if ( uhci_ring_fill ( ring ) > 0 ) {
290 end->desc[ end->prod - 1 ].link = cpu_to_le32 ( link );
292 ring->head->current = cpu_to_le32 ( link );
294 assert ( ring->xfer[index] == NULL );
295 ring->xfer[index] = xfer;
301 err_unreachable_desc:
302 free_dma ( xfer->desc, len );
306 err_unreachable_iobuf:
314 * @v ring Transfer ring
316 * @v len Length of data
319 static void uhci_describe ( struct uhci_ring *ring, void *data,
320 size_t len, uint8_t pid ) {
321 struct uhci_transfer *xfer = ring->end;
322 struct uhci_transfer_descriptor *desc;
327 /* Calculate fragment length */
329 if ( frag_len > ring->mtu )
330 frag_len = ring->mtu;
332 /* Populate descriptor */
333 desc = &xfer->desc[xfer->prod++];
334 if ( pid == USB_PID_IN )
335 desc->flags |= UHCI_FL_SPD;
336 control = ( ring->control | UHCI_CONTROL_PID ( pid ) |
337 UHCI_CONTROL_LEN ( frag_len ) );
338 desc->control = cpu_to_le32 ( control );
340 desc->data = virt_to_phys ( data );
342 desc->status = UHCI_STATUS_ACTIVE;
344 /* Update data toggle */
345 ring->control ^= UHCI_CONTROL_TOGGLE;
347 /* Move to next descriptor */
357 * @v ring Transfer ring
358 * @ret iobuf I/O buffer
360 static struct io_buffer * uhci_dequeue ( struct uhci_ring *ring ) {
361 unsigned int index = ( ring->cons % UHCI_RING_COUNT );
362 struct io_buffer *iobuf;
363 struct uhci_transfer *xfer;
367 assert ( uhci_ring_fill ( ring ) > 0 );
369 /* Consume transfer */
370 xfer = ring->xfer[index];
371 assert ( xfer != NULL );
372 assert ( xfer->desc != NULL );
374 assert ( iobuf != NULL );
375 ring->xfer[index] = NULL;
378 /* Free transfer descriptors */
379 len = ( xfer->prod * sizeof ( xfer->desc[0] ) );
380 free_dma ( xfer->desc, len );
391 * @v ring Transfer ring
392 * @v toggle Expected data toggle for next descriptor
394 static void uhci_restart ( struct uhci_ring *ring, uint32_t toggle ) {
395 struct uhci_transfer *xfer;
396 struct uhci_transfer_descriptor *desc;
397 struct uhci_transfer_descriptor *first;
403 assert ( ring->head->current == cpu_to_le32 ( UHCI_LINK_TERMINATE ) );
405 /* If ring is empty, then just update the data toggle for the
408 if ( uhci_ring_fill ( ring ) == 0 ) {
409 ring->control &= ~UHCI_CONTROL_TOGGLE;
410 ring->control |= toggle;
414 /* If expected toggle does not match the toggle in the first
415 * unconsumed descriptor, then invert all toggles.
417 xfer = ring->xfer[ ring->cons % UHCI_RING_COUNT ];
418 assert ( xfer != NULL );
419 assert ( xfer->cons == 0 );
420 first = &xfer->desc[0];
421 if ( ( le32_to_cpu ( first->control ) ^ toggle ) & UHCI_CONTROL_TOGGLE){
423 /* Invert toggle on all unconsumed transfer descriptors */
424 for ( i = ring->cons ; i != ring->prod ; i++ ) {
425 xfer = ring->xfer[ i % UHCI_RING_COUNT ];
426 assert ( xfer != NULL );
427 assert ( xfer->cons == 0 );
428 for ( j = 0 ; j < xfer->prod ; j++ ) {
429 desc = &xfer->desc[j];
431 cpu_to_le32 ( UHCI_CONTROL_TOGGLE );
435 /* Invert toggle for next descriptor to be enqueued */
436 ring->control ^= UHCI_CONTROL_TOGGLE;
439 /* Restart ring at first unconsumed transfer */
440 link = virt_to_phys ( first );
442 ring->head->current = cpu_to_le32 ( link );
445 /******************************************************************************
447 * Schedule management
449 ******************************************************************************
453 * Get link value for a queue head
455 * @v queue Queue head
456 * @ret link Link value
458 static inline uint32_t uhci_link_qh ( struct uhci_queue_head *queue ) {
460 return ( virt_to_phys ( queue ) | UHCI_LINK_TYPE_QH );
464 * (Re)build asynchronous schedule
466 * @v uhci UHCI device
468 static void uhci_async_schedule ( struct uhci_device *uhci ) {
469 struct uhci_endpoint *endpoint;
470 struct uhci_queue_head *queue;
474 /* Build schedule in reverse order of execution. Provided
475 * that we only ever add or remove single endpoints, this can
476 * safely run concurrently with hardware execution of the
479 link = end = uhci_link_qh ( uhci->head );
480 list_for_each_entry_reverse ( endpoint, &uhci->async, schedule ) {
481 queue = endpoint->ring.head;
482 queue->link = cpu_to_le32 ( link );
484 link = uhci_link_qh ( queue );
487 link = UHCI_LINK_TERMINATE;
488 uhci->head->link = cpu_to_le32 ( link );
493 * Add endpoint to asynchronous schedule
495 * @v endpoint Endpoint
497 static void uhci_async_add ( struct uhci_endpoint *endpoint ) {
498 struct uhci_device *uhci = endpoint->uhci;
500 /* Add to end of schedule */
501 list_add_tail ( &endpoint->schedule, &uhci->async );
503 /* Rebuild schedule */
504 uhci_async_schedule ( uhci );
508 * Remove endpoint from asynchronous schedule
510 * @v endpoint Endpoint
512 static void uhci_async_del ( struct uhci_endpoint *endpoint ) {
513 struct uhci_device *uhci = endpoint->uhci;
515 /* Remove from schedule */
516 list_check_contains_entry ( endpoint, &uhci->async, schedule );
517 list_del ( &endpoint->schedule );
519 /* Rebuild schedule */
520 uhci_async_schedule ( uhci );
522 /* Delay for a whole USB frame (with a 100% safety margin) */
527 * (Re)build periodic schedule
529 * @v uhci UHCI device
531 static void uhci_periodic_schedule ( struct uhci_device *uhci ) {
532 struct uhci_endpoint *endpoint;
533 struct uhci_queue_head *queue;
536 unsigned int max_interval;
539 /* Build schedule in reverse order of execution. Provided
540 * that we only ever add or remove single endpoints, this can
541 * safely run concurrently with hardware execution of the
544 DBGCP ( uhci, "UHCI %s periodic schedule: ", uhci->name );
545 link = end = uhci_link_qh ( uhci->head );
546 list_for_each_entry_reverse ( endpoint, &uhci->periodic, schedule ) {
547 queue = endpoint->ring.head;
548 queue->link = cpu_to_le32 ( link );
550 DBGCP ( uhci, "%s%d", ( ( link == end ) ? "" : "<-" ),
551 endpoint->ep->interval );
552 link = uhci_link_qh ( queue );
554 DBGCP ( uhci, "\n" );
556 /* Populate periodic frame list */
557 DBGCP ( uhci, "UHCI %s periodic frame list:", uhci->name );
558 for ( i = 0 ; i < UHCI_FRAMES ; i++ ) {
560 /* Calculate maximum interval (in microframes) which
561 * may appear as part of this frame list.
564 /* Start of list: include all endpoints */
567 /* Calculate highest power-of-two frame interval */
568 max_interval = ( 1 << ( ffs ( i ) - 1 ) );
569 /* Convert to microframes */
571 /* Round up to nearest 2^n-1 */
572 max_interval = ( ( max_interval << 1 ) - 1 );
575 /* Find first endpoint in schedule satisfying this
576 * maximum interval constraint.
578 link = uhci_link_qh ( uhci->head );
579 list_for_each_entry ( endpoint, &uhci->periodic, schedule ) {
580 if ( endpoint->ep->interval <= max_interval ) {
581 queue = endpoint->ring.head;
582 link = uhci_link_qh ( queue );
583 DBGCP ( uhci, " %d:%d",
584 i, endpoint->ep->interval );
588 uhci->frame->link[i] = cpu_to_le32 ( link );
591 DBGCP ( uhci, "\n" );
595 * Add endpoint to periodic schedule
597 * @v endpoint Endpoint
599 static void uhci_periodic_add ( struct uhci_endpoint *endpoint ) {
600 struct uhci_device *uhci = endpoint->uhci;
601 struct uhci_endpoint *before;
602 unsigned int interval = endpoint->ep->interval;
604 /* Find first endpoint with a smaller interval */
605 list_for_each_entry ( before, &uhci->periodic, schedule ) {
606 if ( before->ep->interval < interval )
609 list_add_tail ( &endpoint->schedule, &before->schedule );
611 /* Rebuild schedule */
612 uhci_periodic_schedule ( uhci );
616 * Remove endpoint from periodic schedule
618 * @v endpoint Endpoint
620 static void uhci_periodic_del ( struct uhci_endpoint *endpoint ) {
621 struct uhci_device *uhci = endpoint->uhci;
623 /* Remove from schedule */
624 list_check_contains_entry ( endpoint, &uhci->periodic, schedule );
625 list_del ( &endpoint->schedule );
627 /* Rebuild schedule */
628 uhci_periodic_schedule ( uhci );
630 /* Delay for a whole USB frame (with a 100% safety margin) */
635 * Add endpoint to appropriate schedule
637 * @v endpoint Endpoint
639 static void uhci_schedule_add ( struct uhci_endpoint *endpoint ) {
640 struct usb_endpoint *ep = endpoint->ep;
641 unsigned int attr = ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK );
643 if ( attr == USB_ENDPOINT_ATTR_INTERRUPT ) {
644 uhci_periodic_add ( endpoint );
646 uhci_async_add ( endpoint );
651 * Remove endpoint from appropriate schedule
653 * @v endpoint Endpoint
655 static void uhci_schedule_del ( struct uhci_endpoint *endpoint ) {
656 struct usb_endpoint *ep = endpoint->ep;
657 unsigned int attr = ( ep->attributes & USB_ENDPOINT_ATTR_TYPE_MASK );
659 if ( attr == USB_ENDPOINT_ATTR_INTERRUPT ) {
660 uhci_periodic_del ( endpoint );
662 uhci_async_del ( endpoint );
666 /******************************************************************************
668 * Endpoint operations
670 ******************************************************************************
677 * @ret rc Return status code
679 static int uhci_endpoint_open ( struct usb_endpoint *ep ) {
680 struct usb_device *usb = ep->usb;
681 struct uhci_device *uhci = usb_get_hostdata ( usb );
682 struct uhci_endpoint *endpoint;
685 /* Allocate and initialise structure */
686 endpoint = zalloc ( sizeof ( *endpoint ) );
691 endpoint->uhci = uhci;
693 usb_endpoint_set_hostdata ( ep, endpoint );
695 /* Initialise descriptor ring */
696 if ( ( rc = uhci_ring_alloc ( &endpoint->ring ) ) != 0 )
698 endpoint->ring.mtu = ep->mtu;
699 endpoint->ring.flags = UHCI_FL_CERR_MAX;
700 if ( usb->port->speed < USB_SPEED_FULL )
701 endpoint->ring.flags |= UHCI_FL_LS;
702 endpoint->ring.control = ( UHCI_CONTROL_DEVICE ( usb->address ) |
703 UHCI_CONTROL_ENDPOINT ( ep->address ) );
705 /* Add to list of endpoints */
706 list_add_tail ( &endpoint->list, &uhci->endpoints );
708 /* Add to schedule */
709 uhci_schedule_add ( endpoint );
713 uhci_ring_free ( &endpoint->ring );
725 static void uhci_endpoint_close ( struct usb_endpoint *ep ) {
726 struct uhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
727 struct io_buffer *iobuf;
729 /* Remove from schedule */
730 uhci_schedule_del ( endpoint );
732 /* Cancel any incomplete transfers */
733 while ( uhci_ring_fill ( &endpoint->ring ) ) {
734 iobuf = uhci_dequeue ( &endpoint->ring );
736 usb_complete_err ( ep, iobuf, -ECANCELED );
739 /* Remove from list of endpoints */
740 list_del ( &endpoint->list );
742 /* Free descriptor ring */
743 uhci_ring_free ( &endpoint->ring );
753 * @ret rc Return status code
755 static int uhci_endpoint_reset ( struct usb_endpoint *ep ) {
756 struct uhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
757 struct uhci_ring *ring = &endpoint->ring;
760 uhci_restart ( ring, 0 );
769 * @ret rc Return status code
771 static int uhci_endpoint_mtu ( struct usb_endpoint *ep ) {
772 struct uhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
774 /* Update endpoint MTU */
775 endpoint->ring.mtu = ep->mtu;
781 * Enqueue message transfer
784 * @v iobuf I/O buffer
785 * @ret rc Return status code
787 static int uhci_endpoint_message ( struct usb_endpoint *ep,
788 struct io_buffer *iobuf ) {
789 struct uhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
790 struct uhci_ring *ring = &endpoint->ring;
791 struct usb_setup_packet *packet;
797 /* Calculate number of descriptors */
798 assert ( iob_len ( iobuf ) >= sizeof ( *packet ) );
799 len = ( iob_len ( iobuf ) - sizeof ( *packet ) );
800 count = ( 1 /* setup stage */ +
801 ( ( len + ring->mtu - 1 ) / ring->mtu ) /* data stage */ +
802 1 /* status stage */ );
804 /* Enqueue transfer */
805 if ( ( rc = uhci_enqueue ( ring, iobuf, count ) ) != 0 )
808 /* Describe setup stage */
809 packet = iobuf->data;
810 ring->control &= ~UHCI_CONTROL_TOGGLE;
811 uhci_describe ( ring, packet, sizeof ( *packet ), USB_PID_SETUP );
812 iob_pull ( iobuf, sizeof ( *packet ) );
814 /* Describe data stage, if applicable */
815 assert ( ring->control & UHCI_CONTROL_TOGGLE );
816 input = ( packet->request & cpu_to_le16 ( USB_DIR_IN ) );
818 uhci_describe ( ring, iobuf->data, len,
819 ( input ? USB_PID_IN : USB_PID_OUT ) );
822 /* Describe status stage */
823 ring->control |= UHCI_CONTROL_TOGGLE;
824 uhci_describe ( ring, NULL, 0,
825 ( ( len && input ) ? USB_PID_OUT : USB_PID_IN ) );
828 assert ( ring->end->prod == count );
834 * Enqueue stream transfer
837 * @v iobuf I/O buffer
838 * @v terminate Terminate using a short packet
839 * @ret rc Return status code
841 static int uhci_endpoint_stream ( struct usb_endpoint *ep,
842 struct io_buffer *iobuf, int terminate ) {
843 struct uhci_endpoint *endpoint = usb_endpoint_get_hostdata ( ep );
844 struct uhci_ring *ring = &endpoint->ring;
851 /* Calculate number of descriptors */
852 len = iob_len ( iobuf );
853 zlp = ( terminate && ( ( len & ( ring->mtu - 1 ) ) == 0 ) );
854 count = ( ( ( len + ring->mtu - 1 ) / ring->mtu ) + ( zlp ? 1 : 0 ) );
856 /* Enqueue transfer */
857 if ( ( rc = uhci_enqueue ( ring, iobuf, count ) ) != 0 )
860 /* Describe data packet */
861 input = ( ep->address & USB_DIR_IN );
862 uhci_describe ( ring, iobuf->data, len,
863 ( input ? USB_PID_IN : USB_PID_OUT ) );
865 /* Describe zero-length packet, if applicable */
867 uhci_describe ( ring, NULL, 0, USB_PID_OUT );
870 assert ( ring->end->prod == count );
876 * Check if transfer is a message transfer
878 * @v xfer UHCI transfer
879 * @ret is_message Transfer is a message transfer
881 static inline int uhci_is_message ( struct uhci_transfer *xfer ) {
882 struct uhci_transfer_descriptor *desc = &xfer->desc[0];
884 return ( ( desc->control & cpu_to_le32 ( UHCI_CONTROL_PID_MASK ) ) ==
885 cpu_to_le32 ( UHCI_CONTROL_PID ( USB_PID_SETUP ) ) );
889 * Poll for completions
891 * @v endpoint Endpoint
893 static void uhci_endpoint_poll ( struct uhci_endpoint *endpoint ) {
894 struct uhci_ring *ring = &endpoint->ring;
895 struct uhci_device *uhci = endpoint->uhci;
896 struct usb_endpoint *ep = endpoint->ep;
897 struct usb_device *usb = ep->usb;
898 struct uhci_transfer *xfer;
899 struct uhci_transfer_descriptor *desc;
900 struct io_buffer *iobuf;
908 /* Consume all completed descriptors */
909 while ( uhci_ring_fill ( ring ) ) {
911 /* Stop if we reach an uncompleted descriptor */
912 index = ( ring->cons % UHCI_RING_COUNT );
913 xfer = ring->xfer[index];
914 assert ( xfer != NULL );
915 assert ( xfer->cons < xfer->prod );
916 desc = &xfer->desc[xfer->cons];
918 if ( desc->status & UHCI_STATUS_ACTIVE )
920 control = le32_to_cpu ( desc->control );
921 actual = le16_to_cpu ( desc->actual );
923 /* Update data length, if applicable */
924 if ( UHCI_DATA_PACKET ( control ) )
925 xfer->len += UHCI_ACTUAL_LEN ( actual );
927 /* If we have encountered an error, then deactivate
928 * the queue head (to prevent further hardware
929 * accesses to this transfer), consume the transfer,
930 * and report the error to the USB core.
932 if ( desc->status & UHCI_STATUS_STALLED ) {
933 DBGC ( uhci, "UHCI %s %s completion %d.%d failed "
934 "(status %02x)\n", usb->name,
935 usb_endpoint_name ( ep ), index,
936 xfer->cons, desc->status );
937 link = UHCI_LINK_TERMINATE;
938 ring->head->current = cpu_to_le32 ( link );
940 iobuf = uhci_dequeue ( ring );
941 usb_complete_err ( ep, iobuf, -EIO );
945 /* Consume this descriptor */
948 /* Check for short packets */
949 if ( UHCI_SHORT_PACKET ( control, actual ) ) {
952 assert ( desc->flags & UHCI_FL_SPD );
953 link = virt_to_phys ( desc );
954 assert ( ( le32_to_cpu ( ring->head->current ) &
955 ~( UHCI_ALIGN - 1 ) ) == link );
957 /* If this is a message transfer, then restart
958 * at the status stage.
960 if ( uhci_is_message ( xfer ) ) {
961 xfer->cons = ( xfer->prod - 1 );
962 link = virt_to_phys ( &xfer->desc[xfer->cons] );
963 ring->head->current = cpu_to_le32 ( link );
967 /* Otherwise, this is a stream transfer.
968 * First, prevent further hardware access to
971 link = UHCI_LINK_TERMINATE;
972 ring->head->current = cpu_to_le32 ( link );
975 /* Determine expected data toggle for next descriptor */
976 toggle = ( ( control ^ UHCI_CONTROL_TOGGLE ) &
977 UHCI_CONTROL_TOGGLE );
979 /* Consume this transfer */
981 iobuf = uhci_dequeue ( ring );
983 /* Update packet length */
984 assert ( len <= iob_len ( iobuf ) );
985 iob_unput ( iobuf, ( iob_len ( iobuf ) - len ) );
988 uhci_restart ( ring, toggle );
990 } else if ( xfer->cons == xfer->prod ) {
992 /* Completed a transfer: consume it */
994 iobuf = uhci_dequeue ( ring );
995 assert ( len == iob_len ( iobuf ) );
999 /* Not a short packet and not yet complete:
1000 * continue processing.
1005 /* Report completion to USB core */
1006 usb_complete ( ep, iobuf );
1010 /******************************************************************************
1014 ******************************************************************************
1021 * @ret rc Return status code
1023 static int uhci_device_open ( struct usb_device *usb ) {
1024 struct uhci_device *uhci = usb_bus_get_hostdata ( usb->port->hub->bus );
1026 usb_set_hostdata ( usb, uhci );
1035 static void uhci_device_close ( struct usb_device *usb ) {
1036 struct uhci_device *uhci = usb_get_hostdata ( usb );
1037 struct usb_bus *bus = uhci->bus;
1039 /* Free device address, if assigned */
1041 usb_free_address ( bus, usb->address );
1045 * Assign device address
1048 * @ret rc Return status code
1050 static int uhci_device_address ( struct usb_device *usb ) {
1051 struct uhci_device *uhci = usb_get_hostdata ( usb );
1052 struct usb_bus *bus = uhci->bus;
1053 struct usb_endpoint *ep0 = usb_endpoint ( usb, USB_EP0_ADDRESS );
1054 struct uhci_endpoint *endpoint0 = usb_endpoint_get_hostdata ( ep0 );
1059 assert ( usb->address == 0 );
1060 assert ( ep0 != NULL );
1062 /* Allocate device address */
1063 address = usb_alloc_address ( bus );
1064 if ( address < 0 ) {
1066 DBGC ( uhci, "UHCI %s could not allocate address: %s\n",
1067 usb->name, strerror ( rc ) );
1068 goto err_alloc_address;
1072 if ( ( rc = usb_set_address ( usb, address ) ) != 0 )
1073 goto err_set_address;
1075 /* Update device address */
1076 usb->address = address;
1077 endpoint0->ring.control |= UHCI_CONTROL_DEVICE ( address );
1082 usb_free_address ( bus, address );
1087 /******************************************************************************
1091 ******************************************************************************
1098 * @ret rc Return status code
1100 static int uhci_hub_open ( struct usb_hub *hub __unused ) {
1111 static void uhci_hub_close ( struct usb_hub *hub __unused ) {
1116 /******************************************************************************
1118 * Root hub operations
1120 ******************************************************************************
1127 * @ret rc Return status code
1129 static int uhci_root_open ( struct usb_hub *hub ) {
1130 struct usb_bus *bus = hub->bus;
1131 struct uhci_device *uhci = usb_bus_get_hostdata ( bus );
1133 /* Record hub driver private data */
1134 usb_hub_set_drvdata ( hub, uhci );
/**
 * Close root hub
 *
 * @v hub		USB hub
 */
static void uhci_root_close ( struct usb_hub *hub ) {

	/* Clear hub driver private data */
	usb_hub_set_drvdata ( hub, NULL );
}
1155 * @ret rc Return status code
1157 static int uhci_root_enable ( struct usb_hub *hub, struct usb_port *port ) {
1158 struct uhci_device *uhci = usb_hub_get_drvdata ( hub );
1163 portsc = inw ( uhci->regs + UHCI_PORTSC ( port->address ) );
1164 portsc |= UHCI_PORTSC_PR;
1165 outw ( portsc, uhci->regs + UHCI_PORTSC ( port->address ) );
1166 mdelay ( USB_RESET_DELAY_MS );
1167 portsc &= ~UHCI_PORTSC_PR;
1168 outw ( portsc, uhci->regs + UHCI_PORTSC ( port->address ) );
1169 mdelay ( USB_RESET_RECOVER_DELAY_MS );
1172 portsc |= UHCI_PORTSC_PED;
1173 outw ( portsc, uhci->regs + UHCI_PORTSC ( port->address ) );
1174 mdelay ( USB_RESET_RECOVER_DELAY_MS );
1176 /* Wait for port to become enabled */
1177 for ( i = 0 ; i < UHCI_PORT_ENABLE_MAX_WAIT_MS ; i++ ) {
1179 /* Check port status */
1180 portsc = inw ( uhci->regs + UHCI_PORTSC ( port->address ) );
1181 if ( portsc & UHCI_PORTSC_PED )
1188 DBGC ( uhci, "UHCI %s-%d timed out waiting for port to enable "
1189 "(status %04x)\n", uhci->name, port->address, portsc );
1198 * @ret rc Return status code
1200 static int uhci_root_disable ( struct usb_hub *hub, struct usb_port *port ) {
1201 struct uhci_device *uhci = usb_hub_get_drvdata ( hub );
1205 portsc = inw ( uhci->regs + UHCI_PORTSC ( port->address ) );
1206 portsc &= ~UHCI_PORTSC_PED;
1207 outw ( portsc, uhci->regs + UHCI_PORTSC ( port->address ) );
1213 * Update root hub port speed
1217 * @ret rc Return status code
1219 static int uhci_root_speed ( struct usb_hub *hub, struct usb_port *port ) {
1220 struct uhci_device *uhci = usb_hub_get_drvdata ( hub );
1221 struct pci_device pci;
1225 /* Read port status */
1226 portsc = inw ( uhci->regs + UHCI_PORTSC ( port->address ) );
1227 if ( ! ( portsc & UHCI_PORTSC_CCS ) ) {
1228 /* Port not connected */
1229 speed = USB_SPEED_NONE;
1230 } else if ( uhci->companion &&
1231 ! find_usb_bus_by_location ( BUS_TYPE_PCI,
1232 uhci->companion ) ) {
1233 /* Defer connection detection until companion
1234 * controller has been enumerated.
1236 pci_init ( &pci, uhci->companion );
1237 DBGC ( uhci, "UHCI %s-%d deferring for companion " PCI_FMT "\n",
1238 uhci->name, port->address, PCI_ARGS ( &pci ) );
1239 speed = USB_SPEED_NONE;
1240 } else if ( portsc & UHCI_PORTSC_LS ) {
1241 /* Low-speed device */
1242 speed = USB_SPEED_LOW;
1244 /* Full-speed device */
1245 speed = USB_SPEED_FULL;
1247 port->speed = speed;
1249 /* Record disconnections and clear changes */
1250 port->disconnected |= ( portsc & UHCI_PORTSC_CSC );
1251 outw ( portsc, uhci->regs + UHCI_PORTSC ( port->address ) );
1257 * Clear transaction translator buffer
1261 * @v ep USB endpoint
1262 * @ret rc Return status code
1264 static int uhci_root_clear_tt ( struct usb_hub *hub, struct usb_port *port,
1265 struct usb_endpoint *ep ) {
1266 struct uhci_device *uhci = usb_hub_get_drvdata ( hub );
1268 /* Should never be called; this is a root hub */
1269 DBGC ( uhci, "UHCI %s-%d nonsensical CLEAR_TT for %s %s\n", uhci->name,
1270 port->address, ep->usb->name, usb_endpoint_name ( ep ) );
1276 * Poll for port status changes
1281 static void uhci_root_poll ( struct usb_hub *hub, struct usb_port *port ) {
1282 struct uhci_device *uhci = usb_hub_get_drvdata ( hub );
1286 /* Do nothing unless something has changed */
1287 portsc = inw ( uhci->regs + UHCI_PORTSC ( port->address ) );
1288 change = ( portsc & UHCI_PORTSC_CHANGE );
1292 /* Record disconnections and clear changes */
1293 port->disconnected |= ( portsc & UHCI_PORTSC_CSC );
1294 outw ( portsc, uhci->regs + UHCI_PORTSC ( port->address ) );
1296 /* Report port status change */
1297 usb_port_changed ( port );
1300 /******************************************************************************
1304 ******************************************************************************
1311 * @ret rc Return status code
1313 static int uhci_bus_open ( struct usb_bus *bus ) {
1314 struct uhci_device *uhci = usb_bus_get_hostdata ( bus );
1318 assert ( list_empty ( &uhci->async ) );
1319 assert ( list_empty ( &uhci->periodic ) );
1321 /* Allocate and initialise asynchronous queue head */
1322 uhci->head = malloc_dma ( sizeof ( *uhci->head ), UHCI_ALIGN );
1323 if ( ! uhci->head ) {
1325 goto err_alloc_head;
1327 if ( ( rc = uhci_reachable ( uhci->head, sizeof ( *uhci->head ) ) ) !=0)
1328 goto err_unreachable_head;
1329 memset ( uhci->head, 0, sizeof ( *uhci->head ) );
1330 uhci->head->current = cpu_to_le32 ( UHCI_LINK_TERMINATE );
1331 uhci_async_schedule ( uhci );
1333 /* Allocate periodic frame list */
1334 uhci->frame = malloc_dma ( sizeof ( *uhci->frame ),
1335 sizeof ( *uhci->frame ) );
1336 if ( ! uhci->frame ) {
1338 goto err_alloc_frame;
1340 if ( ( rc = uhci_reachable ( uhci->frame,
1341 sizeof ( *uhci->frame ) ) ) != 0 )
1342 goto err_unreachable_frame;
1343 uhci_periodic_schedule ( uhci );
1344 outl ( virt_to_phys ( uhci->frame ), uhci->regs + UHCI_FLBASEADD );
1346 /* Start controller */
1352 err_unreachable_frame:
1353 free_dma ( uhci->frame, sizeof ( *uhci->frame ) );
1355 err_unreachable_head:
1356 free_dma ( uhci->head, sizeof ( *uhci->head ) );
1366 static void uhci_bus_close ( struct usb_bus *bus ) {
1367 struct uhci_device *uhci = usb_bus_get_hostdata ( bus );
1370 assert ( list_empty ( &uhci->async ) );
1371 assert ( list_empty ( &uhci->periodic ) );
1373 /* Stop controller */
1376 /* Free periodic frame list */
1377 free_dma ( uhci->frame, sizeof ( *uhci->frame ) );
1379 /* Free asynchronous schedule */
1380 free_dma ( uhci->head, sizeof ( *uhci->head ) );
1388 static void uhci_bus_poll ( struct usb_bus *bus ) {
1389 struct uhci_device *uhci = usb_bus_get_hostdata ( bus );
1390 struct usb_hub *hub = bus->hub;
1391 struct uhci_endpoint *endpoint;
1394 /* UHCI defers interrupts (including short packet detection)
1395 * until the end of the frame. This can result in bulk IN
1396 * endpoints remaining halted for much of the time, waiting
1397 * for software action to reset the data toggles. We
1398 * therefore ignore USBSTS and unconditionally poll all
1399 * endpoints for completed transfer descriptors.
1401 * As with EHCI, we trust that completion handlers are minimal
1402 * and will not do anything that could plausibly affect the
1403 * endpoint list itself.
1405 list_for_each_entry ( endpoint, &uhci->endpoints, list )
1406 uhci_endpoint_poll ( endpoint );
1408 /* UHCI provides no single bit to indicate that a port status
1409 * change has occurred. We therefore unconditionally iterate
1410 * over all ports looking for status changes.
1412 for ( i = 1 ; i <= UHCI_PORTS ; i++ )
1413 uhci_root_poll ( hub, usb_port ( hub, i ) );
1416 /******************************************************************************
1420 ******************************************************************************
1423 /** USB host controller operations */
1424 static struct usb_host_operations uhci_operations = {
1426 .open = uhci_endpoint_open,
1427 .close = uhci_endpoint_close,
1428 .reset = uhci_endpoint_reset,
1429 .mtu = uhci_endpoint_mtu,
1430 .message = uhci_endpoint_message,
1431 .stream = uhci_endpoint_stream,
1434 .open = uhci_device_open,
1435 .close = uhci_device_close,
1436 .address = uhci_device_address,
1439 .open = uhci_bus_open,
1440 .close = uhci_bus_close,
1441 .poll = uhci_bus_poll,
1444 .open = uhci_hub_open,
1445 .close = uhci_hub_close,
1448 .open = uhci_root_open,
1449 .close = uhci_root_close,
1450 .enable = uhci_root_enable,
1451 .disable = uhci_root_disable,
1452 .speed = uhci_root_speed,
1453 .clear_tt = uhci_root_clear_tt,
1458 * Locate EHCI companion controller (when no EHCI support is present)
1461 * @ret busdevfn EHCI companion controller bus:dev.fn (if any)
1463 __weak unsigned int ehci_companion ( struct pci_device *pci __unused ) {
1471 * @ret rc Return status code
1473 static int uhci_probe ( struct pci_device *pci ) {
1474 struct uhci_device *uhci;
1475 struct usb_port *port;
1479 /* Allocate and initialise structure */
1480 uhci = zalloc ( sizeof ( *uhci ) );
1485 uhci->name = pci->dev.name;
1486 INIT_LIST_HEAD ( &uhci->endpoints );
1487 INIT_LIST_HEAD ( &uhci->async );
1488 INIT_LIST_HEAD ( &uhci->periodic );
1490 /* Fix up PCI device */
1491 adjust_pci_device ( pci );
1493 /* Identify EHCI companion controller, if any */
1494 uhci->companion = ehci_companion ( pci );
1496 /* Claim ownership from BIOS. (There is no release mechanism
1499 pci_write_config_word ( pci, UHCI_USBLEGSUP, UHCI_USBLEGSUP_DEFAULT );
1502 uhci->regs = pci->ioaddr;
1503 if ( ! uhci->regs ) {
1509 if ( ( rc = uhci_reset ( uhci ) ) != 0 )
1512 /* Allocate USB bus */
1513 uhci->bus = alloc_usb_bus ( &pci->dev, UHCI_PORTS, UHCI_MTU,
1515 if ( ! uhci->bus ) {
1519 usb_bus_set_hostdata ( uhci->bus, uhci );
1520 usb_hub_set_drvdata ( uhci->bus->hub, uhci );
1522 /* Set port protocols */
1523 for ( i = 1 ; i <= UHCI_PORTS ; i++ ) {
1524 port = usb_port ( uhci->bus->hub, i );
1525 port->protocol = USB_PROTO_2_0;
1528 /* Register USB bus */
1529 if ( ( rc = register_usb_bus ( uhci->bus ) ) != 0 )
1532 pci_set_drvdata ( pci, uhci );
1535 unregister_usb_bus ( uhci->bus );
1537 free_usb_bus ( uhci->bus );
1539 uhci_reset ( uhci );
1552 static void uhci_remove ( struct pci_device *pci ) {
1553 struct uhci_device *uhci = pci_get_drvdata ( pci );
1554 struct usb_bus *bus = uhci->bus;
1556 unregister_usb_bus ( bus );
1557 assert ( list_empty ( &uhci->async ) );
1558 assert ( list_empty ( &uhci->periodic ) );
1559 free_usb_bus ( bus );
1560 uhci_reset ( uhci );
1564 /** UHCI PCI device IDs */
1565 static struct pci_device_id uhci_ids[] = {
1566 PCI_ROM ( 0xffff, 0xffff, "uhci", "UHCI", 0 ),
1569 /** UHCI PCI driver */
1570 struct pci_driver uhci_driver __pci_driver = {
1572 .id_count = ( sizeof ( uhci_ids ) / sizeof ( uhci_ids[0] ) ),
1573 .class = PCI_CLASS_ID ( PCI_CLASS_SERIAL, PCI_CLASS_SERIAL_USB,
1574 PCI_CLASS_SERIAL_USB_UHCI ),
1575 .probe = uhci_probe,
1576 .remove = uhci_remove,