/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/errno.h>
#include "ozprotocol.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
/*
 * Counts the uncompleted isoc frames submitted to the network card.
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
/* Application handler functions. */
static const struct oz_app_if g_app_if[OZ_NB_APPS] = {
                .start = oz_usb_start,
                .heartbeat = oz_usb_heartbeat,
                .farewell = oz_usb_farewell,
                .start = oz_cdev_start,
/*
 * Context: softirq or process
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned int state)
                oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
        case OZ_PD_S_CONNECTED:
                oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
                oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
                oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
/*
 * Context: softirq or process
 */
void oz_pd_get(struct oz_pd *pd)
{
        atomic_inc(&pd->ref_count);
}
/*
 * Context: softirq or process
 */
void oz_pd_put(struct oz_pd *pd)
        if (atomic_dec_and_test(&pd->ref_count))
/*
 * Context: softirq-serialized
 */
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
        pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
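        /*
         * Start with a reference count of 2: one reference for the caller and
         * (presumably) one held on the protocol's behalf until the PD is
         * finally destroyed; see oz_pd_get()/oz_pd_put().
         */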
        atomic_set(&pd->ref_count, 2);
        for (i = 0; i < OZ_NB_APPS; i++)
                spin_lock_init(&pd->app_lock[i]);
        pd->last_rx_pkt_num = 0xffffffff;
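        /* Presumably a sentinel meaning "no packet received from this PD yet". */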
        oz_pd_set_state(pd, OZ_PD_S_IDLE);
        pd->max_tx_size = OZ_MAX_TX_SIZE;
        ether_addr_copy(pd->mac_addr, mac_addr);
        oz_elt_buf_init(&pd->elt_buff);
        spin_lock_init(&pd->tx_frame_lock);
        INIT_LIST_HEAD(&pd->tx_queue);
        INIT_LIST_HEAD(&pd->farewell_list);
        pd->last_sent_frame = &pd->tx_queue;
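        /*
         * last_sent_frame points at the queue head itself, i.e. nothing has
         * been sent yet; oz_send_next_queued_frame() advances from here.
         */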
        spin_lock_init(&pd->stream_lock);
        INIT_LIST_HEAD(&pd->stream_list);
        tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
                     (unsigned long)pd);
        tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
                     (unsigned long)pd);
        hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        pd->heartbeat.function = oz_pd_heartbeat_event;
        pd->timeout.function = oz_pd_timeout_event;
/*
 * Context: softirq or process
 */
static void oz_pd_free(struct work_struct *work)
        struct list_head *e, *n;
        struct oz_pd *pd;

        pd = container_of(work, struct oz_pd, workitem);
        oz_pd_dbg(pd, ON, "Destroying PD\n");
        /* Disable timer tasklets. */
        tasklet_kill(&pd->heartbeat_tasklet);
        tasklet_kill(&pd->timeout_tasklet);
        /* Free streams, queued tx frames and farewells. */

        list_for_each_safe(e, n, &pd->stream_list)
                oz_isoc_stream_free(list_entry(e, struct oz_isoc_stream, link));

        list_for_each_safe(e, n, &pd->tx_queue) {
                struct oz_tx_frame *f = list_entry(e, struct oz_tx_frame, link);
                oz_retire_frame(pd, f);
        }

        oz_elt_buf_term(&pd->elt_buff);

        list_for_each_safe(e, n, &pd->farewell_list)
                kfree(list_entry(e, struct oz_farewell, link));

        dev_put(pd->net_dev);
/*
 * Context: softirq or process
 */
void oz_pd_destroy(struct oz_pd *pd)
        if (hrtimer_active(&pd->timeout))
                hrtimer_cancel(&pd->timeout);
        if (hrtimer_active(&pd->heartbeat))
                hrtimer_cancel(&pd->heartbeat);
        INIT_WORK(&pd->workitem, oz_pd_free);
        if (!schedule_work(&pd->workitem))
                oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
/*
 * Context: softirq-serialized
 */
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
        oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
        for (i = 0; i < OZ_NB_APPS; i++) {
                if (g_app_if[i].start && (apps & (1 << i))) {
                        if (g_app_if[i].start(pd, resume)) {
                                oz_pd_dbg(pd, ON,
                                          "Unable to start service %d\n", i);
                        spin_lock_bh(&g_polling_lock);
                        pd->total_apps |= (1 << i);
                        pd->paused_apps &= ~(1 << i);
                        spin_unlock_bh(&g_polling_lock);
/*
 * Context: softirq or process
 */
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
        oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
        for (i = 0; i < OZ_NB_APPS; i++) {
                if (g_app_if[i].stop && (apps & (1 << i))) {
                        spin_lock_bh(&g_polling_lock);
                        if (pause) {
                                pd->paused_apps |= (1 << i);
                        } else {
                                pd->total_apps &= ~(1 << i);
                                pd->paused_apps &= ~(1 << i);
                        }
                        spin_unlock_bh(&g_polling_lock);
                        g_app_if[i].stop(pd, pause);
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
        for (i = 0; i < OZ_NB_APPS; i++) {
                if (g_app_if[i].heartbeat && (apps & (1 << i))) {
                        if (g_app_if[i].heartbeat(pd))
        if (!more && hrtimer_active(&pd->heartbeat))
                hrtimer_cancel(&pd->heartbeat);
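        /*
         * In ISOC "anytime" mode the heartbeat also flushes pending isoc
         * data, bounded by the count budget used in the loop below.
         */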
        if (pd->mode & OZ_F_ISOC_ANYTIME) {
                while (count-- && (oz_send_isoc_frame(pd) >= 0))
/*
 * Context: softirq or process
 */
void oz_pd_stop(struct oz_pd *pd)
        oz_dbg(ON, "%s: State = 0x%x\n", __func__, pd->state);
        oz_pd_indicate_farewells(pd);
        spin_lock_bh(&g_polling_lock);
        stop_apps = pd->total_apps;
        spin_unlock_bh(&g_polling_lock);
        oz_services_stop(pd, stop_apps, 0);
        spin_lock_bh(&g_polling_lock);
        oz_pd_set_state(pd, OZ_PD_S_STOPPED);
        /* Remove from the PD list. */
        spin_unlock_bh(&g_polling_lock);
        oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
int oz_pd_sleep(struct oz_pd *pd)
        spin_lock_bh(&g_polling_lock);
        if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
                spin_unlock_bh(&g_polling_lock);
        if (pd->keep_alive && pd->session_id)
                oz_pd_set_state(pd, OZ_PD_S_SLEEP);
        stop_apps = pd->total_apps;
        spin_unlock_bh(&g_polling_lock);
        oz_services_stop(pd, stop_apps, 1);
        oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
        struct oz_tx_frame *f;

        f = kmem_cache_alloc(oz_tx_frame_cache, GFP_ATOMIC);
                f->total_size = sizeof(struct oz_hdr);
                INIT_LIST_HEAD(&f->link);
                INIT_LIST_HEAD(&f->elt_list);
/*
 * Context: softirq or process
 */
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
        pd->nb_queued_isoc_frames--;
        list_del_init(&f->link);
        kmem_cache_free(oz_tx_frame_cache, f);
        oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
               pd->nb_queued_isoc_frames);
/*
 * Context: softirq or process
 */
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
        kmem_cache_free(oz_tx_frame_cache, f);
}
/*
 * Context: softirq-serialized
 */
static void oz_set_more_bit(struct sk_buff *skb)
{
        struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

        oz_hdr->control |= OZ_F_MORE_DATA;
}
/*
 * Context: softirq-serialized
 */
static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
        struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

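        /*
         * pd->trigger_pkt_num is the packet number taken from the PD's most
         * recent trigger; echoing it (masked) in last_pkt_num acts as an
         * acknowledgement of that packet.
         */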
        oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}
int oz_prepare_frame(struct oz_pd *pd, int empty)
        struct oz_tx_frame *f;

        if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
        if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
        if (!empty && !oz_are_elts_available(&pd->elt_buff))
        f = oz_tx_frame_alloc(pd);
        f->hdr.control =
                (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
        ++pd->last_tx_pkt_num;
        put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
        oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
                              pd->max_tx_size, &f->elt_list);
        spin_lock(&pd->tx_frame_lock);
        list_add_tail(&f->link, &pd->tx_queue);
        pd->nb_queued_frames++;
        spin_unlock(&pd->tx_frame_lock);
/*
 * Context: softirq-serialized
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
        struct sk_buff *skb;
        struct net_device *dev = pd->net_dev;
        struct oz_hdr *oz_hdr;
        struct oz_elt *elt;
        struct oz_elt_info *ei;

        /* Allocate skb with enough space for the lower layers as well
         * as the space we need.
         */
        skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
        /* Reserve the head room for lower layers. */
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        skb_reset_network_header(skb);
        skb->protocol = htons(OZ_ETHERTYPE);
        if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                            dev->dev_addr, skb->len) < 0)
        /* Push the tail to the end of the area we are going to copy to. */
        oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
        f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
        memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
        /* Copy the elements into the frame body. */
        elt = (struct oz_elt *)(oz_hdr + 1);
        list_for_each_entry(ei, &f->elt_list, link) {
                memcpy(elt, ei->data, ei->length);
                elt = oz_next_elt(elt);
        }
/*
 * Context: softirq or process
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
        struct oz_elt_info *ei, *n;

        list_for_each_entry_safe(ei, n, &f->elt_list, link) {
                list_del_init(&ei->link);
                if (ei->callback)
                        ei->callback(pd, ei->context);
                spin_lock_bh(&pd->elt_buff.lock);
                oz_elt_info_free(&pd->elt_buff, ei);
                spin_unlock_bh(&pd->elt_buff.lock);
        }
        oz_tx_frame_free(pd, f);
/*
 * Context: softirq-serialized
 */
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
        struct sk_buff *skb;
        struct oz_tx_frame *f;
        struct list_head *e;

        spin_lock(&pd->tx_frame_lock);
        e = pd->last_sent_frame->next;
        if (e == &pd->tx_queue) {
                spin_unlock(&pd->tx_frame_lock);
        f = list_entry(e, struct oz_tx_frame, link);

        if (f->skb != NULL) {
                skb = f->skb;
                oz_tx_isoc_free(pd, f);
                spin_unlock(&pd->tx_frame_lock);
                oz_set_more_bit(skb);
                oz_set_last_pkt_nb(pd, skb);
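                /*
                 * Cap the number of isoc skbs in flight;
                 * oz_isoc_destructor() decrements g_submitted_isoc again
                 * when the network stack frees the skb.
                 */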
                if ((int)atomic_read(&g_submitted_isoc) <
                    OZ_MAX_SUBMITTED_ISOC) {
                        if (dev_queue_xmit(skb) < 0) {
                                oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
                        atomic_inc(&g_submitted_isoc);
                        oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
                               pd->nb_queued_isoc_frames);
                oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
        pd->last_sent_frame = e;
        skb = oz_build_frame(pd, f);
        spin_unlock(&pd->tx_frame_lock);
        oz_set_more_bit(skb);
        oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
        if (dev_queue_xmit(skb) < 0)
/*
 * Context: softirq-serialized
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
        while (oz_prepare_frame(pd, 0) >= 0)
                ;
        switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {
        case OZ_F_ISOC_NO_ELTS: {
                backlog += pd->nb_queued_isoc_frames;
                if (backlog > OZ_MAX_SUBMITTED_ISOC)
                        backlog = OZ_MAX_SUBMITTED_ISOC;
        case OZ_NO_ELTS_ANYTIME: {
                if ((backlog <= 0) && (pd->isoc_sent == 0))
        if (oz_send_next_queued_frame(pd, backlog) < 0)
out:
        oz_prepare_frame(pd, 1);
        oz_send_next_queued_frame(pd, 0);
static int oz_send_isoc_frame(struct oz_pd *pd)
        struct sk_buff *skb;
        struct net_device *dev = pd->net_dev;
        struct oz_hdr *oz_hdr;
        struct oz_elt *elt;
        struct oz_elt_info *ei;
        int total_size = sizeof(struct oz_hdr);

        oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
                              pd->max_tx_size, &list);
        if (list_empty(&list))
        skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
                oz_dbg(ON, "Cannot alloc skb\n");
                oz_elt_info_free_chain(&pd->elt_buff, &list);
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        skb_reset_network_header(skb);
        skb->protocol = htons(OZ_ETHERTYPE);
        if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                            dev->dev_addr, skb->len) < 0) {
        oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
        oz_hdr->control = (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT) | OZ_F_ISOC;
        oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
        elt = (struct oz_elt *)(oz_hdr + 1);

        list_for_each_entry(ei, &list, link) {
                memcpy(elt, ei->data, ei->length);
                elt = oz_next_elt(elt);
        }
        oz_elt_info_free_chain(&pd->elt_buff, &list);
/*
 * Context: softirq-serialized
 */
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
        struct oz_tx_frame *f, *tmp = NULL;

        spin_lock(&pd->tx_frame_lock);
        list_for_each_entry(f, &pd->tx_queue, link) {
                pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
                diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
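                /*
                 * Packet numbers wrap, so compare modulo OZ_LAST_PN_MASK:
                 * frames at or before lpn (within half a cycle) are treated
                 * as acknowledged; the scan stops at the first frame beyond
                 * lpn or at a queued isoc frame (pkt_num == 0).
                 */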
                if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
                        break;
                oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
                       pkt_num, pd->nb_queued_frames);
                pd->nb_queued_frames--;
        list_cut_position(&list, &pd->tx_queue, &tmp->link);
        pd->last_sent_frame = &pd->tx_queue;
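        /*
         * The acknowledged frames have been cut onto a private list so they
         * can be retired outside the lock; resetting last_sent_frame to the
         * queue head makes transmission resume from the first frame that is
         * still queued.
         */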
        spin_unlock(&pd->tx_frame_lock);

        list_for_each_entry_safe(f, tmp, &list, link)
                oz_retire_frame(pd, f);
/*
 * Precondition: stream_lock must be held.
 */
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
        struct oz_isoc_stream *st;

        list_for_each_entry(st, &pd->stream_list, link) {
                if (st->ep_num == ep_num)
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
        struct oz_isoc_stream *st;

        st = kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
        spin_lock_bh(&pd->stream_lock);
        if (!pd_stream_find(pd, ep_num)) {
                list_add(&st->link, &pd->stream_list);
        spin_unlock_bh(&pd->stream_lock);
/*
 * Context: softirq or process
 */
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
        struct oz_isoc_stream *st;

        spin_lock_bh(&pd->stream_lock);
        st = pd_stream_find(pd, ep_num);
        spin_unlock_bh(&pd->stream_lock);
        oz_isoc_stream_free(st);
static void oz_isoc_destructor(struct sk_buff *skb)
{
        atomic_dec(&g_submitted_isoc);
}
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
        struct net_device *dev = pd->net_dev;
        struct oz_isoc_stream *st;
        struct sk_buff *skb = NULL;
        struct oz_hdr *oz_hdr = NULL;

        spin_lock_bh(&pd->stream_lock);
        st = pd_stream_find(pd, ep_num);
        nb_units = st->nb_units;
        spin_unlock_bh(&pd->stream_lock);
        /* Allocate enough space for a max-size frame. */
        skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
                        GFP_ATOMIC);
        /* Reserve the head room for lower layers. */
        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        skb_reset_network_header(skb);
        skb->protocol = htons(OZ_ETHERTYPE);
        /* For audio packets, set the priority to AC_VO. */
        size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
        oz_hdr = (struct oz_hdr *)skb_put(skb, size);
        memcpy(skb_put(skb, len), data, len);
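        /*
         * Units accumulate in the stream's skb; nothing is transmitted until
         * ms_per_isoc units have been gathered (see the branches below).
         */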
        if (++nb_units < pd->ms_per_isoc) {
                spin_lock_bh(&pd->stream_lock);
                st->nb_units = nb_units;
                spin_unlock_bh(&pd->stream_lock);
                struct oz_isoc_large iso;
                struct oz_hdr oz;

                spin_lock_bh(&pd->stream_lock);
                iso.frame_number = st->frame_num;
                st->frame_num += nb_units;
                spin_unlock_bh(&pd->stream_lock);
                oz.control =
                        (OZ_PROTOCOL_VERSION << OZ_VERSION_SHIFT) | OZ_F_ISOC;
                oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
                iso.endpoint = ep_num;
                iso.format = OZ_DATA_F_ISOC_LARGE;
                iso.ms_data = nb_units;
                memcpy(oz_hdr, &oz, sizeof(oz));
                memcpy(oz_hdr + 1, &iso, sizeof(iso));
                if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
                                    dev->dev_addr, skb->len) < 0)
        skb->destructor = oz_isoc_destructor;
        /* Queue for transmit if the mode is not ANYTIME. */
        if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
                struct oz_tx_frame *isoc_unit = NULL;
                int nb = pd->nb_queued_isoc_frames;
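                /*
                 * Bound latency: if isoc_latency frames are already queued,
                 * the oldest queued isoc frame is dropped before this one is
                 * added to the tx queue.
                 */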
                if (nb >= pd->isoc_latency) {
                        struct oz_tx_frame *f;

                        oz_dbg(TX_FRAMES, "Dropping ISOC Unit nb= %d\n",
                               nb);
                        spin_lock(&pd->tx_frame_lock);
                        list_for_each_entry(f, &pd->tx_queue, link) {
                                if (f->skb != NULL) {
                                        oz_tx_isoc_free(pd, f);
                                        break;
                                }
                        }
                        spin_unlock(&pd->tx_frame_lock);
                }
                isoc_unit = oz_tx_frame_alloc(pd);
                if (isoc_unit == NULL)
                isoc_unit->skb = skb;
                spin_lock_bh(&pd->tx_frame_lock);
                list_add_tail(&isoc_unit->link, &pd->tx_queue);
                pd->nb_queued_isoc_frames++;
                spin_unlock_bh(&pd->tx_frame_lock);
                oz_dbg(TX_FRAMES,
                       "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
                       pd->nb_queued_isoc_frames, pd->nb_queued_frames);
        /* In ANYTIME mode, transmit the unit immediately. */
        if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
                atomic_inc(&g_submitted_isoc);
                if (dev_queue_xmit(skb) < 0)
void oz_apps_init(void)
        for (i = 0; i < OZ_NB_APPS; i++) {
                if (g_app_if[i].init)
void oz_apps_term(void)
        /* Terminate all the apps. */
        for (i = 0; i < OZ_NB_APPS; i++) {
                if (g_app_if[i].term)
/*
 * Context: softirq-serialized
 */
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
        if (app_id < OZ_NB_APPS && g_app_if[app_id].rx)
                g_app_if[app_id].rx(pd, elt);
}
/*
 * Context: softirq or process
 */
void oz_pd_indicate_farewells(struct oz_pd *pd)
        struct oz_farewell *f;
        const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB];

        spin_lock_bh(&g_polling_lock);
        if (list_empty(&pd->farewell_list)) {
                spin_unlock_bh(&g_polling_lock);
        f = list_first_entry(&pd->farewell_list,
                             struct oz_farewell, link);
        spin_unlock_bh(&g_polling_lock);
        ai->farewell(pd, f->ep_num, f->report, f->len);