3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2015 Intel Corporation.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
21 * Copyright(c) 2015 Intel Corporation.
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions are met:
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the distribution.
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
51 #include <linux/spinlock.h>
52 #include <linux/seqlock.h>
53 #include <linux/netdevice.h>
54 #include <linux/moduleparam.h>
55 #include <linux/bitops.h>
56 #include <linux/timer.h>
57 #include <linux/vmalloc.h>
58 #include <linux/highmem.h>
67 /* must be a power of 2, >= 64 and <= 32768 */
68 #define SDMA_DESCQ_CNT 2048
69 #define SDMA_DESC_INTR 64
70 #define INVALID_TAIL 0xffff
72 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
73 module_param(sdma_descq_cnt, uint, S_IRUGO);
74 MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
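/*
 * Usage sketch (illustrative, not part of this file; assumes the hfi1
 * module name): the descq count can be overridden at load time, e.g.
 *
 *	modprobe hfi1 sdma_descq_cnt=4096
 *
 * Values that are not a power of 2 in [64, 32768] are silently replaced
 * with the default by sdma_get_descq_cnt() below.
 */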
76 static uint sdma_idle_cnt = 250;
77 module_param(sdma_idle_cnt, uint, S_IRUGO);
78 MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");
81 module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
82 MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");
84 static uint sdma_desct_intr = SDMA_DESC_INTR;
85 module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
86 MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before interrupt");
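/*
 * Note (illustrative): unlike the S_IRUGO-only parameters above, desct_intr
 * is S_IRUGO | S_IWUSR, so root can also change it at runtime through the
 * standard module parameter sysfs path, e.g.
 *
 *	echo 128 > /sys/module/hfi1/parameters/desct_intr
 */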
88 #define SDMA_WAIT_BATCH_SIZE 20
89 /* max wait time for an SDMA engine to indicate it has halted */
90 #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
91 /* all SDMA engine errors that cause a halt */
93 #define SD(name) SEND_DMA_##name
94 #define ALL_SDMA_ENG_HALT_ERRS \
95 (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
96 | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
97 | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
98 | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
99 | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
100 | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
101 | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
102 | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
103 | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
104 | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
105 | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
106 | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
107 | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
108 | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
109 | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
110 | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
111 | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
112 | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
114 /* sdma_sendctrl operations */
115 #define SDMA_SENDCTRL_OP_ENABLE (1U << 0)
116 #define SDMA_SENDCTRL_OP_INTENABLE (1U << 1)
117 #define SDMA_SENDCTRL_OP_HALT (1U << 2)
118 #define SDMA_SENDCTRL_OP_CLEANUP (1U << 3)
120 /* handle long defines */
121 #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
122 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
123 #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
124 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
126 static const char * const sdma_state_names[] = {
127 [sdma_state_s00_hw_down] = "s00_HwDown",
128 [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
129 [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
130 [sdma_state_s20_idle] = "s20_Idle",
131 [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
132 [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
133 [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
134 [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
135 [sdma_state_s80_hw_freeze] = "s80_HwFreeze",
136 [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
137 [sdma_state_s99_running] = "s99_Running",
140 static const char * const sdma_event_names[] = {
141 [sdma_event_e00_go_hw_down] = "e00_GoHwDown",
142 [sdma_event_e10_go_hw_start] = "e10_GoHwStart",
143 [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
144 [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
145 [sdma_event_e30_go_running] = "e30_GoRunning",
146 [sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
147 [sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
148 [sdma_event_e60_hw_halted] = "e60_HwHalted",
149 [sdma_event_e70_go_idle] = "e70_GoIdle",
150 [sdma_event_e80_hw_freeze] = "e80_HwFreeze",
151 [sdma_event_e81_hw_frozen] = "e81_HwFrozen",
152 [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
153 [sdma_event_e85_link_down] = "e85_LinkDown",
154 [sdma_event_e90_sw_halted] = "e90_SwHalted",
157 static const struct sdma_set_state_action sdma_action_table[] = {
158 [sdma_state_s00_hw_down] = {
159 .go_s99_running_tofalse = 1,
165 [sdma_state_s10_hw_start_up_halt_wait] = {
171 [sdma_state_s15_hw_start_up_clean_wait] = {
177 [sdma_state_s20_idle] = {
183 [sdma_state_s30_sw_clean_up_wait] = {
189 [sdma_state_s40_hw_clean_up_wait] = {
195 [sdma_state_s50_hw_halt_wait] = {
201 [sdma_state_s60_idle_halt_wait] = {
202 .go_s99_running_tofalse = 1,
208 [sdma_state_s80_hw_freeze] = {
214 [sdma_state_s82_freeze_sw_clean] = {
220 [sdma_state_s99_running] = {
225 .go_s99_running_totrue = 1,
229 #define SDMA_TAIL_UPDATE_THRESH 0x1F
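/*
 * Illustrative note: SDMA_TAIL_UPDATE_THRESH is used as a mask in
 * sdma_send_txlist(); with 0x1F the hardware tail CSR is written at most
 * once per 32 submitted requests:
 *
 *	if ((count & SDMA_TAIL_UPDATE_THRESH) == 0)
 *		sdma_update_tail(sde, tail);
 */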
231 /* declare all statics here rather than keep sorting */
232 static void sdma_complete(struct kref *);
233 static void sdma_finalput(struct sdma_state *);
234 static void sdma_get(struct sdma_state *);
235 static void sdma_hw_clean_up_task(unsigned long);
236 static void sdma_put(struct sdma_state *);
237 static void sdma_set_state(struct sdma_engine *, enum sdma_states);
238 static void sdma_start_hw_clean_up(struct sdma_engine *);
239 static void sdma_start_sw_clean_up(struct sdma_engine *);
240 static void sdma_sw_clean_up_task(unsigned long);
241 static void sdma_sendctrl(struct sdma_engine *, unsigned);
242 static void init_sdma_regs(struct sdma_engine *, u32, uint);
243 static void sdma_process_event(
244 struct sdma_engine *sde,
245 enum sdma_events event);
246 static void __sdma_process_event(
247 struct sdma_engine *sde,
248 enum sdma_events event);
249 static void dump_sdma_state(struct sdma_engine *sde);
250 static void sdma_make_progress(struct sdma_engine *sde, u64 status);
251 static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail);
252 static void sdma_flush_descq(struct sdma_engine *sde);
255 * sdma_state_name() - return state string from enum
258 static const char *sdma_state_name(enum sdma_states state)
260 return sdma_state_names[state];
263 static void sdma_get(struct sdma_state *ss)
268 static void sdma_complete(struct kref *kref)
270 struct sdma_state *ss =
271 container_of(kref, struct sdma_state, kref);
276 static void sdma_put(struct sdma_state *ss)
278 kref_put(&ss->kref, sdma_complete);
281 static void sdma_finalput(struct sdma_state *ss)
284 wait_for_completion(&ss->comp);
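/*
 * Lifetime sketch (illustrative): sde->state.kref follows the common
 * kref-plus-completion teardown pattern, assuming sdma_complete() signals
 * ss->comp (as its use with kref_put() suggests):
 *
 *	sdma_get(ss);		// state machine started, hold a reference
 *	...
 *	sdma_put(ss);		// drop reference; last put -> sdma_complete()
 *	sdma_finalput(ss);	// final put, then wait on ss->comp
 */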
287 static inline void write_sde_csr(
288 struct sdma_engine *sde,
292 write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
295 static inline u64 read_sde_csr(
296 struct sdma_engine *sde,
299 return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
303 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
304 * sdma engine 'sde' to drop to 0.
306 static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
309 u64 off = 8 * sde->this_idx;
310 struct hfi1_devdata *dd = sde->dd;
317 reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
319 reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
320 reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
323 /* counter is reset if occupancy count changes */
327 /* timed out - bounce the link */
328 dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
329 __func__, sde->this_idx, (u32)reg);
330 queue_work(dd->pport->hfi1_wq,
331 &dd->pport->link_bounce_work);
339 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
340 * and pause for credit return.
342 void sdma_wait(struct hfi1_devdata *dd)
346 for (i = 0; i < dd->num_sdma; i++) {
347 struct sdma_engine *sde = &dd->per_sdma[i];
349 sdma_wait_for_packet_egress(sde, 0);
353 static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
357 if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
360 reg &= SD(DESC_CNT_CNT_MASK);
361 reg <<= SD(DESC_CNT_CNT_SHIFT);
362 write_sde_csr(sde, SD(DESC_CNT), reg);
366 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
368 * Depending on timing there can be txreqs in two places:
369 * - in the descq ring
370 * - in the flush list
372 * To avoid ordering issues the descq ring needs to be flushed
373 * first followed by the flush list.
375 * This routine is called from two places
376 * - From a work queue item
377 * - Directly from the state machine just before setting the state to running
380 * Must be called with head_lock held
383 static void sdma_flush(struct sdma_engine *sde)
385 struct sdma_txreq *txp, *txp_next;
386 LIST_HEAD(flushlist);
389 /* flush from head to tail */
390 sdma_flush_descq(sde);
391 spin_lock_irqsave(&sde->flushlist_lock, flags);
392 /* copy flush list */
393 list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
394 list_del_init(&txp->list);
395 list_add_tail(&txp->list, &flushlist);
397 spin_unlock_irqrestore(&sde->flushlist_lock, flags);
398 /* flush from flush list */
399 list_for_each_entry_safe(txp, txp_next, &flushlist, list) {
401 /* protect against complete modifying */
402 struct iowait *wait = txp->wait;
404 list_del_init(&txp->list);
405 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
406 trace_hfi1_sdma_out_sn(sde, txp->sn);
407 if (WARN_ON_ONCE(sde->head_sn != txp->sn))
408 dd_dev_err(sde->dd, "expected %llu got %llu\n",
409 sde->head_sn, txp->sn);
412 sdma_txclean(sde->dd, txp);
414 drained = atomic_dec_and_test(&wait->sdma_busy);
416 (*txp->complete)(txp, SDMA_TXREQ_S_ABORTED, drained);
418 iowait_drain_wakeup(wait);
423 * Fields a work request for flushing the descq ring
426 * If the engine has been brought to running during
427 * the scheduling delay, the flush is ignored, assuming
428 * that the process of bringing the engine to running
429 * would have done this flush prior to going to running.
432 static void sdma_field_flush(struct work_struct *work)
435 struct sdma_engine *sde =
436 container_of(work, struct sdma_engine, flush_worker);
438 write_seqlock_irqsave(&sde->head_lock, flags);
439 if (!__sdma_running(sde))
441 write_sequnlock_irqrestore(&sde->head_lock, flags);
444 static void sdma_err_halt_wait(struct work_struct *work)
446 struct sdma_engine *sde = container_of(work, struct sdma_engine,
449 unsigned long timeout;
451 timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
453 statuscsr = read_sde_csr(sde, SD(STATUS));
454 statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
457 if (time_after(jiffies, timeout)) {
459 "SDMA engine %d - timeout waiting for engine to halt\n",
462 * Continue anyway. This could happen if there was
463 * an uncorrectable error in the wrong spot.
467 usleep_range(80, 120);
470 sdma_process_event(sde, sdma_event_e15_hw_halt_done);
473 static void sdma_start_err_halt_wait(struct sdma_engine *sde)
475 schedule_work(&sde->err_halt_worker);
479 static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
481 if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
484 struct hfi1_devdata *dd = sde->dd;
486 for (index = 0; index < dd->num_sdma; index++) {
487 struct sdma_engine *curr_sdma = &dd->per_sdma[index];
489 if (curr_sdma != sde)
490 curr_sdma->progress_check_head =
491 curr_sdma->descq_head;
494 "SDMA engine %d - check scheduled\n",
496 mod_timer(&sde->err_progress_check_timer, jiffies + 10);
500 static void sdma_err_progress_check(unsigned long data)
503 struct sdma_engine *sde = (struct sdma_engine *)data;
505 dd_dev_err(sde->dd, "SDE progress check event\n");
506 for (index = 0; index < sde->dd->num_sdma; index++) {
507 struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
510 /* check progress on each engine except the current one */
514 * We must lock interrupts when acquiring sde->lock,
515 * to avoid a deadlock if an interrupt triggers and spins on
516 * the same lock on the same CPU
518 spin_lock_irqsave(&curr_sde->tail_lock, flags);
519 write_seqlock(&curr_sde->head_lock);
521 /* skip non-running queues */
522 if (curr_sde->state.current_state != sdma_state_s99_running) {
523 write_sequnlock(&curr_sde->head_lock);
524 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
528 if ((curr_sde->descq_head != curr_sde->descq_tail) &&
529 (curr_sde->descq_head ==
530 curr_sde->progress_check_head))
531 __sdma_process_event(curr_sde,
532 sdma_event_e90_sw_halted);
533 write_sequnlock(&curr_sde->head_lock);
534 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
536 schedule_work(&sde->err_halt_worker);
539 static void sdma_hw_clean_up_task(unsigned long opaque)
541 struct sdma_engine *sde = (struct sdma_engine *) opaque;
545 #ifdef CONFIG_SDMA_VERBOSITY
546 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
547 sde->this_idx, slashstrip(__FILE__), __LINE__,
550 statuscsr = read_sde_csr(sde, SD(STATUS));
551 statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
557 sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
560 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
562 smp_read_barrier_depends(); /* see sdma_update_tail() */
563 return sde->tx_ring[sde->tx_head & sde->sdma_mask];
567 * flush ring for recovery
569 static void sdma_flush_descq(struct sdma_engine *sde)
573 struct sdma_txreq *txp = get_txhead(sde);
575 /* The reason for some of the complexity of this code is that
576 * not all descriptors have corresponding txps. So, we have to
577 * be able to skip over descs until we wander into the range of
578 * the next txp on the list.
580 head = sde->descq_head & sde->sdma_mask;
581 tail = sde->descq_tail & sde->sdma_mask;
582 while (head != tail) {
583 /* advance head, wrap if needed */
584 head = ++sde->descq_head & sde->sdma_mask;
585 /* if now past this txp's descs, do the callback */
586 if (txp && txp->next_descq_idx == head) {
588 /* protect against complete modifying */
589 struct iowait *wait = txp->wait;
591 /* remove from list */
592 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
594 drained = atomic_dec_and_test(&wait->sdma_busy);
595 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
596 trace_hfi1_sdma_out_sn(sde, txp->sn);
597 if (WARN_ON_ONCE(sde->head_sn != txp->sn))
598 dd_dev_err(sde->dd, "expected %llu got %llu\n",
599 sde->head_sn, txp->sn);
602 sdma_txclean(sde->dd, txp);
603 trace_hfi1_sdma_progress(sde, head, tail, txp);
607 SDMA_TXREQ_S_ABORTED,
610 iowait_drain_wakeup(wait);
611 /* see if there is another txp */
612 txp = get_txhead(sde);
617 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
620 static void sdma_sw_clean_up_task(unsigned long opaque)
622 struct sdma_engine *sde = (struct sdma_engine *) opaque;
625 spin_lock_irqsave(&sde->tail_lock, flags);
626 write_seqlock(&sde->head_lock);
629 * At this point, the following should always be true:
630 * - We are halted, so no more descriptors are getting retired.
631 * - We are not running, so no one is submitting new work.
632 * - Only we can send the e40_sw_cleaned, so we can't start
633 * running again until we say so. So, the active list and
634 * descq are ours to play with.
639 * In the error clean up sequence, software clean must be called
640 * before the hardware clean so we can use the hardware head in
641 * the progress routine. A hardware clean or SPC unfreeze will
642 * reset the hardware head.
644 * Process all retired requests. The progress routine will use the
645 * latest physical hardware head - we are not running so speed does
648 sdma_make_progress(sde, 0);
653 * Reset our notion of head and tail.
654 * Note that the HW registers have been reset via an earlier clean up.
659 sde->desc_avail = sdma_descq_freecnt(sde);
662 __sdma_process_event(sde, sdma_event_e40_sw_cleaned);
664 write_sequnlock(&sde->head_lock);
665 spin_unlock_irqrestore(&sde->tail_lock, flags);
668 static void sdma_sw_tear_down(struct sdma_engine *sde)
670 struct sdma_state *ss = &sde->state;
672 /* Releasing this reference means the state machine has stopped. */
675 /* stop waiting for all unfreeze events to complete */
676 atomic_set(&sde->dd->sdma_unfreeze_count, -1);
677 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
680 static void sdma_start_hw_clean_up(struct sdma_engine *sde)
682 tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
685 static void sdma_start_sw_clean_up(struct sdma_engine *sde)
687 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
690 static void sdma_set_state(struct sdma_engine *sde,
691 enum sdma_states next_state)
693 struct sdma_state *ss = &sde->state;
694 const struct sdma_set_state_action *action = sdma_action_table;
697 trace_hfi1_sdma_state(
699 sdma_state_names[ss->current_state],
700 sdma_state_names[next_state]);
702 /* debugging bookkeeping */
703 ss->previous_state = ss->current_state;
704 ss->previous_op = ss->current_op;
705 ss->current_state = next_state;
707 if (ss->previous_state != sdma_state_s99_running
708 && next_state == sdma_state_s99_running)
711 if (action[next_state].op_enable)
712 op |= SDMA_SENDCTRL_OP_ENABLE;
714 if (action[next_state].op_intenable)
715 op |= SDMA_SENDCTRL_OP_INTENABLE;
717 if (action[next_state].op_halt)
718 op |= SDMA_SENDCTRL_OP_HALT;
720 if (action[next_state].op_cleanup)
721 op |= SDMA_SENDCTRL_OP_CLEANUP;
723 if (action[next_state].go_s99_running_tofalse)
724 ss->go_s99_running = 0;
726 if (action[next_state].go_s99_running_totrue)
727 ss->go_s99_running = 1;
730 sdma_sendctrl(sde, ss->current_op);
734 * sdma_get_descq_cnt() - called when device probed
736 * Return a validated descq count.
738 * This is currently only used in the verbs initialization to build the tx request structures.
741 * This will probably be deleted in favor of a more scalable approach to modifying the descq count as needed.
745 u16 sdma_get_descq_cnt(void)
747 u16 count = sdma_descq_cnt;
750 return SDMA_DESCQ_CNT;
751 /* count must be a power of 2, at least 64 and at most 32768.
752 * Otherwise return default.
754 if (!is_power_of_2(count))
755 return SDMA_DESCQ_CNT;
756 if (count < 64 || count > 32768)
757 return SDMA_DESCQ_CNT;
762 * sdma_select_engine_vl() - select sdma engine
764 * @selector: a spreading factor
768 * This function returns an engine based on the selector and a vl. The
769 * mapping fields are protected by RCU.
771 struct sdma_engine *sdma_select_engine_vl(
772 struct hfi1_devdata *dd,
776 struct sdma_vl_map *m;
777 struct sdma_map_elem *e;
778 struct sdma_engine *rval;
784 m = rcu_dereference(dd->sdma_map);
789 e = m->map[vl & m->mask];
790 rval = e->sde[selector & e->mask];
793 trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
798 * sdma_select_engine_sc() - select sdma engine
800 * @selector: a spreading factor
804 * This function returns an engine based on the selector and an sc.
806 struct sdma_engine *sdma_select_engine_sc(
807 struct hfi1_devdata *dd,
811 u8 vl = sc_to_vlt(dd, sc5);
813 return sdma_select_engine_vl(dd, selector, vl);
817 * Free the indicated map struct
819 static void sdma_map_free(struct sdma_vl_map *m)
823 for (i = 0; m && i < m->actual_vls; i++)
829 * Handle RCU callback
831 static void sdma_map_rcu_callback(struct rcu_head *list)
833 struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);
839 * sdma_map_init - called when # vls change
842 * @num_vls: number of vls
843 * @vl_engines: per vl engine mapping (optional)
845 * This routine changes the mapping based on the number of vls.
847 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
848 * implies auto computing the loading and giving each VL a uniform
849 * distribution of engines per VL.
851 * The auto algorithm computes the sde_per_vl and the number of extra
852 * engines. Any extra engines are added from the last VL on down.
854 * rcu locking is used here to control access to the mapping fields.
856 * If either the num_vls or num_sdma are non-power of 2, the array sizes
857 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
858 * up to the next highest power of 2 and the first entry is reused
859 * in a round robin fashion.
861 * If an error occurs, the map change is not done and the mapping is left unchanged.
865 int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
868 int extra, sde_per_vl;
870 u8 lvl_engines[OPA_MAX_VLS];
871 struct sdma_vl_map *oldmap, *newmap;
873 if (!(dd->flags & HFI1_HAS_SEND_DMA))
877 /* truncate divide */
878 sde_per_vl = dd->num_sdma / num_vls;
880 extra = dd->num_sdma % num_vls;
881 vl_engines = lvl_engines;
882 /* add extras from last vl down */
883 for (i = num_vls - 1; i >= 0; i--, extra--)
884 vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
888 sizeof(struct sdma_vl_map) +
889 roundup_pow_of_two(num_vls) *
890 sizeof(struct sdma_map_elem *),
894 newmap->actual_vls = num_vls;
895 newmap->vls = roundup_pow_of_two(num_vls);
896 newmap->mask = (1 << ilog2(newmap->vls)) - 1;
897 for (i = 0; i < newmap->vls; i++) {
898 /* save for wrap around */
899 int first_engine = engine;
901 if (i < newmap->actual_vls) {
902 int sz = roundup_pow_of_two(vl_engines[i]);
904 /* only allocate once */
905 newmap->map[i] = kzalloc(
906 sizeof(struct sdma_map_elem) +
907 sz * sizeof(struct sdma_engine *),
911 newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
913 for (j = 0; j < sz; j++) {
914 newmap->map[i]->sde[j] =
915 &dd->per_sdma[engine];
916 if (++engine >= first_engine + vl_engines[i])
917 /* wrap back to first engine */
918 engine = first_engine;
921 /* just re-use entry without allocating */
922 newmap->map[i] = newmap->map[i % num_vls];
924 engine = first_engine + vl_engines[i];
926 /* newmap in hand, save old map */
927 spin_lock_irq(&dd->sde_map_lock);
928 oldmap = rcu_dereference_protected(dd->sdma_map,
929 lockdep_is_held(&dd->sde_map_lock));
932 rcu_assign_pointer(dd->sdma_map, newmap);
934 spin_unlock_irq(&dd->sde_map_lock);
935 /* success, free any old map after grace period */
937 call_rcu(&oldmap->list, sdma_map_rcu_callback);
940 /* free any partial allocation */
941 sdma_map_free(newmap);
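/*
 * Worked example (illustrative): with num_sdma = 16 and num_vls = 3, the
 * truncating divide gives sde_per_vl = 5 with extra = 1, so the per-VL
 * engine loading becomes { 5, 5, 6 } (extras added from the last VL down).
 * newmap->vls is rounded up to 4 and newmap->mask becomes 0x3, so map[3]
 * simply reuses map[3 % 3] = map[0] for the round-robin lookup done by
 * e = m->map[vl & m->mask] in sdma_select_engine_vl().
 */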
946 * Clean up allocated memory.
948 * This routine can be called regardless of the success of sdma_init()
951 static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
954 struct sdma_engine *sde;
956 if (dd->sdma_pad_dma) {
957 dma_free_coherent(&dd->pcidev->dev, 4,
958 (void *)dd->sdma_pad_dma,
960 dd->sdma_pad_dma = NULL;
961 dd->sdma_pad_phys = 0;
963 if (dd->sdma_heads_dma) {
964 dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
965 (void *)dd->sdma_heads_dma,
966 dd->sdma_heads_phys);
967 dd->sdma_heads_dma = NULL;
968 dd->sdma_heads_phys = 0;
970 for (i = 0; dd->per_sdma && i < num_engines; ++i) {
971 sde = &dd->per_sdma[i];
973 sde->head_dma = NULL;
979 sde->descq_cnt * sizeof(u64[2]),
986 kvfree(sde->tx_ring);
989 spin_lock_irq(&dd->sde_map_lock);
990 kfree(rcu_access_pointer(dd->sdma_map));
991 RCU_INIT_POINTER(dd->sdma_map, NULL);
992 spin_unlock_irq(&dd->sde_map_lock);
999 * sdma_init() - called when device probed
1001 * @port: port number (currently only zero)
1003 * sdma_init initializes the specified number of engines.
1005 * The code initializes each sde and its csrs. Interrupts
1006 * are not required to be enabled.
1009 * 0 - success, -errno on failure
1011 int sdma_init(struct hfi1_devdata *dd, u8 port)
1014 struct sdma_engine *sde;
1017 struct hfi1_pportdata *ppd = dd->pport + port;
1018 u32 per_sdma_credits;
1019 uint idle_cnt = sdma_idle_cnt;
1020 size_t num_engines = dd->chip_sdma_engines;
1022 if (!HFI1_CAP_IS_KSET(SDMA)) {
1023 HFI1_CAP_CLEAR(SDMA_AHG);
1027 /* can't exceed chip support */
1028 mod_num_sdma <= dd->chip_sdma_engines &&
1029 /* count must be >= vls */
1030 mod_num_sdma >= num_vls)
1031 num_engines = mod_num_sdma;
1033 dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
1034 dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
1035 dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
1036 dd->chip_sdma_mem_size);
1039 dd->chip_sdma_mem_size/(num_engines * SDMA_BLOCK_SIZE);
1041 /* set up freeze waitqueue */
1042 init_waitqueue_head(&dd->sdma_unfreeze_wq);
1043 atomic_set(&dd->sdma_unfreeze_count, 0);
1045 descq_cnt = sdma_get_descq_cnt();
1046 dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
1047 num_engines, descq_cnt);
1049 /* alloc memory for array of send engines */
1050 dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
1054 idle_cnt = ns_to_cclock(dd, idle_cnt);
1055 if (!sdma_desct_intr)
1056 sdma_desct_intr = SDMA_DESC_INTR;
1058 /* Allocate memory for SendDMA descriptor FIFOs */
1059 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1060 sde = &dd->per_sdma[this_idx];
1063 sde->this_idx = this_idx;
1064 sde->descq_cnt = descq_cnt;
1065 sde->desc_avail = sdma_descq_freecnt(sde);
1066 sde->sdma_shift = ilog2(descq_cnt);
1067 sde->sdma_mask = (1 << sde->sdma_shift) - 1;
1068 sde->descq_full_count = 0;
1070 /* Create a mask for all 3 chip interrupt sources */
1071 sde->imask = (u64)1 << (0*TXE_NUM_SDMA_ENGINES + this_idx)
1072 | (u64)1 << (1*TXE_NUM_SDMA_ENGINES + this_idx)
1073 | (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
1074 /* Create a mask specifically for sdma_idle */
1076 (u64)1 << (2*TXE_NUM_SDMA_ENGINES + this_idx);
1077 /* Create a mask specifically for sdma_progress */
1078 sde->progress_mask =
1079 (u64)1 << (TXE_NUM_SDMA_ENGINES + this_idx);
1080 spin_lock_init(&sde->tail_lock);
1081 seqlock_init(&sde->head_lock);
1082 spin_lock_init(&sde->senddmactrl_lock);
1083 spin_lock_init(&sde->flushlist_lock);
1084 /* ensure there is always a zero bit */
1085 sde->ahg_bits = 0xfffffffe00000000ULL;
1087 sdma_set_state(sde, sdma_state_s00_hw_down);
1089 /* set up reference counting */
1090 kref_init(&sde->state.kref);
1091 init_completion(&sde->state.comp);
1093 INIT_LIST_HEAD(&sde->flushlist);
1094 INIT_LIST_HEAD(&sde->dmawait);
1097 get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
1101 SDMA_DESC1_HEAD_TO_HOST_FLAG;
1104 SDMA_DESC1_INT_REQ_FLAG;
1106 tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
1107 (unsigned long)sde);
1109 tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
1110 (unsigned long)sde);
1111 INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
1112 INIT_WORK(&sde->flush_worker, sdma_field_flush);
1114 sde->progress_check_head = 0;
1116 setup_timer(&sde->err_progress_check_timer,
1117 sdma_err_progress_check, (unsigned long)sde);
1119 sde->descq = dma_zalloc_coherent(
1121 descq_cnt * sizeof(u64[2]),
1128 kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
1133 sizeof(struct sdma_txreq *) *
1139 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
1140 /* Allocate memory for DMA of head registers to memory */
1141 dd->sdma_heads_dma = dma_zalloc_coherent(
1143 dd->sdma_heads_size,
1144 &dd->sdma_heads_phys,
1147 if (!dd->sdma_heads_dma) {
1148 dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
1152 /* Allocate memory for pad */
1153 dd->sdma_pad_dma = dma_zalloc_coherent(
1159 if (!dd->sdma_pad_dma) {
1160 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
1164 /* assign each engine to a different cacheline and init registers */
1165 curr_head = (void *)dd->sdma_heads_dma;
1166 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1167 unsigned long phys_offset;
1169 sde = &dd->per_sdma[this_idx];
1171 sde->head_dma = curr_head;
1172 curr_head += L1_CACHE_BYTES;
1173 phys_offset = (unsigned long)sde->head_dma -
1174 (unsigned long)dd->sdma_heads_dma;
1175 sde->head_phys = dd->sdma_heads_phys + phys_offset;
1176 init_sdma_regs(sde, per_sdma_credits, idle_cnt);
1178 dd->flags |= HFI1_HAS_SEND_DMA;
1179 dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
1180 dd->num_sdma = num_engines;
1181 if (sdma_map_init(dd, port, ppd->vls_operational, NULL))
1183 dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
1187 sdma_clean(dd, num_engines);
1192 * sdma_all_running() - called when the link goes up
1195 * This routine moves all engines to the running state.
1197 void sdma_all_running(struct hfi1_devdata *dd)
1199 struct sdma_engine *sde;
1202 /* move all engines to running */
1203 for (i = 0; i < dd->num_sdma; ++i) {
1204 sde = &dd->per_sdma[i];
1205 sdma_process_event(sde, sdma_event_e30_go_running);
1210 * sdma_all_idle() - called when the link goes down
1213 * This routine moves all engines to the idle state.
1215 void sdma_all_idle(struct hfi1_devdata *dd)
1217 struct sdma_engine *sde;
1220 /* idle all engines */
1221 for (i = 0; i < dd->num_sdma; ++i) {
1222 sde = &dd->per_sdma[i];
1223 sdma_process_event(sde, sdma_event_e70_go_idle);
1228 * sdma_start() - called to kick off state processing for all engines
1231 * This routine is for kicking off the state processing for all required
1232 * sdma engines. Interrupts need to be working at this point.
1235 void sdma_start(struct hfi1_devdata *dd)
1238 struct sdma_engine *sde;
1240 /* kick off the engines state processing */
1241 for (i = 0; i < dd->num_sdma; ++i) {
1242 sde = &dd->per_sdma[i];
1243 sdma_process_event(sde, sdma_event_e10_go_hw_start);
1248 * sdma_exit() - used when module is removed
1251 void sdma_exit(struct hfi1_devdata *dd)
1254 struct sdma_engine *sde;
1256 for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
1259 sde = &dd->per_sdma[this_idx];
1260 if (!list_empty(&sde->dmawait))
1261 dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
1263 sdma_process_event(sde, sdma_event_e00_go_hw_down);
1265 del_timer_sync(&sde->err_progress_check_timer);
1268 * This waits for the state machine to exit so it is not
1269 * necessary to kill the sdma_sw_clean_up_task to make sure
1270 * it is not running.
1272 sdma_finalput(&sde->state);
1274 sdma_clean(dd, dd->num_sdma);
1278 * unmap the indicated descriptor
1280 static inline void sdma_unmap_desc(
1281 struct hfi1_devdata *dd,
1282 struct sdma_desc *descp)
1284 switch (sdma_mapping_type(descp)) {
1285 case SDMA_MAP_SINGLE:
1288 sdma_mapping_addr(descp),
1289 sdma_mapping_len(descp),
1295 sdma_mapping_addr(descp),
1296 sdma_mapping_len(descp),
1303 * return the mode as indicated by the first
1304 * descriptor in the tx.
1306 static inline u8 ahg_mode(struct sdma_txreq *tx)
1308 return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1309 >> SDMA_DESC1_HEADER_MODE_SHIFT;
1313 * sdma_txclean() - clean tx of mappings, descp *kmalloc's
1314 * @dd: hfi1_devdata for unmapping
1315 * @tx: tx request to clean
1317 * This is used in the progress routine to clean the tx or
1318 * by the ULP to toss an in-process tx build.
1320 * The code can be called multiple times without issue.
1324 struct hfi1_devdata *dd,
1325 struct sdma_txreq *tx)
1330 u8 skip = 0, mode = ahg_mode(tx);
1333 sdma_unmap_desc(dd, &tx->descp[0]);
1334 /* determine number of AHG descriptors to skip */
1335 if (mode > SDMA_AHG_APPLY_UPDATE1)
1337 for (i = 1 + skip; i < tx->num_desc; i++)
1338 sdma_unmap_desc(dd, &tx->descp[i]);
1341 kfree(tx->coalesce_buf);
1342 tx->coalesce_buf = NULL;
1343 /* kmalloc'ed descp */
1344 if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
1345 tx->desc_limit = ARRAY_SIZE(tx->descs);
1350 static inline u16 sdma_gethead(struct sdma_engine *sde)
1352 struct hfi1_devdata *dd = sde->dd;
1356 #ifdef CONFIG_SDMA_VERBOSITY
1357 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1358 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1362 use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
1363 (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
1364 hwhead = use_dmahead ?
1365 (u16) le64_to_cpu(*sde->head_dma) :
1366 (u16) read_sde_csr(sde, SD(HEAD));
1368 if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
1374 swhead = sde->descq_head & sde->sdma_mask;
1375 /* this code is really bad for cache line trading */
1376 swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1377 cnt = sde->descq_cnt;
1379 if (swhead < swtail)
1381 sane = (hwhead >= swhead) & (hwhead <= swtail);
1382 else if (swhead > swtail)
1383 /* wrapped around */
1384 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
1385 (hwhead <= swtail);
1386 else
1387 /* empty */
1388 sane = (hwhead == swhead);
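/*
 * Sanity-window example (illustrative): with cnt = 1024, swhead = 100 and
 * swtail = 200, any hwhead in [100, 200] is sane.  If instead swhead = 900
 * and swtail = 50 (ring wrapped), hwhead is sane in [900, 1023] or [0, 50].
 * An insane value means the DMA'ed head copy was stale, so the code below
 * retries once from the CSR.
 */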
1390 if (unlikely(!sane)) {
1391 dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
1393 use_dmahead ? "dma" : "kreg",
1394 hwhead, swhead, swtail, cnt);
1396 /* try one more time, using csr */
1400 /* proceed as if no progress */
1408 * This is called when there are send DMA descriptors that might be available.
1411 * This is called with head_lock held.
1413 static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
1415 struct iowait *wait, *nw;
1416 struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
1417 unsigned i, n = 0, seq;
1418 struct sdma_txreq *stx;
1419 struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
1421 #ifdef CONFIG_SDMA_VERBOSITY
1422 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
1423 slashstrip(__FILE__), __LINE__, __func__);
1424 dd_dev_err(sde->dd, "avail: %u\n", avail);
1428 seq = read_seqbegin(&dev->iowait_lock);
1429 if (!list_empty(&sde->dmawait)) {
1430 /* at least one item */
1431 write_seqlock(&dev->iowait_lock);
1432 /* Harvest waiters wanting DMA descriptors */
1433 list_for_each_entry_safe(
1442 if (n == ARRAY_SIZE(waits))
1444 if (!list_empty(&wait->tx_head)) {
1445 stx = list_first_entry(
1449 num_desc = stx->num_desc;
1451 if (num_desc > avail)
1454 list_del_init(&wait->list);
1457 write_sequnlock(&dev->iowait_lock);
1460 } while (read_seqretry(&dev->iowait_lock, seq));
1462 for (i = 0; i < n; i++)
1463 waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
1466 /* head_lock must be held */
1467 static void sdma_make_progress(struct sdma_engine *sde, u64 status)
1469 struct sdma_txreq *txp = NULL;
1471 u16 hwhead, swhead, swtail;
1472 int idle_check_done = 0;
1474 hwhead = sdma_gethead(sde);
1476 /* The reason for some of the complexity of this code is that
1477 * not all descriptors have corresponding txps. So, we have to
1478 * be able to skip over descs until we wander into the range of
1479 * the next txp on the list.
1483 txp = get_txhead(sde);
1484 swhead = sde->descq_head & sde->sdma_mask;
1485 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1486 while (swhead != hwhead) {
1487 /* advance head, wrap if needed */
1488 swhead = ++sde->descq_head & sde->sdma_mask;
1490 /* if now past this txp's descs, do the callback */
1491 if (txp && txp->next_descq_idx == swhead) {
1493 /* protect against complete modifying */
1494 struct iowait *wait = txp->wait;
1496 /* remove from list */
1497 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
1499 drained = atomic_dec_and_test(&wait->sdma_busy);
1500 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
1501 trace_hfi1_sdma_out_sn(sde, txp->sn);
1502 if (WARN_ON_ONCE(sde->head_sn != txp->sn))
1503 dd_dev_err(sde->dd, "expected %llu got %llu\n",
1504 sde->head_sn, txp->sn);
1507 sdma_txclean(sde->dd, txp);
1513 if (wait && drained)
1514 iowait_drain_wakeup(wait);
1515 /* see if there is another txp */
1516 txp = get_txhead(sde);
1518 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1523 * The SDMA idle interrupt is not guaranteed to be ordered with respect
1524 * to updates to the dma_head location in host memory. The head
1525 * value read might not be fully up to date. If there are pending
1526 * descriptors and the SDMA idle interrupt fired then read from the
1527 * CSR SDMA head instead to get the latest value from the hardware.
1528 * The hardware SDMA head should be read at most once in this invocation
1529 * of sdma_make_progress(), which is ensured by the idle_check_done flag.
1531 if ((status & sde->idle_mask) && !idle_check_done) {
1532 swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1533 if (swtail != hwhead) {
1534 hwhead = (u16)read_sde_csr(sde, SD(HEAD));
1535 idle_check_done = 1;
1540 sde->last_status = status;
1542 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
1546 * sdma_engine_interrupt() - interrupt handler for engine
1548 * @status: sdma interrupt reason
1550 * Status is a mask of the 3 possible interrupts for this engine. It will
1551 * contain bits _only_ for this SDMA engine. It will contain at least one
1552 * bit; it may contain more.
1554 void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
1556 trace_hfi1_sdma_engine_interrupt(sde, status);
1557 write_seqlock(&sde->head_lock);
1558 sdma_set_desc_cnt(sde, sdma_desct_intr);
1559 sdma_make_progress(sde, status);
1560 write_sequnlock(&sde->head_lock);
1564 * sdma_engine_error() - error handler for engine
1566 * @status: sdma interrupt reason
1568 void sdma_engine_error(struct sdma_engine *sde, u64 status)
1570 unsigned long flags;
1572 #ifdef CONFIG_SDMA_VERBOSITY
1573 dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
1575 (unsigned long long)status,
1576 sdma_state_names[sde->state.current_state]);
1578 spin_lock_irqsave(&sde->tail_lock, flags);
1579 write_seqlock(&sde->head_lock);
1580 if (status & ALL_SDMA_ENG_HALT_ERRS)
1581 __sdma_process_event(sde, sdma_event_e60_hw_halted);
1582 if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
1584 "SDMA (%u) engine error: 0x%llx state %s\n",
1586 (unsigned long long)status,
1587 sdma_state_names[sde->state.current_state]);
1588 dump_sdma_state(sde);
1590 write_sequnlock(&sde->head_lock);
1591 spin_unlock_irqrestore(&sde->tail_lock, flags);
1594 static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
1596 u64 set_senddmactrl = 0;
1597 u64 clr_senddmactrl = 0;
1598 unsigned long flags;
1600 #ifdef CONFIG_SDMA_VERBOSITY
1601 dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
1603 (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
1604 (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
1605 (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
1606 (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
1609 if (op & SDMA_SENDCTRL_OP_ENABLE)
1610 set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1612 clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1614 if (op & SDMA_SENDCTRL_OP_INTENABLE)
1615 set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1617 clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1619 if (op & SDMA_SENDCTRL_OP_HALT)
1620 set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1622 clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1624 spin_lock_irqsave(&sde->senddmactrl_lock, flags);
1626 sde->p_senddmactrl |= set_senddmactrl;
1627 sde->p_senddmactrl &= ~clr_senddmactrl;
1629 if (op & SDMA_SENDCTRL_OP_CLEANUP)
1630 write_sde_csr(sde, SD(CTRL),
1631 sde->p_senddmactrl |
1632 SD(CTRL_SDMA_CLEANUP_SMASK));
1634 write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
1636 spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);
1638 #ifdef CONFIG_SDMA_VERBOSITY
1639 sdma_dumpstate(sde);
1643 static void sdma_setlengen(struct sdma_engine *sde)
1645 #ifdef CONFIG_SDMA_VERBOSITY
1646 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1647 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1651 * Set SendDmaLenGen and clear-then-set the MSB of the generation
1652 * count to enable generation checking and load the internal
1653 * generation counter.
1655 write_sde_csr(sde, SD(LEN_GEN),
1656 (sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT)
1658 write_sde_csr(sde, SD(LEN_GEN),
1659 ((sde->descq_cnt/64) << SD(LEN_GEN_LENGTH_SHIFT))
1660 | (4ULL << SD(LEN_GEN_GENERATION_SHIFT))
1664 static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
1666 /* Commit writes to memory and advance the tail on the chip */
1667 smp_wmb(); /* see get_txhead() */
1668 writeq(tail, sde->tail_csr);
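/*
 * Ordering sketch (illustrative): the smp_wmb() above pairs with the
 * smp_read_barrier_depends() in get_txhead().  The producer publishes its
 * descq[] and tx_ring[] writes before advancing the chip tail, and the
 * head side reads tx_ring[] only after observing retired descriptors, so
 * the rough protocol is:
 *
 *	producer:  fill descq[]/tx_ring[]; smp_wmb(); writeq(tail);
 *	consumer:  read head; smp_read_barrier_depends(); read tx_ring[];
 */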
1672 * This is called when changing to state s10_hw_start_up_halt_wait as
1673 * a result of send buffer errors or send DMA descriptor errors.
1675 static void sdma_hw_start_up(struct sdma_engine *sde)
1679 #ifdef CONFIG_SDMA_VERBOSITY
1680 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1681 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1684 sdma_setlengen(sde);
1685 sdma_update_tail(sde, 0); /* Set SendDmaTail */
1688 reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
1689 SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
1690 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
1693 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
1694 (r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
1696 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
1697 (r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
1699 * set_sdma_integrity
1701 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
1703 static void set_sdma_integrity(struct sdma_engine *sde)
1705 struct hfi1_devdata *dd = sde->dd;
1708 if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY)))
1711 reg = hfi1_pkt_base_sdma_integrity(dd);
1713 if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
1714 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
1716 SET_STATIC_RATE_CONTROL_SMASK(reg);
1718 write_sde_csr(sde, SD(CHECK_ENABLE), reg);
1722 static void init_sdma_regs(
1723 struct sdma_engine *sde,
1728 #ifdef CONFIG_SDMA_VERBOSITY
1729 struct hfi1_devdata *dd = sde->dd;
1731 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1732 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1735 write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
1736 sdma_setlengen(sde);
1737 sdma_update_tail(sde, 0); /* Set SendDmaTail */
1738 write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
1739 write_sde_csr(sde, SD(DESC_CNT), 0);
1740 write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
1741 write_sde_csr(sde, SD(MEMORY),
1743 SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
1744 ((u64)(credits * sde->this_idx) <<
1745 SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
1746 write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
1747 set_sdma_integrity(sde);
1748 opmask = OPCODE_CHECK_MASK_DISABLED;
1749 opval = OPCODE_CHECK_VAL_DISABLED;
1750 write_sde_csr(sde, SD(CHECK_OPCODE),
1751 (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
1752 (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
1755 #ifdef CONFIG_SDMA_VERBOSITY
1757 #define sdma_dumpstate_helper0(reg) do { \
1758 csr = read_csr(sde->dd, reg); \
1759 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
1762 #define sdma_dumpstate_helper(reg) do { \
1763 csr = read_sde_csr(sde, reg); \
1764 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
1765 #reg, sde->this_idx, csr); \
1768 #define sdma_dumpstate_helper2(reg) do { \
1769 csr = read_csr(sde->dd, reg + (8 * i)); \
1770 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
1774 void sdma_dumpstate(struct sdma_engine *sde)
1779 sdma_dumpstate_helper(SD(CTRL));
1780 sdma_dumpstate_helper(SD(STATUS));
1781 sdma_dumpstate_helper0(SD(ERR_STATUS));
1782 sdma_dumpstate_helper0(SD(ERR_MASK));
1783 sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
1784 sdma_dumpstate_helper(SD(ENG_ERR_MASK));
1786 for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
1787 sdma_dumpstate_helper2(CCE_INT_STATUS);
1788 sdma_dumpstate_helper2(CCE_INT_MASK);
1789 sdma_dumpstate_helper2(CCE_INT_BLOCKED);
1792 sdma_dumpstate_helper(SD(TAIL));
1793 sdma_dumpstate_helper(SD(HEAD));
1794 sdma_dumpstate_helper(SD(PRIORITY_THLD));
1795 sdma_dumpstate_helper(SD(IDLE_CNT));
1796 sdma_dumpstate_helper(SD(RELOAD_CNT));
1797 sdma_dumpstate_helper(SD(DESC_CNT));
1798 sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
1799 sdma_dumpstate_helper(SD(MEMORY));
1800 sdma_dumpstate_helper0(SD(ENGINES));
1801 sdma_dumpstate_helper0(SD(MEM_SIZE));
1802 /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
1803 sdma_dumpstate_helper(SD(BASE_ADDR));
1804 sdma_dumpstate_helper(SD(LEN_GEN));
1805 sdma_dumpstate_helper(SD(HEAD_ADDR));
1806 sdma_dumpstate_helper(SD(CHECK_ENABLE));
1807 sdma_dumpstate_helper(SD(CHECK_VL));
1808 sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
1809 sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
1810 sdma_dumpstate_helper(SD(CHECK_SLID));
1811 sdma_dumpstate_helper(SD(CHECK_OPCODE));
1815 static void dump_sdma_state(struct sdma_engine *sde)
1817 struct hw_sdma_desc *descq;
1818 struct hw_sdma_desc *descqp;
1823 u16 head, tail, cnt;
1825 head = sde->descq_head & sde->sdma_mask;
1826 tail = sde->descq_tail & sde->sdma_mask;
1827 cnt = sdma_descq_freecnt(sde);
1831 "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
1836 !list_empty(&sde->flushlist));
1838 /* print info for each entry in the descriptor queue */
1839 while (head != tail) {
1840 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
1842 descqp = &sde->descq[head];
1843 desc[0] = le64_to_cpu(descqp->qw[0]);
1844 desc[1] = le64_to_cpu(descqp->qw[1]);
1845 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
1846 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
1848 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
1849 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
1850 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
1851 & SDMA_DESC0_PHY_ADDR_MASK;
1852 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
1853 & SDMA_DESC1_GENERATION_MASK;
1854 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
1855 & SDMA_DESC0_BYTE_COUNT_MASK;
1857 "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
1858 head, flags, addr, gen, len);
1860 "\tdesc0:0x%016llx desc1 0x%016llx\n",
1862 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
1864 "\taidx: %u amode: %u alen: %u\n",
1865 (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
1866 >> SDMA_DESC1_HEADER_INDEX_SHIFT),
1867 (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1868 >> SDMA_DESC1_HEADER_MODE_SHIFT),
1869 (u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK)
1870 >> SDMA_DESC1_HEADER_DWS_SHIFT));
1872 head &= sde->sdma_mask;
1877 "SDE %u STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
1879 * sdma_seqfile_dump_sde() - debugfs dump of sde
1881 * @sde: send dma engine to dump
1883 * This routine dumps the sde to the indicated seq file.
1885 void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
1888 struct hw_sdma_desc *descqp;
1894 head = sde->descq_head & sde->sdma_mask;
1895 tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1896 seq_printf(s, SDE_FMT, sde->this_idx,
1897 sdma_state_name(sde->state.current_state),
1898 (unsigned long long)read_sde_csr(sde, SD(CTRL)),
1899 (unsigned long long)read_sde_csr(sde, SD(STATUS)),
1900 (unsigned long long)read_sde_csr(sde,
1901 SD(ENG_ERR_STATUS)),
1902 (unsigned long long)read_sde_csr(sde, SD(TAIL)),
1904 (unsigned long long)read_sde_csr(sde, SD(HEAD)),
1906 (unsigned long long)le64_to_cpu(*sde->head_dma),
1907 (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
1908 (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
1909 (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
1910 (unsigned long long)sde->last_status,
1911 (unsigned long long)sde->ahg_bits,
1916 !list_empty(&sde->flushlist),
1917 sde->descq_full_count,
1918 (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
1920 /* print info for each entry in the descriptor queue */
1921 while (head != tail) {
1922 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
1924 descqp = &sde->descq[head];
1925 desc[0] = le64_to_cpu(descqp->qw[0]);
1926 desc[1] = le64_to_cpu(descqp->qw[1]);
1927 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
1928 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
1930 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
1931 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
1932 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
1933 & SDMA_DESC0_PHY_ADDR_MASK;
1934 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
1935 & SDMA_DESC1_GENERATION_MASK;
1936 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
1937 & SDMA_DESC0_BYTE_COUNT_MASK;
1939 "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
1940 head, flags, addr, gen, len);
1941 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
1942 seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
1943 (u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
1944 >> SDMA_DESC1_HEADER_INDEX_SHIFT),
1945 (u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1946 >> SDMA_DESC1_HEADER_MODE_SHIFT));
1947 head = (head + 1) & sde->sdma_mask;
1952 * add the generation number into
1953 * the qw1 and return
1955 static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
1957 u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;
1959 qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
1960 qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
1961 << SDMA_DESC1_GENERATION_SHIFT;
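/*
 * Generation example (illustrative): with descq_cnt = 2048, sdma_shift is
 * ilog2(2048) = 11, so the 2-bit generation is (descq_tail >> 11) & 3.
 * Tails 0..2047 get generation 0, 2048..4095 get generation 1, and so on
 * modulo 4, which is what the hardware's length/generation check loaded by
 * sdma_setlengen() validates against.
 */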
1966 * This routine submits the indicated tx
1968 * Space has already been guaranteed and
1969 * tail side of ring is locked.
1971 * The hardware tail update is done
1972 * in the caller and that is facilitated
1973 * by returning the new tail.
1975 * There is special case logic for ahg
1976 * to not add the generation number for
1977 * up to 2 descriptors that follow the first descriptor.
1981 static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
1985 struct sdma_desc *descp = tx->descp;
1986 u8 skip = 0, mode = ahg_mode(tx);
1988 tail = sde->descq_tail & sde->sdma_mask;
1989 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
1990 sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
1991 trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
1992 tail, &sde->descq[tail]);
1993 tail = ++sde->descq_tail & sde->sdma_mask;
1995 if (mode > SDMA_AHG_APPLY_UPDATE1)
1997 for (i = 1; i < tx->num_desc; i++, descp++) {
2000 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
2002 /* edits don't have generation */
2006 /* replace generation with real one for non-edits */
2007 qw1 = add_gen(sde, descp->qw[1]);
2009 sde->descq[tail].qw[1] = cpu_to_le64(qw1);
2010 trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
2011 tail, &sde->descq[tail]);
2012 tail = ++sde->descq_tail & sde->sdma_mask;
2014 tx->next_descq_idx = tail;
2015 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2016 tx->sn = sde->tail_sn++;
2017 trace_hfi1_sdma_in_sn(sde, tx->sn);
2018 WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
2020 sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
2021 sde->desc_avail -= tx->num_desc;
2026 * Check for progress
2028 static int sdma_check_progress(
2029 struct sdma_engine *sde,
2030 struct iowait *wait,
2031 struct sdma_txreq *tx)
2035 sde->desc_avail = sdma_descq_freecnt(sde);
2036 if (tx->num_desc <= sde->desc_avail)
2038 /* pulse the head_lock */
2039 if (wait && wait->sleep) {
2042 seq = raw_seqcount_begin(
2043 (const seqcount_t *)&sde->head_lock.seqcount);
2044 ret = wait->sleep(sde, wait, tx, seq);
2046 sde->desc_avail = sdma_descq_freecnt(sde);
2053 * sdma_send_txreq() - submit a tx req to ring
2054 * @sde: sdma engine to use
2055 * @wait: wait structure to use when full (may be NULL)
2056 * @tx: sdma_txreq to submit
2058 * The call submits the tx into the ring. If an iowait structure is non-NULL
2059 * the packet will be queued to the list in wait.
2062 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
2063 * ring (wait == NULL)
2064 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
2066 int sdma_send_txreq(struct sdma_engine *sde,
2067 struct iowait *wait,
2068 struct sdma_txreq *tx)
2072 unsigned long flags;
2074 /* user should have supplied entire packet */
2075 if (unlikely(tx->tlen))
2078 spin_lock_irqsave(&sde->tail_lock, flags);
2080 if (unlikely(!__sdma_running(sde)))
2082 if (unlikely(tx->num_desc > sde->desc_avail))
2084 tail = submit_tx(sde, tx);
2086 atomic_inc(&wait->sdma_busy);
2087 sdma_update_tail(sde, tail);
2089 spin_unlock_irqrestore(&sde->tail_lock, flags);
2093 atomic_inc(&wait->sdma_busy);
2094 tx->next_descq_idx = 0;
2095 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2096 tx->sn = sde->tail_sn++;
2097 trace_hfi1_sdma_in_sn(sde, tx->sn);
2099 spin_lock(&sde->flushlist_lock);
2100 list_add_tail(&tx->list, &sde->flushlist);
2101 spin_unlock(&sde->flushlist_lock);
2104 wait->count += tx->num_desc;
2106 schedule_work(&sde->flush_worker);
2110 ret = sdma_check_progress(sde, wait, tx);
2111 if (ret == -EAGAIN) {
2115 sde->descq_full_count++;
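/*
 * Caller sketch (a minimal illustration, not a complete send path; "wait"
 * and "tx" are assumed to be fully built by the caller):
 *
 *	ret = sdma_send_txreq(sde, &wait, &tx);
 *	if (ret == 0 || ret == -EIOCBQUEUED)
 *		;		// tx now owned by the ring or the wait list
 *	else if (ret == -ECOMM)
 *		;		// engine not running; tx queued for flush
 *	else			// -EINVAL: incomplete tx, caller cleans up
 *		sdma_txclean(sde->dd, &tx);
 */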
2120 * sdma_send_txlist() - submit a list of tx req to ring
2121 * @sde: sdma engine to use
2122 * @wait: wait structure to use when full (may be NULL)
2123 * @tx_list: list of sdma_txreqs to submit
2125 * The call submits the list into the ring.
2127 * If the iowait structure is non-NULL and not equal to the iowait list
2128 * the unprocessed part of the list will be appended to the list in wait.
2130 * In all cases, the tx_list will be updated so the head of the tx_list is
2131 * the list of descriptors that have yet to be transmitted.
2133 * The intent of this call is to provide a more efficient
2134 * way of submitting multiple packets to SDMA while holding the tail side of the ring lock.
2138 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring
2140 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
2142 int sdma_send_txlist(struct sdma_engine *sde,
2143 struct iowait *wait,
2144 struct list_head *tx_list)
2146 struct sdma_txreq *tx, *tx_next;
2148 unsigned long flags;
2149 u16 tail = INVALID_TAIL;
2152 spin_lock_irqsave(&sde->tail_lock, flags);
2154 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2156 if (unlikely(!__sdma_running(sde)))
2158 if (unlikely(tx->num_desc > sde->desc_avail))
2160 if (unlikely(tx->tlen)) {
2164 list_del_init(&tx->list);
2165 tail = submit_tx(sde, tx);
2167 if (tail != INVALID_TAIL &&
2168 (count & SDMA_TAIL_UPDATE_THRESH) == 0) {
2169 sdma_update_tail(sde, tail);
2170 tail = INVALID_TAIL;
2175 atomic_add(count, &wait->sdma_busy);
2176 if (tail != INVALID_TAIL)
2177 sdma_update_tail(sde, tail);
2178 spin_unlock_irqrestore(&sde->tail_lock, flags);
2181 spin_lock(&sde->flushlist_lock);
2182 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2184 list_del_init(&tx->list);
2186 atomic_inc(&wait->sdma_busy);
2187 tx->next_descq_idx = 0;
2188 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2189 tx->sn = sde->tail_sn++;
2190 trace_hfi1_sdma_in_sn(sde, tx->sn);
2192 list_add_tail(&tx->list, &sde->flushlist);
2195 wait->count += tx->num_desc;
2198 spin_unlock(&sde->flushlist_lock);
2199 schedule_work(&sde->flush_worker);
2203 ret = sdma_check_progress(sde, wait, tx);
2204 if (ret == -EAGAIN) {
2208 sde->descq_full_count++;
2212 static void sdma_process_event(struct sdma_engine *sde,
2213 enum sdma_events event)
2215 unsigned long flags;
2217 spin_lock_irqsave(&sde->tail_lock, flags);
2218 write_seqlock(&sde->head_lock);
2220 __sdma_process_event(sde, event);
2222 if (sde->state.current_state == sdma_state_s99_running)
2223 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
2225 write_sequnlock(&sde->head_lock);
2226 spin_unlock_irqrestore(&sde->tail_lock, flags);
2229 static void __sdma_process_event(struct sdma_engine *sde,
2230 enum sdma_events event)
2232 struct sdma_state *ss = &sde->state;
2233 int need_progress = 0;
2235 /* CONFIG SDMA temporary */
2236 #ifdef CONFIG_SDMA_VERBOSITY
2237 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
2238 sdma_state_names[ss->current_state],
2239 sdma_event_names[event]);
2242 switch (ss->current_state) {
2243 case sdma_state_s00_hw_down:
2245 case sdma_event_e00_go_hw_down:
2247 case sdma_event_e30_go_running:
2249 * If down, but running requested (usually the
2250 * result of link up), then we need to start up.
2251 * This can happen when hw down is requested while
2252 * bringing the link up with traffic active on the port.
2254 ss->go_s99_running = 1;
2255 /* fall through and start dma engine */
2256 case sdma_event_e10_go_hw_start:
2257 /* This reference means the state machine is started */
2258 sdma_get(&sde->state);
2260 sdma_state_s10_hw_start_up_halt_wait);
2262 case sdma_event_e15_hw_halt_done:
2264 case sdma_event_e25_hw_clean_up_done:
2266 case sdma_event_e40_sw_cleaned:
2267 sdma_sw_tear_down(sde);
2269 case sdma_event_e50_hw_cleaned:
2271 case sdma_event_e60_hw_halted:
2273 case sdma_event_e70_go_idle:
2275 case sdma_event_e80_hw_freeze:
2277 case sdma_event_e81_hw_frozen:
2279 case sdma_event_e82_hw_unfreeze:
2281 case sdma_event_e85_link_down:
2283 case sdma_event_e90_sw_halted:
2288 case sdma_state_s10_hw_start_up_halt_wait:
2290 case sdma_event_e00_go_hw_down:
2291 sdma_set_state(sde, sdma_state_s00_hw_down);
2292 sdma_sw_tear_down(sde);
2294 case sdma_event_e10_go_hw_start:
2296 case sdma_event_e15_hw_halt_done:
2298 sdma_state_s15_hw_start_up_clean_wait);
2299 sdma_start_hw_clean_up(sde);
2301 case sdma_event_e25_hw_clean_up_done:
2303 case sdma_event_e30_go_running:
2304 ss->go_s99_running = 1;
2306 case sdma_event_e40_sw_cleaned:
2308 case sdma_event_e50_hw_cleaned:
2310 case sdma_event_e60_hw_halted:
2311 sdma_start_err_halt_wait(sde);
2313 case sdma_event_e70_go_idle:
2314 ss->go_s99_running = 0;
2316 case sdma_event_e80_hw_freeze:
2318 case sdma_event_e81_hw_frozen:
2320 case sdma_event_e82_hw_unfreeze:
2322 case sdma_event_e85_link_down:
2324 case sdma_event_e90_sw_halted:
2329 case sdma_state_s15_hw_start_up_clean_wait:
2331 case sdma_event_e00_go_hw_down:
2332 sdma_set_state(sde, sdma_state_s00_hw_down);
2333 sdma_sw_tear_down(sde);
2335 case sdma_event_e10_go_hw_start:
2337 case sdma_event_e15_hw_halt_done:
2339 case sdma_event_e25_hw_clean_up_done:
2340 sdma_hw_start_up(sde);
2341 sdma_set_state(sde, ss->go_s99_running ?
2342 sdma_state_s99_running :
2343 sdma_state_s20_idle);
2345 case sdma_event_e30_go_running:
2346 ss->go_s99_running = 1;
2348 case sdma_event_e40_sw_cleaned:
2350 case sdma_event_e50_hw_cleaned:
2352 case sdma_event_e60_hw_halted:
2354 case sdma_event_e70_go_idle:
2355 ss->go_s99_running = 0;
2357 case sdma_event_e80_hw_freeze:
2359 case sdma_event_e81_hw_frozen:
2361 case sdma_event_e82_hw_unfreeze:
2363 case sdma_event_e85_link_down:
2365 case sdma_event_e90_sw_halted:
2370 case sdma_state_s20_idle:
2372 case sdma_event_e00_go_hw_down:
2373 sdma_set_state(sde, sdma_state_s00_hw_down);
2374 sdma_sw_tear_down(sde);
2376 case sdma_event_e10_go_hw_start:
2378 case sdma_event_e15_hw_halt_done:
2380 case sdma_event_e25_hw_clean_up_done:
2382 case sdma_event_e30_go_running:
2383 sdma_set_state(sde, sdma_state_s99_running);
2384 ss->go_s99_running = 1;
2386 case sdma_event_e40_sw_cleaned:
2388 case sdma_event_e50_hw_cleaned:
2390 case sdma_event_e60_hw_halted:
2391 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
2392 sdma_start_err_halt_wait(sde);
2394 case sdma_event_e70_go_idle:
2396 case sdma_event_e85_link_down:
2398 case sdma_event_e80_hw_freeze:
2399 sdma_set_state(sde, sdma_state_s80_hw_freeze);
2400 atomic_dec(&sde->dd->sdma_unfreeze_count);
2401 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2403 case sdma_event_e81_hw_frozen:
2405 case sdma_event_e82_hw_unfreeze:
2407 case sdma_event_e90_sw_halted:
2412 case sdma_state_s30_sw_clean_up_wait:
2414 case sdma_event_e00_go_hw_down:
2415 sdma_set_state(sde, sdma_state_s00_hw_down);
2417 case sdma_event_e10_go_hw_start:
2419 case sdma_event_e15_hw_halt_done:
2421 case sdma_event_e25_hw_clean_up_done:
2423 case sdma_event_e30_go_running:
2424 ss->go_s99_running = 1;
2426 case sdma_event_e40_sw_cleaned:
2427 sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
2428 sdma_start_hw_clean_up(sde);
2430 case sdma_event_e50_hw_cleaned:
2432 case sdma_event_e60_hw_halted:
2434 case sdma_event_e70_go_idle:
2435 ss->go_s99_running = 0;
2437 case sdma_event_e80_hw_freeze:
2439 case sdma_event_e81_hw_frozen:
2441 case sdma_event_e82_hw_unfreeze:
2443 case sdma_event_e85_link_down:
2444 ss->go_s99_running = 0;
2446 case sdma_event_e90_sw_halted:
2451 case sdma_state_s40_hw_clean_up_wait:
2453 case sdma_event_e00_go_hw_down:
2454 sdma_set_state(sde, sdma_state_s00_hw_down);
2455 sdma_start_sw_clean_up(sde);
2457 case sdma_event_e10_go_hw_start:
2459 case sdma_event_e15_hw_halt_done:
2461 case sdma_event_e25_hw_clean_up_done:
2462 sdma_hw_start_up(sde);
2463 sdma_set_state(sde, ss->go_s99_running ?
2464 sdma_state_s99_running :
2465 sdma_state_s20_idle);
2467 case sdma_event_e30_go_running:
2468 ss->go_s99_running = 1;
2470 case sdma_event_e40_sw_cleaned:
2472 case sdma_event_e50_hw_cleaned:
2474 case sdma_event_e60_hw_halted:
2476 case sdma_event_e70_go_idle:
2477 ss->go_s99_running = 0;
2479 case sdma_event_e80_hw_freeze:
2481 case sdma_event_e81_hw_frozen:
2483 case sdma_event_e82_hw_unfreeze:
2485 case sdma_event_e85_link_down:
2486 ss->go_s99_running = 0;
2488 case sdma_event_e90_sw_halted:
2493 case sdma_state_s50_hw_halt_wait:
2495 case sdma_event_e00_go_hw_down:
2496 sdma_set_state(sde, sdma_state_s00_hw_down);
2497 sdma_start_sw_clean_up(sde);
2499 case sdma_event_e10_go_hw_start:
2501 case sdma_event_e15_hw_halt_done:
2502 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2503 sdma_start_sw_clean_up(sde);
2505 case sdma_event_e25_hw_clean_up_done:
2507 case sdma_event_e30_go_running:
2508 ss->go_s99_running = 1;
2510 case sdma_event_e40_sw_cleaned:
2512 case sdma_event_e50_hw_cleaned:
2514 case sdma_event_e60_hw_halted:
2515 sdma_start_err_halt_wait(sde);
2517 case sdma_event_e70_go_idle:
2518 ss->go_s99_running = 0;
2520 case sdma_event_e80_hw_freeze:
2522 case sdma_event_e81_hw_frozen:
2524 case sdma_event_e82_hw_unfreeze:
2526 case sdma_event_e85_link_down:
2527 ss->go_s99_running = 0;
2529 case sdma_event_e90_sw_halted:
2534 case sdma_state_s60_idle_halt_wait:
2536 case sdma_event_e00_go_hw_down:
2537 sdma_set_state(sde, sdma_state_s00_hw_down);
2538 sdma_start_sw_clean_up(sde);
2540 case sdma_event_e10_go_hw_start:
2542 case sdma_event_e15_hw_halt_done:
2543 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2544 sdma_start_sw_clean_up(sde);
2546 case sdma_event_e25_hw_clean_up_done:
2548 case sdma_event_e30_go_running:
2549 ss->go_s99_running = 1;
2551 case sdma_event_e40_sw_cleaned:
2553 case sdma_event_e50_hw_cleaned:
2555 case sdma_event_e60_hw_halted:
2556 sdma_start_err_halt_wait(sde);
2558 case sdma_event_e70_go_idle:
2559 ss->go_s99_running = 0;
2561 case sdma_event_e80_hw_freeze:
2563 case sdma_event_e81_hw_frozen:
2565 case sdma_event_e82_hw_unfreeze:
2567 case sdma_event_e85_link_down:
2569 case sdma_event_e90_sw_halted:
2574 case sdma_state_s80_hw_freeze:
2576 case sdma_event_e00_go_hw_down:
2577 sdma_set_state(sde, sdma_state_s00_hw_down);
2578 sdma_start_sw_clean_up(sde);
2580 case sdma_event_e10_go_hw_start:
2582 case sdma_event_e15_hw_halt_done:
2584 case sdma_event_e25_hw_clean_up_done:
2586 case sdma_event_e30_go_running:
2587 ss->go_s99_running = 1;
2589 case sdma_event_e40_sw_cleaned:
2591 case sdma_event_e50_hw_cleaned:
2593 case sdma_event_e60_hw_halted:
2595 case sdma_event_e70_go_idle:
2596 ss->go_s99_running = 0;
2598 case sdma_event_e80_hw_freeze:
2600 case sdma_event_e81_hw_frozen:
2601 sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
2602 sdma_start_sw_clean_up(sde);
2604 case sdma_event_e82_hw_unfreeze:
2606 case sdma_event_e85_link_down:
2608 case sdma_event_e90_sw_halted:
2613 case sdma_state_s82_freeze_sw_clean:
2615 case sdma_event_e00_go_hw_down:
2616 sdma_set_state(sde, sdma_state_s00_hw_down);
2617 sdma_start_sw_clean_up(sde);
2619 case sdma_event_e10_go_hw_start:
2621 case sdma_event_e15_hw_halt_done:
2623 case sdma_event_e25_hw_clean_up_done:
2625 case sdma_event_e30_go_running:
2626 ss->go_s99_running = 1;
2628 case sdma_event_e40_sw_cleaned:
2629 /* notify caller this engine is done cleaning */
2630 atomic_dec(&sde->dd->sdma_unfreeze_count);
2631 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2633 case sdma_event_e50_hw_cleaned:
2635 case sdma_event_e60_hw_halted:
2637 case sdma_event_e70_go_idle:
2638 ss->go_s99_running = 0;
2640 case sdma_event_e80_hw_freeze:
2642 case sdma_event_e81_hw_frozen:
2644 case sdma_event_e82_hw_unfreeze:
2645 sdma_hw_start_up(sde);
2646 sdma_set_state(sde, ss->go_s99_running ?
2647 sdma_state_s99_running :
2648 sdma_state_s20_idle);
2650 case sdma_event_e85_link_down:
2652 case sdma_event_e90_sw_halted:
2657 case sdma_state_s99_running:
2659 case sdma_event_e00_go_hw_down:
2660 sdma_set_state(sde, sdma_state_s00_hw_down);
2661 sdma_start_sw_clean_up(sde);
2663 case sdma_event_e10_go_hw_start:
2665 case sdma_event_e15_hw_halt_done:
2667 case sdma_event_e25_hw_clean_up_done:
2669 case sdma_event_e30_go_running:
2671 case sdma_event_e40_sw_cleaned:
2673 case sdma_event_e50_hw_cleaned:
2675 case sdma_event_e60_hw_halted:
2677 sdma_err_progress_check_schedule(sde);
2678 case sdma_event_e90_sw_halted:
2680 * SW initiated halt does not perform engines
2683 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
2684 sdma_start_err_halt_wait(sde);
2686 case sdma_event_e70_go_idle:
2687 sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
2689 case sdma_event_e85_link_down:
2690 ss->go_s99_running = 0;
2692 case sdma_event_e80_hw_freeze:
2693 sdma_set_state(sde, sdma_state_s80_hw_freeze);
2694 atomic_dec(&sde->dd->sdma_unfreeze_count);
2695 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2697 case sdma_event_e81_hw_frozen:
2699 case sdma_event_e82_hw_unfreeze:
2705 ss->last_event = event;
2707 sdma_make_progress(sde, 0);
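
/*
 * Reading aid (annotation, not part of the original driver): the normal
 * bring-up path through the state machine above is
 *
 *	s00_hw_down  --e10_go_hw_start-->      s10_hw_start_up_halt_wait
 *	s10          --e15_hw_halt_done-->     s15_hw_start_up_clean_wait
 *	s15          --e25_hw_clean_up_done--> s99_running, if an earlier
 *	             e30_go_running set go_s99_running, else s20_idle
 *
 * All transitions are serialized by taking both tail_lock and head_lock
 * in sdma_process_event(), so current_state is always observed as one
 * consistent value under those locks.
 */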
/*
 * _extend_sdma_tx_descs() - helper to extend txreq
 *
 * This is called once the initial nominal allocation
 * of descriptors in the sdma_txreq is exhausted.
 *
 * The code will bump the allocation up to the max
 * of MAX_DESC (64) descriptors. There doesn't seem to be
 * much point in an interim step. The last descriptor
 * is reserved for the coalesce buffer in order to support
 * cases where the input packet has >MAX_DESC iovecs.
 */
static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int i;

	/* Handle last descriptor */
	if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
		/* if tlen is 0, it is for padding, release last descriptor */
		if (!tx->tlen) {
			tx->desc_limit = MAX_DESC;
		} else if (!tx->coalesce_buf) {
			/* allocate coalesce buffer with space for padding */
			tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
						   GFP_ATOMIC);
			if (!tx->coalesce_buf)
				return -ENOMEM;
			tx->coalesce_idx = 0;
		}
		return 0;
	}

	if (unlikely(tx->num_desc == MAX_DESC))
		return -ENOMEM;

	tx->descp = kmalloc_array(
			MAX_DESC,
			sizeof(struct sdma_desc),
			GFP_ATOMIC);
	if (!tx->descp)
		return -ENOMEM;

	/* reserve last descriptor for coalescing */
	tx->desc_limit = MAX_DESC - 1;
	/* copy ones already built */
	for (i = 0; i < tx->num_desc; i++)
		tx->descp[i] = tx->descs[i];
	return 0;
}
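
/*
 * Worked example (annotation, not original code): the first time a
 * txreq outgrows its nominal inline descriptor array, the
 * kmalloc_array() path above grows it to MAX_DESC (64) entries with
 * desc_limit set to 63, so the last slot stays reserved.  If the
 * request then reaches 63 descriptors with payload still remaining
 * (tx->tlen != 0), a coalesce buffer of tx->tlen + sizeof(u32) bytes
 * is allocated instead of more descriptors; the extra u32 leaves room
 * for dword padding.  If the 63rd descriptor is only the trailing pad
 * (tx->tlen == 0), the reserved slot is simply released by raising
 * desc_limit to MAX_DESC.
 */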
/*
 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
 *
 * This is called once the initial nominal allocation of descriptors
 * in the sdma_txreq is exhausted.
 *
 * This function calls _extend_sdma_tx_descs to extend or allocate
 * the coalesce buffer. If a coalesce buffer is allocated, it will
 * copy the input packet data into the coalesce buffer. It also adds
 * the coalesce buffer descriptor once the whole packet is received.
 *
 * Return:
 * <0 - error
 * 0 - coalescing, don't populate descriptor
 * 1 - continue with populating descriptor
 */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len)
{
	int pad_len, rval;
	dma_addr_t addr;

	rval = _extend_sdma_tx_descs(dd, tx);
	if (rval) {
		sdma_txclean(dd, tx);
		return rval;
	}

	/* If coalesce buffer is allocated, copy data into it */
	if (tx->coalesce_buf) {
		if (type == SDMA_MAP_NONE) {
			sdma_txclean(dd, tx);
			return -EINVAL;
		}

		if (type == SDMA_MAP_PAGE) {
			kvaddr = kmap(page);
			kvaddr += offset;
		} else if (WARN_ON(!kvaddr)) {
			sdma_txclean(dd, tx);
			return -EINVAL;
		}

		memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
		tx->coalesce_idx += len;
		if (type == SDMA_MAP_PAGE)
			kunmap(page);

		/* If there is more data, return */
		if (tx->tlen - tx->coalesce_idx)
			return 0;

		/* Whole packet is received; add any padding */
		pad_len = tx->packet_len & (sizeof(u32) - 1);
		if (pad_len) {
			pad_len = sizeof(u32) - pad_len;
			memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
			/* padding is taken care of for coalescing case */
			tx->packet_len += pad_len;
			tx->tlen += pad_len;
		}

		/* dma map the coalesce buffer */
		addr = dma_map_single(&dd->pcidev->dev,
				      tx->coalesce_buf,
				      tx->tlen,
				      DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
			sdma_txclean(dd, tx);
			return -ENOSPC;
		}

		/* Add descriptor for coalesce buffer */
		tx->desc_limit = MAX_DESC;
		return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
					 addr, tx->tlen);
	}

	return 1;
}
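
/*
 * Caller sketch (annotation; the expected in-tree callers are the
 * sdma_txadd_* helpers): the <0 / 0 / 1 contract above maps to
 * "failed, txreq already cleaned", "consumed by coalescing", and
 * "populate the descriptor as usual":
 *
 *	rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE, NULL,
 *				      page, offset, len);
 *	if (rval <= 0)
 *		return rval;	// 0: coalesced, <0: error
 *	// rval == 1: fall through and build the descriptor normally
 */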
/* Update sdes when the lmc changes */
void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
{
	struct sdma_engine *sde;
	int i;
	u64 sreg;

	sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
		SD(CHECK_SLID_MASK_SHIFT)) |
	       (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
		SD(CHECK_SLID_VALUE_SHIFT));

	for (i = 0; i < dd->num_sdma; i++) {
		hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
		sde = &dd->per_sdma[i];
		write_sde_csr(sde, SD(CHECK_SLID), sreg);
	}
}
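
/*
 * Worked example (annotation, not original code): with lid 0x1234 and
 * an LMC of 2, the caller would pass mask = ~0x3ULL, which the
 * CHECK_SLID_MASK_MASK field truncates to 0xfffc.  The engines are
 * then programmed with SLID_MASK = 0xfffc and
 * SLID_VALUE = 0x1234 & 0xfffc = 0x1234, so any source LID in
 * 0x1234..0x1237 passes the hardware SLID check - exactly the LMC
 * range of the local port.
 */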
/* tx not dword sized - pad */
int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	int rval = 0;

	tx->num_desc++;
	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = _extend_sdma_tx_descs(dd, tx);
		if (rval) {
			sdma_txclean(dd, tx);
			return rval;
		}
	}
	/* finish the one just added */
	make_tx_sdma_desc(
		tx,
		SDMA_MAP_NONE,
		dd->sdma_pad_phys,
		sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
	_sdma_close_tx(dd, tx);
	return rval;
}
/*
 * Add AHG to the sdma_txreq
 *
 * The logic will consume up to 3
 * descriptors at the beginning of
 * the sdma_txreq.
 */
void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen)
{
	u32 i, shift = 0, desc = 0;
	u8 mode;

	WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);

	if (num_ahg == 1)
		mode = SDMA_AHG_APPLY_UPDATE1;
	else if (num_ahg <= 5)
		mode = SDMA_AHG_APPLY_UPDATE2;
	else
		mode = SDMA_AHG_APPLY_UPDATE3;
	tx->num_desc++;
	/* initialize the consumed descriptors to zero */
	switch (mode) {
	case SDMA_AHG_APPLY_UPDATE3:
		tx->num_desc++;
		tx->descs[2].qw[0] = 0;
		tx->descs[2].qw[1] = 0;
		/* fall through */
	case SDMA_AHG_APPLY_UPDATE2:
		tx->num_desc++;
		tx->descs[1].qw[0] = 0;
		tx->descs[1].qw[1] = 0;
		break;
	}
	ahg_hlen >>= 2;
	tx->descs[0].qw[1] |=
		(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
			<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
		(((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
			<< SDMA_DESC1_HEADER_DWS_SHIFT) |
		(((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
			<< SDMA_DESC1_HEADER_MODE_SHIFT) |
		(((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
			<< SDMA_DESC1_HEADER_UPDATE1_SHIFT);
	for (i = 0; i < (num_ahg - 1); i++) {
		if (!shift && !(i & 2))
			desc++;
		tx->descs[desc].qw[!!(i & 2)] |=
			(((u64)ahg[i + 1])
				<< shift);
		shift = (shift + 32) & 63;
	}
}
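
/*
 * Worked example (annotation, not original code): num_ahg = 3 selects
 * SDMA_AHG_APPLY_UPDATE2 above, consuming descriptors 0 and 1.
 * ahg[0] travels in descs[0].qw[1] (the UPDATE1 field); the loop then
 * ORs ahg[1] into descs[1].qw[0] bits 31..0 (i = 0: shift == 0, so
 * desc is bumped to 1) and ahg[2] into descs[1].qw[0] bits 63..32
 * (i = 1: shift == 32), after which shift wraps back to 0.
 */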
/**
 * sdma_ahg_alloc - allocate an AHG entry
 * @sde: engine to allocate from
 *
 * Return:
 * 0-31 when successful, -EINVAL if AHG is not enabled,
 * -ENOSPC if an entry is not available
 */
int sdma_ahg_alloc(struct sdma_engine *sde)
{
	int nr;
	int oldbit;

	if (!sde) {
		trace_hfi1_ahg_allocate(sde, -EINVAL);
		return -EINVAL;
	}
	while (1) {
		nr = ffz(ACCESS_ONCE(sde->ahg_bits));
		if (nr > 31) {
			trace_hfi1_ahg_allocate(sde, -ENOSPC);
			return -ENOSPC;
		}
		oldbit = test_and_set_bit(nr, &sde->ahg_bits);
		if (!oldbit)
			break;
		cpu_relax();
	}
	trace_hfi1_ahg_allocate(sde, nr);
	return nr;
}
/**
 * sdma_ahg_free - free an AHG entry
 * @sde: engine to return AHG entry
 * @ahg_index: index to free
 *
 * This routine frees the indicated AHG entry.
 */
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
{
	if (!sde)
		return;
	trace_hfi1_ahg_deallocate(sde, ahg_index);
	if (ahg_index < 0 || ahg_index > 31)
		return;
	clear_bit(ahg_index, &sde->ahg_bits);
}
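
/*
 * Typical pairing (annotation; the allocation sites in the user-SDMA
 * code are not shown here): an AHG index is claimed once per header
 * stream and returned at teardown:
 *
 *	int ahg_index = sdma_ahg_alloc(sde);
 *
 *	if (ahg_index < 0)
 *		use_full_headers();	// hypothetical fallback
 *	...
 *	sdma_ahg_free(sde, ahg_index);
 *
 * Note that sdma_ahg_free() silently ignores out-of-range indices, so
 * a caller may pass a negative "never allocated" value unconditionally.
 */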
/*
 * SPC freeze handling for SDMA engines. Called when the driver knows
 * the SPC is going into a freeze but before the freeze is fully
 * settled. Generally an error interrupt.
 *
 * This event will pull the engine out of running so no more entries can be
 * added to the engine's queue.
 */
void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
{
	int i;
	enum sdma_events event = link_down ? sdma_event_e85_link_down :
					     sdma_event_e80_hw_freeze;

	/* set up the wait but do not wait here */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines to stop running and wait */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], event);

	/* sdma_freeze() will wait for all engines to have stopped */
}
/*
 * SPC freeze handling for SDMA engines. Called when the driver knows
 * the SPC is fully frozen.
 */
void sdma_freeze(struct hfi1_devdata *dd)
{
	int i;
	int ret;

	/*
	 * Make sure all engines have moved out of the running state before
	 * continuing.
	 */
	ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
				atomic_read(&dd->sdma_unfreeze_count) <= 0);
	/* if interrupted, or the count went negative (unloading), just exit */
	if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
		return;

	/* set up the count for the next wait */
	atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);

	/* tell all engines that the SPC is frozen, they can start cleaning */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);

	/*
	 * Wait for everyone to finish the software clean before exiting. The
	 * software clean will read engine CSRs, so it must be completed before
	 * the next step, which will clear the engine CSRs.
	 */
	(void)wait_event_interruptible(dd->sdma_unfreeze_wq,
				atomic_read(&dd->sdma_unfreeze_count) <= 0);
	/* no need to check the result - done no matter what */
}
/*
 * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen.
 *
 * The SPC freeze acts like an SDMA halt and a hardware clean combined. All
 * that is left is a software clean. We could do it after the SPC is fully
 * frozen, but then we'd have to add another state to wait for the unfreeze.
 * Instead, just defer the software clean until the unfreeze step.
 */
void sdma_unfreeze(struct hfi1_devdata *dd)
{
	int i;

	/* tell all engines to start the freeze clean up */
	for (i = 0; i < dd->num_sdma; i++)
		sdma_process_event(&dd->per_sdma[i],
				   sdma_event_e82_hw_unfreeze);
}
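
/*
 * Sequencing sketch (annotation, not part of the original driver): the
 * chip error handling is expected to drive the three entry points above
 * in order:
 *
 *	sdma_freeze_notify(dd, link_down);	// engines leave running
 *	// ... wait for the SPC freeze to settle ...
 *	sdma_freeze(dd);			// engines do the SW clean
 *	// ... unfreeze the SPC ...
 *	sdma_unfreeze(dd);			// deferred clean + restart
 *
 * sdma_unfreeze_count is reused across the steps, which is why
 * sdma_freeze() re-arms it with atomic_set() between its two waits.
 */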
/**
 * _sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 */
void _sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
	/* assume we have selected a good cpu */
	write_csr(sde->dd,
		  CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
		  sde->progress_mask);
}