/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/types.h>
#include <linux/list.h>
#include <asm/byteorder.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>

/* increased for AHG */
#define NUM_DESC 6
/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)

#define SDMA_TXREQ_S_OK        0
#define SDMA_TXREQ_S_SENDERROR 1
#define SDMA_TXREQ_S_ABORTED   2
#define SDMA_TXREQ_S_SHUTDOWN  3

#define SDMA_TXREQ_F_URGENT       0x0001
#define SDMA_TXREQ_F_AHG_COPY     0x0002
#define SDMA_TXREQ_F_USE_AHG      0x0004

#define SDMA_MAP_NONE          0
#define SDMA_MAP_SINGLE        1
#define SDMA_MAP_PAGE          2

#define SDMA_AHG_VALUE_MASK          0xffff
#define SDMA_AHG_VALUE_SHIFT         0
#define SDMA_AHG_INDEX_MASK          0xf
#define SDMA_AHG_INDEX_SHIFT         16
#define SDMA_AHG_FIELD_LEN_MASK      0xf
#define SDMA_AHG_FIELD_LEN_SHIFT     20
#define SDMA_AHG_FIELD_START_MASK    0x1f
#define SDMA_AHG_FIELD_START_SHIFT   24
#define SDMA_AHG_UPDATE_ENABLE_MASK  0x1
#define SDMA_AHG_UPDATE_ENABLE_SHIFT 31

/*
 * Be aware the ordering and values
 * for SDMA_AHG_APPLY_UPDATE[123]
 * are assumed in generating a skip
 * count in submit_tx() in sdma.c
 */
#define SDMA_AHG_NO_AHG              0
#define SDMA_AHG_COPY                1
#define SDMA_AHG_APPLY_UPDATE1       2
#define SDMA_AHG_APPLY_UPDATE2       3
#define SDMA_AHG_APPLY_UPDATE3       4

/*
 * Bits defined in the send DMA descriptor.
 */
#define SDMA_DESC0_FIRST_DESC_FLAG      (1ULL << 63)
#define SDMA_DESC0_LAST_DESC_FLAG       (1ULL << 62)
#define SDMA_DESC0_BYTE_COUNT_SHIFT     48
#define SDMA_DESC0_BYTE_COUNT_WIDTH     14
#define SDMA_DESC0_BYTE_COUNT_MASK \
	((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
#define SDMA_DESC0_BYTE_COUNT_SMASK \
	(SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
#define SDMA_DESC0_PHY_ADDR_SHIFT       0
#define SDMA_DESC0_PHY_ADDR_WIDTH       48
#define SDMA_DESC0_PHY_ADDR_MASK \
	((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
#define SDMA_DESC0_PHY_ADDR_SMASK \
	(SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)

#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
#define SDMA_DESC1_HEADER_UPDATE1_MASK \
	((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
	(SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
#define SDMA_DESC1_HEADER_MODE_SHIFT    13
#define SDMA_DESC1_HEADER_MODE_WIDTH    3
#define SDMA_DESC1_HEADER_MODE_MASK \
	((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
#define SDMA_DESC1_HEADER_MODE_SMASK \
	(SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
#define SDMA_DESC1_HEADER_INDEX_SHIFT   8
#define SDMA_DESC1_HEADER_INDEX_WIDTH   5
#define SDMA_DESC1_HEADER_INDEX_MASK \
	((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
#define SDMA_DESC1_HEADER_INDEX_SMASK \
	(SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
#define SDMA_DESC1_HEADER_DWS_SHIFT     4
#define SDMA_DESC1_HEADER_DWS_WIDTH     4
#define SDMA_DESC1_HEADER_DWS_MASK \
	((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
#define SDMA_DESC1_HEADER_DWS_SMASK \
	(SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
#define SDMA_DESC1_GENERATION_SHIFT     2
#define SDMA_DESC1_GENERATION_WIDTH     2
#define SDMA_DESC1_GENERATION_MASK \
	((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
#define SDMA_DESC1_GENERATION_SMASK \
	(SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
#define SDMA_DESC1_INT_REQ_FLAG         (1ULL << 1)
#define SDMA_DESC1_HEAD_TO_HOST_FLAG    (1ULL << 0)

enum sdma_states {
	sdma_state_s00_hw_down,
	sdma_state_s10_hw_start_up_halt_wait,
	sdma_state_s15_hw_start_up_clean_wait,
	sdma_state_s20_idle,
	sdma_state_s30_sw_clean_up_wait,
	sdma_state_s40_hw_clean_up_wait,
	sdma_state_s50_hw_halt_wait,
	sdma_state_s60_idle_halt_wait,
	sdma_state_s80_hw_freeze,
	sdma_state_s82_freeze_sw_clean,
	sdma_state_s99_running,
};

enum sdma_events {
	sdma_event_e00_go_hw_down,
	sdma_event_e10_go_hw_start,
	sdma_event_e15_hw_halt_done,
	sdma_event_e25_hw_clean_up_done,
	sdma_event_e30_go_running,
	sdma_event_e40_sw_cleaned,
	sdma_event_e50_hw_cleaned,
	sdma_event_e60_hw_halted,
	sdma_event_e70_go_idle,
	sdma_event_e80_hw_freeze,
	sdma_event_e81_hw_frozen,
	sdma_event_e82_hw_unfreeze,
	sdma_event_e85_link_down,
	sdma_event_e90_sw_halted,
};

struct sdma_set_state_action {
	unsigned op_enable:1;
	unsigned op_intenable:1;
	unsigned op_halt:1;
	unsigned op_cleanup:1;
	unsigned go_s99_running_tofalse:1;
	unsigned go_s99_running_totrue:1;
};

struct sdma_state {
	struct completion comp;
	enum sdma_states current_state;
	unsigned current_op;
	unsigned go_s99_running;
	/* debugging/development */
	enum sdma_states previous_state;
	unsigned previous_op;
	enum sdma_events last_event;
};

/**
 * DOC: sdma exported routines
 *
 * These sdma routines fit into three categories:
 * - The SDMA API for building and submitting packets to the ring
 * - Initialization and teardown routines to build up and tear down the
 *   SDMA engines
 * - ISR entrances to handle interrupts, state changes, and errors
 */

/**
 * DOC: sdma PSM/verbs API
 *
 * The sdma API is designed to be used by both PSM
 * and verbs to supply packets to the SDMA ring.
 *
 * The usage of the API is as follows:
 *
 * Embed a struct iowait in the QP or
 * PQ.  The iowait should be initialized with a
 * call to iowait_init().
 *
 * The user of the API should create an allocation method
 * for their version of the txreq.  slabs, pre-allocated lists,
 * and dma pools can be used.  Once the user's overload of
 * the sdma_txreq has been allocated, the sdma_txreq member
 * must be initialized with sdma_txinit() or sdma_txinit_ahg().
 *
 * The txreq must be declared with the sdma_txreq first.
 *
 * The tx request, once initialized, is manipulated with calls to
 * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr()
 * for each disjoint memory location.  It is the user's responsibility
 * to understand the packet boundaries and page boundaries to do the
 * appropriate number of sdma_txadd_* calls.  The user
 * must be prepared to deal with failures from these routines due to
 * either memory allocation or dma_mapping failures.
 *
 * The mapping specifics for each memory location are recorded
 * in the tx.  Memory locations added with sdma_txadd_page()
 * and sdma_txadd_kvaddr() are automatically mapped when added
 * to the tx and unmapped as part of the progress processing in the
 * SDMA interrupt handling.
 *
 * sdma_txadd_daddr() is used to add a dma_addr_t memory to the
 * tx.  An example of a use case would be a pre-allocated
 * set of headers allocated via dma_pool_alloc() or
 * dma_alloc_coherent().  For these memory locations, it
 * is the responsibility of the user to handle that unmapping.
 * (This would usually be at an unload or job termination.)
 *
 * The routine sdma_send_txreq() is used to submit
 * a tx to the ring after the appropriate number of
 * sdma_txadd_* calls have been done.
 *
 * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist()
 * can be used to submit a list of packets.
 *
 * The user is free to use the link overhead in the struct sdma_txreq as
 * long as the tx isn't in flight.
 *
 * The extreme degenerate case of the number of descriptors
 * exceeding the ring size is automatically handled as
 * memory locations are added.  An overflow of the descriptor
 * array that is part of the sdma_txreq is also automatically
 * handled.
 */
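
/*
 * A minimal usage sketch of the flow described above.  The wrapper
 * struct, callback, iowait, and engine names here are hypothetical and
 * only illustrate call ordering; they are not part of this header.
 *
 *	struct my_txreq {
 *		struct sdma_txreq txreq;	(must be first)
 *		...user specific fields...
 *	};
 *
 *	ret = sdma_txinit(&mtx->txreq, 0, pkt_len, my_tx_complete);
 *	if (!ret)
 *		ret = sdma_txadd_kvaddr(dd, &mtx->txreq, hdr, hdr_len);
 *	if (!ret)
 *		ret = sdma_txadd_page(dd, &mtx->txreq, page, offset, data_len);
 *	if (!ret)
 *		ret = sdma_send_txreq(sde, &priv->s_iowait, &mtx->txreq);
 */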

/**
 * DOC: Infrastructure calls
 *
 * sdma_init() is used to initialize data structures and
 * CSRs for the desired number of SDMA engines.
 *
 * sdma_start() is used to kick the SDMA engines initialized
 * with sdma_init().  Interrupts must be enabled at this
 * point since aspects of the state machine are interrupt
 * driven.
 *
 * sdma_engine_error() and sdma_engine_interrupt() are
 * entrances for interrupts.
 *
 * sdma_map_init() is for the management of the mapping
 * table when the number of vls is changed.
 */
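
/*
 * Bring-up ordering sketch (illustrative only; error handling and the
 * step that enables SDMA interrupts depend on the rest of the driver):
 *
 *	if (sdma_init(dd, port))
 *		goto bail;
 *	...enable SDMA interrupts...
 *	sdma_start(dd);
 */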

/**
 * struct hw_sdma_desc - raw 128 bit SDMA descriptor
 *
 * This is the raw descriptor in the SDMA ring
 */
struct hw_sdma_desc {
	/* private: don't use directly */
	__le64 qw[2];
};

/**
 * struct sdma_desc - canonical fragment descriptor
 *
 * This is the descriptor carried in the tx request
 * corresponding to each fragment.
 */
struct sdma_desc {
	/* private: don't use directly */
	u64 qw[2];
};

typedef void (*callback_t)(struct sdma_txreq *, int, int);

/**
 * struct sdma_txreq - the sdma_txreq structure (one per packet)
 * @list: for use by user and by queuing for wait
 *
 * This is the representation of a packet which consists of some
 * number of fragments.  Storage is provided within the structure
 * for a fixed number of descriptors (NUM_DESC).
 *
 * The storage for the descriptors is automatically extended as needed
 * when the current allocation is exceeded.
 *
 * The user (Verbs or PSM) may overload this structure with fields
 * specific to their use by putting this struct first in their struct.
 * The method of allocation of the overloaded structure is user dependent.
 *
 * The list is the only public field in the structure.
 */
struct sdma_txreq {
	struct list_head list;
	/* private: */
	struct sdma_desc *descp;
	void *coalesce_buf;
	u16 flags;
	callback_t complete;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	u64 sn;
#endif
	/* private: - used in coalesce/pad processing */
	u16 packet_len;
	/* private: - down-counted to trigger last */
	u16 tlen;
	u16 num_desc;
	u16 desc_limit;
	struct sdma_desc descs[NUM_DESC];
};

struct verbs_txreq {
	struct hfi1_pio_header phdr;
	struct sdma_txreq txreq;
	struct hfi1_swqe *wqe;
	struct hfi1_mregion *mr;
	struct hfi1_sge_state *ss;
	struct sdma_engine *sde;
};

/**
 * struct sdma_engine - Data pertaining to each SDMA engine.
 * @dd: a back-pointer to the device data
 * @ppd: per port back-pointer
 * @imask: mask for irq manipulation
 * @idle_mask: mask for determining if an interrupt is due to sdma_idle
 *
 * This structure has the state for each sdma_engine.
 *
 * Accessing non-public fields is not supported
 * since the private members are subject to change.
 */
struct sdma_engine {
	struct hfi1_devdata *dd;
	struct hfi1_pportdata *ppd;
	void __iomem *tail_csr;
	u64 imask;			/* clear interrupt mask */
	struct workqueue_struct *wq;
	volatile __le64 *head_dma;	/* DMA'ed by chip */
	dma_addr_t head_phys;
	struct hw_sdma_desc *descq;
	unsigned descq_full_count;
	struct sdma_txreq **tx_ring;
	dma_addr_t descq_phys;
	struct sdma_state state;
	u8 this_idx;			/* zero relative engine */
	/* protect changes to senddmactrl shadow */
	spinlock_t senddmactrl_lock;
	u64 p_senddmactrl;		/* shadow per-engine SendDmaCtrl */

	/* read/write using tail_lock */
	spinlock_t tail_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	u64 tail_sn;
#endif
	u16 descq_tail;
	unsigned long ahg_bits;
	u16 desc_avail;
	u16 descq_cnt;

	/* read/write using head_lock */
	seqlock_t head_lock ____cacheline_aligned_in_smp;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	u64 head_sn;
#endif
	u16 descq_head;

	struct list_head dmawait;

	/* CONFIG SDMA for now, just blindly duplicate */
	struct tasklet_struct sdma_hw_clean_up_task
		____cacheline_aligned_in_smp;
	struct tasklet_struct sdma_sw_clean_up_task
		____cacheline_aligned_in_smp;
	struct work_struct err_halt_worker;
	struct timer_list err_progress_check_timer;
	u32 progress_check_head;
	struct work_struct flush_worker;
	spinlock_t flushlist_lock;
	struct list_head flushlist;
};

int sdma_init(struct hfi1_devdata *dd, u8 port);
void sdma_start(struct hfi1_devdata *dd);
void sdma_exit(struct hfi1_devdata *dd);
void sdma_all_running(struct hfi1_devdata *dd);
void sdma_all_idle(struct hfi1_devdata *dd);
void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle);
void sdma_freeze(struct hfi1_devdata *dd);
void sdma_unfreeze(struct hfi1_devdata *dd);
void sdma_wait(struct hfi1_devdata *dd);

/**
 * sdma_empty() - idle engine test
 * @sde: sdma engine
 *
 * Currently used by verbs as a latency optimization.
 *
 * Return:
 * 1 - empty, 0 - non-empty
 */
static inline int sdma_empty(struct sdma_engine *sde)
{
	return sde->descq_tail == sde->descq_head;
}

static inline u16 sdma_descq_freecnt(struct sdma_engine *sde)
{
	return sde->descq_cnt -
		(sde->descq_tail -
		 ACCESS_ONCE(sde->descq_head)) - 1;
}

static inline u16 sdma_descq_inprocess(struct sdma_engine *sde)
{
	return sde->descq_cnt - sdma_descq_freecnt(sde);
}

/*
 * Either head_lock or tail lock required to see
 * a steady state.
 */
static inline int __sdma_running(struct sdma_engine *engine)
{
	return engine->state.current_state == sdma_state_s99_running;
}

/**
 * sdma_running() - state suitability test
 * @engine: sdma engine
 *
 * sdma_running probes the internal state to determine if it is suitable
 * for submitting packets.
 *
 * Return:
 * 1 - ok to submit, 0 - not ok to submit
 */
static inline int sdma_running(struct sdma_engine *engine)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->tail_lock, flags);
	ret = __sdma_running(engine);
	spin_unlock_irqrestore(&engine->tail_lock, flags);
	return ret;
}

void _sdma_txreq_ahgadd(
	struct sdma_txreq *tx,
	u8 num_ahg,
	u8 ahg_entry,
	u32 *ahg,
	u8 ahg_hlen);

/**
 * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @ahg_entry: ahg entry to use (0 - 31)
 * @num_ahg: number of AHG descriptors for the first descriptor (0 - 9)
 * @ahg: array of AHG descriptors (up to 9 entries)
 * @ahg_hlen: number of bytes from ASIC entry to use
 * @cb: callback pointer
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent.  This routine must be called to initialize the user independent
 * fields.
 *
 * The currently supported flags are SDMA_TXREQ_F_URGENT,
 * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be
 * copied to the chip's AHG entry.  SDMA_TXREQ_F_USE_AHG causes the code to
 * add the AHG descriptors into the first 1 to 3 descriptors.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg().  The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used.  Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.  The callback will be provided this tx, a status, and a
 * flag.
 *
 * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 *
 * The flag, if the iowait has been used, indicates that the iowait
 * sdma_busy count has reached zero.
 *
 * The user data portion of tlen should be precise.  The sdma_txadd_*
 * entrances will pad with a descriptor that references 1 - 3 bytes when the
 * number of bytes specified in tlen has been supplied to the sdma_txreq.
 *
 * ahg_hlen is used to determine the number of on-chip entry bytes to
 * use as the header.  This is for cases where the stored header is
 * larger than the header to be used in a packet.  This is typical
 * for verbs where an RDMA_WRITE_FIRST header is larger than the header
 * needed for an RDMA_WRITE_MIDDLE.
 */
static inline int sdma_txinit_ahg(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	u8 ahg_entry,
	u8 num_ahg,
	u32 *ahg,
	u8 ahg_hlen,
	void (*cb)(struct sdma_txreq *, int, int))
{
	if (tlen > MAX_SDMA_PKT_SIZE)
		return -EMSGSIZE;
	tx->desc_limit = ARRAY_SIZE(tx->descs);
	tx->descp = &tx->descs[0];
	INIT_LIST_HEAD(&tx->list);
	tx->num_desc = 0;
	tx->flags = flags;
	tx->complete = cb;
	tx->coalesce_buf = NULL;
	tx->tlen = tx->packet_len = tlen;
	tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
	tx->descs[0].qw[1] = 0;
	if (flags & SDMA_TXREQ_F_AHG_COPY)
		tx->descs[0].qw[1] |=
			(((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
				<< SDMA_DESC1_HEADER_INDEX_SHIFT) |
			(((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK)
				<< SDMA_DESC1_HEADER_MODE_SHIFT);
	else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg)
		_sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
	return 0;
}

/**
 * sdma_txinit() - initialize an sdma_txreq struct (no AHG)
 * @tx: tx request to initialize
 * @flags: flags to key last descriptor additions
 * @tlen: total packet length (pbc + headers + data)
 * @cb: callback pointer
 *
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent.  This routine must be called to initialize the user
 * independent fields.
 *
 * The currently supported flag is SDMA_TXREQ_F_URGENT.
 *
 * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the
 * completion is desired as soon as possible.
 *
 * Completions of submitted requests can be gotten on selected
 * txreqs by giving a completion routine callback to sdma_txinit() or
 * sdma_txinit_ahg().  The environment in which the callback runs
 * can be from an ISR, a tasklet, or a thread, so no sleeping
 * kernel routines can be used.  Aspects of the sdma ring may
 * be locked so care should be taken with locking.
 *
 * The callback pointer can be NULL to avoid any callback for the packet
 * being submitted.
 *
 * The callback, if non-NULL, will be provided this tx and a status.  The
 * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR,
 * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN.
 */
static inline int sdma_txinit(
	struct sdma_txreq *tx,
	u16 flags,
	u16 tlen,
	void (*cb)(struct sdma_txreq *, int, int))
{
	return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
}
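
/*
 * Example callback shape (illustrative; the names are hypothetical).
 * Callbacks may run from an ISR or tasklet, so they must not sleep.
 *
 *	static void my_tx_complete(struct sdma_txreq *tx, int status,
 *				   int drained)
 *	{
 *		struct my_txreq *mtx =
 *			container_of(tx, struct my_txreq, txreq);
 *
 *		if (status != SDMA_TXREQ_S_OK)
 *			...note the error...
 *		...return mtx to its allocator...
 *	}
 */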

/* helpers - don't use */
static inline int sdma_mapping_type(struct sdma_desc *d)
{
	return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK)
		>> SDMA_DESC1_GENERATION_SHIFT;
}

static inline size_t sdma_mapping_len(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK)
		>> SDMA_DESC0_BYTE_COUNT_SHIFT;
}

static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d)
{
	return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK)
		>> SDMA_DESC0_PHY_ADDR_SHIFT;
}

static inline void make_tx_sdma_desc(
	struct sdma_txreq *tx,
	int type,
	dma_addr_t addr,
	size_t len)
{
	struct sdma_desc *desc = &tx->descp[tx->num_desc];

	if (!tx->num_desc) {
		/* qw[0] zero; qw[1] first, ahg mode already in from init */
		desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK)
				<< SDMA_DESC1_GENERATION_SHIFT;
	} else {
		desc->qw[0] = 0;
		desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK)
				<< SDMA_DESC1_GENERATION_SHIFT;
	}
	desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK)
			<< SDMA_DESC0_PHY_ADDR_SHIFT) |
			(((u64)len & SDMA_DESC0_BYTE_COUNT_MASK)
			<< SDMA_DESC0_BYTE_COUNT_SHIFT);
}

/* helper to extend txreq */
int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
			   int type, void *kvaddr, struct page *page,
			   unsigned long offset, u16 len);
int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
void sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);

/* helpers used by public routines */
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
				  struct sdma_txreq *tx)
{
	tx->descp[tx->num_desc].qw[0] |=
		SDMA_DESC0_LAST_DESC_FLAG;
	tx->descp[tx->num_desc].qw[1] |=
		dd->default_desc1;
	if (tx->flags & SDMA_TXREQ_F_URGENT)
		tx->descp[tx->num_desc].qw[1] |=
			(SDMA_DESC1_HEAD_TO_HOST_FLAG |
			 SDMA_DESC1_INT_REQ_FLAG);
}

static inline int _sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	int type,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval = 0;

	make_tx_sdma_desc(tx, type, addr, len);
	WARN_ON(len > tx->tlen);
	tx->tlen -= len;
	/* special cases for last */
	if (!tx->tlen) {
		if (tx->packet_len & (sizeof(u32) - 1))
			rval = _pad_sdma_tx_descs(dd, tx);
		else
			_sdma_close_tx(dd, tx);
	}
	tx->num_desc++;
	return rval;
}

/**
 * sdma_txadd_page() - add a page to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: tx request to which the page is added
 * @page: page to map
 * @offset: offset within the page
 * @len: length in bytes
 *
 * This is used to add a page/offset/length descriptor.
 *
 * The mapping/unmapping of the page/offset/len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't
 * extend/coalesce descriptor array
 */
static inline int sdma_txadd_page(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	struct page *page,
	unsigned long offset,
	u16 len)
{
	dma_addr_t addr;
	int rval;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
					      NULL, page, offset, len);
		if (rval <= 0)
			return rval;
	}

	addr = dma_map_page(&dd->pcidev->dev, page, offset, len,
			    DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(
			dd, SDMA_MAP_PAGE, tx, addr, len);
}

/**
 * sdma_txadd_daddr() - add a dma address to the sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the descriptor is added
 * @addr: dma address mapped by caller
 * @len: length in bytes
 *
 * This is used to add a descriptor for memory that is already dma mapped.
 *
 * In this case, there is no unmapping as part of the progress processing for
 * this memory location.
 *
 * Return:
 * 0 - success, -ENOMEM - couldn't extend descriptor array
 */
static inline int sdma_txadd_daddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	dma_addr_t addr,
	u16 len)
{
	int rval;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
					      NULL, NULL, 0, len);
		if (rval <= 0)
			return rval;
	}

	return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len);
}

/**
 * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq
 * @dd: the device to use for mapping
 * @tx: sdma_txreq to which the buffer is added
 * @kvaddr: the kernel virtual address
 * @len: length in bytes
 *
 * This is used to add a descriptor referenced by the indicated kvaddr and
 * len.
 *
 * The mapping/unmapping of the kvaddr and len is automatically handled.
 *
 * Return:
 * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend/coalesce
 * descriptor array
 */
static inline int sdma_txadd_kvaddr(
	struct hfi1_devdata *dd,
	struct sdma_txreq *tx,
	void *kvaddr,
	u16 len)
{
	dma_addr_t addr;
	int rval;

	if ((unlikely(tx->num_desc == tx->desc_limit))) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
					      kvaddr, NULL, 0, len);
		if (rval <= 0)
			return rval;
	}

	addr = dma_map_single(&dd->pcidev->dev, kvaddr, len,
			      DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
		sdma_txclean(dd, tx);
		return -ENOSPC;
	}

	return _sdma_txadd_daddr(
			dd, SDMA_MAP_SINGLE, tx, addr, len);
}

int sdma_send_txreq(struct sdma_engine *sde,
		    struct iowait *wait,
		    struct sdma_txreq *tx);
int sdma_send_txlist(struct sdma_engine *sde,
		     struct iowait *wait,
		     struct list_head *tx_list);

int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);

/**
 * sdma_build_ahg_descriptor() - build ahg descriptor
 * @data: value to write into the header field
 * @dwindex: dword index of the field in the AHG header entry
 * @startbit: starting bit position of the field within the dword
 * @bits: field length in bits
 *
 * Build and return a 32 bit descriptor.
 */
static inline u32 sdma_build_ahg_descriptor(
	u16 data,
	u8 dwindex,
	u8 startbit,
	u8 bits)
{
	return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT |
		((startbit & SDMA_AHG_FIELD_START_MASK) <<
		SDMA_AHG_FIELD_START_SHIFT) |
		((bits & SDMA_AHG_FIELD_LEN_MASK) <<
		SDMA_AHG_FIELD_LEN_SHIFT) |
		((dwindex & SDMA_AHG_INDEX_MASK) <<
		SDMA_AHG_INDEX_SHIFT) |
		((data & SDMA_AHG_VALUE_MASK) <<
		SDMA_AHG_VALUE_SHIFT));
}
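
/*
 * Example (illustrative): build a single AHG update word that replaces
 * 8 bits of header dword 3, starting at bit 16, with a new value.  The
 * dword index and bit position here are hypothetical.
 *
 *	u32 ahg[1];
 *
 *	ahg[0] = sdma_build_ahg_descriptor(new_val, 3, 16, 8);
 */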

/**
 * sdma_progress - use seq number to detect head progress
 * @sde: sdma_engine to check
 * @seq: base seq count
 * @tx: txreq for which we need to check descriptor availability
 *
 * This is used in the appropriate spot in the sleep routine
 * to check for potential ring progress.  This routine gets the
 * seqcount before queuing the iowait structure for progress.
 *
 * If the seqcount indicates that progress needs to be checked,
 * re-submission is detected by checking whether the descriptor
 * queue has enough descriptors for the txreq.
 */
static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq,
				     struct sdma_txreq *tx)
{
	if (read_seqretry(&sde->head_lock, seq)) {
		sde->desc_avail = sdma_descq_freecnt(sde);
		if (tx->num_desc > sde->desc_avail)
			return 0;
		return 1;
	}
	return 0;
}
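
/*
 * Typical use (illustrative): on the no-descriptor path, capture the
 * head_lock seqcount before deciding to sleep, and only queue the iowait
 * if sdma_progress() reports that nothing changed in the meantime.
 *
 *	seq = read_seqbegin(&sde->head_lock);
 *	if (sdma_progress(sde, seq, tx))
 *		goto retry;
 *	...queue the iowait and return busy to the caller...
 */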

/**
 * sdma_iowait_schedule() - schedule an iowait for progress
 * @sde: sdma_engine to schedule
 * @wait: wait struct to schedule
 *
 * This function schedules the iowait structure embedded
 * in the QP or PQ on the engine's workqueue.
 */
static inline void sdma_iowait_schedule(
	struct sdma_engine *sde,
	struct iowait *wait)
{
	iowait_schedule(wait, sde->wq);
}

/* for use by interrupt handling */
void sdma_engine_error(struct sdma_engine *sde, u64 status);
void sdma_engine_interrupt(struct sdma_engine *sde, u64 status);

/*
 * The diagram below details the relationship of the mapping structures.
 *
 * Since the mapping now allows for non-uniform engines per vl, the
 * number of engines for a vl is either the vl_engines[vl] or
 * a computation based on num_sdma/num_vls:
 *
 * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls
 *
 * n = roundup to next highest power of 2 using nactual
 *
 * In the case where num_sdma/num_vls doesn't divide
 * evenly, the extras are added from the last vl downward.
 *
 * For the case where n > nactual, the engines are assigned
 * in a round robin fashion wrapping back to the first engine
 * for a particular vl.
 *
 *                                       sdma_map_elem[0]
 *                                    +--------------------+
 * sdma_vl_map                        |        mask        |
 * +--------------------------+       |--------------------|
 * |    list (RCU)            |       | sde[0] -> eng 1    |
 * |--------------------------|       |--------------------|
 * |    mask                  |       | sde[1] -> eng 2    |
 * |--------------------------|       |--------------------|
 * |    actual_vls (max 8)    |       |         *          |
 * |--------------------------|       |--------------------|
 * |    vls (max 8)           |       | sde[n] -> eng n    |
 * |--------------------------|       +--------------------+
 * |    map[0]                |------------^
 * |--------------------------|
 * |    map[1]                |------+     sdma_map_elem[1]
 * |--------------------------|      |  +--------------------+
 * |           *              |      |  |        mask        |
 * |           *              |      |  |--------------------|
 * |           *              |      +->| sde[0] -> eng 1+n  |
 * |--------------------------|         |--------------------|
 * |   map[vls - 1]           |---+     | sde[1] -> eng 2+n  |
 * +--------------------------+   |     |--------------------|
 *                                |     |         *          |
 *                                |     |--------------------|
 *                                |     | sde[m] -> eng m+n  |
 *                                |     +--------------------+
 *                                |
 *                                |     sdma_map_elem[vls - 1]
 *                                |     +--------------------+
 *                                |     |        mask        |
 *                                |     |--------------------|
 *                                +---->| sde[0] -> eng 1+m+n|
 *                                      |--------------------|
 *                                      | sde[1] -> eng 2+m+n|
 *                                      |--------------------|
 *                                      |         *          |
 *                                      |--------------------|
 *                                      | sde[o] -> eng o+m+n|
 *                                      +--------------------+
 */

/**
 * struct sdma_map_elem - mapping for a vl
 * @mask - selector mask
 * @sde - array of engines for this vl
 *
 * The mask is used to "mod" the selector
 * to produce index into the trailing
 * array of engines.
 */
struct sdma_map_elem {
	u32 mask;
	struct sdma_engine *sde[0];
};

/**
 * struct sdma_vl_map - mapping for vls
 * @list - rcu head for free callback
 * @mask - vl mask to "mod" the vl to produce an index to map array
 * @actual_vls - number of vls
 * @vls - number of vls rounded to next power of 2
 * @map - array of sdma_map_elem entries
 *
 * This is the parent mapping structure.  The trailing
 * members of the struct point to sdma_map_elem entries, which
 * in turn point to an array of sde's for that vl.
 */
struct sdma_vl_map {
	struct rcu_head list;
	u32 mask;
	u8 actual_vls;
	u8 vls;
	struct sdma_map_elem *map[0];
};
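
/*
 * Lookup sketch (illustrative): the two masks reduce a vl and a per-flow
 * selector to array indices.  The RCU-protected pointer holding the map
 * (shown here as dd->sdma_map) and the way the selector is derived are
 * assumptions, not part of this header.
 *
 *	rcu_read_lock();
 *	m = rcu_dereference(dd->sdma_map);
 *	e = m->map[vl & m->mask];
 *	sde = e->sde[selector & e->mask];
 *	rcu_read_unlock();
 */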

int sdma_map_init(
	struct hfi1_devdata *dd,
	u8 port,
	u8 num_vls,
	u8 *vl_engines);

/* slow path */
void _sdma_engine_progress_schedule(struct sdma_engine *sde);

/**
 * sdma_engine_progress_schedule() - schedule progress on engine
 * @sde: sdma_engine to schedule progress
 *
 * This is the fast path.
 */
static inline void sdma_engine_progress_schedule(
	struct sdma_engine *sde)
{
	if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8))
		return;
	_sdma_engine_progress_schedule(sde);
}

struct sdma_engine *sdma_select_engine_sc(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 sc5);

struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl);

void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *);

#ifdef CONFIG_SDMA_VERBOSITY
void sdma_dumpstate(struct sdma_engine *);
#endif

static inline char *slashstrip(char *s)
{
	char *r = s;

	while (*s)
		if (*s++ == '/')
			r = s;
	return r;
}

u16 sdma_get_descq_cnt(void);

extern uint mod_num_sdma;

void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid);