kernel/drivers/staging/rdma/ipath/ipath_user_sdma.c
/*
 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "ipath_kernel.h"
#include "ipath_user_sdma.h"

/* minimum size of header */
#define IPATH_USER_SDMA_MIN_HEADER_LENGTH       64
/* expected size of headers (for dma_pool) */
#define IPATH_USER_SDMA_EXP_HEADER_LENGTH       64
/* length mask in PBC (lower 11 bits) */
#define IPATH_PBC_LENGTH_MASK                   ((1 << 11) - 1)

struct ipath_user_sdma_pkt {
        u8 naddr;               /* dimension of addr (1..3) ... */
        u32 counter;            /* sdma pkts queued counter for this entry */
        u64 added;              /* global descq number of entries */

        struct {
                u32 offset;                     /* offset for kvaddr, addr */
                u32 length;                     /* length in page */
                u8  put_page;                   /* should we put_page? */
                u8  dma_mapped;                 /* is page dma_mapped? */
                struct page *page;              /* may be NULL (coherent mem) */
                void *kvaddr;                   /* FIXME: only for pio hack */
                dma_addr_t addr;
        } addr[4];   /* max pages, any more and we coalesce */
        struct list_head list;  /* list element */
};

struct ipath_user_sdma_queue {
        /*
         * pkts sent to the dma engine are queued on this
         * list head.  the elements of this list are of
         * type struct ipath_user_sdma_pkt...
         */
        struct list_head sent;

        /* headers with expected length are allocated from here... */
        char header_cache_name[64];
        struct dma_pool *header_cache;

        /* packets are allocated from the slab cache... */
        char pkt_slab_name[64];
        struct kmem_cache *pkt_slab;

        /* as packets go on the queued queue, they are counted... */
        u32 counter;
        u32 sent_counter;

        /* dma page table */
        struct rb_root dma_pages_root;

        /* protect everything above... */
        struct mutex lock;
};

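/*
 * allocate and initialize a user SDMA queue: the packet slab cache,
 * the dma_pool used for expected-length headers, and the list of
 * sent packets.  returns NULL on any allocation failure.
 */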
struct ipath_user_sdma_queue *
ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
{
        struct ipath_user_sdma_queue *pq =
                kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);

        if (!pq)
                goto done;

        pq->counter = 0;
        pq->sent_counter = 0;
        INIT_LIST_HEAD(&pq->sent);

        mutex_init(&pq->lock);

        snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
                 "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
        pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
                                         sizeof(struct ipath_user_sdma_pkt),
                                         0, 0, NULL);

        if (!pq->pkt_slab)
                goto err_kfree;

        snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
                 "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
        pq->header_cache = dma_pool_create(pq->header_cache_name,
                                           dev,
                                           IPATH_USER_SDMA_EXP_HEADER_LENGTH,
                                           4, 0);
        if (!pq->header_cache)
                goto err_slab;

        pq->dma_pages_root = RB_ROOT;

        goto done;

err_slab:
        kmem_cache_destroy(pq->pkt_slab);
err_kfree:
        kfree(pq);
        pq = NULL;

done:
        return pq;
}

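/* fill in one fragment (addr[i]) of a packet with its mapping state */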
static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
                                      int i, size_t offset, size_t len,
                                      int put_page, int dma_mapped,
                                      struct page *page,
                                      void *kvaddr, dma_addr_t dma_addr)
{
        pkt->addr[i].offset = offset;
        pkt->addr[i].length = len;
        pkt->addr[i].put_page = put_page;
        pkt->addr[i].dma_mapped = dma_mapped;
        pkt->addr[i].page = page;
        pkt->addr[i].kvaddr = kvaddr;
        pkt->addr[i].addr = dma_addr;
}

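/* set up fragment 0 of a packet to hold the PBC + header */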
static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
                                        u32 counter, size_t offset,
                                        size_t len, int dma_mapped,
                                        struct page *page,
                                        void *kvaddr, dma_addr_t dma_addr)
{
        pkt->naddr = 1;
        pkt->counter = counter;
        ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
                                  kvaddr, dma_addr);
}

/* we have too many pages in the iovec; coalesce into a single page */
static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
                                    struct ipath_user_sdma_pkt *pkt,
                                    const struct iovec *iov,
                                    unsigned long niov)
{
        int ret = 0;
        struct page *page = alloc_page(GFP_KERNEL);
        void *mpage_save;
        char *mpage;
        int i;
        int len = 0;
        dma_addr_t dma_addr;

        if (!page) {
                ret = -ENOMEM;
                goto done;
        }

        mpage = kmap(page);
        mpage_save = mpage;
        for (i = 0; i < niov; i++) {
                int cfur;

                cfur = copy_from_user(mpage,
                                      iov[i].iov_base, iov[i].iov_len);
                if (cfur) {
                        ret = -EFAULT;
                        goto free_unmap;
                }

                mpage += iov[i].iov_len;
                len += iov[i].iov_len;
        }

        dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
                                DMA_TO_DEVICE);
        if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                ret = -ENOMEM;
                goto free_unmap;
        }

        ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
                                  dma_addr);
        pkt->naddr = 2;

        goto done;

free_unmap:
        kunmap(page);
        __free_page(page);
done:
        return ret;
}

/* how many pages in this iovec element? */
static int ipath_user_sdma_num_pages(const struct iovec *iov)
{
        const unsigned long addr  = (unsigned long) iov->iov_base;
        const unsigned long  len  = iov->iov_len;
        const unsigned long spage = addr & PAGE_MASK;
        const unsigned long epage = (addr + len - 1) & PAGE_MASK;

        return 1 + ((epage - spage) >> PAGE_SHIFT);
}

/* truncate length to page boundary */
static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
{
        const unsigned long offset = offset_in_page(addr);

        return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
}

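/*
 * release one fragment of a packet: unmap it if it was dma mapped,
 * kunmap and put/free its page, or return a cached header to the
 * dma_pool.
 */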
static void ipath_user_sdma_free_pkt_frag(struct device *dev,
                                          struct ipath_user_sdma_queue *pq,
                                          struct ipath_user_sdma_pkt *pkt,
                                          int frag)
{
        const int i = frag;

        if (pkt->addr[i].page) {
                if (pkt->addr[i].dma_mapped)
                        dma_unmap_page(dev,
                                       pkt->addr[i].addr,
                                       pkt->addr[i].length,
                                       DMA_TO_DEVICE);

                if (pkt->addr[i].kvaddr)
                        kunmap(pkt->addr[i].page);

                if (pkt->addr[i].put_page)
                        put_page(pkt->addr[i].page);
                else
                        __free_page(pkt->addr[i].page);
        } else if (pkt->addr[i].kvaddr)
                /* free coherent mem from cache... */
                dma_pool_free(pq->header_cache,
                              pkt->addr[i].kvaddr, pkt->addr[i].addr);
}

/* return number of pages pinned... */
static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
                                     struct ipath_user_sdma_pkt *pkt,
                                     unsigned long addr, int tlen, int npages)
{
        struct page *pages[2];
        int j;
        int ret;

        ret = get_user_pages_fast(addr, npages, 0, pages);
        if (ret != npages) {
                int i;

                for (i = 0; i < ret; i++)
                        put_page(pages[i]);

                ret = -ENOMEM;
                goto done;
        }

        for (j = 0; j < npages; j++) {
                /* map the pages... */
                const int flen =
                        ipath_user_sdma_page_length(addr, tlen);
                dma_addr_t dma_addr =
                        dma_map_page(&dd->pcidev->dev,
                                     pages[j], 0, flen, DMA_TO_DEVICE);
                unsigned long fofs = offset_in_page(addr);

                if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                        ret = -ENOMEM;
                        goto done;
                }

                ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
                                          pages[j], kmap(pages[j]),
                                          dma_addr);

                pkt->naddr++;
                addr += flen;
                tlen -= flen;
        }

done:
        return ret;
}

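/* pin and map the user pages for each payload iovec; undo all frags on error */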
static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
                                   struct ipath_user_sdma_queue *pq,
                                   struct ipath_user_sdma_pkt *pkt,
                                   const struct iovec *iov,
                                   unsigned long niov)
{
        int ret = 0;
        unsigned long idx;

        for (idx = 0; idx < niov; idx++) {
                const int npages = ipath_user_sdma_num_pages(iov + idx);
                const unsigned long addr = (unsigned long) iov[idx].iov_base;

                ret = ipath_user_sdma_pin_pages(dd, pkt,
                                                addr, iov[idx].iov_len,
                                                npages);
                if (ret < 0)
                        goto free_pkt;
        }

        goto done;

free_pkt:
        for (idx = 0; idx < pkt->naddr; idx++)
                ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

done:
        return ret;
}

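/*
 * attach the payload to the packet: coalesce into a single bounce page
 * if it would not fit in the addr[] array, otherwise pin the user pages
 * in place.
 */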
static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
                                        struct ipath_user_sdma_queue *pq,
                                        struct ipath_user_sdma_pkt *pkt,
                                        const struct iovec *iov,
                                        unsigned long niov, int npages)
{
        int ret = 0;

        if (npages >= ARRAY_SIZE(pkt->addr))
                ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
        else
                ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

        return ret;
}

/* free a packet list, releasing every fragment of every packet on it */
static void ipath_user_sdma_free_pkt_list(struct device *dev,
                                          struct ipath_user_sdma_queue *pq,
                                          struct list_head *list)
{
        struct ipath_user_sdma_pkt *pkt, *pkt_next;

        list_for_each_entry_safe(pkt, pkt_next, list, list) {
                int i;

                for (i = 0; i < pkt->naddr; i++)
                        ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);

                kmem_cache_free(pq->pkt_slab, pkt);
        }
}

/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets on list, returning the number
 * of iovec entries consumed.  list must be empty initially,
 * since we clean it up if there is an error...
 */
static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
                                      struct ipath_user_sdma_queue *pq,
                                      struct list_head *list,
                                      const struct iovec *iov,
                                      unsigned long niov,
                                      int maxpkts)
{
        unsigned long idx = 0;
        int ret = 0;
        int npkts = 0;
        struct page *page = NULL;
        __le32 *pbc;
        dma_addr_t dma_addr;
        struct ipath_user_sdma_pkt *pkt = NULL;
        size_t len;
        size_t nw;
        u32 counter = pq->counter;
        int dma_mapped = 0;

        while (idx < niov && npkts < maxpkts) {
                const unsigned long addr = (unsigned long) iov[idx].iov_base;
                const unsigned long idx_save = idx;
                unsigned pktnw;
                unsigned pktnwc;
                int nfrags = 0;
                int npages = 0;
                int cfur;

                dma_mapped = 0;
                len = iov[idx].iov_len;
                nw = len >> 2;
                page = NULL;

                pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
                if (!pkt) {
                        ret = -ENOMEM;
                        goto free_list;
                }

                if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
                    len > PAGE_SIZE || len & 3 || addr & 3) {
                        ret = -EINVAL;
                        goto free_pkt;
                }

                if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
                        pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
                                             &dma_addr);
                else
                        pbc = NULL;

                if (!pbc) {
                        page = alloc_page(GFP_KERNEL);
                        if (!page) {
                                ret = -ENOMEM;
                                goto free_pkt;
                        }
                        pbc = kmap(page);
                }

                cfur = copy_from_user(pbc, iov[idx].iov_base, len);
                if (cfur) {
                        ret = -EFAULT;
                        goto free_pbc;
                }

                /*
                 * this assignment is a bit strange.  it's because
                 * the pbc counts the number of 32 bit words in the full
                 * packet _except_ the first word of the pbc itself...
                 */
                pktnwc = nw - 1;

                /*
                 * pktnw computation yields the number of 32 bit words
                 * that the caller has indicated in the PBC.  note that
                 * this is one less than the total number of words that
                 * goes to the send DMA engine as the first 32 bit word
                 * of the PBC itself is not counted.  Armed with this count,
                 * we can verify that the packet is consistent with the
                 * iovec lengths.
                 */
                pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
                if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
                        ret = -EINVAL;
                        goto free_pbc;
                }


                idx++;
                while (pktnwc < pktnw && idx < niov) {
                        const size_t slen = iov[idx].iov_len;
                        const unsigned long faddr =
                                (unsigned long) iov[idx].iov_base;

                        if (slen & 3 || faddr & 3 || !slen ||
                            slen > PAGE_SIZE) {
                                ret = -EINVAL;
                                goto free_pbc;
                        }

                        npages++;
                        if ((faddr & PAGE_MASK) !=
                            ((faddr + slen - 1) & PAGE_MASK))
                                npages++;

                        pktnwc += slen >> 2;
                        idx++;
                        nfrags++;
                }

                if (pktnwc != pktnw) {
                        ret = -EINVAL;
                        goto free_pbc;
                }

                if (page) {
                        dma_addr = dma_map_page(&dd->pcidev->dev,
                                                page, 0, len, DMA_TO_DEVICE);
                        if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                                ret = -ENOMEM;
                                goto free_pbc;
                        }

                        dma_mapped = 1;
                }

                ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
                                            page, pbc, dma_addr);

                if (nfrags) {
                        ret = ipath_user_sdma_init_payload(dd, pq, pkt,
                                                           iov + idx_save + 1,
                                                           nfrags, npages);
                        if (ret < 0)
                                goto free_pbc_dma;
                }

                counter++;
                npkts++;

                list_add_tail(&pkt->list, list);
        }

        ret = idx;
        goto done;

free_pbc_dma:
        if (dma_mapped)
                dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
free_pbc:
        if (page) {
                kunmap(page);
                __free_page(page);
        } else
                dma_pool_free(pq->header_cache, pbc, dma_addr);
free_pkt:
        kmem_cache_free(pq->pkt_slab, pkt);
free_list:
        ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
        return ret;
}

static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
                                                 u32 c)
{
        pq->sent_counter = c;
}

/* try to clean out queue -- needs pq->lock */
static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
                                       struct ipath_user_sdma_queue *pq)
{
        struct list_head free_list;
        struct ipath_user_sdma_pkt *pkt;
        struct ipath_user_sdma_pkt *pkt_prev;
        int ret = 0;

        INIT_LIST_HEAD(&free_list);

        list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
                s64 descd = dd->ipath_sdma_descq_removed - pkt->added;

                if (descd < 0)
                        break;

                list_move_tail(&pkt->list, &free_list);

                /* one more packet cleaned */
                ret++;
        }

        if (!list_empty(&free_list)) {
                u32 counter;

                pkt = list_entry(free_list.prev,
                                 struct ipath_user_sdma_pkt, list);
                counter = pkt->counter;

                ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                ipath_user_sdma_set_complete_counter(pq, counter);
        }

        return ret;
}

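/* tear down a queue created by ipath_user_sdma_queue_create() */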
void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
{
        if (!pq)
                return;

        kmem_cache_destroy(pq->pkt_slab);
        dma_pool_destroy(pq->header_cache);
        kfree(pq);
}

/* clean descriptor queue, returns > 0 if some elements cleaned */
static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
        ret = ipath_sdma_make_progress(dd);
        spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

        return ret;
}

/* we're in close, drain packets so that we can clean up successfully... */
void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
                                 struct ipath_user_sdma_queue *pq)
{
        int i;

        if (!pq)
                return;

        for (i = 0; i < 100; i++) {
                mutex_lock(&pq->lock);
                if (list_empty(&pq->sent)) {
                        mutex_unlock(&pq->lock);
                        break;
                }
                ipath_user_sdma_hwqueue_clean(dd);
                ipath_user_sdma_queue_clean(dd, pq);
                mutex_unlock(&pq->lock);
                msleep(10);
        }

        if (!list_empty(&pq->sent)) {
                struct list_head free_list;

                printk(KERN_INFO "drain: lists not empty: forcing!\n");
                INIT_LIST_HEAD(&free_list);
                mutex_lock(&pq->lock);
                list_splice_init(&pq->sent, &free_list);
                ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
                mutex_unlock(&pq->lock);
        }
}

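/*
 * descriptor word 0 packs the low 32 bits of the dma address, the
 * SDMA generation, the dword count and the buffer offset; word 1
 * holds the upper address bits.  the first/last helpers set the
 * first-descriptor, last-descriptor and dma-head-update flag bits.
 */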
static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
                                           u64 addr, u64 dwlen, u64 dwoffset)
{
        return cpu_to_le64(/* SDmaPhyAddr[31:0] */
                           ((addr & 0xfffffffcULL) << 32) |
                           /* SDmaGeneration[1:0] */
                           ((dd->ipath_sdma_generation & 3ULL) << 30) |
                           /* SDmaDwordCount[10:0] */
                           ((dwlen & 0x7ffULL) << 16) |
                           /* SDmaBufOffset[12:2] */
                           (dwoffset & 0x7ffULL));
}

static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
{
        return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
{
                                              /* last */  /* dma head */
        return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 ipath_sdma_make_desc1(u64 addr)
{
        /* SDmaPhyAddr[47:32] */
        return cpu_to_le64(addr >> 32);
}

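/* write both descriptor words for one fragment into the descq slot at tail */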
static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
                                      struct ipath_user_sdma_pkt *pkt, int idx,
                                      unsigned ofs, u16 tail)
{
        const u64 addr = (u64) pkt->addr[idx].addr +
                (u64) pkt->addr[idx].offset;
        const u64 dwlen = (u64) pkt->addr[idx].length / 4;
        __le64 *descqp;
        __le64 descq0;

        descqp = &dd->ipath_sdma_descq[tail].qw[0];

        descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
        if (idx == 0)
                descq0 = ipath_sdma_make_first_desc0(descq0);
        if (idx == pkt->naddr - 1)
                descq0 = ipath_sdma_make_last_desc0(descq0);

        descqp[0] = descq0;
        descqp[1] = ipath_sdma_make_desc1(addr);
}

/* pq->lock must be held, get packets on the wire... */
static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
                                     struct ipath_user_sdma_queue *pq,
                                     struct list_head *pktlist)
{
        int ret = 0;
        unsigned long flags;
        u16 tail;

        if (list_empty(pktlist))
                return 0;

        if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
                return -ECOMM;

        spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

        if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
                ret = -ECOMM;
                goto unlock;
        }

        tail = dd->ipath_sdma_descq_tail;
        while (!list_empty(pktlist)) {
                struct ipath_user_sdma_pkt *pkt =
                        list_entry(pktlist->next, struct ipath_user_sdma_pkt,
                                   list);
                int i;
                unsigned ofs = 0;
                u16 dtail = tail;

                if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
                        goto unlock_check_tail;

                for (i = 0; i < pkt->naddr; i++) {
                        ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
                        ofs += pkt->addr[i].length >> 2;

                        if (++tail == dd->ipath_sdma_descq_cnt) {
                                tail = 0;
                                ++dd->ipath_sdma_generation;
                        }
                }

                if ((ofs<<2) > dd->ipath_ibmaxlen) {
                        ipath_dbg("packet size %X > ibmax %X, fail\n",
                                ofs<<2, dd->ipath_ibmaxlen);
                        ret = -EMSGSIZE;
                        goto unlock;
                }

                /*
                 * if the packet is >= 2KB mtu equivalent, we have to use
                 * the large buffers, and have to mark each descriptor as
                 * part of a large buffer packet.
                 */
                if (ofs >= IPATH_SMALLBUF_DWORDS) {
                        for (i = 0; i < pkt->naddr; i++) {
                                dd->ipath_sdma_descq[dtail].qw[0] |=
                                        cpu_to_le64(1ULL << 14);
                                if (++dtail == dd->ipath_sdma_descq_cnt)
                                        dtail = 0;
                        }
                }

                dd->ipath_sdma_descq_added += pkt->naddr;
                pkt->added = dd->ipath_sdma_descq_added;
                list_move_tail(&pkt->list, &pq->sent);
                ret++;
        }

unlock_check_tail:
        /* advance the tail on the chip if necessary */
        if (dd->ipath_sdma_descq_tail != tail) {
                wmb();
                ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
                dd->ipath_sdma_descq_tail = tail;
        }

unlock:
        spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

        return ret;
}

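/*
 * userspace entry point: split the iovec into packets (up to 8 at a
 * time), lazily reclaim completed descriptors, and push the packets
 * onto the hardware SDMA queue.  returns the number of packets sent
 * or a negative errno.
 */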
int ipath_user_sdma_writev(struct ipath_devdata *dd,
                           struct ipath_user_sdma_queue *pq,
                           const struct iovec *iov,
                           unsigned long dim)
{
        int ret = 0;
        struct list_head list;
        int npkts = 0;

        INIT_LIST_HEAD(&list);

        mutex_lock(&pq->lock);

        if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
                ipath_user_sdma_hwqueue_clean(dd);
                ipath_user_sdma_queue_clean(dd, pq);
        }

        while (dim) {
                const int mxp = 8;

                ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
                if (ret <= 0)
                        goto done_unlock;
                else {
                        dim -= ret;
                        iov += ret;
                }

                /* force packets onto the sdma hw queue... */
                if (!list_empty(&list)) {
                        /*
                         * lazily clean hw queue.  the 4 is a guess of about
                         * how many sdma descriptors a packet will take (it
                         * doesn't have to be perfect).
                         */
                        if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
                                ipath_user_sdma_hwqueue_clean(dd);
                                ipath_user_sdma_queue_clean(dd, pq);
                        }

                        ret = ipath_user_sdma_push_pkts(dd, pq, &list);
                        if (ret < 0)
                                goto done_unlock;
                        else {
                                npkts += ret;
                                pq->counter += ret;

                                if (!list_empty(&list))
                                        goto done_unlock;
                        }
                }
        }

done_unlock:
        if (!list_empty(&list))
                ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
        mutex_unlock(&pq->lock);

        return (ret < 0) ? ret : npkts;
}

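/* reap completed descriptors and sent packets; returns the number cleaned */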
int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
                                  struct ipath_user_sdma_queue *pq)
{
        int ret = 0;

        mutex_lock(&pq->lock);
        ipath_user_sdma_hwqueue_clean(dd);
        ret = ipath_user_sdma_queue_clean(dd, pq);
        mutex_unlock(&pq->lock);

        return ret;
}

u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
{
        return pq->sent_counter;
}

u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
{
        return pq->counter;
}