Upgrade to 4.4.50-rt62
kvmfornfv.git: kernel/drivers/staging/rdma/hfi1/file_ops.c
1 /*
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2015 Intel Corporation.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of version 2 of the GNU General Public License as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful, but
15  * WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * General Public License for more details.
18  *
19  * BSD LICENSE
20  *
21  * Copyright(c) 2015 Intel Corporation.
22  *
23  * Redistribution and use in source and binary forms, with or without
24  * modification, are permitted provided that the following conditions
25  * are met:
26  *
27  *  - Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  *  - Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in
31  *    the documentation and/or other materials provided with the
32  *    distribution.
33  *  - Neither the name of Intel Corporation nor the names of its
34  *    contributors may be used to endorse or promote products derived
35  *    from this software without specific prior written permission.
36  *
37  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48  *
49  */
50 #include <linux/pci.h>
51 #include <linux/poll.h>
52 #include <linux/cdev.h>
53 #include <linux/swap.h>
54 #include <linux/vmalloc.h>
55 #include <linux/highmem.h>
56 #include <linux/io.h>
57 #include <linux/jiffies.h>
58 #include <asm/pgtable.h>
59 #include <linux/delay.h>
60 #include <linux/export.h>
61 #include <linux/module.h>
62 #include <linux/cred.h>
63 #include <linux/uio.h>
64
65 #include <rdma/ib.h>
66
67 #include "hfi.h"
68 #include "pio.h"
69 #include "device.h"
70 #include "common.h"
71 #include "trace.h"
72 #include "user_sdma.h"
73 #include "eprom.h"
74
75 #undef pr_fmt
76 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
77
78 #define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
79
80 /*
81  * File operation functions
82  */
83 static int hfi1_file_open(struct inode *, struct file *);
84 static int hfi1_file_close(struct inode *, struct file *);
85 static ssize_t hfi1_file_write(struct file *, const char __user *,
86                                size_t, loff_t *);
87 static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
88 static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
89 static int hfi1_file_mmap(struct file *, struct vm_area_struct *);
90
91 static u64 kvirt_to_phys(void *);
92 static int assign_ctxt(struct file *, struct hfi1_user_info *);
93 static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
94 static int user_init(struct file *);
95 static int get_ctxt_info(struct file *, void __user *, __u32);
96 static int get_base_info(struct file *, void __user *, __u32);
97 static int setup_ctxt(struct file *);
98 static int setup_subctxt(struct hfi1_ctxtdata *);
99 static int get_user_context(struct file *, struct hfi1_user_info *,
100                             int, unsigned);
101 static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
102 static int allocate_ctxt(struct file *, struct hfi1_devdata *,
103                          struct hfi1_user_info *);
104 static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
105 static unsigned int poll_next(struct file *, struct poll_table_struct *);
106 static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
107 static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
108 static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
109 static int vma_fault(struct vm_area_struct *, struct vm_fault *);
110 static int exp_tid_setup(struct file *, struct hfi1_tid_info *);
111 static int exp_tid_free(struct file *, struct hfi1_tid_info *);
112 static void unlock_exp_tids(struct hfi1_ctxtdata *);
113
114 static const struct file_operations hfi1_file_ops = {
115         .owner = THIS_MODULE,
116         .write = hfi1_file_write,
117         .write_iter = hfi1_write_iter,
118         .open = hfi1_file_open,
119         .release = hfi1_file_close,
120         .poll = hfi1_poll,
121         .mmap = hfi1_file_mmap,
122         .llseek = noop_llseek,
123 };
124
125 static struct vm_operations_struct vm_ops = {
126         .fault = vma_fault,
127 };
128
129 /*
130  * Types of memories mapped into user processes' space
131  */
132 enum mmap_types {
133         PIO_BUFS = 1,
134         PIO_BUFS_SOP,
135         PIO_CRED,
136         RCV_HDRQ,
137         RCV_EGRBUF,
138         UREGS,
139         EVENTS,
140         STATUS,
141         RTAIL,
142         SUBCTXT_UREGS,
143         SUBCTXT_RCV_HDRQ,
144         SUBCTXT_EGRBUF,
145         SDMA_COMP
146 };
147
148 /*
149  * Masks and offsets defining the mmap tokens
150  */
151 #define HFI1_MMAP_OFFSET_MASK   0xfffULL
152 #define HFI1_MMAP_OFFSET_SHIFT  0
153 #define HFI1_MMAP_SUBCTXT_MASK  0xfULL
154 #define HFI1_MMAP_SUBCTXT_SHIFT 12
155 #define HFI1_MMAP_CTXT_MASK     0xffULL
156 #define HFI1_MMAP_CTXT_SHIFT    16
157 #define HFI1_MMAP_TYPE_MASK     0xfULL
158 #define HFI1_MMAP_TYPE_SHIFT    24
159 #define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
160 #define HFI1_MMAP_MAGIC_SHIFT   32
161
162 #define HFI1_MMAP_MAGIC         0xdabbad00
163
164 #define HFI1_MMAP_TOKEN_SET(field, val) \
165         (((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
166 #define HFI1_MMAP_TOKEN_GET(field, token) \
167         (((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
168 #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
169         (HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
170         HFI1_MMAP_TOKEN_SET(TYPE, type) | \
171         HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
172         HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
173         HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
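/*
 * Resulting 64-bit token layout (derived from the masks and shifts above):
 *   [63:32] MAGIC, [31:28] unused, [27:24] TYPE, [23:16] CTXT,
 *   [15:12] SUBCTXT, [11:0] page OFFSET of the mapped address.
 */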
174
175 #define EXP_TID_SET(field, value)                       \
176         (((value) & EXP_TID_TID##field##_MASK) <<       \
177          EXP_TID_TID##field##_SHIFT)
178 #define EXP_TID_CLEAR(tid, field) {                                     \
179                 (tid) &= ~(EXP_TID_TID##field##_MASK <<                 \
180                            EXP_TID_TID##field##_SHIFT);                 \
181                         }
182 #define EXP_TID_RESET(tid, field, value) do {                           \
183                 EXP_TID_CLEAR(tid, field);                              \
184                 (tid) |= EXP_TID_SET(field, value);                     \
185         } while (0)
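/*
 * For a field FIELD that has EXP_TID_TIDFIELD_{MASK,SHIFT} definitions
 * (see hfi.h), EXP_TID_RESET(tid, FIELD, val) clears that field in 'tid'
 * and then ORs in 'val' shifted into the field's position.
 */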
186
187 #define dbg(fmt, ...)                           \
188         pr_info(fmt, ##__VA_ARGS__)
189
190
191 static inline int is_valid_mmap(u64 token)
192 {
193         return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
194 }
195
196 static int hfi1_file_open(struct inode *inode, struct file *fp)
197 {
198         /* The real work is performed later in assign_ctxt() */
199         fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
200         if (fp->private_data) /* no cpu affinity by default */
201                 ((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
202         return fp->private_data ? 0 : -ENOMEM;
203 }
204
205 static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
206                                size_t count, loff_t *offset)
207 {
208         const struct hfi1_cmd __user *ucmd;
209         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
210         struct hfi1_cmd cmd;
211         struct hfi1_user_info uinfo;
212         struct hfi1_tid_info tinfo;
213         ssize_t consumed = 0, copy = 0, ret = 0;
214         void *dest = NULL;
215         __u64 user_val = 0;
216         int uctxt_required = 1;
217         int must_be_root = 0;
218
219         /* FIXME: This interface cannot continue out of staging */
220         if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
221                 return -EACCES;
222
223         if (count < sizeof(cmd)) {
224                 ret = -EINVAL;
225                 goto bail;
226         }
227
228         ucmd = (const struct hfi1_cmd __user *)data;
229         if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
230                 ret = -EFAULT;
231                 goto bail;
232         }
233
234         consumed = sizeof(cmd);
235
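        /*
         * First pass over the command type: decide how much user data (if
         * any) accompanies the command and where to stage it.  The command
         * itself is executed by the second switch further down.
         */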
236         switch (cmd.type) {
237         case HFI1_CMD_ASSIGN_CTXT:
238                 uctxt_required = 0;     /* assigned user context not required */
239                 copy = sizeof(uinfo);
240                 dest = &uinfo;
241                 break;
242         case HFI1_CMD_SDMA_STATUS_UPD:
243         case HFI1_CMD_CREDIT_UPD:
244                 copy = 0;
245                 break;
246         case HFI1_CMD_TID_UPDATE:
247         case HFI1_CMD_TID_FREE:
248                 copy = sizeof(tinfo);
249                 dest = &tinfo;
250                 break;
251         case HFI1_CMD_USER_INFO:
252         case HFI1_CMD_RECV_CTRL:
253         case HFI1_CMD_POLL_TYPE:
254         case HFI1_CMD_ACK_EVENT:
255         case HFI1_CMD_CTXT_INFO:
256         case HFI1_CMD_SET_PKEY:
257         case HFI1_CMD_CTXT_RESET:
258                 copy = 0;
259                 user_val = cmd.addr;
260                 break;
261         case HFI1_CMD_EP_INFO:
262         case HFI1_CMD_EP_ERASE_CHIP:
263         case HFI1_CMD_EP_ERASE_P0:
264         case HFI1_CMD_EP_ERASE_P1:
265         case HFI1_CMD_EP_READ_P0:
266         case HFI1_CMD_EP_READ_P1:
267         case HFI1_CMD_EP_WRITE_P0:
268         case HFI1_CMD_EP_WRITE_P1:
269                 uctxt_required = 0;     /* assigned user context not required */
270                 must_be_root = 1;       /* validate user */
271                 copy = 0;
272                 break;
273         default:
274                 ret = -EINVAL;
275                 goto bail;
276         }
277
278         /* If the command comes with user data, copy it. */
279         if (copy) {
280                 if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
281                         ret = -EFAULT;
282                         goto bail;
283                 }
284                 consumed += copy;
285         }
286
287         /*
288          * Make sure there is a uctxt when needed.
289          */
290         if (uctxt_required && !uctxt) {
291                 ret = -EINVAL;
292                 goto bail;
293         }
294
295         /* only root can do these operations */
296         if (must_be_root && !capable(CAP_SYS_ADMIN)) {
297                 ret = -EPERM;
298                 goto bail;
299         }
300
301         switch (cmd.type) {
302         case HFI1_CMD_ASSIGN_CTXT:
303                 ret = assign_ctxt(fp, &uinfo);
304                 if (ret < 0)
305                         goto bail;
306                 ret = setup_ctxt(fp);
307                 if (ret)
308                         goto bail;
309                 ret = user_init(fp);
310                 break;
311         case HFI1_CMD_CTXT_INFO:
312                 ret = get_ctxt_info(fp, (void __user *)(unsigned long)
313                                     user_val, cmd.len);
314                 break;
315         case HFI1_CMD_USER_INFO:
316                 ret = get_base_info(fp, (void __user *)(unsigned long)
317                                     user_val, cmd.len);
318                 break;
319         case HFI1_CMD_SDMA_STATUS_UPD:
320                 break;
321         case HFI1_CMD_CREDIT_UPD:
322                 if (uctxt && uctxt->sc)
323                         sc_return_credits(uctxt->sc);
324                 break;
325         case HFI1_CMD_TID_UPDATE:
326                 ret = exp_tid_setup(fp, &tinfo);
327                 if (!ret) {
328                         unsigned long addr;
329                         /*
330                          * Copy the number of tidlist entries we used
331                          * and the length of the buffer we registered.
332                          * These fields are adjacent in the structure so
333                          * we can copy them at the same time.
334                          */
335                         addr = (unsigned long)cmd.addr +
336                                 offsetof(struct hfi1_tid_info, tidcnt);
337                         if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
338                                          sizeof(tinfo.tidcnt) +
339                                          sizeof(tinfo.length)))
340                                 ret = -EFAULT;
341                 }
342                 break;
343         case HFI1_CMD_TID_FREE:
344                 ret = exp_tid_free(fp, &tinfo);
345                 break;
346         case HFI1_CMD_RECV_CTRL:
347                 ret = manage_rcvq(uctxt, subctxt_fp(fp), (int)user_val);
348                 break;
349         case HFI1_CMD_POLL_TYPE:
350                 uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
351                 break;
352         case HFI1_CMD_ACK_EVENT:
353                 ret = user_event_ack(uctxt, subctxt_fp(fp), user_val);
354                 break;
355         case HFI1_CMD_SET_PKEY:
356                 if (HFI1_CAP_IS_USET(PKEY_CHECK))
357                         ret = set_ctxt_pkey(uctxt, subctxt_fp(fp), user_val);
358                 else
359                         ret = -EPERM;
360                 break;
361         case HFI1_CMD_CTXT_RESET: {
362                 struct send_context *sc;
363                 struct hfi1_devdata *dd;
364
365                 if (!uctxt || !uctxt->dd || !uctxt->sc) {
366                         ret = -EINVAL;
367                         break;
368                 }
369                 /*
370                  * There is no protection here. User level has to
371                  * guarantee that no one will be writing to the send
372                  * context while it is being re-initialized.
373                  * If user level breaks that guarantee, it will break
374                  * its own context and no one else's.
375                  */
376                 dd = uctxt->dd;
377                 sc = uctxt->sc;
378                 /*
379                  * Wait until the interrupt handler has marked the
380                  * context as halted or frozen. Report error if we time
381                  * out.
382                  */
383                 wait_event_interruptible_timeout(
384                         sc->halt_wait, (sc->flags & SCF_HALTED),
385                         msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
386                 if (!(sc->flags & SCF_HALTED)) {
387                         ret = -ENOLCK;
388                         break;
389                 }
390                 /*
391                  * If the send context was halted due to a Freeze,
392                  * wait until the device has been "unfrozen" before
393                  * resetting the context.
394                  */
395                 if (sc->flags & SCF_FROZEN) {
396                         wait_event_interruptible_timeout(
397                                 dd->event_queue,
398                                 !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
399                                 msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
400                         if (dd->flags & HFI1_FROZEN) {
401                                 ret = -ENOLCK;
402                                 break;
403                         }
404                         if (dd->flags & HFI1_FORCED_FREEZE) {
405                                 /* Don't allow context reset if we are in a
406                                  * forced freeze */
407                                 ret = -ENODEV;
408                                 break;
409                         }
410                         sc_disable(sc);
411                         ret = sc_enable(sc);
412                         hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
413                                      uctxt->ctxt);
414                 } else
415                         ret = sc_restart(sc);
416                 if (!ret)
417                         sc_return_credits(sc);
418                 break;
419         }
420         case HFI1_CMD_EP_INFO:
421         case HFI1_CMD_EP_ERASE_CHIP:
422         case HFI1_CMD_EP_ERASE_P0:
423         case HFI1_CMD_EP_ERASE_P1:
424         case HFI1_CMD_EP_READ_P0:
425         case HFI1_CMD_EP_READ_P1:
426         case HFI1_CMD_EP_WRITE_P0:
427         case HFI1_CMD_EP_WRITE_P1:
428                 ret = handle_eprom_command(&cmd);
429                 break;
430         }
431
432         if (ret >= 0)
433                 ret = consumed;
434 bail:
435         return ret;
436 }
437
438 static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
439 {
440         struct hfi1_user_sdma_pkt_q *pq;
441         struct hfi1_user_sdma_comp_q *cq;
442         int ret = 0, done = 0, reqs = 0;
443         unsigned long dim = from->nr_segs;
444
445         if (!user_sdma_comp_fp(kiocb->ki_filp) ||
446             !user_sdma_pkt_fp(kiocb->ki_filp)) {
447                 ret = -EIO;
448                 goto done;
449         }
450
451         if (!iter_is_iovec(from) || !dim) {
452                 ret = -EINVAL;
453                 goto done;
454         }
455
456         hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
457                   ctxt_fp(kiocb->ki_filp)->ctxt, subctxt_fp(kiocb->ki_filp),
458                   dim);
459         pq = user_sdma_pkt_fp(kiocb->ki_filp);
460         cq = user_sdma_comp_fp(kiocb->ki_filp);
461
462         if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
463                 ret = -ENOSPC;
464                 goto done;
465         }
466
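        /*
         * Each iteration hands the remaining iovecs to the SDMA code, which
         * reports how many segments a single request consumed; advance past
         * them and repeat until the caller's vector is exhausted.
         */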
467         while (dim) {
468                 unsigned long count = 0;
469
470                 ret = hfi1_user_sdma_process_request(
471                         kiocb->ki_filp, (struct iovec *)(from->iov + done),
472                         dim, &count);
473                 if (ret)
474                         goto done;
475                 dim -= count;
476                 done += count;
477                 reqs++;
478         }
479 done:
480         return ret ? ret : reqs;
481 }
482
483 static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
484 {
485         struct hfi1_ctxtdata *uctxt;
486         struct hfi1_devdata *dd;
487         unsigned long flags, pfn;
488         u64 token = vma->vm_pgoff << PAGE_SHIFT,
489                 memaddr = 0;
490         u8 subctxt, mapio = 0, vmf = 0, type;
491         ssize_t memlen = 0;
492         int ret = 0;
493         u16 ctxt;
494
495         uctxt = ctxt_fp(fp);
496         if (!is_valid_mmap(token) || !uctxt ||
497             !(vma->vm_flags & VM_SHARED)) {
498                 ret = -EINVAL;
499                 goto done;
500         }
501         dd = uctxt->dd;
502         ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
503         subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
504         type = HFI1_MMAP_TOKEN_GET(TYPE, token);
505         if (ctxt != uctxt->ctxt || subctxt != subctxt_fp(fp)) {
506                 ret = -EINVAL;
507                 goto done;
508         }
509
510         flags = vma->vm_flags;
511
512         switch (type) {
513         case PIO_BUFS:
514         case PIO_BUFS_SOP:
515                 memaddr = ((dd->physaddr + TXE_PIO_SEND) +
516                                 /* chip pio base */
517                            (uctxt->sc->hw_context * BIT(16))) +
518                                 /* 64K PIO space / ctxt */
519                         (type == PIO_BUFS_SOP ?
520                                 (TXE_PIO_SIZE / 2) : 0); /* sop? */
521                 /*
522                  * Map only the amount allocated to the context, not the
523                  * entire available context's PIO space.
524                  */
525                 memlen = ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE,
526                                PAGE_SIZE);
527                 flags &= ~VM_MAYREAD;
528                 flags |= VM_DONTCOPY | VM_DONTEXPAND;
529                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
530                 mapio = 1;
531                 break;
532         case PIO_CRED:
533                 if (flags & VM_WRITE) {
534                         ret = -EPERM;
535                         goto done;
536                 }
537                 /*
538                  * The credit return location for this context could be on the
539                  * second or third page allocated for credit returns (if the
540                  * number of enabled contexts is > 64 or 128, respectively).
541                  */
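                /*
                 * The PAGE_MASK arithmetic below picks the physical page
                 * within cr_base that holds this context's hw_free pointer;
                 * get_base_info() hands userspace the matching in-page
                 * offset via the PIO_CRED mmap token.
                 */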
542                 memaddr = dd->cr_base[uctxt->numa_id].pa +
543                         (((u64)uctxt->sc->hw_free -
544                           (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
545                 memlen = PAGE_SIZE;
546                 flags &= ~VM_MAYWRITE;
547                 flags |= VM_DONTCOPY | VM_DONTEXPAND;
548                 /*
549                  * The driver has already allocated memory for credit
550                  * returns and programmed it into the chip. Has that
551                  * memory been flagged as non-cached?
552                  */
553                 /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
554                 mapio = 1;
555                 break;
556         case RCV_HDRQ:
557                 memaddr = uctxt->rcvhdrq_phys;
558                 memlen = uctxt->rcvhdrq_size;
559                 break;
560         case RCV_EGRBUF: {
561                 unsigned long addr;
562                 int i;
563                 /*
564                  * The RcvEgr buffers need to be handled differently
565                  * as multiple non-contiguous pages need to be mapped
566                  * into the user process.
567                  */
568                 memlen = uctxt->egrbufs.size;
569                 if ((vma->vm_end - vma->vm_start) != memlen) {
570                         dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
571                                    (vma->vm_end - vma->vm_start), memlen);
572                         ret = -EINVAL;
573                         goto done;
574                 }
575                 if (vma->vm_flags & VM_WRITE) {
576                         ret = -EPERM;
577                         goto done;
578                 }
579                 vma->vm_flags &= ~VM_MAYWRITE;
580                 addr = vma->vm_start;
581                 for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
582                         ret = remap_pfn_range(
583                                 vma, addr,
584                                 uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
585                                 uctxt->egrbufs.buffers[i].len,
586                                 vma->vm_page_prot);
587                         if (ret < 0)
588                                 goto done;
589                         addr += uctxt->egrbufs.buffers[i].len;
590                 }
591                 ret = 0;
592                 goto done;
593         }
594         case UREGS:
595                 /*
596                  * Map only the page that contains this context's user
597                  * registers.
598                  */
599                 memaddr = (unsigned long)
600                         (dd->physaddr + RXE_PER_CONTEXT_USER)
601                         + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
602                 /*
603                  * TidFlow table is on the same page as the rest of the
604                  * user registers.
605                  */
606                 memlen = PAGE_SIZE;
607                 flags |= VM_DONTCOPY | VM_DONTEXPAND;
608                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
609                 mapio = 1;
610                 break;
611         case EVENTS:
612                 /*
613                  * Use the page where this context's flags are. User level
614                  * knows where its own bitmap is within the page.
615                  */
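                /*
                 * dd->events holds HFI1_MAX_SHARED_CTXTS event-flag words
                 * per user context; get_base_info() reports the in-page
                 * offset of this subcontext's word through events_bufbase.
                 */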
616                 memaddr = (unsigned long)(dd->events +
617                                           ((uctxt->ctxt - dd->first_user_ctxt) *
618                                            HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
619                 memlen = PAGE_SIZE;
620                 /*
621                  * v3.7 removes VM_RESERVED but the effect is kept by
622                  * using VM_IO.
623                  */
624                 flags |= VM_IO | VM_DONTEXPAND;
625                 vmf = 1;
626                 break;
627         case STATUS:
628                 memaddr = kvirt_to_phys((void *)dd->status);
629                 memlen = PAGE_SIZE;
630                 flags |= VM_IO | VM_DONTEXPAND;
631                 break;
632         case RTAIL:
633                 if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
634                         /*
635                          * If the memory allocation failed, the context alloc
636                          * also would have failed, so we would never get here
637                          */
638                         ret = -EINVAL;
639                         goto done;
640                 }
641                 if (flags & VM_WRITE) {
642                         ret = -EPERM;
643                         goto done;
644                 }
645                 memaddr = uctxt->rcvhdrqtailaddr_phys;
646                 memlen = PAGE_SIZE;
647                 flags &= ~VM_MAYWRITE;
648                 break;
649         case SUBCTXT_UREGS:
650                 memaddr = (u64)uctxt->subctxt_uregbase;
651                 memlen = PAGE_SIZE;
652                 flags |= VM_IO | VM_DONTEXPAND;
653                 vmf = 1;
654                 break;
655         case SUBCTXT_RCV_HDRQ:
656                 memaddr = (u64)uctxt->subctxt_rcvhdr_base;
657                 memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
658                 flags |= VM_IO | VM_DONTEXPAND;
659                 vmf = 1;
660                 break;
661         case SUBCTXT_EGRBUF:
662                 memaddr = (u64)uctxt->subctxt_rcvegrbuf;
663                 memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
664                 flags |= VM_IO | VM_DONTEXPAND;
665                 flags &= ~VM_MAYWRITE;
666                 vmf = 1;
667                 break;
668         case SDMA_COMP: {
669                 struct hfi1_user_sdma_comp_q *cq;
670
671                 if (!user_sdma_comp_fp(fp)) {
672                         ret = -EFAULT;
673                         goto done;
674                 }
675                 cq = user_sdma_comp_fp(fp);
676                 memaddr = (u64)cq->comps;
677                 memlen = ALIGN(sizeof(*cq->comps) * cq->nentries, PAGE_SIZE);
678                 flags |= VM_IO | VM_DONTEXPAND;
679                 vmf = 1;
680                 break;
681         }
682         default:
683                 ret = -EINVAL;
684                 break;
685         }
686
687         if ((vma->vm_end - vma->vm_start) != memlen) {
688                 hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
689                           uctxt->ctxt, subctxt_fp(fp),
690                           (vma->vm_end - vma->vm_start), memlen);
691                 ret = -EINVAL;
692                 goto done;
693         }
694
695         vma->vm_flags = flags;
696         dd_dev_info(dd,
697                     "%s: %u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
698                     __func__, ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
699                     vma->vm_end - vma->vm_start, vma->vm_flags);
700         pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
701         if (vmf) {
702                 vma->vm_pgoff = pfn;
703                 vma->vm_ops = &vm_ops;
704                 ret = 0;
705         } else if (mapio) {
706                 ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
707                                          vma->vm_page_prot);
708         } else {
709                 ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
710                                       vma->vm_page_prot);
711         }
712 done:
713         return ret;
714 }
715
716 /*
717  * Local (non-chip) user memory is not mapped right away, but only as
718  * it is accessed by the user-level code.
719  */
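/*
 * For these mappings hfi1_file_mmap() stored the buffer's kernel-virtual
 * pfn in vm_pgoff, so shifting vmf->pgoff back up by PAGE_SHIFT recovers
 * the vmalloc'ed address to look up.
 */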
720 static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
721 {
722         struct page *page;
723
724         page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
725         if (!page)
726                 return VM_FAULT_SIGBUS;
727
728         get_page(page);
729         vmf->page = page;
730
731         return 0;
732 }
733
734 static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
735 {
736         struct hfi1_ctxtdata *uctxt;
737         unsigned pollflag;
738
739         uctxt = ctxt_fp(fp);
740         if (!uctxt)
741                 pollflag = POLLERR;
742         else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
743                 pollflag = poll_urgent(fp, pt);
744         else  if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
745                 pollflag = poll_next(fp, pt);
746         else /* invalid */
747                 pollflag = POLLERR;
748
749         return pollflag;
750 }
751
752 static int hfi1_file_close(struct inode *inode, struct file *fp)
753 {
754         struct hfi1_filedata *fdata = fp->private_data;
755         struct hfi1_ctxtdata *uctxt = fdata->uctxt;
756         struct hfi1_devdata *dd;
757         unsigned long flags, *ev;
758
759         fp->private_data = NULL;
760
761         if (!uctxt)
762                 goto done;
763
764         hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
765         dd = uctxt->dd;
766         mutex_lock(&hfi1_mutex);
767
768         flush_wc();
769         /* drain user sdma queue */
770         if (fdata->pq)
771                 hfi1_user_sdma_free_queues(fdata);
772
773         /*
774          * Clear any left over, unhandled events so the next process that
775          * gets this context doesn't get confused.
776          */
777         ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
778                            HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
779         *ev = 0;
780
781         if (--uctxt->cnt) {
782                 uctxt->active_slaves &= ~(1 << fdata->subctxt);
783                 uctxt->subpid[fdata->subctxt] = 0;
784                 mutex_unlock(&hfi1_mutex);
785                 goto done;
786         }
787
788         spin_lock_irqsave(&dd->uctxt_lock, flags);
789         /*
790          * Disable receive context and interrupt available, reset all
791          * RcvCtxtCtrl bits to default values.
792          */
793         hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
794                      HFI1_RCVCTRL_TIDFLOW_DIS |
795                      HFI1_RCVCTRL_INTRAVAIL_DIS |
796                      HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
797                      HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
798                      HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
799         /* Clear the context's J_KEY */
800         hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
801         /*
802          * Reset context integrity checks to default.
803          * (writes to CSRs probably belong in chip.c)
804          */
805         write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
806                         hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
807         sc_disable(uctxt->sc);
808         uctxt->pid = 0;
809         spin_unlock_irqrestore(&dd->uctxt_lock, flags);
810
811         dd->rcd[uctxt->ctxt] = NULL;
812         uctxt->rcvwait_to = 0;
813         uctxt->piowait_to = 0;
814         uctxt->rcvnowait = 0;
815         uctxt->pionowait = 0;
816         uctxt->event_flags = 0;
817
818         hfi1_clear_tids(uctxt);
819         hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
820
821         if (uctxt->tid_pg_list)
822                 unlock_exp_tids(uctxt);
823
824         hfi1_stats.sps_ctxts--;
825         dd->freectxts++;
826         mutex_unlock(&hfi1_mutex);
827         hfi1_free_ctxtdata(dd, uctxt);
828 done:
829         kfree(fdata);
830         return 0;
831 }
832
833 /*
834  * Convert kernel *virtual* addresses to physical addresses.
835  * This is used for vmalloc'ed addresses.
836  */
837 static u64 kvirt_to_phys(void *addr)
838 {
839         struct page *page;
840         u64 paddr = 0;
841
842         page = vmalloc_to_page(addr);
843         if (page)
844                 paddr = page_to_pfn(page) << PAGE_SHIFT;
845
846         return paddr;
847 }
848
849 static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
850 {
851         int i_minor, ret = 0;
852         unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;
853
854         swmajor = uinfo->userversion >> 16;
855         if (swmajor != HFI1_USER_SWMAJOR) {
856                 ret = -ENODEV;
857                 goto done;
858         }
859
860         swminor = uinfo->userversion & 0xffff;
861
862         if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
863                 alg = uinfo->hfi1_alg;
864
865         mutex_lock(&hfi1_mutex);
866         /* First, let's check if we need to set up a shared context. */
867         if (uinfo->subctxt_cnt)
868                 ret = find_shared_ctxt(fp, uinfo);
869
870         /*
871          * We execute the following block if we couldn't find a
872          * shared context or if context sharing is not required.
873          */
874         if (!ret) {
875                 i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
876                 ret = get_user_context(fp, uinfo, i_minor - 1, alg);
877         }
878         mutex_unlock(&hfi1_mutex);
879 done:
880         return ret;
881 }
882
883 static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
884                             int devno, unsigned alg)
885 {
886         struct hfi1_devdata *dd = NULL;
887         int ret = 0, devmax, npresent, nup, dev;
888
889         devmax = hfi1_count_units(&npresent, &nup);
890         if (!npresent) {
891                 ret = -ENXIO;
892                 goto done;
893         }
894         if (!nup) {
895                 ret = -ENETDOWN;
896                 goto done;
897         }
898         if (devno >= 0) {
899                 dd = hfi1_lookup(devno);
900                 if (!dd)
901                         ret = -ENODEV;
902                 else if (!dd->freectxts)
903                         ret = -EBUSY;
904         } else {
905                 struct hfi1_devdata *pdd;
906
907                 if (alg == HFI1_ALG_ACROSS) {
908                         unsigned free = 0U;
909
910                         for (dev = 0; dev < devmax; dev++) {
911                                 pdd = hfi1_lookup(dev);
912                                 if (pdd && pdd->freectxts &&
913                                     pdd->freectxts > free) {
914                                         dd = pdd;
915                                         free = pdd->freectxts;
916                                 }
917                         }
918                 } else {
919                         for (dev = 0; dev < devmax; dev++) {
920                                 pdd = hfi1_lookup(dev);
921                                 if (pdd && pdd->freectxts) {
922                                         dd = pdd;
923                                         break;
924                                 }
925                         }
926                 }
927                 if (!dd)
928                         ret = -EBUSY;
929         }
930 done:
931         return ret ? ret : allocate_ctxt(fp, dd, uinfo);
932 }
933
934 static int find_shared_ctxt(struct file *fp,
935                             const struct hfi1_user_info *uinfo)
936 {
937         int devmax, ndev, i;
938         int ret = 0;
939
940         devmax = hfi1_count_units(NULL, NULL);
941
942         for (ndev = 0; ndev < devmax; ndev++) {
943                 struct hfi1_devdata *dd = hfi1_lookup(ndev);
944
945                 /* device portion of usable() */
946                 if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
947                         continue;
948                 for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
949                         struct hfi1_ctxtdata *uctxt = dd->rcd[i];
950
951                         /* Skip ctxts which are not yet open */
952                         if (!uctxt || !uctxt->cnt)
953                                 continue;
954                         /* Skip ctxt if it doesn't match the requested one */
955                         if (memcmp(uctxt->uuid, uinfo->uuid,
956                                    sizeof(uctxt->uuid)) ||
957                             uctxt->jkey != generate_jkey(current_uid()) ||
958                             uctxt->subctxt_id != uinfo->subctxt_id ||
959                             uctxt->subctxt_cnt != uinfo->subctxt_cnt)
960                                 continue;
961
962                         /* Verify the sharing process matches the master */
963                         if (uctxt->userversion != uinfo->userversion ||
964                             uctxt->cnt >= uctxt->subctxt_cnt) {
965                                 ret = -EINVAL;
966                                 goto done;
967                         }
968                         ctxt_fp(fp) = uctxt;
969                         subctxt_fp(fp) = uctxt->cnt++;
970                         uctxt->subpid[subctxt_fp(fp)] = current->pid;
971                         uctxt->active_slaves |= 1 << subctxt_fp(fp);
972                         ret = 1;
973                         goto done;
974                 }
975         }
976
977 done:
978         return ret;
979 }
980
981 static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
982                          struct hfi1_user_info *uinfo)
983 {
984         struct hfi1_ctxtdata *uctxt;
985         unsigned ctxt;
986         int ret;
987
988         if (dd->flags & HFI1_FROZEN) {
989                 /*
990                  * Pick an error that is distinct from all other errors
991                  * that are returned so the user process knows that
992                  * it tried to allocate while the SPC was frozen.  It
993                  * should be able to retry with success in a short
994                  * while.
995                  */
996                 return -EIO;
997         }
998
999         for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++)
1000                 if (!dd->rcd[ctxt])
1001                         break;
1002
1003         if (ctxt == dd->num_rcv_contexts)
1004                 return -EBUSY;
1005
1006         uctxt = hfi1_create_ctxtdata(dd->pport, ctxt);
1007         if (!uctxt) {
1008                 dd_dev_err(dd,
1009                            "Unable to allocate ctxtdata memory, failing open\n");
1010                 return -ENOMEM;
1011         }
1012         /*
1013          * Allocate and enable a PIO send context.
1014          */
1015         uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
1016                              uctxt->numa_id);
1017         if (!uctxt->sc)
1018                 return -ENOMEM;
1019
1020         dbg("allocated send context %u(%u)\n", uctxt->sc->sw_index,
1021                 uctxt->sc->hw_context);
1022         ret = sc_enable(uctxt->sc);
1023         if (ret)
1024                 return ret;
1025         /*
1026          * Setup shared context resources if the user-level has requested
1027          * shared contexts and this is the 'master' process.
1028          * This has to be done here so the rest of the sub-contexts find the
1029          * proper master.
1030          */
1031         if (uinfo->subctxt_cnt && !subctxt_fp(fp)) {
1032                 ret = init_subctxts(uctxt, uinfo);
1033                 /*
1034                  * On error, we don't need to disable and de-allocate the
1035                  * send context because it will be done during file close
1036                  */
1037                 if (ret)
1038                         return ret;
1039         }
1040         uctxt->userversion = uinfo->userversion;
1041         uctxt->pid = current->pid;
1042         uctxt->flags = HFI1_CAP_UGET(MASK);
1043         init_waitqueue_head(&uctxt->wait);
1044         strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
1045         memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
1046         uctxt->jkey = generate_jkey(current_uid());
1047         INIT_LIST_HEAD(&uctxt->sdma_queues);
1048         spin_lock_init(&uctxt->sdma_qlock);
1049         hfi1_stats.sps_ctxts++;
1050         dd->freectxts--;
1051         ctxt_fp(fp) = uctxt;
1052
1053         return 0;
1054 }
1055
1056 static int init_subctxts(struct hfi1_ctxtdata *uctxt,
1057                          const struct hfi1_user_info *uinfo)
1058 {
1059         int ret = 0;
1060         unsigned num_subctxts;
1061
1062         num_subctxts = uinfo->subctxt_cnt;
1063         if (num_subctxts > HFI1_MAX_SHARED_CTXTS) {
1064                 ret = -EINVAL;
1065                 goto bail;
1066         }
1067
1068         uctxt->subctxt_cnt = uinfo->subctxt_cnt;
1069         uctxt->subctxt_id = uinfo->subctxt_id;
1070         uctxt->active_slaves = 1;
1071         uctxt->redirect_seq_cnt = 1;
1072         set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
1073 bail:
1074         return ret;
1075 }
1076
1077 static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
1078 {
1079         int ret = 0;
1080         unsigned num_subctxts = uctxt->subctxt_cnt;
1081
1082         uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
1083         if (!uctxt->subctxt_uregbase) {
1084                 ret = -ENOMEM;
1085                 goto bail;
1086         }
1087         /* We can take the size of the RcvHdr Queue from the master */
1088         uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
1089                                                   num_subctxts);
1090         if (!uctxt->subctxt_rcvhdr_base) {
1091                 ret = -ENOMEM;
1092                 goto bail_ureg;
1093         }
1094
1095         uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
1096                                                 num_subctxts);
1097         if (!uctxt->subctxt_rcvegrbuf) {
1098                 ret = -ENOMEM;
1099                 goto bail_rhdr;
1100         }
1101         goto bail;
1102 bail_rhdr:
1103         vfree(uctxt->subctxt_rcvhdr_base);
1104 bail_ureg:
1105         vfree(uctxt->subctxt_uregbase);
1106         uctxt->subctxt_uregbase = NULL;
1107 bail:
1108         return ret;
1109 }
1110
1111 static int user_init(struct file *fp)
1112 {
1113         int ret;
1114         unsigned int rcvctrl_ops = 0;
1115         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1116
1117         /* make sure that the context has already been set up */
1118         if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
1119                 ret = -EFAULT;
1120                 goto done;
1121         }
1122
1123         /*
1124          * Subctxts don't need to initialize anything since master
1125          * has done it.
1126          */
1127         if (subctxt_fp(fp)) {
1128                 ret = wait_event_interruptible(uctxt->wait,
1129                         !test_bit(HFI1_CTXT_MASTER_UNINIT,
1130                         &uctxt->event_flags));
1131                 goto done;
1132         }
1133
1134         /* initialize poll variables... */
1135         uctxt->urgent = 0;
1136         uctxt->urgent_poll = 0;
1137
1138         /*
1139          * Now enable the ctxt for receive.
1140          * For chips that are set to DMA the tail register to memory
1141          * when it changes (and when the update bit transitions from
1142          * 0 to 1), we turn it off and then back on.
1143          * This will (very briefly) affect any other open ctxts, but the
1144          * duration is very short, and therefore isn't an issue.  We
1145          * explicitly set the in-memory tail copy to 0 beforehand, so we
1146          * don't have to wait to be sure the DMA update has happened
1147          * (chip resets head/tail to 0 on transition to enable).
1148          */
1149         if (uctxt->rcvhdrtail_kvaddr)
1150                 clear_rcvhdrtail(uctxt);
1151
1152         /* Setup J_KEY before enabling the context */
1153         hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);
1154
1155         rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
1156         if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
1157                 rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
1158         /*
1159          * Ignore the bit in the flags for now until proper
1160          * support for multiple packets per rcv array entry is
1161          * added.
1162          */
1163         if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
1164                 rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
1165         if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
1166                 rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
1167         if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
1168                 rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
1169         if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
1170                 rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
1171         hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);
1172
1173         /* Notify any waiting slaves */
1174         if (uctxt->subctxt_cnt) {
1175                 clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
1176                 wake_up(&uctxt->wait);
1177         }
1178         ret = 0;
1179
1180 done:
1181         return ret;
1182 }
1183
1184 static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
1185 {
1186         struct hfi1_ctxt_info cinfo;
1187         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1188         struct hfi1_filedata *fd = fp->private_data;
1189         int ret = 0;
1190
1191         memset(&cinfo, 0, sizeof(cinfo));
1192         ret = hfi1_get_base_kinfo(uctxt, &cinfo);
1193         if (ret < 0)
1194                 goto done;
1195         cinfo.num_active = hfi1_count_active_units();
1196         cinfo.unit = uctxt->dd->unit;
1197         cinfo.ctxt = uctxt->ctxt;
1198         cinfo.subctxt = subctxt_fp(fp);
1199         cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
1200                                 uctxt->dd->rcv_entries.group_size) +
1201                 uctxt->expected_count;
1202         cinfo.credits = uctxt->sc->credits;
1203         cinfo.numa_node = uctxt->numa_id;
1204         cinfo.rec_cpu = fd->rec_cpu_num;
1205         cinfo.send_ctxt = uctxt->sc->hw_context;
1206
1207         cinfo.egrtids = uctxt->egrbufs.alloced;
1208         cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
1209         cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
1210         cinfo.sdma_ring_size = user_sdma_comp_fp(fp)->nentries;
1211         cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
1212
1213         trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, subctxt_fp(fp), cinfo);
1214         if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
1215                 ret = -EFAULT;
1216 done:
1217         return ret;
1218 }
1219
1220 static int setup_ctxt(struct file *fp)
1221 {
1222         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1223         struct hfi1_devdata *dd = uctxt->dd;
1224         int ret = 0;
1225
1226         /*
1227          * Context should be set up only once (including allocation and
1228          * programming of eager buffers). This is done if context sharing
1229          * is not requested or by the master process.
1230          */
1231         if (!uctxt->subctxt_cnt || !subctxt_fp(fp)) {
1232                 ret = hfi1_init_ctxt(uctxt->sc);
1233                 if (ret)
1234                         goto done;
1235
1236                 /* Now allocate the RcvHdr queue and eager buffers. */
1237                 ret = hfi1_create_rcvhdrq(dd, uctxt);
1238                 if (ret)
1239                         goto done;
1240                 ret = hfi1_setup_eagerbufs(uctxt);
1241                 if (ret)
1242                         goto done;
1243                 if (uctxt->subctxt_cnt && !subctxt_fp(fp)) {
1244                         ret = setup_subctxt(uctxt);
1245                         if (ret)
1246                                 goto done;
1247                 }
1248                 /* Setup Expected Rcv memories */
1249                 uctxt->tid_pg_list = vzalloc(uctxt->expected_count *
1250                                              sizeof(struct page **));
1251                 if (!uctxt->tid_pg_list) {
1252                         ret = -ENOMEM;
1253                         goto done;
1254                 }
1255                 uctxt->physshadow = vzalloc(uctxt->expected_count *
1256                                             sizeof(*uctxt->physshadow));
1257                 if (!uctxt->physshadow) {
1258                         ret = -ENOMEM;
1259                         goto done;
1260                 }
1261                 /* allocate expected TID map and initialize the cursor */
1262                 atomic_set(&uctxt->tidcursor, 0);
1263                 uctxt->numtidgroups = uctxt->expected_count /
1264                         dd->rcv_entries.group_size;
1265                 uctxt->tidmapcnt = uctxt->numtidgroups / BITS_PER_LONG +
1266                         !!(uctxt->numtidgroups % BITS_PER_LONG);
1267                 uctxt->tidusemap = kzalloc_node(uctxt->tidmapcnt *
1268                                                 sizeof(*uctxt->tidusemap),
1269                                                 GFP_KERNEL, uctxt->numa_id);
1270                 if (!uctxt->tidusemap) {
1271                         ret = -ENOMEM;
1272                         goto done;
1273                 }
1274                 /*
1275                  * In case the number of groups is not a multiple of
1276                  * 64 (the number of groups in a tidusemap element), mark
1277                  * the extra ones as used. This effectively makes them
1278                  * permanently used so they should never be assigned. Otherwise,
1279                  * the code which checks how many free groups we have will
1280                  * get completely confused about the state of the bits.
1281                  */
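                /*
                 * Worked example (illustrative numbers only): if
                 * numtidgroups were 100, tidmapcnt would be 2 and the
                 * statement below would set bits 36..63 of tidusemap[1],
                 * i.e. the nonexistent groups 100..127.
                 */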
1282                 if (uctxt->numtidgroups % BITS_PER_LONG)
1283                         uctxt->tidusemap[uctxt->tidmapcnt - 1] =
1284                                 ~((1ULL << (uctxt->numtidgroups %
1285                                             BITS_PER_LONG)) - 1);
1286                 trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 0,
1287                                        uctxt->tidusemap, uctxt->tidmapcnt);
1288         }
1289         ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
1290         if (ret)
1291                 goto done;
1292
1293         set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
1294 done:
1295         return ret;
1296 }
1297
1298 static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
1299 {
1300         struct hfi1_base_info binfo;
1301         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1302         struct hfi1_devdata *dd = uctxt->dd;
1303         ssize_t sz;
1304         unsigned offset;
1305         int ret = 0;
1306
1307         trace_hfi1_uctxtdata(uctxt->dd, uctxt);
1308
1309         memset(&binfo, 0, sizeof(binfo));
1310         binfo.hw_version = dd->revision;
1311         binfo.sw_version = HFI1_KERN_SWVERSION;
1312         binfo.bthqp = kdeth_qp;
1313         binfo.jkey = uctxt->jkey;
1314         /*
1315          * If more than 64 contexts are enabled the allocated credit
1316          * return will span two or three contiguous pages. Since we only
1317          * map the page containing the context's credit return address,
1318          * we need to calculate the offset in the proper page.
1319          */
1320         offset = ((u64)uctxt->sc->hw_free -
1321                   (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
1322         binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
1323                                                subctxt_fp(fp), offset);
1324         binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
1325                                             subctxt_fp(fp),
1326                                             uctxt->sc->base_addr);
1327         binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
1328                                                 uctxt->ctxt,
1329                                                 subctxt_fp(fp),
1330                                                 uctxt->sc->base_addr);
1331         binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
1332                                                subctxt_fp(fp),
1333                                                uctxt->rcvhdrq);
1334         binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
1335                                                subctxt_fp(fp),
1336                                                uctxt->egrbufs.rcvtids[0].phys);
1337         binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
1338                                                  subctxt_fp(fp), 0);
1339         /*
1340          * user regs are at
1341          * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
1342          */
1343         binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
1344                                             subctxt_fp(fp), 0);
1345         offset = offset_in_page((((uctxt->ctxt - dd->first_user_ctxt) *
1346                     HFI1_MAX_SHARED_CTXTS) + subctxt_fp(fp)) *
1347                   sizeof(*dd->events));
1348         binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
1349                                               subctxt_fp(fp),
1350                                               offset);
1351         binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
1352                                               subctxt_fp(fp),
1353                                               dd->status);
1354         if (HFI1_CAP_IS_USET(DMA_RTAIL))
1355                 binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
1356                                                        subctxt_fp(fp), 0);
1357         if (uctxt->subctxt_cnt) {
1358                 binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
1359                                                         uctxt->ctxt,
1360                                                         subctxt_fp(fp), 0);
1361                 binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
1362                                                          uctxt->ctxt,
1363                                                          subctxt_fp(fp), 0);
1364                 binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
1365                                                          uctxt->ctxt,
1366                                                          subctxt_fp(fp), 0);
1367         }
1368         sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
1369         if (copy_to_user(ubase, &binfo, sz))
1370                 ret = -EFAULT;
1371         return ret;
1372 }
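/*
 * Illustrative note (not part of the driver): each HFI1_MMAP_TOKEN above
 * encodes a memory type, context, sub-context and offset that the
 * driver's mmap path later decodes from the file offset.  A user
 * process would typically hand a token straight back as the mmap()
 * offset; a minimal sketch, assuming a token obtained from binfo and an
 * open context file descriptor ctxt_fd:
 *
 *	void *credits = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED,
 *			     ctxt_fd, (off_t)binfo.sc_credits_addr);
 *
 * The in-page offset of the credit return address computed above is
 * then recovered from the token by the caller.
 */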
1373
1374 static unsigned int poll_urgent(struct file *fp,
1375                                 struct poll_table_struct *pt)
1376 {
1377         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1378         struct hfi1_devdata *dd = uctxt->dd;
1379         unsigned pollflag;
1380
1381         poll_wait(fp, &uctxt->wait, pt);
1382
1383         spin_lock_irq(&dd->uctxt_lock);
1384         if (uctxt->urgent != uctxt->urgent_poll) {
1385                 pollflag = POLLIN | POLLRDNORM;
1386                 uctxt->urgent_poll = uctxt->urgent;
1387         } else {
1388                 pollflag = 0;
1389                 set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
1390         }
1391         spin_unlock_irq(&dd->uctxt_lock);
1392
1393         return pollflag;
1394 }
1395
1396 static unsigned int poll_next(struct file *fp,
1397                               struct poll_table_struct *pt)
1398 {
1399         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1400         struct hfi1_devdata *dd = uctxt->dd;
1401         unsigned pollflag;
1402
1403         poll_wait(fp, &uctxt->wait, pt);
1404
1405         spin_lock_irq(&dd->uctxt_lock);
1406         if (hdrqempty(uctxt)) {
1407                 set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
1408                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
1409                 pollflag = 0;
1410         } else
1411                 pollflag = POLLIN | POLLRDNORM;
1412         spin_unlock_irq(&dd->uctxt_lock);
1413
1414         return pollflag;
1415 }
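/*
 * Rough userspace sketch (an assumption, not driver code): hfi1_poll()
 * dispatches to one of the two helpers above, so a process waiting for
 * the next packet would block in poll() and service its receive header
 * queue once POLLIN | POLLRDNORM is reported:
 *
 *	struct pollfd pfd = { .fd = ctxt_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *		consume_rcvhdrq();	// hypothetical consumer
 *
 * poll_next() only arms the receive-available interrupt
 * (HFI1_RCVCTRL_INTRAVAIL_ENB) when the header queue is empty, so a
 * non-empty queue reports readiness without sleeping.
 */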
1416
1417 /*
1418  * Find all user contexts in use, and set the specified bit in their
1419  * event mask.
1420  * See also find_ctxt() for a similar use that is specific to send buffers.
1421  */
1422 int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
1423 {
1424         struct hfi1_ctxtdata *uctxt;
1425         struct hfi1_devdata *dd = ppd->dd;
1426         unsigned ctxt;
1427         int ret = 0;
1428         unsigned long flags;
1429
1430         if (!dd->events) {
1431                 ret = -EINVAL;
1432                 goto done;
1433         }
1434
1435         spin_lock_irqsave(&dd->uctxt_lock, flags);
1436         for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts;
1437              ctxt++) {
1438                 uctxt = dd->rcd[ctxt];
1439                 if (uctxt) {
1440                         unsigned long *evs = dd->events +
1441                                 (uctxt->ctxt - dd->first_user_ctxt) *
1442                                 HFI1_MAX_SHARED_CTXTS;
1443                         int i;
1444                         /*
1445                          * subctxt_cnt is 0 if not shared, so handle the base
1446                          * context separately first, then any remaining subcontexts
1447                          */
1448                         set_bit(evtbit, evs);
1449                         for (i = 1; i < uctxt->subctxt_cnt; i++)
1450                                 set_bit(evtbit, evs + i);
1451                 }
1452         }
1453         spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1454 done:
1455         return ret;
1456 }
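/*
 * Worked example of the event-slot addressing used above (illustrative;
 * assumes HFI1_MAX_SHARED_CTXTS is 8): with dd->first_user_ctxt == 3,
 * context number 5 and sub-context 2, the slot used by user_event_ack()
 * below is
 *
 *	(5 - 3) * 8 + 2 = 18
 *
 * i.e. dd->events[18].  hfi1_set_uevent_bits() sets the bit in slot 16
 * (the base) and, for shared contexts, in slots 17 .. 16 + subctxt_cnt - 1.
 */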
1457
1458 /**
1459  * manage_rcvq - manage a context's receive queue
1460  * @uctxt: the context
1461  * @subctxt: the sub-context
1462  * @start_stop: action to carry out
1463  *
1464  * start_stop == 0 disables receive on the context, for use in queue
1465  * overflow conditions.  start_stop == 1 re-enables receive, to be used
1466  * when re-initializing the software copy of the head register.
1467  */
1468 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
1469                        int start_stop)
1470 {
1471         struct hfi1_devdata *dd = uctxt->dd;
1472         unsigned int rcvctrl_op;
1473
1474         if (subctxt)
1475                 goto bail;
1476         /* atomically set or clear the receive enable for this ctxt */
1477         if (start_stop) {
1478                 /*
1479                  * On enable, force in-memory copy of the tail register to
1480                  * 0, so that protocol code doesn't have to worry about
1481                  * whether or not the chip has yet updated the in-memory
1482                  * copy on return from the system call. The chip
1483                  * always resets its tail register back to 0 on a
1484                  * transition from disabled to enabled.
1485                  */
1486                 if (uctxt->rcvhdrtail_kvaddr)
1487                         clear_rcvhdrtail(uctxt);
1488                 rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
1489         } else
1490                 rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
1491         hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
1492         /* always; new head should be equal to new tail; see above */
1493 bail:
1494         return 0;
1495 }
1496
1497 /*
1498  * Clear the event notification bits for this context.
1499  * The user process then performs whatever actions are appropriate to the
1500  * bits having been set, if desired, and checks again in the future.
1501  */
1502 static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
1503                           unsigned long events)
1504 {
1505         int i;
1506         struct hfi1_devdata *dd = uctxt->dd;
1507         unsigned long *evs;
1508
1509         if (!dd->events)
1510                 return 0;
1511
1512         evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
1513                             HFI1_MAX_SHARED_CTXTS) + subctxt;
1514
1515         for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
1516                 if (!test_bit(i, &events))
1517                         continue;
1518                 clear_bit(i, evs);
1519         }
1520         return 0;
1521 }
1522
1523 #define num_user_pages(vaddr, len)                                      \
1524         (1 + (((((unsigned long)(vaddr) +                               \
1525                  (unsigned long)(len) - 1) & PAGE_MASK) -               \
1526                ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
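/*
 * Worked example for num_user_pages() (illustrative, assuming 4 KiB
 * pages): a 0x20 byte buffer starting at vaddr 0x10ff0 straddles a page
 * boundary, so
 *
 *	last page:   (0x10ff0 + 0x20 - 1) & PAGE_MASK = 0x11000
 *	first page:   0x10ff0 & PAGE_MASK             = 0x10000
 *	pages:       1 + ((0x11000 - 0x10000) >> PAGE_SHIFT) = 2
 *
 * A page-aligned, page-sized buffer yields 1 by the same arithmetic.
 */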
1527
1528 /**
1529  * tzcnt - count the number of trailing zeros in a 64-bit value
1530  * @value: the value to be examined
1531  *
1532  * Returns the number of trailing least significant zeros in
1533  * the input value. If the value is zero, returns the number of
1534  * bits in the value.
1535  */
1536 static inline u8 tzcnt(u64 value)
1537 {
1538         return value ? __builtin_ctzl(value) : sizeof(value) * 8;
1539 }
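/*
 * Example values (illustrative): tzcnt(0x8) == 3, tzcnt(1) == 0 and
 * tzcnt(0) == 64, matching the "whole word is zero" convention relied
 * on by num_free_groups() below.
 */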
1540
1541 static inline unsigned num_free_groups(unsigned long map, u16 *start)
1542 {
1543         unsigned free;
1544         u16 bitidx = *start;
1545
1546         if (bitidx >= BITS_PER_LONG)
1547                 return 0;
1548         /* "Turn off" any bits set before our bit index */
1549         map &= ~((1ULL << bitidx) - 1);
1550         free = tzcnt(map) - bitidx;
1551         while (!free && bitidx < BITS_PER_LONG) {
1552                 /* Zero out the last set bit so we look at the rest */
1553                 map &= ~(1ULL << bitidx);
1554                 /*
1555                  * Account for the previously checked bits and advance
1556                  * the bit index. We don't have to check for bitidx
1557                  * getting bigger than BITS_PER_LONG here as it would
1558                  * mean extra instructions that we don't need. If it
1559                  * did happen, the unsigned subtraction would wrap free to a
1560                  * large non-zero value, which still breaks out of the loop.
1561                  */
1562                 free = tzcnt(map) - ++bitidx;
1563         }
1564         *start = bitidx;
1565         return free;
1566 }
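/*
 * Worked example for num_free_groups() (illustrative): with map == 0xf0f
 * and *start == 4, bits 0-3 are masked off, leaving 0xf00; tzcnt(0xf00)
 * is 8, so the function reports 8 - 4 = 4 free group bits starting at
 * bit index 4 and leaves *start at 4.
 */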
1567
1568 static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo)
1569 {
1570         int ret = 0;
1571         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1572         struct hfi1_devdata *dd = uctxt->dd;
1573         unsigned tid, mapped = 0, npages, ngroups, exp_groups,
1574                 tidpairs = uctxt->expected_count / 2;
1575         struct page **pages;
1576         unsigned long vaddr, tidmap[uctxt->tidmapcnt];
1577         dma_addr_t *phys;
1578         u32 tidlist[tidpairs], pairidx = 0, tidcursor;
1579         u16 useidx, idx, bitidx, tidcnt = 0;
1580
1581         vaddr = tinfo->vaddr;
1582
1583         if (offset_in_page(vaddr)) {
1584                 ret = -EINVAL;
1585                 goto bail;
1586         }
1587
1588         npages = num_user_pages(vaddr, tinfo->length);
1589         if (!npages) {
1590                 ret = -EINVAL;
1591                 goto bail;
1592         }
1593         if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
1594                        npages * PAGE_SIZE)) {
1595                 dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
1596                            (void *)vaddr, npages);
1597                 ret = -EFAULT;
1598                 goto bail;
1599         }
1600
1601         memset(tidmap, 0, sizeof(tidmap[0]) * uctxt->tidmapcnt);
1602         memset(tidlist, 0, sizeof(tidlist[0]) * tidpairs);
1603
1604         exp_groups = uctxt->expected_count / dd->rcv_entries.group_size;
1605         /* which group set do we look at first? */
1606         tidcursor = atomic_read(&uctxt->tidcursor);
1607         useidx = (tidcursor >> 16) & 0xffff;
1608         bitidx = tidcursor & 0xffff;
1609
1610         /*
1611          * Keep going until we've mapped all pages or we've exhausted all
1612          * RcvArray entries.
1613          * This iterates over the number of tidmaps + 1
1614          * (idx <= uctxt->tidmapcnt) so that the bitmap we started from is
1615          * checked one more time for any free bits before the
1616          * starting point bit.
1617          */
1618         for (mapped = 0, idx = 0;
1619              mapped < npages && idx <= uctxt->tidmapcnt;) {
1620                 u64 i, offset = 0;
1621                 unsigned free, pinned, pmapped = 0, bits_used;
1622                 u16 grp;
1623
1624                 /*
1625                  * "Reserve" the needed group bits under lock so other
1626                  * processes can't step in the middle of it. Once
1627                  * reserved, we don't need the lock anymore since we
1628                  * are guaranteed the groups.
1629                  */
1630                 spin_lock(&uctxt->exp_lock);
1631                 if (uctxt->tidusemap[useidx] == -1ULL ||
1632                     bitidx >= BITS_PER_LONG) {
1633                         /* no free groups in the set, use the next */
1634                         useidx = (useidx + 1) % uctxt->tidmapcnt;
1635                         idx++;
1636                         bitidx = 0;
1637                         spin_unlock(&uctxt->exp_lock);
1638                         continue;
1639                 }
1640                 ngroups = ((npages - mapped) / dd->rcv_entries.group_size) +
1641                         !!((npages - mapped) % dd->rcv_entries.group_size);
1642
1643                 /*
1644                  * If we've gotten here, the current set of groups does have
1645                  * one or more free groups.
1646                  */
1647                 free = num_free_groups(uctxt->tidusemap[useidx], &bitidx);
1648                 if (!free) {
1649                         /*
1650                          * Despite the check above, free could still come back
1651                          * as 0 because we don't check the entire bitmap but
1652                          * we start from bitidx.
1653                          */
1654                         spin_unlock(&uctxt->exp_lock);
1655                         continue;
1656                 }
1657                 bits_used = min(free, ngroups);
1658                 tidmap[useidx] |= ((1ULL << bits_used) - 1) << bitidx;
1659                 uctxt->tidusemap[useidx] |= tidmap[useidx];
1660                 spin_unlock(&uctxt->exp_lock);
1661
1662                 /*
1663                  * At this point, we know where in the map we have free bits.
1664          * Properly offset into the various "shadow" arrays and compute
1665                  * the RcvArray entry index.
1666                  */
1667                 offset = ((useidx * BITS_PER_LONG) + bitidx) *
1668                         dd->rcv_entries.group_size;
1669                 pages = uctxt->tid_pg_list + offset;
1670                 phys = uctxt->physshadow + offset;
1671                 tid = uctxt->expected_base + offset;
1672
1673                 /* Calculate how many pages we can pin based on free bits */
1674                 pinned = min((bits_used * dd->rcv_entries.group_size),
1675                              (npages - mapped));
1676                 /*
1677                  * Now that we know how many free RcvArray entries we have,
1678                  * we can pin that many user pages.
1679                  */
1680                 ret = hfi1_get_user_pages(vaddr + (mapped * PAGE_SIZE),
1681                                           pinned, pages);
1682                 if (ret) {
1683                         /*
1684                          * We can't continue because the pages array won't be
1685                          * initialized. This should never happen,
1686                          * unless perhaps the user has mpin'ed the pages
1687                          * themselves.
1688                          */
1689                         dd_dev_info(dd,
1690                                     "Failed to lock addr %p, %u pages: errno %d\n",
1691                                     (void *) vaddr, pinned, -ret);
1692                         /*
1693                          * Let go of the bits that we reserved since we are not
1694                          * going to use them.
1695                          */
1696                         spin_lock(&uctxt->exp_lock);
1697                         uctxt->tidusemap[useidx] &=
1698                                 ~(((1ULL << bits_used) - 1) << bitidx);
1699                         spin_unlock(&uctxt->exp_lock);
1700                         goto done;
1701                 }
1702                 /*
1703                  * How many groups do we need based on how many pages we have
1704                  * pinned?
1705                  */
1706                 ngroups = (pinned / dd->rcv_entries.group_size) +
1707                         !!(pinned % dd->rcv_entries.group_size);
1708                 /*
1709                  * Keep programming RcvArray entries for all the <ngroups> free
1710                  * groups.
1711                  */
1712                 for (i = 0, grp = 0; grp < ngroups; i++, grp++) {
1713                         unsigned j;
1714                         u32 pair_size = 0, tidsize;
1715                         /*
1716                          * This inner loop will program an entire group or the
1717                          * array of pinned pages (whichever limit is hit
1718                          * first).
1719                          */
1720                         for (j = 0; j < dd->rcv_entries.group_size &&
1721                                      pmapped < pinned; j++, pmapped++, tid++) {
1722                                 tidsize = PAGE_SIZE;
1723                                 phys[pmapped] = hfi1_map_page(dd->pcidev,
1724                                                    pages[pmapped], 0,
1725                                                    tidsize, PCI_DMA_FROMDEVICE);
1726                                 trace_hfi1_exp_rcv_set(uctxt->ctxt,
1727                                                        subctxt_fp(fp),
1728                                                        tid, vaddr,
1729                                                        phys[pmapped],
1730                                                        pages[pmapped]);
1731                                 /*
1732                                  * Each RcvArray entry is programmed with one
1733                                  * page's worth of memory. This will handle
1734                                  * the 8K MTU as well as anything smaller
1735                                  * due to the fact that both entries in the
1736                                  * RcvTidPair are programmed with a page.
1737                                  * PSM currently does not handle anything
1738                                  * bigger than 8K MTU, so should we even worry
1739                                  * about 10K here?
1740                                  */
1741                                 hfi1_put_tid(dd, tid, PT_EXPECTED,
1742                                              phys[pmapped],
1743                                              ilog2(tidsize >> PAGE_SHIFT) + 1);
1744                                 pair_size += tidsize >> PAGE_SHIFT;
1745                                 EXP_TID_RESET(tidlist[pairidx], LEN, pair_size);
1746                                 if (!(tid % 2)) {
1747                                         tidlist[pairidx] |=
1748                                            EXP_TID_SET(IDX,
1749                                                 (tid - uctxt->expected_base)
1750                                                        / 2);
1751                                         tidlist[pairidx] |=
1752                                                 EXP_TID_SET(CTRL, 1);
1753                                         tidcnt++;
1754                                 } else {
1755                                         tidlist[pairidx] |=
1756                                                 EXP_TID_SET(CTRL, 2);
1757                                         pair_size = 0;
1758                                         pairidx++;
1759                                 }
1760                         }
1761                         /*
1762                          * We've programmed the entire group (or as much of the
1763                          * group as we'll use). Now, it's time to push it out...
1764                          */
1765                         flush_wc();
1766                 }
1767                 mapped += pinned;
1768                 atomic_set(&uctxt->tidcursor,
1769                            (((useidx & 0xffff) << 16) |
1770                             ((bitidx + bits_used) & 0xffff)));
1771         }
1772         trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 0, uctxt->tidusemap,
1773                                uctxt->tidmapcnt);
1774
1775 done:
1776         /* If we've mapped anything, copy relevant info to user */
1777         if (mapped) {
1778                 if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
1779                                  tidlist, sizeof(tidlist[0]) * tidcnt)) {
1780                         ret = -EFAULT;
1781                         goto done;
1782                 }
1783                 /* copy TID info to user */
1784                 if (copy_to_user((void __user *)(unsigned long)tinfo->tidmap,
1785                                  tidmap, sizeof(tidmap[0]) * uctxt->tidmapcnt))
1786                         ret = -EFAULT;
1787         }
1788 bail:
1789         /*
1790          * Calculate mapped length. New Exp TID protocol does not "unwind" and
1791          * report an error if it can't map the entire buffer. It just reports
1792          * the length that was mapped.
1793          */
1794         tinfo->length = mapped * PAGE_SIZE;
1795         tinfo->tidcnt = tidcnt;
1796         return ret;
1797 }
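/*
 * Note on the tidlist encoding built above (descriptive, based on the
 * EXP_TID_SET/EXP_TID_RESET usage): RcvArray entries are handed back to
 * user space in pairs.  The even TID of a pair records the pair index
 * (IDX = (tid - expected_base) / 2) with CTRL = 1; the odd TID closes
 * the pair with CTRL = 2, and LEN accumulates the number of pages
 * programmed into the pair, so a fully used pair carries LEN = 2.
 */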
1798
1799 static int exp_tid_free(struct file *fp, struct hfi1_tid_info *tinfo)
1800 {
1801         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1802         struct hfi1_devdata *dd = uctxt->dd;
1803         unsigned long tidmap[uctxt->tidmapcnt];
1804         struct page **pages;
1805         dma_addr_t *phys;
1806         u16 idx, bitidx, tid;
1807         int ret = 0;
1808
1809         if (copy_from_user(&tidmap, (void __user *)(unsigned long)
1810                            tinfo->tidmap,
1811                            sizeof(tidmap[0]) * uctxt->tidmapcnt)) {
1812                 ret = -EFAULT;
1813                 goto done;
1814         }
1815         for (idx = 0; idx < uctxt->tidmapcnt; idx++) {
1816                 unsigned long map;
1817
1818                 bitidx = 0;
1819                 if (!tidmap[idx])
1820                         continue;
1821                 map = tidmap[idx];
1822                 while ((bitidx = tzcnt(map)) < BITS_PER_LONG) {
1823                         int i, pcount = 0;
1824                         struct page *pshadow[dd->rcv_entries.group_size];
1825                         unsigned offset = ((idx * BITS_PER_LONG) + bitidx) *
1826                                 dd->rcv_entries.group_size;
1827
1828                         pages = uctxt->tid_pg_list + offset;
1829                         phys = uctxt->physshadow + offset;
1830                         tid = uctxt->expected_base + offset;
1831                         for (i = 0; i < dd->rcv_entries.group_size;
1832                              i++, tid++) {
1833                                 if (pages[i]) {
1834                                         hfi1_put_tid(dd, tid, PT_INVALID,
1835                                                       0, 0);
1836                                         trace_hfi1_exp_rcv_free(uctxt->ctxt,
1837                                                                 subctxt_fp(fp),
1838                                                                 tid, phys[i],
1839                                                                 pages[i]);
1840                                         pci_unmap_page(dd->pcidev, phys[i],
1841                                               PAGE_SIZE, PCI_DMA_FROMDEVICE);
1842                                         pshadow[pcount] = pages[i];
1843                                         pages[i] = NULL;
1844                                         pcount++;
1845                                         phys[i] = 0;
1846                                 }
1847                         }
1848                         flush_wc();
1849                         hfi1_release_user_pages(pshadow, pcount);
1850                         clear_bit(bitidx, &uctxt->tidusemap[idx]);
1851                         map &= ~(1ULL<<bitidx);
1852                 }
1853         }
1854         trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 1, uctxt->tidusemap,
1855                                uctxt->tidmapcnt);
1856 done:
1857         return ret;
1858 }
1859
1860 static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt)
1861 {
1862         struct hfi1_devdata *dd = uctxt->dd;
1863         unsigned tid;
1864
1865         dd_dev_info(dd, "ctxt %u unlocking any locked expTID pages\n",
1866                     uctxt->ctxt);
1867         for (tid = 0; tid < uctxt->expected_count; tid++) {
1868                 struct page *p = uctxt->tid_pg_list[tid];
1869                 dma_addr_t phys;
1870
1871                 if (!p)
1872                         continue;
1873
1874                 phys = uctxt->physshadow[tid];
1875                 uctxt->physshadow[tid] = 0;
1876                 uctxt->tid_pg_list[tid] = NULL;
1877                 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
1878                 hfi1_release_user_pages(&p, 1);
1879         }
1880 }
1881
1882 static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
1883                          u16 pkey)
1884 {
1885         int ret = -ENOENT, i, intable = 0;
1886         struct hfi1_pportdata *ppd = uctxt->ppd;
1887         struct hfi1_devdata *dd = uctxt->dd;
1888
1889         if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
1890                 ret = -EINVAL;
1891                 goto done;
1892         }
1893
1894         for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
1895                 if (pkey == ppd->pkeys[i]) {
1896                         intable = 1;
1897                         break;
1898                 }
1899
1900         if (intable)
1901                 ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
1902 done:
1903         return ret;
1904 }
1905
1906 static int ui_open(struct inode *inode, struct file *filp)
1907 {
1908         struct hfi1_devdata *dd;
1909
1910         dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
1911         filp->private_data = dd; /* for other methods */
1912         return 0;
1913 }
1914
1915 static int ui_release(struct inode *inode, struct file *filp)
1916 {
1917         /* nothing to do */
1918         return 0;
1919 }
1920
1921 static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
1922 {
1923         struct hfi1_devdata *dd = filp->private_data;
1924
1925         switch (whence) {
1926         case SEEK_SET:
1927                 break;
1928         case SEEK_CUR:
1929                 offset += filp->f_pos;
1930                 break;
1931         case SEEK_END:
1932                 offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
1933                         offset;
1934                 break;
1935         default:
1936                 return -EINVAL;
1937         }
1938
1939         if (offset < 0)
1940                 return -EINVAL;
1941
1942         if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
1943                 return -EINVAL;
1944
1945         filp->f_pos = offset;
1946
1947         return filp->f_pos;
1948 }
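/*
 * Illustrative note on the lseek semantics above: the seekable range
 * covers the chip CSR space followed by the 8051 data memory, and
 * SEEK_END interprets the offset as a distance back from the end of
 * that range rather than the usual "end plus offset".  A sketch,
 * assuming an open ui device fd:
 *
 *	lseek(fd, 8, SEEK_END);		// 8 bytes before the end
 *
 * An offset of 0 with SEEK_END lands exactly on the end and is rejected
 * by the range check, so it cannot be used to probe the region size.
 */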
1949
1950
1951 /* NOTE: assumes unsigned long is 8 bytes */
1952 static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
1953                         loff_t *f_pos)
1954 {
1955         struct hfi1_devdata *dd = filp->private_data;
1956         void __iomem *base = dd->kregbase;
1957         unsigned long total, csr_off,
1958                 barlen = (dd->kregend - dd->kregbase);
1959         u64 data;
1960
1961         /* only read 8 byte quantities */
1962         if ((count % 8) != 0)
1963                 return -EINVAL;
1964         /* offset must be 8-byte aligned */
1965         if ((*f_pos % 8) != 0)
1966                 return -EINVAL;
1967         /* destination buffer must be 8-byte aligned */
1968         if ((unsigned long)buf % 8 != 0)
1969                 return -EINVAL;
1970         /* must be in range */
1971         if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
1972                 return -EINVAL;
1973         /* only set the base if we are not starting past the BAR */
1974         if (*f_pos < barlen)
1975                 base += *f_pos;
1976         csr_off = *f_pos;
1977         for (total = 0; total < count; total += 8, csr_off += 8) {
1978                 /* accessing LCB CSRs requires more checks */
1979                 if (is_lcb_offset(csr_off)) {
1980                         if (read_lcb_csr(dd, csr_off, (u64 *)&data))
1981                                 break; /* failed */
1982                 }
1983                 /*
1984                  * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
1985                  * false parity error.  Avoid the whole issue by not reading
1986                  * them.  These registers are defined as having a read value
1987                  * of 0.
1988                  */
1989                 else if (csr_off == ASIC_GPIO_CLEAR
1990                                 || csr_off == ASIC_GPIO_FORCE
1991                                 || csr_off == ASIC_QSFP1_CLEAR
1992                                 || csr_off == ASIC_QSFP1_FORCE
1993                                 || csr_off == ASIC_QSFP2_CLEAR
1994                                 || csr_off == ASIC_QSFP2_FORCE)
1995                         data = 0;
1996                 else if (csr_off >= barlen) {
1997                         /*
1998                          * read_8051_data can read more than just 8 bytes at
1999                          * a time. However, folding this into the loop and
2000                          * handling the reads in 8 byte increments allows us
2001                          * to smoothly transition from chip memory to 8051
2002                          * memory.
2003                          */
2004                         if (read_8051_data(dd,
2005                                            (u32)(csr_off - barlen),
2006                                            sizeof(data), &data))
2007                                 break; /* failed */
2008                 } else
2009                         data = readq(base + total);
2010                 if (put_user(data, (unsigned long __user *)(buf + total)))
2011                         break;
2012         }
2013         *f_pos += total;
2014         return total;
2015 }
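/*
 * Minimal userspace sketch for the read path above (an assumption, not
 * driver code): CSRs are read in aligned 8-byte units through the ui
 * device, e.g.
 *
 *	uint64_t val;
 *	int fd = open("/dev/hfi1_ui0", O_RDONLY);
 *
 *	if (pread(fd, &val, sizeof(val), csr_offset) == sizeof(val))
 *		printf("CSR 0x%lx = 0x%llx\n", csr_offset,
 *		       (unsigned long long)val);
 *
 * The device node name and csr_offset are placeholders; count and offset
 * must be multiples of 8 and the buffer 8-byte aligned, as checked
 * above, and offsets past the BAR fall through to the 8051 data memory.
 */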
2016
2017 /* NOTE: assumes unsigned long is 8 bytes */
2018 static ssize_t ui_write(struct file *filp, const char __user *buf,
2019                         size_t count, loff_t *f_pos)
2020 {
2021         struct hfi1_devdata *dd = filp->private_data;
2022         void __iomem *base;
2023         unsigned long total, data, csr_off;
2024         int in_lcb;
2025
2026         /* only write 8 byte quantities */
2027         if ((count % 8) != 0)
2028                 return -EINVAL;
2029         /* offset must be 8-byte aligned */
2030         if ((*f_pos % 8) != 0)
2031                 return -EINVAL;
2032         /* source buffer must be 8-byte aligned */
2033         if ((unsigned long)buf % 8 != 0)
2034                 return -EINVAL;
2035         /* must be in range */
2036         if (*f_pos + count > dd->kregend - dd->kregbase)
2037                 return -EINVAL;
2038
2039         base = (void __iomem *)dd->kregbase + *f_pos;
2040         csr_off = *f_pos;
2041         in_lcb = 0;
2042         for (total = 0; total < count; total += 8, csr_off += 8) {
2043                 if (get_user(data, (unsigned long __user *)(buf + total)))
2044                         break;
2045                 /* accessing LCB CSRs requires a special procedure */
2046                 if (is_lcb_offset(csr_off)) {
2047                         if (!in_lcb) {
2048                                 int ret = acquire_lcb_access(dd, 1);
2049
2050                                 if (ret)
2051                                         break;
2052                                 in_lcb = 1;
2053                         }
2054                 } else {
2055                         if (in_lcb) {
2056                                 release_lcb_access(dd, 1);
2057                                 in_lcb = 0;
2058                         }
2059                 }
2060                 writeq(data, base + total);
2061         }
2062         if (in_lcb)
2063                 release_lcb_access(dd, 1);
2064         *f_pos += total;
2065         return total;
2066 }
2067
2068 static const struct file_operations ui_file_ops = {
2069         .owner = THIS_MODULE,
2070         .llseek = ui_lseek,
2071         .read = ui_read,
2072         .write = ui_write,
2073         .open = ui_open,
2074         .release = ui_release,
2075 };
2076
2077 #define UI_OFFSET 192   /* device minor offset for UI devices */
2078 static int create_ui = 1;
2079
2080 static struct cdev wildcard_cdev;
2081 static struct device *wildcard_device;
2082
2083 static atomic_t user_count = ATOMIC_INIT(0);
2084
2085 static void user_remove(struct hfi1_devdata *dd)
2086 {
2087         if (atomic_dec_return(&user_count) == 0)
2088                 hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);
2089
2090         hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
2091         hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
2092 }
2093
2094 static int user_add(struct hfi1_devdata *dd)
2095 {
2096         char name[10];
2097         int ret;
2098
2099         if (atomic_inc_return(&user_count) == 1) {
2100                 ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
2101                                      &wildcard_cdev, &wildcard_device,
2102                                      true);
2103                 if (ret)
2104                         goto done;
2105         }
2106
2107         snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
2108         ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
2109                              &dd->user_cdev, &dd->user_device,
2110                              true);
2111         if (ret)
2112                 goto done;
2113
2114         if (create_ui) {
2115                 snprintf(name, sizeof(name),
2116                          "%s_ui%d", class_name(), dd->unit);
2117                 ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
2118                                      &dd->ui_cdev, &dd->ui_device,
2119                                      false);
2120                 if (ret)
2121                         goto done;
2122         }
2123
2124         return 0;
2125 done:
2126         user_remove(dd);
2127         return ret;
2128 }
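/*
 * Resulting device nodes (illustrative; assumes class_name() returns
 * "hfi1" and unit 0):
 *
 *	/dev/hfi1        minor 0            wildcard, created once for all units
 *	/dev/hfi1_0      minor unit + 1     per-unit user context device
 *	/dev/hfi1_ui0    minor unit + 192   CSR access (ui_file_ops)
 *
 * UI_OFFSET (192) keeps the ui minors clear of the per-unit user minors.
 */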
2129
2130 /*
2131  * Create per-unit files in /dev
2132  */
2133 int hfi1_device_create(struct hfi1_devdata *dd)
2134 {
2135         int r, ret;
2136
2137         r = user_add(dd);
2138         ret = hfi1_diag_add(dd);
2139         if (r && !ret)
2140                 ret = r;
2141         return ret;
2142 }
2143
2144 /*
2145  * Remove per-unit files in /dev
2146  * Returns void; the core kernel reports no errors for this removal path.
2147  */
2148 void hfi1_device_remove(struct hfi1_devdata *dd)
2149 {
2150         user_remove(dd);
2151         hfi1_diag_remove(dd);
2152 }