These changes are the raw update to the linux-4.4.6-rt14 kernel sources.
[kvmfornfv.git] / kernel / drivers / staging / rdma / hfi1 / file_ops.c
1 /*
2  *
3  * This file is provided under a dual BSD/GPLv2 license.  When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2015 Intel Corporation.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of version 2 of the GNU General Public License as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful, but
15  * WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * General Public License for more details.
18  *
19  * BSD LICENSE
20  *
21  * Copyright(c) 2015 Intel Corporation.
22  *
23  * Redistribution and use in source and binary forms, with or without
24  * modification, are permitted provided that the following conditions
25  * are met:
26  *
27  *  - Redistributions of source code must retain the above copyright
28  *    notice, this list of conditions and the following disclaimer.
29  *  - Redistributions in binary form must reproduce the above copyright
30  *    notice, this list of conditions and the following disclaimer in
31  *    the documentation and/or other materials provided with the
32  *    distribution.
33  *  - Neither the name of Intel Corporation nor the names of its
34  *    contributors may be used to endorse or promote products derived
35  *    from this software without specific prior written permission.
36  *
37  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48  *
49  */
50 #include <linux/pci.h>
51 #include <linux/poll.h>
52 #include <linux/cdev.h>
53 #include <linux/swap.h>
54 #include <linux/vmalloc.h>
55 #include <linux/highmem.h>
56 #include <linux/io.h>
57 #include <linux/jiffies.h>
58 #include <asm/pgtable.h>
59 #include <linux/delay.h>
60 #include <linux/export.h>
61 #include <linux/module.h>
62 #include <linux/cred.h>
63 #include <linux/uio.h>
64
65 #include "hfi.h"
66 #include "pio.h"
67 #include "device.h"
68 #include "common.h"
69 #include "trace.h"
70 #include "user_sdma.h"
71 #include "eprom.h"
72
73 #undef pr_fmt
74 #define pr_fmt(fmt) DRIVER_NAME ": " fmt
75
76 #define SEND_CTXT_HALT_TIMEOUT 1000 /* msecs */
77
78 /*
79  * File operation functions
80  */
81 static int hfi1_file_open(struct inode *, struct file *);
82 static int hfi1_file_close(struct inode *, struct file *);
83 static ssize_t hfi1_file_write(struct file *, const char __user *,
84                                size_t, loff_t *);
85 static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
86 static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
87 static int hfi1_file_mmap(struct file *, struct vm_area_struct *);
88
89 static u64 kvirt_to_phys(void *);
90 static int assign_ctxt(struct file *, struct hfi1_user_info *);
91 static int init_subctxts(struct hfi1_ctxtdata *, const struct hfi1_user_info *);
92 static int user_init(struct file *);
93 static int get_ctxt_info(struct file *, void __user *, __u32);
94 static int get_base_info(struct file *, void __user *, __u32);
95 static int setup_ctxt(struct file *);
96 static int setup_subctxt(struct hfi1_ctxtdata *);
97 static int get_user_context(struct file *, struct hfi1_user_info *,
98                             int, unsigned);
99 static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
100 static int allocate_ctxt(struct file *, struct hfi1_devdata *,
101                          struct hfi1_user_info *);
102 static unsigned int poll_urgent(struct file *, struct poll_table_struct *);
103 static unsigned int poll_next(struct file *, struct poll_table_struct *);
104 static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
105 static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
106 static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
107 static int vma_fault(struct vm_area_struct *, struct vm_fault *);
108 static int exp_tid_setup(struct file *, struct hfi1_tid_info *);
109 static int exp_tid_free(struct file *, struct hfi1_tid_info *);
110 static void unlock_exp_tids(struct hfi1_ctxtdata *);
111
112 static const struct file_operations hfi1_file_ops = {
113         .owner = THIS_MODULE,
114         .write = hfi1_file_write,
115         .write_iter = hfi1_write_iter,
116         .open = hfi1_file_open,
117         .release = hfi1_file_close,
118         .poll = hfi1_poll,
119         .mmap = hfi1_file_mmap,
120         .llseek = noop_llseek,
121 };
122
123 static const struct vm_operations_struct vm_ops = {
124         .fault = vma_fault,
125 };
126
127 /*
128  * Types of memories mapped into user processes' space
129  */
130 enum mmap_types {
131         PIO_BUFS = 1,
132         PIO_BUFS_SOP,
133         PIO_CRED,
134         RCV_HDRQ,
135         RCV_EGRBUF,
136         UREGS,
137         EVENTS,
138         STATUS,
139         RTAIL,
140         SUBCTXT_UREGS,
141         SUBCTXT_RCV_HDRQ,
142         SUBCTXT_EGRBUF,
143         SDMA_COMP
144 };
145
146 /*
147  * Masks and offsets defining the mmap tokens
148  */
149 #define HFI1_MMAP_OFFSET_MASK   0xfffULL
150 #define HFI1_MMAP_OFFSET_SHIFT  0
151 #define HFI1_MMAP_SUBCTXT_MASK  0xfULL
152 #define HFI1_MMAP_SUBCTXT_SHIFT 12
153 #define HFI1_MMAP_CTXT_MASK     0xffULL
154 #define HFI1_MMAP_CTXT_SHIFT    16
155 #define HFI1_MMAP_TYPE_MASK     0xfULL
156 #define HFI1_MMAP_TYPE_SHIFT    24
157 #define HFI1_MMAP_MAGIC_MASK    0xffffffffULL
158 #define HFI1_MMAP_MAGIC_SHIFT   32
159
160 #define HFI1_MMAP_MAGIC         0xdabbad00
161
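/*
 * A token packs the mmap parameters into vma->vm_pgoff << PAGE_SHIFT:
 * bits 0-11 page offset, bits 12-15 sub-context, bits 16-23 context,
 * bits 24-27 memory type, and bits 32-63 the magic value checked by
 * is_valid_mmap().
 */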
162 #define HFI1_MMAP_TOKEN_SET(field, val) \
163         (((val) & HFI1_MMAP_##field##_MASK) << HFI1_MMAP_##field##_SHIFT)
164 #define HFI1_MMAP_TOKEN_GET(field, token) \
165         (((token) >> HFI1_MMAP_##field##_SHIFT) & HFI1_MMAP_##field##_MASK)
166 #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr)   \
167         (HFI1_MMAP_TOKEN_SET(MAGIC, HFI1_MMAP_MAGIC) | \
168         HFI1_MMAP_TOKEN_SET(TYPE, type) | \
169         HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
170         HFI1_MMAP_TOKEN_SET(SUBCTXT, subctxt) | \
171         HFI1_MMAP_TOKEN_SET(OFFSET, (offset_in_page(addr))))
172
173 #define EXP_TID_SET(field, value)                       \
174         (((value) & EXP_TID_TID##field##_MASK) <<       \
175          EXP_TID_TID##field##_SHIFT)
176 #define EXP_TID_CLEAR(tid, field) {                                     \
177                 (tid) &= ~(EXP_TID_TID##field##_MASK <<                 \
178                            EXP_TID_TID##field##_SHIFT);                 \
179                         }
180 #define EXP_TID_RESET(tid, field, value) do {                           \
181                 EXP_TID_CLEAR(tid, field);                              \
182                 (tid) |= EXP_TID_SET(field, value);                     \
183         } while (0)
184
185 #define dbg(fmt, ...)                           \
186         pr_info(fmt, ##__VA_ARGS__)
187
188
189 static inline int is_valid_mmap(u64 token)
190 {
191         return (HFI1_MMAP_TOKEN_GET(MAGIC, token) == HFI1_MMAP_MAGIC);
192 }
193
194 static int hfi1_file_open(struct inode *inode, struct file *fp)
195 {
196         /* The real work is performed later in assign_ctxt() */
197         fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
198         if (fp->private_data) /* no cpu affinity by default */
199                 ((struct hfi1_filedata *)fp->private_data)->rec_cpu_num = -1;
200         return fp->private_data ? 0 : -ENOMEM;
201 }
202
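/*
 * write() entry point: copy a struct hfi1_cmd from user space, copy in any
 * command-specific payload, validate context/privilege requirements, then
 * dispatch on cmd.type.
 */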
203 static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
204                                size_t count, loff_t *offset)
205 {
206         const struct hfi1_cmd __user *ucmd;
207         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
208         struct hfi1_cmd cmd;
209         struct hfi1_user_info uinfo;
210         struct hfi1_tid_info tinfo;
211         ssize_t consumed = 0, copy = 0, ret = 0;
212         void *dest = NULL;
213         __u64 user_val = 0;
214         int uctxt_required = 1;
215         int must_be_root = 0;
216
217         if (count < sizeof(cmd)) {
218                 ret = -EINVAL;
219                 goto bail;
220         }
221
222         ucmd = (const struct hfi1_cmd __user *)data;
223         if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
224                 ret = -EFAULT;
225                 goto bail;
226         }
227
228         consumed = sizeof(cmd);
229
230         switch (cmd.type) {
231         case HFI1_CMD_ASSIGN_CTXT:
232                 uctxt_required = 0;     /* assigned user context not required */
233                 copy = sizeof(uinfo);
234                 dest = &uinfo;
235                 break;
236         case HFI1_CMD_SDMA_STATUS_UPD:
237         case HFI1_CMD_CREDIT_UPD:
238                 copy = 0;
239                 break;
240         case HFI1_CMD_TID_UPDATE:
241         case HFI1_CMD_TID_FREE:
242                 copy = sizeof(tinfo);
243                 dest = &tinfo;
244                 break;
245         case HFI1_CMD_USER_INFO:
246         case HFI1_CMD_RECV_CTRL:
247         case HFI1_CMD_POLL_TYPE:
248         case HFI1_CMD_ACK_EVENT:
249         case HFI1_CMD_CTXT_INFO:
250         case HFI1_CMD_SET_PKEY:
251         case HFI1_CMD_CTXT_RESET:
252                 copy = 0;
253                 user_val = cmd.addr;
254                 break;
255         case HFI1_CMD_EP_INFO:
256         case HFI1_CMD_EP_ERASE_CHIP:
257         case HFI1_CMD_EP_ERASE_P0:
258         case HFI1_CMD_EP_ERASE_P1:
259         case HFI1_CMD_EP_READ_P0:
260         case HFI1_CMD_EP_READ_P1:
261         case HFI1_CMD_EP_WRITE_P0:
262         case HFI1_CMD_EP_WRITE_P1:
263                 uctxt_required = 0;     /* assigned user context not required */
264                 must_be_root = 1;       /* validate user */
265                 copy = 0;
266                 break;
267         default:
268                 ret = -EINVAL;
269                 goto bail;
270         }
271
272         /* If the command comes with user data, copy it. */
273         if (copy) {
274                 if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
275                         ret = -EFAULT;
276                         goto bail;
277                 }
278                 consumed += copy;
279         }
280
281         /*
282          * Make sure there is a uctxt when needed.
283          */
284         if (uctxt_required && !uctxt) {
285                 ret = -EINVAL;
286                 goto bail;
287         }
288
289         /* only root can do these operations */
290         if (must_be_root && !capable(CAP_SYS_ADMIN)) {
291                 ret = -EPERM;
292                 goto bail;
293         }
294
295         switch (cmd.type) {
296         case HFI1_CMD_ASSIGN_CTXT:
297                 ret = assign_ctxt(fp, &uinfo);
298                 if (ret < 0)
299                         goto bail;
300                 ret = setup_ctxt(fp);
301                 if (ret)
302                         goto bail;
303                 ret = user_init(fp);
304                 break;
305         case HFI1_CMD_CTXT_INFO:
306                 ret = get_ctxt_info(fp, (void __user *)(unsigned long)
307                                     user_val, cmd.len);
308                 break;
309         case HFI1_CMD_USER_INFO:
310                 ret = get_base_info(fp, (void __user *)(unsigned long)
311                                     user_val, cmd.len);
312                 break;
313         case HFI1_CMD_SDMA_STATUS_UPD:
314                 break;
315         case HFI1_CMD_CREDIT_UPD:
316                 if (uctxt && uctxt->sc)
317                         sc_return_credits(uctxt->sc);
318                 break;
319         case HFI1_CMD_TID_UPDATE:
320                 ret = exp_tid_setup(fp, &tinfo);
321                 if (!ret) {
322                         unsigned long addr;
323                         /*
324                          * Copy the number of tidlist entries we used
325                          * and the length of the buffer we registered.
326                          * These fields are adjacent in the structure so
327                          * we can copy them at the same time.
328                          */
329                         addr = (unsigned long)cmd.addr +
330                                 offsetof(struct hfi1_tid_info, tidcnt);
331                         if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
332                                          sizeof(tinfo.tidcnt) +
333                                          sizeof(tinfo.length)))
334                                 ret = -EFAULT;
335                 }
336                 break;
337         case HFI1_CMD_TID_FREE:
338                 ret = exp_tid_free(fp, &tinfo);
339                 break;
340         case HFI1_CMD_RECV_CTRL:
341                 ret = manage_rcvq(uctxt, subctxt_fp(fp), (int)user_val);
342                 break;
343         case HFI1_CMD_POLL_TYPE:
344                 uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
345                 break;
346         case HFI1_CMD_ACK_EVENT:
347                 ret = user_event_ack(uctxt, subctxt_fp(fp), user_val);
348                 break;
349         case HFI1_CMD_SET_PKEY:
350                 if (HFI1_CAP_IS_USET(PKEY_CHECK))
351                         ret = set_ctxt_pkey(uctxt, subctxt_fp(fp), user_val);
352                 else
353                         ret = -EPERM;
354                 break;
355         case HFI1_CMD_CTXT_RESET: {
356                 struct send_context *sc;
357                 struct hfi1_devdata *dd;
358
359                 if (!uctxt || !uctxt->dd || !uctxt->sc) {
360                         ret = -EINVAL;
361                         break;
362                 }
363                 /*
364                  * There is no protection here. User level has to
365                  * guarantee that no one will be writing to the send
366                  * context while it is being re-initialized.
367                  * If user level breaks that guarantee, it will break
368  * its own context and no one else's.
369                  */
370                 dd = uctxt->dd;
371                 sc = uctxt->sc;
372                 /*
373                  * Wait until the interrupt handler has marked the
374                  * context as halted or frozen. Report error if we time
375                  * out.
376                  */
377                 wait_event_interruptible_timeout(
378                         sc->halt_wait, (sc->flags & SCF_HALTED),
379                         msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
380                 if (!(sc->flags & SCF_HALTED)) {
381                         ret = -ENOLCK;
382                         break;
383                 }
384                 /*
385                  * If the send context was halted due to a Freeze,
386                  * wait until the device has been "unfrozen" before
387                  * resetting the context.
388                  */
389                 if (sc->flags & SCF_FROZEN) {
390                         wait_event_interruptible_timeout(
391                                 dd->event_queue,
392                                 !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
393                                 msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
394                         if (dd->flags & HFI1_FROZEN) {
395                                 ret = -ENOLCK;
396                                 break;
397                         }
398                         if (dd->flags & HFI1_FORCED_FREEZE) {
399                                 /* Don't allow context reset if we are in
400                                  * a forced freeze */
401                                 ret = -ENODEV;
402                                 break;
403                         }
404                         sc_disable(sc);
405                         ret = sc_enable(sc);
406                         hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
407                                      uctxt->ctxt);
408                 } else
409                         ret = sc_restart(sc);
410                 if (!ret)
411                         sc_return_credits(sc);
412                 break;
413         }
414         case HFI1_CMD_EP_INFO:
415         case HFI1_CMD_EP_ERASE_CHIP:
416         case HFI1_CMD_EP_ERASE_P0:
417         case HFI1_CMD_EP_ERASE_P1:
418         case HFI1_CMD_EP_READ_P0:
419         case HFI1_CMD_EP_READ_P1:
420         case HFI1_CMD_EP_WRITE_P0:
421         case HFI1_CMD_EP_WRITE_P1:
422                 ret = handle_eprom_command(&cmd);
423                 break;
424         }
425
426         if (ret >= 0)
427                 ret = consumed;
428 bail:
429         return ret;
430 }
431
432 static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
433 {
434         struct hfi1_user_sdma_pkt_q *pq;
435         struct hfi1_user_sdma_comp_q *cq;
436         int ret = 0, done = 0, reqs = 0;
437         unsigned long dim = from->nr_segs;
438
439         if (!user_sdma_comp_fp(kiocb->ki_filp) ||
440             !user_sdma_pkt_fp(kiocb->ki_filp)) {
441                 ret = -EIO;
442                 goto done;
443         }
444
445         if (!iter_is_iovec(from) || !dim) {
446                 ret = -EINVAL;
447                 goto done;
448         }
449
450         hfi1_cdbg(SDMA, "SDMA request from %u:%u (%lu)",
451                   ctxt_fp(kiocb->ki_filp)->ctxt, subctxt_fp(kiocb->ki_filp),
452                   dim);
453         pq = user_sdma_pkt_fp(kiocb->ki_filp);
454         cq = user_sdma_comp_fp(kiocb->ki_filp);
455
456         if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
457                 ret = -ENOSPC;
458                 goto done;
459         }
460
461         while (dim) {
462                 unsigned long count = 0;
463
464                 ret = hfi1_user_sdma_process_request(
465                         kiocb->ki_filp, (struct iovec *)(from->iov + done),
466                         dim, &count);
467                 if (ret)
468                         goto done;
469                 dim -= count;
470                 done += count;
471                 reqs++;
472         }
473 done:
474         return ret ? ret : reqs;
475 }
476
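/*
 * mmap() entry point: decode the token carried in vm_pgoff, check it against
 * this file's context, then map the requested memory type either directly
 * (remap_pfn_range/io_remap_pfn_range) or on demand through vma_fault().
 */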
477 static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
478 {
479         struct hfi1_ctxtdata *uctxt;
480         struct hfi1_devdata *dd;
481         unsigned long flags, pfn;
482         u64 token = vma->vm_pgoff << PAGE_SHIFT,
483                 memaddr = 0;
484         u8 subctxt, mapio = 0, vmf = 0, type;
485         ssize_t memlen = 0;
486         int ret = 0;
487         u16 ctxt;
488
489         uctxt = ctxt_fp(fp);
490         if (!is_valid_mmap(token) || !uctxt ||
491             !(vma->vm_flags & VM_SHARED)) {
492                 ret = -EINVAL;
493                 goto done;
494         }
495         dd = uctxt->dd;
496         ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
497         subctxt = HFI1_MMAP_TOKEN_GET(SUBCTXT, token);
498         type = HFI1_MMAP_TOKEN_GET(TYPE, token);
499         if (ctxt != uctxt->ctxt || subctxt != subctxt_fp(fp)) {
500                 ret = -EINVAL;
501                 goto done;
502         }
503
504         flags = vma->vm_flags;
505
506         switch (type) {
507         case PIO_BUFS:
508         case PIO_BUFS_SOP:
509                 memaddr = ((dd->physaddr + TXE_PIO_SEND) +
510                                 /* chip pio base */
511                            (uctxt->sc->hw_context * BIT(16))) +
512                                 /* 64K PIO space / ctxt */
513                         (type == PIO_BUFS_SOP ?
514                                 (TXE_PIO_SIZE / 2) : 0); /* sop? */
515                 /*
516                  * Map only the amount allocated to the context, not the
517                  * entire available context's PIO space.
518                  */
519                 memlen = ALIGN(uctxt->sc->credits * PIO_BLOCK_SIZE,
520                                PAGE_SIZE);
521                 flags &= ~VM_MAYREAD;
522                 flags |= VM_DONTCOPY | VM_DONTEXPAND;
523                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
524                 mapio = 1;
525                 break;
526         case PIO_CRED:
527                 if (flags & VM_WRITE) {
528                         ret = -EPERM;
529                         goto done;
530                 }
531                 /*
532                  * The credit return location for this context could be on the
533                  * second or third page allocated for credit returns (if number
534                  * of enabled contexts > 64 and 128 respectively).
535                  */
536                 memaddr = dd->cr_base[uctxt->numa_id].pa +
537                         (((u64)uctxt->sc->hw_free -
538                           (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
539                 memlen = PAGE_SIZE;
540                 flags &= ~VM_MAYWRITE;
541                 flags |= VM_DONTCOPY | VM_DONTEXPAND;
542                 /*
543                  * The driver has already allocated memory for credit
544                  * returns and programmed it into the chip. Has that
545                  * memory been flagged as non-cached?
546                  */
547                 /* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
548                 mapio = 1;
549                 break;
550         case RCV_HDRQ:
551                 memaddr = uctxt->rcvhdrq_phys;
552                 memlen = uctxt->rcvhdrq_size;
553                 break;
554         case RCV_EGRBUF: {
555                 unsigned long addr;
556                 int i;
557                 /*
558                  * The RcvEgr buffer needs to be handled differently
559                  * as multiple non-contiguous pages need to be mapped
560                  * into the user process.
561                  */
562                 memlen = uctxt->egrbufs.size;
563                 if ((vma->vm_end - vma->vm_start) != memlen) {
564                         dd_dev_err(dd, "Eager buffer map size invalid (%lu != %lu)\n",
565                                    (vma->vm_end - vma->vm_start), memlen);
566                         ret = -EINVAL;
567                         goto done;
568                 }
569                 if (vma->vm_flags & VM_WRITE) {
570                         ret = -EPERM;
571                         goto done;
572                 }
573                 vma->vm_flags &= ~VM_MAYWRITE;
574                 addr = vma->vm_start;
575                 for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
576                         ret = remap_pfn_range(
577                                 vma, addr,
578                                 uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
579                                 uctxt->egrbufs.buffers[i].len,
580                                 vma->vm_page_prot);
581                         if (ret < 0)
582                                 goto done;
583                         addr += uctxt->egrbufs.buffers[i].len;
584                 }
585                 ret = 0;
586                 goto done;
587         }
588         case UREGS:
589                 /*
590                  * Map only the page that contains this context's user
591                  * registers.
592                  */
593                 memaddr = (unsigned long)
594                         (dd->physaddr + RXE_PER_CONTEXT_USER)
595                         + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
596                 /*
597                  * TidFlow table is on the same page as the rest of the
598                  * user registers.
599                  */
600                 memlen = PAGE_SIZE;
601                 flags |= VM_DONTCOPY | VM_DONTEXPAND;
602                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
603                 mapio = 1;
604                 break;
605         case EVENTS:
606                 /*
607                  * Use the page where this context's flags are. User level
608  * knows where its own bitmap is within the page.
609                  */
610                 memaddr = (unsigned long)(dd->events +
611                                           ((uctxt->ctxt - dd->first_user_ctxt) *
612                                            HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
613                 memlen = PAGE_SIZE;
614                 /*
615                  * v3.7 removes VM_RESERVED but the effect is kept by
616                  * using VM_IO.
617                  */
618                 flags |= VM_IO | VM_DONTEXPAND;
619                 vmf = 1;
620                 break;
621         case STATUS:
622                 memaddr = kvirt_to_phys((void *)dd->status);
623                 memlen = PAGE_SIZE;
624                 flags |= VM_IO | VM_DONTEXPAND;
625                 break;
626         case RTAIL:
627                 if (!HFI1_CAP_IS_USET(DMA_RTAIL)) {
628                         /*
629                          * If the memory allocation failed, the context alloc
630                          * also would have failed, so we would never get here
631                          */
632                         ret = -EINVAL;
633                         goto done;
634                 }
635                 if (flags & VM_WRITE) {
636                         ret = -EPERM;
637                         goto done;
638                 }
639                 memaddr = uctxt->rcvhdrqtailaddr_phys;
640                 memlen = PAGE_SIZE;
641                 flags &= ~VM_MAYWRITE;
642                 break;
643         case SUBCTXT_UREGS:
644                 memaddr = (u64)uctxt->subctxt_uregbase;
645                 memlen = PAGE_SIZE;
646                 flags |= VM_IO | VM_DONTEXPAND;
647                 vmf = 1;
648                 break;
649         case SUBCTXT_RCV_HDRQ:
650                 memaddr = (u64)uctxt->subctxt_rcvhdr_base;
651                 memlen = uctxt->rcvhdrq_size * uctxt->subctxt_cnt;
652                 flags |= VM_IO | VM_DONTEXPAND;
653                 vmf = 1;
654                 break;
655         case SUBCTXT_EGRBUF:
656                 memaddr = (u64)uctxt->subctxt_rcvegrbuf;
657                 memlen = uctxt->egrbufs.size * uctxt->subctxt_cnt;
658                 flags |= VM_IO | VM_DONTEXPAND;
659                 flags &= ~VM_MAYWRITE;
660                 vmf = 1;
661                 break;
662         case SDMA_COMP: {
663                 struct hfi1_user_sdma_comp_q *cq;
664
665                 if (!user_sdma_comp_fp(fp)) {
666                         ret = -EFAULT;
667                         goto done;
668                 }
669                 cq = user_sdma_comp_fp(fp);
670                 memaddr = (u64)cq->comps;
671                 memlen = ALIGN(sizeof(*cq->comps) * cq->nentries, PAGE_SIZE);
672                 flags |= VM_IO | VM_DONTEXPAND;
673                 vmf = 1;
674                 break;
675         }
676         default:
677                 ret = -EINVAL;
678                 break;
679         }
680
681         if ((vma->vm_end - vma->vm_start) != memlen) {
682                 hfi1_cdbg(PROC, "%u:%u Memory size mismatch %lu:%lu",
683                           uctxt->ctxt, subctxt_fp(fp),
684                           (vma->vm_end - vma->vm_start), memlen);
685                 ret = -EINVAL;
686                 goto done;
687         }
688
689         vma->vm_flags = flags;
690         dd_dev_info(dd,
691                     "%s: %u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
692                     __func__, ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
693                     vma->vm_end - vma->vm_start, vma->vm_flags);
694         pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
695         if (vmf) {
696                 vma->vm_pgoff = pfn;
697                 vma->vm_ops = &vm_ops;
698                 ret = 0;
699         } else if (mapio) {
700                 ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
701                                          vma->vm_page_prot);
702         } else {
703                 ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
704                                       vma->vm_page_prot);
705         }
706 done:
707         return ret;
708 }
709
710 /*
711  * Local (non-chip) user memory is not mapped right away, but only as it is
712  * accessed by the user-level code.
713  */
714 static int vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
715 {
716         struct page *page;
717
718         page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
719         if (!page)
720                 return VM_FAULT_SIGBUS;
721
722         get_page(page);
723         vmf->page = page;
724
725         return 0;
726 }
727
728 static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt)
729 {
730         struct hfi1_ctxtdata *uctxt;
731         unsigned pollflag;
732
733         uctxt = ctxt_fp(fp);
734         if (!uctxt)
735                 pollflag = POLLERR;
736         else if (uctxt->poll_type == HFI1_POLL_TYPE_URGENT)
737                 pollflag = poll_urgent(fp, pt);
738         else  if (uctxt->poll_type == HFI1_POLL_TYPE_ANYRCV)
739                 pollflag = poll_next(fp, pt);
740         else /* invalid */
741                 pollflag = POLLERR;
742
743         return pollflag;
744 }
745
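/*
 * release() entry point: free this file's SDMA state and clear its event
 * bits; if this was the last user of the context, disable the receive and
 * send contexts and return the context to the device.
 */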
746 static int hfi1_file_close(struct inode *inode, struct file *fp)
747 {
748         struct hfi1_filedata *fdata = fp->private_data;
749         struct hfi1_ctxtdata *uctxt = fdata->uctxt;
750         struct hfi1_devdata *dd;
751         unsigned long flags, *ev;
752
753         fp->private_data = NULL;
754
755         if (!uctxt)
756                 goto done;
757
758         hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
759         dd = uctxt->dd;
760         mutex_lock(&hfi1_mutex);
761
762         flush_wc();
763         /* drain user sdma queue */
764         if (fdata->pq)
765                 hfi1_user_sdma_free_queues(fdata);
766
767         /*
768          * Clear any left over, unhandled events so the next process that
769          * gets this context doesn't get confused.
770          */
771         ev = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
772                            HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
773         *ev = 0;
774
775         if (--uctxt->cnt) {
776                 uctxt->active_slaves &= ~(1 << fdata->subctxt);
777                 uctxt->subpid[fdata->subctxt] = 0;
778                 mutex_unlock(&hfi1_mutex);
779                 goto done;
780         }
781
782         spin_lock_irqsave(&dd->uctxt_lock, flags);
783         /*
784          * Disable receive context and interrupt available, reset all
785          * RcvCtxtCtrl bits to default values.
786          */
787         hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
788                      HFI1_RCVCTRL_TIDFLOW_DIS |
789                      HFI1_RCVCTRL_INTRAVAIL_DIS |
790                      HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
791                      HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
792                      HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt->ctxt);
793         /* Clear the context's J_KEY */
794         hfi1_clear_ctxt_jkey(dd, uctxt->ctxt);
795         /*
796          * Reset context integrity checks to default.
797          * (writes to CSRs probably belong in chip.c)
798          */
799         write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
800                         hfi1_pkt_default_send_ctxt_mask(dd, uctxt->sc->type));
801         sc_disable(uctxt->sc);
802         uctxt->pid = 0;
803         spin_unlock_irqrestore(&dd->uctxt_lock, flags);
804
805         dd->rcd[uctxt->ctxt] = NULL;
806         uctxt->rcvwait_to = 0;
807         uctxt->piowait_to = 0;
808         uctxt->rcvnowait = 0;
809         uctxt->pionowait = 0;
810         uctxt->event_flags = 0;
811
812         hfi1_clear_tids(uctxt);
813         hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
814
815         if (uctxt->tid_pg_list)
816                 unlock_exp_tids(uctxt);
817
818         hfi1_stats.sps_ctxts--;
819         dd->freectxts++;
820         mutex_unlock(&hfi1_mutex);
821         hfi1_free_ctxtdata(dd, uctxt);
822 done:
823         kfree(fdata);
824         return 0;
825 }
826
827 /*
828  * Convert kernel *virtual* addresses to physical addresses.
829  * This is used for vmalloc'ed addresses.
830  */
831 static u64 kvirt_to_phys(void *addr)
832 {
833         struct page *page;
834         u64 paddr = 0;
835
836         page = vmalloc_to_page(addr);
837         if (page)
838                 paddr = page_to_pfn(page) << PAGE_SHIFT;
839
840         return paddr;
841 }
842
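/*
 * Validate the user's software version, then either join an existing shared
 * context (when sub-contexts were requested) or pick a device and allocate a
 * new context.
 */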
843 static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
844 {
845         int i_minor, ret = 0;
846         unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;
847
848         swmajor = uinfo->userversion >> 16;
849         if (swmajor != HFI1_USER_SWMAJOR) {
850                 ret = -ENODEV;
851                 goto done;
852         }
853
854         swminor = uinfo->userversion & 0xffff;
855
856         if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
857                 alg = uinfo->hfi1_alg;
858
859         mutex_lock(&hfi1_mutex);
860         /* First, let's check if we need to set up a shared context. */
861         if (uinfo->subctxt_cnt)
862                 ret = find_shared_ctxt(fp, uinfo);
863
864         /*
865          * We execute the following block if we couldn't find a
866          * shared context or if context sharing is not required.
867          */
868         if (!ret) {
869                 i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
870                 ret = get_user_context(fp, uinfo, i_minor - 1, alg);
871         }
872         mutex_unlock(&hfi1_mutex);
873 done:
874         return ret;
875 }
876
877 static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
878                             int devno, unsigned alg)
879 {
880         struct hfi1_devdata *dd = NULL;
881         int ret = 0, devmax, npresent, nup, dev;
882
883         devmax = hfi1_count_units(&npresent, &nup);
884         if (!npresent) {
885                 ret = -ENXIO;
886                 goto done;
887         }
888         if (!nup) {
889                 ret = -ENETDOWN;
890                 goto done;
891         }
892         if (devno >= 0) {
893                 dd = hfi1_lookup(devno);
894                 if (!dd)
895                         ret = -ENODEV;
896                 else if (!dd->freectxts)
897                         ret = -EBUSY;
898         } else {
899                 struct hfi1_devdata *pdd;
900
901                 if (alg == HFI1_ALG_ACROSS) {
902                         unsigned free = 0U;
903
904                         for (dev = 0; dev < devmax; dev++) {
905                                 pdd = hfi1_lookup(dev);
906                                 if (pdd && pdd->freectxts &&
907                                     pdd->freectxts > free) {
908                                         dd = pdd;
909                                         free = pdd->freectxts;
910                                 }
911                         }
912                 } else {
913                         for (dev = 0; dev < devmax; dev++) {
914                                 pdd = hfi1_lookup(dev);
915                                 if (pdd && pdd->freectxts) {
916                                         dd = pdd;
917                                         break;
918                                 }
919                         }
920                 }
921                 if (!dd)
922                         ret = -EBUSY;
923         }
924 done:
925         return ret ? ret : allocate_ctxt(fp, dd, uinfo);
926 }
927
928 static int find_shared_ctxt(struct file *fp,
929                             const struct hfi1_user_info *uinfo)
930 {
931         int devmax, ndev, i;
932         int ret = 0;
933
934         devmax = hfi1_count_units(NULL, NULL);
935
936         for (ndev = 0; ndev < devmax; ndev++) {
937                 struct hfi1_devdata *dd = hfi1_lookup(ndev);
938
939                 /* device portion of usable() */
940                 if (!(dd && (dd->flags & HFI1_PRESENT) && dd->kregbase))
941                         continue;
942                 for (i = dd->first_user_ctxt; i < dd->num_rcv_contexts; i++) {
943                         struct hfi1_ctxtdata *uctxt = dd->rcd[i];
944
945                         /* Skip ctxts which are not yet open */
946                         if (!uctxt || !uctxt->cnt)
947                                 continue;
948                         /* Skip ctxt if it doesn't match the requested one */
949                         if (memcmp(uctxt->uuid, uinfo->uuid,
950                                    sizeof(uctxt->uuid)) ||
951                             uctxt->jkey != generate_jkey(current_uid()) ||
952                             uctxt->subctxt_id != uinfo->subctxt_id ||
953                             uctxt->subctxt_cnt != uinfo->subctxt_cnt)
954                                 continue;
955
956                         /* Verify the sharing process matches the master */
957                         if (uctxt->userversion != uinfo->userversion ||
958                             uctxt->cnt >= uctxt->subctxt_cnt) {
959                                 ret = -EINVAL;
960                                 goto done;
961                         }
962                         ctxt_fp(fp) = uctxt;
963                         subctxt_fp(fp) = uctxt->cnt++;
964                         uctxt->subpid[subctxt_fp(fp)] = current->pid;
965                         uctxt->active_slaves |= 1 << subctxt_fp(fp);
966                         ret = 1;
967                         goto done;
968                 }
969         }
970
971 done:
972         return ret;
973 }
974
975 static int allocate_ctxt(struct file *fp, struct hfi1_devdata *dd,
976                          struct hfi1_user_info *uinfo)
977 {
978         struct hfi1_ctxtdata *uctxt;
979         unsigned ctxt;
980         int ret;
981
982         if (dd->flags & HFI1_FROZEN) {
983                 /*
984                  * Pick an error that is unique from all other errors
985                  * that are returned so the user process knows that
986                  * it tried to allocate while the SPC was frozen.  It
987                  * should be able to retry with success in a short
988                  * while.
989                  */
990                 return -EIO;
991         }
992
993         for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts; ctxt++)
994                 if (!dd->rcd[ctxt])
995                         break;
996
997         if (ctxt == dd->num_rcv_contexts)
998                 return -EBUSY;
999
1000         uctxt = hfi1_create_ctxtdata(dd->pport, ctxt);
1001         if (!uctxt) {
1002                 dd_dev_err(dd,
1003                            "Unable to allocate ctxtdata memory, failing open\n");
1004                 return -ENOMEM;
1005         }
1006         /*
1007          * Allocate and enable a PIO send context.
1008          */
1009         uctxt->sc = sc_alloc(dd, SC_USER, uctxt->rcvhdrqentsize,
1010                              uctxt->numa_id);
1011         if (!uctxt->sc)
1012                 return -ENOMEM;
1013
1014         dbg("allocated send context %u(%u)\n", uctxt->sc->sw_index,
1015                 uctxt->sc->hw_context);
1016         ret = sc_enable(uctxt->sc);
1017         if (ret)
1018                 return ret;
1019         /*
1020          * Setup shared context resources if the user-level has requested
1021          * shared contexts and this is the 'master' process.
1022          * This has to be done here so the rest of the sub-contexts find the
1023          * proper master.
1024          */
1025         if (uinfo->subctxt_cnt && !subctxt_fp(fp)) {
1026                 ret = init_subctxts(uctxt, uinfo);
1027                 /*
1028                  * On error, we don't need to disable and de-allocate the
1029                  * send context because it will be done during file close
1030                  */
1031                 if (ret)
1032                         return ret;
1033         }
1034         uctxt->userversion = uinfo->userversion;
1035         uctxt->pid = current->pid;
1036         uctxt->flags = HFI1_CAP_UGET(MASK);
1037         init_waitqueue_head(&uctxt->wait);
1038         strlcpy(uctxt->comm, current->comm, sizeof(uctxt->comm));
1039         memcpy(uctxt->uuid, uinfo->uuid, sizeof(uctxt->uuid));
1040         uctxt->jkey = generate_jkey(current_uid());
1041         INIT_LIST_HEAD(&uctxt->sdma_queues);
1042         spin_lock_init(&uctxt->sdma_qlock);
1043         hfi1_stats.sps_ctxts++;
1044         dd->freectxts--;
1045         ctxt_fp(fp) = uctxt;
1046
1047         return 0;
1048 }
1049
1050 static int init_subctxts(struct hfi1_ctxtdata *uctxt,
1051                          const struct hfi1_user_info *uinfo)
1052 {
1053         int ret = 0;
1054         unsigned num_subctxts;
1055
1056         num_subctxts = uinfo->subctxt_cnt;
1057         if (num_subctxts > HFI1_MAX_SHARED_CTXTS) {
1058                 ret = -EINVAL;
1059                 goto bail;
1060         }
1061
1062         uctxt->subctxt_cnt = uinfo->subctxt_cnt;
1063         uctxt->subctxt_id = uinfo->subctxt_id;
1064         uctxt->active_slaves = 1;
1065         uctxt->redirect_seq_cnt = 1;
1066         set_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
1067 bail:
1068         return ret;
1069 }
1070
1071 static int setup_subctxt(struct hfi1_ctxtdata *uctxt)
1072 {
1073         int ret = 0;
1074         unsigned num_subctxts = uctxt->subctxt_cnt;
1075
1076         uctxt->subctxt_uregbase = vmalloc_user(PAGE_SIZE);
1077         if (!uctxt->subctxt_uregbase) {
1078                 ret = -ENOMEM;
1079                 goto bail;
1080         }
1081         /* We can take the size of the RcvHdr Queue from the master */
1082         uctxt->subctxt_rcvhdr_base = vmalloc_user(uctxt->rcvhdrq_size *
1083                                                   num_subctxts);
1084         if (!uctxt->subctxt_rcvhdr_base) {
1085                 ret = -ENOMEM;
1086                 goto bail_ureg;
1087         }
1088
1089         uctxt->subctxt_rcvegrbuf = vmalloc_user(uctxt->egrbufs.size *
1090                                                 num_subctxts);
1091         if (!uctxt->subctxt_rcvegrbuf) {
1092                 ret = -ENOMEM;
1093                 goto bail_rhdr;
1094         }
1095         goto bail;
1096 bail_rhdr:
1097         vfree(uctxt->subctxt_rcvhdr_base);
1098 bail_ureg:
1099         vfree(uctxt->subctxt_uregbase);
1100         uctxt->subctxt_uregbase = NULL;
1101 bail:
1102         return ret;
1103 }
1104
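/*
 * Per-open initialization: sub-context openers only wait for the master to
 * finish; the master programs the J_KEY, builds the RcvCtrl enable mask from
 * the context's capability flags, and enables receives.
 */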
1105 static int user_init(struct file *fp)
1106 {
1107         int ret;
1108         unsigned int rcvctrl_ops = 0;
1109         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1110
1111         /* make sure that the context has already been setup */
1112         /* make sure that the context has already been set up */
1113                 ret = -EFAULT;
1114                 goto done;
1115         }
1116
1117         /*
1118          * Subctxts don't need to initialize anything since master
1119          * has done it.
1120          */
1121         if (subctxt_fp(fp)) {
1122                 ret = wait_event_interruptible(uctxt->wait,
1123                         !test_bit(HFI1_CTXT_MASTER_UNINIT,
1124                         &uctxt->event_flags));
1125                 goto done;
1126         }
1127
1128         /* initialize poll variables... */
1129         uctxt->urgent = 0;
1130         uctxt->urgent_poll = 0;
1131
1132         /*
1133          * Now enable the ctxt for receive.
1134          * Some chips are set to DMA the tail register to memory
1135          * when it changes (and when the update bit transitions from
1136          * 0 to 1).  For those chips, we turn it off and then back on.
1137          * This will (very briefly) affect any other open ctxts, but the
1138          * duration is very short, and therefore isn't an issue.  We
1139          * explicitly set the in-memory tail copy to 0 beforehand, so we
1140          * don't have to wait to be sure the DMA update has happened
1141          * (chip resets head/tail to 0 on transition to enable).
1142          */
1143         if (uctxt->rcvhdrtail_kvaddr)
1144                 clear_rcvhdrtail(uctxt);
1145
1146         /* Setup J_KEY before enabling the context */
1147         hfi1_set_ctxt_jkey(uctxt->dd, uctxt->ctxt, uctxt->jkey);
1148
1149         rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
1150         if (HFI1_CAP_KGET_MASK(uctxt->flags, HDRSUPP))
1151                 rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
1152         /*
1153          * Ignore the bit in the flags for now until proper
1154          * support for multiple packets per rcv array entry is
1155          * added.
1156          */
1157         if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
1158                 rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
1159         if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
1160                 rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
1161         if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
1162                 rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
1163         if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
1164                 rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
1165         hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt->ctxt);
1166
1167         /* Notify any waiting slaves */
1168         if (uctxt->subctxt_cnt) {
1169                 clear_bit(HFI1_CTXT_MASTER_UNINIT, &uctxt->event_flags);
1170                 wake_up(&uctxt->wait);
1171         }
1172         ret = 0;
1173
1174 done:
1175         return ret;
1176 }
1177
1178 static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
1179 {
1180         struct hfi1_ctxt_info cinfo;
1181         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1182         struct hfi1_filedata *fd = fp->private_data;
1183         int ret = 0;
1184
1185         memset(&cinfo, 0, sizeof(cinfo));
1186         ret = hfi1_get_base_kinfo(uctxt, &cinfo);
1187         if (ret < 0)
1188                 goto done;
1189         cinfo.num_active = hfi1_count_active_units();
1190         cinfo.unit = uctxt->dd->unit;
1191         cinfo.ctxt = uctxt->ctxt;
1192         cinfo.subctxt = subctxt_fp(fp);
1193         cinfo.rcvtids = roundup(uctxt->egrbufs.alloced,
1194                                 uctxt->dd->rcv_entries.group_size) +
1195                 uctxt->expected_count;
1196         cinfo.credits = uctxt->sc->credits;
1197         cinfo.numa_node = uctxt->numa_id;
1198         cinfo.rec_cpu = fd->rec_cpu_num;
1199         cinfo.send_ctxt = uctxt->sc->hw_context;
1200
1201         cinfo.egrtids = uctxt->egrbufs.alloced;
1202         cinfo.rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
1203         cinfo.rcvhdrq_entsize = uctxt->rcvhdrqentsize << 2;
1204         cinfo.sdma_ring_size = user_sdma_comp_fp(fp)->nentries;
1205         cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;
1206
1207         trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, subctxt_fp(fp), cinfo);
1208         if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
1209                 ret = -EFAULT;
1210 done:
1211         return ret;
1212 }
1213
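/*
 * Context setup: the master process (or sole opener) allocates the RcvHdr
 * queue, eager buffers, sub-context areas and expected-TID tracking
 * structures; every opener then allocates its user SDMA queues.
 */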
1214 static int setup_ctxt(struct file *fp)
1215 {
1216         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1217         struct hfi1_devdata *dd = uctxt->dd;
1218         int ret = 0;
1219
1220         /*
1221          * Context should be set up only once (including allocation and
1222          * programming of eager buffers). This is done if context sharing
1223          * is not requested or by the master process.
1224          */
1225         if (!uctxt->subctxt_cnt || !subctxt_fp(fp)) {
1226                 ret = hfi1_init_ctxt(uctxt->sc);
1227                 if (ret)
1228                         goto done;
1229
1230                 /* Now allocate the RcvHdr queue and eager buffers. */
1231                 ret = hfi1_create_rcvhdrq(dd, uctxt);
1232                 if (ret)
1233                         goto done;
1234                 ret = hfi1_setup_eagerbufs(uctxt);
1235                 if (ret)
1236                         goto done;
1237                 if (uctxt->subctxt_cnt && !subctxt_fp(fp)) {
1238                         ret = setup_subctxt(uctxt);
1239                         if (ret)
1240                                 goto done;
1241                 }
1242                 /* Setup Expected Rcv memories */
1243                 uctxt->tid_pg_list = vzalloc(uctxt->expected_count *
1244                                              sizeof(struct page **));
1245                 if (!uctxt->tid_pg_list) {
1246                         ret = -ENOMEM;
1247                         goto done;
1248                 }
1249                 uctxt->physshadow = vzalloc(uctxt->expected_count *
1250                                             sizeof(*uctxt->physshadow));
1251                 if (!uctxt->physshadow) {
1252                         ret = -ENOMEM;
1253                         goto done;
1254                 }
1255                 /* allocate expected TID map and initialize the cursor */
1256                 atomic_set(&uctxt->tidcursor, 0);
1257                 uctxt->numtidgroups = uctxt->expected_count /
1258                         dd->rcv_entries.group_size;
1259                 uctxt->tidmapcnt = uctxt->numtidgroups / BITS_PER_LONG +
1260                         !!(uctxt->numtidgroups % BITS_PER_LONG);
1261                 uctxt->tidusemap = kzalloc_node(uctxt->tidmapcnt *
1262                                                 sizeof(*uctxt->tidusemap),
1263                                                 GFP_KERNEL, uctxt->numa_id);
1264                 if (!uctxt->tidusemap) {
1265                         ret = -ENOMEM;
1266                         goto done;
1267                 }
1268                 /*
1269                  * In case the number of groups is not a multiple of
1270                  * 64 (the number of groups in a tidusemap element), mark
1271                  * the extra ones as used. This will effectively make them
1272                  * permanently used and should never be assigned. Otherwise,
1273                  * the code which checks how many free groups we have will
1274                  * get completely confused about the state of the bits.
1275                  */
1276                 if (uctxt->numtidgroups % BITS_PER_LONG)
1277                         uctxt->tidusemap[uctxt->tidmapcnt - 1] =
1278                                 ~((1ULL << (uctxt->numtidgroups %
1279                                             BITS_PER_LONG)) - 1);
1280                 trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 0,
1281                                        uctxt->tidusemap, uctxt->tidmapcnt);
1282         }
1283         ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
1284         if (ret)
1285                 goto done;
1286
1287         set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
1288 done:
1289         return ret;
1290 }
1291
1292 static int get_base_info(struct file *fp, void __user *ubase, __u32 len)
1293 {
1294         struct hfi1_base_info binfo;
1295         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1296         struct hfi1_devdata *dd = uctxt->dd;
1297         ssize_t sz;
1298         unsigned offset;
1299         int ret = 0;
1300
1301         trace_hfi1_uctxtdata(uctxt->dd, uctxt);
1302
1303         memset(&binfo, 0, sizeof(binfo));
1304         binfo.hw_version = dd->revision;
1305         binfo.sw_version = HFI1_KERN_SWVERSION;
1306         binfo.bthqp = kdeth_qp;
1307         binfo.jkey = uctxt->jkey;
1308         /*
1309          * If more than 64 contexts are enabled the allocated credit
1310          * return will span two or three contiguous pages. Since we only
1311          * map the page containing the context's credit return address,
1312          * we need to calculate the offset in the proper page.
1313          */
1314         offset = ((u64)uctxt->sc->hw_free -
1315                   (u64)dd->cr_base[uctxt->numa_id].va) % PAGE_SIZE;
1316         binfo.sc_credits_addr = HFI1_MMAP_TOKEN(PIO_CRED, uctxt->ctxt,
1317                                                subctxt_fp(fp), offset);
1318         binfo.pio_bufbase = HFI1_MMAP_TOKEN(PIO_BUFS, uctxt->ctxt,
1319                                             subctxt_fp(fp),
1320                                             uctxt->sc->base_addr);
1321         binfo.pio_bufbase_sop = HFI1_MMAP_TOKEN(PIO_BUFS_SOP,
1322                                                 uctxt->ctxt,
1323                                                 subctxt_fp(fp),
1324                                                 uctxt->sc->base_addr);
1325         binfo.rcvhdr_bufbase = HFI1_MMAP_TOKEN(RCV_HDRQ, uctxt->ctxt,
1326                                                subctxt_fp(fp),
1327                                                uctxt->rcvhdrq);
1328         binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
1329                                                subctxt_fp(fp),
1330                                                uctxt->egrbufs.rcvtids[0].phys);
1331         binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
1332                                                  subctxt_fp(fp), 0);
1333         /*
1334          * user regs are at
1335          * (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
1336          */
1337         binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
1338                                             subctxt_fp(fp), 0);
1339         offset = offset_in_page((((uctxt->ctxt - dd->first_user_ctxt) *
1340                     HFI1_MAX_SHARED_CTXTS) + subctxt_fp(fp)) *
1341                   sizeof(*dd->events));
1342         binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
1343                                               subctxt_fp(fp),
1344                                               offset);
1345         binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
1346                                               subctxt_fp(fp),
1347                                               dd->status);
1348         if (HFI1_CAP_IS_USET(DMA_RTAIL))
1349                 binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
1350                                                        subctxt_fp(fp), 0);
1351         if (uctxt->subctxt_cnt) {
1352                 binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
1353                                                         uctxt->ctxt,
1354                                                         subctxt_fp(fp), 0);
1355                 binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
1356                                                          uctxt->ctxt,
1357                                                          subctxt_fp(fp), 0);
1358                 binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
1359                                                          uctxt->ctxt,
1360                                                          subctxt_fp(fp), 0);
1361         }
1362         sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
1363         if (copy_to_user(ubase, &binfo, sz))
1364                 ret = -EFAULT;
1365         return ret;
1366 }
1367
1368 static unsigned int poll_urgent(struct file *fp,
1369                                 struct poll_table_struct *pt)
1370 {
1371         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1372         struct hfi1_devdata *dd = uctxt->dd;
1373         unsigned pollflag;
1374
1375         poll_wait(fp, &uctxt->wait, pt);
1376
1377         spin_lock_irq(&dd->uctxt_lock);
1378         if (uctxt->urgent != uctxt->urgent_poll) {
1379                 pollflag = POLLIN | POLLRDNORM;
1380                 uctxt->urgent_poll = uctxt->urgent;
1381         } else {
1382                 pollflag = 0;
1383                 set_bit(HFI1_CTXT_WAITING_URG, &uctxt->event_flags);
1384         }
1385         spin_unlock_irq(&dd->uctxt_lock);
1386
1387         return pollflag;
1388 }
1389
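/*
 * Poll for the next packet: report the context readable if the receive
 * header queue is not empty, otherwise re-enable the "interrupt when a
 * packet is available" condition and mark the context as waiting.
 */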
1390 static unsigned int poll_next(struct file *fp,
1391                               struct poll_table_struct *pt)
1392 {
1393         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1394         struct hfi1_devdata *dd = uctxt->dd;
1395         unsigned pollflag;
1396
1397         poll_wait(fp, &uctxt->wait, pt);
1398
1399         spin_lock_irq(&dd->uctxt_lock);
1400         if (hdrqempty(uctxt)) {
1401                 set_bit(HFI1_CTXT_WAITING_RCV, &uctxt->event_flags);
1402                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_INTRAVAIL_ENB, uctxt->ctxt);
1403                 pollflag = 0;
1404         } else
1405                 pollflag = POLLIN | POLLRDNORM;
1406         spin_unlock_irq(&dd->uctxt_lock);
1407
1408         return pollflag;
1409 }
1410
1411 /*
1412  * Find all user contexts in use, and set the specified bit in their
1413  * event mask.
1412  * See also find_ctxt() for a similar use that is specific to send buffers.
1415  */
1416 int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
1417 {
1418         struct hfi1_ctxtdata *uctxt;
1419         struct hfi1_devdata *dd = ppd->dd;
1420         unsigned ctxt;
1421         int ret = 0;
1422         unsigned long flags;
1423
1424         if (!dd->events) {
1425                 ret = -EINVAL;
1426                 goto done;
1427         }
1428
1429         spin_lock_irqsave(&dd->uctxt_lock, flags);
1430         for (ctxt = dd->first_user_ctxt; ctxt < dd->num_rcv_contexts;
1431              ctxt++) {
1432                 uctxt = dd->rcd[ctxt];
1433                 if (uctxt) {
1434                         unsigned long *evs = dd->events +
1435                                 (uctxt->ctxt - dd->first_user_ctxt) *
1436                                 HFI1_MAX_SHARED_CTXTS;
1437                         int i;
1438                         /*
1439                          * subctxt_cnt is 0 if not shared, so handle the base
1440                          * context first, then any remaining subcontexts
1441                          */
1442                         set_bit(evtbit, evs);
1443                         for (i = 1; i < uctxt->subctxt_cnt; i++)
1444                                 set_bit(evtbit, evs + i);
1445                 }
1446         }
1447         spin_unlock_irqrestore(&dd->uctxt_lock, flags);
1448 done:
1449         return ret;
1450 }
1451
1452 /**
1453  * manage_rcvq - manage a context's receive queue
1454  * @uctxt: the context
1455  * @subctxt: the sub-context
1456  * @start_stop: action to carry out
1457  *
1458  * start_stop == 0 disables receive on the context, for use in queue
1459  * overflow conditions.  start_stop == 1 re-enables, to be used to
1460  * re-init the software copy of the head register.
1461  */
1462 static int manage_rcvq(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
1463                        int start_stop)
1464 {
1465         struct hfi1_devdata *dd = uctxt->dd;
1466         unsigned int rcvctrl_op;
1467
1468         if (subctxt)
1469                 goto bail;
1470         /* atomically enable or disable receive on the ctxt */
1471         if (start_stop) {
1472                 /*
1473                  * On enable, force in-memory copy of the tail register to
1474                  * 0, so that protocol code doesn't have to worry about
1475                  * whether or not the chip has yet updated the in-memory
1476                  * copy on return from the system call. The chip
1477                  * always resets its tail register back to 0 on a
1478                  * transition from disabled to enabled.
1479                  */
1480                 if (uctxt->rcvhdrtail_kvaddr)
1481                         clear_rcvhdrtail(uctxt);
1482                 rcvctrl_op = HFI1_RCVCTRL_CTXT_ENB;
1483         } else
1484                 rcvctrl_op = HFI1_RCVCTRL_CTXT_DIS;
1485         hfi1_rcvctrl(dd, rcvctrl_op, uctxt->ctxt);
1486         /* always; new head should be equal to new tail; see above */
1487 bail:
1488         return 0;
1489 }
1490
1491 /*
1492  * Clear the event notifier events for this context.
1493  * The user process then performs actions appropriate to the bits having
1494  * been set, if desired, and checks again in the future.
1495  */
1496 static int user_event_ack(struct hfi1_ctxtdata *uctxt, int subctxt,
1497                           unsigned long events)
1498 {
1499         int i;
1500         struct hfi1_devdata *dd = uctxt->dd;
1501         unsigned long *evs;
1502
1503         if (!dd->events)
1504                 return 0;
1505
1506         evs = dd->events + ((uctxt->ctxt - dd->first_user_ctxt) *
1507                             HFI1_MAX_SHARED_CTXTS) + subctxt;
1508
1509         for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
1510                 if (!test_bit(i, &events))
1511                         continue;
1512                 clear_bit(i, evs);
1513         }
1514         return 0;
1515 }
1516
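/*
 * Number of pages spanned by a user buffer of @len bytes starting at
 * @vaddr, allowing for the buffer not being page aligned.
 */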
1517 #define num_user_pages(vaddr, len)                                      \
1518         (1 + (((((unsigned long)(vaddr) +                               \
1519                  (unsigned long)(len) - 1) & PAGE_MASK) -               \
1520                ((unsigned long)vaddr & PAGE_MASK)) >> PAGE_SHIFT))
1521
1522 /**
1523  * tzcnt - count the number of trailing zeros in a 64bit value
1524  * @value: the value to be examined
1525  *
1526  * Returns the number of trailing least significant zeros in
1527  * the input value. If the value is zero, return the number of
1528  * bits of the value.
1529  */
1530 static inline u8 tzcnt(u64 value)
1531 {
1532         return value ? __builtin_ctzl(value) : sizeof(value) * 8;
1533 }
1534
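/*
 * Find the first clear bit in @map at or after *start and count the
 * contiguous clear bits that follow it.  On return, *start holds the
 * index of that first clear bit (or BITS_PER_LONG if there is none)
 * and the length of the free run is returned.
 */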
1535 static inline unsigned num_free_groups(unsigned long map, u16 *start)
1536 {
1537         unsigned free;
1538         u16 bitidx = *start;
1539
1540         if (bitidx >= BITS_PER_LONG)
1541                 return 0;
1542         /* "Turn off" any bits set before our bit index */
1543         map &= ~((1ULL << bitidx) - 1);
1544         free = tzcnt(map) - bitidx;
1545         while (!free && bitidx < BITS_PER_LONG) {
1546                 /* Zero out the last set bit so we look at the rest */
1547                 map &= ~(1ULL << bitidx);
1548                 /*
1549                  * Account for the previously checked bits and advance
1550                  * the bit index. We don't have to check for bitidx
1551                  * getting bigger than BITS_PER_LONG here as it would
1552                  * mean extra instructions that we don't need. If it
1553                  * did happen, it would push free to a negative value
1554                  * which would break the loop.
1555                  */
1556                 free = tzcnt(map) - ++bitidx;
1557         }
1558         *start = bitidx;
1559         return free;
1560 }
1561
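/*
 * Set up expected receive (TID) entries for a user buffer: pin the user
 * pages, DMA-map them, and program them into free RcvArray groups,
 * building the TID pair list and TID use map that are copied back to
 * user space.  The buffer is mapped only as far as free RcvArray groups
 * allow; the mapped length is reported in tinfo->length.
 */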
1562 static int exp_tid_setup(struct file *fp, struct hfi1_tid_info *tinfo)
1563 {
1564         int ret = 0;
1565         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1566         struct hfi1_devdata *dd = uctxt->dd;
1567         unsigned tid, mapped = 0, npages, ngroups, exp_groups,
1568                 tidpairs = uctxt->expected_count / 2;
1569         struct page **pages;
1570         unsigned long vaddr, tidmap[uctxt->tidmapcnt];
1571         dma_addr_t *phys;
1572         u32 tidlist[tidpairs], pairidx = 0, tidcursor;
1573         u16 useidx, idx, bitidx, tidcnt = 0;
1574
1575         vaddr = tinfo->vaddr;
1576
1577         if (offset_in_page(vaddr)) {
1578                 ret = -EINVAL;
1579                 goto bail;
1580         }
1581
1582         npages = num_user_pages(vaddr, tinfo->length);
1583         if (!npages) {
1584                 ret = -EINVAL;
1585                 goto bail;
1586         }
1587         if (!access_ok(VERIFY_WRITE, (void __user *)vaddr,
1588                        npages * PAGE_SIZE)) {
1589                 dd_dev_err(dd, "Fail vaddr %p, %u pages, !access_ok\n",
1590                            (void *)vaddr, npages);
1591                 ret = -EFAULT;
1592                 goto bail;
1593         }
1594
1595         memset(tidmap, 0, sizeof(tidmap[0]) * uctxt->tidmapcnt);
1596         memset(tidlist, 0, sizeof(tidlist[0]) * tidpairs);
1597
1598         exp_groups = uctxt->expected_count / dd->rcv_entries.group_size;
1599         /* which group set do we look at first? */
1600         tidcursor = atomic_read(&uctxt->tidcursor);
1601         useidx = (tidcursor >> 16) & 0xffff;
1602         bitidx = tidcursor & 0xffff;
1603
1604         /*
1605          * Keep going until we've mapped all pages or we've exhausted all
1606          * RcvArray entries.
1607          * This iterates over the number of tidmaps + 1
1608          * (idx <= uctxt->tidmapcnt) so that the bitmap we started from is
1609          * checked one more time for any free bits before the
1610          * starting point bit.
1611          */
1612         for (mapped = 0, idx = 0;
1613              mapped < npages && idx <= uctxt->tidmapcnt;) {
1614                 u64 i, offset = 0;
1615                 unsigned free, pinned, pmapped = 0, bits_used;
1616                 u16 grp;
1617
1618                 /*
1619                  * "Reserve" the needed group bits under lock so other
1620                  * processes can't step in the middle of it. Once
1621                  * reserved, we don't need the lock anymore since we
1622                  * are guaranteed the groups.
1623                  */
1624                 spin_lock(&uctxt->exp_lock);
1625                 if (uctxt->tidusemap[useidx] == -1ULL ||
1626                     bitidx >= BITS_PER_LONG) {
1627                         /* no free groups in the set, use the next */
1628                         useidx = (useidx + 1) % uctxt->tidmapcnt;
1629                         idx++;
1630                         bitidx = 0;
1631                         spin_unlock(&uctxt->exp_lock);
1632                         continue;
1633                 }
1634                 ngroups = ((npages - mapped) / dd->rcv_entries.group_size) +
1635                         !!((npages - mapped) % dd->rcv_entries.group_size);
1636
1637                 /*
1638                  * If we've gotten here, the current set of groups does have
1639                  * one or more free groups.
1640                  */
1641                 free = num_free_groups(uctxt->tidusemap[useidx], &bitidx);
1642                 if (!free) {
1643                         /*
1644                          * Despite the check above, free could still come back
1645                          * as 0 because we don't check the entire bitmap but
1646                          * we start from bitidx.
1647                          */
1648                         spin_unlock(&uctxt->exp_lock);
1649                         continue;
1650                 }
1651                 bits_used = min(free, ngroups);
1652                 tidmap[useidx] |= ((1ULL << bits_used) - 1) << bitidx;
1653                 uctxt->tidusemap[useidx] |= tidmap[useidx];
1654                 spin_unlock(&uctxt->exp_lock);
1655
1656                 /*
1657                  * At this point, we know where in the map we have free bits.
1658                  * Properly offset into the various "shadow" arrays and compute
1659                  * the RcvArray entry index.
1660                  */
1661                 offset = ((useidx * BITS_PER_LONG) + bitidx) *
1662                         dd->rcv_entries.group_size;
1663                 pages = uctxt->tid_pg_list + offset;
1664                 phys = uctxt->physshadow + offset;
1665                 tid = uctxt->expected_base + offset;
1666
1667                 /* Calculate how many pages we can pin based on free bits */
1668                 pinned = min((bits_used * dd->rcv_entries.group_size),
1669                              (npages - mapped));
1670                 /*
1671                  * Now that we know how many free RcvArray entries we have,
1672                  * we can pin that many user pages.
1673                  */
1674                 ret = hfi1_get_user_pages(vaddr + (mapped * PAGE_SIZE),
1675                                           pinned, pages);
1676                 if (ret) {
1677                         /*
1678                          * We can't continue because the pages array won't be
1679                          * initialized. This should never happen,
1680                          * unless perhaps the user has pinned the pages
1681                          * themselves.
1682                          */
1683                         dd_dev_info(dd,
1684                                     "Failed to lock addr %p, %u pages: errno %d\n",
1685                                     (void *) vaddr, pinned, -ret);
1686                         /*
1687                          * Let go of the bits that we reserved since we are not
1688                          * going to use them.
1689                          */
1690                         spin_lock(&uctxt->exp_lock);
1691                         uctxt->tidusemap[useidx] &=
1692                                 ~(((1ULL << bits_used) - 1) << bitidx);
1693                         spin_unlock(&uctxt->exp_lock);
1694                         goto done;
1695                 }
1696                 /*
1697                  * How many groups do we need based on how many pages we have
1698                  * pinned?
1699                  */
1700                 ngroups = (pinned / dd->rcv_entries.group_size) +
1701                         !!(pinned % dd->rcv_entries.group_size);
1702                 /*
1703                  * Keep programming RcvArray entries for all the <ngroups> free
1704                  * groups.
1705                  */
1706                 for (i = 0, grp = 0; grp < ngroups; i++, grp++) {
1707                         unsigned j;
1708                         u32 pair_size = 0, tidsize;
1709                         /*
1710                          * This inner loop will program an entire group or the
1711                          * array of pinned pages (whichever limit is hit
1712                          * first).
1713                          */
1714                         for (j = 0; j < dd->rcv_entries.group_size &&
1715                                      pmapped < pinned; j++, pmapped++, tid++) {
1716                                 tidsize = PAGE_SIZE;
1717                                 phys[pmapped] = hfi1_map_page(dd->pcidev,
1718                                                    pages[pmapped], 0,
1719                                                    tidsize, PCI_DMA_FROMDEVICE);
1720                                 trace_hfi1_exp_rcv_set(uctxt->ctxt,
1721                                                        subctxt_fp(fp),
1722                                                        tid, vaddr,
1723                                                        phys[pmapped],
1724                                                        pages[pmapped]);
1725                                 /*
1726                                  * Each RcvArray entry is programmed with one
1727                                  * page's worth of memory. This will handle
1728                                  * the 8K MTU as well as anything smaller
1729                                  * due to the fact that both entries in the
1730                                  * RcvTidPair are programmed with a page.
1731                                  * PSM currently does not handle anything
1732                                  * bigger than 8K MTU, so should we even worry
1733                                  * about 10K here?
1734                                  */
1735                                 hfi1_put_tid(dd, tid, PT_EXPECTED,
1736                                              phys[pmapped],
1737                                              ilog2(tidsize >> PAGE_SHIFT) + 1);
1738                                 pair_size += tidsize >> PAGE_SHIFT;
1739                                 EXP_TID_RESET(tidlist[pairidx], LEN, pair_size);
1740                                 if (!(tid % 2)) {
1741                                         tidlist[pairidx] |=
1742                                            EXP_TID_SET(IDX,
1743                                                 (tid - uctxt->expected_base)
1744                                                        / 2);
1745                                         tidlist[pairidx] |=
1746                                                 EXP_TID_SET(CTRL, 1);
1747                                         tidcnt++;
1748                                 } else {
1749                                         tidlist[pairidx] |=
1750                                                 EXP_TID_SET(CTRL, 2);
1751                                         pair_size = 0;
1752                                         pairidx++;
1753                                 }
1754                         }
1755                         /*
1756                          * We've programmed the entire group (or as much of the
1757                          * group as we'll use). Now, it's time to push it out...
1758                          */
1759                         flush_wc();
1760                 }
1761                 mapped += pinned;
1762                 atomic_set(&uctxt->tidcursor,
1763                            (((useidx & 0xffffff) << 16) |
1764                             ((bitidx + bits_used) & 0xffffff)));
1765         }
1766         trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 0, uctxt->tidusemap,
1767                                uctxt->tidmapcnt);
1768
1769 done:
1770         /* If we've mapped anything, copy relevant info to user */
1771         if (mapped) {
1772                 if (copy_to_user((void __user *)(unsigned long)tinfo->tidlist,
1773                                  tidlist, sizeof(tidlist[0]) * tidcnt)) {
1774                         ret = -EFAULT;
1775                         goto done;
1776                 }
1777                 /* copy TID info to user */
1778                 if (copy_to_user((void __user *)(unsigned long)tinfo->tidmap,
1779                                  tidmap, sizeof(tidmap[0]) * uctxt->tidmapcnt))
1780                         ret = -EFAULT;
1781         }
1782 bail:
1783         /*
1784          * Calculate mapped length. New Exp TID protocol does not "unwind" and
1785          * report an error if it can't map the entire buffer. It just reports
1786          * the length that was mapped.
1787          */
1788         tinfo->length = mapped * PAGE_SIZE;
1789         tinfo->tidcnt = tidcnt;
1790         return ret;
1791 }
1792
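/*
 * Free the expected receive (TID) entries described by the user's TID
 * map: invalidate each programmed RcvArray entry, DMA-unmap and release
 * the pinned pages, and clear the corresponding bits in the context's
 * TID use map.
 */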
1793 static int exp_tid_free(struct file *fp, struct hfi1_tid_info *tinfo)
1794 {
1795         struct hfi1_ctxtdata *uctxt = ctxt_fp(fp);
1796         struct hfi1_devdata *dd = uctxt->dd;
1797         unsigned long tidmap[uctxt->tidmapcnt];
1798         struct page **pages;
1799         dma_addr_t *phys;
1800         u16 idx, bitidx, tid;
1801         int ret = 0;
1802
1803         if (copy_from_user(&tidmap, (void __user *)(unsigned long)
1804                            tinfo->tidmap,
1805                            sizeof(tidmap[0]) * uctxt->tidmapcnt)) {
1806                 ret = -EFAULT;
1807                 goto done;
1808         }
1809         for (idx = 0; idx < uctxt->tidmapcnt; idx++) {
1810                 unsigned long map;
1811
1812                 bitidx = 0;
1813                 if (!tidmap[idx])
1814                         continue;
1815                 map = tidmap[idx];
1816                 while ((bitidx = tzcnt(map)) < BITS_PER_LONG) {
1817                         int i, pcount = 0;
1818                         struct page *pshadow[dd->rcv_entries.group_size];
1819                         unsigned offset = ((idx * BITS_PER_LONG) + bitidx) *
1820                                 dd->rcv_entries.group_size;
1821
1822                         pages = uctxt->tid_pg_list + offset;
1823                         phys = uctxt->physshadow + offset;
1824                         tid = uctxt->expected_base + offset;
1825                         for (i = 0; i < dd->rcv_entries.group_size;
1826                              i++, tid++) {
1827                                 if (pages[i]) {
1828                                         hfi1_put_tid(dd, tid, PT_INVALID,
1829                                                       0, 0);
1830                                         trace_hfi1_exp_rcv_free(uctxt->ctxt,
1831                                                                 subctxt_fp(fp),
1832                                                                 tid, phys[i],
1833                                                                 pages[i]);
1834                                         pci_unmap_page(dd->pcidev, phys[i],
1835                                               PAGE_SIZE, PCI_DMA_FROMDEVICE);
1836                                         pshadow[pcount] = pages[i];
1837                                         pages[i] = NULL;
1838                                         pcount++;
1839                                         phys[i] = 0;
1840                                 }
1841                         }
1842                         flush_wc();
1843                         hfi1_release_user_pages(pshadow, pcount);
1844                         clear_bit(bitidx, &uctxt->tidusemap[idx]);
1845                         map &= ~(1ULL<<bitidx);
1846                 }
1847         }
1848         trace_hfi1_exp_tid_map(uctxt->ctxt, subctxt_fp(fp), 1, uctxt->tidusemap,
1849                                uctxt->tidmapcnt);
1850 done:
1851         return ret;
1852 }
1853
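/*
 * Release any expected receive pages that are still DMA-mapped and
 * pinned for this context.
 */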
1854 static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt)
1855 {
1856         struct hfi1_devdata *dd = uctxt->dd;
1857         unsigned tid;
1858
1859         dd_dev_info(dd, "ctxt %u unlocking any locked expTID pages\n",
1860                     uctxt->ctxt);
1861         for (tid = 0; tid < uctxt->expected_count; tid++) {
1862                 struct page *p = uctxt->tid_pg_list[tid];
1863                 dma_addr_t phys;
1864
1865                 if (!p)
1866                         continue;
1867
1868                 phys = uctxt->physshadow[tid];
1869                 uctxt->physshadow[tid] = 0;
1870                 uctxt->tid_pg_list[tid] = NULL;
1871                 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
1872                 hfi1_release_user_pages(&p, 1);
1873         }
1874 }
1875
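/*
 * Set the partition key for a user context.  The management pkeys are
 * rejected, and the requested pkey must already be present in the
 * port's pkey table.
 */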
1876 static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned subctxt,
1877                          u16 pkey)
1878 {
1879         int ret = -ENOENT, i, intable = 0;
1880         struct hfi1_pportdata *ppd = uctxt->ppd;
1881         struct hfi1_devdata *dd = uctxt->dd;
1882
1883         if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
1884                 ret = -EINVAL;
1885                 goto done;
1886         }
1887
1888         for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
1889                 if (pkey == ppd->pkeys[i]) {
1890                         intable = 1;
1891                         break;
1892                 }
1893
1894         if (intable)
1895                 ret = hfi1_set_ctxt_pkey(dd, uctxt->ctxt, pkey);
1896 done:
1897         return ret;
1898 }
1899
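/*
 * The "ui" char device gives raw 8-byte read/write access to the chip
 * CSR space and, for reads, to the DC8051 data memory that follows it
 * in the file offset space.
 */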
1900 static int ui_open(struct inode *inode, struct file *filp)
1901 {
1902         struct hfi1_devdata *dd;
1903
1904         dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
1905         filp->private_data = dd; /* for other methods */
1906         return 0;
1907 }
1908
1909 static int ui_release(struct inode *inode, struct file *filp)
1910 {
1911         /* nothing to do */
1912         return 0;
1913 }
1914
1915 static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
1916 {
1917         struct hfi1_devdata *dd = filp->private_data;
1918
1919         switch (whence) {
1920         case SEEK_SET:
1921                 break;
1922         case SEEK_CUR:
1923                 offset += filp->f_pos;
1924                 break;
1925         case SEEK_END:
1926                 offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
1927                         offset;
1928                 break;
1929         default:
1930                 return -EINVAL;
1931         }
1932
1933         if (offset < 0)
1934                 return -EINVAL;
1935
1936         if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
1937                 return -EINVAL;
1938
1939         filp->f_pos = offset;
1940
1941         return filp->f_pos;
1942 }
1943
1944
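/*
 * Read CSRs (and 8051 data memory) in 8-byte quantities.  LCB CSRs and
 * the ASIC GPIO/QSFP clear and force registers need special handling;
 * see below.
 */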
1945 /* NOTE: assumes unsigned long is 8 bytes */
1946 static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
1947                         loff_t *f_pos)
1948 {
1949         struct hfi1_devdata *dd = filp->private_data;
1950         void __iomem *base = dd->kregbase;
1951         unsigned long total, csr_off,
1952                 barlen = (dd->kregend - dd->kregbase);
1953         u64 data;
1954
1955         /* only read 8 byte quantities */
1956         if ((count % 8) != 0)
1957                 return -EINVAL;
1958         /* offset must be 8-byte aligned */
1959         if ((*f_pos % 8) != 0)
1960                 return -EINVAL;
1961         /* destination buffer must be 8-byte aligned */
1962         if ((unsigned long)buf % 8 != 0)
1963                 return -EINVAL;
1964         /* must be in range */
1965         if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
1966                 return -EINVAL;
1967         /* only set the base if we are not starting past the BAR */
1968         if (*f_pos < barlen)
1969                 base += *f_pos;
1970         csr_off = *f_pos;
1971         for (total = 0; total < count; total += 8, csr_off += 8) {
1972                 /* accessing LCB CSRs requires more checks */
1973                 if (is_lcb_offset(csr_off)) {
1974                         if (read_lcb_csr(dd, csr_off, (u64 *)&data))
1975                                 break; /* failed */
1976                 }
1977                 /*
1978                  * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
1979                  * false parity error.  Avoid the whole issue by not reading
1980                  * them.  These registers are defined as having a read value
1981                  * of 0.
1982                  */
1983                 else if (csr_off == ASIC_GPIO_CLEAR
1984                                 || csr_off == ASIC_GPIO_FORCE
1985                                 || csr_off == ASIC_QSFP1_CLEAR
1986                                 || csr_off == ASIC_QSFP1_FORCE
1987                                 || csr_off == ASIC_QSFP2_CLEAR
1988                                 || csr_off == ASIC_QSFP2_FORCE)
1989                         data = 0;
1990                 else if (csr_off >= barlen) {
1991                         /*
1992                          * read_8051_data can read more than just 8 bytes at
1993                          * a time. However, folding this into the loop and
1994                          * handling the reads in 8 byte increments allows us
1995                          * to smoothly transition from chip memory to 8051
1996                          * memory.
1997                          */
1998                         if (read_8051_data(dd,
1999                                            (u32)(csr_off - barlen),
2000                                            sizeof(data), &data))
2001                                 break; /* failed */
2002                 } else
2003                         data = readq(base + total);
2004                 if (put_user(data, (unsigned long __user *)(buf + total)))
2005                         break;
2006         }
2007         *f_pos += total;
2008         return total;
2009 }
2010
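/*
 * Write CSRs in 8-byte quantities.  Writes that fall in the LCB CSR
 * range are bracketed by acquire_lcb_access()/release_lcb_access().
 */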
2011 /* NOTE: assumes unsigned long is 8 bytes */
2012 static ssize_t ui_write(struct file *filp, const char __user *buf,
2013                         size_t count, loff_t *f_pos)
2014 {
2015         struct hfi1_devdata *dd = filp->private_data;
2016         void __iomem *base;
2017         unsigned long total, data, csr_off;
2018         int in_lcb;
2019
2020         /* only write 8 byte quantities */
2021         if ((count % 8) != 0)
2022                 return -EINVAL;
2023         /* offset must be 8-byte aligned */
2024         if ((*f_pos % 8) != 0)
2025                 return -EINVAL;
2026         /* source buffer must be 8-byte aligned */
2027         if ((unsigned long)buf % 8 != 0)
2028                 return -EINVAL;
2029         /* must be in range */
2030         if (*f_pos + count > dd->kregend - dd->kregbase)
2031                 return -EINVAL;
2032
2033         base = (void __iomem *)dd->kregbase + *f_pos;
2034         csr_off = *f_pos;
2035         in_lcb = 0;
2036         for (total = 0; total < count; total += 8, csr_off += 8) {
2037                 if (get_user(data, (unsigned long __user *)(buf + total)))
2038                         break;
2039                 /* accessing LCB CSRs requires a special procedure */
2040                 if (is_lcb_offset(csr_off)) {
2041                         if (!in_lcb) {
2042                                 int ret = acquire_lcb_access(dd, 1);
2043
2044                                 if (ret)
2045                                         break;
2046                                 in_lcb = 1;
2047                         }
2048                 } else {
2049                         if (in_lcb) {
2050                                 release_lcb_access(dd, 1);
2051                                 in_lcb = 0;
2052                         }
2053                 }
2054                 writeq(data, base + total);
2055         }
2056         if (in_lcb)
2057                 release_lcb_access(dd, 1);
2058         *f_pos += total;
2059         return total;
2060 }
2061
2062 static const struct file_operations ui_file_ops = {
2063         .owner = THIS_MODULE,
2064         .llseek = ui_lseek,
2065         .read = ui_read,
2066         .write = ui_write,
2067         .open = ui_open,
2068         .release = ui_release,
2069 };
2070
2071 #define UI_OFFSET 192   /* device minor offset for UI devices */
2072 static int create_ui = 1;
2073
2074 static struct cdev wildcard_cdev;
2075 static struct device *wildcard_device;
2076
2077 static atomic_t user_count = ATOMIC_INIT(0);
2078
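/*
 * user_count tracks how many units have user devices registered.  The
 * first user_add() also creates the shared wildcard device; the last
 * user_remove() tears it down.
 */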
2079 static void user_remove(struct hfi1_devdata *dd)
2080 {
2081         if (atomic_dec_return(&user_count) == 0)
2082                 hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);
2083
2084         hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
2085         hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
2086 }
2087
2088 static int user_add(struct hfi1_devdata *dd)
2089 {
2090         char name[10];
2091         int ret;
2092
2093         if (atomic_inc_return(&user_count) == 1) {
2094                 ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
2095                                      &wildcard_cdev, &wildcard_device,
2096                                      true);
2097                 if (ret)
2098                         goto done;
2099         }
2100
2101         snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
2102         ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
2103                              &dd->user_cdev, &dd->user_device,
2104                              true);
2105         if (ret)
2106                 goto done;
2107
2108         if (create_ui) {
2109                 snprintf(name, sizeof(name),
2110                          "%s_ui%d", class_name(), dd->unit);
2111                 ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
2112                                      &dd->ui_cdev, &dd->ui_device,
2113                                      false);
2114                 if (ret)
2115                         goto done;
2116         }
2117
2118         return 0;
2119 done:
2120         user_remove(dd);
2121         return ret;
2122 }
2123
2124 /*
2125  * Create per-unit files in /dev
2126  */
2127 int hfi1_device_create(struct hfi1_devdata *dd)
2128 {
2129         int r, ret;
2130
2131         r = user_add(dd);
2132         ret = hfi1_diag_add(dd);
2133         if (r && !ret)
2134                 ret = r;
2135         return ret;
2136 }
2137
2138 /*
2139  * Remove per-unit files in /dev
2140  * void; the core kernel returns no errors for this stuff
2141  */
2142 void hfi1_device_remove(struct hfi1_devdata *dd)
2143 {
2144         user_remove(dd);
2145         hfi1_diag_remove(dd);
2146 }