Add the rt linux 4.1.3-rt3 as base
[kvmfornfv.git] / kernel / drivers / infiniband / hw / usnic / usnic_ib_verbs.c
1 /*
2  * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
3  *
4  * This program is free software; you may redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; version 2 of the License.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15  * SOFTWARE.
16  *
17  */
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/slab.h>
21 #include <linux/errno.h>
22
23 #include <rdma/ib_user_verbs.h>
24 #include <rdma/ib_addr.h>
25
26 #include "usnic_abi.h"
27 #include "usnic_ib.h"
28 #include "usnic_common_util.h"
29 #include "usnic_ib_qp_grp.h"
30 #include "usnic_fwd.h"
31 #include "usnic_log.h"
32 #include "usnic_uiom.h"
33 #include "usnic_transport.h"
34
35 #define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM
36
/*
 * Collapse the firmware version string into the u64 expected by
 * ib_device_attr.fw_ver.  Only the first character of the string is
 * kept (note: *fw_ver_str, not the whole string), so this is a very
 * lossy encoding used purely to report something non-zero.
 */
static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver)
{
	*fw_ver = (u64) *fw_ver_str;
}
41
/*
 * Fill in and copy the create_qp response for userspace: the VF index,
 * BAR0 bus address/length (userspace mmaps BAR0 to drive the queues
 * directly), the vnic indices of the RQ/WQ/CQ resources, and the
 * transport type of the default flow.
 *
 * Must be called after the qp_grp's resources and at least one flow
 * have been set up.  Returns 0 on success or a negative errno.
 */
static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
					struct ib_udata *udata)
{
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_create_qp_resp resp;
	struct pci_dev *pdev;
	struct vnic_dev_bar *bar;
	struct usnic_vnic_res_chunk *chunk;
	struct usnic_ib_qp_grp_flow *default_flow;
	int i, err;

	memset(&resp, 0, sizeof(resp));

	us_ibdev = qp_grp->vf->pf;
	pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic);
	if (!pdev) {
		usnic_err("Failed to get pdev of qp_grp %d\n",
				qp_grp->grp_id);
		return -EFAULT;
	}

	bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0);
	if (!bar) {
		usnic_err("Failed to get bar0 of qp_grp %d vf %s",
				qp_grp->grp_id, pci_name(pdev));
		return -EFAULT;
	}

	resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic);
	resp.bar_bus_addr = bar->bus_addr;
	resp.bar_len = bar->len;

	/*
	 * Export the vnic index of every RQ, WQ and CQ resource chunk.
	 * NOTE(review): chunk->cnt is assumed to fit within the fixed
	 * resp.*_idx[] arrays in usnic_abi.h — verify against the ABI.
	 */
	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ);
	resp.rq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.rq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ);
	resp.wq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.wq_idx[i] = chunk->res[i]->vnic_idx;

	chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ);
	if (IS_ERR_OR_NULL(chunk)) {
		usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n",
			usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ),
			qp_grp->grp_id,
			PTR_ERR(chunk));
		return chunk ? PTR_ERR(chunk) : -ENOMEM;
	}

	WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
	resp.cq_cnt = chunk->cnt;
	for (i = 0; i < chunk->cnt; i++)
		resp.cq_idx[i] = chunk->res[i]->vnic_idx;

	/* The first flow on the list is the default one created with the QP. */
	default_flow = list_first_entry(&qp_grp->flows_lst,
					struct usnic_ib_qp_grp_flow, link);
	resp.transport = default_flow->trans_type;

	err = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (err) {
		usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
		return err;
	}

	return 0;
}
128
/*
 * Find a VF with room for @res_spec and create a qp_grp on it.
 *
 * Two passes: if usnic_ib_share_vf is set, first try VFs already in use
 * by this PD (sharing); otherwise, or on failure, fall back to a
 * completely unused VF.  Caller must hold us_ibdev->usdev_lock.
 *
 * Locking: when a candidate VF is found, both loops break with
 * vf->lock still held; it is released only after usnic_ib_qp_grp_create
 * so the vnic's free room cannot be stolen in between.
 *
 * Returns the new qp_grp, an ERR_PTR, or NULL when there are no VFs at
 * all — callers must use IS_ERR_OR_NULL().
 */
static struct usnic_ib_qp_grp*
find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
				struct usnic_ib_pd *pd,
				struct usnic_transport_spec *trans_spec,
				struct usnic_vnic_res_spec *res_spec)
{
	struct usnic_ib_vf *vf;
	struct usnic_vnic *vnic;
	struct usnic_ib_qp_grp *qp_grp;
	struct device *dev, **dev_list;
	int i, found = 0;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	if (list_empty(&us_ibdev->vf_dev_list)) {
		usnic_info("No vfs to allocate\n");
		return NULL;
	}

	if (usnic_ib_share_vf) {
		/* Try to find resouces on a used vf which is in pd */
		/* NOTE(review): dev_list is assumed non-NULL/non-ERR here —
		 * verify usnic_uiom_get_dev_list()'s failure behavior. */
		dev_list = usnic_uiom_get_dev_list(pd->umem_pd);
		for (i = 0; dev_list[i]; i++) {
			dev = dev_list[i];
			vf = pci_get_drvdata(to_pci_dev(dev));
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			/* check_room() returns 0 when the spec fits */
			if (!usnic_vnic_check_room(vnic, res_spec)) {
				usnic_dbg("Found used vnic %s from %s\n",
						us_ibdev->ib_dev.name,
						pci_name(usnic_vnic_get_pdev(
									vnic)));
				found = 1;
				break;	/* vf->lock intentionally kept held */
			}
			spin_unlock(&vf->lock);

		}
		usnic_uiom_free_dev_list(dev_list);
	}

	if (!found) {
		/* Try to find resources on an unused vf */
		list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
			spin_lock(&vf->lock);
			vnic = vf->vnic;
			if (vf->qp_grp_ref_cnt == 0 &&
				usnic_vnic_check_room(vnic, res_spec) == 0) {
				found = 1;
				break;	/* vf->lock intentionally kept held */
			}
			spin_unlock(&vf->lock);
		}
	}

	if (!found) {
		usnic_info("No free qp grp found on %s\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-ENOMEM);
	}

	qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec,
						trans_spec);
	spin_unlock(&vf->lock);
	if (IS_ERR_OR_NULL(qp_grp)) {
		usnic_err("Failed to allocate qp_grp\n");
		return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
	}

	return qp_grp;
}
200
/*
 * Tear down a qp_grp under its VF's lock.  The group must already be
 * in IB_QPS_RESET (see usnic_ib_destroy_qp, which moves it there first).
 */
static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
{
	struct usnic_ib_vf *vf = qp_grp->vf;

	WARN_ON(qp_grp->state != IB_QPS_RESET);

	spin_lock(&vf->lock);
	usnic_ib_qp_grp_destroy(qp_grp);
	spin_unlock(&vf->lock);
}
211
212 static void eth_speed_to_ib_speed(int speed, u8 *active_speed,
213                                         u8 *active_width)
214 {
215         if (speed <= 10000) {
216                 *active_width = IB_WIDTH_1X;
217                 *active_speed = IB_SPEED_FDR10;
218         } else if (speed <= 20000) {
219                 *active_width = IB_WIDTH_4X;
220                 *active_speed = IB_SPEED_DDR;
221         } else if (speed <= 30000) {
222                 *active_width = IB_WIDTH_4X;
223                 *active_speed = IB_SPEED_QDR;
224         } else if (speed <= 40000) {
225                 *active_width = IB_WIDTH_4X;
226                 *active_speed = IB_SPEED_FDR10;
227         } else {
228                 *active_width = IB_WIDTH_4X;
229                 *active_speed = IB_SPEED_EDR;
230         }
231 }
232
233 static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
234 {
235         if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN ||
236                         cmd.spec.trans_type >= USNIC_TRANSPORT_MAX)
237                 return -EINVAL;
238
239         return 0;
240 }
241
242 /* Start of ib callback functions */
243
/* usNIC runs over Ethernet only; every port is RoCE-style Ethernet. */
enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device,
						u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}
249
/*
 * ib_device query callback: report device-wide attributes.  Most limits
 * are derived from the per-VF resource counts multiplied by the number
 * of VFs; attributes usNIC does not support are explicitly zeroed.
 */
int usnic_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	union ib_gid gid;
	struct ethtool_drvinfo info;
	struct ethtool_cmd cmd;
	int qp_per_vf;

	usnic_dbg("\n");
	mutex_lock(&us_ibdev->usdev_lock);
	/* NOTE(review): ethtool_ops and these two callbacks are assumed
	 * non-NULL for the enic netdev — confirm; no guard here. */
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
	memset(props, 0, sizeof(*props));
	/* System image GUID is derived from the interface-id half of the
	 * MAC+IP based GID. */
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid.raw[0]);
	memcpy(&props->sys_image_guid, &gid.global.interface_id,
		sizeof(gid.global.interface_id));
	usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver);
	props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE;
	props->page_size_cap = USNIC_UIOM_PAGE_SIZE;
	props->vendor_id = PCI_VENDOR_ID_CISCO;
	props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC;
	props->hw_ver = us_ibdev->pdev->subsystem_device;
	/* A QP consumes one WQ and one RQ, so per-VF QPs are bounded by
	 * the larger of the two counts. */
	qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ],
			us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]);
	props->max_qp = qp_per_vf *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] *
		atomic_read(&us_ibdev->vf_cnt.refcount);
	props->max_pd = USNIC_UIOM_MAX_PD_CNT;
	props->max_mr = USNIC_UIOM_MAX_MR_CNT;
	props->local_ca_ack_delay = 0;
	props->max_pkeys = 0;
	/* No atomics, SRQs, multicast or FMRs on usNIC. */
	props->atomic_cap = IB_ATOMIC_NONE;
	props->masked_atomic_cap = props->atomic_cap;
	props->max_qp_rd_atom = 0;
	props->max_qp_init_rd_atom = 0;
	props->max_res_rd_atom = 0;
	props->max_srq = 0;
	props->max_srq_wr = 0;
	props->max_srq_sge = 0;
	props->max_fast_reg_page_list_len = 0;
	props->max_mcast_grp = 0;
	props->max_mcast_qp_attach = 0;
	props->max_total_mcast_qp_attach = 0;
	props->max_map_per_fmr = 0;
	/* Owned by Userspace
	 * max_qp_wr, max_sge, max_sge_rd, max_cqe */
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
305
/*
 * ib_device query callback: report per-port attributes.  Port state is
 * synthesized from the underlying Ethernet link and whether an IP
 * address has been assigned (no IP -> INIT, link down -> DOWN).
 */
int usnic_ib_query_port(struct ib_device *ibdev, u8 port,
				struct ib_port_attr *props)
{
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	struct ethtool_cmd cmd;

	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd);
	memset(props, 0, sizeof(*props));

	/* No subnet manager concepts on Ethernet. */
	props->lid = 0;
	props->lmc = 1;
	props->sm_lid = 0;
	props->sm_sl = 0;

	/* phys_state values 3/4/5 presumably map to the IB spec's
	 * Disabled/Training/LinkUp — confirm against the IBTA spec. */
	if (!us_ibdev->ufdev->link_up) {
		props->state = IB_PORT_DOWN;
		props->phys_state = 3;
	} else if (!us_ibdev->ufdev->inaddr) {
		props->state = IB_PORT_INIT;
		props->phys_state = 4;
	} else {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	props->port_cap_flags = 0;
	props->gid_tbl_len = 1;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	eth_speed_to_ib_speed(cmd.speed, &props->active_speed,
				&props->active_width);
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu);
	/* Userspace will adjust for hdrs */
	props->max_msg_sz = us_ibdev->ufdev->mtu;
	props->max_vl_num = 1;
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
350
/*
 * ib_qp query callback.  Only UD QPs exist on usNIC; everything except
 * the (current) QP state and a zero qkey is reported as zero.
 *
 * Returns 0, or -EINVAL for an unexpected QP type.
 */
int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	int err;

	usnic_dbg("\n");

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	usnic_dbg("\n");
	qp_attr->qp_state = qp_grp->state;
	qp_attr->cur_qp_state = qp_grp->state;

	switch (qp_grp->ibqp.qp_type) {
	case IB_QPT_UD:
		qp_attr->qkey = 0;
		break;
	default:
		usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type);
		err = -EINVAL;
		goto err_out;
	}

	mutex_unlock(&vf->pf->usdev_lock);
	return 0;

err_out:
	mutex_unlock(&vf->pf->usdev_lock);
	return err;
}
388
/*
 * ib_device query callback: return the port GID, which usNIC builds
 * from the interface MAC and IP address.  Only indices 0 and 1 are
 * valid; both return the same GID.
 */
int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
				union ib_gid *gid)
{

	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	usnic_dbg("\n");

	if (index > 1)
		return -EINVAL;

	mutex_lock(&us_ibdev->usdev_lock);
	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
	usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr,
			&gid->raw[0]);
	mutex_unlock(&us_ibdev->usdev_lock);

	return 0;
}
407
408 int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
409                                 u16 *pkey)
410 {
411         if (index > 1)
412                 return -EINVAL;
413
414         *pkey = 0xffff;
415         return 0;
416 }
417
/*
 * Allocate a protection domain.  The PD wraps a uiom PD, which tracks
 * the devices and memory registrations associated with it.
 *
 * Returns the new ib_pd or an ERR_PTR on failure.
 */
struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
					struct ib_ucontext *context,
					struct ib_udata *udata)
{
	struct usnic_ib_pd *pd;
	void *umem_pd;

	usnic_dbg("\n");

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
	if (IS_ERR_OR_NULL(umem_pd)) {
		kfree(pd);
		return ERR_PTR(umem_pd ? PTR_ERR(umem_pd) : -ENOMEM);
	}

	usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
			pd, context, ibdev->name);
	return &pd->ibpd;
}
441
442 int usnic_ib_dealloc_pd(struct ib_pd *pd)
443 {
444         usnic_info("freeing domain 0x%p\n", pd);
445
446         usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd);
447         kfree(pd);
448         return 0;
449 }
450
/*
 * Create a (UD-only) QP: validate the userspace command, carve the
 * minimal vnic resource spec for the requested transport out of a VF,
 * and hand the resulting resource layout back to userspace via udata.
 *
 * Returns the new ib_qp or an ERR_PTR.  Uses a goto ladder so the
 * qp_grp and usdev_lock are released in reverse acquisition order on
 * failure.
 */
struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
					struct ib_qp_init_attr *init_attr,
					struct ib_udata *udata)
{
	int err;
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_ucontext *ucontext;
	int cq_cnt;
	struct usnic_vnic_res_spec res_spec;
	struct usnic_ib_create_qp_cmd cmd;
	struct usnic_transport_spec trans_spec;

	usnic_dbg("\n");

	ucontext = to_uucontext(pd->uobject->context);
	us_ibdev = to_usdev(pd->device);

	/* No create flags are supported. */
	if (init_attr->create_flags)
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
	if (err) {
		usnic_err("%s: cannot copy udata for create_qp\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	err = create_qp_validate_user_data(cmd);
	if (err) {
		usnic_err("%s: Failed to validate user data\n",
				us_ibdev->ib_dev.name);
		return ERR_PTR(-EINVAL);
	}

	if (init_attr->qp_type != IB_QPT_UD) {
		usnic_err("%s asked to make a non-UD QP: %d\n",
				us_ibdev->ib_dev.name, init_attr->qp_type);
		return ERR_PTR(-EINVAL);
	}

	trans_spec = cmd.spec;
	mutex_lock(&us_ibdev->usdev_lock);
	/* A shared send/recv CQ needs one CQ resource, otherwise two. */
	cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2;
	res_spec = min_transport_spec[trans_spec.trans_type];
	usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt);
	qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd),
						&trans_spec,
						&res_spec);
	if (IS_ERR_OR_NULL(qp_grp)) {
		err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM;
		goto out_release_mutex;
	}

	err = usnic_ib_fill_create_qp_resp(qp_grp, udata);
	if (err) {
		/* NOTE(review): the original errno from
		 * fill_create_qp_resp is deliberately replaced by -EBUSY
		 * here — confirm callers rely on that. */
		err = -EBUSY;
		goto out_release_qp_grp;
	}

	qp_grp->ctx = ucontext;
	list_add_tail(&qp_grp->link, &ucontext->qp_grp_list);
	usnic_ib_log_vf(qp_grp->vf);
	mutex_unlock(&us_ibdev->usdev_lock);
	return &qp_grp->ibqp;

out_release_qp_grp:
	qp_grp_destroy(qp_grp);
out_release_mutex:
	mutex_unlock(&us_ibdev->usdev_lock);
	return ERR_PTR(err);
}
523
/*
 * Destroy a QP: force the group back to RESET (required by
 * qp_grp_destroy), unlink it from its ucontext's list, and free it.
 * A failed modify-to-RESET is logged but destruction proceeds anyway.
 */
int usnic_ib_destroy_qp(struct ib_qp *qp)
{
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;

	usnic_dbg("\n");

	qp_grp = to_uqp_grp(qp);
	vf = qp_grp->vf;
	mutex_lock(&vf->pf->usdev_lock);
	if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) {
		usnic_err("Failed to move qp grp %u to reset\n",
				qp_grp->grp_id);
	}

	list_del(&qp_grp->link);
	qp_grp_destroy(qp_grp);
	mutex_unlock(&vf->pf->usdev_lock);

	return 0;
}
545
546 int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
547                                 int attr_mask, struct ib_udata *udata)
548 {
549         struct usnic_ib_qp_grp *qp_grp;
550         int status;
551         usnic_dbg("\n");
552
553         qp_grp = to_uqp_grp(ibqp);
554
555         /* TODO: Future Support All States */
556         mutex_lock(&qp_grp->vf->pf->usdev_lock);
557         if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT) {
558                 status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_INIT, NULL);
559         } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTR) {
560                 status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTR, NULL);
561         } else if ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_RTS) {
562                 status = usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RTS, NULL);
563         } else {
564                 usnic_err("Unexpected combination mask: %u state: %u\n",
565                                 attr_mask & IB_QP_STATE, attr->qp_state);
566                 status = -EINVAL;
567         }
568
569         mutex_unlock(&qp_grp->vf->pf->usdev_lock);
570         return status;
571 }
572
573 struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, int entries,
574                                         int vector, struct ib_ucontext *context,
575                                         struct ib_udata *udata)
576 {
577         struct ib_cq *cq;
578
579         usnic_dbg("\n");
580         cq = kzalloc(sizeof(*cq), GFP_KERNEL);
581         if (!cq)
582                 return ERR_PTR(-EBUSY);
583
584         return cq;
585 }
586
/* Destroy a CQ: the kernel object is just a placeholder, so free it. */
int usnic_ib_destroy_cq(struct ib_cq *cq)
{
	usnic_dbg("\n");
	kfree(cq);
	return 0;
}
593
594 struct ib_mr *usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length,
595                                         u64 virt_addr, int access_flags,
596                                         struct ib_udata *udata)
597 {
598         struct usnic_ib_mr *mr;
599         int err;
600
601         usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start,
602                         virt_addr, length);
603
604         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
605         if (IS_ERR_OR_NULL(mr))
606                 return ERR_PTR(mr ? PTR_ERR(mr) : -ENOMEM);
607
608         mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length,
609                                         access_flags, 0);
610         if (IS_ERR_OR_NULL(mr->umem)) {
611                 err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT;
612                 goto err_free;
613         }
614
615         mr->ibmr.lkey = mr->ibmr.rkey = 0;
616         return &mr->ibmr;
617
618 err_free:
619         kfree(mr);
620         return ERR_PTR(err);
621 }
622
/*
 * Deregister a memory region: release the pinned uiom mapping (the
 * ucontext's 'closing' flag tells uiom whether the mm is going away)
 * and free the wrapper.
 */
int usnic_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct usnic_ib_mr *mr = to_umr(ibmr);

	usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length);

	usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing);
	kfree(mr);
	return 0;
}
633
/*
 * Allocate a user context and link it onto the device's context list
 * (under usdev_lock) so QP groups can later be attached to it.
 */
struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev,
							struct ib_udata *udata)
{
	struct usnic_ib_ucontext *context;
	struct usnic_ib_dev *us_ibdev = to_usdev(ibdev);
	usnic_dbg("\n");

	/* kmalloc (not kzalloc): every field used later is explicitly
	 * initialized here or by the verbs core. */
	context = kmalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&context->qp_grp_list);
	mutex_lock(&us_ibdev->usdev_lock);
	list_add_tail(&context->link, &us_ibdev->ctx_list);
	mutex_unlock(&us_ibdev->usdev_lock);

	return &context->ibucontext;
}
652
/*
 * Free a user context.  All of its QP groups must already be destroyed
 * (BUG_ON otherwise); unlink from the device list under usdev_lock.
 */
int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct usnic_ib_ucontext *context = to_uucontext(ibcontext);
	struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device);
	usnic_dbg("\n");

	mutex_lock(&us_ibdev->usdev_lock);
	BUG_ON(!list_empty(&context->qp_grp_list));
	list_del(&context->link);
	mutex_unlock(&us_ibdev->usdev_lock);
	kfree(context);
	return 0;
}
666
/*
 * mmap callback: map a VF's BAR0 into userspace so it can drive the
 * queues directly.  The VF is selected by encoding its index in the
 * mmap page offset; the request must cover exactly the BAR length and
 * the VF must own a QP group in this context.
 *
 * Returns 0 on success, -EINVAL on size mismatch or unknown VF.
 */
int usnic_ib_mmap(struct ib_ucontext *context,
				struct vm_area_struct *vma)
{
	struct usnic_ib_ucontext *uctx = to_ucontext(context);
	struct usnic_ib_dev *us_ibdev;
	struct usnic_ib_qp_grp *qp_grp;
	struct usnic_ib_vf *vf;
	struct vnic_dev_bar *bar;
	dma_addr_t bus_addr;
	unsigned int len;
	unsigned int vfid;

	usnic_dbg("\n");

	us_ibdev = to_usdev(context->device);
	/* Device registers: uncached I/O mapping. */
	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/* The page offset carries the VF index, not a file offset. */
	vfid = vma->vm_pgoff;
	usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
			vma->vm_pgoff, PAGE_SHIFT, vfid);

	mutex_lock(&us_ibdev->usdev_lock);
	/* Only VFs backing one of this context's QP groups may be mapped. */
	list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) {
		vf = qp_grp->vf;
		if (usnic_vnic_get_index(vf->vnic) == vfid) {
			bar = usnic_vnic_get_bar(vf->vnic, 0);
			if ((vma->vm_end - vma->vm_start) != bar->len) {
				usnic_err("Bar0 Len %lu - Request map %lu\n",
						bar->len,
						vma->vm_end - vma->vm_start);
				mutex_unlock(&us_ibdev->usdev_lock);
				return -EINVAL;
			}
			bus_addr = bar->bus_addr;
			len = bar->len;
			usnic_dbg("bus: %pa vaddr: %p size: %ld\n",
					&bus_addr, bar->vaddr, bar->len);
			mutex_unlock(&us_ibdev->usdev_lock);

			return remap_pfn_range(vma,
						vma->vm_start,
						bus_addr >> PAGE_SHIFT,
						len, vma->vm_page_prot);
		}
	}

	mutex_unlock(&us_ibdev->usdev_lock);
	usnic_err("No VF %u found\n", vfid);
	return -EINVAL;
}
717
718 /* In ib callbacks section -  Start of stub funcs */
/* Stub: address handles are not supported; userspace handles addressing. */
struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd,
					struct ib_ah_attr *ah_attr)
{
	usnic_dbg("\n");
	return ERR_PTR(-EPERM);
}
725
/* Stub: no AHs are ever created, so there is nothing to destroy. */
int usnic_ib_destroy_ah(struct ib_ah *ah)
{
	usnic_dbg("\n");
	return -EINVAL;
}
731
/* Stub: data path is entirely in userspace; kernel posting is invalid. */
int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
				struct ib_send_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}
738
/* Stub: data path is entirely in userspace; kernel posting is invalid. */
int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
				struct ib_recv_wr **bad_wr)
{
	usnic_dbg("\n");
	return -EINVAL;
}
745
/* Stub: CQs are polled from userspace, never through the kernel. */
int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries,
				struct ib_wc *wc)
{
	usnic_dbg("\n");
	return -EINVAL;
}
752
/* Stub: CQ event notification is not supported. */
int usnic_ib_req_notify_cq(struct ib_cq *cq,
					enum ib_cq_notify_flags flags)
{
	usnic_dbg("\n");
	return -EINVAL;
}
759
/* Stub: DMA MRs are not supported; always fails. */
struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	usnic_dbg("\n");
	return ERR_PTR(-ENOMEM);
}
765
766
767 /* In ib callbacks section - End of stub funcs */
768 /* End of ib callbacks section */