Add the rt linux 4.1.3-rt3 as base
kvmfornfv.git: kernel/drivers/infiniband/hw/cxgb4/provider.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
#include <linux/io.h>

#include <asm/irq.h>
#include <asm/byteorder.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include "iw_cxgb4.h"

static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");

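/*
 * iWARP has no address handles, multicast groups or MADs, so the following
 * verbs are stubbed out and simply return -ENOSYS to the core.
 */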
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
                                    struct ib_ah_attr *ah_attr)
{
        return ERR_PTR(-ENOSYS);
}

static int c4iw_ah_destroy(struct ib_ah *ah)
{
        return -ENOSYS;
}

static int c4iw_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int c4iw_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        return -ENOSYS;
}

static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
                            u8 port_num, struct ib_wc *in_wc,
                            struct ib_grh *in_grh, struct ib_mad *in_mad,
                            struct ib_mad *out_mad)
{
        return -ENOSYS;
}

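/*
 * Tear down a user context: free any mmap entries still queued for the
 * user library and release the per-context rdev state.
 */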
static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
{
        struct c4iw_dev *rhp = to_c4iw_dev(context->device);
        struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
        struct c4iw_mm_entry *mm, *tmp;

        PDBG("%s context %p\n", __func__, context);
        list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
                kfree(mm);
        c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
        kfree(ucontext);
        return 0;
}

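/*
 * Allocate a user context.  If the user library is new enough, export the
 * adapter status page to it by handing back an mmap key that a later
 * c4iw_mmap() call translates into the page's physical address.  Downlevel
 * libcxgb4 builds get a context without the status page, and the feature
 * is disabled device-wide.
 */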
static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
                                               struct ib_udata *udata)
{
        struct c4iw_ucontext *context;
        struct c4iw_dev *rhp = to_c4iw_dev(ibdev);
        static int warned;
        struct c4iw_alloc_ucontext_resp uresp;
        int ret = 0;
        struct c4iw_mm_entry *mm = NULL;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context) {
                ret = -ENOMEM;
                goto err;
        }

        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);

        if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
                if (!warned++)
                        pr_err(MOD "Warning - downlevel libcxgb4 (non-fatal), device status page disabled.\n");
                rhp->rdev.flags |= T4_STATUS_PAGE_DISABLED;
        } else {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (!mm) {
                        ret = -ENOMEM;
                        goto err_free;
                }

                uresp.status_page_size = PAGE_SIZE;

                spin_lock(&context->mmap_lock);
                uresp.status_page_key = context->key;
                context->key += PAGE_SIZE;
                spin_unlock(&context->mmap_lock);

                ret = ib_copy_to_udata(udata, &uresp,
                                       sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err_mm;

                mm->key = uresp.status_page_key;
                mm->addr = virt_to_phys(rhp->rdev.status_page);
                mm->len = PAGE_SIZE;
                insert_mmap(context, mm);
        }
        return &context->ibucontext;
err_mm:
        kfree(mm);
err_free:
        kfree(context);
err:
        return ERR_PTR(ret);
}

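/*
 * mmap() handler for user verbs.  The page offset encodes a key previously
 * handed to the user library; look it up and then map either the MA_SYNC
 * register in PCI resource 0, user doorbell/on-chip queue memory in PCI
 * resource 2 (write-combined where the chip supports it), or plain
 * contiguous DMA queue memory.
 */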
static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        int len = vma->vm_end - vma->vm_start;
        u32 key = vma->vm_pgoff << PAGE_SHIFT;
        struct c4iw_rdev *rdev;
        int ret = 0;
        struct c4iw_mm_entry *mm;
        struct c4iw_ucontext *ucontext;
        u64 addr;

        PDBG("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
             key, len);

        if (vma->vm_start & (PAGE_SIZE-1))
                return -EINVAL;

        rdev = &(to_c4iw_dev(context->device)->rdev);
        ucontext = to_c4iw_ucontext(context);

        mm = remove_mmap(ucontext, key, len);
        if (!mm)
                return -EINVAL;
        addr = mm->addr;
        kfree(mm);

        if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&
            (addr < (pci_resource_start(rdev->lldi.pdev, 0) +
                    pci_resource_len(rdev->lldi.pdev, 0)))) {

                /*
                 * MA_SYNC register...
                 */
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
        } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&
                   (addr < (pci_resource_start(rdev->lldi.pdev, 2) +
                    pci_resource_len(rdev->lldi.pdev, 2)))) {

                /*
                 * Map user DB or OCQP memory...
                 */
                if (addr >= rdev->oc_mw_pa)
                        vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
                else {
                        if (is_t5(rdev->lldi.adapter_type))
                                vma->vm_page_prot =
                                        t4_pgprot_wc(vma->vm_page_prot);
                        else
                                vma->vm_page_prot =
                                        pgprot_noncached(vma->vm_page_prot);
                }
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         addr >> PAGE_SHIFT,
                                         len, vma->vm_page_prot);
        } else {

                /*
                 * Map WQ or CQ contig dma memory...
                 */
                ret = remap_pfn_range(vma, vma->vm_start,
                                      addr >> PAGE_SHIFT,
                                      len, vma->vm_page_prot);
        }

        return ret;
}

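/*
 * Free a protection domain: return the pdid to the resource table and
 * update the usage counters.
 */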
static int c4iw_deallocate_pd(struct ib_pd *pd)
{
        struct c4iw_dev *rhp;
        struct c4iw_pd *php;

        php = to_c4iw_pd(pd);
        rhp = php->rhp;
        PDBG("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
        c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur--;
        mutex_unlock(&rhp->rdev.stats.lock);
        kfree(php);
        return 0;
}

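/*
 * Allocate a protection domain: take a pdid from the resource table and,
 * for user PDs, copy the id back through udata so the library can use it.
 */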
static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
{
        struct c4iw_pd *php;
        u32 pdid;
        struct c4iw_dev *rhp;

        PDBG("%s ibdev %p\n", __func__, ibdev);
        rhp = (struct c4iw_dev *) ibdev;
        pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
        if (!pdid)
                return ERR_PTR(-EINVAL);
        php = kzalloc(sizeof(*php), GFP_KERNEL);
        if (!php) {
                c4iw_put_resource(&rhp->rdev.resource.pdid_table, pdid);
                return ERR_PTR(-ENOMEM);
        }
        php->pdid = pdid;
        php->rhp = rhp;
        if (context) {
                if (ib_copy_to_udata(udata, &php->pdid, sizeof(u32))) {
                        c4iw_deallocate_pd(&php->ibpd);
                        return ERR_PTR(-EFAULT);
                }
        }
        mutex_lock(&rhp->rdev.stats.lock);
        rhp->rdev.stats.pd.cur++;
        if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
                rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
        mutex_unlock(&rhp->rdev.stats.lock);
        PDBG("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
        return &php->ibpd;
}

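/* iWARP has no partition keys; report a single pkey of 0. */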
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                           u16 *pkey)
{
        PDBG("%s ibdev %p\n", __func__, ibdev);
        *pkey = 0;
        return 0;
}

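/* The single GID per port is synthesized from the port's MAC address. */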
static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
                          union ib_gid *gid)
{
        struct c4iw_dev *dev;

        PDBG("%s ibdev %p, port %d, index %d, gid %p\n",
               __func__, ibdev, port, index, gid);
        dev = to_c4iw_dev(ibdev);
        BUG_ON(port == 0);
        memset(&(gid->raw[0]), 0, sizeof(gid->raw));
        memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
        return 0;
}

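/* Report adapter limits and capabilities to the RDMA core. */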
static int c4iw_query_device(struct ib_device *ibdev,
                             struct ib_device_attr *props)
{
        struct c4iw_dev *dev;

        PDBG("%s ibdev %p\n", __func__, ibdev);

        dev = to_c4iw_dev(ibdev);
        memset(props, 0, sizeof *props);
        memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
        props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
        props->fw_ver = dev->rdev.lldi.fw_vers;
        props->device_cap_flags = dev->device_cap_flags;
        props->page_size_cap = T4_PAGESIZE_MASK;
        props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
        props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
        props->max_mr_size = T4_MAX_MR_SIZE;
        props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
        props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
        props->max_sge = T4_MAX_RECV_SGE;
        props->max_sge_rd = 1;
        props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
        props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
                                    c4iw_max_read_depth);
        props->max_qp_init_rd_atom = props->max_qp_rd_atom;
        props->max_cq = dev->rdev.lldi.vr->qp.size;
        props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
        props->max_mr = c4iw_num_stags(&dev->rdev);
        props->max_pd = T4_MAX_NUM_PD;
        props->local_ca_ack_delay = 0;
        props->max_fast_reg_page_list_len = t4_max_fr_depth(use_dsgl);

        return 0;
}

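/*
 * Report port attributes.  The port state is derived from the backing
 * netdev: DOWN without carrier, ACTIVE when the carrier is up and an IPv4
 * address is configured, INIT otherwise.
 */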
static int c4iw_query_port(struct ib_device *ibdev, u8 port,
                           struct ib_port_attr *props)
{
        struct c4iw_dev *dev;
        struct net_device *netdev;
        struct in_device *inetdev;

        PDBG("%s ibdev %p\n", __func__, ibdev);

        dev = to_c4iw_dev(ibdev);
        netdev = dev->rdev.lldi.ports[port-1];

        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
        if (netdev->mtu >= 4096)
                props->active_mtu = IB_MTU_4096;
        else if (netdev->mtu >= 2048)
                props->active_mtu = IB_MTU_2048;
        else if (netdev->mtu >= 1024)
                props->active_mtu = IB_MTU_1024;
        else if (netdev->mtu >= 512)
                props->active_mtu = IB_MTU_512;
        else
                props->active_mtu = IB_MTU_256;

        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
        else {
                inetdev = in_dev_get(netdev);
                if (inetdev) {
                        if (inetdev->ifa_list)
                                props->state = IB_PORT_ACTIVE;
                        else
                                props->state = IB_PORT_INIT;
                        in_dev_put(inetdev);
                } else
                        props->state = IB_PORT_INIT;
        }

        props->port_cap_flags =
            IB_PORT_CM_SUP |
            IB_PORT_SNMP_TUNNEL_SUP |
            IB_PORT_REINIT_SUP |
            IB_PORT_DEVICE_MGMT_SUP |
            IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
        props->gid_tbl_len = 1;
        props->pkey_tbl_len = 1;
        props->active_width = 2;
        props->active_speed = IB_SPEED_DDR;
        props->max_msg_sz = -1;

        return 0;
}

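/* sysfs attributes exported under the ib_device's class device. */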
static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);
        return sprintf(buf, "%d\n",
                       CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}

static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);

        return sprintf(buf, "%u.%u.%u.%u\n",
                        FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
                        FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
                        FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
                        FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
}

static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        struct ethtool_drvinfo info;
        struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];

        PDBG("%s dev 0x%p\n", __func__, dev);
        lldev->ethtool_ops->get_drvinfo(lldev, &info);
        return sprintf(buf, "%s\n", info.driver);
}

static ssize_t show_board(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
                                                 ibdev.dev);
        PDBG("%s dev 0x%p\n", __func__, dev);
        return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
                       c4iw_dev->rdev.lldi.pdev->device);
}

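/*
 * Protocol statistics: pull the TCP MIB from the LLD and report the sum of
 * the IPv4 and IPv6 counters.
 */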
static int c4iw_get_mib(struct ib_device *ibdev,
                        union rdma_protocol_stats *stats)
{
        struct tp_tcp_stats v4, v6;
        struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);

        cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
        memset(stats, 0, sizeof *stats);
        stats->iw.tcpInSegs = v4.tcpInSegs + v6.tcpInSegs;
        stats->iw.tcpOutSegs = v4.tcpOutSegs + v6.tcpOutSegs;
        stats->iw.tcpRetransSegs = v4.tcpRetransSegs + v6.tcpRetransSegs;
        stats->iw.tcpOutRsts = v4.tcpOutRsts + v6.tcpOutRsts;

        return 0;
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *c4iw_class_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id,
};

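/*
 * Register the device with the RDMA core: fill in the ib_device verbs
 * table and the iw_cm callbacks, then create the sysfs attribute files.
 */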
int c4iw_register_device(struct c4iw_dev *dev)
{
        int ret;
        int i;

        PDBG("%s c4iw_dev %p\n", __func__, dev);
        BUG_ON(!dev->rdev.lldi.ports[0]);
        strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
        memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
        memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
        dev->ibdev.owner = THIS_MODULE;
        dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
        if (fastreg_support)
                dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        dev->ibdev.local_dma_lkey = 0;
        dev->ibdev.uverbs_cmd_mask =
            (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
            (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
            (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
            (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
            (1ull << IB_USER_VERBS_CMD_REG_MR) |
            (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
            (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
            (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
            (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
            (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
            (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
            (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
            (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
            (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
            (1ull << IB_USER_VERBS_CMD_POST_SEND) |
            (1ull << IB_USER_VERBS_CMD_POST_RECV);
        dev->ibdev.node_type = RDMA_NODE_RNIC;
        memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
        dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
        dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
        dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
        dev->ibdev.query_device = c4iw_query_device;
        dev->ibdev.query_port = c4iw_query_port;
        dev->ibdev.query_pkey = c4iw_query_pkey;
        dev->ibdev.query_gid = c4iw_query_gid;
        dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
        dev->ibdev.dealloc_ucontext = c4iw_dealloc_ucontext;
        dev->ibdev.mmap = c4iw_mmap;
        dev->ibdev.alloc_pd = c4iw_allocate_pd;
        dev->ibdev.dealloc_pd = c4iw_deallocate_pd;
        dev->ibdev.create_ah = c4iw_ah_create;
        dev->ibdev.destroy_ah = c4iw_ah_destroy;
        dev->ibdev.create_qp = c4iw_create_qp;
        dev->ibdev.modify_qp = c4iw_ib_modify_qp;
        dev->ibdev.query_qp = c4iw_ib_query_qp;
        dev->ibdev.destroy_qp = c4iw_destroy_qp;
        dev->ibdev.create_cq = c4iw_create_cq;
        dev->ibdev.destroy_cq = c4iw_destroy_cq;
        dev->ibdev.resize_cq = c4iw_resize_cq;
        dev->ibdev.poll_cq = c4iw_poll_cq;
        dev->ibdev.get_dma_mr = c4iw_get_dma_mr;
        dev->ibdev.reg_phys_mr = c4iw_register_phys_mem;
        dev->ibdev.rereg_phys_mr = c4iw_reregister_phys_mem;
        dev->ibdev.reg_user_mr = c4iw_reg_user_mr;
        dev->ibdev.dereg_mr = c4iw_dereg_mr;
        dev->ibdev.alloc_mw = c4iw_alloc_mw;
        dev->ibdev.bind_mw = c4iw_bind_mw;
        dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
        dev->ibdev.alloc_fast_reg_mr = c4iw_alloc_fast_reg_mr;
        dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
        dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
        dev->ibdev.attach_mcast = c4iw_multicast_attach;
        dev->ibdev.detach_mcast = c4iw_multicast_detach;
        dev->ibdev.process_mad = c4iw_process_mad;
        dev->ibdev.req_notify_cq = c4iw_arm_cq;
        dev->ibdev.post_send = c4iw_post_send;
        dev->ibdev.post_recv = c4iw_post_receive;
        dev->ibdev.get_protocol_stats = c4iw_get_mib;
        dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;

        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
                return -ENOMEM;

        dev->ibdev.iwcm->connect = c4iw_connect;
        dev->ibdev.iwcm->accept = c4iw_accept_cr;
        dev->ibdev.iwcm->reject = c4iw_reject_cr;
        dev->ibdev.iwcm->create_listen = c4iw_create_listen;
        dev->ibdev.iwcm->destroy_listen = c4iw_destroy_listen;
        dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
        dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
        dev->ibdev.iwcm->get_qp = c4iw_get_qp;

        ret = ib_register_device(&dev->ibdev, NULL);
        if (ret)
                goto bail1;

        for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
                ret = device_create_file(&dev->ibdev.dev,
                                         c4iw_class_attributes[i]);
                if (ret)
                        goto bail2;
        }
        return 0;
bail2:
        ib_unregister_device(&dev->ibdev);
bail1:
        kfree(dev->ibdev.iwcm);
        return ret;
}

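/* Undo c4iw_register_device(): remove the sysfs files and unregister. */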
void c4iw_unregister_device(struct c4iw_dev *dev)
{
        int i;

        PDBG("%s c4iw_dev %p\n", __func__, dev);
        for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
                device_remove_file(&dev->ibdev.dev,
                                   c4iw_class_attributes[i]);
        ib_unregister_device(&dev->ibdev);
        kfree(dev->ibdev.iwcm);
        return;
}