kvmfornfv.git: kernel/drivers/infiniband/core/mad_rmpp.c
/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mad_priv.h"
#include "mad_rmpp.h"

enum rmpp_state {
        RMPP_STATE_ACTIVE,
        RMPP_STATE_TIMEOUT,
        RMPP_STATE_COMPLETE,
        RMPP_STATE_CANCELING
};

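/*
 * Per-transaction receive context.  One of these tracks each RMPP
 * transfer being reassembled, keyed by TID, source QP/LID, management
 * class, class version and method.  'seg_num' is the highest segment
 * received in order, 'newwin' the window advertised back to the sender.
 */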
struct mad_rmpp_recv {
        struct ib_mad_agent_private *agent;
        struct list_head list;
        struct delayed_work timeout_work;
        struct delayed_work cleanup_work;
        struct completion comp;
        enum rmpp_state state;
        spinlock_t lock;
        atomic_t refcount;

        struct ib_ah *ah;
        struct ib_mad_recv_wc *rmpp_wc;
        struct ib_mad_recv_buf *cur_seg_buf;
        int last_ack;
        int seg_num;
        int newwin;
        int repwin;

        __be64 tid;
        u32 src_qp;
        u16 slid;
        u8 mgmt_class;
        u8 class_version;
        u8 method;
        u8 base_version;
};

static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
        if (atomic_dec_and_test(&rmpp_recv->refcount))
                complete(&rmpp_recv->comp);
}

static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
        deref_rmpp_recv(rmpp_recv);
        wait_for_completion(&rmpp_recv->comp);
        ib_destroy_ah(rmpp_recv->ah);
        kfree(rmpp_recv);
}

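/*
 * Tear down all in-progress receives for an agent being unregistered:
 * mark every context as canceling, cancel and flush the timeout and
 * cleanup work, then free the contexts.
 */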
void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
        struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
        unsigned long flags;

        spin_lock_irqsave(&agent->lock, flags);
        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                if (rmpp_recv->state != RMPP_STATE_COMPLETE)
                        ib_free_recv_mad(rmpp_recv->rmpp_wc);
                rmpp_recv->state = RMPP_STATE_CANCELING;
        }
        spin_unlock_irqrestore(&agent->lock, flags);

        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                cancel_delayed_work(&rmpp_recv->timeout_work);
                cancel_delayed_work(&rmpp_recv->cleanup_work);
        }

        flush_workqueue(agent->qp_info->port_priv->wq);

        list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
                                 &agent->rmpp_list, list) {
                list_del(&rmpp_recv->list);
                destroy_rmpp_recv(rmpp_recv);
        }
}

static void format_ack(struct ib_mad_send_buf *msg,
                       struct ib_rmpp_mad *data,
                       struct mad_rmpp_recv *rmpp_recv)
{
        struct ib_rmpp_mad *ack = msg->mad;
        unsigned long flags;

        memcpy(ack, &data->mad_hdr, msg->hdr_len);

        ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
        ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

        spin_lock_irqsave(&rmpp_recv->lock, flags);
        rmpp_recv->last_ack = rmpp_recv->seg_num;
        ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
        ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

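/*
 * Build and post an RMPP ACK acknowledging the segments received so
 * far and advertising the current receive window.
 */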
static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
                     struct ib_mad_recv_wc *recv_wc)
{
        struct ib_mad_send_buf *msg;
        int ret, hdr_len;

        hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
        msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
                                 recv_wc->wc->pkey_index, 1, hdr_len,
                                 0, GFP_KERNEL,
                                 IB_MGMT_BASE_VERSION);
        if (IS_ERR(msg))
                return;

        format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
        msg->ah = rmpp_recv->ah;
        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
                                                  struct ib_mad_recv_wc *recv_wc)
{
        struct ib_mad_send_buf *msg;
        struct ib_ah *ah;
        int hdr_len;

        ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
                                  recv_wc->recv_buf.grh, agent->port_num);
        if (IS_ERR(ah))
                return (void *) ah;

        hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
        msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
                                 recv_wc->wc->pkey_index, 1,
                                 hdr_len, 0, GFP_KERNEL,
                                 IB_MGMT_BASE_VERSION);
        if (IS_ERR(msg))
                ib_destroy_ah(ah);
        else {
                msg->ah = ah;
                msg->context[0] = ah;
        }

        return msg;
}

static void ack_ds_ack(struct ib_mad_agent_private *agent,
                       struct ib_mad_recv_wc *recv_wc)
{
        struct ib_mad_send_buf *msg;
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        msg = alloc_response_msg(&agent->agent, recv_wc);
        if (IS_ERR(msg))
                return;

        rmpp_mad = msg->mad;
        memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

        rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
        rmpp_mad->rmpp_hdr.seg_num = 0;
        rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(1);

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                ib_destroy_ah(msg->ah);
                ib_free_send_mad(msg);
        }
}

void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
        if (mad_send_wc->send_buf->context[0] == mad_send_wc->send_buf->ah)
                ib_destroy_ah(mad_send_wc->send_buf->ah);
        ib_free_send_mad(mad_send_wc->send_buf);
}

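/*
 * Reject a received RMPP MAD by sending an ABORT with the given RMPP
 * status back to the sender.
 */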
static void nack_recv(struct ib_mad_agent_private *agent,
                      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
        struct ib_mad_send_buf *msg;
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        msg = alloc_response_msg(&agent->agent, recv_wc);
        if (IS_ERR(msg))
                return;

        rmpp_mad = msg->mad;
        memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

        rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
        rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
        rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
        rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
        rmpp_mad->rmpp_hdr.seg_num = 0;
        rmpp_mad->rmpp_hdr.paylen_newwin = 0;

        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                ib_destroy_ah(msg->ah);
                ib_free_send_mad(msg);
        }
}

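/*
 * Delayed work run when the remaining segments of an active transfer
 * do not arrive in time: drop the context and report an ABORT with
 * status T2L (total time too long) to the sender.
 */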
static void recv_timeout_handler(struct work_struct *work)
{
        struct mad_rmpp_recv *rmpp_recv =
                container_of(work, struct mad_rmpp_recv, timeout_work.work);
        struct ib_mad_recv_wc *rmpp_wc;
        unsigned long flags;

        spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
        if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
                spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
                return;
        }
        rmpp_recv->state = RMPP_STATE_TIMEOUT;
        list_del(&rmpp_recv->list);
        spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

        rmpp_wc = rmpp_recv->rmpp_wc;
        nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
        destroy_rmpp_recv(rmpp_recv);
        ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(struct work_struct *work)
{
        struct mad_rmpp_recv *rmpp_recv =
                container_of(work, struct mad_rmpp_recv, cleanup_work.work);
        unsigned long flags;

        spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
        if (rmpp_recv->state == RMPP_STATE_CANCELING) {
                spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
                return;
        }
        list_del(&rmpp_recv->list);
        spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
        destroy_rmpp_recv(rmpp_recv);
}

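/*
 * Allocate and initialize a receive context for the first segment of
 * a new RMPP transfer, including the address handle used to ACK the
 * sender.
 */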
static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
                 struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_mad_hdr *mad_hdr;

        rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
        if (!rmpp_recv)
                return NULL;

        rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
                                             mad_recv_wc->wc,
                                             mad_recv_wc->recv_buf.grh,
                                             agent->agent.port_num);
        if (IS_ERR(rmpp_recv->ah))
                goto error;

        rmpp_recv->agent = agent;
        init_completion(&rmpp_recv->comp);
        INIT_DELAYED_WORK(&rmpp_recv->timeout_work, recv_timeout_handler);
        INIT_DELAYED_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler);
        spin_lock_init(&rmpp_recv->lock);
        rmpp_recv->state = RMPP_STATE_ACTIVE;
        atomic_set(&rmpp_recv->refcount, 1);

        rmpp_recv->rmpp_wc = mad_recv_wc;
        rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
        rmpp_recv->newwin = 1;
        rmpp_recv->seg_num = 1;
        rmpp_recv->last_ack = 0;
        rmpp_recv->repwin = 1;

        mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
        rmpp_recv->tid = mad_hdr->tid;
        rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
        rmpp_recv->slid = mad_recv_wc->wc->slid;
        rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
        rmpp_recv->class_version = mad_hdr->class_version;
        rmpp_recv->method  = mad_hdr->method;
        rmpp_recv->base_version  = mad_hdr->base_version;
        return rmpp_recv;

error:  kfree(rmpp_recv);
        return NULL;
}

static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
               struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                if (rmpp_recv->tid == mad_hdr->tid &&
                    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
                    rmpp_recv->slid == mad_recv_wc->wc->slid &&
                    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
                    rmpp_recv->class_version == mad_hdr->class_version &&
                    rmpp_recv->method == mad_hdr->method)
                        return rmpp_recv;
        }
        return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
                  struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        unsigned long flags;

        spin_lock_irqsave(&agent->lock, flags);
        rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
        if (rmpp_recv)
                atomic_inc(&rmpp_recv->refcount);
        spin_unlock_irqrestore(&agent->lock, flags);
        return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
                 struct mad_rmpp_recv *rmpp_recv)
{
        struct mad_rmpp_recv *cur_rmpp_recv;

        cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
        if (!cur_rmpp_recv)
                list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

        return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
        return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
        return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
                                                    struct ib_mad_recv_buf *seg)
{
        if (seg->list.next == rmpp_list)
                return NULL;

        return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

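/*
 * Receive window advertised to senders: one eighth of the receive
 * queue depth, but at least one segment.
 */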
static inline int window_size(struct ib_mad_agent_private *agent)
{
        return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
                                                  int seg_num)
{
        struct ib_mad_recv_buf *seg_buf;
        int cur_seg_num;

        list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
                cur_seg_num = get_seg_num(seg_buf);
                if (seg_num > cur_seg_num)
                        return seg_buf;
                if (seg_num == cur_seg_num)
                        break;
        }
        return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
                           struct ib_mad_recv_buf *new_buf)
{
        struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

        while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
                rmpp_recv->cur_seg_buf = new_buf;
                rmpp_recv->seg_num++;
                new_buf = get_next_seg(rmpp_list, new_buf);
        }
}

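/*
 * Compute the total reassembled MAD length from the number of segments
 * and the PayloadLength carried in the last segment; OPA MADs use the
 * larger OPA RMPP data size.
 */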
static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
        struct ib_rmpp_mad *rmpp_mad;
        int hdr_size, data_size, pad;
        bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device,
                                    rmpp_recv->agent->qp_info->port_priv->port_num);

        rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

        hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
        if (opa && rmpp_recv->base_version == OPA_MGMT_BASE_VERSION) {
                data_size = sizeof(struct opa_rmpp_mad) - hdr_size;
                pad = OPA_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
                if (pad > OPA_MGMT_RMPP_DATA || pad < 0)
                        pad = 0;
        } else {
                data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
                pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
                if (pad > IB_MGMT_RMPP_DATA || pad < 0)
                        pad = 0;
        }

        return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

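/*
 * All segments have arrived: ACK the transfer, record the final MAD
 * length, and leave the context around briefly so retransmitted
 * segments can still be ACKed before cleanup.
 */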
static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
        struct ib_mad_recv_wc *rmpp_wc;

        ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
        if (rmpp_recv->seg_num > 1)
                cancel_delayed_work(&rmpp_recv->timeout_work);

        rmpp_wc = rmpp_recv->rmpp_wc;
        rmpp_wc->mad_len = get_mad_len(rmpp_recv);
        /* 10 seconds until we can find the packet lifetime */
        queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
                           &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
        return rmpp_wc;
}

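/*
 * Handle a segment of an existing transfer: insert it at the proper
 * place in the segment list, advance seg_num across any now-contiguous
 * segments, and either complete the transfer or open a new window.
 */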
static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
              struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_mad_recv_buf *prev_buf;
        struct ib_mad_recv_wc *done_wc;
        int seg_num;
        unsigned long flags;

        rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
        if (!rmpp_recv)
                goto drop1;

        seg_num = get_seg_num(&mad_recv_wc->recv_buf);

        spin_lock_irqsave(&rmpp_recv->lock, flags);
        if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
            (seg_num > rmpp_recv->newwin))
                goto drop3;

        if ((seg_num <= rmpp_recv->last_ack) ||
            (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
                spin_unlock_irqrestore(&rmpp_recv->lock, flags);
                ack_recv(rmpp_recv, mad_recv_wc);
                goto drop2;
        }

        prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
        if (!prev_buf)
                goto drop3;

        done_wc = NULL;
        list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
        if (rmpp_recv->cur_seg_buf == prev_buf) {
                update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
                if (get_last_flag(rmpp_recv->cur_seg_buf)) {
                        rmpp_recv->state = RMPP_STATE_COMPLETE;
                        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
                        done_wc = complete_rmpp(rmpp_recv);
                        goto out;
                } else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
                        rmpp_recv->newwin += window_size(agent);
                        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
                        ack_recv(rmpp_recv, mad_recv_wc);
                        goto out;
                }
        }
        spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
        deref_rmpp_recv(rmpp_recv);
        return done_wc;

drop3:  spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:  deref_rmpp_recv(rmpp_recv);
drop1:  ib_free_recv_mad(mad_recv_wc);
        return NULL;
}

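/*
 * Handle the first segment of a transfer.  If a context for the same
 * transaction already exists, the segment is treated as a duplicate
 * and handed to continue_rmpp(); otherwise the new context is armed
 * with a timeout and the segment is ACKed.
 */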
static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
           struct ib_mad_recv_wc *mad_recv_wc)
{
        struct mad_rmpp_recv *rmpp_recv;
        unsigned long flags;

        rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
        if (!rmpp_recv) {
                ib_free_recv_mad(mad_recv_wc);
                return NULL;
        }

        spin_lock_irqsave(&agent->lock, flags);
        if (insert_rmpp_recv(agent, rmpp_recv)) {
                spin_unlock_irqrestore(&agent->lock, flags);
                /* duplicate first MAD */
                destroy_rmpp_recv(rmpp_recv);
                return continue_rmpp(agent, mad_recv_wc);
        }
        atomic_inc(&rmpp_recv->refcount);

        if (get_last_flag(&mad_recv_wc->recv_buf)) {
                rmpp_recv->state = RMPP_STATE_COMPLETE;
                spin_unlock_irqrestore(&agent->lock, flags);
                complete_rmpp(rmpp_recv);
        } else {
                spin_unlock_irqrestore(&agent->lock, flags);
                /* 40 seconds until we can find the packet lifetimes */
                queue_delayed_work(agent->qp_info->port_priv->wq,
                                   &rmpp_recv->timeout_work,
                                   msecs_to_jiffies(40000));
                rmpp_recv->newwin += window_size(agent);
                ack_recv(rmpp_recv, mad_recv_wc);
                mad_recv_wc = NULL;
        }
        deref_rmpp_recv(rmpp_recv);
        return mad_recv_wc;
}

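/*
 * Transmit the next segment of an outgoing RMPP send, setting the
 * FIRST/LAST flags and PayloadLength as required and capping the ACK
 * wait at 2 seconds.
 */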
static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_mad *rmpp_mad;
        int timeout;
        u32 paylen = 0;

        rmpp_mad = mad_send_wr->send_buf.mad;
        ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
        rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

        if (mad_send_wr->seg_num == 1) {
                rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
                paylen = (mad_send_wr->send_buf.seg_count *
                          mad_send_wr->send_buf.seg_rmpp_size) -
                          mad_send_wr->pad;
        }

        if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
                rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
                paylen = mad_send_wr->send_buf.seg_rmpp_size - mad_send_wr->pad;
        }
        rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

        /* 2 seconds for an ACK until we can find the packet lifetime */
        timeout = mad_send_wr->send_buf.timeout_ms;
        if (!timeout || timeout > 2000)
                mad_send_wr->timeout = msecs_to_jiffies(2000);

        return ib_send_mad(mad_send_wr);
}

static void abort_send(struct ib_mad_agent_private *agent,
                       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_mad_send_wc wc;
        unsigned long flags;

        spin_lock_irqsave(&agent->lock, flags);
        mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
        if (!mad_send_wr)
                goto out;       /* Unmatched send */

        if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
            (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
                goto out;       /* Send is already done */

        ib_mark_mad_done(mad_send_wr);
        spin_unlock_irqrestore(&agent->lock, flags);

        wc.status = IB_WC_REM_ABORT_ERR;
        wc.vendor_err = rmpp_status;
        wc.send_buf = &mad_send_wr->send_buf;
        ib_mad_complete_send_wr(mad_send_wr, &wc);
        return;
out:
        spin_unlock_irqrestore(&agent->lock, flags);
}

static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
                                   int seg_num)
{
        struct list_head *list;

        wr->last_ack = seg_num;
        list = &wr->last_ack_seg->list;
        list_for_each_entry(wr->last_ack_seg, list, list)
                if (wr->last_ack_seg->num == seg_num)
                        break;
}

static void process_ds_ack(struct ib_mad_agent_private *agent,
                           struct ib_mad_recv_wc *mad_recv_wc, int newwin)
{
        struct mad_rmpp_recv *rmpp_recv;

        rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
        if (rmpp_recv && rmpp_recv->state == RMPP_STATE_COMPLETE)
                rmpp_recv->repwin = newwin;
}

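/*
 * Process an incoming ACK: validate the segment number and window,
 * advance last_ack, and either complete the send, rearm its response
 * timeout, or transmit further segments allowed by the new window.
 */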
static void process_rmpp_ack(struct ib_mad_agent_private *agent,
                             struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_mad_send_wr_private *mad_send_wr;
        struct ib_rmpp_mad *rmpp_mad;
        unsigned long flags;
        int seg_num, newwin, ret;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
        if (rmpp_mad->rmpp_hdr.rmpp_status) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
                return;
        }

        seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
        newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
        if (newwin < seg_num) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
                return;
        }

        spin_lock_irqsave(&agent->lock, flags);
        mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
        if (!mad_send_wr) {
                if (!seg_num)
                        process_ds_ack(agent, mad_recv_wc, newwin);
                goto out;       /* Unmatched or DS RMPP ACK */
        }

        if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) &&
            (mad_send_wr->timeout)) {
                spin_unlock_irqrestore(&agent->lock, flags);
                ack_ds_ack(agent, mad_recv_wc);
                return;         /* Repeated ACK for DS RMPP transaction */
        }

        if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
            (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
                goto out;       /* Send is already done */

        if (seg_num > mad_send_wr->send_buf.seg_count ||
            seg_num > mad_send_wr->newwin) {
                spin_unlock_irqrestore(&agent->lock, flags);
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
                return;
        }

        if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
                goto out;       /* Old ACK */

        if (seg_num > mad_send_wr->last_ack) {
                adjust_last_ack(mad_send_wr, seg_num);
                mad_send_wr->retries_left = mad_send_wr->max_retries;
        }
        mad_send_wr->newwin = newwin;
        if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
                /* If no response is expected, the ACK completes the send */
                if (!mad_send_wr->send_buf.timeout_ms) {
                        struct ib_mad_send_wc wc;

                        ib_mark_mad_done(mad_send_wr);
                        spin_unlock_irqrestore(&agent->lock, flags);

                        wc.status = IB_WC_SUCCESS;
                        wc.vendor_err = 0;
                        wc.send_buf = &mad_send_wr->send_buf;
                        ib_mad_complete_send_wr(mad_send_wr, &wc);
                        return;
                }
                if (mad_send_wr->refcount == 1)
                        ib_reset_mad_timeout(mad_send_wr,
                                             mad_send_wr->send_buf.timeout_ms);
                spin_unlock_irqrestore(&agent->lock, flags);
                ack_ds_ack(agent, mad_recv_wc);
                return;
        } else if (mad_send_wr->refcount == 1 &&
                   mad_send_wr->seg_num < mad_send_wr->newwin &&
                   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
                /* Send failure will just result in a timeout/retry */
                ret = send_next_seg(mad_send_wr);
                if (ret)
                        goto out;

                mad_send_wr->refcount++;
                list_move_tail(&mad_send_wr->agent_list,
                              &mad_send_wr->mad_agent_priv->send_list);
        }
out:
        spin_unlock_irqrestore(&agent->lock, flags);
}

static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
                  struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_rmpp_hdr *rmpp_hdr;
        u8 rmpp_status;

        rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

        if (rmpp_hdr->rmpp_status) {
                rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
                goto bad;
        }

        if (rmpp_hdr->seg_num == cpu_to_be32(1)) {
                if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
                        rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
                        goto bad;
                }
                return start_rmpp(agent, mad_recv_wc);
        } else {
                if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
                        rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
                        goto bad;
                }
                return continue_rmpp(agent, mad_recv_wc);
        }
bad:
        nack_recv(agent, mad_recv_wc, rmpp_status);
        ib_free_recv_mad(mad_recv_wc);
        return NULL;
}

static void process_rmpp_stop(struct ib_mad_agent_private *agent,
                              struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

        if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
        } else
                abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
                               struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

        if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
            rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
        } else
                abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

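/*
 * Entry point for received MADs.  Non-RMPP MADs are passed through
 * untouched; RMPP DATA segments are reassembled and returned only when
 * complete, while ACK/STOP/ABORT are consumed here.
 */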
struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
                        struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_rmpp_mad *rmpp_mad;

        rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
        if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
                return mad_recv_wc;

        if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
                goto out;
        }

        switch (rmpp_mad->rmpp_hdr.rmpp_type) {
        case IB_MGMT_RMPP_TYPE_DATA:
                return process_rmpp_data(agent, mad_recv_wc);
        case IB_MGMT_RMPP_TYPE_ACK:
                process_rmpp_ack(agent, mad_recv_wc);
                break;
        case IB_MGMT_RMPP_TYPE_STOP:
                process_rmpp_stop(agent, mad_recv_wc);
                break;
        case IB_MGMT_RMPP_TYPE_ABORT:
                process_rmpp_abort(agent, mad_recv_wc);
                break;
        default:
                abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
                nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
                break;
        }
out:
        ib_free_recv_mad(mad_recv_wc);
        return NULL;
}

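/*
 * For a response being sent as part of a dual-sided transfer, start
 * with the window previously advertised by the peer's DS ACK (repwin)
 * for the matching receive instead of 1.
 */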
static int init_newwin(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_mad_agent_private *agent = mad_send_wr->mad_agent_priv;
        struct ib_mad_hdr *mad_hdr = mad_send_wr->send_buf.mad;
        struct mad_rmpp_recv *rmpp_recv;
        struct ib_ah_attr ah_attr;
        unsigned long flags;
        int newwin = 1;

        if (!(mad_hdr->method & IB_MGMT_METHOD_RESP))
                goto out;

        spin_lock_irqsave(&agent->lock, flags);
        list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
                if (rmpp_recv->tid != mad_hdr->tid ||
                    rmpp_recv->mgmt_class != mad_hdr->mgmt_class ||
                    rmpp_recv->class_version != mad_hdr->class_version ||
                    (rmpp_recv->method & IB_MGMT_METHOD_RESP))
                        continue;

                if (ib_query_ah(mad_send_wr->send_buf.ah, &ah_attr))
                        continue;

                if (rmpp_recv->slid == ah_attr.dlid) {
                        newwin = rmpp_recv->repwin;
                        break;
                }
        }
        spin_unlock_irqrestore(&agent->lock, flags);
out:
        return newwin;
}

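/*
 * Start an RMPP send.  Non-RMPP MADs are handed back to the caller and
 * ACK/STOP/ABORT are sent as single MADs by the core; for DATA the
 * first segment is posted here and the remaining segments are driven
 * by ACKs and send completions.
 */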
int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        rmpp_mad = mad_send_wr->send_buf.mad;
        if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED;

        if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
                mad_send_wr->seg_num = 1;
                return IB_RMPP_RESULT_INTERNAL;
        }

        mad_send_wr->newwin = init_newwin(mad_send_wr);

        /* We need to wait for the final ACK even if there isn't a response */
        mad_send_wr->refcount += (mad_send_wr->timeout == 0);
        ret = send_next_seg(mad_send_wr);
        if (!ret)
                return IB_RMPP_RESULT_CONSUMED;
        return ret;
}

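/*
 * Send-completion hook: once a segment has completed on the wire, post
 * the next one if the window allows, otherwise wait for an ACK or the
 * response.
 */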
int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
                            struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        rmpp_mad = mad_send_wr->send_buf.mad;
        if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

        if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
                return IB_RMPP_RESULT_INTERNAL;  /* ACK, STOP, or ABORT */

        if (mad_send_wc->status != IB_WC_SUCCESS ||
            mad_send_wr->status != IB_WC_SUCCESS)
                return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

        if (!mad_send_wr->timeout)
                return IB_RMPP_RESULT_PROCESSED; /* Response received */

        if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
                mad_send_wr->timeout =
                        msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
                return IB_RMPP_RESULT_PROCESSED; /* Send done */
        }

        if (mad_send_wr->seg_num == mad_send_wr->newwin ||
            mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
                return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

        ret = send_next_seg(mad_send_wr);
        if (ret) {
                mad_send_wc->status = IB_WC_GENERAL_ERR;
                return IB_RMPP_RESULT_PROCESSED;
        }
        return IB_RMPP_RESULT_CONSUMED;
}

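/*
 * Retry an RMPP send after a timeout: rewind to the last ACKed segment
 * and resend from there.
 */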
int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
        struct ib_rmpp_mad *rmpp_mad;
        int ret;

        rmpp_mad = mad_send_wr->send_buf.mad;
        if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
              IB_MGMT_RMPP_FLAG_ACTIVE))
                return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

        if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
                return IB_RMPP_RESULT_PROCESSED;

        mad_send_wr->seg_num = mad_send_wr->last_ack;
        mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

        ret = send_next_seg(mad_send_wr);
        if (ret)
                return IB_RMPP_RESULT_PROCESSED;

        return IB_RMPP_RESULT_CONSUMED;
}