kvmfornfv.git: kernel/drivers/misc/mic/scif/scif_rma_list.c (raw update to the linux-4.4.6-rt14 kernel sources)
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_main.h"
#include <linux/mmu_notifier.h>
#include <linux/highmem.h>

/*
 * scif_insert_tcw:
 *
 * Insert a temp window into the temp registration list, kept sorted by
 * va_for_temp.
 * RMA lock must be held.
 */
void scif_insert_tcw(struct scif_window *window, struct list_head *head)
{
        struct scif_window *curr = NULL;
        struct scif_window *prev = list_entry(head, struct scif_window, list);
        struct list_head *item;

        INIT_LIST_HEAD(&window->list);
        /* Compare with tail and if the entry is new tail add it to the end */
        if (!list_empty(head)) {
                curr = list_entry(head->prev, struct scif_window, list);
                if (curr->va_for_temp < window->va_for_temp) {
                        list_add_tail(&window->list, head);
                        return;
                }
        }
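        /*
         * Not a new tail: scan the sorted list for the first window with a
         * higher va_for_temp and insert the new window just before it
         * (i.e. after prev, which starts out as the list head).
         */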
        list_for_each(item, head) {
                curr = list_entry(item, struct scif_window, list);
                if (curr->va_for_temp > window->va_for_temp)
                        break;
                prev = curr;
        }
        list_add(&window->list, &prev->list);
}

/*
 * scif_insert_window:
 *
 * Insert a window into the self registration list, kept sorted by offset.
 * RMA lock must be held.
 */
void scif_insert_window(struct scif_window *window, struct list_head *head)
{
        struct scif_window *curr = NULL, *prev = NULL;
        struct list_head *item;

        INIT_LIST_HEAD(&window->list);
        list_for_each(item, head) {
                curr = list_entry(item, struct scif_window, list);
                if (curr->offset > window->offset)
                        break;
                prev = curr;
        }
        if (!prev)
                list_add(&window->list, head);
        else
                list_add(&window->list, &prev->list);
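        /* Hold one reference on the window for each page it covers */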
        scif_set_window_ref(window, window->nr_pages);
}
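
/*
 * Illustrative sketch (not code from the driver): how a caller that has
 * already allocated and initialized a self window might link it into the
 * endpoint's registration list. The reg_list head and rma_lock are the same
 * rma_info fields used elsewhere in this file; "ep" and "window" are assumed
 * to be set up by the caller.
 *
 *	mutex_lock(&ep->rma_info.rma_lock);
 *	scif_insert_window(window, &ep->rma_info.reg_list);
 *	mutex_unlock(&ep->rma_info.rma_lock);
 */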

/*
 * scif_query_tcw:
 *
 * Query the temp cached registration list of ep for a window overlapping the
 * requested range. On a permission mismatch, destroy the previous window. If
 * the permissions match but the overlap is only partial, destroy the window
 * and return the new (expanded) range via the request.
 * RMA lock must be held.
 */
int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *req)
{
        struct list_head *item, *temp, *head = req->head;
        struct scif_window *window;
        u64 start_va_window, start_va_req = req->va_for_temp;
        u64 end_va_window, end_va_req = start_va_req + req->nr_bytes;

        if (!req->nr_bytes)
                return -EINVAL;
        /*
         * Avoid traversing the entire list to find out that there
         * is no entry that matches
         */
        if (!list_empty(head)) {
                window = list_last_entry(head, struct scif_window, list);
                end_va_window = window->va_for_temp +
                        (window->nr_pages << PAGE_SHIFT);
                if (start_va_req > end_va_window)
                        return -ENXIO;
        }
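        /*
         * The temp list is kept sorted by va_for_temp (see scif_insert_tcw),
         * so walk it until a window overlapping the request is found or the
         * remaining windows all start past the end of the requested range.
         */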
        list_for_each_safe(item, temp, head) {
                window = list_entry(item, struct scif_window, list);
                start_va_window = window->va_for_temp;
                end_va_window = window->va_for_temp +
                        (window->nr_pages << PAGE_SHIFT);
                if (start_va_req < start_va_window &&
                    end_va_req < start_va_window)
                        break;
                if (start_va_req >= end_va_window)
                        continue;
                if ((window->prot & req->prot) == req->prot) {
                        if (start_va_req >= start_va_window &&
                            end_va_req <= end_va_window) {
                                *req->out_window = window;
                                return 0;
                        }
                        /* expand window */
                        if (start_va_req < start_va_window) {
                                req->nr_bytes +=
                                        start_va_window - start_va_req;
                                req->va_for_temp = start_va_window;
                        }
                        if (end_va_req >= end_va_window)
                                req->nr_bytes += end_va_window - end_va_req;
                }
                /* Destroy the old window to create a new one */
                __scif_rma_destroy_tcw_helper(window);
                break;
        }
        return -ENXIO;
}

/*
 * scif_query_window:
 *
 * Query the registration list and check whether a valid contiguous
 * range of windows exists.
 * RMA lock must be held.
 */
int scif_query_window(struct scif_rma_req *req)
{
        struct list_head *item;
        struct scif_window *window;
        s64 end_offset, offset = req->offset;
        u64 tmp_min, nr_bytes_left = req->nr_bytes;

        if (!req->nr_bytes)
                return -EINVAL;

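        /*
         * The self registration list is kept sorted by offset (see
         * scif_insert_window). Each time a window covers part of the request,
         * offset is advanced to the end of that coverage, so a hole between
         * windows shows up as offset < window->offset on the next iteration.
         */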
        list_for_each(item, req->head) {
                window = list_entry(item, struct scif_window, list);
                end_offset = window->offset +
                        (window->nr_pages << PAGE_SHIFT);
                if (offset < window->offset)
                        /* Offset not found! */
                        return -ENXIO;
                if (offset >= end_offset)
                        continue;
                /* Check read/write protections. */
                if ((window->prot & req->prot) != req->prot)
                        return -EPERM;
                if (nr_bytes_left == req->nr_bytes)
                        /* Store the first window */
                        *req->out_window = window;
                tmp_min = min((u64)end_offset - offset, nr_bytes_left);
                nr_bytes_left -= tmp_min;
                offset += tmp_min;
                /*
                 * Range requested encompasses
                 * multiple windows contiguously.
                 */
                if (!nr_bytes_left) {
                        /* Done for partial window */
                        if (req->type == SCIF_WINDOW_PARTIAL ||
                            req->type == SCIF_WINDOW_SINGLE)
                                return 0;
                        /* Extra logic for full windows */
                        if (offset == end_offset)
                                /* Spanning multiple whole windows */
                                return 0;
                        /* Not spanning multiple whole windows */
                        return -ENXIO;
                }
                if (req->type == SCIF_WINDOW_SINGLE)
                        break;
        }
        dev_err(scif_info.mdev.this_device,
                "%s %d ENXIO\n", __func__, __LINE__);
        return -ENXIO;
}
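
/*
 * Illustrative sketch (assumptions, not code from the driver): looking up the
 * single self window that backs a registered offset range. The field names on
 * struct scif_rma_req and the SCIF_WINDOW_SINGLE type are the ones this file
 * dereferences; the SCIF_PROT_* flags, "ep", "offset" and "len" are assumed
 * to come from the caller's context.
 *
 *	struct scif_window *window;
 *	struct scif_rma_req req = {
 *		.out_window = &window,
 *		.offset = offset,
 *		.nr_bytes = len,
 *		.prot = SCIF_PROT_READ | SCIF_PROT_WRITE,
 *		.type = SCIF_WINDOW_SINGLE,
 *		.head = &ep->rma_info.reg_list,
 *	};
 *	int err;
 *
 *	mutex_lock(&ep->rma_info.rma_lock);
 *	err = scif_query_window(&req);
 *	mutex_unlock(&ep->rma_info.rma_lock);
 */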

/*
 * scif_rma_list_unregister:
 *
 * Traverse the self registration list starting from window:
 * 1) Call scif_unregister_window(..)
 * RMA lock must be held.
 */
int scif_rma_list_unregister(struct scif_window *window,
                             s64 offset, int nr_pages)
{
        struct scif_endpt *ep = (struct scif_endpt *)window->ep;
        struct list_head *head = &ep->rma_info.reg_list;
        s64 end_offset;
        int err = 0;
        int loop_nr_pages;
        struct scif_window *_window;

        list_for_each_entry_safe_from(window, _window, head, list) {
                end_offset = window->offset + (window->nr_pages << PAGE_SHIFT);
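                /* Pages of the requested range covered by this window */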
                loop_nr_pages = min((int)((end_offset - offset) >> PAGE_SHIFT),
                                    nr_pages);
                err = scif_unregister_window(window);
                if (err)
                        return err;
                nr_pages -= loop_nr_pages;
                offset += (loop_nr_pages << PAGE_SHIFT);
                if (!nr_pages)
                        break;
        }
        return 0;
}

/*
 * scif_unmap_all_windows:
 *
 * Traverse all the windows in the self registration list and:
 * 1) Delete any DMA mappings created
 */
void scif_unmap_all_windows(scif_epd_t epd)
{
        struct list_head *item, *tmp;
        struct scif_window *window;
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        struct list_head *head = &ep->rma_info.reg_list;

        mutex_lock(&ep->rma_info.rma_lock);
        list_for_each_safe(item, tmp, head) {
                window = list_entry(item, struct scif_window, list);
                scif_unmap_window(ep->remote_dev, window);
        }
        mutex_unlock(&ep->rma_info.rma_lock);
}

/*
 * scif_unregister_all_windows:
 *
 * Traverse all the windows in the self registration list and:
 * 1) Call scif_unregister_window(..)
 * The RMA lock is taken internally.
 */
int scif_unregister_all_windows(scif_epd_t epd)
{
        struct list_head *item, *tmp;
        struct scif_window *window;
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        struct list_head *head = &ep->rma_info.reg_list;
        int err = 0;

        mutex_lock(&ep->rma_info.rma_lock);
retry:
        item = NULL;
        tmp = NULL;
        list_for_each_safe(item, tmp, head) {
                window = list_entry(item, struct scif_window, list);
                ep->rma_info.async_list_del = 0;
                err = scif_unregister_window(window);
                if (err)
                        dev_err(scif_info.mdev.this_device,
                                "%s %d err %d\n",
                                __func__, __LINE__, err);
                /*
                 * Need to restart list traversal if there has been
                 * an asynchronous list entry deletion.
                 */
                if (ACCESS_ONCE(ep->rma_info.async_list_del))
                        goto retry;
        }
        mutex_unlock(&ep->rma_info.rma_lock);
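        /*
         * If MMU notifiers are still registered on this endpoint, queue it
         * for deferred notifier cleanup on the mmu_notif_work worker.
         */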
        if (!list_empty(&ep->rma_info.mmn_list)) {
                spin_lock(&scif_info.rmalock);
                list_add_tail(&ep->mmu_list, &scif_info.mmu_notif_cleanup);
                spin_unlock(&scif_info.rmalock);
                schedule_work(&scif_info.mmu_notif_work);
        }
        return err;
}