kernel/drivers/hv/hv_balloon.c
/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>

/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */


/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 *
 * History:
 * Initial version 1.0
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */

#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)

enum {
        DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
        DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),

        DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
        DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,

        DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
};
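
/*
 * For example, DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0)
 * encodes as 0x00010000: major version 1 in the high word, minor
 * version 0 in the low word.
 */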


/*
 * Message Types
 */

enum dm_message_type {
        /*
         * Version 0.3
         */
        DM_ERROR                        = 0,
        DM_VERSION_REQUEST              = 1,
        DM_VERSION_RESPONSE             = 2,
        DM_CAPABILITIES_REPORT          = 3,
        DM_CAPABILITIES_RESPONSE        = 4,
        DM_STATUS_REPORT                = 5,
        DM_BALLOON_REQUEST              = 6,
        DM_BALLOON_RESPONSE             = 7,
        DM_UNBALLOON_REQUEST            = 8,
        DM_UNBALLOON_RESPONSE           = 9,
        DM_MEM_HOT_ADD_REQUEST          = 10,
        DM_MEM_HOT_ADD_RESPONSE         = 11,
        DM_VERSION_03_MAX               = 11,
        /*
         * Version 1.0.
         */
        DM_INFO_MESSAGE                 = 12,
        DM_VERSION_1_MAX                = 12
};


/*
 * Structures defining the dynamic memory management
 * protocol.
 */

union dm_version {
        struct {
                __u16 minor_version;
                __u16 major_version;
        };
        __u32 version;
} __packed;


union dm_caps {
        struct {
                __u64 balloon:1;
                __u64 hot_add:1;
                /*
                 * To support guests that may have alignment
                 * limitations on hot-add, the guest can specify
                 * its alignment requirements; a value of n
                 * represents an alignment of 2^n in megabytes.
                 */
                __u64 hot_add_alignment:4;
                __u64 reservedz:58;
        } cap_bits;
        __u64 caps;
} __packed;
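
/*
 * For example, hot_add_alignment = 7 requests 2^7 = 128MB alignment;
 * this is the value this driver reports in balloon_probe() below.
 */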

union dm_mem_page_range {
        struct  {
                /*
                 * The PFN of the first page in the range.
                 * 40 bits is the architectural limit of a PFN
                 * for AMD64.
                 */
                __u64 start_page:40;
                /*
                 * The number of pages in the range.
                 */
                __u64 page_cnt:24;
        } finfo;
        __u64  page_range;
} __packed;
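
/*
 * On a little-endian x86-64 build, start_page occupies the low 40 bits
 * and page_cnt the next 24; e.g. a 512-page range starting at PFN 0x1000
 * is encoded in page_range as (512ULL << 40) | 0x1000.
 */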



/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
        __u16 type;
        __u16 size;
        __u32 trans_id;
} __packed;

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
        struct dm_header hdr;
        __u8 data[]; /* enclosed message */
} __packed;


/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
        struct dm_header hdr;
        union dm_version version;
        __u32 is_last_attempt:1;
        __u32 reservedz:31;
} __packed;

/*
 * Version response message; sent from the host to the guest, indicating
 * whether the host has accepted the version sent by the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should re-try with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
        struct dm_header hdr;
        union dm_caps caps;
        __u64 min_page_cnt;
        __u64 max_page_number;
} __packed;

/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies if the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
        struct dm_header hdr;
        __u64 is_accepted:1;
        __u64 reservedz:63;
} __packed;

/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 *                 in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 *          calculated as File Cache Page Fault Count - Page Read Count.
 *          This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
        struct dm_header hdr;
        __u64 num_avail;
        __u64 num_committed;
        __u64 page_file_size;
        __u64 zero_free;
        __u32 page_file_writes;
        __u32 io_diff;
} __packed;


/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
        struct dm_header hdr;
        __u32 num_pages;
        __u32 reservedz;
} __packed;


/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 *
 */

struct dm_balloon_response {
        struct dm_header hdr;
        __u32 reservedz;
        __u32 more_pages:1;
        __u32 range_count:31;
        union dm_mem_page_range range_array[];
} __packed;
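
/*
 * The response is built in a single page (see alloc_balloon_pages() and
 * balloon_up() below), so one message carries at most roughly
 * (PAGE_SIZE - sizeof(struct dm_balloon_response)) /
 * sizeof(union dm_mem_page_range) ranges; more_pages is set while
 * further messages are needed.
 */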

/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the host.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the guest.
 *
 */

struct dm_unballoon_request {
        struct dm_header hdr;
        __u32 more_pages:1;
        __u32 reservedz:31;
        __u32 range_count;
        union dm_mem_page_range range_array[];
} __packed;

/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 *
 */

struct dm_unballoon_response {
        struct dm_header hdr;
} __packed;

/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux, hot add is supported only when CONFIG_MEMORY_HOTPLUG is
 * enabled, since we cannot hot add arbitrary granularity of memory.
 */

struct dm_hot_add {
        struct dm_header hdr;
        union dm_mem_page_range range;
} __packed;

/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation; 1: success, 0: failure.
 *
 */

struct dm_hot_add_response {
        struct dm_header hdr;
        __u32 page_count;
        __u32 result;
} __packed;

/*
 * Types of information sent from the host to the guest.
 */

enum dm_info_type {
        INFO_TYPE_MAX_PAGE_CNT = 0,
        MAX_INFO_TYPE
};


/*
 * Header for the information message.
 */

struct dm_info_header {
        enum dm_info_type type;
        __u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
        struct dm_header hdr;
        __u32 reserved;
        __u32 info_size;
        __u8  info[];
};

/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * start_pfn : covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
        struct list_head list;
        unsigned long start_pfn;
        unsigned long covered_end_pfn;
        unsigned long ha_end_pfn;
        unsigned long end_pfn;
};

struct balloon_state {
        __u32 num_pages;
        struct work_struct wrk;
};

struct hot_add_wrk {
        union dm_mem_page_range ha_page_range;
        union dm_mem_page_range ha_region_range;
        struct work_struct wrk;
};

static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to the host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set, attempt memory hot-add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
static atomic_t trans_id = ATOMIC_INIT(0);

static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_dm_state {
        DM_INITIALIZING = 0,
        DM_INITIALIZED,
        DM_BALLOON_UP,
        DM_BALLOON_DOWN,
        DM_HOT_ADD,
        DM_INIT_ERROR
};


static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M     512
#define HA_CHUNK (32 * 1024)
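
/*
 * Both constants are in units of pages: with 4K pages, PAGES_IN_2M (512)
 * is the 2MB granularity in which the host balloons, and HA_CHUNK
 * (32K pages == 128MB) is the granularity in which this driver hot adds
 * memory.
 */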

struct hv_dynmem_device {
        struct hv_device *dev;
        enum hv_dm_state state;
        struct completion host_event;
        struct completion config_event;

        /*
         * Number of pages we have currently ballooned out.
         */
        unsigned int num_pages_ballooned;
        unsigned int num_pages_onlined;
        unsigned int num_pages_added;

        /*
         * State to manage the ballooning (up) operation.
         */
        struct balloon_state balloon_wrk;

        /*
         * State to execute the "hot-add" operation.
         */
        struct hot_add_wrk ha_wrk;

        /*
         * This state tracks if the host has specified a hot-add
         * region.
         */
        bool host_specified_ha_region;

        /*
         * State to synchronize hot-add.
         */
        struct completion  ol_waitevent;
        bool ha_waiting;
        /*
         * This thread handles hot-add
         * requests from the host as well as notifying
         * the host with regards to memory pressure in
         * the guest.
         */
        struct task_struct *thread;

        struct mutex ha_region_mutex;

        /*
         * A list of hot-add regions.
         */
        struct list_head ha_region_list;

        /*
         * We start with the highest version we can support
         * and downgrade based on the host; we save here the
         * next version to try.
         */
        __u32 next_version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);

#ifdef CONFIG_MEMORY_HOTPLUG
static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
                              void *v)
{
        struct memory_notify *mem = (struct memory_notify *)v;

        switch (val) {
        case MEM_GOING_ONLINE:
                mutex_lock(&dm_device.ha_region_mutex);
                break;

        case MEM_ONLINE:
                dm_device.num_pages_onlined += mem->nr_pages;
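                /*
                 * Deliberate fall through: both MEM_ONLINE and
                 * MEM_CANCEL_ONLINE drop the mutex taken at
                 * MEM_GOING_ONLINE and wake up any waiter.
                 */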
        case MEM_CANCEL_ONLINE:
                mutex_unlock(&dm_device.ha_region_mutex);
                if (dm_device.ha_waiting) {
                        dm_device.ha_waiting = false;
                        complete(&dm_device.ol_waitevent);
                }
                break;

        case MEM_OFFLINE:
                mutex_lock(&dm_device.ha_region_mutex);
                dm_device.num_pages_onlined -= mem->nr_pages;
                mutex_unlock(&dm_device.ha_region_mutex);
                break;
        case MEM_GOING_OFFLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
        .notifier_call = hv_memory_notifier,
        .priority = 0
};


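/*
 * Bring 'size' pages starting at start_pfn online, page by page, using
 * the generic online helpers.
 */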
static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
{
        int i;

        for (i = 0; i < size; i++) {
                struct page *pg;
                pg = pfn_to_page(start_pfn + i);
                __online_page_set_limits(pg);
                __online_page_increment_counters(pg);
                __online_page_free(pg);
        }
}

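/*
 * Hot add pfn_count pages starting at 'start', one HA_CHUNK (128MB) at
 * a time, waiting (with a timeout) for each chunk to be onlined before
 * moving on. 'size' is the HA_CHUNK-aligned span to register; 'has'
 * tracks how far the region has been added and covered.
 */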
static void hv_mem_hot_add(unsigned long start, unsigned long size,
                                unsigned long pfn_count,
                                struct hv_hotadd_state *has)
{
        int ret = 0;
        int i, nid;
        unsigned long start_pfn;
        unsigned long processed_pfn;
        unsigned long total_pfn = pfn_count;

        for (i = 0; i < (size/HA_CHUNK); i++) {
                start_pfn = start + (i * HA_CHUNK);
                has->ha_end_pfn += HA_CHUNK;

                if (total_pfn > HA_CHUNK) {
                        processed_pfn = HA_CHUNK;
                        total_pfn -= HA_CHUNK;
                } else {
                        processed_pfn = total_pfn;
                        total_pfn = 0;
                }

                has->covered_end_pfn += processed_pfn;

                init_completion(&dm_device.ol_waitevent);
                dm_device.ha_waiting = true;

                mutex_unlock(&dm_device.ha_region_mutex);
                nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
                ret = add_memory(nid, PFN_PHYS((start_pfn)),
                                (HA_CHUNK << PAGE_SHIFT));

                if (ret) {
                        pr_info("hot_add memory failed, error is %d\n", ret);
                        if (ret == -EEXIST) {
                                /*
                                 * -EEXIST indicates that the failure
                                 * is not transient. This is the
                                 * case where the guest's physical address map
                                 * precludes hot adding memory. Stop all further
                                 * memory hot-add.
                                 */
                                do_hot_add = false;
                        }
                        has->ha_end_pfn -= HA_CHUNK;
                        has->covered_end_pfn -= processed_pfn;
                        mutex_lock(&dm_device.ha_region_mutex);
                        break;
                }

                /*
                 * Wait for the memory block to be onlined.
                 * Since the hot add has succeeded, it is ok to
                 * proceed even if the pages in the hot added region
                 * have not been "onlined" within the allowed time.
                 */
                wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
                mutex_lock(&dm_device.ha_region_mutex);
                post_status(&dm_device);
        }

        return;
}

static void hv_online_page(struct page *pg)
{
        struct list_head *cur;
        struct hv_hotadd_state *has;
        unsigned long cur_start_pgp;
        unsigned long cur_end_pgp;

        list_for_each(cur, &dm_device.ha_region_list) {
                has = list_entry(cur, struct hv_hotadd_state, list);
                cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn);
                cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

                if (((unsigned long)pg >= cur_start_pgp) &&
                        ((unsigned long)pg < cur_end_pgp)) {
                        /*
                         * This frame is currently backed; online the
                         * page.
                         */
                        __online_page_set_limits(pg);
                        __online_page_increment_counters(pg);
                        __online_page_free(pg);
                }
        }
}

static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
        struct list_head *cur;
        struct hv_hotadd_state *has;
        unsigned long residual, new_inc;

        if (list_empty(&dm_device.ha_region_list))
                return false;

        list_for_each(cur, &dm_device.ha_region_list) {
                has = list_entry(cur, struct hv_hotadd_state, list);

                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if ((start_pfn >= has->end_pfn))
                        continue;
                /*
                 * If the current hot-add request extends beyond
                 * our current limit, extend it.
                 */
                if ((start_pfn + pfn_cnt) > has->end_pfn) {
                        residual = (start_pfn + pfn_cnt - has->end_pfn);
                        /*
                         * Extend the region by multiples of HA_CHUNK.
                         */
                        new_inc = (residual / HA_CHUNK) * HA_CHUNK;
                        if (residual % HA_CHUNK)
                                new_inc += HA_CHUNK;

                        has->end_pfn += new_inc;
                }

                /*
                 * If the current start pfn is not where the covered_end
                 * is, update it.
                 */

                if (has->covered_end_pfn != start_pfn)
                        has->covered_end_pfn = start_pfn;

                return true;

        }

        return false;
}

static unsigned long handle_pg_range(unsigned long pg_start,
                                        unsigned long pg_count)
{
        unsigned long start_pfn = pg_start;
        unsigned long pfn_cnt = pg_count;
        unsigned long size;
        struct list_head *cur;
        struct hv_hotadd_state *has;
        unsigned long pgs_ol = 0;
        unsigned long old_covered_state;

        if (list_empty(&dm_device.ha_region_list))
                return 0;

        list_for_each(cur, &dm_device.ha_region_list) {
                has = list_entry(cur, struct hv_hotadd_state, list);

                /*
                 * If the pfn range we are dealing with is not in the current
                 * "hot add block", move on.
                 */
                if ((start_pfn >= has->end_pfn))
                        continue;

                old_covered_state = has->covered_end_pfn;

                if (start_pfn < has->ha_end_pfn) {
                        /*
                         * This is the case where we are backing pages
                         * in an already hot added region. Bring
                         * these pages online first.
                         */
                        pgs_ol = has->ha_end_pfn - start_pfn;
                        if (pgs_ol > pfn_cnt)
                                pgs_ol = pfn_cnt;

                        /*
                         * Check if the corresponding memory block is already
                         * online by checking its last previously backed page.
                         * In case it is we need to bring the rest (which was
                         * not backed previously) online too.
                         */
                        if (start_pfn > has->start_pfn &&
                            !PageReserved(pfn_to_page(start_pfn - 1)))
                                hv_bring_pgs_online(start_pfn, pgs_ol);

                        has->covered_end_pfn += pgs_ol;
                        pfn_cnt -= pgs_ol;
                }

                if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
                        /*
                         * We have some residual hot add range
                         * that needs to be hot added; hot add
                         * it now. Hot add a multiple of
                         * HA_CHUNK that fully covers the pages
                         * we have.
                         */
                        size = (has->end_pfn - has->ha_end_pfn);
                        if (pfn_cnt <= size) {
                                size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
                                if (pfn_cnt % HA_CHUNK)
                                        size += HA_CHUNK;
                        } else {
                                pfn_cnt = size;
                        }
                        hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
                }
                /*
                 * If we managed to online any pages that were given to us,
                 * we declare success.
                 */
                return has->covered_end_pfn - old_covered_state;

        }

        return 0;
}

static unsigned long process_hot_add(unsigned long pg_start,
                                        unsigned long pfn_cnt,
                                        unsigned long rg_start,
                                        unsigned long rg_size)
{
        struct hv_hotadd_state *ha_region = NULL;

        if (pfn_cnt == 0)
                return 0;

        if (!dm_device.host_specified_ha_region)
                if (pfn_covered(pg_start, pfn_cnt))
                        goto do_pg_range;

        /*
         * If the host has specified a hot-add range, deal with it first.
         */

        if (rg_size != 0) {
                ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
                if (!ha_region)
                        return 0;

                INIT_LIST_HEAD(&ha_region->list);

                list_add_tail(&ha_region->list, &dm_device.ha_region_list);
                ha_region->start_pfn = rg_start;
                ha_region->ha_end_pfn = rg_start;
                ha_region->covered_end_pfn = pg_start;
                ha_region->end_pfn = rg_start + rg_size;
        }

do_pg_range:
        /*
         * Process the page range specified, bringing the pages
         * online if possible.
         */
        return handle_pg_range(pg_start, pfn_cnt);
}

#endif

static void hot_add_req(struct work_struct *dummy)
{
        struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
        unsigned long pg_start, pfn_cnt;
        unsigned long rg_start, rg_sz;
#endif
        struct hv_dynmem_device *dm = &dm_device;

        memset(&resp, 0, sizeof(struct dm_hot_add_response));
        resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
        resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
        mutex_lock(&dm_device.ha_region_mutex);
        pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
        pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

        rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
        rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

        if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
                unsigned long region_size;
                unsigned long region_start;

                /*
                 * The host has not specified the hot-add region.
                 * Based on the hot-add page range being specified,
                 * compute a hot-add region that can cover the pages
                 * that need to be hot-added while ensuring the alignment
                 * and size requirements of Linux as it relates to hot-add.
                 */
                region_start = pg_start;
                region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
                if (pfn_cnt % HA_CHUNK)
                        region_size += HA_CHUNK;

                region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

                rg_start = region_start;
                rg_sz = region_size;
        }

        if (do_hot_add)
                resp.page_count = process_hot_add(pg_start, pfn_cnt,
                                                rg_start, rg_sz);

        dm->num_pages_added += resp.page_count;
        mutex_unlock(&dm_device.ha_region_mutex);
#endif
        /*
         * The result field of the response structure has the
         * following semantics:
         *
         * 1. If all or some pages hot-added: Guest should return success.
         *
         * 2. If no pages could be hot-added:
         *
         * If the guest returns success, then the host
         * will not attempt any further hot-add operations. This
         * signifies a permanent failure.
         *
         * If the guest returns failure, then this failure will be
         * treated as a transient failure and the host may retry the
         * hot-add operation after some delay.
         */
        if (resp.page_count > 0)
                resp.result = 1;
        else if (!do_hot_add)
                resp.result = 1;
        else
                resp.result = 0;

        if (!do_hot_add || (resp.page_count == 0))
                pr_info("Memory hot add failed\n");

        dm->state = DM_INITIALIZED;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        vmbus_sendpacket(dm->dev->channel, &resp,
                        sizeof(struct dm_hot_add_response),
                        (unsigned long)NULL,
                        VM_PKT_DATA_INBAND, 0);
}

static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
        struct dm_info_header *info_hdr;

        info_hdr = (struct dm_info_header *)msg->info;

        switch (info_hdr->type) {
        case INFO_TYPE_MAX_PAGE_CNT:
                pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
                pr_info("Data Size is %d\n", info_hdr->data_size);
                break;
        default:
                pr_info("Received Unknown type: %d\n", info_hdr->type);
        }
}

static unsigned long compute_balloon_floor(void)
{
        unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
        /* Simple continuous piecewise linear function:
         *  max MiB -> min MiB  gradient
         *       0         0
         *      16        16
         *      32        24
         *     128        72    (1/2)
         *     512       168    (1/4)
         *    2048       360    (1/8)
         *    8192       744    (1/16)
         *   32768      1512    (1/32)
         */
        if (totalram_pages < MB2PAGES(128))
                min_pages = MB2PAGES(8) + (totalram_pages >> 1);
        else if (totalram_pages < MB2PAGES(512))
                min_pages = MB2PAGES(40) + (totalram_pages >> 2);
        else if (totalram_pages < MB2PAGES(2048))
                min_pages = MB2PAGES(104) + (totalram_pages >> 3);
        else if (totalram_pages < MB2PAGES(8192))
                min_pages = MB2PAGES(232) + (totalram_pages >> 4);
        else
                min_pages = MB2PAGES(488) + (totalram_pages >> 5);
#undef MB2PAGES
        return min_pages;
}
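
/*
 * Worked example: a guest with 4096 MiB of RAM falls in the 2048-8192
 * bracket above, so the floor is MB2PAGES(232) + 4096/16 MiB, i.e.
 * 232 + 256 = 488 MiB; ballooning will not drive free memory below that.
 */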

/*
 * Post our status as it relates to memory pressure to the
 * host. The host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */

static void post_status(struct hv_dynmem_device *dm)
{
        struct dm_status status;
        struct sysinfo val;
        unsigned long now = jiffies;
        unsigned long last_post = last_post_time;

        if (pressure_report_delay > 0) {
                --pressure_report_delay;
                return;
        }

        if (!time_after(now, (last_post_time + HZ)))
                return;

        si_meminfo(&val);
        memset(&status, 0, sizeof(struct dm_status));
        status.hdr.type = DM_STATUS_REPORT;
        status.hdr.size = sizeof(struct dm_status);
        status.hdr.trans_id = atomic_inc_return(&trans_id);

        /*
         * The host expects the guest to report free and committed memory.
         * Furthermore, the host expects the pressure information to include
         * the ballooned out pages. For a given amount of memory that we are
         * managing we need to compute a floor below which we should not
         * balloon. Compute this and add it to the pressure report.
         * We also need to report all offline pages (num_pages_added -
         * num_pages_onlined) as committed to the host, otherwise it can try
         * asking us to balloon them out.
         */
        status.num_avail = val.freeram;
        status.num_committed = vm_memory_committed() +
                dm->num_pages_ballooned +
                (dm->num_pages_added > dm->num_pages_onlined ?
                 dm->num_pages_added - dm->num_pages_onlined : 0) +
                compute_balloon_floor();

        /*
         * If our transaction ID is no longer current, just don't
         * send the status. This can happen if we were interrupted
         * after we picked our transaction ID.
         */
        if (status.hdr.trans_id != atomic_read(&trans_id))
                return;

        /*
         * If the last post time that we sampled has changed,
         * we have raced; don't post the status.
         */
        if (last_post != last_post_time)
                return;

        last_post_time = jiffies;
        vmbus_sendpacket(dm->dev->channel, &status,
                                sizeof(struct dm_status),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

}

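/*
 * Return to the kernel a range of pages that were previously
 * ballooned out by alloc_balloon_pages().
 */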
static void free_balloon_pages(struct hv_dynmem_device *dm,
                         union dm_mem_page_range *range_array)
{
        int num_pages = range_array->finfo.page_cnt;
        __u64 start_frame = range_array->finfo.start_page;
        struct page *pg;
        int i;

        for (i = 0; i < num_pages; i++) {
                pg = pfn_to_page(i + start_frame);
                __free_page(pg);
                dm->num_pages_ballooned--;
        }
}


static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
                                        unsigned int num_pages,
                                        struct dm_balloon_response *bl_resp,
                                        int alloc_unit)
{
        unsigned int i = 0;
        struct page *pg;

        if (num_pages < alloc_unit)
                return 0;

        for (i = 0; (i * alloc_unit) < num_pages; i++) {
                if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
                        PAGE_SIZE)
                        return i * alloc_unit;

                /*
                 * We execute this code in a thread context. Furthermore,
                 * we don't want the kernel to try too hard.
                 */
                pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
                                __GFP_NOMEMALLOC | __GFP_NOWARN,
                                get_order(alloc_unit << PAGE_SHIFT));

                if (!pg)
                        return i * alloc_unit;

                dm->num_pages_ballooned += alloc_unit;

                /*
                 * If we allocated 2M pages, split them so we
                 * can free them in any order we get.
                 */

                if (alloc_unit != 1)
                        split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

                bl_resp->range_count++;
                bl_resp->range_array[i].finfo.start_page =
                        page_to_pfn(pg);
                bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
                bl_resp->hdr.size += sizeof(union dm_mem_page_range);

        }

        return num_pages;
}


static void balloon_up(struct work_struct *dummy)
{
        unsigned int num_pages = dm_device.balloon_wrk.num_pages;
        unsigned int num_ballooned = 0;
        struct dm_balloon_response *bl_resp;
        int alloc_unit;
        int ret;
        bool done = false;
        int i;
        struct sysinfo val;
        unsigned long floor;

        /* The host balloons pages in 2M granularity. */
        WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

        /*
         * We will attempt 2M allocations. However, if we fail to
         * allocate 2M chunks, we will go back to 4k allocations.
         */
        alloc_unit = 512;

        si_meminfo(&val);
        floor = compute_balloon_floor();

        /* Refuse to balloon below the floor, keep the 2M granularity. */
        if (val.freeram < num_pages || val.freeram - num_pages < floor) {
                num_pages = val.freeram > floor ? (val.freeram - floor) : 0;
                num_pages -= num_pages % PAGES_IN_2M;
        }

        while (!done) {
                bl_resp = (struct dm_balloon_response *)send_buffer;
                memset(send_buffer, 0, PAGE_SIZE);
                bl_resp->hdr.type = DM_BALLOON_RESPONSE;
                bl_resp->hdr.size = sizeof(struct dm_balloon_response);
                bl_resp->more_pages = 1;


                num_pages -= num_ballooned;
                num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
                                                    bl_resp, alloc_unit);

                if (alloc_unit != 1 && num_ballooned == 0) {
                        alloc_unit = 1;
                        continue;
                }

                if (num_ballooned == 0 || num_ballooned == num_pages) {
                        bl_resp->more_pages = 0;
                        done = true;
                        dm_device.state = DM_INITIALIZED;
                }

                /*
                 * We are pushing a lot of data through the channel;
                 * deal with transient failures caused by the lack of
                 * space in the ring buffer.
                 */

                do {
                        bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
                        ret = vmbus_sendpacket(dm_device.dev->channel,
                                                bl_resp,
                                                bl_resp->hdr.size,
                                                (unsigned long)NULL,
                                                VM_PKT_DATA_INBAND, 0);

                        if (ret == -EAGAIN)
                                msleep(20);
                        post_status(&dm_device);
                } while (ret == -EAGAIN);

                if (ret) {
                        /*
                         * Free up the memory we allocated.
                         */
                        pr_info("Balloon response failed\n");

                        for (i = 0; i < bl_resp->range_count; i++)
                                free_balloon_pages(&dm_device,
                                                 &bl_resp->range_array[i]);

                        done = true;
                }
        }

}

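/*
 * Handle a DM_UNBALLOON_REQUEST from the host: free the ballooned page
 * ranges named in the request and, once the last message of the
 * transaction has arrived, send a DM_UNBALLOON_RESPONSE.
 */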
static void balloon_down(struct hv_dynmem_device *dm,
                        struct dm_unballoon_request *req)
{
        union dm_mem_page_range *range_array = req->range_array;
        int range_count = req->range_count;
        struct dm_unballoon_response resp;
        int i;

        for (i = 0; i < range_count; i++) {
                free_balloon_pages(dm, &range_array[i]);
                complete(&dm_device.config_event);
        }

        if (req->more_pages == 1)
                return;

        memset(&resp, 0, sizeof(struct dm_unballoon_response));
        resp.hdr.type = DM_UNBALLOON_RESPONSE;
        resp.hdr.trans_id = atomic_inc_return(&trans_id);
        resp.hdr.size = sizeof(struct dm_unballoon_response);

        vmbus_sendpacket(dm_device.dev->channel, &resp,
                                sizeof(struct dm_unballoon_response),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

        dm->state = DM_INITIALIZED;
}

static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
        struct hv_dynmem_device *dm = dm_dev;

        while (!kthread_should_stop()) {
                wait_for_completion_interruptible_timeout(
                                                &dm_device.config_event, 1*HZ);
                /*
                 * The host expects us to post information on the memory
                 * pressure every second.
                 */
                reinit_completion(&dm_device.config_event);
                post_status(dm);
        }

        return 0;
}


static void version_resp(struct hv_dynmem_device *dm,
                        struct dm_version_response *vresp)
{
        struct dm_version_request version_req;
        int ret;

        if (vresp->is_accepted) {
                /*
                 * We are done; wake up the
                 * context waiting for version
                 * negotiation.
                 */
                complete(&dm->host_event);
                return;
        }
        /*
         * If there are more versions to try, continue
         * with negotiations; if not,
         * shut down the service since we are not able
         * to negotiate a suitable version number
         * with the host.
         */
        if (dm->next_version == 0)
                goto version_error;

        dm->next_version = 0;
        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
        version_req.is_last_attempt = 1;

        ret = vmbus_sendpacket(dm->dev->channel, &version_req,
                                sizeof(struct dm_version_request),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);

        if (ret)
                goto version_error;

        return;

version_error:
        dm->state = DM_INIT_ERROR;
        complete(&dm->host_event);
}

static void cap_resp(struct hv_dynmem_device *dm,
                        struct dm_capabilities_resp_msg *cap_resp)
{
        if (!cap_resp->is_accepted) {
                pr_info("Capabilities not accepted by host\n");
                dm->state = DM_INIT_ERROR;
        }
        complete(&dm->host_event);
}

static void balloon_onchannelcallback(void *context)
{
        struct hv_device *dev = context;
        u32 recvlen;
        u64 requestid;
        struct dm_message *dm_msg;
        struct dm_header *dm_hdr;
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct dm_balloon *bal_msg;
        struct dm_hot_add *ha_msg;
        union dm_mem_page_range *ha_pg_range;
        union dm_mem_page_range *ha_region;

        memset(recv_buffer, 0, sizeof(recv_buffer));
        vmbus_recvpacket(dev->channel, recv_buffer,
                         PAGE_SIZE, &recvlen, &requestid);

        if (recvlen > 0) {
                dm_msg = (struct dm_message *)recv_buffer;
                dm_hdr = &dm_msg->hdr;

                switch (dm_hdr->type) {
                case DM_VERSION_RESPONSE:
                        version_resp(dm,
                                 (struct dm_version_response *)dm_msg);
                        break;

                case DM_CAPABILITIES_RESPONSE:
                        cap_resp(dm,
                                 (struct dm_capabilities_resp_msg *)dm_msg);
                        break;

                case DM_BALLOON_REQUEST:
                        if (dm->state == DM_BALLOON_UP)
                                pr_warn("Currently ballooning\n");
                        bal_msg = (struct dm_balloon *)recv_buffer;
                        dm->state = DM_BALLOON_UP;
                        dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
                        schedule_work(&dm_device.balloon_wrk.wrk);
                        break;

                case DM_UNBALLOON_REQUEST:
                        dm->state = DM_BALLOON_DOWN;
                        balloon_down(dm,
                                 (struct dm_unballoon_request *)recv_buffer);
                        break;

                case DM_MEM_HOT_ADD_REQUEST:
                        if (dm->state == DM_HOT_ADD)
                                pr_warn("Currently hot-adding\n");
                        dm->state = DM_HOT_ADD;
                        ha_msg = (struct dm_hot_add *)recv_buffer;
                        if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
                                /*
                                 * This is a normal hot-add request specifying
                                 * hot-add memory.
                                 */
                                ha_pg_range = &ha_msg->range;
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range.page_range = 0;
                        } else {
                                /*
                                 * Host is specifying that we first hot-add
                                 * a region and then partially populate this
                                 * region.
                                 */
                                dm->host_specified_ha_region = true;
                                ha_pg_range = &ha_msg->range;
                                ha_region = &ha_pg_range[1];
                                dm->ha_wrk.ha_page_range = *ha_pg_range;
                                dm->ha_wrk.ha_region_range = *ha_region;
                        }
                        schedule_work(&dm_device.ha_wrk.wrk);
                        break;

                case DM_INFO_MESSAGE:
                        process_info(dm, (struct dm_info_msg *)dm_msg);
                        break;

                default:
                        pr_err("Unhandled message: type: %d\n", dm_hdr->type);

                }
        }

}

static int balloon_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
{
        int ret;
        unsigned long t;
        struct dm_version_request version_req;
        struct dm_capabilities cap_msg;

        do_hot_add = hot_add;

        /*
         * First allocate a send buffer.
         */

        send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!send_buffer)
                return -ENOMEM;

        ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
                        balloon_onchannelcallback, dev);

        if (ret)
                goto probe_error0;

        dm_device.dev = dev;
        dm_device.state = DM_INITIALIZING;
        dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
        init_completion(&dm_device.host_event);
        init_completion(&dm_device.config_event);
        INIT_LIST_HEAD(&dm_device.ha_region_list);
        mutex_init(&dm_device.ha_region_mutex);
        INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
        INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
        dm_device.host_specified_ha_region = false;

        dm_device.thread =
                 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
        if (IS_ERR(dm_device.thread)) {
                ret = PTR_ERR(dm_device.thread);
                goto probe_error1;
        }

#ifdef CONFIG_MEMORY_HOTPLUG
        set_online_page_callback(&hv_online_page);
        register_memory_notifier(&hv_memory_nb);
#endif

        hv_set_drvdata(dev, &dm_device);
        /*
         * Initiate the handshake with the host and negotiate
         * a version that the host can support. We start with the
         * highest version number and go down if the host cannot
         * support it.
         */
        memset(&version_req, 0, sizeof(struct dm_version_request));
        version_req.hdr.type = DM_VERSION_REQUEST;
        version_req.hdr.size = sizeof(struct dm_version_request);
        version_req.hdr.trans_id = atomic_inc_return(&trans_id);
        version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
        version_req.is_last_attempt = 0;

        ret = vmbus_sendpacket(dev->channel, &version_req,
                                sizeof(struct dm_version_request),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If we could not negotiate a compatible version with the host,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }
        /*
         * Now submit our capabilities to the host.
         */
        memset(&cap_msg, 0, sizeof(struct dm_capabilities));
        cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
        cap_msg.hdr.size = sizeof(struct dm_capabilities);
        cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

        cap_msg.caps.cap_bits.balloon = 1;
        cap_msg.caps.cap_bits.hot_add = 1;

        /*
         * Specify our alignment requirements as they relate to
         * memory hot-add. Specify 128MB alignment.
         */
        cap_msg.caps.cap_bits.hot_add_alignment = 7;

        /*
         * Currently the host does not use these
         * values and we set them to what is done in the
         * Windows driver.
         */
        cap_msg.min_page_cnt = 0;
        cap_msg.max_page_number = -1;

        ret = vmbus_sendpacket(dev->channel, &cap_msg,
                                sizeof(struct dm_capabilities),
                                (unsigned long)NULL,
                                VM_PKT_DATA_INBAND, 0);
        if (ret)
                goto probe_error2;

        t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        /*
         * If the host does not like our capabilities,
         * fail the probe function.
         */
        if (dm_device.state == DM_INIT_ERROR) {
                ret = -ETIMEDOUT;
                goto probe_error2;
        }

        dm_device.state = DM_INITIALIZED;

        return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
#endif
        kthread_stop(dm_device.thread);

probe_error1:
        vmbus_close(dev->channel);
probe_error0:
        kfree(send_buffer);
        return ret;
}

static int balloon_remove(struct hv_device *dev)
{
        struct hv_dynmem_device *dm = hv_get_drvdata(dev);
        struct list_head *cur, *tmp;
        struct hv_hotadd_state *has;

        if (dm->num_pages_ballooned != 0)
                pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

        cancel_work_sync(&dm->balloon_wrk.wrk);
        cancel_work_sync(&dm->ha_wrk.wrk);

        vmbus_close(dev->channel);
        kthread_stop(dm->thread);
        kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
        restore_online_page_callback(&hv_online_page);
        unregister_memory_notifier(&hv_memory_nb);
#endif
        list_for_each_safe(cur, tmp, &dm->ha_region_list) {
                has = list_entry(cur, struct hv_hotadd_state, list);
                list_del(&has->list);
                kfree(has);
        }

        return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
        /* Dynamic Memory Class ID */
        /* 525074DC-8985-46e2-8057-A307DC18A502 */
        { HV_DM_GUID, },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
        .name = "hv_balloon",
        .id_table = id_table,
        .probe = balloon_probe,
        .remove = balloon_remove,
};

static int __init init_balloon_drv(void)
{

        return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");