Add qemu 2.4.0
[kvmfornfv.git] / qemu / include / exec / ram_addr.h
1 /*
2  * Declarations for cpu physical memory functions
3  *
4  * Copyright 2011 Red Hat, Inc. and/or its affiliates
5  *
6  * Authors:
7  *  Avi Kivity <avi@redhat.com>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2 or
10  * later.  See the COPYING file in the top-level directory.
11  *
12  */
13
14 /*
15  * This header is for use by exec.c and memory.c ONLY.  Do not include it.
16  * The functions declared here will be removed soon.
17  */
18
19 #ifndef RAM_ADDR_H
20 #define RAM_ADDR_H
21
22 #ifndef CONFIG_USER_ONLY
23 #include "hw/xen/xen.h"
24
/*
 * RAM block allocation (defined in exec.c).  Each allocator returns the
 * ram_addr_t base of the new block inside memory region 'mr'; failures are
 * reported through 'errp'.
 */
/* Block backed by a file under 'mem_path'; 'share' selects MAP_SHARED. */
ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
                                    bool share, const char *mem_path,
                                    Error **errp);
/* Wrap an existing host buffer 'host' as a RAM block. */
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr, Error **errp);
/* Plain anonymous-memory allocation. */
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp);
/* Block that can later grow up to 'max_size' via qemu_ram_resize();
 * 'resized' is presumably called back when the usable length changes
 * (NOTE(review): confirm callback contract in exec.c). */
ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t max_size,
                                     void (*resized)(const char*,
                                                     uint64_t length,
                                                     void *host),
                                     MemoryRegion *mr, Error **errp);
/* Accessors for the RAM block containing 'addr'. */
int qemu_get_ram_fd(ram_addr_t addr);
void *qemu_get_ram_block_host_ptr(ram_addr_t addr);
void *qemu_get_ram_ptr(ram_addr_t addr);
/* Release the RAM block containing 'addr'. */
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);

/* Resize a resizeable block; error convention is defined in exec.c. */
int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp);

/* Bit masks over the DIRTY_MEMORY_* client indexes. */
#define DIRTY_CLIENTS_ALL     ((1 << DIRTY_MEMORY_NUM) - 1)
#define DIRTY_CLIENTS_NOCODE  (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE))
46
47 static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
48                                                  ram_addr_t length,
49                                                  unsigned client)
50 {
51     unsigned long end, page, next;
52
53     assert(client < DIRTY_MEMORY_NUM);
54
55     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
56     page = start >> TARGET_PAGE_BITS;
57     next = find_next_bit(ram_list.dirty_memory[client], end, page);
58
59     return next < end;
60 }
61
62 static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
63                                                  ram_addr_t length,
64                                                  unsigned client)
65 {
66     unsigned long end, page, next;
67
68     assert(client < DIRTY_MEMORY_NUM);
69
70     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
71     page = start >> TARGET_PAGE_BITS;
72     next = find_next_zero_bit(ram_list.dirty_memory[client], end, page);
73
74     return next >= end;
75 }
76
77 static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr,
78                                                       unsigned client)
79 {
80     return cpu_physical_memory_get_dirty(addr, 1, client);
81 }
82
83 static inline bool cpu_physical_memory_is_clean(ram_addr_t addr)
84 {
85     bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA);
86     bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE);
87     bool migration =
88         cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION);
89     return !(vga && code && migration);
90 }
91
92 static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start,
93                                                                ram_addr_t length,
94                                                                uint8_t mask)
95 {
96     uint8_t ret = 0;
97
98     if (mask & (1 << DIRTY_MEMORY_VGA) &&
99         !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) {
100         ret |= (1 << DIRTY_MEMORY_VGA);
101     }
102     if (mask & (1 << DIRTY_MEMORY_CODE) &&
103         !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) {
104         ret |= (1 << DIRTY_MEMORY_CODE);
105     }
106     if (mask & (1 << DIRTY_MEMORY_MIGRATION) &&
107         !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) {
108         ret |= (1 << DIRTY_MEMORY_MIGRATION);
109     }
110     return ret;
111 }
112
113 static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
114                                                       unsigned client)
115 {
116     assert(client < DIRTY_MEMORY_NUM);
117     set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
118 }
119
120 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
121                                                        ram_addr_t length,
122                                                        uint8_t mask)
123 {
124     unsigned long end, page;
125     unsigned long **d = ram_list.dirty_memory;
126
127     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
128     page = start >> TARGET_PAGE_BITS;
129     if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
130         bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
131     }
132     if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
133         bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
134     }
135     if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
136         bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
137     }
138     xen_modified_memory(start, length);
139 }
140
#if !defined(_WIN32)
/*
 * Fold an externally produced little-endian dirty bitmap (e.g. from a
 * hypervisor dirty-log interface) into QEMU's per-client dirty bitmaps.
 *
 * bitmap: source bitmap, little-endian word order
 * start:  guest ram_addr_t corresponding to the bitmap's first bit
 * pages:  number of pages covered by the bitmap
 *         NOTE(review): in the unaligned path one source bit is expanded
 *         by 'hpratio', which suggests the source granularity is host
 *         pages — confirm against callers.
 */
static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                          ram_addr_t start,
                                                          ram_addr_t pages)
{
    unsigned long i, j;
    unsigned long page_number, c;
    hwaddr addr;
    ram_addr_t ram_addr;
    /* number of long-sized words in the source bitmap */
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    /* host pages may be larger than target pages; one source bit then
     * dirties 'hpratio' consecutive target pages */
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);

    /* start address is aligned at the start of a word? */
    if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) &&
        (hpratio == 1)) {
        /* Fast path: source and destination line up word-for-word, so each
         * nonzero source word is OR-ed straight into the destination. */
        long k;
        long nr = BITS_TO_LONGS(pages);

        for (k = 0; k < nr; k++) {
            if (bitmap[k]) {
                unsigned long temp = leul_to_cpu(bitmap[k]);
                unsigned long **d = ram_list.dirty_memory;

                atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
                atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
                if (tcg_enabled()) {
                    /* only TCG consumes the code-dirty bitmap */
                    atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
                }
            }
        }
        xen_modified_memory(start, pages << TARGET_PAGE_BITS);
    } else {
        uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
        /*
         * bitmap-traveling is faster than memory-traveling (for addr...)
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    /* extract and clear the lowest set bit */
                    j = ctzl(c);
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                       TARGET_PAGE_SIZE * hpratio, clients);
                } while (c != 0);
            }
        }
    }
}
#endif /* not _WIN32 */
196
/* Clear the dirty flag of 'client' over [start, start + length); the return
 * value indicates whether any page in the range was dirty (used below by
 * cpu_physical_memory_sync_dirty_bitmap to count pages). */
bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
                                              ram_addr_t length,
                                              unsigned client);
200
201 static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
202                                                          ram_addr_t length)
203 {
204     cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION);
205     cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA);
206     cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE);
207 }
208
209
/*
 * Move the MIGRATION dirty bits for [start, start + length) into the
 * caller-supplied bitmap 'dest', clearing them from the global bitmap.
 * Returns the number of pages that were dirty in the source but not yet
 * set in 'dest'.
 */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
                                               ram_addr_t start,
                                               ram_addr_t length)
{
    ram_addr_t addr;
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    uint64_t num_dirty = 0;

    /* start address is aligned at the start of a word? */
    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
        /* Fast path: move whole words at a time.  atomic_xchg grabs and
         * clears each source word in one step so concurrent setters are
         * not lost. */
        int k;
        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];

        for (k = page; k < page + nr; k++) {
            if (src[k]) {
                unsigned long bits = atomic_xchg(&src[k], 0);
                unsigned long new_dirty;
                /* count only the bits not already present in dest */
                new_dirty = ~dest[k];
                dest[k] |= bits;
                new_dirty &= bits;
                num_dirty += ctpopl(new_dirty);
            }
        }
    } else {
        /* Slow path: test-and-clear one page at a time. */
        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
            if (cpu_physical_memory_test_and_clear_dirty(
                        start + addr,
                        TARGET_PAGE_SIZE,
                        DIRTY_MEMORY_MIGRATION)) {
                long k = (start + addr) >> TARGET_PAGE_BITS;
                /* test_and_set so already-transferred pages are not
                 * double-counted */
                if (!test_and_set_bit(k, dest)) {
                    num_dirty++;
                }
            }
        }
    }

    return num_dirty;
}
251
252 #endif
253 #endif