2 * L2/refcount table cache for the QCOW2 format
4 * Copyright (c) 2010 Kevin Wolf <kwolf@redhat.com>
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
25 /* Needed for CONFIG_MADVISE */
26 #include "qemu/osdep.h"
28 #if defined(CONFIG_MADVISE) || defined(CONFIG_POSIX_MADVISE)
32 #include "block/block_int.h"
33 #include "qemu-common.h"
/* One cache slot plus (below) the fields of the cache container itself.
 * NOTE(review): this listing is missing lines here — the Qcow2CachedTable
 * field list, its closing brace, and the 'struct Qcow2Cache {' opener are
 * not visible; recover the complete definitions from version control
 * before relying on this excerpt. */
37 typedef struct Qcow2CachedTable {
45 Qcow2CachedTable *entries; /* one slot per cached table */
46 struct Qcow2Cache *depends; /* cache to flush before this one (see qcow2_cache_flush_dependency) */
48 bool depends_on_flush; /* entries must not be written out before a bdrv_flush() succeeds */
51 uint64_t cache_clean_lru_counter; /* LRU watermark of the last qcow2_cache_clean_unused() pass */
54 static inline void *qcow2_cache_get_table_addr(BlockDriverState *bs,
55 Qcow2Cache *c, int table)
57 BDRVQcow2State *s = bs->opaque;
58 return (uint8_t *) c->table_array + (size_t) table * s->cluster_size;
61 static inline int qcow2_cache_get_table_idx(BlockDriverState *bs,
62 Qcow2Cache *c, void *table)
64 BDRVQcow2State *s = bs->opaque;
65 ptrdiff_t table_offset = (uint8_t *) table - (uint8_t *) c->table_array;
66 int idx = table_offset / s->cluster_size;
67 assert(idx >= 0 && idx < c->size && table_offset % s->cluster_size == 0);
71 static void qcow2_cache_table_release(BlockDriverState *bs, Qcow2Cache *c,
72 int i, int num_tables)
74 #if QEMU_MADV_DONTNEED != QEMU_MADV_INVALID
75 BDRVQcow2State *s = bs->opaque;
76 void *t = qcow2_cache_get_table_addr(bs, c, i);
77 int align = getpagesize();
78 size_t mem_size = (size_t) s->cluster_size * num_tables;
79 size_t offset = QEMU_ALIGN_UP((uintptr_t) t, align) - (uintptr_t) t;
80 size_t length = QEMU_ALIGN_DOWN(mem_size - offset, align);
82 qemu_madvise((uint8_t *) t + offset, length, QEMU_MADV_DONTNEED);
87 static inline bool can_clean_entry(Qcow2Cache *c, int i)
89 Qcow2CachedTable *t = &c->entries[i];
90 return t->ref == 0 && !t->dirty && t->offset != 0 &&
91 t->lru_counter <= c->cache_clean_lru_counter;
94 void qcow2_cache_clean_unused(BlockDriverState *bs, Qcow2Cache *c)
100 /* Skip the entries that we don't need to clean */
101 while (i < c->size && !can_clean_entry(c, i)) {
105 /* And count how many we can clean in a row */
106 while (i < c->size && can_clean_entry(c, i)) {
107 c->entries[i].offset = 0;
108 c->entries[i].lru_counter = 0;
114 qcow2_cache_table_release(bs, c, i - to_clean, to_clean);
118 c->cache_clean_lru_counter = c->lru_counter;
121 Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables)
123 BDRVQcow2State *s = bs->opaque;
126 c = g_new0(Qcow2Cache, 1);
127 c->size = num_tables;
128 c->entries = g_try_new0(Qcow2CachedTable, num_tables);
129 c->table_array = qemu_try_blockalign(bs->file->bs,
130 (size_t) num_tables * s->cluster_size);
132 if (!c->entries || !c->table_array) {
133 qemu_vfree(c->table_array);
142 int qcow2_cache_destroy(BlockDriverState *bs, Qcow2Cache *c)
146 for (i = 0; i < c->size; i++) {
147 assert(c->entries[i].ref == 0);
150 qemu_vfree(c->table_array);
157 static int qcow2_cache_flush_dependency(BlockDriverState *bs, Qcow2Cache *c)
161 ret = qcow2_cache_flush(bs, c->depends);
167 c->depends_on_flush = false;
172 static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
174 BDRVQcow2State *s = bs->opaque;
177 if (!c->entries[i].dirty || !c->entries[i].offset) {
181 trace_qcow2_cache_entry_flush(qemu_coroutine_self(),
182 c == s->l2_table_cache, i);
185 ret = qcow2_cache_flush_dependency(bs, c);
186 } else if (c->depends_on_flush) {
187 ret = bdrv_flush(bs->file->bs);
189 c->depends_on_flush = false;
197 if (c == s->refcount_block_cache) {
198 ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_REFCOUNT_BLOCK,
199 c->entries[i].offset, s->cluster_size);
200 } else if (c == s->l2_table_cache) {
201 ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
202 c->entries[i].offset, s->cluster_size);
204 ret = qcow2_pre_write_overlap_check(bs, 0,
205 c->entries[i].offset, s->cluster_size);
212 if (c == s->refcount_block_cache) {
213 BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_UPDATE_PART);
214 } else if (c == s->l2_table_cache) {
215 BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
218 ret = bdrv_pwrite(bs->file->bs, c->entries[i].offset,
219 qcow2_cache_get_table_addr(bs, c, i), s->cluster_size);
224 c->entries[i].dirty = false;
229 int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c)
231 BDRVQcow2State *s = bs->opaque;
236 trace_qcow2_cache_flush(qemu_coroutine_self(), c == s->l2_table_cache);
238 for (i = 0; i < c->size; i++) {
239 ret = qcow2_cache_entry_flush(bs, c, i);
240 if (ret < 0 && result != -ENOSPC) {
246 ret = bdrv_flush(bs->file->bs);
255 int qcow2_cache_set_dependency(BlockDriverState *bs, Qcow2Cache *c,
256 Qcow2Cache *dependency)
260 if (dependency->depends) {
261 ret = qcow2_cache_flush_dependency(bs, dependency);
267 if (c->depends && (c->depends != dependency)) {
268 ret = qcow2_cache_flush_dependency(bs, c);
274 c->depends = dependency;
278 void qcow2_cache_depends_on_flush(Qcow2Cache *c)
280 c->depends_on_flush = true;
283 int qcow2_cache_empty(BlockDriverState *bs, Qcow2Cache *c)
287 ret = qcow2_cache_flush(bs, c);
292 for (i = 0; i < c->size; i++) {
293 assert(c->entries[i].ref == 0);
294 c->entries[i].offset = 0;
295 c->entries[i].lru_counter = 0;
298 qcow2_cache_table_release(bs, c, 0, c->size);
305 static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
306 uint64_t offset, void **table, bool read_from_disk)
308 BDRVQcow2State *s = bs->opaque;
312 uint64_t min_lru_counter = UINT64_MAX;
313 int min_lru_index = -1;
315 trace_qcow2_cache_get(qemu_coroutine_self(), c == s->l2_table_cache,
316 offset, read_from_disk);
318 /* Check if the table is already cached */
319 i = lookup_index = (offset / s->cluster_size * 4) % c->size;
321 const Qcow2CachedTable *t = &c->entries[i];
322 if (t->offset == offset) {
325 if (t->ref == 0 && t->lru_counter < min_lru_counter) {
326 min_lru_counter = t->lru_counter;
329 if (++i == c->size) {
332 } while (i != lookup_index);
334 if (min_lru_index == -1) {
335 /* This can't happen in current synchronous code, but leave the check
336 * here as a reminder for whoever starts using AIO with the cache */
340 /* Cache miss: write a table back and replace it */
342 trace_qcow2_cache_get_replace_entry(qemu_coroutine_self(),
343 c == s->l2_table_cache, i);
345 ret = qcow2_cache_entry_flush(bs, c, i);
350 trace_qcow2_cache_get_read(qemu_coroutine_self(),
351 c == s->l2_table_cache, i);
352 c->entries[i].offset = 0;
353 if (read_from_disk) {
354 if (c == s->l2_table_cache) {
355 BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
358 ret = bdrv_pread(bs->file->bs, offset,
359 qcow2_cache_get_table_addr(bs, c, i),
366 c->entries[i].offset = offset;
368 /* And return the right table */
371 *table = qcow2_cache_get_table_addr(bs, c, i);
373 trace_qcow2_cache_get_done(qemu_coroutine_self(),
374 c == s->l2_table_cache, i);
379 int qcow2_cache_get(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
382 return qcow2_cache_do_get(bs, c, offset, table, true);
385 int qcow2_cache_get_empty(BlockDriverState *bs, Qcow2Cache *c, uint64_t offset,
388 return qcow2_cache_do_get(bs, c, offset, table, false);
391 void qcow2_cache_put(BlockDriverState *bs, Qcow2Cache *c, void **table)
393 int i = qcow2_cache_get_table_idx(bs, c, *table);
398 if (c->entries[i].ref == 0) {
399 c->entries[i].lru_counter = ++c->lru_counter;
402 assert(c->entries[i].ref >= 0);
405 void qcow2_cache_entry_mark_dirty(BlockDriverState *bs, Qcow2Cache *c,
408 int i = qcow2_cache_get_table_idx(bs, c, table);
409 assert(c->entries[i].offset != 0);
410 c->entries[i].dirty = true;