/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */
#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/sysfs.h>

#include "zram_drv.h"
static DEFINE_IDR(zram_index_idr);
/* idr index must be protected */
static DEFINE_MUTEX(zram_index_mutex);
static int zram_major;
static const char *default_compressor = "lzo";
/* Module params (documentation at end) */
static unsigned int num_devices = 1;
static inline void deprecated_attr_warn(const char *name)
{
        pr_warn_once("%d (%s) Attribute %s (and others) will be removed. %s\n",
                        task_pid_nr(current),
                        current->comm,
                        name,
                        "See zram documentation.");
}
#define ZRAM_ATTR_RO(name)                                              \
static ssize_t name##_show(struct device *d,                            \
                        struct device_attribute *attr, char *b)         \
{                                                                       \
        struct zram *zram = dev_to_zram(d);                             \
                                                                        \
        deprecated_attr_warn(__stringify(name));                        \
        return scnprintf(b, PAGE_SIZE, "%llu\n",                        \
                (u64)atomic64_read(&zram->stats.name));                 \
}                                                                       \
static DEVICE_ATTR_RO(name);
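
/*
 * For illustration only: ZRAM_ATTR_RO(num_reads) expands to a
 * num_reads_show() callback that prints zram->stats.num_reads as a
 * decimal u64, plus the matching dev_attr_num_reads attribute:
 *
 *      static ssize_t num_reads_show(struct device *d,
 *                      struct device_attribute *attr, char *b)
 *      {
 *              struct zram *zram = dev_to_zram(d);
 *
 *              deprecated_attr_warn("num_reads");
 *              return scnprintf(b, PAGE_SIZE, "%llu\n",
 *                      (u64)atomic64_read(&zram->stats.num_reads));
 *      }
 *      static DEVICE_ATTR_RO(num_reads);
 */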
static inline bool init_done(struct zram *zram)
{
        return zram->disksize;
}
static inline struct zram *dev_to_zram(struct device *dev)
{
        return (struct zram *)dev_to_disk(dev)->private_data;
}
/* flag operations require table entry bit_spin_lock() being held */
static int zram_test_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        return meta->table[index].value & BIT(flag);
}
static void zram_set_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value |= BIT(flag);
}
static void zram_clear_flag(struct zram_meta *meta, u32 index,
                        enum zram_pageflags flag)
{
        meta->table[index].value &= ~BIT(flag);
}
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
        return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}
static void zram_set_obj_size(struct zram_meta *meta,
                        u32 index, size_t size)
{
        unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

        meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}
static inline bool is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}
/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline bool valid_io_request(struct zram *zram,
                sector_t start, unsigned int size)
{
        u64 end, bound;

        /* unaligned request */
        if (unlikely(start & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
                return false;
        if (unlikely(size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
                return false;

        end = start + (size >> SECTOR_SHIFT);
        bound = zram->disksize >> SECTOR_SHIFT;
        /* out of range */
        if (unlikely(start >= bound || end > bound || start > end))
                return false;

        /* I/O request is valid */
        return true;
}
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
static inline void update_used_max(struct zram *zram,
                const unsigned long pages)
{
        unsigned long old_max, cur_max;

        old_max = atomic_long_read(&zram->stats.max_used_pages);

        do {
                cur_max = old_max;
                if (pages > cur_max)
                        old_max = atomic_long_cmpxchg(
                                &zram->stats.max_used_pages, cur_max, pages);
        } while (old_max != cur_max);
}
static bool page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return false;
        }

        return true;
}
static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        if (is_partial_io(bvec))
                memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        else
                clear_page(user_mem);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}
static ssize_t initstate_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u32 val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = init_done(zram);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
static ssize_t disksize_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}
static ssize_t orig_data_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("orig_data_size");
        return scnprintf(buf, PAGE_SIZE, "%llu\n",
                (u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}
static ssize_t mem_used_total_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("mem_used_total");
        down_read(&zram->init_lock);
        if (init_done(zram)) {
                struct zram_meta *meta = zram->meta;
                val = zs_get_total_pages(meta->mem_pool);
        }
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
static ssize_t mem_limit_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val;
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("mem_limit");
        down_read(&zram->init_lock);
        val = zram->limit_pages;
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
static ssize_t mem_limit_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 limit;
        char *tmp;
        struct zram *zram = dev_to_zram(dev);

        limit = memparse(buf, &tmp);
        if (buf == tmp) /* no chars parsed, invalid input */
                return -EINVAL;

        down_write(&zram->init_lock);
        zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
        up_write(&zram->init_lock);

        return len;
}
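
/*
 * Usage sketch (per the zram documentation): the limit accepts memparse
 * suffixes, and a limit of 0 disables the check again:
 *      echo 512M > /sys/block/zram0/mem_limit
 *      echo 0 > /sys/block/zram0/mem_limit
 */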
static ssize_t mem_used_max_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u64 val = 0;
        struct zram *zram = dev_to_zram(dev);

        deprecated_attr_warn("mem_used_max");
        down_read(&zram->init_lock);
        if (init_done(zram))
                val = atomic_long_read(&zram->stats.max_used_pages);
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}
static ssize_t mem_used_max_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int err;
        unsigned long val;
        struct zram *zram = dev_to_zram(dev);

        err = kstrtoul(buf, 10, &val);
        if (err || val != 0)
                return -EINVAL;

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                struct zram_meta *meta = zram->meta;
                atomic_long_set(&zram->stats.max_used_pages,
                                zs_get_total_pages(meta->mem_pool));
        }
        up_read(&zram->init_lock);

        return len;
}
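
/*
 * Usage sketch: writing 0 (the only accepted value) resets the
 * watermark to the pool's current page usage:
 *      echo 0 > /sys/block/zram0/mem_used_max
 */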
static ssize_t max_comp_streams_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        int val;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        val = zram->max_comp_streams;
        up_read(&zram->init_lock);

        return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static ssize_t max_comp_streams_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int num;
        struct zram *zram = dev_to_zram(dev);
        int ret;

        ret = kstrtoint(buf, 0, &num);
        if (ret < 0)
                return ret;
        if (num < 1)
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                if (!zcomp_set_max_streams(zram->comp, num)) {
                        pr_info("Cannot change max compression streams\n");
                        ret = -EINVAL;
                        goto out;
                }
        }

        zram->max_comp_streams = num;
        ret = len;
out:
        up_write(&zram->init_lock);
        return ret;
}
static ssize_t comp_algorithm_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        size_t sz;
        struct zram *zram = dev_to_zram(dev);

        down_read(&zram->init_lock);
        sz = zcomp_available_show(zram->compressor, buf);
        up_read(&zram->init_lock);

        return sz;
}
static ssize_t comp_algorithm_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);
        size_t sz;

        if (!zcomp_available_algorithm(buf))
                return -EINVAL;

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                up_write(&zram->init_lock);
                pr_info("Can't change algorithm for initialized device\n");
                return -EBUSY;
        }
        strlcpy(zram->compressor, buf, sizeof(zram->compressor));

        /* ignore trailing newline */
        sz = strlen(zram->compressor);
        if (sz > 0 && zram->compressor[sz - 1] == '\n')
                zram->compressor[sz - 1] = 0x00;

        up_write(&zram->init_lock);
        return len;
}
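
/*
 * Usage sketch: pick a backend before setting disksize; the valid names
 * are whatever zcomp_available_show() reports, e.g. (if built in):
 *      cat /sys/block/zram0/comp_algorithm
 *      echo lz4 > /sys/block/zram0/comp_algorithm
 */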
static ssize_t compact_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct zram *zram = dev_to_zram(dev);
        struct zram_meta *meta;

        down_read(&zram->init_lock);
        if (!init_done(zram)) {
                up_read(&zram->init_lock);
                return -EINVAL;
        }

        meta = zram->meta;
        zs_compact(meta->mem_pool);
        up_read(&zram->init_lock);

        return len;
}
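
/*
 * Usage sketch: any write triggers compaction of the zsmalloc pool on
 * an initialized device (the written value is not parsed):
 *      echo 1 > /sys/block/zram0/compact
 */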
static ssize_t io_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        ssize_t ret;

        down_read(&zram->init_lock);
        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8llu\n",
                        (u64)atomic64_read(&zram->stats.failed_reads),
                        (u64)atomic64_read(&zram->stats.failed_writes),
                        (u64)atomic64_read(&zram->stats.invalid_io),
                        (u64)atomic64_read(&zram->stats.notify_free));
        up_read(&zram->init_lock);

        return ret;
}
static ssize_t mm_stat_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct zram *zram = dev_to_zram(dev);
        struct zs_pool_stats pool_stats;
        u64 orig_size, mem_used = 0;
        long max_used;
        ssize_t ret;

        memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats));

        down_read(&zram->init_lock);
        if (init_done(zram)) {
                mem_used = zs_get_total_pages(zram->meta->mem_pool);
                zs_pool_stats(zram->meta->mem_pool, &pool_stats);
        }

        orig_size = atomic64_read(&zram->stats.pages_stored);
        max_used = atomic_long_read(&zram->stats.max_used_pages);

        ret = scnprintf(buf, PAGE_SIZE,
                        "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n",
                        orig_size << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.compr_data_size),
                        mem_used << PAGE_SHIFT,
                        zram->limit_pages << PAGE_SHIFT,
                        max_used << PAGE_SHIFT,
                        (u64)atomic64_read(&zram->stats.zero_pages),
                        pool_stats.pages_compacted);
        up_read(&zram->init_lock);

        return ret;
}
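
/*
 * mm_stat columns, matching the format string above (all byte counts
 * except the last): orig_data_size compr_data_size mem_used_total
 * mem_limit mem_used_max zero_pages pages_compacted
 */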
static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);
static inline bool zram_meta_get(struct zram *zram)
{
        if (atomic_inc_not_zero(&zram->refcount))
                return true;
        return false;
}

static inline void zram_meta_put(struct zram *zram)
{
        atomic_dec(&zram->refcount);
}
static void zram_meta_free(struct zram_meta *meta, u64 disksize)
{
        size_t num_pages = disksize >> PAGE_SHIFT;
        size_t index;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < num_pages; index++) {
                unsigned long handle = meta->table[index].handle;

                if (!handle)
                        continue;

                zs_free(meta->mem_pool, handle);
        }

        zs_destroy_pool(meta->mem_pool);
        vfree(meta->table);
        kfree(meta);
}
static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
{
        size_t num_pages;
        struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);

        if (!meta)
                return NULL;

        num_pages = disksize >> PAGE_SHIFT;
        meta->table = vzalloc(num_pages * sizeof(*meta->table));
        if (!meta->table) {
                pr_err("Error allocating zram address table\n");
                goto out_error;
        }

        meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
        if (!meta->mem_pool) {
                pr_err("Error creating memory pool\n");
                goto out_error;
        }

        zram_meta_init_table_locks(meta, disksize);
        return meta;

out_error:
        vfree(meta->table);
        kfree(meta);
        return NULL;
}
/*
 * To protect concurrent access to the same index entry, the caller
 * should hold the table entry's bit_spinlock, which indicates that
 * the entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
        struct zram_meta *meta = zram->meta;
        unsigned long handle = meta->table[index].handle;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(meta, index, ZRAM_ZERO)) {
                        zram_clear_flag(meta, index, ZRAM_ZERO);
                        atomic64_dec(&zram->stats.zero_pages);
                }
                return;
        }

        zs_free(meta->mem_pool, handle);

        atomic64_sub(zram_get_obj_size(meta, index),
                        &zram->stats.compr_data_size);
        atomic64_dec(&zram->stats.pages_stored);

        meta->table[index].handle = 0;
        zram_set_obj_size(meta, index, 0);
}
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
        int ret = 0;
        unsigned char *cmem;
        struct zram_meta *meta = zram->meta;
        unsigned long handle;
        size_t size;

        zram_lock_table(&meta->table[index]);
        handle = meta->table[index].handle;
        size = zram_get_obj_size(meta, index);

        if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
                zram_unlock_table(&meta->table[index]);
                clear_page(mem);
                return 0;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
        if (size == PAGE_SIZE)
                copy_page(mem, cmem);
        else
                ret = zcomp_decompress(zram->comp, cmem, size, mem);
        zs_unmap_object(meta->mem_pool, handle);
        zram_unlock_table(&meta->table[index]);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                return ret;
        }

        return 0;
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset)
{
        int ret;
        struct page *page;
        unsigned char *user_mem, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        page = bvec->bv_page;

        zram_lock_table(&meta->table[index]);
        if (unlikely(!meta->table[index].handle) ||
                        zram_test_flag(meta, index, ZRAM_ZERO)) {
                zram_unlock_table(&meta->table[index]);
                handle_zero_page(bvec);
                return 0;
        }
        zram_unlock_table(&meta->table[index]);

        if (is_partial_io(bvec))
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;

        if (!uncmem) {
                pr_err("Unable to allocate temp memory\n");
                ret = -ENOMEM;
                goto out_cleanup;
        }

        ret = zram_decompress_page(zram, uncmem, index);
        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret))
                goto out_cleanup;

        if (is_partial_io(bvec))
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                                bvec->bv_len);

        flush_dcache_page(page);
        ret = 0;
out_cleanup:
        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset)
{
        int ret = 0;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
        struct zram_meta *meta = zram->meta;
        struct zcomp_strm *zstrm = NULL;
        unsigned long alloced_pages;

        page = bvec->bv_page;
        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
                if (!uncmem) {
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_decompress_page(zram, uncmem, index);
                if (ret)
                        goto out;
        }

        zstrm = zcomp_strm_find(zram->comp);
        user_mem = kmap_atomic(page);

        if (is_partial_io(bvec)) {
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                        bvec->bv_len);
                kunmap_atomic(user_mem);
                user_mem = NULL;
        } else {
                uncmem = user_mem;
        }

        if (page_zero_filled(uncmem)) {
                if (user_mem)
                        kunmap_atomic(user_mem);
                /* Free memory associated with this sector now. */
                zram_lock_table(&meta->table[index]);
                zram_free_page(zram, index);
                zram_set_flag(meta, index, ZRAM_ZERO);
                zram_unlock_table(&meta->table[index]);

                atomic64_inc(&zram->stats.zero_pages);
                ret = 0;
                goto out;
        }

        ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
        if (!is_partial_io(bvec)) {
                kunmap_atomic(user_mem);
                user_mem = NULL;
                uncmem = NULL;
        }
        if (unlikely(ret)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }
        src = zstrm->buffer;
        if (unlikely(clen > max_zpage_size)) {
                clen = PAGE_SIZE;
                if (is_partial_io(bvec))
                        src = uncmem;
        }

        handle = zs_malloc(meta->mem_pool, clen);
        if (!handle) {
                pr_err("Error allocating memory for compressed page: %u, size=%zu\n",
                        index, clen);
                ret = -ENOMEM;
                goto out;
        }

        alloced_pages = zs_get_total_pages(meta->mem_pool);
        update_used_max(zram, alloced_pages);
        if (zram->limit_pages && alloced_pages > zram->limit_pages) {
                zs_free(meta->mem_pool, handle);
                ret = -ENOMEM;
                goto out;
        }

        cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);
        if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
                src = kmap_atomic(page);
                copy_page(cmem, src);
                kunmap_atomic(src);
        } else {
                memcpy(cmem, src, clen);
        }

        zcomp_strm_release(zram->comp, zstrm);
        zstrm = NULL;
        zs_unmap_object(meta->mem_pool, handle);

        /*
         * Free memory associated with this sector
         * before overwriting unused sectors.
         */
        zram_lock_table(&meta->table[index]);
        zram_free_page(zram, index);
        meta->table[index].handle = handle;
        zram_set_obj_size(meta, index, clen);
        zram_unlock_table(&meta->table[index]);

        /* Update stats */
        atomic64_add(clen, &zram->stats.compr_data_size);
        atomic64_inc(&zram->stats.pages_stored);
out:
        if (zstrm)
                zcomp_strm_release(zram->comp, zstrm);
        if (is_partial_io(bvec))
                kfree(uncmem);
        return ret;
}
/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
                        int offset, struct bio *bio)
{
        size_t n = bio->bi_iter.bi_size;
        struct zram_meta *meta = zram->meta;

        /*
         * zram manages data in physical block size units. Because the
         * logical block size isn't identical to the physical block size
         * on some architectures, we could get a discard request pointing
         * to a specific offset within a certain physical block. Although
         * we could handle such a request by reading that physical block,
         * decompressing, partially zeroing, re-compressing, and re-storing
         * it, this isn't reasonable because our intent with a discard
         * request is to save memory. So skipping this logical block is
         * appropriate here.
         */
        if (offset) {
                if (n <= (PAGE_SIZE - offset))
                        return;

                n -= (PAGE_SIZE - offset);
                index++;
        }

        while (n >= PAGE_SIZE) {
                zram_lock_table(&meta->table[index]);
                zram_free_page(zram, index);
                zram_unlock_table(&meta->table[index]);
                atomic64_inc(&zram->stats.notify_free);
                index++;
                n -= PAGE_SIZE;
        }
}
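
/*
 * Worked example (illustrative numbers): with PAGE_SIZE == 4096, a
 * discard of bi_size == 12288 at offset 512 skips the 3584 bytes to the
 * end of the first page, frees the next two full pages, and skips the
 * trailing 512 bytes, since partially covered pages cannot be freed
 * without rewriting them.
 */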
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, int rw)
{
        unsigned long start_time = jiffies;
        int ret;

        generic_start_io_acct(rw, bvec->bv_len >> SECTOR_SHIFT,
                        &zram->disk->part0);

        if (rw == READ) {
                atomic64_inc(&zram->stats.num_reads);
                ret = zram_bvec_read(zram, bvec, index, offset);
        } else {
                atomic64_inc(&zram->stats.num_writes);
                ret = zram_bvec_write(zram, bvec, index, offset);
        }

        generic_end_io_acct(rw, &zram->disk->part0, start_time);

        if (unlikely(ret)) {
                if (rw == READ)
                        atomic64_inc(&zram->stats.failed_reads);
                else
                        atomic64_inc(&zram->stats.failed_writes);
        }

        return ret;
}
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
        int offset, rw;
        u32 index;
        struct bio_vec bvec;
        struct bvec_iter iter;

        index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_iter.bi_sector &
                        (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        if (unlikely(bio->bi_rw & REQ_DISCARD)) {
                zram_bio_discard(zram, index, offset, bio);
                bio_endio(bio);
                return;
        }

        rw = bio_data_dir(bio);
        bio_for_each_segment(bvec, bio, iter) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec.bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only make operation on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec.bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec.bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, rw) < 0)
                                goto out;

                        bv.bv_len = bvec.bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, rw) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, &bvec, index, offset, rw) < 0)
                                goto out;

                update_position(&index, &offset, &bvec);
        }

        bio_endio(bio);
        return;

out:
        bio_io_error(bio);
}
/*
 * Handler function for all zram I/O requests.
 */
static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        if (unlikely(!zram_meta_get(zram)))
                goto error;

        blk_queue_split(queue, &bio, queue->bio_split);

        if (!valid_io_request(zram, bio->bi_iter.bi_sector,
                                        bio->bi_iter.bi_size)) {
                atomic64_inc(&zram->stats.invalid_io);
                goto put_zram;
        }

        __zram_make_request(zram, bio);
        zram_meta_put(zram);
        return BLK_QC_T_NONE;

put_zram:
        zram_meta_put(zram);
error:
        bio_io_error(bio);
        return BLK_QC_T_NONE;
}
static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;
        struct zram_meta *meta;

        zram = bdev->bd_disk->private_data;
        meta = zram->meta;

        zram_lock_table(&meta->table[index]);
        zram_free_page(zram, index);
        zram_unlock_table(&meta->table[index]);
        atomic64_inc(&zram->stats.notify_free);
}
static int zram_rw_page(struct block_device *bdev, sector_t sector,
                        struct page *page, int rw)
{
        int offset, err = -EIO;
        u32 index;
        struct zram *zram;
        struct bio_vec bv;

        zram = bdev->bd_disk->private_data;
        if (unlikely(!zram_meta_get(zram)))
                goto out;

        if (!valid_io_request(zram, sector, PAGE_SIZE)) {
                atomic64_inc(&zram->stats.invalid_io);
                err = -EINVAL;
                goto put_zram;
        }

        index = sector >> SECTORS_PER_PAGE_SHIFT;
        offset = sector & (SECTORS_PER_PAGE - 1) << SECTOR_SHIFT;

        bv.bv_page = page;
        bv.bv_len = PAGE_SIZE;
        bv.bv_offset = 0;

        err = zram_bvec_rw(zram, &bv, index, offset, rw);
put_zram:
        zram_meta_put(zram);
out:
        /*
         * If I/O fails, just return the error (i.e. non-zero) without
         * calling page_endio. This makes the upper layers of rw_page
         * (e.g. swap_readpage, __swap_writepage) resubmit the I/O as a
         * bio request, and bio->bi_end_io then handles the error (e.g.
         * SetPageError, set_page_dirty and extra work).
         */
        if (err == 0)
                page_endio(page, rw, 0);
        return err;
}
static void zram_reset_device(struct zram *zram)
{
        struct zram_meta *meta;
        struct zcomp *comp;
        u64 disksize;

        down_write(&zram->init_lock);

        zram->limit_pages = 0;

        if (!init_done(zram)) {
                up_write(&zram->init_lock);
                return;
        }

        meta = zram->meta;
        comp = zram->comp;
        disksize = zram->disksize;
        /*
         * The refcount will go down to 0 eventually and the r/w handler
         * cannot handle further I/O, so it will bail out via the
         * zram_meta_get check.
         */
        zram_meta_put(zram);
        /*
         * We want to free zram_meta in process context to avoid
         * deadlock between reclaim path and any other locks.
         */
        wait_event(zram->io_done, atomic_read(&zram->refcount) == 0);

        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));
        zram->disksize = 0;
        zram->max_comp_streams = 1;

        set_capacity(zram->disk, 0);
        part_stat_set_all(&zram->disk->part0, 0);

        up_write(&zram->init_lock);
        /* I/O on all CPUs is done, so it is safe to free */
        zram_meta_free(meta, disksize);
        zcomp_destroy(comp);
}
static ssize_t disksize_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        u64 disksize;
        struct zcomp *comp;
        struct zram_meta *meta;
        struct zram *zram = dev_to_zram(dev);
        int err;

        disksize = memparse(buf, NULL);
        if (!disksize)
                return -EINVAL;

        disksize = PAGE_ALIGN(disksize);
        meta = zram_meta_alloc(zram->disk->disk_name, disksize);
        if (!meta)
                return -ENOMEM;

        comp = zcomp_create(zram->compressor, zram->max_comp_streams);
        if (IS_ERR(comp)) {
                pr_err("Cannot initialise %s compressing backend\n",
                                zram->compressor);
                err = PTR_ERR(comp);
                goto out_free_meta;
        }

        down_write(&zram->init_lock);
        if (init_done(zram)) {
                pr_info("Cannot change disksize for initialized device\n");
                err = -EBUSY;
                goto out_destroy_comp;
        }

        init_waitqueue_head(&zram->io_done);
        atomic_set(&zram->refcount, 1);
        zram->meta = meta;
        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        up_write(&zram->init_lock);

        /*
         * Revalidate disk out of the init_lock to avoid lockdep splat.
         * It's okay because disk's capacity is protected by init_lock
         * so that revalidate_disk always sees up-to-date capacity.
         */
        revalidate_disk(zram->disk);

        return len;

out_destroy_comp:
        up_write(&zram->init_lock);
        zcomp_destroy(comp);
out_free_meta:
        zram_meta_free(meta, disksize);
        return err;
}
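
/*
 * Usage sketch: initialize a 1 GiB device (memparse suffixes accepted):
 *      echo 1G > /sys/block/zram0/disksize
 */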
static ssize_t reset_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        int ret;
        unsigned short do_reset;
        struct zram *zram;
        struct block_device *bdev;

        ret = kstrtou16(buf, 10, &do_reset);
        if (ret)
                return ret;

        if (!do_reset)
                return -EINVAL;

        zram = dev_to_zram(dev);
        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);
        /* Do not reset an active device or claimed device */
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdev);
                return -EBUSY;
        }

        /* From now on, no one can open /dev/zram[0-9] */
        zram->claim = true;
        mutex_unlock(&bdev->bd_mutex);

        /* Make sure all the pending I/O are finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
        revalidate_disk(zram->disk);
        bdput(bdev);

        mutex_lock(&bdev->bd_mutex);
        zram->claim = false;
        mutex_unlock(&bdev->bd_mutex);

        return len;
}
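
/*
 * Usage sketch: tear the device down again (fails with -EBUSY while
 * the device is open or claimed):
 *      echo 1 > /sys/block/zram0/reset
 */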
static int zram_open(struct block_device *bdev, fmode_t mode)
{
        int ret = 0;
        struct zram *zram;

        WARN_ON(!mutex_is_locked(&bdev->bd_mutex));

        zram = bdev->bd_disk->private_data;
        /* zram was claimed to reset so open request fails */
        if (zram->claim)
                ret = -EBUSY;

        return ret;
}
static const struct block_device_operations zram_devops = {
        .open = zram_open,
        .swap_slot_free_notify = zram_slot_free_notify,
        .rw_page = zram_rw_page,
        .owner = THIS_MODULE
};
static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
static DEVICE_ATTR_RO(orig_data_size);
static DEVICE_ATTR_RO(mem_used_total);
static DEVICE_ATTR_RW(mem_limit);
static DEVICE_ATTR_RW(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
static struct attribute *zram_disk_attrs[] = {
        &dev_attr_disksize.attr,
        &dev_attr_initstate.attr,
        &dev_attr_reset.attr,
        &dev_attr_num_reads.attr,
        &dev_attr_num_writes.attr,
        &dev_attr_failed_reads.attr,
        &dev_attr_failed_writes.attr,
        &dev_attr_compact.attr,
        &dev_attr_invalid_io.attr,
        &dev_attr_notify_free.attr,
        &dev_attr_zero_pages.attr,
        &dev_attr_orig_data_size.attr,
        &dev_attr_compr_data_size.attr,
        &dev_attr_mem_used_total.attr,
        &dev_attr_mem_limit.attr,
        &dev_attr_mem_used_max.attr,
        &dev_attr_max_comp_streams.attr,
        &dev_attr_comp_algorithm.attr,
        &dev_attr_io_stat.attr,
        &dev_attr_mm_stat.attr,
        NULL,
};

static struct attribute_group zram_disk_attr_group = {
        .attrs = zram_disk_attrs,
};
/*
 * Allocate and initialize a new zram device. The function returns a
 * '>= 0' device_id upon success, and a negative value otherwise.
 */
static int zram_add(void)
{
        struct zram *zram;
        struct request_queue *queue;
        int ret, device_id;

        zram = kzalloc(sizeof(struct zram), GFP_KERNEL);
        if (!zram)
                return -ENOMEM;

        ret = idr_alloc(&zram_index_idr, zram, 0, 0, GFP_KERNEL);
        if (ret < 0)
                goto out_free_dev;
        device_id = ret;

        init_rwsem(&zram->init_lock);

        queue = blk_alloc_queue(GFP_KERNEL);
        if (!queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out_free_idr;
        }

        blk_queue_make_request(queue, zram_make_request);

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                pr_err("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out_free_queue;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = queue;
        zram->disk->queue->queuedata = zram;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);
        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZED sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
        zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
        blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
        /*
         * zram_bio_discard() will clear all logical blocks if logical
         * block size is identical with physical block size (PAGE_SIZE).
         * But if it is different, we will skip discarding some parts of
         * logical blocks in the part of the request range which isn't
         * aligned to physical block size. So we can't ensure that all
         * discarded logical blocks are zeroed.
         */
        if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
                zram->disk->queue->limits.discard_zeroes_data = 1;
        else
                zram->disk->queue->limits.discard_zeroes_data = 0;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_err("Error creating sysfs group for device %d\n",
                                device_id);
                goto out_free_disk;
        }
        strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
        zram->meta = NULL;
        zram->max_comp_streams = 1;

        pr_info("Added device: %s\n", zram->disk->disk_name);
        return device_id;

out_free_disk:
        del_gendisk(zram->disk);
        put_disk(zram->disk);
out_free_queue:
        blk_cleanup_queue(queue);
out_free_idr:
        idr_remove(&zram_index_idr, device_id);
out_free_dev:
        kfree(zram);
        return ret;
}
static int zram_remove(struct zram *zram)
{
        struct block_device *bdev;

        bdev = bdget_disk(zram->disk, 0);
        if (!bdev)
                return -ENOMEM;

        mutex_lock(&bdev->bd_mutex);
        if (bdev->bd_openers || zram->claim) {
                mutex_unlock(&bdev->bd_mutex);
                bdput(bdev);
                return -EBUSY;
        }

        zram->claim = true;
        mutex_unlock(&bdev->bd_mutex);

        /*
         * Remove sysfs first, so no one will perform a disksize
         * store while we destroy the devices. This also helps during
         * hot_remove -- zram_reset_device() is the last holder of
         * ->init_lock, no later/concurrent disksize_store() or any
         * other sysfs handlers are possible.
         */
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        /* Make sure all the pending I/O are finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
        bdput(bdev);

        pr_info("Removed device: %s\n", zram->disk->disk_name);

        blk_cleanup_queue(zram->disk->queue);
        del_gendisk(zram->disk);
        put_disk(zram->disk);
        kfree(zram);

        return 0;
}
/* zram-control sysfs attributes */
static ssize_t hot_add_show(struct class *class,
                        struct class_attribute *attr,
                        char *buf)
{
        int ret;

        mutex_lock(&zram_index_mutex);
        ret = zram_add();
        mutex_unlock(&zram_index_mutex);

        if (ret < 0)
                return ret;
        return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
static ssize_t hot_remove_store(struct class *class,
                        struct class_attribute *attr,
                        const char *buf,
                        size_t count)
{
        struct zram *zram;
        int ret, dev_id;

        /* dev_id is gendisk->first_minor, which is `int' */
        ret = kstrtoint(buf, 10, &dev_id);
        if (ret)
                return ret;
        if (dev_id < 0)
                return -EINVAL;

        mutex_lock(&zram_index_mutex);

        zram = idr_find(&zram_index_idr, dev_id);
        if (zram) {
                ret = zram_remove(zram);
                if (!ret)
                        idr_remove(&zram_index_idr, dev_id);
        } else {
                ret = -ENODEV;
        }

        mutex_unlock(&zram_index_mutex);
        return ret ? ret : count;
}
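
/*
 * Usage sketch for the zram-control class: reading hot_add allocates a
 * new device and prints its id; writing an id to hot_remove deletes
 * that device:
 *      cat /sys/class/zram-control/hot_add
 *      echo 4 > /sys/class/zram-control/hot_remove
 */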
/*
 * NOTE: hot_add attribute is not the usual read-only sysfs attribute,
 * in the sense that reading from this file does alter the state of your
 * system -- it creates a new un-initialized zram device and returns
 * that device's device_id (or an error code if it fails to create a
 * new device).
 */
static struct class_attribute zram_control_class_attrs[] = {
        __ATTR(hot_add, 0400, hot_add_show, NULL),
        __ATTR_WO(hot_remove),
        __ATTR_NULL,
};
static struct class zram_control_class = {
        .name           = "zram-control",
        .owner          = THIS_MODULE,
        .class_attrs    = zram_control_class_attrs,
};
static int zram_remove_cb(int id, void *ptr, void *data)
{
        zram_remove(ptr);
        return 0;
}
static void destroy_devices(void)
{
        class_unregister(&zram_control_class);
        idr_for_each(&zram_index_idr, &zram_remove_cb, NULL);
        idr_destroy(&zram_index_idr);
        unregister_blkdev(zram_major, "zram");
}
static int __init zram_init(void)
{
        int ret;

        ret = class_register(&zram_control_class);
        if (ret) {
                pr_err("Unable to register zram-control class\n");
                return ret;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_err("Unable to get major number\n");
                class_unregister(&zram_control_class);
                return -EBUSY;
        }

        while (num_devices != 0) {
                mutex_lock(&zram_index_mutex);
                ret = zram_add();
                mutex_unlock(&zram_index_mutex);
                if (ret < 0)
                        goto out_error;
                num_devices--;
        }

        return 0;

out_error:
        destroy_devices();
        return ret;
}
static void __exit zram_exit(void)
{
        destroy_devices();
}
module_init(zram_init);
module_exit(zram_exit);
module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of pre-created zram devices");
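
/*
 * Usage sketch: pre-create four devices (/dev/zram0 .. /dev/zram3) at
 * load time:
 *      modprobe zram num_devices=4
 */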
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");