Code Review
/
kvmfornfv.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
review
|
tree
raw
|
inline
| side by side
To modify Ixia port numbers and IP in pod.yaml
[kvmfornfv.git]
/
kernel
/
fs
/
ext4
/
readpage.c
diff --git a/kernel/fs/ext4/readpage.c b/kernel/fs/ext4/readpage.c
index 171b9ac..5dc5e95 100644 (file)
--- a/kernel/fs/ext4/readpage.c
+++ b/kernel/fs/ext4/readpage.c
@@ -54,15 +54,15 @@ static void completion_pages(struct work_struct *work)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct ext4_crypto_ctx *ctx =
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct ext4_crypto_ctx *ctx =
-		container_of(work, struct ext4_crypto_ctx, work);
-	struct bio	*bio	= ctx->bio;
+		container_of(work, struct ext4_crypto_ctx, r.work);
+	struct bio	*bio	= ctx->r.bio;
struct bio_vec *bv;
int i;
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
struct bio_vec *bv;
int i;
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
-		int ret = ext4_decrypt(ctx, page);
+		int ret = ext4_decrypt(page);
if (ret) {
WARN_ON_ONCE(1);
SetPageError(page);
if (ret) {
WARN_ON_ONCE(1);
SetPageError(page);
@@ -98,7 +98,7 @@ static inline bool ext4_bio_encrypted(struct bio *bio)
* status of that page is hard. See end_buffer_async_read() for the details.
* There is no point in duplicating all that complexity.
*/
* status of that page is hard. See end_buffer_async_read() for the details.
* There is no point in duplicating all that complexity.
*/
-static void mpage_end_io(struct bio *bio, int err)
+static void mpage_end_io(struct bio *bio)
{
struct bio_vec *bv;
int i;
{
struct bio_vec *bv;
int i;
@@ -106,19 +106,19 @@ static void mpage_end_io(struct bio *bio, int err)
if (ext4_bio_encrypted(bio)) {
struct ext4_crypto_ctx *ctx = bio->bi_private;
if (ext4_bio_encrypted(bio)) {
struct ext4_crypto_ctx *ctx = bio->bi_private;
-		if (err) {
+		if (bio->bi_error) {
ext4_release_crypto_ctx(ctx);
} else {
ext4_release_crypto_ctx(ctx);
} else {
-			INIT_WORK(&ctx->work, completion_pages);
-			ctx->bio = bio;
-			queue_work(ext4_read_workqueue, &ctx->work);
+			INIT_WORK(&ctx->r.work, completion_pages);
+			ctx->r.bio = bio;
+			queue_work(ext4_read_workqueue, &ctx->r.work);
return;
}
}
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
return;
}
}
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
-		if (!err) {
+		if (!bio->bi_error) {
SetPageUptodate(page);
} else {
ClearPageUptodate(page);
SetPageUptodate(page);
} else {
ClearPageUptodate(page);
@@ -165,8 +165,8 @@ int ext4_mpage_readpages(struct address_space *mapping,
if (pages) {
page = list_entry(pages->prev, struct page, lru);
list_del(&page->lru);
if (pages) {
page = list_entry(pages->prev, struct page, lru);
list_del(&page->lru);
-		if (add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL))
+		if (add_to_page_cache_lru(page, mapping, page->index,
+				mapping_gfp_constraint(mapping, GFP_KERNEL)))
goto next_page;
}
goto next_page;
}
@@ -284,7 +284,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
goto set_error_page;
}
bio = bio_alloc(GFP_KERNEL,
goto set_error_page;
}
bio = bio_alloc(GFP_KERNEL,
-				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
+				min_t(int, nr_pages, BIO_MAX_PAGES));
if (!bio) {
if (ctx)
ext4_release_crypto_ctx(ctx);
if (!bio) {
if (ctx)
ext4_release_crypto_ctx(ctx);