author    Cong Wang <amwang@redhat.com>    2011-11-25 23:14:25 +0800
committer Cong Wang <xiyou.wangcong@gmail.com>    2012-03-20 21:48:20 +0800
commit    ba82fe2e691146a92a25c85b611b436dc20f7825 (patch)
tree      51304ae3094e34cd350be30cdac1d97a453245f9 /drivers/staging/zram/zram_drv.c
parent    e3debd27636cd49a7424b63cc193141f58f37207 (diff)
zram: remove the second argument of k[un]map_atomic()
Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
Signed-off-by: Cong Wang <amwang@redhat.com>
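
The conversion is mechanical: the KM_USER0/KM_USER1 slot argument became unused once kmap_atomic() moved to stack-based per-CPU mapping slots, so this patch drops it and relies on nested mappings being released in the reverse order they were taken. A minimal before/after sketch of the pattern, lifted from the handle_uncompressed_page() hunk below (illustration only, not additional patch content):

/* Before: caller selects fixed kmap slots. */
user_mem = kmap_atomic(page, KM_USER0);
cmem = kmap_atomic(zram->table[index].page, KM_USER1);
memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
kunmap_atomic(cmem, KM_USER1);
kunmap_atomic(user_mem, KM_USER0);

/* After: slots are implicit; unmap in reverse (stack) order. */
user_mem = kmap_atomic(page);
cmem = kmap_atomic(zram->table[index].page);
memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
kunmap_atomic(cmem);
kunmap_atomic(user_mem);
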
Diffstat (limited to 'drivers/staging/zram/zram_drv.c')
-rw-r--r--  drivers/staging/zram/zram_drv.c | 44
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 2a2a92d389e6..1cf68ad20fa6 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -161,9 +161,9 @@ static void zram_free_page(struct zram *zram, size_t index)
goto out;
}
- obj = kmap_atomic(page, KM_USER0) + offset;
+ obj = kmap_atomic(page) + offset;
clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
- kunmap_atomic(obj, KM_USER0);
+ kunmap_atomic(obj);
xv_free(zram->mem_pool, page, offset);
if (clen <= PAGE_SIZE / 2)
@@ -182,9 +182,9 @@ static void handle_zero_page(struct bio_vec *bvec)
struct page *page = bvec->bv_page;
void *user_mem;
- user_mem = kmap_atomic(page, KM_USER0);
+ user_mem = kmap_atomic(page);
memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
- kunmap_atomic(user_mem, KM_USER0);
+ kunmap_atomic(user_mem);
flush_dcache_page(page);
}
@@ -195,12 +195,12 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
struct page *page = bvec->bv_page;
unsigned char *user_mem, *cmem;
- user_mem = kmap_atomic(page, KM_USER0);
- cmem = kmap_atomic(zram->table[index].page, KM_USER1);
+ user_mem = kmap_atomic(page);
+ cmem = kmap_atomic(zram->table[index].page);
memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
- kunmap_atomic(cmem, KM_USER1);
- kunmap_atomic(user_mem, KM_USER0);
+ kunmap_atomic(cmem);
+ kunmap_atomic(user_mem);
flush_dcache_page(page);
}
@@ -249,12 +249,12 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
}
}
- user_mem = kmap_atomic(page, KM_USER0);
+ user_mem = kmap_atomic(page);
if (!is_partial_io(bvec))
uncmem = user_mem;
clen = PAGE_SIZE;
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+ cmem = kmap_atomic(zram->table[index].page) +
zram->table[index].offset;
ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
@@ -267,8 +267,8 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
kfree(uncmem);
}
- kunmap_atomic(cmem, KM_USER1);
- kunmap_atomic(user_mem, KM_USER0);
+ kunmap_atomic(cmem);
+ kunmap_atomic(user_mem);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret != LZO_E_OK)) {
@@ -295,20 +295,20 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
return 0;
}
- cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
+ cmem = kmap_atomic(zram->table[index].page) +
zram->table[index].offset;
/* Page is stored uncompressed since it's incompressible */
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
memcpy(mem, cmem, PAGE_SIZE);
- kunmap_atomic(cmem, KM_USER0);
+ kunmap_atomic(cmem);
return 0;
}
ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
xv_get_object_size(cmem) - sizeof(*zheader),
mem, &clen);
- kunmap_atomic(cmem, KM_USER0);
+ kunmap_atomic(cmem);
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret != LZO_E_OK)) {
@@ -359,7 +359,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
zram_test_flag(zram, index, ZRAM_ZERO))
zram_free_page(zram, index);
- user_mem = kmap_atomic(page, KM_USER0);
+ user_mem = kmap_atomic(page);
if (is_partial_io(bvec))
memcpy(uncmem + offset, user_mem + bvec->bv_offset,
@@ -368,7 +368,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
uncmem = user_mem;
if (page_zero_filled(uncmem)) {
- kunmap_atomic(user_mem, KM_USER0);
+ kunmap_atomic(user_mem);
if (is_partial_io(bvec))
kfree(uncmem);
zram_stat_inc(&zram->stats.pages_zero);
@@ -380,7 +380,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
zram->compress_workmem);
- kunmap_atomic(user_mem, KM_USER0);
+ kunmap_atomic(user_mem);
if (is_partial_io(bvec))
kfree(uncmem);
@@ -408,7 +408,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
zram_stat_inc(&zram->stats.pages_expand);
zram->table[index].page = page_store;
- src = kmap_atomic(page, KM_USER0);
+ src = kmap_atomic(page);
goto memstore;
}
@@ -424,7 +424,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
memstore:
zram->table[index].offset = store_offset;
- cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+ cmem = kmap_atomic(zram->table[index].page) +
zram->table[index].offset;
#if 0
@@ -438,9 +438,9 @@ memstore:
memcpy(cmem, src, clen);
- kunmap_atomic(cmem, KM_USER1);
+ kunmap_atomic(cmem);
if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
- kunmap_atomic(src, KM_USER0);
+ kunmap_atomic(src);
/* Update stats */
zram_stat64_add(zram, &zram->stats.compr_size, clen);