
Commit bb91bc7

Mikulas Patocka authored and kergon committed
dm io: flush cpu cache with vmapped io
For normal kernel pages, the CPU cache is synchronized by the DMA layer. However, this is not done for pages allocated with vmalloc. If we do I/O to/from vmalloc'ed pages, we must synchronize the CPU cache explicitly.

Before doing I/O on a vmalloc'ed page we must call flush_kernel_vmap_range to write back dirty cache lines for the virtual address. After a read has finished we must call invalidate_kernel_vmap_range to invalidate the cache for the virtual address, so that accesses through it return the newly read data and not stale data from the CPU cache.

This patch fixes metadata corruption on dm-snapshots on PA-RISC and possibly other architectures with virtually indexed caches.

Cc: stable <[email protected]>
Signed-off-by: Mikulas Patocka <[email protected]>
Signed-off-by: Alasdair G Kergon <[email protected]>
1 parent 286f367 commit bb91bc7
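
To make the ordering described in the commit message concrete, here is a minimal sketch, not taken from the patch, of how a driver doing block I/O on a vmalloc()ed buffer would bracket the transfer. submit_buffer_io() is a hypothetical stand-in for whatever actually issues and waits for the I/O; flush_kernel_vmap_range() and invalidate_kernel_vmap_range() are the real interfaces declared via <linux/highmem.h>.

#include <linux/highmem.h>	/* flush_kernel_vmap_range(), invalidate_kernel_vmap_range() */
#include <linux/fs.h>		/* READ / WRITE */

/* Hypothetical helper: issue the transfer and wait for it to finish. */
int submit_buffer_io(void *buf, unsigned long len, int rw);

static int io_on_vmalloc_buffer(void *buf, unsigned long len, int rw)
{
	int r;

	/*
	 * Write back dirty cache lines for the vmalloc alias before the
	 * device touches the underlying pages; the DMA layer only handles
	 * the kernel's linear mapping.
	 */
	flush_kernel_vmap_range(buf, len);

	r = submit_buffer_io(buf, len, rw);

	/*
	 * After a read, drop the cache lines for the vmalloc alias so the
	 * CPU sees the data the device just wrote, not stale cached data.
	 */
	if (!r && rw == READ)
		invalidate_kernel_vmap_range(buf, len);

	return r;
}

On most architectures both calls are no-ops; on machines with virtually indexed caches such as PA-RISC they perform the writeback and invalidation that the DMA layer cannot do for the vmalloc alias.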

File tree: 1 file changed (+27, -2 lines)


drivers/md/dm-io.c

Lines changed: 27 additions & 2 deletions
@@ -38,6 +38,8 @@ struct io {
 	struct dm_io_client *client;
 	io_notify_fn callback;
 	void *context;
+	void *vma_invalidate_address;
+	unsigned long vma_invalidate_size;
 } __attribute__((aligned(DM_IO_MAX_REGIONS)));
 
 static struct kmem_cache *_dm_io_cache;
@@ -116,6 +118,10 @@ static void dec_count(struct io *io, unsigned int region, int error)
 		set_bit(region, &io->error_bits);
 
 	if (atomic_dec_and_test(&io->count)) {
+		if (io->vma_invalidate_size)
+			invalidate_kernel_vmap_range(io->vma_invalidate_address,
+						     io->vma_invalidate_size);
+
 		if (io->sleeper)
 			wake_up_process(io->sleeper);
 
@@ -159,6 +165,9 @@ struct dpages {
 
 	unsigned context_u;
 	void *context_ptr;
+
+	void *vma_invalidate_address;
+	unsigned long vma_invalidate_size;
 };
 
 /*
@@ -377,6 +386,9 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 	io->sleeper = current;
 	io->client = client;
 
+	io->vma_invalidate_address = dp->vma_invalidate_address;
+	io->vma_invalidate_size = dp->vma_invalidate_size;
+
 	dispatch_io(rw, num_regions, where, dp, io, 1);
 
 	while (1) {
@@ -415,13 +427,21 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
 	io->callback = fn;
 	io->context = context;
 
+	io->vma_invalidate_address = dp->vma_invalidate_address;
+	io->vma_invalidate_size = dp->vma_invalidate_size;
+
 	dispatch_io(rw, num_regions, where, dp, io, 0);
 	return 0;
 }
 
-static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
+static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
+		   unsigned long size)
 {
 	/* Set up dpages based on memory type */
+
+	dp->vma_invalidate_address = NULL;
+	dp->vma_invalidate_size = 0;
+
 	switch (io_req->mem.type) {
 	case DM_IO_PAGE_LIST:
 		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
@@ -432,6 +452,11 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
 		break;
 
 	case DM_IO_VMA:
+		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
+		if ((io_req->bi_rw & RW_MASK) == READ) {
+			dp->vma_invalidate_address = io_req->mem.ptr.vma;
+			dp->vma_invalidate_size = size;
+		}
 		vm_dp_init(dp, io_req->mem.ptr.vma);
 		break;
 
@@ -460,7 +485,7 @@ int dm_io(struct dm_io_request *io_req, unsigned num_regions,
 	int r;
 	struct dpages dp;
 
-	r = dp_init(io_req, &dp);
+	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
 	if (r)
 		return r;
 

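For context, the DM_IO_VMA path fixed above is typically reached from callers such as dm-snapshot's persistent exception store, which reads metadata into a vmalloc()ed area. The sketch below is simplified and hedged: struct metadata_store, its fields, and read_metadata_area() are made-up illustration names, while dm_io(), struct dm_io_request, struct dm_io_region, and DM_IO_VMA are the real dm-io interfaces.

#include <linux/blkdev.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>

struct metadata_store {			/* hypothetical caller context */
	struct block_device *bdev;
	struct dm_io_client *io_client;
	void *area;			/* metadata buffer from vmalloc() */
};

static int read_metadata_area(struct metadata_store *ms,
			      sector_t sector, sector_t nr_sectors)
{
	struct dm_io_region where = {
		.bdev	= ms->bdev,
		.sector	= sector,
		.count	= nr_sectors,
	};
	struct dm_io_request io_req = {
		.bi_rw		= READ,
		.mem.type	= DM_IO_VMA,	/* buffer is vmalloc()ed */
		.mem.ptr.vma	= ms->area,
		.notify.fn	= NULL,		/* synchronous */
		.client		= ms->io_client,
	};

	/*
	 * With this patch applied, dp_init() flushes the vmap range before
	 * dispatch and dec_count() invalidates it after the read completes,
	 * so the caller needs no cache maintenance of its own.
	 */
	return dm_io(&io_req, 1, &where, NULL);
}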
0 commit comments
