[pull] master from torvalds:master #1837


Merged
merged 57 commits into master from torvalds:master on Apr 18, 2025
Commits (57)
d05af90
md/raid10: fix missing discard IO accounting
YuKuai-huawei Mar 25, 2025
6ec1f02
md/md-bitmap: fix stats collection for external bitmaps
Apr 3, 2025
1b41940
block: add SPDX header line to blk-throttle.h
tbird20d Apr 11, 2025
40f2eb9
block: fix resource leak in blk_register_queue() error path
Apr 12, 2025
1e440d5
ksmbd: Fix dangling pointer in krb_authenticate
SeanHeelan Apr 7, 2025
1df0d4c
ksmbd: fix WARNING "do not call blocking ops when !TASK_RUNNING"
namjaejeon Apr 15, 2025
21a4e47
ksmbd: fix use-after-free in __smb2_lease_break_noti()
namjaejeon Apr 11, 2025
18b4fac
ksmbd: fix use-after-free in smb_break_all_levII_oplock()
namjaejeon Apr 15, 2025
b37f2f3
ksmbd: fix the warning from __kernel_write_iter
namjaejeon Apr 15, 2025
a93ff74
ksmbd: Prevent integer overflow in calculation of deadtime
Apr 9, 2025
25744f8
io_uring/zcrx: return ifq id to the user
isilence Apr 15, 2025
70e4f9b
io_uring/zcrx: add pp to ifq conversion helper
isilence Apr 15, 2025
1fdb818
loop: aio inherit the ioprio of original request
Apr 14, 2025
e7bc001
loop: properly send KOBJ_CHANGED uevent for disk device
t-8ch Apr 15, 2025
0dba7a0
loop: LOOP_SET_FD: send uevents for partitions
t-8ch Apr 15, 2025
f2fed44
loop: stop using vfs_iter_{read,write} for buffered I/O
Apr 9, 2025
0b7a481
ublk: don't suggest CONFIG_BLK_DEV_UBLK=Y
calebsander Apr 16, 2025
26d7fb4
nvme: fixup scan failure for non-ANA multipath controllers
Apr 14, 2025
08937bc
nvme-multipath: sysfs links may not be created for devices
Apr 15, 2025
b1efcc4
nvmet: auth: use NULL to clear a pointer in nvmet_auth_sq_free()
damien-lemoal Apr 11, 2025
ffe0398
nvmet: pci-epf: always fully initialize completion entries
damien-lemoal Apr 11, 2025
f8e01fa
nvmet: pci-epf: clear CC and CSTS when disabling the controller
damien-lemoal Apr 11, 2025
ad91308
nvmet: pci-epf: cleanup link state management
damien-lemoal Apr 11, 2025
b7c178d
md/raid1: Add check for missing source disk in process_checks()
Apr 8, 2025
39e1605
block: integrity: Do not call set_page_dirty_lock()
martinkpetersen Apr 16, 2025
ec12009
selftests: ublk: fix ublk_find_tgt()
Apr 12, 2025
9cad26d
selftests: ublk: add io_uring uapi header
Apr 12, 2025
8d31a7e
selftests: ublk: cleanup backfile automatically
Apr 12, 2025
573840a
selftests: ublk: make sure _add_ublk_dev can return in sub-shell
Apr 12, 2025
bb2cabf
selftests: ublk: run stress tests in parallel
Apr 12, 2025
d836590
selftests: ublk: add two stress tests for zero copy feature
Apr 12, 2025
62867a0
selftests: ublk: setup ring with IORING_SETUP_SINGLE_ISSUER/IORING_SE…
Apr 12, 2025
2f0a692
selftests: ublk: set queue pthread's cpu affinity
Apr 12, 2025
6c62fd0
selftests: ublk: increase max nr_queues and queue depth
Apr 12, 2025
810b88f
selftests: ublk: support target specific command line
Apr 12, 2025
57e13a2
selftests: ublk: support user recovery
Apr 12, 2025
2f9a30b
selftests: ublk: add test_stress_05.sh
Apr 12, 2025
3bf5406
selftests: ublk: move creating UBLK_TMP into _prep_test()
Apr 12, 2025
b69b8ed
ublk: properly serialize all FETCH_REQs
ps-ushankar Apr 16, 2025
00b3b0d
ublk: add ublk_force_abort_dev()
Apr 16, 2025
7e26cb6
ublk: rely on ->canceling for dealing with ublk_nosrv_dev_should_queu…
Apr 16, 2025
728cbac
ublk: move device reset into ublk_ch_release()
Apr 16, 2025
82a8a30
ublk: improve detection and handling of ublk server exit
ps-ushankar Apr 16, 2025
736b005
ublk: remove __ublk_quiesce_dev()
Apr 16, 2025
e63d222
ublk: simplify aborting ublk request
Apr 16, 2025
8158665
selftests: ublk: add generic_06 for covering fault inject
ps-ushankar Apr 16, 2025
3139100
Merge tag 'md-6.15-20250416' of https://git.kernel.org/pub/scm/linux/…
axboe Apr 17, 2025
81dd1fe
Merge tag 'nvme-6.15-2025-04-17' of git://git.infradead.org/nvme into…
axboe Apr 17, 2025
1ac5712
io_uring/rsrc: don't skip offset calculation
isilence Apr 17, 2025
50169d0
io_uring/rsrc: separate kbuf offset adjustments
isilence Apr 17, 2025
59852eb
io_uring/rsrc: refactor io_import_fixed
isilence Apr 17, 2025
80c7378
io_uring/rsrc: send exact nr_segs for fixed buffer
nj-shetty Apr 17, 2025
b419bed
io_uring/rsrc: ensure segments counts are correct on kbuf buffers
axboe Apr 16, 2025
f12ecf5
io_uring/zcrx: fix late dma unmap for a dead dev
isilence Apr 18, 2025
b1011b2
Merge tag 'io_uring-6.15-20250418' of git://git.kernel.dk/linux
torvalds Apr 18, 2025
f7c2ca2
Merge tag 'block-6.15-20250417' of git://git.kernel.dk/linux
torvalds Apr 18, 2025
7e74f75
Merge tag 'v6.15-rc2-ksmbd-server-fixes' of git://git.samba.org/ksmbd
torvalds Apr 18, 2025
17 changes: 6 additions & 11 deletions block/bio-integrity.c
@@ -66,16 +66,12 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
}
EXPORT_SYMBOL(bio_integrity_alloc);

static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,
bool dirty)
static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs)
{
int i;

for (i = 0; i < nr_vecs; i++) {
if (dirty && !PageCompound(bv[i].bv_page))
set_page_dirty_lock(bv[i].bv_page);
for (i = 0; i < nr_vecs; i++)
unpin_user_page(bv[i].bv_page);
}
}

static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
@@ -91,7 +87,7 @@ static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
ret = copy_to_iter(bvec_virt(bounce_bvec), bytes, &orig_iter);
WARN_ON_ONCE(ret != bytes);

bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs, true);
bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs);
}

/**
@@ -111,8 +107,7 @@ void bio_integrity_unmap_user(struct bio *bio)
return;
}

bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt,
bio_data_dir(bio) == READ);
bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt);
}

/**
@@ -198,7 +193,7 @@ static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
}

if (write)
bio_integrity_unpin_bvec(bvec, nr_vecs, false);
bio_integrity_unpin_bvec(bvec, nr_vecs);
else
memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec));

@@ -319,7 +314,7 @@ int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
return 0;

release_pages:
bio_integrity_unpin_bvec(bvec, nr_bvecs, false);
bio_integrity_unpin_bvec(bvec, nr_bvecs);
free_bvec:
if (bvec != stack_vec)
kfree(bvec);
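The rendered diff above has lost its +/- column, so as a reading aid, here is the unpin helper as it reads after this change, reassembled from the added lines in the first hunk (indentation restored): the dirty argument and the set_page_dirty_lock() call are gone, and every caller now passes only the bvec array and count.

```c
/* After "block: integrity: Do not call set_page_dirty_lock()" */
static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs)
{
	int i;

	/* release the user pages pinned for the integrity payload */
	for (i = 0; i < nr_vecs; i++)
		unpin_user_page(bv[i].bv_page);
}
```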
2 changes: 2 additions & 0 deletions block/blk-sysfs.c
@@ -909,6 +909,8 @@ int blk_register_queue(struct gendisk *disk)
out_debugfs_remove:
blk_debugfs_remove(disk);
mutex_unlock(&q->sysfs_lock);
if (queue_is_mq(q))
blk_mq_sysfs_unregister(disk);
out_put_queue_kobj:
kobject_put(&disk->queue_kobj);
return ret;
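The blk_register_queue() hunk above restores the usual goto-unwind symmetry: once the mq sysfs entries have been registered, the debugfs failure path now tears them down again with blk_mq_sysfs_unregister() before dropping the queue kobject. As a minimal stand-alone sketch of that pattern (plain user-space C with made-up step names, not kernel code):

```c
#include <stdio.h>

/*
 * Toy illustration of the unwind pattern blk_register_queue() relies on:
 * each failure label undoes everything set up before the failing step,
 * in reverse order. The leak fixed above came from one setup step having
 * no matching teardown on the error path. All names here are invented.
 */
static int setup_a(void) { puts("setup A"); return 0; }
static void teardown_a(void) { puts("teardown A"); }
static int setup_b(void) { puts("setup B"); return 0; }
static void teardown_b(void) { puts("teardown B"); }
static int setup_c(void) { puts("setup C (fails)"); return -1; }

static int register_all(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto out;
	ret = setup_b();
	if (ret)
		goto out_teardown_a;
	ret = setup_c();
	if (ret)
		goto out_teardown_b;	/* without this label, B would leak */
	return 0;

out_teardown_b:
	teardown_b();
out_teardown_a:
	teardown_a();
out:
	return ret;
}

int main(void)
{
	return register_all() ? 1 : 0;
}
```

Each label undoes exactly the steps completed before the jump, which is why a newly added setup step needs a matching entry in the unwind chain.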
1 change: 1 addition & 0 deletions block/blk-throttle.h
@@ -1,3 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_THROTTLE_H
#define BLK_THROTTLE_H

6 changes: 0 additions & 6 deletions drivers/block/Kconfig
@@ -388,12 +388,6 @@ config BLK_DEV_UBLK
definition isn't finalized yet, and might change according to future
requirement, so mark is as experimental now.

Say Y if you want to get better performance because task_work_add()
can be used in IO path for replacing io_uring cmd, which will become
shared between IO tasks and ubq daemon, meantime task_work_add() can
can handle batch more effectively, but task_work_add() isn't exported
for module, so ublk has to be built to kernel.

config BLKDEV_UBLK_LEGACY_OPCODES
bool "Support legacy command opcode"
depends on BLK_DEV_UBLK
121 changes: 22 additions & 99 deletions drivers/block/loop.c
@@ -211,72 +211,6 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}

static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
struct iov_iter i;
ssize_t bw;

iov_iter_bvec(&i, ITER_SOURCE, bvec, 1, bvec->bv_len);

bw = vfs_iter_write(file, &i, ppos, 0);

if (likely(bw == bvec->bv_len))
return 0;

printk_ratelimited(KERN_ERR
"loop: Write error at byte offset %llu, length %i.\n",
(unsigned long long)*ppos, bvec->bv_len);
if (bw >= 0)
bw = -EIO;
return bw;
}

static int lo_write_simple(struct loop_device *lo, struct request *rq,
loff_t pos)
{
struct bio_vec bvec;
struct req_iterator iter;
int ret = 0;

rq_for_each_segment(bvec, rq, iter) {
ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
if (ret < 0)
break;
cond_resched();
}

return ret;
}

static int lo_read_simple(struct loop_device *lo, struct request *rq,
loff_t pos)
{
struct bio_vec bvec;
struct req_iterator iter;
struct iov_iter i;
ssize_t len;

rq_for_each_segment(bvec, rq, iter) {
iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len);
len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
if (len < 0)
return len;

flush_dcache_page(bvec.bv_page);

if (len != bvec.bv_len) {
struct bio *bio;

__rq_for_each_bio(bio, rq)
zero_fill_bio(bio);
break;
}
cond_resched();
}

return 0;
}

static void loop_clear_limits(struct loop_device *lo, int mode)
{
struct queue_limits lim = queue_limits_start_update(lo->lo_queue);
@@ -342,7 +276,7 @@ static void lo_complete_rq(struct request *rq)
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
blk_status_t ret = BLK_STS_OK;

if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
if (cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
req_op(rq) != REQ_OP_READ) {
if (cmd->ret < 0)
ret = errno_to_blk_status(cmd->ret);
@@ -358,14 +292,13 @@ static void lo_complete_rq(struct request *rq)
cmd->ret = 0;
blk_mq_requeue_request(rq, true);
} else {
if (cmd->use_aio) {
struct bio *bio = rq->bio;
struct bio *bio = rq->bio;

while (bio) {
zero_fill_bio(bio);
bio = bio->bi_next;
}
while (bio) {
zero_fill_bio(bio);
bio = bio->bi_next;
}

ret = BLK_STS_IOERR;
end_io:
blk_mq_end_request(rq, ret);
@@ -445,9 +378,14 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,

cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
cmd->iocb.ki_complete = lo_rw_aio_complete;
cmd->iocb.ki_flags = IOCB_DIRECT;
cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
cmd->iocb.ki_ioprio = req_get_ioprio(rq);
if (cmd->use_aio) {
cmd->iocb.ki_complete = lo_rw_aio_complete;
cmd->iocb.ki_flags = IOCB_DIRECT;
} else {
cmd->iocb.ki_complete = NULL;
cmd->iocb.ki_flags = 0;
}

if (rw == ITER_SOURCE)
ret = file->f_op->write_iter(&cmd->iocb, &iter);
@@ -458,23 +396,14 @@

if (ret != -EIOCBQUEUED)
lo_rw_aio_complete(&cmd->iocb, ret);
return 0;
return -EIOCBQUEUED;
}

static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;

/*
* lo_write_simple and lo_read_simple should have been covered
* by io submit style function like lo_rw_aio(), one blocker
* is that lo_read_simple() need to call flush_dcache_page after
* the page is written from kernel, and it isn't easy to handle
* this in io submit style function which submits all segments
* of the req at one time. And direct read IO doesn't need to
* run flush_dcache_page().
*/
switch (req_op(rq)) {
case REQ_OP_FLUSH:
return lo_req_flush(lo, rq);
@@ -490,15 +419,9 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
case REQ_OP_DISCARD:
return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
else
return lo_write_simple(lo, rq, pos);
return lo_rw_aio(lo, cmd, pos, ITER_SOURCE);
case REQ_OP_READ:
if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, ITER_DEST);
else
return lo_read_simple(lo, rq, pos);
return lo_rw_aio(lo, cmd, pos, ITER_DEST);
default:
WARN_ON_ONCE(1);
return -EIO;
@@ -662,19 +585,20 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
* dependency.
*/
fput(old_file);
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
if (partscan)
loop_reread_partitions(lo);

error = 0;
done:
/* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
return error;

out_err:
loop_global_unlock(lo, is_loop);
out_putf:
fput(file);
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
goto done;
}

@@ -1129,8 +1053,8 @@ static int loop_configure(struct loop_device *lo, blk_mode_t mode,
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);

/* enable and uncork uevent now that we are done */
dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0);
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);

loop_global_unlock(lo, is_loop);
if (partscan)
@@ -1921,7 +1845,6 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
struct loop_device *lo = rq->q->queuedata;
int ret = 0;
struct mem_cgroup *old_memcg = NULL;
const bool use_aio = cmd->use_aio;

if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
ret = -EIO;
@@ -1951,7 +1874,7 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
}
failed:
/* complete non-aio request */
if (!use_aio || ret) {
if (ret != -EIOCBQUEUED) {
if (ret == -EOPNOTSUPP)
cmd->ret = ret;
else
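Since the +/- column is missing here as well, the core of the loop.c rework reads roughly as follows after this series, reassembled from the added lines in the hunks above (unrelated lines elided, indentation restored; a reading aid, not a verbatim copy of the final file): lo_rw_aio() becomes the single submission path for both direct and buffered I/O, a NULL ki_complete makes the ->read_iter()/->write_iter() call synchronous, and -EIOCBQUEUED serves as the "completion is handled elsewhere" sentinel.

```c
	/* lo_rw_aio(): one submission path for direct and buffered I/O */
	cmd->iocb.ki_pos = pos;
	cmd->iocb.ki_filp = file;
	cmd->iocb.ki_ioprio = req_get_ioprio(rq);	/* inherit the request's I/O priority */
	if (cmd->use_aio) {
		cmd->iocb.ki_complete = lo_rw_aio_complete;
		cmd->iocb.ki_flags = IOCB_DIRECT;
	} else {
		cmd->iocb.ki_complete = NULL;		/* NULL completion => synchronous call */
		cmd->iocb.ki_flags = 0;
	}

	if (rw == ITER_SOURCE)
		ret = file->f_op->write_iter(&cmd->iocb, &iter);
	else
		ret = file->f_op->read_iter(&cmd->iocb, &iter);
	/* ... */
	if (ret != -EIOCBQUEUED)
		lo_rw_aio_complete(&cmd->iocb, ret);
	return -EIOCBQUEUED;
```

Correspondingly, loop_handle_cmd() now completes the request itself only when the return value is not -EIOCBQUEUED, instead of keying off cmd->use_aio, which is why the use_aio local and the lo_write_simple()/lo_read_simple() helpers could be dropped.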