block-6.18-20251031

-----BEGIN PGP SIGNATURE-----
 
 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmkE0BoQHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpvHID/0bh5wEXK/IFMIDdiyqdbr2GsoMfHxiM2k0
 OSeQdwMgEGPY2frB8SirTBZWPIskOFdgSbRQyuYiu5XpdbsRJY+JdtkaGYp1L+Fc
 4I4C1ZpK4Kdlw+nbBrcUxdedYKx4ZN/00otZWV2K2ZpJFn1ZCLhyInZZ8ZbosKEn
 HeAW54YLu+q3pO9BSbJBO97FP38AZAOqkT9suUDkQYUUnNivejFKV0qbKlRm5v4H
 fQLU2sfT1J78DHdhJ1Gdk+uNKzVuYxR7lJRC+1c0yi2fZN3VGNRYlTk1f4VX3mOn
 RcRaUr4r9LMZc9K2IYEpQgAyuznttokWI0SkklFVTFDZwa1KmsIZjEccNXvESDXN
 vSxUXuZtgePo2qijK0F8VoPqgQRLBoP5MeAfp+VlkUWAu49zljwrIZXuZl0xuHpT
 JIEzbzvk+KfPS/gKtQdWxuN3eqZvv596SxnWnzGMg17zmhsj2kEZ9BF4Q+9BNVMZ
 NdK0jmdsBA3iTI8xVy2ajEY6U2W3KDdkSKPWR2SDg+vBd/qu3VBmrC9ptr1AoYpO
 54UOyBtIAumMyaOAUDSGiKC4KSbgMWUhN2uBFC8uWvuh733Z333xb9BnY1T7D624
 cfacmSzkoXKUACmcLaod2+MDJlSXhxmOtVN65euxst8ZGQsSah1TpY4Tr/2UHKnO
 ru+vbsqJyQ==
 =rC6y
 -----END PGP SIGNATURE-----

Merge tag 'block-6.18-20251031' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux

Pull block fixes from Jens Axboe:

 - Fix blk-crypto reporting EIO when EINVAL is the correct error code

 - Two bug fixes for the block zone support

 - NVMe pull request via Keith:
      - Target side authentication fixup
      - Peer-to-peer metadata fixup

 - null_blk DMA alignment fix

* tag 'block-6.18-20251031' of git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux:
  null_blk: set dma alignment to logical block size
  blk-crypto: use BLK_STS_INVAL for alignment errors
  block: make REQ_OP_ZONE_OPEN a write operation
  block: fix op_is_zone_mgmt() to handle REQ_OP_ZONE_RESET_ALL
  nvme-pci: use blk_map_iter for p2p metadata
  nvmet-auth: update sc_c in host response
This commit is contained in:
Linus Torvalds 2025-10-31 12:57:19 -07:00
commit a5beb58e53
5 changed files with 21 additions and 11 deletions

View File

@ -292,7 +292,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
} }
if (!bio_crypt_check_alignment(bio)) { if (!bio_crypt_check_alignment(bio)) {
bio->bi_status = BLK_STS_IOERR; bio->bi_status = BLK_STS_INVAL;
goto fail; goto fail;
} }

View File

@ -1949,6 +1949,7 @@ static int null_add_dev(struct nullb_device *dev)
.logical_block_size = dev->blocksize, .logical_block_size = dev->blocksize,
.physical_block_size = dev->blocksize, .physical_block_size = dev->blocksize,
.max_hw_sectors = dev->max_sectors, .max_hw_sectors = dev->max_sectors,
.dma_alignment = dev->blocksize - 1,
}; };
struct nullb *nullb; struct nullb *nullb;

View File

@ -1042,7 +1042,7 @@ static blk_status_t nvme_map_data(struct request *req)
return nvme_pci_setup_data_prp(req, &iter); return nvme_pci_setup_data_prp(req, &iter);
} }
static blk_status_t nvme_pci_setup_meta_sgls(struct request *req) static blk_status_t nvme_pci_setup_meta_iter(struct request *req)
{ {
struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
unsigned int entries = req->nr_integrity_segments; unsigned int entries = req->nr_integrity_segments;
@ -1072,8 +1072,12 @@ static blk_status_t nvme_pci_setup_meta_sgls(struct request *req)
* descriptor provides an explicit length, so we're relying on that * descriptor provides an explicit length, so we're relying on that
* mechanism to catch any misunderstandings between the application and * mechanism to catch any misunderstandings between the application and
* device. * device.
*
* P2P DMA also needs to use the blk_dma_iter method, so mptr setup
* leverages this routine when that happens.
*/ */
if (entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD)) { if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl) ||
(entries == 1 && !(nvme_req(req)->flags & NVME_REQ_USERCMD))) {
iod->cmd.common.metadata = cpu_to_le64(iter.addr); iod->cmd.common.metadata = cpu_to_le64(iter.addr);
iod->meta_total_len = iter.len; iod->meta_total_len = iter.len;
iod->meta_dma = iter.addr; iod->meta_dma = iter.addr;
@ -1114,6 +1118,9 @@ static blk_status_t nvme_pci_setup_meta_mptr(struct request *req)
struct nvme_queue *nvmeq = req->mq_hctx->driver_data; struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
struct bio_vec bv = rq_integrity_vec(req); struct bio_vec bv = rq_integrity_vec(req);
if (is_pci_p2pdma_page(bv.bv_page))
return nvme_pci_setup_meta_iter(req);
iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0); iod->meta_dma = dma_map_bvec(nvmeq->dev->dev, &bv, rq_dma_dir(req), 0);
if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma)) if (dma_mapping_error(nvmeq->dev->dev, iod->meta_dma))
return BLK_STS_IOERR; return BLK_STS_IOERR;
@ -1128,7 +1135,7 @@ static blk_status_t nvme_map_metadata(struct request *req)
if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) && if ((iod->cmd.common.flags & NVME_CMD_SGL_METABUF) &&
nvme_pci_metadata_use_sgls(req)) nvme_pci_metadata_use_sgls(req))
return nvme_pci_setup_meta_sgls(req); return nvme_pci_setup_meta_iter(req);
return nvme_pci_setup_meta_mptr(req); return nvme_pci_setup_meta_mptr(req);
} }

View File

@ -298,7 +298,7 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
const char *hash_name; const char *hash_name;
u8 *challenge = req->sq->dhchap_c1; u8 *challenge = req->sq->dhchap_c1;
struct nvme_dhchap_key *transformed_key; struct nvme_dhchap_key *transformed_key;
u8 buf[4]; u8 buf[4], sc_c = ctrl->concat ? 1 : 0;
int ret; int ret;
hash_name = nvme_auth_hmac_name(ctrl->shash_id); hash_name = nvme_auth_hmac_name(ctrl->shash_id);
@ -367,13 +367,14 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
ret = crypto_shash_update(shash, buf, 2); ret = crypto_shash_update(shash, buf, 2);
if (ret) if (ret)
goto out; goto out;
memset(buf, 0, 4); *buf = sc_c;
ret = crypto_shash_update(shash, buf, 1); ret = crypto_shash_update(shash, buf, 1);
if (ret) if (ret)
goto out; goto out;
ret = crypto_shash_update(shash, "HostHost", 8); ret = crypto_shash_update(shash, "HostHost", 8);
if (ret) if (ret)
goto out; goto out;
memset(buf, 0, 4);
ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn)); ret = crypto_shash_update(shash, ctrl->hostnqn, strlen(ctrl->hostnqn));
if (ret) if (ret)
goto out; goto out;

View File

@ -341,15 +341,15 @@ enum req_op {
/* write the zero filled sector many times */ /* write the zero filled sector many times */
REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9, REQ_OP_WRITE_ZEROES = (__force blk_opf_t)9,
/* Open a zone */ /* Open a zone */
REQ_OP_ZONE_OPEN = (__force blk_opf_t)10, REQ_OP_ZONE_OPEN = (__force blk_opf_t)11,
/* Close a zone */ /* Close a zone */
REQ_OP_ZONE_CLOSE = (__force blk_opf_t)11, REQ_OP_ZONE_CLOSE = (__force blk_opf_t)13,
/* Transition a zone to full */ /* Transition a zone to full */
REQ_OP_ZONE_FINISH = (__force blk_opf_t)13, REQ_OP_ZONE_FINISH = (__force blk_opf_t)15,
/* reset a zone write pointer */ /* reset a zone write pointer */
REQ_OP_ZONE_RESET = (__force blk_opf_t)15, REQ_OP_ZONE_RESET = (__force blk_opf_t)17,
/* reset all the zone present on the device */ /* reset all the zone present on the device */
REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)17, REQ_OP_ZONE_RESET_ALL = (__force blk_opf_t)19,
/* Driver private requests */ /* Driver private requests */
REQ_OP_DRV_IN = (__force blk_opf_t)34, REQ_OP_DRV_IN = (__force blk_opf_t)34,
@ -478,6 +478,7 @@ static inline bool op_is_zone_mgmt(enum req_op op)
{ {
switch (op & REQ_OP_MASK) { switch (op & REQ_OP_MASK) {
case REQ_OP_ZONE_RESET: case REQ_OP_ZONE_RESET:
case REQ_OP_ZONE_RESET_ALL:
case REQ_OP_ZONE_OPEN: case REQ_OP_ZONE_OPEN:
case REQ_OP_ZONE_CLOSE: case REQ_OP_ZONE_CLOSE:
case REQ_OP_ZONE_FINISH: case REQ_OP_ZONE_FINISH: