dm-crypt: track tag_offset in convert_context

commit 8b8f8037765757861f899ed3a2bfb34525b5c065 upstream.

dm-crypt uses tag_offset to index the integrity metadata for each crypt
sector. When the initial crypt_convert() returns BLK_STS_DEV_RESOURCE,
dm-crypt will try to continue the crypt/decrypt procedure in a kworker.
However, it resets tag_offset to zero instead of resuming from the
tag_offset associated with the current sector. This may return unexpected
data when a random IV is used, or trigger unexpected integrity-related
errors.

Fix the problem by tracking tag_offset in the per-IO convert_context, so
that when the crypt/decrypt procedure continues in a kworker, it resumes
from the next tag_offset saved in convert_context.

Fixes: 8abec36d1274 ("dm crypt: do not wait for backlogged crypto request completion in softirq")
Cc: stable@vger.kernel.org
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
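
The failure mode can be seen in a minimal userspace sketch (this is not the
actual dm-crypt code; struct convert_ctx, convert_buggy() and convert_fixed()
are illustrative names): a function-local tag_offset loses the per-IO progress
when the conversion loop is resumed later, while a tag_offset kept in the
per-IO context survives the resume.

    #include <stdio.h>

    #define NR_SECTORS 8
    #define RESUME_AT  3    /* pretend the first pass has to stop here */

    struct convert_ctx {
            unsigned int next_sector;  /* where to resume the loop */
            unsigned int tag_offset;   /* per-IO tag progress (the fix keeps it here) */
    };

    /* Buggy shape: tag_offset is a local, so it restarts at 0 on every call. */
    static void convert_buggy(struct convert_ctx *ctx)
    {
            unsigned int tag_offset = 0;

            while (ctx->next_sector < NR_SECTORS) {
                    printf("buggy: sector %u -> tag slot %u\n",
                           ctx->next_sector, tag_offset);
                    ctx->next_sector++;
                    tag_offset++;
                    if (ctx->next_sector == RESUME_AT)
                            return;  /* simulate deferring the rest to a kworker */
            }
    }

    /* Fixed shape: the per-IO context carries tag_offset across the resume. */
    static void convert_fixed(struct convert_ctx *ctx)
    {
            while (ctx->next_sector < NR_SECTORS) {
                    printf("fixed: sector %u -> tag slot %u\n",
                           ctx->next_sector, ctx->tag_offset);
                    ctx->next_sector++;
                    ctx->tag_offset++;
                    if (ctx->next_sector == RESUME_AT)
                            return;
            }
    }

    int main(void)
    {
            struct convert_ctx a = { 0, 0 }, b = { 0, 0 };

            convert_buggy(&a);  /* first pass: sectors 0..2 get tags 0..2 */
            convert_buggy(&a);  /* resume: sectors 3..7 wrongly reuse tags 0..4 */

            convert_fixed(&b);  /* first pass: sectors 0..2 get tags 0..2 */
            convert_fixed(&b);  /* resume: sectors 3..7 correctly get tags 3..7 */
            return 0;
    }

Running the sketch shows the buggy variant pairing sectors 3..7 with tag slots
0..4 after the simulated resume, while the fixed variant keeps sector N paired
with tag slot N, which mirrors what moving tag_offset into convert_context
achieves in the patch below.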
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -56,6 +56,7 @@ struct convert_context {
 	struct bio *bio_out;
 	struct bvec_iter iter_out;
 	atomic_t cc_pending;
+	unsigned int tag_offset;
 	u64 cc_sector;
 	union {
 		struct skcipher_request *req;
@@ -1223,6 +1224,7 @@ static void crypt_convert_init(struct crypt_config *cc,
 	if (bio_out)
 		ctx->iter_out = bio_out->bi_iter;
 	ctx->cc_sector = sector + cc->iv_offset;
+	ctx->tag_offset = 0;
 	init_completion(&ctx->restart);
 }
 
@@ -1554,7 +1556,6 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_
 static blk_status_t crypt_convert(struct crypt_config *cc,
 			 struct convert_context *ctx, bool atomic, bool reset_pending)
 {
-	unsigned int tag_offset = 0;
 	unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT;
 	int r;
 
@@ -1577,9 +1578,9 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
 		atomic_inc(&ctx->cc_pending);
 
 		if (crypt_integrity_aead(cc))
-			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, tag_offset);
+			r = crypt_convert_block_aead(cc, ctx, ctx->r.req_aead, ctx->tag_offset);
 		else
-			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, tag_offset);
+			r = crypt_convert_block_skcipher(cc, ctx, ctx->r.req, ctx->tag_offset);
 
 		switch (r) {
 		/*
@@ -1599,8 +1600,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
 					 * exit and continue processing in a workqueue
 					 */
 					ctx->r.req = NULL;
+					ctx->tag_offset++;
 					ctx->cc_sector += sector_step;
-					tag_offset++;
 					return BLK_STS_DEV_RESOURCE;
 				}
 			} else {
@@ -1614,8 +1615,8 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
 		 */
 		case -EINPROGRESS:
 			ctx->r.req = NULL;
+			ctx->tag_offset++;
 			ctx->cc_sector += sector_step;
-			tag_offset++;
 			continue;
 		/*
 		 * The request was already processed (synchronously).
@@ -1623,7 +1624,7 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
 		case 0:
 			atomic_dec(&ctx->cc_pending);
 			ctx->cc_sector += sector_step;
-			tag_offset++;
+			ctx->tag_offset++;
 			if (!atomic)
 				cond_resched();
 			continue;