PM: hibernate: dynamically allocate crc->unc_len/unc for configurable threads

Convert crc->unc_len and crc->unc from fixed-size arrays to dynamically
allocated arrays, sized according to the actual number of threads selected
at runtime. This removes the fixed limit imposed by CMP_THREADS.

Signed-off-by: Xueqin Luo <luoxueqin@kylinos.cn>
Link: https://patch.msgid.link/b5db63bb95729482d2649b12d3a11cb7547b7fcc.1761046167.git.luoxueqin@kylinos.cn
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Author: Xueqin Luo <luoxueqin@kylinos.cn>
Date: 2025-10-21 19:37:26 +08:00
Committer: Rafael J. Wysocki
Parent: d3db87f89c
Commit: e114e2eb7e
1 file changed with 44 additions and 14 deletions
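
For illustration only, and not part of the commit itself: a minimal userspace C sketch of the pattern the patch introduces. The names demo_crc_data, demo_alloc_crc_data, demo_free_crc_data and worker are hypothetical stand-ins for the kernel structures. Each entry of unc/unc_len points into a per-thread worker's buffer and length field, which is why the members are pointer-to-pointer, and both pointer arrays are sized by the runtime thread count rather than by the CMP_THREADS compile-time constant.

/* build: cc -Wall demo.c  (hypothetical file name) */
#include <stdio.h>
#include <stdlib.h>

struct worker {
	unsigned char unc[16]; /* stand-in for a per-thread page buffer */
	size_t unc_len;        /* filled in by the (de)compression thread */
};

struct demo_crc_data {
	size_t **unc_len;      /* unc_len[i] -> &worker[i].unc_len */
	unsigned char **unc;   /* unc[i]     -> worker[i].unc      */
};

static struct demo_crc_data *demo_alloc_crc_data(int nr_threads)
{
	struct demo_crc_data *crc = calloc(1, sizeof(*crc));

	if (!crc)
		return NULL;

	/* Same shape as the kcalloc() calls in the patch below. */
	crc->unc = calloc(nr_threads, sizeof(*crc->unc));
	crc->unc_len = calloc(nr_threads, sizeof(*crc->unc_len));
	if (!crc->unc || !crc->unc_len) {
		free(crc->unc_len);
		free(crc->unc);
		free(crc);
		return NULL;
	}
	return crc;
}

static void demo_free_crc_data(struct demo_crc_data *crc)
{
	if (!crc)
		return;
	free(crc->unc_len);
	free(crc->unc);
	free(crc);
}

int main(void)
{
	int nr_threads = 4; /* decided at runtime, no compile-time cap */
	struct worker *workers = calloc(nr_threads, sizeof(*workers));
	struct demo_crc_data *crc = demo_alloc_crc_data(nr_threads);
	int i;

	if (!workers || !crc) {
		demo_free_crc_data(crc);
		free(workers);
		return 1;
	}

	/* Wire the CRC side to each worker's buffer and length field. */
	for (i = 0; i < nr_threads; i++) {
		crc->unc[i] = workers[i].unc;
		crc->unc_len[i] = &workers[i].unc_len;
		workers[i].unc_len = (size_t)snprintf((char *)workers[i].unc,
						      sizeof(workers[i].unc),
						      "thr%d", i);
	}

	/* The consumer reads each thread's data through the pointers. */
	for (i = 0; i < nr_threads; i++)
		printf("thread %d: %zu bytes at %p\n",
		       i, *crc->unc_len[i], (void *)crc->unc[i]);

	demo_free_crc_data(crc);
	free(workers);
	return 0;
}

In the kernel patch below the same shape appears with kzalloc()/kcalloc()/kfree() in place of calloc()/free(), and free_crc_data() additionally stops the CRC kthread before releasing the arrays.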

@@ -585,10 +585,48 @@ struct crc_data {
 	wait_queue_head_t go; /* start crc update */
 	wait_queue_head_t done; /* crc update done */
 	u32 *crc32; /* points to handle's crc32 */
-	size_t *unc_len[CMP_THREADS]; /* uncompressed lengths */
-	unsigned char *unc[CMP_THREADS]; /* uncompressed data */
+	size_t **unc_len; /* uncompressed lengths */
+	unsigned char **unc; /* uncompressed data */
 };
 
+static struct crc_data *alloc_crc_data(int nr_threads)
+{
+	struct crc_data *crc;
+
+	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+	if (!crc)
+		return NULL;
+
+	crc->unc = kcalloc(nr_threads, sizeof(*crc->unc), GFP_KERNEL);
+	if (!crc->unc)
+		goto err_free_crc;
+
+	crc->unc_len = kcalloc(nr_threads, sizeof(*crc->unc_len), GFP_KERNEL);
+	if (!crc->unc_len)
+		goto err_free_unc;
+
+	return crc;
+
+err_free_unc:
+	kfree(crc->unc);
+err_free_crc:
+	kfree(crc);
+	return NULL;
+}
+
+static void free_crc_data(struct crc_data *crc)
+{
+	if (!crc)
+		return;
+
+	if (crc->thr)
+		kthread_stop(crc->thr);
+
+	kfree(crc->unc_len);
+	kfree(crc->unc);
+	kfree(crc);
+}
+
 /*
  * CRC32 update function that runs in its own thread.
  */
@@ -719,7 +757,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
 		goto out_clean;
 	}
 
-	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+	crc = alloc_crc_data(nr_threads);
 	if (!crc) {
 		pr_err("Failed to allocate crc\n");
 		ret = -ENOMEM;
@@ -885,11 +923,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
 out_clean:
 	hib_finish_batch(&hb);
-	if (crc) {
-		if (crc->thr)
-			kthread_stop(crc->thr);
-		kfree(crc);
-	}
+	free_crc_data(crc);
 	if (data) {
 		for (thr = 0; thr < nr_threads; thr++) {
 			if (data[thr].thr)
@@ -1239,7 +1273,7 @@ static int load_compressed_image(struct swap_map_handle *handle,
 		goto out_clean;
 	}
 
-	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+	crc = alloc_crc_data(nr_threads);
 	if (!crc) {
 		pr_err("Failed to allocate crc\n");
 		ret = -ENOMEM;
@@ -1506,11 +1540,7 @@ static int load_compressed_image(struct swap_map_handle *handle,
 	hib_finish_batch(&hb);
 	for (i = 0; i < ring_size; i++)
 		free_page((unsigned long)page[i]);
-	if (crc) {
-		if (crc->thr)
-			kthread_stop(crc->thr);
-		kfree(crc);
-	}
+	free_crc_data(crc);
 	if (data) {
 		for (thr = 0; thr < nr_threads; thr++) {
 			if (data[thr].thr)