mirror of https://github.com/torvalds/linux.git
Merge back system sleep material for 6.19
commit 1cf9c4f115
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -454,3 +454,19 @@ Description:
 		disables it. Reads from the file return the current value.
 		The default is "1" if the build-time "SUSPEND_SKIP_SYNC" config
 		flag is unset, or "0" otherwise.
+
+What:		/sys/power/hibernate_compression_threads
+Date:		October 2025
+Contact:	<luoxueqin@kylinos.cn>
+Description:
+		Controls the number of threads used for compression
+		and decompression of hibernation images.
+
+		The value can be adjusted at runtime to balance
+		performance and CPU utilization.
+
+		The change takes effect on the next hibernation or
+		resume operation.
+
+		Minimum value: 1
+		Default value: 3
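For context, the new attribute behaves like the other /sys/power files: write a number, read it back. A minimal user-space sketch (needs root; the path comes from the ABI entry above):

#include <stdio.h>

int main(void)
{
	/* Ask for 4 compressor threads; per the ABI text above, the new
	 * value takes effect on the next hibernation or resume. */
	FILE *f = fopen("/sys/power/hibernate_compression_threads", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "4\n");
	fclose(f);
	return 0;
}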
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1907,6 +1907,16 @@
			/sys/power/pm_test). Only available when CONFIG_PM_DEBUG
			is set. Default value is 5.

+	hibernate_compression_threads=
+			[HIBERNATION]
+			Set the number of threads used for compressing or decompressing
+			hibernation images.
+
+			Format: <integer>
+			Default: 3
+			Minimum: 1
+			Example: hibernate_compression_threads=4
+
	highmem=nn[KMG]	[KNL,BOOT,EARLY] forces the highmem zone to have an exact
			size of <nn>. This works even on boxes that have no
			highmem otherwise. This also works to reduce highmem
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -8,6 +8,13 @@
 #include <linux/pm_runtime.h>
 #include <linux/export.h>

+#define CALL_PM_OP(dev, op)						\
+({									\
+	struct device *_dev = (dev);					\
+	const struct dev_pm_ops *pm = _dev->driver ? _dev->driver->pm : NULL; \
+	pm && pm->op ? pm->op(_dev) : 0;				\
+})
+
 #ifdef CONFIG_PM
 /**
  * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
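A note for readers unfamiliar with the ({ ... }) construct: CALL_PM_OP is a GNU C statement expression, which the kernel requires from its compilers; the last expression inside the braces becomes the value of the whole macro, so it can declare locals and still be used as "return CALL_PM_OP(...)". Below is a self-contained user-space sketch of the same pattern; my_dev, my_ops and CALL_OP are illustrative stand-ins, not kernel API:

#include <stdio.h>

struct my_dev;

struct my_ops {
	int (*suspend)(struct my_dev *dev);
};

struct my_dev {
	const struct my_ops *ops;
	const char *name;
};

/* Same shape as CALL_PM_OP: evaluate to the callback's return value,
 * or to 0 when the ops table or the callback is missing. */
#define CALL_OP(dev, op)						\
({									\
	struct my_dev *_dev = (dev);					\
	const struct my_ops *_ops = _dev->ops;				\
	_ops && _ops->op ? _ops->op(_dev) : 0;				\
})

static int demo_suspend(struct my_dev *dev)
{
	printf("%s: suspend\n", dev->name);
	return 0;
}

int main(void)
{
	const struct my_ops ops = { .suspend = demo_suspend };
	struct my_dev with_cb = { .ops = &ops, .name = "devA" };
	struct my_dev without_cb = { .ops = NULL, .name = "devB" };

	/* devA runs the callback; devB silently evaluates to 0. */
	return CALL_OP(&with_cb, suspend) | CALL_OP(&without_cb, suspend);
}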
@@ -19,12 +26,7 @@
  */
 int pm_generic_runtime_suspend(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int ret;
-
-	ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
-
-	return ret;
+	return CALL_PM_OP(dev, runtime_suspend);
 }
 EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);

@@ -38,12 +40,7 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
  */
 int pm_generic_runtime_resume(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-	int ret;
-
-	ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
-
-	return ret;
+	return CALL_PM_OP(dev, runtime_resume);
 }
 EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
 #endif /* CONFIG_PM */
@@ -72,9 +69,7 @@ int pm_generic_prepare(struct device *dev)
  */
 int pm_generic_suspend_noirq(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
+	return CALL_PM_OP(dev, suspend_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);

@@ -84,9 +79,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
  */
 int pm_generic_suspend_late(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
+	return CALL_PM_OP(dev, suspend_late);
 }
 EXPORT_SYMBOL_GPL(pm_generic_suspend_late);

@@ -96,9 +89,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
  */
 int pm_generic_suspend(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->suspend ? pm->suspend(dev) : 0;
+	return CALL_PM_OP(dev, suspend);
 }
 EXPORT_SYMBOL_GPL(pm_generic_suspend);

@@ -108,9 +99,7 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
  */
 int pm_generic_freeze_noirq(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
+	return CALL_PM_OP(dev, freeze_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);

@@ -120,9 +109,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
  */
 int pm_generic_freeze(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->freeze ? pm->freeze(dev) : 0;
+	return CALL_PM_OP(dev, freeze);
 }
 EXPORT_SYMBOL_GPL(pm_generic_freeze);

@@ -132,9 +119,7 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
  */
 int pm_generic_poweroff_noirq(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
+	return CALL_PM_OP(dev, poweroff_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);

@@ -144,9 +129,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
  */
 int pm_generic_poweroff_late(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+	return CALL_PM_OP(dev, poweroff_late);
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);

@@ -156,9 +139,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
  */
 int pm_generic_poweroff(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+	return CALL_PM_OP(dev, poweroff);
 }
 EXPORT_SYMBOL_GPL(pm_generic_poweroff);

@@ -168,9 +149,7 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
  */
 int pm_generic_thaw_noirq(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
+	return CALL_PM_OP(dev, thaw_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);

@@ -180,9 +159,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
  */
 int pm_generic_thaw(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->thaw ? pm->thaw(dev) : 0;
+	return CALL_PM_OP(dev, thaw);
 }
 EXPORT_SYMBOL_GPL(pm_generic_thaw);

@@ -192,9 +169,7 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
  */
 int pm_generic_resume_noirq(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
+	return CALL_PM_OP(dev, resume_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);

@@ -204,9 +179,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
  */
 int pm_generic_resume_early(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+	return CALL_PM_OP(dev, resume_early);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume_early);

@@ -216,9 +189,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_early);
  */
 int pm_generic_resume(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->resume ? pm->resume(dev) : 0;
+	return CALL_PM_OP(dev, resume);
 }
 EXPORT_SYMBOL_GPL(pm_generic_resume);

@@ -228,9 +199,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
  */
 int pm_generic_restore_noirq(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
+	return CALL_PM_OP(dev, restore_noirq);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);

@@ -240,9 +209,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
  */
 int pm_generic_restore_early(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+	return CALL_PM_OP(dev, restore_early);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore_early);

@@ -252,9 +219,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_early);
  */
 int pm_generic_restore(struct device *dev)
 {
-	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-	return pm && pm->restore ? pm->restore(dev) : 0;
+	return CALL_PM_OP(dev, restore);
 }
 EXPORT_SYMBOL_GPL(pm_generic_restore);

--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -34,6 +34,7 @@
 #include <linux/cpufreq.h>
 #include <linux/devfreq.h>
 #include <linux/timer.h>
+#include <linux/nmi.h>

 #include "../base.h"
 #include "power.h"
@@ -515,6 +516,11 @@ struct dpm_watchdog {
 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

+static bool __read_mostly dpm_watchdog_all_cpu_backtrace;
+module_param(dpm_watchdog_all_cpu_backtrace, bool, 0644);
+MODULE_PARM_DESC(dpm_watchdog_all_cpu_backtrace,
+		 "Backtrace all CPUs on DPM watchdog timeout");
+
 /**
  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
  * @t: The timer that PM watchdog depends on.
@@ -530,8 +536,12 @@ static void dpm_watchdog_handler(struct timer_list *t)
	unsigned int time_left;

	if (wd->fatal) {
+		unsigned int this_cpu = smp_processor_id();
+
		dev_emerg(wd->dev, "**** DPM device timeout ****\n");
		show_stack(wd->tsk, NULL, KERN_EMERG);
+		if (dpm_watchdog_all_cpu_backtrace)
+			trigger_allbutcpu_cpu_backtrace(this_cpu);
		panic("%s %s: unrecoverable failure\n",
		      dev_driver_string(wd->dev), dev_name(wd->dev));
	}
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -238,10 +238,8 @@ int show_trace_dev_match(char *buf, size_t size)
			unsigned int hash = hash_string(DEVSEED, dev_name(dev),
							DEVHASH);
			if (hash == value) {
-				int len = snprintf(buf, size, "%s\n",
+				int len = scnprintf(buf, size, "%s\n",
						    dev_driver_string(dev));
-				if (len > size)
-					len = size;
				buf += len;
				ret += len;
				size -= len;
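Background on the fix: snprintf() returns the length the formatted string would have had, which can exceed the buffer size, so the removed clamp was papering over a bookkeeping bug; the kernel's scnprintf() instead returns the number of characters actually written. A user-space sketch of the difference, with my_scnprintf emulating the kernel helper (it is not a libc function):

#include <stdarg.h>
#include <stdio.h>

/* Emulation of the kernel's scnprintf(): like snprintf(), but return
 * how many characters were really placed in the buffer. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (len < 0)
		return 0;
	if ((size_t)len >= size)
		return size ? (int)(size - 1) : 0;
	return len;
}

int main(void)
{
	char buf[8];
	int a = snprintf(buf, sizeof(buf), "%s\n", "very_long_driver_name");
	int b = my_scnprintf(buf, sizeof(buf), "%s\n", "very_long_driver_name");

	/* a is 22 (would-be length), b is 7 (what actually fits). */
	printf("snprintf=%d scnprintf=%d\n", a, b);
	return 0;
}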
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -22,14 +22,18 @@ extern bool pm_nosig_freezing; /* PM nosig freezing in effect */
 extern unsigned int freeze_timeout_msecs;

 /*
- * Check if a process has been frozen
+ * Check if a process has been frozen for PM or cgroup1 freezer. Note that
+ * cgroup2 freezer uses the job control mechanism and does not interact with
+ * the PM freezer.
  */
 extern bool frozen(struct task_struct *p);

 extern bool freezing_slow_path(struct task_struct *p);

 /*
- * Check if there is a request to freeze a process
+ * Check if there is a request to freeze a task from PM or cgroup1 freezer.
+ * Note that cgroup2 freezer uses the job control mechanism and does not
+ * interact with the PM freezer.
  */
 static inline bool freezing(struct task_struct *p)
 {
@@ -63,9 +67,9 @@ extern bool freeze_task(struct task_struct *p);
 extern bool set_freezable(void);

 #ifdef CONFIG_CGROUP_FREEZER
-extern bool cgroup_freezing(struct task_struct *task);
+extern bool cgroup1_freezing(struct task_struct *task);
 #else /* !CONFIG_CGROUP_FREEZER */
-static inline bool cgroup_freezing(struct task_struct *task)
+static inline bool cgroup1_freezing(struct task_struct *task)
 {
	return false;
 }
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -25,11 +25,12 @@ extern void (*pm_power_off)(void);

 struct device; /* we have a circular dep with device.h */
 #ifdef CONFIG_VT_CONSOLE_SLEEP
-extern void pm_vt_switch_required(struct device *dev, bool required);
+extern int pm_vt_switch_required(struct device *dev, bool required);
 extern void pm_vt_switch_unregister(struct device *dev);
 #else
-static inline void pm_vt_switch_required(struct device *dev, bool required)
+static inline int pm_vt_switch_required(struct device *dev, bool required)
 {
+	return 0;
 }
 static inline void pm_vt_switch_unregister(struct device *dev)
 {
--- a/kernel/cgroup/legacy_freezer.c
+++ b/kernel/cgroup/legacy_freezer.c
@@ -63,7 +63,7 @@ static struct freezer *parent_freezer(struct freezer *freezer)
	return css_freezer(freezer->css.parent);
 }

-bool cgroup_freezing(struct task_struct *task)
+bool cgroup1_freezing(struct task_struct *task)
 {
	bool ret;

--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -44,7 +44,7 @@ bool freezing_slow_path(struct task_struct *p)
	if (tsk_is_oom_victim(p))
		return false;

-	if (pm_nosig_freezing || cgroup_freezing(p))
+	if (pm_nosig_freezing || cgroup1_freezing(p))
		return true;

	if (pm_freezing && !(p->flags & PF_KTHREAD))
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -44,9 +44,10 @@ static LIST_HEAD(pm_vt_switch_list);
  * no_console_suspend argument has been passed on the command line, VT
  * switches will occur.
  */
-void pm_vt_switch_required(struct device *dev, bool required)
+int pm_vt_switch_required(struct device *dev, bool required)
 {
	struct pm_vt_switch *entry, *tmp;
+	int ret = 0;

	mutex_lock(&vt_switch_mutex);
	list_for_each_entry(tmp, &pm_vt_switch_list, head) {
@@ -58,8 +59,10 @@ void pm_vt_switch_required(struct device *dev, bool required)
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry)
+	if (!entry) {
+		ret = -ENOMEM;
		goto out;
+	}

	entry->required = required;
	entry->dev = dev;
@@ -67,6 +70,7 @@ void pm_vt_switch_required(struct device *dev, bool required)
	list_add(&entry->head, &pm_vt_switch_list);
 out:
	mutex_unlock(&vt_switch_mutex);
+	return ret;
 }
 EXPORT_SYMBOL(pm_vt_switch_required);

--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -2110,22 +2110,20 @@ asmlinkage __visible int swsusp_save(void)
 {
	unsigned int nr_pages, nr_highmem;

-	pr_info("Creating image:\n");
+	pm_deferred_pr_dbg("Creating image\n");

	drain_local_pages(NULL);
	nr_pages = count_data_pages();
	nr_highmem = count_highmem_pages();
-	pr_info("Need to copy %u pages\n", nr_pages + nr_highmem);
+	pm_deferred_pr_dbg("Need to copy %u pages\n", nr_pages + nr_highmem);

	if (!enough_free_mem(nr_pages, nr_highmem)) {
-		pr_err("Not enough free memory\n");
+		pm_deferred_pr_dbg("Not enough free memory for image creation\n");
		return -ENOMEM;
	}

-	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem)) {
-		pr_err("Memory allocation failed\n");
+	if (swsusp_alloc(&copy_bm, nr_pages, nr_highmem))
		return -ENOMEM;
-	}

	/*
	 * During allocating of suspend pagedir, new cold pages may appear.
@@ -2144,7 +2142,8 @@ asmlinkage __visible int swsusp_save(void)
	nr_zero_pages = nr_pages - nr_copy_pages;
	nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);

-	pr_info("Image created (%d pages copied, %d zero pages)\n", nr_copy_pages, nr_zero_pages);
+	pm_deferred_pr_dbg("Image created (%d pages copied, %d zero pages)\n",
+			   nr_copy_pages, nr_zero_pages);

	return 0;
 }
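The pr_info()/pr_err() calls become pm_deferred_pr_dbg(), i.e. the messages are recorded for later rather than printed while the hibernation image is being created. The helper's implementation is not part of this diff; the following user-space sketch only models the general deferred-logging idea under that assumption:

#include <stdio.h>
#include <string.h>

#define LOG_BUF_SZ 4096
static char log_buf[LOG_BUF_SZ];
static size_t log_len;

/* Queue a message into a fixed buffer instead of printing it now. */
static void deferred_log(const char *msg)
{
	size_t n = strlen(msg);

	if (log_len + n < sizeof(log_buf)) {	/* drop on overflow */
		memcpy(log_buf + log_len, msg, n);
		log_len += n;
	}
}

/* Emit everything queued, once it is safe/cheap to do so. */
static void flush_deferred_log(void)
{
	fwrite(log_buf, 1, log_len, stdout);
	log_len = 0;
}

int main(void)
{
	deferred_log("Creating image\n");	/* queued, not printed yet */
	deferred_log("Need to copy 42 pages\n");
	flush_deferred_log();			/* printed here */
	return 0;
}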
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -519,8 +519,9 @@ static int swap_writer_finish(struct swap_map_handle *handle,
				     CMP_HEADER, PAGE_SIZE)
 #define CMP_SIZE	(CMP_PAGES * PAGE_SIZE)

-/* Maximum number of threads for compression/decompression. */
+/* Default number of threads for compression/decompression. */
 #define CMP_THREADS	3
+static unsigned int hibernate_compression_threads = CMP_THREADS;

 /* Minimum/maximum number of pages for read buffering. */
 #define CMP_MIN_RD_PAGES	1024
@@ -585,10 +586,48 @@ struct crc_data {
	wait_queue_head_t go;			/* start crc update */
	wait_queue_head_t done;			/* crc update done */
	u32 *crc32;				/* points to handle's crc32 */
-	size_t *unc_len[CMP_THREADS];		/* uncompressed lengths */
-	unsigned char *unc[CMP_THREADS];	/* uncompressed data */
+	size_t **unc_len;			/* uncompressed lengths */
+	unsigned char **unc;			/* uncompressed data */
 };

+static struct crc_data *alloc_crc_data(int nr_threads)
+{
+	struct crc_data *crc;
+
+	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+	if (!crc)
+		return NULL;
+
+	crc->unc = kcalloc(nr_threads, sizeof(*crc->unc), GFP_KERNEL);
+	if (!crc->unc)
+		goto err_free_crc;
+
+	crc->unc_len = kcalloc(nr_threads, sizeof(*crc->unc_len), GFP_KERNEL);
+	if (!crc->unc_len)
+		goto err_free_unc;
+
+	return crc;
+
+err_free_unc:
+	kfree(crc->unc);
+err_free_crc:
+	kfree(crc);
+	return NULL;
+}
+
+static void free_crc_data(struct crc_data *crc)
+{
+	if (!crc)
+		return;
+
+	if (crc->thr)
+		kthread_stop(crc->thr);
+
+	kfree(crc->unc_len);
+	kfree(crc->unc);
+	kfree(crc);
+}
+
 /*
  * CRC32 update function that runs in its own thread.
  */
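alloc_crc_data() is a textbook example of the kernel's allocate-then-unwind idiom: each error label releases exactly what was set up before the failing step, in reverse order. A stand-alone user-space sketch of the same idiom, with calloc()/free() standing in for kcalloc()/kfree():

#include <stdlib.h>

struct crc_like {
	size_t **unc_len;
	unsigned char **unc;
};

static struct crc_like *alloc_crc_like(int nr_threads)
{
	struct crc_like *crc = calloc(1, sizeof(*crc));

	if (!crc)
		return NULL;

	crc->unc = calloc(nr_threads, sizeof(*crc->unc));
	if (!crc->unc)
		goto err_free_crc;

	crc->unc_len = calloc(nr_threads, sizeof(*crc->unc_len));
	if (!crc->unc_len)
		goto err_free_unc;

	return crc;

err_free_unc:
	free(crc->unc);		/* undo the per-thread pointer array */
err_free_crc:
	free(crc);		/* undo the initial allocation */
	return NULL;
}

int main(void)
{
	struct crc_like *crc = alloc_crc_like(4);

	if (crc) {
		free(crc->unc_len);
		free(crc->unc);
		free(crc);
	}
	return 0;
}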
@@ -703,7 +742,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
-	nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
+	nr_threads = clamp_val(nr_threads, 1, hibernate_compression_threads);

	page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!page) {
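The resulting thread-count policy: one worker per online CPU minus one, clamped to the range [1, hibernate_compression_threads], so the upper bound is now the runtime tunable instead of the compile-time CMP_THREADS. A small user-space model (clamp_like stands in for the kernel's clamp_val()):

#include <stdio.h>

static long clamp_like(long val, long lo, long hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

int main(void)
{
	long limit = 3;		/* default hibernate_compression_threads */
	long cpus[] = { 1, 2, 4, 16 };

	/* 1 CPU -> 1 thread, 2 -> 1, 4 -> 3, 16 -> 3 with the default limit */
	for (int i = 0; i < 4; i++)
		printf("%ld CPUs -> %ld threads\n", cpus[i],
		       clamp_like(cpus[i] - 1, 1, limit));
	return 0;
}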
@@ -719,7 +758,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
		goto out_clean;
	}

-	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+	crc = alloc_crc_data(nr_threads);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
|
@ -885,11 +924,7 @@ static int save_compressed_image(struct swap_map_handle *handle,
|
|||
|
||||
out_clean:
|
||||
hib_finish_batch(&hb);
|
||||
if (crc) {
|
||||
if (crc->thr)
|
||||
kthread_stop(crc->thr);
|
||||
kfree(crc);
|
||||
}
|
||||
free_crc_data(crc);
|
||||
if (data) {
|
||||
for (thr = 0; thr < nr_threads; thr++) {
|
||||
if (data[thr].thr)
|
||||
|
|
@@ -1223,7 +1258,7 @@ static int load_compressed_image(struct swap_map_handle *handle,
	 * footprint.
	 */
	nr_threads = num_online_cpus() - 1;
-	nr_threads = clamp_val(nr_threads, 1, CMP_THREADS);
+	nr_threads = clamp_val(nr_threads, 1, hibernate_compression_threads);

	page = vmalloc_array(CMP_MAX_RD_PAGES, sizeof(*page));
	if (!page) {
@@ -1239,7 +1274,7 @@ static int load_compressed_image(struct swap_map_handle *handle,
		goto out_clean;
	}

-	crc = kzalloc(sizeof(*crc), GFP_KERNEL);
+	crc = alloc_crc_data(nr_threads);
	if (!crc) {
		pr_err("Failed to allocate crc\n");
		ret = -ENOMEM;
@@ -1506,11 +1541,7 @@ static int load_compressed_image(struct swap_map_handle *handle,
	hib_finish_batch(&hb);
	for (i = 0; i < ring_size; i++)
		free_page((unsigned long)page[i]);
-	if (crc) {
-		if (crc->thr)
-			kthread_stop(crc->thr);
-		kfree(crc);
-	}
+	free_crc_data(crc);
	if (data) {
		for (thr = 0; thr < nr_threads; thr++) {
			if (data[thr].thr)
@@ -1658,8 +1689,46 @@ int swsusp_unmark(void)
 }
 #endif

+static ssize_t hibernate_compression_threads_show(struct kobject *kobj,
+						  struct kobj_attribute *attr, char *buf)
+{
+	return sysfs_emit(buf, "%d\n", hibernate_compression_threads);
+}
+
+static ssize_t hibernate_compression_threads_store(struct kobject *kobj,
+						   struct kobj_attribute *attr,
+						   const char *buf, size_t n)
+{
+	unsigned long val;
+
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	if (val < 1)
+		return -EINVAL;
+
+	hibernate_compression_threads = val;
+	return n;
+}
+power_attr(hibernate_compression_threads);
+
+static struct attribute *g[] = {
+	&hibernate_compression_threads_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group attr_group = {
+	.attrs = g,
+};
+
 static int __init swsusp_header_init(void)
 {
+	int error;
+
+	error = sysfs_create_group(power_kobj, &attr_group);
+	if (error)
+		return -ENOMEM;
+
	swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
	if (!swsusp_header)
		panic("Could not allocate memory for swsusp_header\n");
@@ -1667,3 +1736,19 @@ static int __init swsusp_header_init(void)
 }

 core_initcall(swsusp_header_init);
+
+static int __init hibernate_compression_threads_setup(char *str)
+{
+	int rc = kstrtouint(str, 0, &hibernate_compression_threads);
+
+	if (rc)
+		return rc;
+
+	if (hibernate_compression_threads < 1)
+		hibernate_compression_threads = CMP_THREADS;
+
+	return 1;
+
+}
+
+__setup("hibernate_compression_threads=", hibernate_compression_threads_setup);
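For illustration, a user-space model of the boot-parameter handler's fallback behavior; strtoul() stands in for kstrtouint(), and as in the patch the default of 3 is kept on a parse failure and enforced for values below the minimum of 1:

#include <stdio.h>
#include <stdlib.h>

#define CMP_THREADS 3	/* default, as in the patch */

static unsigned int parse_threads(const char *str)
{
	char *end;
	unsigned long val = strtoul(str, &end, 0);

	if (end == str || *end != '\0')
		return CMP_THREADS;	/* parse error: keep the default */
	if (val < 1)
		return CMP_THREADS;	/* below the minimum: use the default */
	return (unsigned int)val;
}

int main(void)
{
	/* prints: 4 3 3 */
	printf("%u %u %u\n", parse_threads("4"), parse_threads("0"),
	       parse_threads("bogus"));
	return 0;
}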