Merge branches 'pm-qos' and 'pm-tools'

Merge PM QoS updates and a cpupower utility update for 6.19-rc1:

 - Introduce and document a QoS limit on CPU exit latency during wakeup
   from suspend-to-idle (Ulf Hansson)

 - Add support for building libcpupower statically (Zuo An)

* pm-qos:
  Documentation: power/cpuidle: Document the CPU system wakeup latency QoS
  cpuidle: Respect the CPU system wakeup QoS limit for cpuidle
  sched: idle: Respect the CPU system wakeup QoS limit for s2idle
  pmdomain: Respect the CPU system wakeup QoS limit for cpuidle
  pmdomain: Respect the CPU system wakeup QoS limit for s2idle
  PM: QoS: Introduce a CPU system wakeup QoS limit

* pm-tools:
  tools/power/cpupower: Support building libcpupower statically
Rafael J. Wysocki 2025-11-28 16:50:45 +01:00
commit 7cede21e9f
13 changed files with 224 additions and 30 deletions


@ -580,6 +580,15 @@ the given CPU as the upper limit for the exit latency of the idle states that
they are allowed to select for that CPU. They should never select any idle
states with exit latency beyond that limit.
While the above CPU QoS constraints apply to CPU idle time management, user
space may also request a CPU system wakeup latency QoS limit via the
`cpu_wakeup_latency` file. This QoS constraint is taken into account when
selecting a suitable idle state for the CPUs while entering the system-wide
suspend-to-idle sleep state, and it also applies to the regular CPU idle time
management.

Note that, from the user space point of view, the `cpu_wakeup_latency` file is
managed in the same way as the `cpu_dma_latency` file, and the unit is also
microseconds.
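
As a rough illustration of the interface documented above (a minimal sketch,
not part of this commit; the /dev/cpu_wakeup_latency path is the device node
registered later in this series and the 100 us value is arbitrary), a process
could request a CPU system wakeup latency limit like this and keep the file
descriptor open for as long as the constraint should apply:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int32_t limit_us = 100;  /* example limit: 100 microseconds */
            int fd = open("/dev/cpu_wakeup_latency", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* Writing a raw s32 registers/updates this process's request. */
            if (write(fd, &limit_us, sizeof(limit_us)) != sizeof(limit_us)) {
                    perror("write");
                    close(fd);
                    return 1;
            }
            /* The request stays in effect while the fd is held open. */
            pause();
            close(fd);  /* closing the fd removes the request */
            return 0;
    }
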
Idle States Control Via Kernel Command Line
===========================================


@ -55,7 +55,8 @@ int cpu_latency_qos_request_active(handle):
From user space:

The infrastructure exposes two separate device nodes, /dev/cpu_dma_latency for
the CPU latency QoS and /dev/cpu_wakeup_latency for the CPU system wakeup
latency QoS.

Only processes can register a PM QoS request. To provide for automatic
@ -63,15 +64,15 @@ cleanup of a process, the interface requires the process to register its
parameter requests as follows.

To register the default PM QoS target for the CPU latency QoS, the process must
open /dev/cpu_dma_latency. To register a CPU system wakeup QoS limit, the
process must open /dev/cpu_wakeup_latency.

As long as the device node is held open that process has a registered
request on the parameter.

To change the requested target value, the process needs to write an s32 value to
the open device node. Alternatively, it can write a hex string for the value
using the 10 char long format e.g. "0x12345678".

To remove the user mode request for a target value simply close the device
node.
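
For completeness, here is a hedged sketch of the alternative 10-character hex
write format and of reading back the currently effective value, following the
interface description above (the device path and the "0x000000c8" value, i.e.
200 microseconds, are the only assumptions):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            const char hex[] = "0x000000c8";  /* 200 us in the 10-char hex form */
            int32_t effective;
            int fd = open("/dev/cpu_wakeup_latency", O_RDWR);

            if (fd < 0)
                    return 1;
            /* A hex string of exactly 10 characters is accepted as well. */
            if (write(fd, hex, strlen(hex)) < 0)
                    perror("write");
            /* Reading returns the currently effective (aggregated) s32 value. */
            if (read(fd, &effective, sizeof(effective)) == (ssize_t)sizeof(effective))
                    printf("effective limit: %d us\n", (int)effective);
            pause();  /* keep the request active until the process exits */
            close(fd);
            return 0;
    }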


@ -184,20 +184,22 @@ static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv,
 * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle.
 * @drv: cpuidle driver for the given CPU.
 * @dev: cpuidle device for the given CPU.
 * @latency_limit_ns: Idle state exit latency limit
 *
 * If there are states with the ->enter_s2idle callback, find the deepest of
 * them and enter it with frozen tick.
 */
int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                         u64 latency_limit_ns)
{
        int index;

        /*
         * Find the deepest state with ->enter_s2idle present that meets the
         * specified latency limit, which guarantees that interrupts won't be
         * enabled when it exits and allows the tick to be frozen safely.
         */
        index = find_deepest_state(drv, dev, latency_limit_ns, 0, true);
        if (index > 0) {
                enter_s2idle_proper(drv, dev, index);
                local_irq_enable();
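
Conceptually, the selection performed by find_deepest_state() under the new
limit can be pictured with a sketch like the one below (illustrative only and
not the in-kernel implementation; pick_s2idle_state() is a made-up name, while
the state and usage fields follow struct cpuidle_state and
struct cpuidle_state_usage):

    #include <linux/cpuidle.h>

    /* Deepest enabled state that supports s2idle and fits the latency limit. */
    static int pick_s2idle_state(struct cpuidle_driver *drv,
                                 struct cpuidle_device *dev,
                                 u64 latency_limit_ns)
    {
            int i, index = 0;

            for (i = 1; i < drv->state_count; i++) {
                    struct cpuidle_state *s = &drv->states[i];

                    if (dev->states_usage[i].disable)
                            continue;       /* state disabled for this CPU */
                    if (!s->enter_s2idle)
                            continue;       /* no suspend-to-idle entry method */
                    if ((u64)s->exit_latency_ns > latency_limit_ns)
                            continue;       /* would wake up too slowly */
                    index = i;              /* deeper states have higher indices */
            }
            return index;
    }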


@ -111,6 +111,10 @@ s64 cpuidle_governor_latency_req(unsigned int cpu)
        struct device *device = get_cpu_device(cpu);
        int device_req = dev_pm_qos_raw_resume_latency(device);
        int global_req = cpu_latency_qos_limit();
        int global_wake_req = cpu_wakeup_latency_qos_limit();

        if (global_req > global_wake_req)
                global_req = global_wake_req;

        if (device_req > global_req)
                device_req = global_req;
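
In other words, the effective limit handed to the cpuidle governors is now the
minimum of the per-device resume latency, the global CPU latency QoS and the
CPU system wakeup QoS, converted from microseconds to nanoseconds. A hedged,
self-contained sketch of that combination (the helper name is made up):

    /* Hypothetical helper: min-combine the three microsecond limits into ns. */
    static long long effective_latency_limit_ns(int device_req_us,
                                                int global_req_us,
                                                int wakeup_req_us)
    {
            int req = device_req_us;

            if (req > global_req_us)
                    req = global_req_us;
            if (req > wakeup_req_us)
                    req = wakeup_req_us;

            return (long long)req * 1000;   /* NSEC_PER_USEC */
    }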


@ -1425,8 +1425,14 @@ static void genpd_sync_power_off(struct generic_pm_domain *genpd, bool use_lock,
                        return;
        }

        if (genpd->gov && genpd->gov->system_power_down_ok) {
                if (!genpd->gov->system_power_down_ok(&genpd->domain))
                        return;
        } else {
                /* Default to the deepest state. */
                genpd->state_idx = genpd->state_count - 1;
        }

        if (_genpd_power_off(genpd, false)) {
                genpd->states[genpd->state_idx].rejected++;
                return;


@ -351,7 +351,7 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
        ktime_t domain_wakeup, next_hrtimer;
        ktime_t now = ktime_get();
        struct device *cpu_dev;
        s64 cpu_constraint, global_constraint, wakeup_constraint;
        s64 idle_duration_ns;
        int cpu, i;

@ -362,7 +362,11 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
        if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
                return true;

        wakeup_constraint = cpu_wakeup_latency_qos_limit();
        global_constraint = cpu_latency_qos_limit();
        if (global_constraint > wakeup_constraint)
                global_constraint = wakeup_constraint;

        /*
         * Find the next wakeup for any of the online CPUs within the PM domain
         * and its subdomains. Note, we only need the genpd->cpus, as it already

@ -415,9 +419,36 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd)
        return false;
}

static bool cpu_system_power_down_ok(struct dev_pm_domain *pd)
{
        s64 constraint_ns = cpu_wakeup_latency_qos_limit() * NSEC_PER_USEC;
        struct generic_pm_domain *genpd = pd_to_genpd(pd);
        int state_idx = genpd->state_count - 1;

        if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN)) {
                genpd->state_idx = state_idx;
                return true;
        }

        /* Find the deepest state for the latency constraint. */
        while (state_idx >= 0) {
                s64 latency_ns = genpd->states[state_idx].power_off_latency_ns +
                                 genpd->states[state_idx].power_on_latency_ns;

                if (latency_ns <= constraint_ns) {
                        genpd->state_idx = state_idx;
                        return true;
                }

                state_idx--;
        }

        return false;
}

struct dev_power_governor pm_domain_cpu_gov = {
        .suspend_ok = default_suspend_ok,
        .power_down_ok = cpu_power_down_ok,
        .system_power_down_ok = cpu_system_power_down_ok,
};
#endif
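
For context, a platform CPU PM domain picks up both the runtime power_down_ok()
check and the new system_power_down_ok() check simply by registering with
pm_domain_cpu_gov. A hedged sketch (the domain name is made up, and the
domain's idle states and power_on/power_off callbacks are omitted here):

    #include <linux/pm_domain.h>

    static struct generic_pm_domain my_cpu_pd = {
            .name = "my-cpu-cluster",       /* hypothetical CPU cluster domain */
            .flags = GENPD_FLAG_CPU_DOMAIN, /* mark it as a CPU PM domain */
            /* .states, .power_on, .power_off etc. omitted in this sketch */
    };

    static int __init my_cpu_pd_setup(void)
    {
            /*
             * Registering with pm_domain_cpu_gov makes the governor consult the
             * CPU latency QoS at runtime and the CPU system wakeup QoS during
             * system-wide suspend for this domain.
             */
            return pm_genpd_init(&my_cpu_pd, &pm_domain_cpu_gov, false);
    }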


@ -248,7 +248,8 @@ extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                                      struct cpuidle_device *dev,
                                      u64 latency_limit_ns);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev,
                                u64 latency_limit_ns);
extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
@ -256,7 +257,8 @@ static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                                             u64 latency_limit_ns)
{return -ENODEV; }
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
                                       struct cpuidle_device *dev,
                                       u64 latency_limit_ns)
{return -ENODEV; }
static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
{


@ -153,6 +153,7 @@ enum genpd_sync_state {
};

struct dev_power_governor {
        bool (*system_power_down_ok)(struct dev_pm_domain *domain);
        bool (*power_down_ok)(struct dev_pm_domain *domain);
        bool (*suspend_ok)(struct device *dev);
};
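
A governor that wants different system-wide suspend behavior can now provide
its own hook. A minimal, hypothetical sketch (all names below are made up; only
the struct layout above is taken from this merge):

    #include <linux/pm_domain.h>

    /* Always suspend through the shallowest domain state. */
    static bool my_system_power_down_ok(struct dev_pm_domain *pd)
    {
            struct generic_pm_domain *genpd = pd_to_genpd(pd);

            genpd->state_idx = 0;
            return true;
    }

    static struct dev_power_governor my_gov = {
            .system_power_down_ok = my_system_power_down_ok,
            /* .power_down_ok and .suspend_ok omitted in this sketch */
    };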


@ -162,6 +162,15 @@ static inline void cpu_latency_qos_update_request(struct pm_qos_request *req,
static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req) {}
#endif

#ifdef CONFIG_PM_QOS_CPU_SYSTEM_WAKEUP
s32 cpu_wakeup_latency_qos_limit(void);
#else
static inline s32 cpu_wakeup_latency_qos_limit(void)
{
        return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
}
#endif

#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);


@ -202,6 +202,17 @@ config PM_WAKELOCKS_GC
        depends on PM_WAKELOCKS
        default y

config PM_QOS_CPU_SYSTEM_WAKEUP
        bool "User space interface for CPU system wakeup QoS"
        depends on CPU_IDLE
        help
          Enable this to allow user space to specify a CPU system wakeup
          latency limit via the cpu_wakeup_latency file.

          This may be especially useful for platforms that support multiple
          low power states for CPUs during system-wide suspend, suspend-to-idle
          in particular.

config PM
        bool "Device power management core functionality"
        help


@ -415,6 +415,105 @@ static struct miscdevice cpu_latency_qos_miscdev = {
        .fops = &cpu_latency_qos_fops,
};

#ifdef CONFIG_PM_QOS_CPU_SYSTEM_WAKEUP

/* The CPU system wakeup latency QoS. */
static struct pm_qos_constraints cpu_wakeup_latency_constraints = {
        .list = PLIST_HEAD_INIT(cpu_wakeup_latency_constraints.list),
        .target_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT,
        .default_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT,
        .no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT,
        .type = PM_QOS_MIN,
};

/**
 * cpu_wakeup_latency_qos_limit - Current CPU system wakeup latency QoS limit.
 *
 * Returns the current CPU system wakeup latency QoS limit that may have been
 * requested by user space.
 */
s32 cpu_wakeup_latency_qos_limit(void)
{
        return pm_qos_read_value(&cpu_wakeup_latency_constraints);
}

static int cpu_wakeup_latency_qos_open(struct inode *inode, struct file *filp)
{
        struct pm_qos_request *req;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->qos = &cpu_wakeup_latency_constraints;
        pm_qos_update_target(req->qos, &req->node, PM_QOS_ADD_REQ,
                             PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
        filp->private_data = req;

        return 0;
}

static int cpu_wakeup_latency_qos_release(struct inode *inode,
                                          struct file *filp)
{
        struct pm_qos_request *req = filp->private_data;

        filp->private_data = NULL;
        pm_qos_update_target(req->qos, &req->node, PM_QOS_REMOVE_REQ,
                             PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);
        kfree(req);

        return 0;
}

static ssize_t cpu_wakeup_latency_qos_read(struct file *filp, char __user *buf,
                                           size_t count, loff_t *f_pos)
{
        s32 value = pm_qos_read_value(&cpu_wakeup_latency_constraints);

        return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
}

static ssize_t cpu_wakeup_latency_qos_write(struct file *filp,
                                            const char __user *buf,
                                            size_t count, loff_t *f_pos)
{
        struct pm_qos_request *req = filp->private_data;
        s32 value;

        if (count == sizeof(s32)) {
                if (copy_from_user(&value, buf, sizeof(s32)))
                        return -EFAULT;
        } else {
                int ret;

                ret = kstrtos32_from_user(buf, count, 16, &value);
                if (ret)
                        return ret;
        }

        if (value < 0)
                return -EINVAL;

        pm_qos_update_target(req->qos, &req->node, PM_QOS_UPDATE_REQ, value);

        return count;
}

static const struct file_operations cpu_wakeup_latency_qos_fops = {
        .open = cpu_wakeup_latency_qos_open,
        .release = cpu_wakeup_latency_qos_release,
        .read = cpu_wakeup_latency_qos_read,
        .write = cpu_wakeup_latency_qos_write,
        .llseek = noop_llseek,
};

static struct miscdevice cpu_wakeup_latency_qos_miscdev = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "cpu_wakeup_latency",
        .fops = &cpu_wakeup_latency_qos_fops,
};

#endif /* CONFIG_PM_QOS_CPU_SYSTEM_WAKEUP */

static int __init cpu_latency_qos_init(void)
{
        int ret;

@ -424,6 +523,13 @@ static int __init cpu_latency_qos_init(void)
                pr_err("%s: %s setup failed\n", __func__,
                       cpu_latency_qos_miscdev.name);

#ifdef CONFIG_PM_QOS_CPU_SYSTEM_WAKEUP
        ret = misc_register(&cpu_wakeup_latency_qos_miscdev);
        if (ret < 0)
                pr_err("%s: %s setup failed\n", __func__,
                       cpu_wakeup_latency_qos_miscdev.name);
#endif

        return ret;
}
late_initcall(cpu_latency_qos_init);
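
Since the constraint class uses PM_QOS_MIN, concurrent requests aggregate to
the lowest value. An illustrative user-space snippet (error handling omitted;
the device path is the one registered above and the 300/100 values are
arbitrary):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd1 = open("/dev/cpu_wakeup_latency", O_RDWR);
            int fd2 = open("/dev/cpu_wakeup_latency", O_RDWR);
            int32_t a = 300, b = 100, cur = 0;

            write(fd1, &a, sizeof(a));      /* request a 300 us limit */
            write(fd2, &b, sizeof(b));      /* request a 100 us limit */
            read(fd1, &cur, sizeof(cur));   /* PM_QOS_MIN: cur is now 100 */
            printf("effective limit: %d us\n", (int)cur);

            close(fd2);     /* dropping the 100 us request relaxes the limit to 300 */
            close(fd1);     /* dropping the last request removes the constraint */
            return 0;
    }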


@ -131,12 +131,13 @@ void __cpuidle default_idle_call(void)
}

static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
                               struct cpuidle_device *dev,
                               u64 max_latency_ns)
{
        if (current_clr_polling_and_test())
                return -EBUSY;

        return cpuidle_enter_s2idle(drv, dev, max_latency_ns);
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,

@ -205,12 +206,13 @@ static void cpuidle_idle_call(void)
                u64 max_latency_ns;

                if (idle_should_enter_s2idle()) {
                        max_latency_ns = cpu_wakeup_latency_qos_limit() *
                                         NSEC_PER_USEC;

                        entered_state = call_cpuidle_s2idle(drv, dev,
                                                            max_latency_ns);
                        if (entered_state > 0)
                                goto exit_idle;

                        max_latency_ns = U64_MAX;
                } else {
                        max_latency_ns = dev->forced_idle_latency_limit_ns;
                }


@ -37,9 +37,7 @@ NLS ?= true
# cpufreq-bench benchmarking tool
CPUFREQ_BENCH ?= true

# Build the code, including libraries, statically.
export STATIC ?= false

# Prefix to the directories we're installing to

@ -207,14 +205,25 @@ $(OUTPUT)lib/%.o: $(LIB_SRC) $(LIB_HEADERS)
        $(ECHO) " CC " $@
        $(QUIET) $(CC) $(CFLAGS) -fPIC -o $@ -c lib/$*.c

ifeq ($(strip $(STATIC)),true)
LIBCPUPOWER := libcpupower.a
else
LIBCPUPOWER := libcpupower.so.$(LIB_VER)
endif

$(OUTPUT)$(LIBCPUPOWER): $(LIB_OBJS)
ifeq ($(strip $(STATIC)),true)
        $(ECHO) " AR " $@
        $(QUIET) $(AR) rcs $@ $(LIB_OBJS)
else
        $(ECHO) " LD " $@
        $(QUIET) $(CC) -shared $(CFLAGS) $(LDFLAGS) -o $@ \
                -Wl,-soname,libcpupower.so.$(LIB_MAJ) $(LIB_OBJS)
        @ln -sf $(@F) $(OUTPUT)libcpupower.so
        @ln -sf $(@F) $(OUTPUT)libcpupower.so.$(LIB_MAJ)
endif

libcpupower: $(OUTPUT)$(LIBCPUPOWER)

# Let all .o files depend on its .c file and all headers
# Might be worth to put this into utils/Makefile at some point of time

@ -224,7 +233,7 @@ $(OUTPUT)%.o: %.c
        $(ECHO) " CC " $@
        $(QUIET) $(CC) $(CFLAGS) -I./lib -I ./utils -o $@ -c $*.c

$(OUTPUT)cpupower: $(UTIL_OBJS) $(OUTPUT)$(LIBCPUPOWER)
        $(ECHO) " CC " $@
ifeq ($(strip $(STATIC)),true)
        $(QUIET) $(CC) $(CFLAGS) $(LDFLAGS) $(UTIL_OBJS) -lrt -lpci -L$(OUTPUT) -o $@

@ -269,7 +278,7 @@ update-po: $(OUTPUT)po/$(PACKAGE).pot
        done;
endif

compile-bench: $(OUTPUT)$(LIBCPUPOWER)
        @V=$(V) confdir=$(confdir) $(MAKE) -C bench O=$(OUTPUT)

# we compile into subdirectories. if the target directory is not the

@ -287,6 +296,7 @@ clean:
        -find $(OUTPUT) \( -not -type d \) -and \( -name '*~' -o -name '*.[oas]' \) -type f -print \
                | xargs rm -f
        -rm -f $(OUTPUT)cpupower
        -rm -f $(OUTPUT)libcpupower.a
        -rm -f $(OUTPUT)libcpupower.so*
        -rm -rf $(OUTPUT)po/*.gmo
        -rm -rf $(OUTPUT)po/*.pot

@ -295,7 +305,11 @@ clean:
install-lib: libcpupower
        $(INSTALL) -d $(DESTDIR)${libdir}
ifeq ($(strip $(STATIC)),true)
        $(CP) $(OUTPUT)libcpupower.a $(DESTDIR)${libdir}/
else
        $(CP) $(OUTPUT)libcpupower.so* $(DESTDIR)${libdir}/
endif
        $(INSTALL) -d $(DESTDIR)${includedir}
        $(INSTALL_DATA) lib/cpufreq.h $(DESTDIR)${includedir}/cpufreq.h
        $(INSTALL_DATA) lib/cpuidle.h $(DESTDIR)${includedir}/cpuidle.h

@ -336,11 +350,7 @@ install-bench: compile-bench
        @#DESTDIR must be set from outside to survive
        @sbindir=$(sbindir) bindir=$(bindir) docdir=$(docdir) confdir=$(confdir) $(MAKE) -C bench O=$(OUTPUT) install

install: all install-lib install-tools install-man $(INSTALL_NLS) $(INSTALL_BENCH)

uninstall:
        - rm -f $(DESTDIR)${libdir}/libcpupower.*