Merge branches 'pm-em' and 'pm-opp'

Merge energy model management updates and operating performance points
(OPP) library changes for 6.19-rc1:

 - Add support for sending netlink notifications to user space on energy
   model updates (Changwoo Min, Peng Fan)

 - Minor improvements to the Rust OPP interface (Tamir Duberstein)

 - Clean up the OPP library so that scope-based pointers are initialized
   at the point of declaration (Viresh Kumar)

* pm-em:
  PM: EM: Add to em_pd_list only when no failure
  PM: EM: Notify an event when the performance domain changes
  PM: EM: Implement em_notify_pd_created/updated()
  PM: EM: Implement em_notify_pd_deleted()
  PM: EM: Implement em_nl_get_pd_table_doit()
  PM: EM: Implement em_nl_get_pds_doit()
  PM: EM: Add an iterator and accessor for the performance domain
  PM: EM: Add a skeleton code for netlink notification
  PM: EM: Add em.yaml and autogen files
  PM: EM: Expose the ID of a performance domain via debugfs
  PM: EM: Assign a unique ID when creating a performance domain

* pm-opp:
  rust: opp: simplify callers of `to_c_str_array`
  OPP: Initialize scope-based pointers inline
  rust: opp: fix broken rustdoc link
Rafael J. Wysocki 2025-11-28 16:44:00 +01:00
commit 638757c9c9
14 changed files with 870 additions and 154 deletions


@@ -0,0 +1,113 @@
# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)

name: em

doc: |
  Energy model netlink interface to notify its changes.

protocol: genetlink

uapi-header: linux/energy_model.h

attribute-sets:
  -
    name: pds
    attributes:
      -
        name: pd
        type: nest
        nested-attributes: pd
        multi-attr: true
  -
    name: pd
    attributes:
      -
        name: pad
        type: pad
      -
        name: pd-id
        type: u32
      -
        name: flags
        type: u64
      -
        name: cpus
        type: string
  -
    name: pd-table
    attributes:
      -
        name: pd-id
        type: u32
      -
        name: ps
        type: nest
        nested-attributes: ps
        multi-attr: true
  -
    name: ps
    attributes:
      -
        name: pad
        type: pad
      -
        name: performance
        type: u64
      -
        name: frequency
        type: u64
      -
        name: power
        type: u64
      -
        name: cost
        type: u64
      -
        name: flags
        type: u64

operations:
  list:
    -
      name: get-pds
      attribute-set: pds
      doc: Get the list of information for all performance domains.
      do:
        reply:
          attributes:
            - pd
    -
      name: get-pd-table
      attribute-set: pd-table
      doc: Get the energy model table of a performance domain.
      do:
        request:
          attributes:
            - pd-id
        reply:
          attributes:
            - pd-id
            - ps
    -
      name: pd-created
      doc: A performance domain is created.
      notify: get-pd-table
      mcgrp: event
    -
      name: pd-updated
      doc: A performance domain is updated.
      notify: get-pd-table
      mcgrp: event
    -
      name: pd-deleted
      doc: A performance domain is deleted.
      attribute-set: pd-table
      event:
        attributes:
          - pd-id
      mcgrp: event

mcast-groups:
  list:
    -
      name: event

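The spec above is consumable by the in-tree YNL tooling. A minimal usage
sketch, assuming the pyynl CLI shipped under tools/net/ynl in recent kernel
trees and an existing domain with pd-id 0 (both assumptions, not part of
this commit):

    # list all performance domains
    ./tools/net/ynl/pyynl/cli.py --spec Documentation/netlink/specs/em.yaml \
            --do get-pds
    # fetch one domain's energy model table
    ./tools/net/ynl/pyynl/cli.py --spec Documentation/netlink/specs/em.yaml \
            --do get-pd-table --json '{"pd-id": 0}'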

@@ -9188,6 +9188,9 @@ S:	Maintained
 F:	kernel/power/energy_model.c
 F:	include/linux/energy_model.h
 F:	Documentation/power/energy-model.rst
+F:	Documentation/netlink/specs/em.yaml
+F:	include/uapi/linux/energy_model.h
+F:	kernel/power/em_netlink*.*
 
 EPAPR HYPERVISOR BYTE CHANNEL DEVICE DRIVER
 M:	Laurentiu Tudor <laurentiu.tudor@nxp.com>


@@ -309,9 +309,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
  */
 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
 {
-        struct opp_table *opp_table __free(put_opp_table);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
 
-        opp_table = _find_opp_table(dev);
         if (IS_ERR(opp_table))
                 return 0;
@@ -327,7 +327,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
  */
 unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 {
-        struct opp_table *opp_table __free(put_opp_table);
         struct dev_pm_opp *opp;
         struct regulator *reg;
         unsigned long latency_ns = 0;
@@ -337,7 +336,9 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
                 unsigned long max;
         } *uV;
 
-        opp_table = _find_opp_table(dev);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
+
         if (IS_ERR(opp_table))
                 return 0;
@@ -409,10 +410,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
  */
 unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev)
 {
-        struct opp_table *opp_table __free(put_opp_table);
         unsigned long freq = 0;
 
-        opp_table = _find_opp_table(dev);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
+
         if (IS_ERR(opp_table))
                 return 0;
@@ -447,9 +449,9 @@ int _get_opp_count(struct opp_table *opp_table)
  */
 int dev_pm_opp_get_opp_count(struct device *dev)
 {
-        struct opp_table *opp_table __free(put_opp_table);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
 
-        opp_table = _find_opp_table(dev);
         if (IS_ERR(opp_table)) {
                 dev_dbg(dev, "%s: OPP table not found (%ld)\n",
                         __func__, PTR_ERR(opp_table));
@@ -605,9 +607,9 @@ _find_key(struct device *dev, unsigned long *key, int index, bool available,
                         unsigned long opp_key, unsigned long key),
                 bool (*assert)(struct opp_table *opp_table, unsigned int index))
 {
-        struct opp_table *opp_table __free(put_opp_table);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
 
-        opp_table = _find_opp_table(dev);
         if (IS_ERR(opp_table)) {
                 dev_err(dev, "%s: OPP table not found (%ld)\n", __func__,
                         PTR_ERR(opp_table));
@@ -1410,12 +1412,13 @@ static int _set_opp(struct device *dev, struct opp_table *opp_table,
  */
 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 {
-        struct opp_table *opp_table __free(put_opp_table);
         struct dev_pm_opp *opp __free(put_opp) = NULL;
         unsigned long freq = 0, temp_freq;
         bool forced = false;
 
-        opp_table = _find_opp_table(dev);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
+
         if (IS_ERR(opp_table)) {
                 dev_err(dev, "%s: device's opp table doesn't exist\n", __func__);
                 return PTR_ERR(opp_table);
@@ -1477,9 +1480,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
  */
 int dev_pm_opp_set_opp(struct device *dev, struct dev_pm_opp *opp)
 {
-        struct opp_table *opp_table __free(put_opp_table);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
 
-        opp_table = _find_opp_table(dev);
         if (IS_ERR(opp_table)) {
                 dev_err(dev, "%s: device opp doesn't exist\n", __func__);
                 return PTR_ERR(opp_table);
@@ -1794,10 +1797,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put);
  */
 void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 {
-        struct opp_table *opp_table __free(put_opp_table);
         struct dev_pm_opp *opp = NULL, *iter;
 
-        opp_table = _find_opp_table(dev);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
+
         if (IS_ERR(opp_table))
                 return;
@@ -1885,9 +1889,9 @@ bool _opp_remove_all_static(struct opp_table *opp_table)
  */
 void dev_pm_opp_remove_all_dynamic(struct device *dev)
 {
-        struct opp_table *opp_table __free(put_opp_table);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
 
-        opp_table = _find_opp_table(dev);
         if (IS_ERR(opp_table))
                 return;
@@ -2871,10 +2875,11 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
                                  bool availability_req)
 {
         struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp;
-        struct opp_table *opp_table __free(put_opp_table);
 
         /* Find the opp_table */
-        opp_table = _find_opp_table(dev);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
+
         if (IS_ERR(opp_table)) {
                 dev_warn(dev, "%s: Device OPP not found (%ld)\n", __func__,
                          PTR_ERR(opp_table));
@@ -2932,11 +2937,12 @@ int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
 {
         struct dev_pm_opp *opp __free(put_opp) = ERR_PTR(-ENODEV), *tmp_opp;
-        struct opp_table *opp_table __free(put_opp_table);
         int r;
 
         /* Find the opp_table */
-        opp_table = _find_opp_table(dev);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
+
         if (IS_ERR(opp_table)) {
                 r = PTR_ERR(opp_table);
                 dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
@@ -2986,12 +2992,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_adjust_voltage);
  */
 int dev_pm_opp_sync_regulators(struct device *dev)
 {
-        struct opp_table *opp_table __free(put_opp_table);
         struct regulator *reg;
         int ret, i;
 
         /* Device may not have OPP table */
-        opp_table = _find_opp_table(dev);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
+
         if (IS_ERR(opp_table))
                 return 0;
@@ -3062,9 +3069,9 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
  */
 int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb)
 {
-        struct opp_table *opp_table __free(put_opp_table);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
 
-        opp_table = _find_opp_table(dev);
         if (IS_ERR(opp_table))
                 return PTR_ERR(opp_table);
@@ -3082,9 +3089,9 @@ EXPORT_SYMBOL(dev_pm_opp_register_notifier);
 int dev_pm_opp_unregister_notifier(struct device *dev,
                                    struct notifier_block *nb)
 {
-        struct opp_table *opp_table __free(put_opp_table);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
 
-        opp_table = _find_opp_table(dev);
         if (IS_ERR(opp_table))
                 return PTR_ERR(opp_table);
@@ -3101,10 +3108,10 @@ EXPORT_SYMBOL(dev_pm_opp_unregister_notifier);
  */
 void dev_pm_opp_remove_table(struct device *dev)
 {
-        struct opp_table *opp_table __free(put_opp_table);
 
         /* Check for existing table for 'dev' */
-        opp_table = _find_opp_table(dev);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(dev);
         if (IS_ERR(opp_table)) {
                 int error = PTR_ERR(opp_table);

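Every hunk above follows the same recipe: the scope-based cleanup annotation
from <linux/cleanup.h> runs its release hook (put_opp_table) when the variable
goes out of scope, so the pointer has to be initialized where it is declared;
assigning it later leaves a window in which an early return would run the hook
on an uninitialized value. A minimal sketch of the resulting calling pattern;
show_opp_count() is a hypothetical illustration, while __free(put_opp_table),
_find_opp_table() and _get_opp_count() are the kernel symbols used above:

    static int show_opp_count(struct device *dev)
    {
            /* acquire the reference and arm the cleanup in one statement */
            struct opp_table *opp_table __free(put_opp_table) =
                                            _find_opp_table(dev);

            if (IS_ERR(opp_table))
                    return PTR_ERR(opp_table); /* hook skips error pointers */

            /* put_opp_table(opp_table) runs automatically on any return */
            return _get_opp_count(opp_table);
    }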

@@ -56,10 +56,10 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
                 return -ENOMEM;
 
         for (i = 0, rate = 0; i < max_opps; i++, rate++) {
-                struct dev_pm_opp *opp __free(put_opp);
-
                 /* find next rate */
-                opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+                struct dev_pm_opp *opp __free(put_opp) =
+                        dev_pm_opp_find_freq_ceil(dev, &rate);
                 if (IS_ERR(opp)) {
                         ret = PTR_ERR(opp);
                         goto out;
@@ -154,12 +154,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
 int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
                                 const struct cpumask *cpumask)
 {
-        struct opp_table *opp_table __free(put_opp_table);
         struct opp_device *opp_dev;
         struct device *dev;
         int cpu;
 
-        opp_table = _find_opp_table(cpu_dev);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(cpu_dev);
+
         if (IS_ERR(opp_table))
                 return PTR_ERR(opp_table);
@@ -201,10 +202,11 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
  */
 int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 {
-        struct opp_table *opp_table __free(put_opp_table);
         struct opp_device *opp_dev;
 
-        opp_table = _find_opp_table(cpu_dev);
+        struct opp_table *opp_table __free(put_opp_table) =
+                                        _find_opp_table(cpu_dev);
+
         if (IS_ERR(opp_table))
                 return PTR_ERR(opp_table);

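The cpu.c hunks use the per-OPP variant of the same idiom: __free(put_opp)
drops the reference taken by dev_pm_opp_find_freq_ceil() when the pointer
leaves scope. A minimal sketch under the same caveat (next_rate_or_zero()
is illustrative only, not part of this commit):

    static unsigned long next_rate_or_zero(struct device *dev, unsigned long rate)
    {
            /* find_freq_ceil() rounds `rate` up to the matched OPP */
            struct dev_pm_opp *opp __free(put_opp) =
                            dev_pm_opp_find_freq_ceil(dev, &rate);

            /* the OPP reference, if any, is dropped on return */
            return IS_ERR(opp) ? 0 : rate;
    }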

@@ -45,9 +45,10 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
 struct opp_table *_managed_opp(struct device *dev, int index)
 {
         struct opp_table *opp_table, *managed_table = NULL;
-        struct device_node *np __free(device_node);
 
-        np = _opp_of_get_opp_desc_node(dev->of_node, index);
+        struct device_node *np __free(device_node) =
+                        _opp_of_get_opp_desc_node(dev->of_node, index);
+
         if (!np)
                 return NULL;
@@ -95,10 +96,11 @@ static struct device_node *of_parse_required_opp(struct device_node *np,
 /* The caller must call dev_pm_opp_put_opp_table() after the table is used */
 static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
 {
-        struct device_node *opp_table_np __free(device_node);
         struct opp_table *opp_table;
 
-        opp_table_np = of_get_parent(opp_np);
+        struct device_node *opp_table_np __free(device_node) =
+                                        of_get_parent(opp_np);
+
         if (!opp_table_np)
                 return ERR_PTR(-ENODEV);
@@ -146,12 +148,13 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
                                              struct device_node *opp_np)
 {
         struct opp_table **required_opp_tables;
-        struct device_node *np __free(device_node);
         bool lazy = false;
         int count, i, size;
 
         /* Traversing the first OPP node is all we need */
-        np = of_get_next_available_child(opp_np, NULL);
+        struct device_node *np __free(device_node) =
+                        of_get_next_available_child(opp_np, NULL);
+
         if (!np) {
                 dev_warn(dev, "Empty OPP table\n");
                 return;
@@ -171,9 +174,9 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
         opp_table->required_opp_count = count;
 
         for (i = 0; i < count; i++) {
-                struct device_node *required_np __free(device_node);
+                struct device_node *required_np __free(device_node) =
+                                of_parse_required_opp(np, i);
 
-                required_np = of_parse_required_opp(np, i);
                 if (!required_np) {
                         _opp_table_free_required_tables(opp_table);
                         return;
@@ -199,14 +202,15 @@ static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
 void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
                         int index)
 {
-        struct device_node *np __free(device_node), *opp_np;
+        struct device_node *opp_np;
         u32 val;
 
         /*
          * Only required for backward compatibility with v1 bindings, but isn't
          * harmful for other cases. And so we do it unconditionally.
          */
-        np = of_node_get(dev->of_node);
+        struct device_node *np __free(device_node) = of_node_get(dev->of_node);
+
         if (!np)
                 return;
@@ -273,9 +277,9 @@ void _of_clear_opp(struct opp_table *opp_table, struct dev_pm_opp *opp)
 static int _link_required_opps(struct dev_pm_opp *opp,
                                struct opp_table *required_table, int index)
 {
-        struct device_node *np __free(device_node);
+        struct device_node *np __free(device_node) =
+                        of_parse_required_opp(opp->np, index);
 
-        np = of_parse_required_opp(opp->np, index);
         if (unlikely(!np))
                 return -ENODEV;
@@ -349,16 +353,13 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
         guard(mutex)(&opp_table_lock);
 
         list_for_each_entry_safe(opp_table, temp, &lazy_opp_tables, lazy) {
-                struct device_node *opp_np __free(device_node);
                 bool lazy = false;
 
                 /* opp_np can't be invalid here */
-                opp_np = of_get_next_available_child(opp_table->np, NULL);
+                struct device_node *opp_np __free(device_node) =
+                        of_get_next_available_child(opp_table->np, NULL);
 
                 for (i = 0; i < opp_table->required_opp_count; i++) {
-                        struct device_node *required_np __free(device_node) = NULL;
-                        struct device_node *required_table_np __free(device_node) = NULL;
-
                         required_opp_tables = opp_table->required_opp_tables;
 
                         /* Required opp-table is already parsed */
@@ -366,8 +367,10 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
                                 continue;
 
                         /* required_np can't be invalid here */
-                        required_np = of_parse_required_opp(opp_np, i);
-                        required_table_np = of_get_parent(required_np);
+                        struct device_node *required_np __free(device_node) =
+                                        of_parse_required_opp(opp_np, i);
+                        struct device_node *required_table_np __free(device_node) =
+                                        of_get_parent(required_np);
 
                         /*
                          * Newly added table isn't the required opp-table for
@@ -402,13 +405,12 @@ static void lazy_link_required_opp_table(struct opp_table *new_table)
 static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
 {
         struct device_node *opp_np __free(device_node) = NULL;
-        struct device_node *np __free(device_node) = NULL;
         struct property *prop;
 
         if (!opp_table) {
-                struct device_node *np __free(device_node);
+                struct device_node *np __free(device_node) =
+                                of_node_get(dev->of_node);
 
-                np = of_node_get(dev->of_node);
                 if (!np)
                         return -ENODEV;
@@ -422,7 +424,9 @@ static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
                 return 0;
 
         /* Checking only first OPP is sufficient */
-        np = of_get_next_available_child(opp_np, NULL);
+        struct device_node *np __free(device_node) =
+                        of_get_next_available_child(opp_np, NULL);
+
         if (!np) {
                 dev_err(dev, "OPP table empty\n");
                 return -EINVAL;
@@ -1269,11 +1273,12 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
 int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
                                    struct cpumask *cpumask)
 {
-        struct device_node *np __free(device_node);
         int cpu;
 
         /* Get OPP descriptor node */
-        np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+        struct device_node *np __free(device_node) =
+                        dev_pm_opp_of_get_opp_desc_node(cpu_dev);
+
         if (!np) {
                 dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__);
                 return -ENOENT;
@@ -1286,13 +1291,12 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
                 return 0;
 
         for_each_possible_cpu(cpu) {
-                struct device_node *cpu_np __free(device_node) = NULL;
-                struct device_node *tmp_np __free(device_node) = NULL;
-
                 if (cpu == cpu_dev->id)
                         continue;
 
-                cpu_np = of_cpu_device_node_get(cpu);
+                struct device_node *cpu_np __free(device_node) =
+                                        of_cpu_device_node_get(cpu);
                 if (!cpu_np) {
                         dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
                                 __func__, cpu);
@@ -1300,7 +1304,9 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
                 }
 
                 /* Get OPP descriptor node */
-                tmp_np = _opp_of_get_opp_desc_node(cpu_np, 0);
+                struct device_node *tmp_np __free(device_node) =
+                                        _opp_of_get_opp_desc_node(cpu_np, 0);
+
                 if (!tmp_np) {
                         pr_err("%pOF: Couldn't find opp node\n", cpu_np);
                         return -ENOENT;
@@ -1328,16 +1334,17 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
  */
 int of_get_required_opp_performance_state(struct device_node *np, int index)
 {
-        struct device_node *required_np __free(device_node);
-        struct opp_table *opp_table __free(put_opp_table) = NULL;
-        struct dev_pm_opp *opp __free(put_opp) = NULL;
         int pstate = -EINVAL;
 
-        required_np = of_parse_required_opp(np, index);
+        struct device_node *required_np __free(device_node) =
+                        of_parse_required_opp(np, index);
         if (!required_np)
                 return -ENODEV;
 
-        opp_table = _find_table_of_opp_np(required_np);
+        struct opp_table *opp_table __free(put_opp_table) =
+                        _find_table_of_opp_np(required_np);
         if (IS_ERR(opp_table)) {
                 pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
                        __func__, np, PTR_ERR(opp_table));
@@ -1350,7 +1357,9 @@ int of_get_required_opp_performance_state(struct device_node *np, int index)
                 return -EINVAL;
         }
 
-        opp = _find_opp_of_np(opp_table, required_np);
+        struct dev_pm_opp *opp __free(put_opp) =
+                        _find_opp_of_np(opp_table, required_np);
+
         if (opp) {
                 if (opp->level == OPP_LEVEL_UNSET) {
                         pr_err("%s: OPP levels aren't available for %pOF\n",
@@ -1376,14 +1385,17 @@ EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
  */
 bool dev_pm_opp_of_has_required_opp(struct device *dev)
 {
-        struct device_node *np __free(device_node) = NULL, *opp_np __free(device_node);
         int count;
 
-        opp_np = _opp_of_get_opp_desc_node(dev->of_node, 0);
+        struct device_node *opp_np __free(device_node) =
+                        _opp_of_get_opp_desc_node(dev->of_node, 0);
+
         if (!opp_np)
                 return false;
 
-        np = of_get_next_available_child(opp_np, NULL);
+        struct device_node *np __free(device_node) =
+                        of_get_next_available_child(opp_np, NULL);
+
         if (!np) {
                 dev_warn(dev, "Empty OPP table\n");
                 return false;
@@ -1425,12 +1437,14 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
 static int __maybe_unused
 _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz)
 {
-        struct dev_pm_opp *opp __free(put_opp);
         unsigned long opp_freq, opp_power;
 
         /* Find the right frequency and related OPP */
         opp_freq = *kHz * 1000;
-        opp = dev_pm_opp_find_freq_ceil(dev, &opp_freq);
+        struct dev_pm_opp *opp __free(put_opp) =
+                        dev_pm_opp_find_freq_ceil(dev, &opp_freq);
+
         if (IS_ERR(opp))
                 return -EINVAL;
@@ -1465,14 +1479,13 @@ _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz)
 int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
                           unsigned long *kHz)
 {
-        struct dev_pm_opp *opp __free(put_opp) = NULL;
-        struct device_node *np __free(device_node);
         unsigned long mV, Hz;
         u32 cap;
         u64 tmp;
         int ret;
 
-        np = of_node_get(dev->of_node);
+        struct device_node *np __free(device_node) = of_node_get(dev->of_node);
+
         if (!np)
                 return -EINVAL;
@@ -1481,7 +1494,10 @@ int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW,
                 return -EINVAL;
 
         Hz = *kHz * 1000;
-        opp = dev_pm_opp_find_freq_ceil(dev, &Hz);
+
+        struct dev_pm_opp *opp __free(put_opp) =
+                        dev_pm_opp_find_freq_ceil(dev, &Hz);
+
         if (IS_ERR(opp))
                 return -EINVAL;
@@ -1502,11 +1518,12 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_calc_power);
 static bool _of_has_opp_microwatt_property(struct device *dev)
 {
-        struct dev_pm_opp *opp __free(put_opp);
         unsigned long freq = 0;
 
         /* Check if at least one OPP has needed property */
-        opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+        struct dev_pm_opp *opp __free(put_opp) =
+                        dev_pm_opp_find_freq_ceil(dev, &freq);
+
         if (IS_ERR(opp))
                 return false;
@@ -1526,12 +1543,16 @@ static bool _of_has_opp_microwatt_property(struct device *dev)
 int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
 {
-        struct device_node *np __free(device_node) = NULL;
         struct em_data_callback em_cb;
         int ret, nr_opp;
         u32 cap;
 
-        if (IS_ERR_OR_NULL(dev)) {
+        if (IS_ERR_OR_NULL(dev))
+                return -EINVAL;
+
+        struct device_node *np __free(device_node) = of_node_get(dev->of_node);
+        if (!np) {
                 ret = -EINVAL;
                 goto failed;
         }
@@ -1548,12 +1569,6 @@ int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
                 goto register_em;
         }
 
-        np = of_node_get(dev->of_node);
-        if (!np) {
-                ret = -EINVAL;
-                goto failed;
-        }
-
         /*
          * Register an EM only if the 'dynamic-power-coefficient' property is
          * set in devicetree. It is assumed the voltage values are known if that

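of.c repeats the idiom with OF nodes: __free(device_node) pairs with
of_node_put(), so each node reference obtained from a lookup helper is
dropped when its pointer goes out of scope. A minimal sketch
(first_child_is_available() is hypothetical; the helpers are the ones
used above):

    static bool first_child_is_available(struct device *dev)
    {
            struct device_node *np __free(device_node) =
                            of_get_next_available_child(dev->of_node, NULL);

            return np != NULL; /* of_node_put(np) runs automatically */
    }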

@@ -54,6 +54,8 @@ struct em_perf_table {
 /**
  * struct em_perf_domain - Performance domain
  * @em_table: Pointer to the runtime modifiable em_perf_table
+ * @node: node in em_pd_list (in energy_model.c)
+ * @id: A unique ID number for each performance domain
  * @nr_perf_states: Number of performance states
  * @min_perf_state: Minimum allowed Performance State index
  * @max_perf_state: Maximum allowed Performance State index
@@ -71,6 +73,8 @@ struct em_perf_table {
  */
 struct em_perf_domain {
         struct em_perf_table __rcu *em_table;
+        struct list_head node;
+        int id;
         int nr_perf_states;
         int min_perf_state;
         int max_perf_state;


@@ -0,0 +1,62 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/* Do not edit directly, auto-generated from: */
/*	Documentation/netlink/specs/em.yaml */
/* YNL-GEN uapi header */

#ifndef _UAPI_LINUX_ENERGY_MODEL_H
#define _UAPI_LINUX_ENERGY_MODEL_H

#define EM_FAMILY_NAME		"em"
#define EM_FAMILY_VERSION	1

enum {
	EM_A_PDS_PD = 1,

	__EM_A_PDS_MAX,
	EM_A_PDS_MAX = (__EM_A_PDS_MAX - 1)
};

enum {
	EM_A_PD_PAD = 1,
	EM_A_PD_PD_ID,
	EM_A_PD_FLAGS,
	EM_A_PD_CPUS,

	__EM_A_PD_MAX,
	EM_A_PD_MAX = (__EM_A_PD_MAX - 1)
};

enum {
	EM_A_PD_TABLE_PD_ID = 1,
	EM_A_PD_TABLE_PS,

	__EM_A_PD_TABLE_MAX,
	EM_A_PD_TABLE_MAX = (__EM_A_PD_TABLE_MAX - 1)
};

enum {
	EM_A_PS_PAD = 1,
	EM_A_PS_PERFORMANCE,
	EM_A_PS_FREQUENCY,
	EM_A_PS_POWER,
	EM_A_PS_COST,
	EM_A_PS_FLAGS,

	__EM_A_PS_MAX,
	EM_A_PS_MAX = (__EM_A_PS_MAX - 1)
};

enum {
	EM_CMD_GET_PDS = 1,
	EM_CMD_GET_PD_TABLE,
	EM_CMD_PD_CREATED,
	EM_CMD_PD_UPDATED,
	EM_CMD_PD_DELETED,

	__EM_CMD_MAX,
	EM_CMD_MAX = (__EM_CMD_MAX - 1)
};

#define EM_MCGRP_EVENT	"event"

#endif /* _UAPI_LINUX_ENERGY_MODEL_H */

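User space can drive these commands with any generic netlink library. A
hedged sketch with libnl-3 (link with -lnl-3 -lnl-genl-3; error handling
and reply parsing are trimmed; only the EM_* names come from this header):

    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/energy_model.h>

    int main(void)
    {
            struct nl_sock *sk = nl_socket_alloc();
            struct nl_msg *msg = nlmsg_alloc();
            int fam;

            genl_connect(sk);
            fam = genl_ctrl_resolve(sk, EM_FAMILY_NAME);

            /* ask for the list of all performance domains */
            genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
                        EM_CMD_GET_PDS, EM_FAMILY_VERSION);
            nl_send_auto(sk, msg);
            nl_recvmsgs_default(sk); /* reply carries nested EM_A_PDS_PD */

            nlmsg_free(msg);
            nl_socket_free(sk);
            return 0;
    }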

@@ -21,4 +21,6 @@ obj-$(CONFIG_PM_WAKELOCKS)	+= wakelock.o
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
 
-obj-$(CONFIG_ENERGY_MODEL)	+= energy_model.o
+obj-$(CONFIG_ENERGY_MODEL)	+= em.o
+em-y				:= energy_model.o
+em-$(CONFIG_NET)		+= em_netlink_autogen.o em_netlink.o

kernel/power/em_netlink.c (new file, 308 lines)

@@ -0,0 +1,308 @@
// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Generic netlink for energy model.
 *
 * Copyright (c) 2025 Valve Corporation.
 * Author: Changwoo Min <changwoo@igalia.com>
 */

#define pr_fmt(fmt) "energy_model: " fmt

#include <linux/energy_model.h>
#include <net/sock.h>
#include <net/genetlink.h>
#include <uapi/linux/energy_model.h>
#include "em_netlink.h"
#include "em_netlink_autogen.h"

#define EM_A_PD_CPUS_LEN	256

/*************************** Command encoding ********************************/
static int __em_nl_get_pd_size(struct em_perf_domain *pd, void *data)
{
	char cpus_buf[EM_A_PD_CPUS_LEN];
	int *tot_msg_sz = data;
	int msg_sz, cpus_sz;

	cpus_sz = snprintf(cpus_buf, sizeof(cpus_buf), "%*pb",
			   cpumask_pr_args(to_cpumask(pd->cpus)));

	msg_sz = nla_total_size(0) +			/* EM_A_PDS_PD */
		 nla_total_size(sizeof(u32)) +		/* EM_A_PD_PD_ID */
		 nla_total_size_64bit(sizeof(u64)) +	/* EM_A_PD_FLAGS */
		 nla_total_size(cpus_sz);		/* EM_A_PD_CPUS */

	*tot_msg_sz += nlmsg_total_size(genlmsg_msg_size(msg_sz));
	return 0;
}

static int __em_nl_get_pd(struct em_perf_domain *pd, void *data)
{
	char cpus_buf[EM_A_PD_CPUS_LEN];
	struct sk_buff *msg = data;
	struct nlattr *entry;

	entry = nla_nest_start(msg, EM_A_PDS_PD);
	if (!entry)
		goto out_cancel_nest;

	if (nla_put_u32(msg, EM_A_PD_PD_ID, pd->id))
		goto out_cancel_nest;

	if (nla_put_u64_64bit(msg, EM_A_PD_FLAGS, pd->flags, EM_A_PD_PAD))
		goto out_cancel_nest;

	snprintf(cpus_buf, sizeof(cpus_buf), "%*pb",
		 cpumask_pr_args(to_cpumask(pd->cpus)));
	if (nla_put_string(msg, EM_A_PD_CPUS, cpus_buf))
		goto out_cancel_nest;

	nla_nest_end(msg, entry);

	return 0;

out_cancel_nest:
	nla_nest_cancel(msg, entry);

	return -EMSGSIZE;
}

int em_nl_get_pds_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int cmd = info->genlhdr->cmd;
	int ret = -EMSGSIZE, msg_sz = 0;

	for_each_em_perf_domain(__em_nl_get_pd_size, &msg_sz);

	msg = genlmsg_new(msg_sz, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put_reply(msg, info, &em_nl_family, 0, cmd);
	if (!hdr)
		goto out_free_msg;

	ret = for_each_em_perf_domain(__em_nl_get_pd, msg);
	if (ret)
		goto out_cancel_msg;

	genlmsg_end(msg, hdr);

	return genlmsg_reply(msg, info);

out_cancel_msg:
	genlmsg_cancel(msg, hdr);
out_free_msg:
	nlmsg_free(msg);

	return ret;
}

static struct em_perf_domain *__em_nl_get_pd_table_id(struct nlattr **attrs)
{
	struct em_perf_domain *pd;
	int id;

	if (!attrs[EM_A_PD_TABLE_PD_ID])
		return NULL;

	id = nla_get_u32(attrs[EM_A_PD_TABLE_PD_ID]);
	pd = em_perf_domain_get_by_id(id);

	return pd;
}

static int __em_nl_get_pd_table_size(const struct em_perf_domain *pd)
{
	int id_sz, ps_sz;

	id_sz = nla_total_size(sizeof(u32));		/* EM_A_PD_TABLE_PD_ID */
	ps_sz = nla_total_size(0) +			/* EM_A_PD_TABLE_PS */
		nla_total_size_64bit(sizeof(u64)) +	/* EM_A_PS_PERFORMANCE */
		nla_total_size_64bit(sizeof(u64)) +	/* EM_A_PS_FREQUENCY */
		nla_total_size_64bit(sizeof(u64)) +	/* EM_A_PS_POWER */
		nla_total_size_64bit(sizeof(u64)) +	/* EM_A_PS_COST */
		nla_total_size_64bit(sizeof(u64));	/* EM_A_PS_FLAGS */
	ps_sz *= pd->nr_perf_states;

	return nlmsg_total_size(genlmsg_msg_size(id_sz + ps_sz));
}

static int __em_nl_get_pd_table(struct sk_buff *msg, const struct em_perf_domain *pd)
{
	struct em_perf_state *table, *ps;
	struct nlattr *entry;
	int i;

	if (nla_put_u32(msg, EM_A_PD_TABLE_PD_ID, pd->id))
		goto out_err;

	rcu_read_lock();
	table = em_perf_state_from_pd((struct em_perf_domain *)pd);
	for (i = 0; i < pd->nr_perf_states; i++) {
		ps = &table[i];

		entry = nla_nest_start(msg, EM_A_PD_TABLE_PS);
		if (!entry)
			goto out_unlock_ps;

		if (nla_put_u64_64bit(msg, EM_A_PS_PERFORMANCE,
				      ps->performance, EM_A_PS_PAD))
			goto out_cancel_ps_nest;

		if (nla_put_u64_64bit(msg, EM_A_PS_FREQUENCY,
				      ps->frequency, EM_A_PS_PAD))
			goto out_cancel_ps_nest;

		if (nla_put_u64_64bit(msg, EM_A_PS_POWER,
				      ps->power, EM_A_PS_PAD))
			goto out_cancel_ps_nest;

		if (nla_put_u64_64bit(msg, EM_A_PS_COST,
				      ps->cost, EM_A_PS_PAD))
			goto out_cancel_ps_nest;

		if (nla_put_u64_64bit(msg, EM_A_PS_FLAGS,
				      ps->flags, EM_A_PS_PAD))
			goto out_cancel_ps_nest;

		nla_nest_end(msg, entry);
	}
	rcu_read_unlock();

	return 0;

out_cancel_ps_nest:
	nla_nest_cancel(msg, entry);
out_unlock_ps:
	rcu_read_unlock();
out_err:
	return -EMSGSIZE;
}

int em_nl_get_pd_table_doit(struct sk_buff *skb, struct genl_info *info)
{
	int cmd = info->genlhdr->cmd;
	int msg_sz, ret = -EMSGSIZE;
	struct em_perf_domain *pd;
	struct sk_buff *msg;
	void *hdr;

	pd = __em_nl_get_pd_table_id(info->attrs);
	if (!pd)
		return -EINVAL;

	msg_sz = __em_nl_get_pd_table_size(pd);

	msg = genlmsg_new(msg_sz, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put_reply(msg, info, &em_nl_family, 0, cmd);
	if (!hdr)
		goto out_free_msg;

	ret = __em_nl_get_pd_table(msg, pd);
	if (ret)
		goto out_free_msg;

	genlmsg_end(msg, hdr);

	return genlmsg_reply(msg, info);

out_free_msg:
	nlmsg_free(msg);

	return ret;
}

/**************************** Event encoding *********************************/
static void __em_notify_pd_table(const struct em_perf_domain *pd, int ntf_type)
{
	struct sk_buff *msg;
	int msg_sz, ret = -EMSGSIZE;
	void *hdr;

	if (!genl_has_listeners(&em_nl_family, &init_net, EM_NLGRP_EVENT))
		return;

	msg_sz = __em_nl_get_pd_table_size(pd);

	msg = genlmsg_new(msg_sz, GFP_KERNEL);
	if (!msg)
		return;

	hdr = genlmsg_put(msg, 0, 0, &em_nl_family, 0, ntf_type);
	if (!hdr)
		goto out_free_msg;

	ret = __em_nl_get_pd_table(msg, pd);
	if (ret)
		goto out_free_msg;

	genlmsg_end(msg, hdr);

	genlmsg_multicast(&em_nl_family, msg, 0, EM_NLGRP_EVENT, GFP_KERNEL);
	return;

out_free_msg:
	nlmsg_free(msg);
	return;
}

void em_notify_pd_created(const struct em_perf_domain *pd)
{
	__em_notify_pd_table(pd, EM_CMD_PD_CREATED);
}

void em_notify_pd_updated(const struct em_perf_domain *pd)
{
	__em_notify_pd_table(pd, EM_CMD_PD_UPDATED);
}

static int __em_notify_pd_deleted_size(const struct em_perf_domain *pd)
{
	int id_sz = nla_total_size(sizeof(u32));	/* EM_A_PD_TABLE_PD_ID */

	return nlmsg_total_size(genlmsg_msg_size(id_sz));
}

void em_notify_pd_deleted(const struct em_perf_domain *pd)
{
	struct sk_buff *msg;
	void *hdr;
	int msg_sz;

	if (!genl_has_listeners(&em_nl_family, &init_net, EM_NLGRP_EVENT))
		return;

	msg_sz = __em_notify_pd_deleted_size(pd);

	msg = genlmsg_new(msg_sz, GFP_KERNEL);
	if (!msg)
		return;

	hdr = genlmsg_put(msg, 0, 0, &em_nl_family, 0, EM_CMD_PD_DELETED);
	if (!hdr)
		goto out_free_msg;

	if (nla_put_u32(msg, EM_A_PD_TABLE_PD_ID, pd->id)) {
		goto out_free_msg;
	}

	genlmsg_end(msg, hdr);

	genlmsg_multicast(&em_nl_family, msg, 0, EM_NLGRP_EVENT, GFP_KERNEL);
	return;

out_free_msg:
	nlmsg_free(msg);
	return;
}

/**************************** Initialization *********************************/
static int __init em_netlink_init(void)
{
	return genl_register_family(&em_nl_family);
}
postcore_initcall(em_netlink_init);

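A process that wants the pd-created/updated/deleted events sent above has to
join the family's "event" multicast group. A hedged libnl-3 sketch (the
on_event() body and the receive loop are illustrative, not part of this
commit):

    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>

    static int on_event(struct nl_msg *msg, void *arg)
    {
            /* parse the genl header and EM_A_PD_TABLE_* attributes here */
            return NL_OK;
    }

    int main(void)
    {
            struct nl_sock *sk = nl_socket_alloc();

            genl_connect(sk);
            nl_socket_add_membership(sk, genl_ctrl_resolve_grp(sk, "em", "event"));

            /* notifications are unsolicited: no sequence numbers to check */
            nl_socket_disable_seq_check(sk);
            nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, on_event, NULL);

            for (;;)
                    nl_recvmsgs_default(sk);
    }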
kernel/power/em_netlink.h (new file, 39 lines)

@@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *
 * Generic netlink for energy model.
 *
 * Copyright (c) 2025 Valve Corporation.
 * Author: Changwoo Min <changwoo@igalia.com>
 */

#ifndef _EM_NETLINK_H
#define _EM_NETLINK_H

#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_NET)
int for_each_em_perf_domain(int (*cb)(struct em_perf_domain*, void *),
			    void *data);
struct em_perf_domain *em_perf_domain_get_by_id(int id);
void em_notify_pd_created(const struct em_perf_domain *pd);
void em_notify_pd_deleted(const struct em_perf_domain *pd);
void em_notify_pd_updated(const struct em_perf_domain *pd);
#else
static inline
int for_each_em_perf_domain(int (*cb)(struct em_perf_domain*, void *),
			    void *data)
{
	return -EINVAL;
}
static inline
struct em_perf_domain *em_perf_domain_get_by_id(int id)
{
	return NULL;
}
static inline void em_notify_pd_created(const struct em_perf_domain *pd) {}
static inline void em_notify_pd_deleted(const struct em_perf_domain *pd) {}
static inline void em_notify_pd_updated(const struct em_perf_domain *pd) {}
#endif

#endif /* _EM_NETLINK_H */


@@ -0,0 +1,48 @@
// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
/* Do not edit directly, auto-generated from: */
/*	Documentation/netlink/specs/em.yaml */
/* YNL-GEN kernel source */

#include <net/netlink.h>
#include <net/genetlink.h>

#include "em_netlink_autogen.h"

#include <uapi/linux/energy_model.h>

/* EM_CMD_GET_PD_TABLE - do */
static const struct nla_policy em_get_pd_table_nl_policy[EM_A_PD_TABLE_PD_ID + 1] = {
	[EM_A_PD_TABLE_PD_ID] = { .type = NLA_U32, },
};

/* Ops table for em */
static const struct genl_split_ops em_nl_ops[] = {
	{
		.cmd	= EM_CMD_GET_PDS,
		.doit	= em_nl_get_pds_doit,
		.flags	= GENL_CMD_CAP_DO,
	},
	{
		.cmd		= EM_CMD_GET_PD_TABLE,
		.doit		= em_nl_get_pd_table_doit,
		.policy		= em_get_pd_table_nl_policy,
		.maxattr	= EM_A_PD_TABLE_PD_ID,
		.flags		= GENL_CMD_CAP_DO,
	},
};

static const struct genl_multicast_group em_nl_mcgrps[] = {
	[EM_NLGRP_EVENT] = { "event", },
};

struct genl_family em_nl_family __ro_after_init = {
	.name		= EM_FAMILY_NAME,
	.version	= EM_FAMILY_VERSION,
	.netnsok	= true,
	.parallel_ops	= true,
	.module		= THIS_MODULE,
	.split_ops	= em_nl_ops,
	.n_split_ops	= ARRAY_SIZE(em_nl_ops),
	.mcgrps		= em_nl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(em_nl_mcgrps),
};


@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/* Do not edit directly, auto-generated from: */
/*	Documentation/netlink/specs/em.yaml */
/* YNL-GEN kernel header */

#ifndef _LINUX_EM_GEN_H
#define _LINUX_EM_GEN_H

#include <net/netlink.h>
#include <net/genetlink.h>

#include <uapi/linux/energy_model.h>

int em_nl_get_pds_doit(struct sk_buff *skb, struct genl_info *info);
int em_nl_get_pd_table_doit(struct sk_buff *skb, struct genl_info *info);

enum {
	EM_NLGRP_EVENT,
};

extern struct genl_family em_nl_family;

#endif /* _LINUX_EM_GEN_H */


@@ -17,12 +17,24 @@
 #include <linux/sched/topology.h>
 #include <linux/slab.h>
 
+#include "em_netlink.h"
+
 /*
  * Mutex serializing the registrations of performance domains and letting
  * callbacks defined by drivers sleep.
  */
 static DEFINE_MUTEX(em_pd_mutex);
 
+/*
+ * Manage performance domains with IDs. One can iterate the performance domains
+ * through the list and pick one with their associated ID. The mutex serializes
+ * the list access. When holding em_pd_list_mutex, em_pd_mutex should not be
+ * taken to avoid potential deadlock.
+ */
+static DEFINE_IDA(em_pd_ida);
+static LIST_HEAD(em_pd_list);
+static DEFINE_MUTEX(em_pd_list_mutex);
+
 static void em_cpufreq_update_efficiencies(struct device *dev,
                                            struct em_perf_state *table);
 static void em_check_capacity_update(void);
@@ -116,6 +128,16 @@ static int em_debug_flags_show(struct seq_file *s, void *unused)
 }
 DEFINE_SHOW_ATTRIBUTE(em_debug_flags);
 
+static int em_debug_id_show(struct seq_file *s, void *unused)
+{
+        struct em_perf_domain *pd = s->private;
+
+        seq_printf(s, "%d\n", pd->id);
+
+        return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(em_debug_id);
+
 static void em_debug_create_pd(struct device *dev)
 {
         struct em_dbg_info *em_dbg;
@@ -132,6 +154,8 @@ static void em_debug_create_pd(struct device *dev)
         debugfs_create_file("flags", 0444, d, dev->em_pd,
                             &em_debug_flags_fops);
 
+        debugfs_create_file("id", 0444, d, dev->em_pd, &em_debug_id_fops);
+
         em_dbg = devm_kcalloc(dev, dev->em_pd->nr_perf_states,
                               sizeof(*em_dbg), GFP_KERNEL);
         if (!em_dbg)
@@ -328,6 +352,8 @@ int em_dev_update_perf_domain(struct device *dev,
         em_table_free(old_table);
 
         mutex_unlock(&em_pd_mutex);
+
+        em_notify_pd_updated(pd);
+
         return 0;
 }
 EXPORT_SYMBOL_GPL(em_dev_update_perf_domain);
@@ -396,7 +422,7 @@ static int em_create_pd(struct device *dev, int nr_states,
         struct em_perf_table *em_table;
         struct em_perf_domain *pd;
         struct device *cpu_dev;
-        int cpu, ret, num_cpus;
+        int cpu, ret, num_cpus, id;
 
         if (_is_cpu_device(dev)) {
                 num_cpus = cpumask_weight(cpus);
@@ -420,6 +446,13 @@ static int em_create_pd(struct device *dev, int nr_states,
         pd->nr_perf_states = nr_states;
 
+        INIT_LIST_HEAD(&pd->node);
+        id = ida_alloc(&em_pd_ida, GFP_KERNEL);
+        if (id < 0)
+                return -ENOMEM;
+
+        pd->id = id;
+
         em_table = em_table_alloc(pd);
         if (!em_table)
                 goto free_pd;
@@ -444,6 +477,7 @@ static int em_create_pd(struct device *dev, int nr_states,
         kfree(em_table);
 free_pd:
         kfree(pd);
+        ida_free(&em_pd_ida, id);
         return -EINVAL;
 }
@@ -659,8 +693,16 @@ int em_dev_register_pd_no_update(struct device *dev, unsigned int nr_states,
 unlock:
         mutex_unlock(&em_pd_mutex);
 
-        return ret;
+        if (ret)
+                return ret;
+
+        mutex_lock(&em_pd_list_mutex);
+        list_add_tail(&dev->em_pd->node, &em_pd_list);
+        mutex_unlock(&em_pd_list_mutex);
+
+        em_notify_pd_created(dev->em_pd);
+
+        return 0;
 }
 EXPORT_SYMBOL_GPL(em_dev_register_pd_no_update);
@@ -678,6 +720,12 @@ void em_dev_unregister_perf_domain(struct device *dev)
         if (_is_cpu_device(dev))
                 return;
 
+        mutex_lock(&em_pd_list_mutex);
+        list_del_init(&dev->em_pd->node);
+        mutex_unlock(&em_pd_list_mutex);
+
+        em_notify_pd_deleted(dev->em_pd);
+
         /*
          * The mutex separates all register/unregister requests and protects
          * from potential clean-up/setup issues in the debugfs directories.
@@ -689,6 +737,8 @@ void em_dev_unregister_perf_domain(struct device *dev)
         em_table_free(rcu_dereference_protected(dev->em_pd->em_table,
                                                 lockdep_is_held(&em_pd_mutex)));
 
+        ida_free(&em_pd_ida, dev->em_pd->id);
+
         kfree(dev->em_pd);
         dev->em_pd = NULL;
         mutex_unlock(&em_pd_mutex);
@@ -958,3 +1008,39 @@ void em_rebuild_sched_domains(void)
          */
         schedule_work(&rebuild_sd_work);
 }
+
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_NET)
+int for_each_em_perf_domain(int (*cb)(struct em_perf_domain*, void *),
+                            void *data)
+{
+        struct em_perf_domain *pd;
+
+        lockdep_assert_not_held(&em_pd_mutex);
+
+        guard(mutex)(&em_pd_list_mutex);
+        list_for_each_entry(pd, &em_pd_list, node) {
+                int ret;
+
+                ret = cb(pd, data);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
+}
+
+struct em_perf_domain *em_perf_domain_get_by_id(int id)
+{
+        struct em_perf_domain *pd;
+
+        lockdep_assert_not_held(&em_pd_mutex);
+
+        guard(mutex)(&em_pd_list_mutex);
+        list_for_each_entry(pd, &em_pd_list, node) {
+                if (pd->id == id)
+                        return pd;
+        }
+
+        return NULL;
+}
+#endif

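The two helpers above keep em_pd_list_mutex private to this file: external
code walks the domains through a callback instead of touching the list. A
minimal sketch of a caller (count_pds() and em_pd_count() are illustrative,
not part of this commit):

    static int count_pds(struct em_perf_domain *pd, void *data)
    {
            (*(int *)data)++;       /* invoked once per registered domain */
            return 0;               /* a non-zero return stops the walk */
    }

    static int em_pd_count(void)
    {
            int nr = 0;

            for_each_em_perf_domain(count_pds, &nr);
            return nr;
    }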

@@ -87,7 +87,7 @@ fn drop(&mut self) {
 
 use macros::vtable;
 
-/// Creates a null-terminated slice of pointers to [`Cstring`]s.
+/// Creates a null-terminated slice of pointers to [`CString`]s.
 fn to_c_str_array(names: &[CString]) -> Result<KVec<*const u8>> {
     // Allocated a null-terminated vector of pointers.
     let mut list = KVec::with_capacity(names.len() + 1, GFP_KERNEL)?;
@@ -443,66 +443,70 @@ pub fn set_supported_hw(mut self, hw: KVec<u32>) -> Result<Self> {
     ///
     /// The returned [`ConfigToken`] will remove the configuration when dropped.
     pub fn set(self, dev: &Device) -> Result<ConfigToken> {
-        let (_clk_list, clk_names) = match &self.clk_names {
-            Some(x) => {
-                let list = to_c_str_array(x)?;
-                let ptr = list.as_ptr();
-                (Some(list), ptr)
-            }
-            None => (None, ptr::null()),
-        };
-
-        let (_regulator_list, regulator_names) = match &self.regulator_names {
-            Some(x) => {
-                let list = to_c_str_array(x)?;
-                let ptr = list.as_ptr();
-                (Some(list), ptr)
-            }
-            None => (None, ptr::null()),
-        };
-
-        let prop_name = self
-            .prop_name
-            .as_ref()
-            .map_or(ptr::null(), |p| p.as_char_ptr());
-
-        let (supported_hw, supported_hw_count) = self
-            .supported_hw
-            .as_ref()
-            .map_or((ptr::null(), 0), |hw| (hw.as_ptr(), hw.len() as u32));
-
-        let (required_dev, required_dev_index) = self
-            .required_dev
-            .as_ref()
-            .map_or((ptr::null_mut(), 0), |(dev, idx)| (dev.as_raw(), *idx));
-
-        let mut config = bindings::dev_pm_opp_config {
-            clk_names,
-            config_clks: if T::HAS_CONFIG_CLKS {
-                Some(Self::config_clks)
-            } else {
-                None
-            },
-            prop_name,
-            regulator_names,
-            config_regulators: if T::HAS_CONFIG_REGULATORS {
-                Some(Self::config_regulators)
-            } else {
-                None
-            },
-            supported_hw,
-            supported_hw_count,
-            required_dev,
-            required_dev_index,
-        };
-
-        // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
-        // requirements. The OPP core guarantees not to access fields of [`Config`] after this call
-        // and so we don't need to save a copy of them for future use.
-        let ret = unsafe { bindings::dev_pm_opp_set_config(dev.as_raw(), &mut config) };
-        to_result(ret).map(|()| ConfigToken(ret))
+        let clk_names = self.clk_names.as_deref().map(to_c_str_array).transpose()?;
+        let regulator_names = self
+            .regulator_names
+            .as_deref()
+            .map(to_c_str_array)
+            .transpose()?;
+
+        let set_config = || {
+            let clk_names = clk_names.as_ref().map_or(ptr::null(), |c| c.as_ptr());
+            let regulator_names = regulator_names.as_ref().map_or(ptr::null(), |c| c.as_ptr());
+            let prop_name = self
+                .prop_name
+                .as_ref()
+                .map_or(ptr::null(), |p| p.as_char_ptr());
+
+            let (supported_hw, supported_hw_count) = self
+                .supported_hw
+                .as_ref()
+                .map_or((ptr::null(), 0), |hw| (hw.as_ptr(), hw.len() as u32));
+
+            let (required_dev, required_dev_index) = self
+                .required_dev
+                .as_ref()
+                .map_or((ptr::null_mut(), 0), |(dev, idx)| (dev.as_raw(), *idx));
+
+            let mut config = bindings::dev_pm_opp_config {
+                clk_names,
+                config_clks: if T::HAS_CONFIG_CLKS {
+                    Some(Self::config_clks)
+                } else {
+                    None
+                },
+                prop_name,
+                regulator_names,
+                config_regulators: if T::HAS_CONFIG_REGULATORS {
+                    Some(Self::config_regulators)
+                } else {
+                    None
+                },
+                supported_hw,
+                supported_hw_count,
+                required_dev,
+                required_dev_index,
+            };
+
+            // SAFETY: The requirements are satisfied by the existence of [`Device`] and its safety
+            // requirements. The OPP core guarantees not to access fields of [`Config`] after this
+            // call and so we don't need to save a copy of them for future use.
+            let ret = unsafe { bindings::dev_pm_opp_set_config(dev.as_raw(), &mut config) };
+            to_result(ret).map(|()| ConfigToken(ret))
+        };
+
+        // Ensure the closure does not accidentally drop owned data; if violated, the compiler
+        // produces E0525 with e.g.:
+        //
+        // ```
+        // closure is `FnOnce` because it moves the variable `clk_names` out of its environment
+        // ```
+        let _: &dyn Fn() -> _ = &set_config;
+
+        set_config()
     }
 
     /// Config's clk callback.
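
The `&dyn Fn() -> _` line above is a compile-time assertion rather than a
runtime check: a reference to the closure only coerces to `&dyn Fn` when the
closure captures its environment by reference. A standalone sketch of the
same trick (illustrative, not kernel code):

    fn main() {
        let data = String::from("captured by reference");

        // Borrows `data`, so the closure implements `Fn`.
        let closure = || println!("{data}");

        // Fails with E0525 if the closure body moved `data` out of its
        // environment, which would make it `FnOnce` only.
        let _: &dyn Fn() = &closure;

        closure();
    }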