Driver core changes for 6.18-rc1

- Auxiliary:
    - Drop call to dev_pm_domain_detach() in auxiliary_bus_probe()
    - Optimize logic of auxiliary_match_id()
 
 - Rust:
   - Auxiliary:
     - Use primitive C types from prelude
 
   - DebugFs:
     - Add debugfs support for simple read/write files and custom callbacks
       through a File-type-based and directory-scope-based API
     - Sample driver code for the File-type-based API
     - Sample module code for the directory-scope-based API
 
   - I/O:
      - Add io::poll module and implement Rust-specific read_poll_timeout()
       helper
 
   - IRQ:
     - Implement support for threaded and non-threaded device IRQs based on
       (&Device<Bound>, IRQ number) tuples (IrqRequest)
     - Provide &Device<Bound> cookie in IRQ handlers
 
   - PCI:
     - Support IRQ requests from IRQ vectors for a specific pci::Device<Bound>
     - Implement accessors for subsystem IDs, revision, devid and resource start
     - Provide dedicated pci::Vendor and pci::Class types for vendor and class
       ID numbers
     - Implement Display to print actual vendor and class names; Debug to print
       the raw ID numbers
     - Add pci::DeviceId::from_class_and_vendor() helper
     - Use primitive C types from prelude
     - Various minor inline and (safety) comment improvements
 
   - Platform:
     - Support IRQ requests from IRQ vectors for a specific
       platform::Device<Bound>
 
   - Nova:
     - Use pci::DeviceId::from_class_and_vendor() to avoid probing
       non-display/compute PCI functions
 
   - Misc:
     - Add helper for cpu_relax()
     - Update ARef import from sync::aref
 
 - sysfs:
   - Remove bin_attrs_new field from struct attribute_group
   - Remove read_new() and write_new() from struct bin_attribute
 
 - Misc:
   - Document potential race condition in get_dev_from_fwnode()
   - Constify node_group argument in software node registration functions
   - Fix order of kernel-doc parameters in various functions
   - Set power.no_pm flag for faux devices
   - Set power.no_callbacks flag along with the power.no_pm flag
   - Constify the pmu_bus bus type
   - Minor spelling fixes
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQS2q/xV6QjXAdC7k+1FlHeO1qrKLgUCaNmQGwAKCRBFlHeO1qrK
 LmPzAP9msIvK8eFT4CEDK4buX1gd+VBOdy8mAjAeJ2F80FIo8wEAtOdddNaaqWVF
 m4ac2/a2bSRKMGPX+wIM7d2HGyC7sgY=
 =XbU+
 -----END PGP SIGNATURE-----

Merge tag 'driver-core-6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core

Pull driver core updates from Danilo Krummrich:
 "Auxiliary:
   - Drop call to dev_pm_domain_detach() in auxiliary_bus_probe()
   - Optimize logic of auxiliary_match_id()

  Rust:
   - Auxiliary:
      - Use primitive C types from prelude

   - DebugFs:
      - Add debugfs support for simple read/write files and custom
        callbacks through a File-type-based and directory-scope-based
        API
      - Sample driver code for the File-type-based API
      - Sample module code for the directory-scope-based API

   - I/O:
      - Add io::poll module and implement Rust-specific
        read_poll_timeout() helper

   - IRQ:
      - Implement support for threaded and non-threaded device IRQs
        based on (&Device<Bound>, IRQ number) tuples (IrqRequest)
      - Provide &Device<Bound> cookie in IRQ handlers

   - PCI:
      - Support IRQ requests from IRQ vectors for a specific
        pci::Device<Bound>
      - Implement accessors for subsystem IDs, revision, devid and
        resource start
      - Provide dedicated pci::Vendor and pci::Class types for vendor
        and class ID numbers
      - Implement Display to print actual vendor and class names; Debug
        to print the raw ID numbers
      - Add pci::DeviceId::from_class_and_vendor() helper
      - Use primitive C types from prelude
      - Various minor inline and (safety) comment improvements

   - Platform:
      - Support IRQ requests from IRQ vectors for a specific
        platform::Device<Bound>

   - Nova:
      - Use pci::DeviceId::from_class_and_vendor() to avoid probing
        non-display/compute PCI functions

   - Misc:
      - Add helper for cpu_relax()
      - Update ARef import from sync::aref

  sysfs:
   - Remove bin_attrs_new field from struct attribute_group
   - Remove read_new() and write_new() from struct bin_attribute

  Misc:
   - Document potential race condition in get_dev_from_fwnode()
   - Constify node_group argument in software node registration
     functions
   - Fix order of kernel-doc parameters in various functions
   - Set power.no_pm flag for faux devices
   - Set power.no_callbacks flag along with the power.no_pm flag
   - Constify the pmu_bus bus type
   - Minor spelling fixes"
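
As a quick illustration of the PCI items above, here is a minimal editorial
sketch (not taken from this series). It builds the same kind of class/vendor
match entry that the Nova diff below adds, and uses Display/Debug as the
summary describes them; the printing itself is only illustrative:

use kernel::pci::{self, Class, ClassMask, Vendor};
use kernel::prelude::*;

fn nvidia_vga_entry() -> pci::DeviceId {
    // Match class 0x0300 (VGA-compatible display controller) on class+subclass,
    // from vendor NVIDIA, leaving the device ID unconstrained (as in the Nova
    // usage below).
    pci::DeviceId::from_class_and_vendor(Class::DISPLAY_VGA, ClassMask::ClassSubclass, Vendor::NVIDIA)
}

fn log_ids() {
    // Display prints the symbolic names, Debug prints the raw ID numbers.
    pr_info!("vendor: {} ({:?})\n", Vendor::NVIDIA, Vendor::NVIDIA);
    pr_info!("class:  {} ({:?})\n", Class::DISPLAY_VGA, Class::DISPLAY_VGA);
}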

* tag 'driver-core-6.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core: (43 commits)
  rust: pci: display symbolic PCI vendor names
  rust: pci: display symbolic PCI class names
  rust: pci: fix incorrect platform reference in PCI driver probe doc comment
  rust: pci: fix incorrect platform reference in PCI driver unbind doc comment
  perf: make pmu_bus const
  samples: rust: Add scoped debugfs sample driver
  rust: debugfs: Add support for scoped directories
  samples: rust: Add debugfs sample driver
  rust: debugfs: Add support for callback-based files
  rust: debugfs: Add support for writable files
  rust: debugfs: Add support for read-only files
  rust: debugfs: Add initial support for directories
  driver core: auxiliary bus: Optimize logic of auxiliary_match_id()
  driver core: auxiliary bus: Drop dev_pm_domain_detach() call
  driver core: Fix order of the kernel-doc parameters
  driver core: get_dev_from_fwnode(): document potential race
  drivers: base: fix "publically"->"publicly"
  driver core/PM: Set power.no_callbacks along with power.no_pm
  driver core: faux: Set power.no_pm for faux devices
  rust: pci: inline several tiny functions
  ...
Linus Torvalds 2025-10-01 08:39:23 -07:00
commit eb3289fc47
43 changed files with 3392 additions and 99 deletions

MAINTAINERS

@ -7021,6 +7021,21 @@ F: drivers/devfreq/event/
F: include/dt-bindings/pmu/exynos_ppmu.h
F: include/linux/devfreq-event.h
DEVICE I/O & IRQ [RUST]
M: Danilo Krummrich <dakr@kernel.org>
M: Alice Ryhl <aliceryhl@google.com>
M: Daniel Almeida <daniel.almeida@collabora.com>
L: rust-for-linux@vger.kernel.org
S: Supported
W: https://rust-for-linux.com
B: https://github.com/Rust-for-Linux/linux/issues
C: https://rust-for-linux.zulipchat.com
T: git git://git.kernel.org/pub/scm/linux/kernel/git/driver-core/driver-core.git
F: rust/kernel/io.rs
F: rust/kernel/io/
F: rust/kernel/irq.rs
F: rust/kernel/irq/
DEVICE RESOURCE MANAGEMENT HELPERS
M: Hans de Goede <hansg@kernel.org>
R: Matti Vaittinen <mazziesaccount@gmail.com>
@ -7472,6 +7487,8 @@ F: include/linux/kobj*
F: include/linux/property.h
F: include/linux/sysfs.h
F: lib/kobj*
F: rust/kernel/debugfs.rs
F: rust/kernel/debugfs/
F: rust/kernel/device.rs
F: rust/kernel/device/
F: rust/kernel/device_id.rs
@ -7479,6 +7496,8 @@ F: rust/kernel/devres.rs
F: rust/kernel/driver.rs
F: rust/kernel/faux.rs
F: rust/kernel/platform.rs
F: samples/rust/rust_debugfs.rs
F: samples/rust/rust_debugfs_scoped.rs
F: samples/rust/rust_driver_platform.rs
F: samples/rust/rust_driver_faux.rs
@ -19574,6 +19593,7 @@ C: irc://irc.oftc.net/linux-pci
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
F: rust/helpers/pci.c
F: rust/kernel/pci.rs
F: rust/kernel/pci/
F: samples/rust/rust_driver_pci.rs
PCIE BANDWIDTH CONTROLLER

drivers/base/auxiliary.c

@ -171,17 +171,18 @@
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
{
const char *auxdev_name = dev_name(&auxdev->dev);
const char *p = strrchr(auxdev_name, '.');
int match_size;
if (!p)
return NULL;
match_size = p - auxdev_name;
for (; id->name[0]; id++) {
const char *p = strrchr(dev_name(&auxdev->dev), '.');
int match_size;
if (!p)
continue;
match_size = p - dev_name(&auxdev->dev);
/* use dev_name(&auxdev->dev) prefix before last '.' char to match to */
if (strlen(id->name) == match_size &&
!strncmp(dev_name(&auxdev->dev), id->name, match_size))
!strncmp(auxdev_name, id->name, match_size))
return id;
}
return NULL;
@ -217,17 +218,14 @@ static int auxiliary_bus_probe(struct device *dev)
struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
int ret;
ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON);
ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON |
PD_FLAG_DETACH_POWER_OFF);
if (ret) {
dev_warn(dev, "Failed to attach to PM Domain : %d\n", ret);
return ret;
}
ret = auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
if (ret)
dev_pm_domain_detach(dev, true);
return ret;
return auxdrv->probe(auxdev, auxiliary_match_id(auxdrv->id_table, auxdev));
}
static void auxiliary_bus_remove(struct device *dev)
@ -237,7 +235,6 @@ static void auxiliary_bus_remove(struct device *dev)
if (auxdrv->remove)
auxdrv->remove(auxdev);
dev_pm_domain_detach(dev, true);
}
static void auxiliary_bus_shutdown(struct device *dev)

drivers/base/core.c

@ -3994,8 +3994,8 @@ const char *device_get_devnode(const struct device *dev,
/**
* device_for_each_child - device child iterator.
* @parent: parent struct device.
* @fn: function to be called for each device.
* @data: data for the callback.
* @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@ -4024,8 +4024,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child);
/**
* device_for_each_child_reverse - device child iterator in reversed order.
* @parent: parent struct device.
* @fn: function to be called for each device.
* @data: data for the callback.
* @fn: function to be called for each device.
*
* Iterate over @parent's child devices, and call @fn for each,
* passing it @data.
@ -4055,8 +4055,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse);
* device_for_each_child_reverse_from - device child iterator in reversed order.
* @parent: parent struct device.
* @from: optional starting point in child list
* @fn: function to be called for each device.
* @data: data for the callback.
* @fn: function to be called for each device.
*
* Iterate over @parent's child devices, starting at @from, and call @fn
* for each, passing it @data. This helper is identical to
@ -4089,8 +4089,8 @@ EXPORT_SYMBOL_GPL(device_for_each_child_reverse_from);
/**
* device_find_child - device iterator for locating a particular device.
* @parent: parent struct device
* @match: Callback function to check device
* @data: Data to pass to match function
* @match: Callback function to check device
*
* This is similar to the device_for_each_child() function above, but it
* returns a reference to a device that is 'found' for later use, as
@ -5278,6 +5278,25 @@ void device_set_node(struct device *dev, struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(device_set_node);
/**
* get_dev_from_fwnode - Obtain a reference count of the struct device the
* struct fwnode_handle is associated with.
* @fwnode: The pointer to the struct fwnode_handle to obtain the struct device
* reference count of.
*
* This function obtains a reference count of the device the device pointer
* embedded in the struct fwnode_handle points to.
*
* Note that the struct device pointer embedded in struct fwnode_handle does
* *not* have a reference count of the struct device itself.
*
* Hence, it is a UAF (and thus a bug) to call this function if the caller can't
* guarantee that the last reference count of the corresponding struct device is
* not dropped concurrently.
*
* This is possible since struct fwnode_handle has its own reference count and
* hence can out-live the struct device it is associated with.
*/
struct device *get_dev_from_fwnode(struct fwnode_handle *fwnode)
{
return get_device((fwnode)->dev);

drivers/base/cpu.c

@ -325,7 +325,7 @@ static void cpu_device_release(struct device *dev)
* This is an empty function to prevent the driver core from spitting a
* warning at us. Yes, I know this is directly opposite of what the
* documentation for the driver core and kobjects say, and the author
* of this code has already been publically ridiculed for doing
* of this code has already been publicly ridiculed for doing
* something as foolish as this. However, at this point in time, it is
* the only way to handle the issue of statically allocated cpu
* devices. The different architectures will have their cpu device

View File

@ -155,6 +155,7 @@ struct faux_device *faux_device_create_with_groups(const char *name,
dev->parent = &faux_bus_root;
dev->bus = &faux_bus_type;
dev_set_name(dev, "%s", name);
device_set_pm_not_required(dev);
ret = device_add(dev);
if (ret) {

drivers/base/swnode.c

@ -844,7 +844,7 @@ swnode_register(const struct software_node *node, struct swnode *parent,
* of this function or by ordering the array such that parent comes before
* child.
*/
int software_node_register_node_group(const struct software_node **node_group)
int software_node_register_node_group(const struct software_node * const *node_group)
{
unsigned int i;
int ret;
@ -877,8 +877,7 @@ EXPORT_SYMBOL_GPL(software_node_register_node_group);
* remove the nodes individually, in the correct order (child before
* parent).
*/
void software_node_unregister_node_group(
const struct software_node **node_group)
void software_node_unregister_node_group(const struct software_node * const *node_group)
{
unsigned int i = 0;

drivers/gpu/nova-core/driver.rs

@ -1,6 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
use kernel::{auxiliary, bindings, c_str, device::Core, pci, prelude::*, sizes::SZ_16M, sync::Arc};
use kernel::{
auxiliary, c_str,
device::Core,
pci,
pci::{Class, ClassMask, Vendor},
prelude::*,
sizes::SZ_16M,
sync::Arc,
};
use crate::gpu::Gpu;
@ -18,10 +26,25 @@ pub(crate) struct NovaCore {
PCI_TABLE,
MODULE_PCI_TABLE,
<NovaCore as pci::Driver>::IdInfo,
[(
pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_NVIDIA, bindings::PCI_ANY_ID as u32),
()
)]
[
// Modern NVIDIA GPUs will show up as either VGA or 3D controllers.
(
pci::DeviceId::from_class_and_vendor(
Class::DISPLAY_VGA,
ClassMask::ClassSubclass,
Vendor::NVIDIA
),
()
),
(
pci::DeviceId::from_class_and_vendor(
Class::DISPLAY_3D,
ClassMask::ClassSubclass,
Vendor::NVIDIA
),
()
),
]
);
impl pci::Driver for NovaCore {

fs/sysfs/file.c

@ -97,12 +97,9 @@ static ssize_t sysfs_kf_bin_read(struct kernfs_open_file *of, char *buf,
count = size - pos;
}
if (!battr->read && !battr->read_new)
if (!battr->read)
return -EIO;
if (battr->read_new)
return battr->read_new(of->file, kobj, battr, buf, pos, count);
return battr->read(of->file, kobj, battr, buf, pos, count);
}
@ -161,12 +158,9 @@ static ssize_t sysfs_kf_bin_write(struct kernfs_open_file *of, char *buf,
if (!count)
return 0;
if (!battr->write && !battr->write_new)
if (!battr->write)
return -EIO;
if (battr->write_new)
return battr->write_new(of->file, kobj, battr, buf, pos, count);
return battr->write(of->file, kobj, battr, buf, pos, count);
}
@ -335,19 +329,13 @@ int sysfs_add_bin_file_mode_ns(struct kernfs_node *parent,
const struct kernfs_ops *ops;
struct kernfs_node *kn;
if (battr->read && battr->read_new)
return -EINVAL;
if (battr->write && battr->write_new)
return -EINVAL;
if (battr->mmap)
ops = &sysfs_bin_kfops_mmap;
else if ((battr->read || battr->read_new) && (battr->write || battr->write_new))
else if (battr->read && battr->write)
ops = &sysfs_bin_kfops_rw;
else if (battr->read || battr->read_new)
else if (battr->read)
ops = &sysfs_bin_kfops_ro;
else if (battr->write || battr->write_new)
else if (battr->write)
ops = &sysfs_bin_kfops_wo;
else
ops = &sysfs_file_kfops_empty;

include/linux/pm.h

@ -851,6 +851,9 @@ static inline bool device_pm_not_required(struct device *dev)
static inline void device_set_pm_not_required(struct device *dev)
{
dev->power.no_pm = true;
#ifdef CONFIG_PM
dev->power.no_callbacks = true;
#endif
}
static inline void dev_pm_syscore_device(struct device *dev, bool val)

include/linux/property.h

@ -574,8 +574,8 @@ const struct software_node *
software_node_find_by_name(const struct software_node *parent,
const char *name);
int software_node_register_node_group(const struct software_node **node_group);
void software_node_unregister_node_group(const struct software_node **node_group);
int software_node_register_node_group(const struct software_node * const *node_group);
void software_node_unregister_node_group(const struct software_node * const *node_group);
int software_node_register(const struct software_node *node);
void software_node_unregister(const struct software_node *node);

include/linux/sysfs.h

@ -106,10 +106,7 @@ struct attribute_group {
const struct bin_attribute *,
int);
struct attribute **attrs;
union {
const struct bin_attribute *const *bin_attrs;
const struct bin_attribute *const *bin_attrs_new;
};
const struct bin_attribute *const *bin_attrs;
};
#define SYSFS_PREALLOC 010000
@ -293,7 +290,7 @@ __ATTRIBUTE_GROUPS(_name)
#define BIN_ATTRIBUTE_GROUPS(_name) \
static const struct attribute_group _name##_group = { \
.bin_attrs_new = _name##_attrs, \
.bin_attrs = _name##_attrs, \
}; \
__ATTRIBUTE_GROUPS(_name)
@ -308,12 +305,8 @@ struct bin_attribute {
struct address_space *(*f_mapping)(void);
ssize_t (*read)(struct file *, struct kobject *, const struct bin_attribute *,
char *, loff_t, size_t);
ssize_t (*read_new)(struct file *, struct kobject *, const struct bin_attribute *,
char *, loff_t, size_t);
ssize_t (*write)(struct file *, struct kobject *, const struct bin_attribute *,
char *, loff_t, size_t);
ssize_t (*write_new)(struct file *, struct kobject *,
const struct bin_attribute *, char *, loff_t, size_t);
loff_t (*llseek)(struct file *, struct kobject *, const struct bin_attribute *,
loff_t, int);
int (*mmap)(struct file *, struct kobject *, const struct bin_attribute *attr,

kernel/events/core.c

@ -12234,7 +12234,7 @@ static const struct attribute_group *pmu_dev_groups[] = {
};
static int pmu_bus_running;
static struct bus_type pmu_bus = {
static const struct bus_type pmu_bus = {
.name = "event_source",
.dev_groups = pmu_dev_groups,
};

rust/bindings/bindings_helper.h

@ -46,12 +46,14 @@
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cred.h>
#include <linux/debugfs.h>
#include <linux/device/faux.h>
#include <linux/dma-mapping.h>
#include <linux/errname.h>
#include <linux/ethtool.h>
#include <linux/file.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>

rust/helpers/helpers.c

@ -24,6 +24,7 @@
#include "dma.c"
#include "drm.c"
#include "err.c"
#include "irq.c"
#include "fs.c"
#include "io.c"
#include "jump_label.c"
@ -36,6 +37,7 @@
#include "pid_namespace.c"
#include "platform.c"
#include "poll.c"
#include "processor.c"
#include "property.c"
#include "rbtree.c"
#include "rcu.c"

rust/helpers/irq.c Normal file

@ -0,0 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/interrupt.h>
int rust_helper_request_irq(unsigned int irq, irq_handler_t handler,
unsigned long flags, const char *name, void *dev)
{
return request_irq(irq, handler, flags, name, dev);
}

rust/helpers/pci.c

@ -2,6 +2,16 @@
#include <linux/pci.h>
u16 rust_helper_pci_dev_id(struct pci_dev *dev)
{
return PCI_DEVID(dev->bus->number, dev->devfn);
}
resource_size_t rust_helper_pci_resource_start(struct pci_dev *pdev, int bar)
{
return pci_resource_start(pdev, bar);
}
resource_size_t rust_helper_pci_resource_len(struct pci_dev *pdev, int bar)
{
return pci_resource_len(pdev, bar);
@ -11,3 +21,11 @@ bool rust_helper_dev_is_pci(const struct device *dev)
{
return dev_is_pci(dev);
}
#ifndef CONFIG_PCI_MSI
int rust_helper_pci_irq_vector(struct pci_dev *pdev, unsigned int nvec)
{
return pci_irq_vector(pdev, nvec);
}
#endif

rust/helpers/processor.c Normal file

@ -0,0 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/processor.h>
void rust_helper_cpu_relax(void)
{
cpu_relax();
}
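
On the Rust side this backs kernel::processor::cpu_relax() (the new io::poll
module imports it from there). A minimal editorial sketch of a bounded
busy-wait, assuming spinning is acceptable in the calling context:

use kernel::processor::cpu_relax;

/// Polls `ready()` up to `spins` times, hinting to the CPU between iterations.
fn spin_until(ready: impl Fn() -> bool, mut spins: u32) -> bool {
    while spins > 0 {
        if ready() {
            return true;
        }
        cpu_relax();
        spins -= 1;
    }
    false
}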

rust/kernel/auxiliary.rs

@ -55,7 +55,7 @@ impl<T: Driver + 'static> Adapter<T> {
extern "C" fn probe_callback(
adev: *mut bindings::auxiliary_device,
id: *const bindings::auxiliary_device_id,
) -> kernel::ffi::c_int {
) -> c_int {
// SAFETY: The auxiliary bus only ever calls the probe callback with a valid pointer to a
// `struct auxiliary_device`.
//
@ -245,7 +245,7 @@ extern "C" fn release(dev: *mut bindings::device) {
kernel::impl_device_context_into_aref!(Device);
// SAFETY: Instances of `Device` are always reference-counted.
unsafe impl crate::types::AlwaysRefCounted for Device {
unsafe impl crate::sync::aref::AlwaysRefCounted for Device {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
unsafe { bindings::get_device(self.as_ref().as_raw()) };

rust/kernel/debugfs.rs Normal file

@ -0,0 +1,594 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Google LLC.
//! DebugFS Abstraction
//!
//! C header: [`include/linux/debugfs.h`](srctree/include/linux/debugfs.h)
// When DebugFS is disabled, many parameters are dead. Linting for this isn't helpful.
#![cfg_attr(not(CONFIG_DEBUG_FS), allow(unused_variables))]
use crate::prelude::*;
use crate::str::CStr;
#[cfg(CONFIG_DEBUG_FS)]
use crate::sync::Arc;
use crate::uaccess::UserSliceReader;
use core::fmt;
use core::marker::PhantomData;
use core::marker::PhantomPinned;
#[cfg(CONFIG_DEBUG_FS)]
use core::mem::ManuallyDrop;
use core::ops::Deref;
mod traits;
pub use traits::{Reader, Writer};
mod callback_adapters;
use callback_adapters::{FormatAdapter, NoWriter, WritableAdapter};
mod file_ops;
use file_ops::{FileOps, ReadFile, ReadWriteFile, WriteFile};
#[cfg(CONFIG_DEBUG_FS)]
mod entry;
#[cfg(CONFIG_DEBUG_FS)]
use entry::Entry;
/// Owning handle to a DebugFS directory.
///
/// The directory in the filesystem represented by [`Dir`] will be removed when handle has been
/// dropped *and* all children have been removed.
// If we have a parent, we hold a reference to it in the `Entry`. This prevents the `dentry`
// we point to from being cleaned up if our parent `Dir`/`Entry` is dropped before us.
//
// The `None` option indicates that the `Arc` could not be allocated, so our children would not be
// able to refer to us. In this case, we need to silently fail. All future child directories/files
// will silently fail as well.
#[derive(Clone)]
pub struct Dir(#[cfg(CONFIG_DEBUG_FS)] Option<Arc<Entry<'static>>>);
impl Dir {
/// Create a new directory in DebugFS. If `parent` is [`None`], it will be created at the root.
fn create(name: &CStr, parent: Option<&Dir>) -> Self {
#[cfg(CONFIG_DEBUG_FS)]
{
let parent_entry = match parent {
// If the parent couldn't be allocated, just early-return
Some(Dir(None)) => return Self(None),
Some(Dir(Some(entry))) => Some(entry.clone()),
None => None,
};
Self(
// If Arc creation fails, the `Entry` will be dropped, so the directory will be
// cleaned up.
Arc::new(Entry::dynamic_dir(name, parent_entry), GFP_KERNEL).ok(),
)
}
#[cfg(not(CONFIG_DEBUG_FS))]
Self()
}
/// Creates a DebugFS file which will own the data produced by the initializer provided in
/// `data`.
fn create_file<'a, T, E: 'a>(
&'a self,
name: &'a CStr,
data: impl PinInit<T, E> + 'a,
file_ops: &'static FileOps<T>,
) -> impl PinInit<File<T>, E> + 'a
where
T: Sync + 'static,
{
let scope = Scope::<T>::new(data, move |data| {
#[cfg(CONFIG_DEBUG_FS)]
if let Some(parent) = &self.0 {
// SAFETY: Because data derives from a scope, and our entry will be dropped before
// the data is dropped, it is guaranteed to outlive the entry we return.
unsafe { Entry::dynamic_file(name, parent.clone(), data, file_ops) }
} else {
Entry::empty()
}
});
try_pin_init! {
File {
scope <- scope
} ? E
}
}
/// Create a new directory in DebugFS at the root.
///
/// # Examples
///
/// ```
/// # use kernel::c_str;
/// # use kernel::debugfs::Dir;
/// let debugfs = Dir::new(c_str!("parent"));
/// ```
pub fn new(name: &CStr) -> Self {
Dir::create(name, None)
}
/// Creates a subdirectory within this directory.
///
/// # Examples
///
/// ```
/// # use kernel::c_str;
/// # use kernel::debugfs::Dir;
/// let parent = Dir::new(c_str!("parent"));
/// let child = parent.subdir(c_str!("child"));
/// ```
pub fn subdir(&self, name: &CStr) -> Self {
Dir::create(name, Some(self))
}
/// Creates a read-only file in this directory.
///
/// The file's contents are produced by invoking [`Writer::write`] on the value initialized by
/// `data`.
///
/// # Examples
///
/// ```
/// # use kernel::c_str;
/// # use kernel::debugfs::Dir;
/// # use kernel::prelude::*;
/// # let dir = Dir::new(c_str!("my_debugfs_dir"));
/// let file = KBox::pin_init(dir.read_only_file(c_str!("foo"), 200), GFP_KERNEL)?;
/// // "my_debugfs_dir/foo" now contains the number 200.
/// // The file is removed when `file` is dropped.
/// # Ok::<(), Error>(())
/// ```
pub fn read_only_file<'a, T, E: 'a>(
&'a self,
name: &'a CStr,
data: impl PinInit<T, E> + 'a,
) -> impl PinInit<File<T>, E> + 'a
where
T: Writer + Send + Sync + 'static,
{
let file_ops = &<T as ReadFile<_>>::FILE_OPS;
self.create_file(name, data, file_ops)
}
/// Creates a read-only file in this directory, with contents from a callback.
///
/// `f` must be a function item or a non-capturing closure.
/// This is statically asserted and not a safety requirement.
///
/// # Examples
///
/// ```
/// # use core::sync::atomic::{AtomicU32, Ordering};
/// # use kernel::c_str;
/// # use kernel::debugfs::Dir;
/// # use kernel::prelude::*;
/// # let dir = Dir::new(c_str!("foo"));
/// let file = KBox::pin_init(
/// dir.read_callback_file(c_str!("bar"),
/// AtomicU32::new(3),
/// &|val, f| {
/// let out = val.load(Ordering::Relaxed);
/// writeln!(f, "{out:#010x}")
/// }),
/// GFP_KERNEL)?;
/// // Reading "foo/bar" will show "0x00000003".
/// file.store(10, Ordering::Relaxed);
/// // Reading "foo/bar" will now show "0x0000000a".
/// # Ok::<(), Error>(())
/// ```
pub fn read_callback_file<'a, T, E: 'a, F>(
&'a self,
name: &'a CStr,
data: impl PinInit<T, E> + 'a,
_f: &'static F,
) -> impl PinInit<File<T>, E> + 'a
where
T: Send + Sync + 'static,
F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync,
{
let file_ops = <FormatAdapter<T, F>>::FILE_OPS.adapt();
self.create_file(name, data, file_ops)
}
/// Creates a read-write file in this directory.
///
/// Reading the file uses the [`Writer`] implementation.
/// Writing to the file uses the [`Reader`] implementation.
pub fn read_write_file<'a, T, E: 'a>(
&'a self,
name: &'a CStr,
data: impl PinInit<T, E> + 'a,
) -> impl PinInit<File<T>, E> + 'a
where
T: Writer + Reader + Send + Sync + 'static,
{
let file_ops = &<T as ReadWriteFile<_>>::FILE_OPS;
self.create_file(name, data, file_ops)
}
/// Creates a read-write file in this directory, with logic from callbacks.
///
/// Reading from the file is handled by `f`. Writing to the file is handled by `w`.
///
/// `f` and `w` must be function items or non-capturing closures.
/// This is statically asserted and not a safety requirement.
pub fn read_write_callback_file<'a, T, E: 'a, F, W>(
&'a self,
name: &'a CStr,
data: impl PinInit<T, E> + 'a,
_f: &'static F,
_w: &'static W,
) -> impl PinInit<File<T>, E> + 'a
where
T: Send + Sync + 'static,
F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync,
W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync,
{
let file_ops =
<WritableAdapter<FormatAdapter<T, F>, W> as file_ops::ReadWriteFile<_>>::FILE_OPS
.adapt()
.adapt();
self.create_file(name, data, file_ops)
}
/// Creates a write-only file in this directory.
///
/// The file owns its backing data. Writing to the file uses the [`Reader`]
/// implementation.
///
/// The file is removed when the returned [`File`] is dropped.
pub fn write_only_file<'a, T, E: 'a>(
&'a self,
name: &'a CStr,
data: impl PinInit<T, E> + 'a,
) -> impl PinInit<File<T>, E> + 'a
where
T: Reader + Send + Sync + 'static,
{
self.create_file(name, data, &T::FILE_OPS)
}
/// Creates a write-only file in this directory, with write logic from a callback.
///
/// `w` must be a function item or a non-capturing closure.
/// This is statically asserted and not a safety requirement.
pub fn write_callback_file<'a, T, E: 'a, W>(
&'a self,
name: &'a CStr,
data: impl PinInit<T, E> + 'a,
_w: &'static W,
) -> impl PinInit<File<T>, E> + 'a
where
T: Send + Sync + 'static,
W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync,
{
let file_ops = <WritableAdapter<NoWriter<T>, W> as WriteFile<_>>::FILE_OPS
.adapt()
.adapt();
self.create_file(name, data, file_ops)
}
// While this function is safe, it is intentionally not public because it's a bit of a
// footgun.
//
// Unless you also extract the `entry` later and schedule it for `Drop` at the appropriate
// time, a `ScopedDir` with a `Dir` parent will never be deleted.
fn scoped_dir<'data>(&self, name: &CStr) -> ScopedDir<'data, 'static> {
#[cfg(CONFIG_DEBUG_FS)]
{
let parent_entry = match &self.0 {
None => return ScopedDir::empty(),
Some(entry) => entry.clone(),
};
ScopedDir {
entry: ManuallyDrop::new(Entry::dynamic_dir(name, Some(parent_entry))),
_phantom: PhantomData,
}
}
#[cfg(not(CONFIG_DEBUG_FS))]
ScopedDir::empty()
}
/// Creates a new scope, which is a directory associated with some data `T`.
///
/// The created directory will be a subdirectory of `self`. The `init` closure is called to
/// populate the directory with files and subdirectories. These files can reference the data
/// stored in the scope.
///
/// The entire directory tree created within the scope will be removed when the returned
/// `Scope` handle is dropped.
pub fn scope<'a, T: 'a, E: 'a, F>(
&'a self,
data: impl PinInit<T, E> + 'a,
name: &'a CStr,
init: F,
) -> impl PinInit<Scope<T>, E> + 'a
where
F: for<'data, 'dir> FnOnce(&'data T, &'dir ScopedDir<'data, 'dir>) + 'a,
{
Scope::new(data, |data| {
let scoped = self.scoped_dir(name);
init(data, &scoped);
scoped.into_entry()
})
}
}
#[pin_data]
/// Handle to a DebugFS scope, which ensures that attached `data` will outlive the DebugFS entry
/// without moving.
///
/// This is internally used to back [`File`], and used in the API to represent the attachment
/// of a directory lifetime to a data structure which may be jointly accessed by a number of
/// different files.
///
/// When dropped, a `Scope` will remove all directories and files in the filesystem backed by the
/// attached data structure prior to releasing the attached data.
pub struct Scope<T> {
// This order is load-bearing for drops - `_entry` must be dropped before `data`.
#[cfg(CONFIG_DEBUG_FS)]
_entry: Entry<'static>,
#[pin]
data: T,
// Even if `T` is `Unpin`, we still can't allow it to be moved.
#[pin]
_pin: PhantomPinned,
}
#[pin_data]
/// Handle to a DebugFS file, owning its backing data.
///
/// When dropped, the DebugFS file will be removed and the attached data will be dropped.
pub struct File<T> {
#[pin]
scope: Scope<T>,
}
#[cfg(not(CONFIG_DEBUG_FS))]
impl<'b, T: 'b> Scope<T> {
fn new<E: 'b, F>(data: impl PinInit<T, E> + 'b, init: F) -> impl PinInit<Self, E> + 'b
where
F: for<'a> FnOnce(&'a T) + 'b,
{
try_pin_init! {
Self {
data <- data,
_pin: PhantomPinned
} ? E
}
.pin_chain(|scope| {
init(&scope.data);
Ok(())
})
}
}
#[cfg(CONFIG_DEBUG_FS)]
impl<'b, T: 'b> Scope<T> {
fn entry_mut(self: Pin<&mut Self>) -> &mut Entry<'static> {
// SAFETY: _entry is not structurally pinned.
unsafe { &mut Pin::into_inner_unchecked(self)._entry }
}
fn new<E: 'b, F>(data: impl PinInit<T, E> + 'b, init: F) -> impl PinInit<Self, E> + 'b
where
F: for<'a> FnOnce(&'a T) -> Entry<'static> + 'b,
{
try_pin_init! {
Self {
_entry: Entry::empty(),
data <- data,
_pin: PhantomPinned
} ? E
}
.pin_chain(|scope| {
*scope.entry_mut() = init(&scope.data);
Ok(())
})
}
}
impl<'a, T: 'a> Scope<T> {
/// Creates a new scope, which is a directory at the root of the debugfs filesystem,
/// associated with some data `T`.
///
/// The `init` closure is called to populate the directory with files and subdirectories. These
/// files can reference the data stored in the scope.
///
/// The entire directory tree created within the scope will be removed when the returned
/// `Scope` handle is dropped.
pub fn dir<E: 'a, F>(
data: impl PinInit<T, E> + 'a,
name: &'a CStr,
init: F,
) -> impl PinInit<Self, E> + 'a
where
F: for<'data, 'dir> FnOnce(&'data T, &'dir ScopedDir<'data, 'dir>) + 'a,
{
Scope::new(data, |data| {
let scoped = ScopedDir::new(name);
init(data, &scoped);
scoped.into_entry()
})
}
}
impl<T> Deref for Scope<T> {
type Target = T;
fn deref(&self) -> &T {
&self.data
}
}
impl<T> Deref for File<T> {
type Target = T;
fn deref(&self) -> &T {
&self.scope
}
}
/// A handle to a directory which will live at most `'dir`, accessing data that will live for at
/// least `'data`.
///
/// Dropping a ScopedDir will not delete or clean it up, this is expected to occur through dropping
/// the `Scope` that created it.
pub struct ScopedDir<'data, 'dir> {
#[cfg(CONFIG_DEBUG_FS)]
entry: ManuallyDrop<Entry<'dir>>,
_phantom: PhantomData<fn(&'data ()) -> &'dir ()>,
}
impl<'data, 'dir> ScopedDir<'data, 'dir> {
/// Creates a subdirectory inside this `ScopedDir`.
///
/// The returned directory handle cannot outlive this one.
pub fn dir<'dir2>(&'dir2 self, name: &CStr) -> ScopedDir<'data, 'dir2> {
#[cfg(not(CONFIG_DEBUG_FS))]
let _ = name;
ScopedDir {
#[cfg(CONFIG_DEBUG_FS)]
entry: ManuallyDrop::new(Entry::dir(name, Some(&*self.entry))),
_phantom: PhantomData,
}
}
fn create_file<T: Sync>(&self, name: &CStr, data: &'data T, vtable: &'static FileOps<T>) {
#[cfg(CONFIG_DEBUG_FS)]
core::mem::forget(Entry::file(name, &self.entry, data, vtable));
}
/// Creates a read-only file in this directory.
///
/// The file's contents are produced by invoking [`Writer::write`].
///
/// This function does not produce an owning handle to the file. The created
/// file is removed when the [`Scope`] that this directory belongs
/// to is dropped.
pub fn read_only_file<T: Writer + Send + Sync + 'static>(&self, name: &CStr, data: &'data T) {
self.create_file(name, data, &T::FILE_OPS)
}
/// Creates a read-only file in this directory, with contents from a callback.
///
/// The file contents are generated by calling `f` with `data`.
///
///
/// `f` must be a function item or a non-capturing closure.
/// This is statically asserted and not a safety requirement.
///
/// This function does not produce an owning handle to the file. The created
/// file is removed when the [`Scope`] that this directory belongs
/// to is dropped.
pub fn read_callback_file<T, F>(&self, name: &CStr, data: &'data T, _f: &'static F)
where
T: Send + Sync + 'static,
F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync,
{
let vtable = <FormatAdapter<T, F> as ReadFile<_>>::FILE_OPS.adapt();
self.create_file(name, data, vtable)
}
/// Creates a read-write file in this directory.
///
/// Reading the file uses the [`Writer`] implementation on `data`. Writing to the file uses
/// the [`Reader`] implementation on `data`.
///
/// This function does not produce an owning handle to the file. The created
/// file is removed when the [`Scope`] that this directory belongs
/// to is dropped.
pub fn read_write_file<T: Writer + Reader + Send + Sync + 'static>(
&self,
name: &CStr,
data: &'data T,
) {
let vtable = &<T as ReadWriteFile<_>>::FILE_OPS;
self.create_file(name, data, vtable)
}
/// Creates a read-write file in this directory, with logic from callbacks.
///
/// Reading from the file is handled by `f`. Writing to the file is handled by `w`.
///
/// `f` and `w` must be function items or non-capturing closures.
/// This is statically asserted and not a safety requirement.
///
/// This function does not produce an owning handle to the file. The created
/// file is removed when the [`Scope`] that this directory belongs
/// to is dropped.
pub fn read_write_callback_file<T, F, W>(
&self,
name: &CStr,
data: &'data T,
_f: &'static F,
_w: &'static W,
) where
T: Send + Sync + 'static,
F: Fn(&T, &mut fmt::Formatter<'_>) -> fmt::Result + Send + Sync,
W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync,
{
let vtable = <WritableAdapter<FormatAdapter<T, F>, W> as ReadWriteFile<_>>::FILE_OPS
.adapt()
.adapt();
self.create_file(name, data, vtable)
}
/// Creates a write-only file in this directory.
///
/// Writing to the file uses the [`Reader`] implementation on `data`.
///
/// This function does not produce an owning handle to the file. The created
/// file is removed when the [`Scope`] that this directory belongs
/// to is dropped.
pub fn write_only_file<T: Reader + Send + Sync + 'static>(&self, name: &CStr, data: &'data T) {
let vtable = &<T as WriteFile<_>>::FILE_OPS;
self.create_file(name, data, vtable)
}
/// Creates a write-only file in this directory, with write logic from a callback.
///
/// Writing to the file is handled by `w`.
///
/// `w` must be a function item or a non-capturing closure.
/// This is statically asserted and not a safety requirement.
///
/// This function does not produce an owning handle to the file. The created
/// file is removed when the [`Scope`] that this directory belongs
/// to is dropped.
pub fn write_only_callback_file<T, W>(&self, name: &CStr, data: &'data T, _w: &'static W)
where
T: Send + Sync + 'static,
W: Fn(&T, &mut UserSliceReader) -> Result + Send + Sync,
{
let vtable = &<WritableAdapter<NoWriter<T>, W> as WriteFile<_>>::FILE_OPS
.adapt()
.adapt();
self.create_file(name, data, vtable)
}
fn empty() -> Self {
ScopedDir {
#[cfg(CONFIG_DEBUG_FS)]
entry: ManuallyDrop::new(Entry::empty()),
_phantom: PhantomData,
}
}
#[cfg(CONFIG_DEBUG_FS)]
fn into_entry(self) -> Entry<'dir> {
ManuallyDrop::into_inner(self.entry)
}
#[cfg(not(CONFIG_DEBUG_FS))]
fn into_entry(self) {}
}
impl<'data> ScopedDir<'data, 'static> {
// This is safe, but intentionally not exported due to footgun status. A ScopedDir with no
// parent will never be released by default, and needs to have its entry extracted and used
// somewhere.
fn new(name: &CStr) -> ScopedDir<'data, 'static> {
ScopedDir {
#[cfg(CONFIG_DEBUG_FS)]
entry: ManuallyDrop::new(Entry::dir(name, None)),
_phantom: PhantomData,
}
}
}
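
For orientation, a minimal editorial sketch of the directory-scope-based API
defined above (not part of debugfs.rs or the new samples; `PortStats` is a
hypothetical driver-private type). Files created inside the closure borrow from
the scoped data and disappear together with the directory when the returned
Scope is dropped:

use core::pin::Pin;
use core::sync::atomic::AtomicU32;
use kernel::debugfs::{Dir, Scope};
use kernel::{c_str, prelude::*};

struct PortStats {
    rx: AtomicU32,
    tx: AtomicU32,
}

fn create_stats_dir(parent: &Dir) -> Result<Pin<KBox<Scope<PortStats>>>> {
    KBox::pin_init(
        parent.scope(
            PortStats { rx: AtomicU32::new(0), tx: AtomicU32::new(0) },
            c_str!("stats"),
            |stats, dir| {
                // AtomicU32 is Debug (hence Writer) and has a Reader impl,
                // so it can back both read-only and read-write files.
                dir.read_only_file(c_str!("rx"), &stats.rx);
                dir.read_write_file(c_str!("tx"), &stats.tx);
            },
        ),
        GFP_KERNEL,
    )
}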

rust/kernel/debugfs/callback_adapters.rs Normal file

@ -0,0 +1,122 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Google LLC.
//! Adapters which allow the user to supply a write or read implementation as a value rather
//! than a trait implementation. If provided, it will override the trait implementation.
use super::{Reader, Writer};
use crate::prelude::*;
use crate::uaccess::UserSliceReader;
use core::fmt;
use core::fmt::Formatter;
use core::marker::PhantomData;
use core::ops::Deref;
/// # Safety
///
/// To implement this trait, it must be safe to cast a `&Self` to a `&Inner`.
/// It is intended for use in unstacking adapters out of `FileOps` backings.
pub(crate) unsafe trait Adapter {
type Inner;
}
/// Adapter to implement `Reader` via a callback with the same representation as `T`.
///
/// * Layer it on top of `WriterAdapter` if you want to add a custom callback for `write`.
/// * Layer it on top of `NoWriter` to pass through any support present on the underlying type.
///
/// # Invariants
///
/// If an instance for `WritableAdapter<_, W>` is constructed, `W` is inhabited.
#[repr(transparent)]
pub(crate) struct WritableAdapter<D, W> {
inner: D,
_writer: PhantomData<W>,
}
// SAFETY: Stripping off the adapter only removes constraints
unsafe impl<D, W> Adapter for WritableAdapter<D, W> {
type Inner = D;
}
impl<D: Writer, W> Writer for WritableAdapter<D, W> {
fn write(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
self.inner.write(fmt)
}
}
impl<D: Deref, W> Reader for WritableAdapter<D, W>
where
W: Fn(&D::Target, &mut UserSliceReader) -> Result + Send + Sync + 'static,
{
fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
// SAFETY: WritableAdapter<_, W> can only be constructed if W is inhabited
let w: &W = unsafe { materialize_zst() };
w(self.inner.deref(), reader)
}
}
/// Adapter to implement `Writer` via a callback with the same representation as `T`.
///
/// # Invariants
///
/// If an instance for `FormatAdapter<_, F>` is constructed, `F` is inhabited.
#[repr(transparent)]
pub(crate) struct FormatAdapter<D, F> {
inner: D,
_formatter: PhantomData<F>,
}
impl<D, F> Deref for FormatAdapter<D, F> {
type Target = D;
fn deref(&self) -> &D {
&self.inner
}
}
impl<D, F> Writer for FormatAdapter<D, F>
where
F: Fn(&D, &mut Formatter<'_>) -> fmt::Result + 'static,
{
fn write(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
// SAFETY: FormatAdapter<_, F> can only be constructed if F is inhabited
let f: &F = unsafe { materialize_zst() };
f(&self.inner, fmt)
}
}
// SAFETY: Stripping off the adapter only removes constraints
unsafe impl<D, F> Adapter for FormatAdapter<D, F> {
type Inner = D;
}
#[repr(transparent)]
pub(crate) struct NoWriter<D> {
inner: D,
}
// SAFETY: Stripping off the adapter only removes constraints
unsafe impl<D> Adapter for NoWriter<D> {
type Inner = D;
}
impl<D> Deref for NoWriter<D> {
type Target = D;
fn deref(&self) -> &D {
&self.inner
}
}
/// For types with a unique value, produce a static reference to it.
///
/// # Safety
///
/// The caller asserts that F is inhabited
unsafe fn materialize_zst<F>() -> &'static F {
const { assert!(core::mem::size_of::<F>() == 0) };
let zst_dangle: core::ptr::NonNull<F> = core::ptr::NonNull::dangling();
// SAFETY: While the pointer is dangling, it is a dangling pointer to a ZST, based on the
// assertion above. The type is also inhabited, by the caller's assertion. This means
// we can materialize it.
unsafe { zst_dangle.as_ref() }
}

rust/kernel/debugfs/entry.rs Normal file

@ -0,0 +1,164 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Google LLC.
use crate::debugfs::file_ops::FileOps;
use crate::ffi::c_void;
use crate::str::CStr;
use crate::sync::Arc;
use core::marker::PhantomData;
/// Owning handle to a DebugFS entry.
///
/// # Invariants
///
/// The wrapped pointer will always be `NULL`, an error, or an owned DebugFS `dentry`.
pub(crate) struct Entry<'a> {
entry: *mut bindings::dentry,
// If we were created with an owning parent, this is the keep-alive
_parent: Option<Arc<Entry<'static>>>,
// If we were created with a non-owning parent, this prevents us from outliving it
_phantom: PhantomData<&'a ()>,
}
// SAFETY: [`Entry`] is just a `dentry` under the hood, which the API promises can be transferred
// between threads.
unsafe impl Send for Entry<'_> {}
// SAFETY: All the C functions we call on the `dentry` pointer are threadsafe.
unsafe impl Sync for Entry<'_> {}
impl Entry<'static> {
pub(crate) fn dynamic_dir(name: &CStr, parent: Option<Arc<Self>>) -> Self {
let parent_ptr = match &parent {
Some(entry) => entry.as_ptr(),
None => core::ptr::null_mut(),
};
// SAFETY: The invariants of this function's arguments ensure the safety of this call.
// * `name` is a valid C string by the invariants of `&CStr`.
// * `parent_ptr` is either `NULL` (if `parent` is `None`), or a pointer to a valid
// `dentry` by our invariant. `debugfs_create_dir` handles `NULL` pointers correctly.
let entry = unsafe { bindings::debugfs_create_dir(name.as_char_ptr(), parent_ptr) };
Entry {
entry,
_parent: parent,
_phantom: PhantomData,
}
}
/// # Safety
///
/// * `data` must outlive the returned `Entry`.
pub(crate) unsafe fn dynamic_file<T>(
name: &CStr,
parent: Arc<Self>,
data: &T,
file_ops: &'static FileOps<T>,
) -> Self {
// SAFETY: The invariants of this function's arguments ensure the safety of this call.
// * `name` is a valid C string by the invariants of `&CStr`.
// * `parent.as_ptr()` is a pointer to a valid `dentry` by invariant.
// * The caller guarantees that `data` will outlive the returned `Entry`.
// * The guarantees on `FileOps` assert the vtable will be compatible with the data we have
// provided.
let entry = unsafe {
bindings::debugfs_create_file_full(
name.as_char_ptr(),
file_ops.mode(),
parent.as_ptr(),
core::ptr::from_ref(data) as *mut c_void,
core::ptr::null(),
&**file_ops,
)
};
Entry {
entry,
_parent: Some(parent),
_phantom: PhantomData,
}
}
}
impl<'a> Entry<'a> {
pub(crate) fn dir(name: &CStr, parent: Option<&'a Entry<'_>>) -> Self {
let parent_ptr = match &parent {
Some(entry) => entry.as_ptr(),
None => core::ptr::null_mut(),
};
// SAFETY: The invariants of this function's arguments ensure the safety of this call.
// * `name` is a valid C string by the invariants of `&CStr`.
// * `parent_ptr` is either `NULL` (if `parent` is `None`), or a pointer to a valid
// `dentry` (because `parent` is a valid reference to an `Entry`). The lifetime `'a`
// ensures that the parent outlives this entry.
let entry = unsafe { bindings::debugfs_create_dir(name.as_char_ptr(), parent_ptr) };
Entry {
entry,
_parent: None,
_phantom: PhantomData,
}
}
pub(crate) fn file<T>(
name: &CStr,
parent: &'a Entry<'_>,
data: &'a T,
file_ops: &FileOps<T>,
) -> Self {
// SAFETY: The invariants of this function's arguments ensure the safety of this call.
// * `name` is a valid C string by the invariants of `&CStr`.
// * `parent.as_ptr()` is a pointer to a valid `dentry` because we have `&'a Entry`.
// * `data` is a valid pointer to `T` for lifetime `'a`.
// * The returned `Entry` has lifetime `'a`, so it cannot outlive `parent` or `data`.
// * The caller guarantees that `vtable` is compatible with `data`.
// * The guarantees on `FileOps` assert the vtable will be compatible with the data we have
// provided.
let entry = unsafe {
bindings::debugfs_create_file_full(
name.as_char_ptr(),
file_ops.mode(),
parent.as_ptr(),
core::ptr::from_ref(data) as *mut c_void,
core::ptr::null(),
&**file_ops,
)
};
Entry {
entry,
_parent: None,
_phantom: PhantomData,
}
}
}
impl Entry<'_> {
/// Constructs a placeholder DebugFS [`Entry`].
pub(crate) fn empty() -> Self {
Self {
entry: core::ptr::null_mut(),
_parent: None,
_phantom: PhantomData,
}
}
/// Returns the pointer representation of the DebugFS directory.
///
/// # Guarantees
///
/// Due to the type invariant, the value returned from this function will always be an error
/// code, NULL, or a live DebugFS directory. If it is live, it will remain live at least as
/// long as this entry lives.
pub(crate) fn as_ptr(&self) -> *mut bindings::dentry {
self.entry
}
}
impl Drop for Entry<'_> {
fn drop(&mut self) {
// SAFETY: `debugfs_remove` can take `NULL`, error values, and legal DebugFS dentries.
// `as_ptr` guarantees that the pointer is of this form.
unsafe { bindings::debugfs_remove(self.as_ptr()) }
}
}

rust/kernel/debugfs/file_ops.rs Normal file

@ -0,0 +1,247 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Google LLC.
use super::{Reader, Writer};
use crate::debugfs::callback_adapters::Adapter;
use crate::prelude::*;
use crate::seq_file::SeqFile;
use crate::seq_print;
use crate::uaccess::UserSlice;
use core::fmt::{Display, Formatter, Result};
use core::marker::PhantomData;
#[cfg(CONFIG_DEBUG_FS)]
use core::ops::Deref;
/// # Invariant
///
/// `FileOps<T>` will always contain an `operations` which is safe to use for a file backed
/// off an inode which has a pointer to a `T` in its private data that is safe to convert
/// into a reference.
pub(super) struct FileOps<T> {
#[cfg(CONFIG_DEBUG_FS)]
operations: bindings::file_operations,
#[cfg(CONFIG_DEBUG_FS)]
mode: u16,
_phantom: PhantomData<T>,
}
impl<T> FileOps<T> {
/// # Safety
///
/// The caller asserts that the provided `operations` is safe to use for a file whose
/// inode has a pointer to `T` in its private data that is safe to convert into a reference.
const unsafe fn new(operations: bindings::file_operations, mode: u16) -> Self {
Self {
#[cfg(CONFIG_DEBUG_FS)]
operations,
#[cfg(CONFIG_DEBUG_FS)]
mode,
_phantom: PhantomData,
}
}
#[cfg(CONFIG_DEBUG_FS)]
pub(crate) const fn mode(&self) -> u16 {
self.mode
}
}
impl<T: Adapter> FileOps<T> {
pub(super) const fn adapt(&self) -> &FileOps<T::Inner> {
// SAFETY: `Adapter` asserts that `T` can be legally cast to `T::Inner`.
unsafe { core::mem::transmute(self) }
}
}
#[cfg(CONFIG_DEBUG_FS)]
impl<T> Deref for FileOps<T> {
type Target = bindings::file_operations;
fn deref(&self) -> &Self::Target {
&self.operations
}
}
struct WriterAdapter<T>(T);
impl<'a, T: Writer> Display for WriterAdapter<&'a T> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
self.0.write(f)
}
}
/// Implements `open` for `file_operations` via `single_open` to fill out a `seq_file`.
///
/// # Safety
///
/// * `inode`'s private pointer must point to a value of type `T` which will outlive the `inode`
/// and will not have any unique references alias it during the call.
/// * `file` must point to a live, not-yet-initialized file object.
unsafe extern "C" fn writer_open<T: Writer + Sync>(
inode: *mut bindings::inode,
file: *mut bindings::file,
) -> c_int {
// SAFETY: The caller ensures that `inode` is a valid pointer.
let data = unsafe { (*inode).i_private };
// SAFETY:
// * `file` is acceptable by caller precondition.
// * `print_act` will be called on a `seq_file` with private data set to the third argument,
// so we meet its safety requirements.
// * The `data` pointer passed in the third argument is a valid `T` pointer that outlives
// this call by caller preconditions.
unsafe { bindings::single_open(file, Some(writer_act::<T>), data) }
}
/// Prints private data stashed in a seq_file to that seq file.
///
/// # Safety
///
/// `seq` must point to a live `seq_file` whose private data is a valid pointer to a `T` which may
/// not have any unique references alias it during the call.
unsafe extern "C" fn writer_act<T: Writer + Sync>(
seq: *mut bindings::seq_file,
_: *mut c_void,
) -> c_int {
// SAFETY: By caller precondition, this pointer is valid pointer to a `T`, and
// there are not and will not be any unique references until we are done.
let data = unsafe { &*((*seq).private.cast::<T>()) };
// SAFETY: By caller precondition, `seq_file` points to a live `seq_file`, so we can lift
// it.
let seq_file = unsafe { SeqFile::from_raw(seq) };
seq_print!(seq_file, "{}", WriterAdapter(data));
0
}
// Work around lack of generic const items.
pub(crate) trait ReadFile<T> {
const FILE_OPS: FileOps<T>;
}
impl<T: Writer + Sync> ReadFile<T> for T {
const FILE_OPS: FileOps<T> = {
let operations = bindings::file_operations {
read: Some(bindings::seq_read),
llseek: Some(bindings::seq_lseek),
release: Some(bindings::single_release),
open: Some(writer_open::<Self>),
// SAFETY: `file_operations` supports zeroes in all fields.
..unsafe { core::mem::zeroed() }
};
// SAFETY: `operations` is all stock `seq_file` implementations except for `writer_open`.
// `open`'s only requirement beyond what is provided to all open functions is that the
// inode's data pointer must point to a `T` that will outlive it, which matches the
// `FileOps` requirements.
unsafe { FileOps::new(operations, 0o400) }
};
}
fn read<T: Reader + Sync>(data: &T, buf: *const c_char, count: usize) -> isize {
let mut reader = UserSlice::new(UserPtr::from_ptr(buf as *mut c_void), count).reader();
if let Err(e) = data.read_from_slice(&mut reader) {
return e.to_errno() as isize;
}
count as isize
}
/// # Safety
///
/// `file` must be a valid pointer to a `file` struct.
/// The `private_data` of the file must contain a valid pointer to a `seq_file` whose
/// `private` data in turn points to a `T` that implements `Reader`.
/// `buf` must be a valid user-space buffer.
pub(crate) unsafe extern "C" fn write<T: Reader + Sync>(
file: *mut bindings::file,
buf: *const c_char,
count: usize,
_ppos: *mut bindings::loff_t,
) -> isize {
// SAFETY: The file was opened with `single_open`, which sets `private_data` to a `seq_file`.
let seq = unsafe { &mut *((*file).private_data.cast::<bindings::seq_file>()) };
// SAFETY: By caller precondition, this pointer is live and points to a value of type `T`.
let data = unsafe { &*(seq.private as *const T) };
read(data, buf, count)
}
// A trait to get the file operations for a type.
pub(crate) trait ReadWriteFile<T> {
const FILE_OPS: FileOps<T>;
}
impl<T: Writer + Reader + Sync> ReadWriteFile<T> for T {
const FILE_OPS: FileOps<T> = {
let operations = bindings::file_operations {
open: Some(writer_open::<T>),
read: Some(bindings::seq_read),
write: Some(write::<T>),
llseek: Some(bindings::seq_lseek),
release: Some(bindings::single_release),
// SAFETY: `file_operations` supports zeroes in all fields.
..unsafe { core::mem::zeroed() }
};
// SAFETY: `operations` is all stock `seq_file` implementations except for `writer_open`
// and `write`.
// `writer_open`'s only requirement beyond what is provided to all open functions is that
// the inode's data pointer must point to a `T` that will outlive it, which matches the
// `FileOps` requirements.
// `write` only requires that the file's private data pointer points to `seq_file`
// which points to a `T` that will outlive it, which matches what `writer_open`
// provides.
unsafe { FileOps::new(operations, 0o600) }
};
}
/// # Safety
///
/// `inode` must be a valid pointer to an `inode` struct.
/// `file` must be a valid pointer to a `file` struct.
unsafe extern "C" fn write_only_open(
inode: *mut bindings::inode,
file: *mut bindings::file,
) -> c_int {
// SAFETY: The caller ensures that `inode` and `file` are valid pointers.
unsafe { (*file).private_data = (*inode).i_private };
0
}
/// # Safety
///
/// * `file` must be a valid pointer to a `file` struct.
/// * The `private_data` of the file must contain a valid pointer to a `T` that implements
/// `Reader`.
/// * `buf` must be a valid user-space buffer.
pub(crate) unsafe extern "C" fn write_only_write<T: Reader + Sync>(
file: *mut bindings::file,
buf: *const c_char,
count: usize,
_ppos: *mut bindings::loff_t,
) -> isize {
// SAFETY: The caller ensures that `file` is a valid pointer and that `private_data` holds a
// valid pointer to `T`.
let data = unsafe { &*((*file).private_data as *const T) };
read(data, buf, count)
}
pub(crate) trait WriteFile<T> {
const FILE_OPS: FileOps<T>;
}
impl<T: Reader + Sync> WriteFile<T> for T {
const FILE_OPS: FileOps<T> = {
let operations = bindings::file_operations {
open: Some(write_only_open),
write: Some(write_only_write::<T>),
llseek: Some(bindings::noop_llseek),
// SAFETY: `file_operations` supports zeroes in all fields.
..unsafe { core::mem::zeroed() }
};
// SAFETY:
// * `write_only_open` populates the file private data with the inode private data
// * `write_only_write`'s only requirement is that the private data of the file point to
// a `T` and be legal to convert to a shared reference, which `write_only_open`
// satisfies.
unsafe { FileOps::new(operations, 0o200) }
};
}

rust/kernel/debugfs/traits.rs Normal file

@ -0,0 +1,102 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Google LLC.
//! Traits for rendering or updating values exported to DebugFS.
use crate::prelude::*;
use crate::sync::Mutex;
use crate::uaccess::UserSliceReader;
use core::fmt::{self, Debug, Formatter};
use core::str::FromStr;
use core::sync::atomic::{
AtomicI16, AtomicI32, AtomicI64, AtomicI8, AtomicIsize, AtomicU16, AtomicU32, AtomicU64,
AtomicU8, AtomicUsize, Ordering,
};
/// A trait for types that can be written into a string.
///
/// This works very similarly to `Debug`, and is automatically implemented if `Debug` is
/// implemented for a type. It is also implemented for any writable type inside a `Mutex`.
///
/// The derived implementation of `Debug` [may
/// change](https://doc.rust-lang.org/std/fmt/trait.Debug.html#stability)
/// between Rust versions, so if stability is key for your use case, please implement `Writer`
/// explicitly instead.
pub trait Writer {
/// Formats the value using the given formatter.
fn write(&self, f: &mut Formatter<'_>) -> fmt::Result;
}
impl<T: Writer> Writer for Mutex<T> {
fn write(&self, f: &mut Formatter<'_>) -> fmt::Result {
self.lock().write(f)
}
}
impl<T: Debug> Writer for T {
fn write(&self, f: &mut Formatter<'_>) -> fmt::Result {
writeln!(f, "{self:?}")
}
}
/// A trait for types that can be updated from a user slice.
///
/// This works similarly to `FromStr`, but operates on a `UserSliceReader` rather than a &str.
///
/// It is automatically implemented for all atomic integers, or any type that implements `FromStr`
/// wrapped in a `Mutex`.
pub trait Reader {
/// Updates the value from the given user slice.
fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result;
}
impl<T: FromStr> Reader for Mutex<T> {
fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
let mut buf = [0u8; 128];
if reader.len() > buf.len() {
return Err(EINVAL);
}
let n = reader.len();
reader.read_slice(&mut buf[..n])?;
let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?;
let val = s.trim().parse::<T>().map_err(|_| EINVAL)?;
*self.lock() = val;
Ok(())
}
}
macro_rules! impl_reader_for_atomic {
($(($atomic_type:ty, $int_type:ty)),*) => {
$(
impl Reader for $atomic_type {
fn read_from_slice(&self, reader: &mut UserSliceReader) -> Result {
let mut buf = [0u8; 21]; // Enough for a 64-bit number.
if reader.len() > buf.len() {
return Err(EINVAL);
}
let n = reader.len();
reader.read_slice(&mut buf[..n])?;
let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?;
let val = s.trim().parse::<$int_type>().map_err(|_| EINVAL)?;
self.store(val, Ordering::Relaxed);
Ok(())
}
}
)*
};
}
impl_reader_for_atomic!(
(AtomicI16, i16),
(AtomicI32, i32),
(AtomicI64, i64),
(AtomicI8, i8),
(AtomicIsize, isize),
(AtomicU16, u16),
(AtomicU32, u32),
(AtomicU64, u64),
(AtomicU8, u8),
(AtomicUsize, usize)
);
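
To make the intended use of these traits concrete, here is a minimal editorial sketch (not part of the patch) of a driver-local type implementing `Writer` by hand instead of relying on the blanket `Debug` implementation; the type deliberately does not implement `Debug`, so the two impls do not overlap, and the `kernel::debugfs::Writer` import path is assumed.

use core::fmt::{self, Formatter};
use kernel::debugfs::Writer; // assumed re-export of the trait defined above

/// Temperature in millidegrees Celsius.
struct Temperature(i32);

impl Writer for Temperature {
    fn write(&self, f: &mut Formatter<'_>) -> fmt::Result {
        // An explicit impl keeps the rendered format stable across compiler
        // releases, unlike output that goes through the derived `Debug`.
        writeln!(f, "{} mC", self.0)
    }
}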

rust/kernel/device.rs

@ -6,7 +6,8 @@
use crate::{
bindings, fmt,
types::{ARef, ForeignOwnable, Opaque},
sync::aref::ARef,
types::{ForeignOwnable, Opaque},
};
use core::{marker::PhantomData, ptr};
@ -406,7 +407,7 @@ pub fn fwnode(&self) -> Option<&property::FwNode> {
kernel::impl_device_context_into_aref!(Device);
// SAFETY: Instances of `Device` are always reference-counted.
unsafe impl crate::types::AlwaysRefCounted for Device {
unsafe impl crate::sync::aref::AlwaysRefCounted for Device {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
unsafe { bindings::get_device(self.as_raw()) };
@ -572,7 +573,7 @@ macro_rules! impl_device_context_deref {
#[macro_export]
macro_rules! __impl_device_context_into_aref {
($src:ty, $device:tt) => {
impl ::core::convert::From<&$device<$src>> for $crate::types::ARef<$device> {
impl ::core::convert::From<&$device<$src>> for $crate::sync::aref::ARef<$device> {
fn from(dev: &$device<$src>) -> Self {
(&**dev).into()
}

rust/kernel/devres.rs

@ -13,8 +13,8 @@
ffi::c_void,
prelude::*,
revocable::{Revocable, RevocableGuard},
sync::{rcu, Completion},
types::{ARef, ForeignOwnable, Opaque, ScopeGuard},
sync::{aref::ARef, rcu, Completion},
types::{ForeignOwnable, Opaque, ScopeGuard},
};
use pin_init::Wrapper;

rust/kernel/io.rs

@ -8,6 +8,7 @@
use crate::{bindings, build_assert, ffi::c_void};
pub mod mem;
pub mod poll;
pub mod resource;
pub use resource::Resource;

rust/kernel/io/poll.rs (new file)

@ -0,0 +1,104 @@
// SPDX-License-Identifier: GPL-2.0
//! IO polling.
//!
//! C header: [`include/linux/iopoll.h`](srctree/include/linux/iopoll.h).
use crate::{
error::{code::*, Result},
processor::cpu_relax,
task::might_sleep,
time::{delay::fsleep, Delta, Instant, Monotonic},
};
/// Polls periodically until a condition is met, an error occurs,
/// or the timeout is reached.
///
/// The function repeatedly executes the given operation `op` closure and
/// checks its result using the condition closure `cond`.
///
/// If `cond` returns `true`, the function returns successfully with
/// the result of `op`. Otherwise, it waits for a duration specified
/// by `sleep_delta` before executing `op` again.
///
/// This process continues until either `op` returns an error, `cond`
/// returns `true`, or the timeout specified by `timeout_delta` is
/// reached.
///
/// This function can only be used in a nonatomic context.
///
/// # Errors
///
/// If `op` returns an error, then that error is returned directly.
///
/// If the timeout specified by `timeout_delta` is reached, then
/// `Err(ETIMEDOUT)` is returned.
///
/// # Examples
///
/// ```no_run
/// use kernel::io::{Io, poll::read_poll_timeout};
/// use kernel::time::Delta;
///
/// const HW_READY: u16 = 0x01;
///
/// fn wait_for_hardware<const SIZE: usize>(io: &Io<SIZE>) -> Result<()> {
/// match read_poll_timeout(
/// // The `op` closure reads the value of a specific status register.
/// || io.try_read16(0x1000),
/// // The `cond` closure takes a reference to the value returned by `op`
/// // and checks whether the hardware is ready.
/// |val: &u16| *val == HW_READY,
/// Delta::from_millis(50),
/// Delta::from_secs(3),
/// ) {
/// Ok(_) => {
/// // The hardware is ready. The returned value of the `op` closure
/// // isn't used.
/// Ok(())
/// }
/// Err(e) => Err(e),
/// }
/// }
/// ```
#[track_caller]
pub fn read_poll_timeout<Op, Cond, T>(
mut op: Op,
mut cond: Cond,
sleep_delta: Delta,
timeout_delta: Delta,
) -> Result<T>
where
Op: FnMut() -> Result<T>,
Cond: FnMut(&T) -> bool,
{
let start: Instant<Monotonic> = Instant::now();
// Unlike the C version, we call `might_sleep()` unconditionally, as
// conditional calls are error-prone. We clearly separate
// `read_poll_timeout()` and `read_poll_timeout_atomic()` to aid
// tools like klint.
might_sleep();
loop {
let val = op()?;
if cond(&val) {
// Unlike the C version, we immediately return.
// We know the condition is met so we don't need to check again.
return Ok(val);
}
if start.elapsed() > timeout_delta {
// Unlike the C version, we immediately return.
// We have just called `op()` so we don't need to call it again.
return Err(ETIMEDOUT);
}
if !sleep_delta.is_zero() {
fsleep(sleep_delta);
}
// `fsleep()` could be a busy-wait loop so we always call `cpu_relax()`.
cpu_relax();
}
}

rust/kernel/irq.rs (new file)

@ -0,0 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
//! IRQ abstractions.
//!
//! An IRQ is an interrupt request from a device. It is used to get the CPU's
//! attention so it can service a hardware event in a timely manner.
//!
//! The current abstractions handle IRQ requests and handlers, i.e. they allow
//! drivers to register a handler for a given IRQ line.
//!
//! C header: [`include/linux/interrupt.h`](srctree/include/linux/interrupt.h)
/// Flags to be used when registering IRQ handlers.
mod flags;
/// IRQ allocation and handling.
mod request;
pub use flags::Flags;
pub use request::{
Handler, IrqRequest, IrqReturn, Registration, ThreadedHandler, ThreadedIrqReturn,
ThreadedRegistration,
};

rust/kernel/irq/flags.rs (new file)

@ -0,0 +1,124 @@
// SPDX-License-Identifier: GPL-2.0
// SPDX-FileCopyrightText: Copyright 2025 Collabora ltd.
use crate::bindings;
use crate::prelude::*;
/// Flags to be used when registering IRQ handlers.
///
/// Flags can be used to request specific behaviors when registering an IRQ
/// handler, and can be combined using the `|`, `&`, and `!` operators to
/// further control the system's behavior.
///
/// A common use case is to register a shared interrupt, as sharing the line
/// between devices is increasingly common in modern systems and is even
/// required for some buses. This requires setting [`Flags::SHARED`] when
/// requesting the interrupt. Other use cases include setting the trigger type
/// through `Flags::TRIGGER_*`, which determines when the interrupt fires, or
/// controlling whether the interrupt is masked after the handler runs by using
/// [`Flags::ONESHOT`].
///
/// If an invalid combination of flags is provided, the system will refuse to
/// register the handler, and lower layers will enforce certain flags when
/// necessary. This means, for example, that all the
/// [`crate::irq::Registration`] for a shared interrupt have to agree on
/// [`Flags::SHARED`] and on the same trigger type, if set.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Flags(c_ulong);
impl Flags {
/// Use the interrupt line as already configured.
pub const TRIGGER_NONE: Flags = Flags::new(bindings::IRQF_TRIGGER_NONE);
/// The interrupt is triggered when the signal goes from low to high.
pub const TRIGGER_RISING: Flags = Flags::new(bindings::IRQF_TRIGGER_RISING);
/// The interrupt is triggered when the signal goes from high to low.
pub const TRIGGER_FALLING: Flags = Flags::new(bindings::IRQF_TRIGGER_FALLING);
/// The interrupt is triggered while the signal is held high.
pub const TRIGGER_HIGH: Flags = Flags::new(bindings::IRQF_TRIGGER_HIGH);
/// The interrupt is triggered while the signal is held low.
pub const TRIGGER_LOW: Flags = Flags::new(bindings::IRQF_TRIGGER_LOW);
/// Allow sharing the IRQ among several devices.
pub const SHARED: Flags = Flags::new(bindings::IRQF_SHARED);
/// Set by callers when they expect sharing mismatches to occur.
pub const PROBE_SHARED: Flags = Flags::new(bindings::IRQF_PROBE_SHARED);
/// Flag to mark this interrupt as a timer interrupt.
pub const TIMER: Flags = Flags::new(bindings::IRQF_TIMER);
/// Interrupt is per CPU.
pub const PERCPU: Flags = Flags::new(bindings::IRQF_PERCPU);
/// Flag to exclude this interrupt from irq balancing.
pub const NOBALANCING: Flags = Flags::new(bindings::IRQF_NOBALANCING);
/// Interrupt is used for polling (only the interrupt that is registered
/// first in a shared interrupt is considered for performance reasons).
pub const IRQPOLL: Flags = Flags::new(bindings::IRQF_IRQPOLL);
/// Interrupt is not re-enabled after the hardirq handler finished. Used by
/// threaded interrupts which need to keep the irq line disabled until the
/// threaded handler has been run.
pub const ONESHOT: Flags = Flags::new(bindings::IRQF_ONESHOT);
/// Do not disable this IRQ during suspend. Does not guarantee that this
/// interrupt will wake the system from a suspended state.
pub const NO_SUSPEND: Flags = Flags::new(bindings::IRQF_NO_SUSPEND);
/// Force enable it on resume even if [`Flags::NO_SUSPEND`] is set.
pub const FORCE_RESUME: Flags = Flags::new(bindings::IRQF_FORCE_RESUME);
/// Interrupt cannot be threaded.
pub const NO_THREAD: Flags = Flags::new(bindings::IRQF_NO_THREAD);
/// Resume IRQ early during syscore instead of at device resume time.
pub const EARLY_RESUME: Flags = Flags::new(bindings::IRQF_EARLY_RESUME);
/// If the IRQ is shared with a [`Flags::NO_SUSPEND`] user, execute this
/// interrupt handler after suspending interrupts. For system wakeup devices,
/// users need to implement wakeup detection in their interrupt handlers.
pub const COND_SUSPEND: Flags = Flags::new(bindings::IRQF_COND_SUSPEND);
/// Don't enable IRQ or NMI automatically when users request it. Users will
/// enable it explicitly by `enable_irq` or `enable_nmi` later.
pub const NO_AUTOEN: Flags = Flags::new(bindings::IRQF_NO_AUTOEN);
/// Exclude from runaway detection for IPI and similar handlers; depends on
/// `PERCPU`.
pub const NO_DEBUG: Flags = Flags::new(bindings::IRQF_NO_DEBUG);
pub(crate) fn into_inner(self) -> c_ulong {
self.0
}
const fn new(value: u32) -> Self {
build_assert!(value as u64 <= c_ulong::MAX as u64);
Self(value as c_ulong)
}
}
impl core::ops::BitOr for Flags {
type Output = Self;
fn bitor(self, rhs: Self) -> Self::Output {
Self(self.0 | rhs.0)
}
}
impl core::ops::BitAnd for Flags {
type Output = Self;
fn bitand(self, rhs: Self) -> Self::Output {
Self(self.0 & rhs.0)
}
}
impl core::ops::Not for Flags {
type Output = Self;
fn not(self) -> Self::Output {
Self(!self.0)
}
}
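
As a small editorial illustration (not part of the patch) of the operator support described in the `Flags` documentation above:

use kernel::irq::Flags;

// Share the line, trigger on a high level, and keep the line masked until
// the threaded handler has finished running.
fn example_flags() -> Flags {
    Flags::SHARED | Flags::TRIGGER_HIGH | Flags::ONESHOT
}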

rust/kernel/irq/request.rs (new file)

@ -0,0 +1,507 @@
// SPDX-License-Identifier: GPL-2.0
// SPDX-FileCopyrightText: Copyright 2025 Collabora ltd.
//! This module provides types like [`Registration`] and
//! [`ThreadedRegistration`], which allow users to register handlers for a given
//! IRQ line.
use core::marker::PhantomPinned;
use crate::alloc::Allocator;
use crate::device::{Bound, Device};
use crate::devres::Devres;
use crate::error::to_result;
use crate::irq::flags::Flags;
use crate::prelude::*;
use crate::str::CStr;
use crate::sync::Arc;
/// The value that can be returned from a [`Handler`] or a [`ThreadedHandler`].
#[repr(u32)]
pub enum IrqReturn {
/// The interrupt was not from this device or was not handled.
None = bindings::irqreturn_IRQ_NONE,
/// The interrupt was handled by this device.
Handled = bindings::irqreturn_IRQ_HANDLED,
}
/// Callbacks for an IRQ handler.
pub trait Handler: Sync {
/// The hard IRQ handler.
///
/// This is executed in interrupt context, hence all corresponding
/// limitations do apply.
///
/// All work that does not necessarily need to be executed from
/// interrupt context should be deferred to a threaded handler.
/// See also [`ThreadedRegistration`].
fn handle(&self, device: &Device<Bound>) -> IrqReturn;
}
impl<T: ?Sized + Handler + Send> Handler for Arc<T> {
fn handle(&self, device: &Device<Bound>) -> IrqReturn {
T::handle(self, device)
}
}
impl<T: ?Sized + Handler, A: Allocator> Handler for Box<T, A> {
fn handle(&self, device: &Device<Bound>) -> IrqReturn {
T::handle(self, device)
}
}
/// # Invariants
///
/// - `self.irq` is the same as the one passed to `request_{threaded}_irq`.
/// - `cookie` was passed to `request_{threaded}_irq` as the cookie. It is guaranteed to be unique
/// by the type system, since each call to `new` will return a different instance of
/// `Registration`.
#[pin_data(PinnedDrop)]
struct RegistrationInner {
irq: u32,
cookie: *mut c_void,
}
impl RegistrationInner {
fn synchronize(&self) {
// SAFETY: safe as per the invariants of `RegistrationInner`
unsafe { bindings::synchronize_irq(self.irq) };
}
}
#[pinned_drop]
impl PinnedDrop for RegistrationInner {
fn drop(self: Pin<&mut Self>) {
// SAFETY:
//
// Safe as per the invariants of `RegistrationInner` and:
//
// - The containing struct is `!Unpin` and was initialized using
// pin-init, so it occupied the same memory location for the entirety of
// its lifetime.
//
// Notice that this will block until all handlers finish executing,
// i.e.: at no point will &self be invalid while the handler is running.
unsafe { bindings::free_irq(self.irq, self.cookie) };
}
}
// SAFETY: We only use `inner` on drop, which is called at most once with no
// concurrent access.
unsafe impl Sync for RegistrationInner {}
// SAFETY: It is safe to send `RegistrationInner` across threads.
unsafe impl Send for RegistrationInner {}
/// A request for an IRQ line for a given device.
///
/// # Invariants
///
/// - `irq` is the number of an interrupt source of `dev`.
/// - `irq` has not been registered yet.
pub struct IrqRequest<'a> {
dev: &'a Device<Bound>,
irq: u32,
}
impl<'a> IrqRequest<'a> {
/// Creates a new IRQ request for the given device and IRQ number.
///
/// # Safety
///
/// - `irq` should be a valid IRQ number for `dev`.
pub(crate) unsafe fn new(dev: &'a Device<Bound>, irq: u32) -> Self {
// INVARIANT: `irq` is a valid IRQ number for `dev`.
IrqRequest { dev, irq }
}
/// Returns the IRQ number of an [`IrqRequest`].
pub fn irq(&self) -> u32 {
self.irq
}
}
/// A registration of an IRQ handler for a given IRQ line.
///
/// # Examples
///
/// The following is an example of using `Registration`. It uses a
/// [`Completion`] to coordinate between the IRQ
/// handler and process context. [`Completion`] uses interior mutability, so the
/// handler can signal with [`Completion::complete_all()`] and the process
/// context can wait with [`Completion::wait_for_completion()`] even though
/// there is no way to get a mutable reference to any of the fields in
/// `Data`.
///
/// [`Completion`]: kernel::sync::Completion
/// [`Completion::complete_all()`]: kernel::sync::Completion::complete_all
/// [`Completion::wait_for_completion()`]: kernel::sync::Completion::wait_for_completion
///
/// ```
/// use kernel::c_str;
/// use kernel::device::{Bound, Device};
/// use kernel::irq::{self, Flags, IrqRequest, IrqReturn, Registration};
/// use kernel::prelude::*;
/// use kernel::sync::{Arc, Completion};
///
/// // Data shared between process and IRQ context.
/// #[pin_data]
/// struct Data {
/// #[pin]
/// completion: Completion,
/// }
///
/// impl irq::Handler for Data {
/// // Executed in IRQ context.
/// fn handle(&self, _dev: &Device<Bound>) -> IrqReturn {
/// self.completion.complete_all();
/// IrqReturn::Handled
/// }
/// }
///
/// // Registers an IRQ handler for the given IrqRequest.
/// //
/// // This runs in process context and assumes `request` was previously acquired from a device.
/// fn register_irq(
/// handler: impl PinInit<Data, Error>,
/// request: IrqRequest<'_>,
/// ) -> Result<Arc<Registration<Data>>> {
/// let registration = Registration::new(request, Flags::SHARED, c_str!("my_device"), handler);
///
/// let registration = Arc::pin_init(registration, GFP_KERNEL)?;
///
/// registration.handler().completion.wait_for_completion();
///
/// Ok(registration)
/// }
/// # Ok::<(), Error>(())
/// ```
///
/// # Invariants
///
/// * We own an irq handler whose cookie is a pointer to `Self`.
#[pin_data]
pub struct Registration<T: Handler + 'static> {
#[pin]
inner: Devres<RegistrationInner>,
#[pin]
handler: T,
/// Pinned because we need address stability so that we can pass a pointer
/// to the callback.
#[pin]
_pin: PhantomPinned,
}
impl<T: Handler + 'static> Registration<T> {
/// Registers the IRQ handler with the system for the given IRQ number.
pub fn new<'a>(
request: IrqRequest<'a>,
flags: Flags,
name: &'static CStr,
handler: impl PinInit<T, Error> + 'a,
) -> impl PinInit<Self, Error> + 'a {
try_pin_init!(&this in Self {
handler <- handler,
inner <- Devres::new(
request.dev,
try_pin_init!(RegistrationInner {
// INVARIANT: `this` is a valid pointer to the `Registration` instance
cookie: this.as_ptr().cast::<c_void>(),
irq: {
// SAFETY:
// - The callbacks are valid for use with request_irq.
// - If this succeeds, the slot is guaranteed to be valid until the
// destructor of Self runs, which will deregister the callbacks
// before the memory location becomes invalid.
// - When request_irq is called, everything that handle_irq_callback will
// touch has already been initialized, so it's safe for the callback to
// be called immediately.
to_result(unsafe {
bindings::request_irq(
request.irq,
Some(handle_irq_callback::<T>),
flags.into_inner(),
name.as_char_ptr(),
this.as_ptr().cast::<c_void>(),
)
})?;
request.irq
}
})
),
_pin: PhantomPinned,
})
}
/// Returns a reference to the handler that was registered with the system.
pub fn handler(&self) -> &T {
&self.handler
}
/// Wait for pending IRQ handlers on other CPUs.
///
/// This will attempt to access the inner [`Devres`] container.
pub fn try_synchronize(&self) -> Result {
let inner = self.inner.try_access().ok_or(ENODEV)?;
inner.synchronize();
Ok(())
}
/// Wait for pending IRQ handlers on other CPUs.
pub fn synchronize(&self, dev: &Device<Bound>) -> Result {
let inner = self.inner.access(dev)?;
inner.synchronize();
Ok(())
}
}
/// # Safety
///
/// This function should only be used as the callback in `request_irq`.
unsafe extern "C" fn handle_irq_callback<T: Handler>(_irq: i32, ptr: *mut c_void) -> c_uint {
// SAFETY: `ptr` is a pointer to `Registration<T>` set in `Registration::new`
let registration = unsafe { &*(ptr as *const Registration<T>) };
// SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq
// callback is running implies that the device has not yet been unbound.
let device = unsafe { registration.inner.device().as_bound() };
T::handle(&registration.handler, device) as c_uint
}
/// The value that can be returned from [`ThreadedHandler::handle`].
#[repr(u32)]
pub enum ThreadedIrqReturn {
/// The interrupt was not from this device or was not handled.
None = bindings::irqreturn_IRQ_NONE,
/// The interrupt was handled by this device.
Handled = bindings::irqreturn_IRQ_HANDLED,
/// The handler wants the handler thread to wake up.
WakeThread = bindings::irqreturn_IRQ_WAKE_THREAD,
}
/// Callbacks for a threaded IRQ handler.
pub trait ThreadedHandler: Sync {
/// The hard IRQ handler.
///
/// This is executed in interrupt context, hence all corresponding
/// limitations do apply. All work that does not necessarily need to be
/// executed from interrupt context should be deferred to the threaded
/// handler, i.e. [`ThreadedHandler::handle_threaded`].
///
/// The default implementation returns [`ThreadedIrqReturn::WakeThread`].
#[expect(unused_variables)]
fn handle(&self, device: &Device<Bound>) -> ThreadedIrqReturn {
ThreadedIrqReturn::WakeThread
}
/// The threaded IRQ handler.
///
/// This is executed in process context. The kernel creates a dedicated
/// `kthread` for this purpose.
fn handle_threaded(&self, device: &Device<Bound>) -> IrqReturn;
}
impl<T: ?Sized + ThreadedHandler + Send> ThreadedHandler for Arc<T> {
fn handle(&self, device: &Device<Bound>) -> ThreadedIrqReturn {
T::handle(self, device)
}
fn handle_threaded(&self, device: &Device<Bound>) -> IrqReturn {
T::handle_threaded(self, device)
}
}
impl<T: ?Sized + ThreadedHandler, A: Allocator> ThreadedHandler for Box<T, A> {
fn handle(&self, device: &Device<Bound>) -> ThreadedIrqReturn {
T::handle(self, device)
}
fn handle_threaded(&self, device: &Device<Bound>) -> IrqReturn {
T::handle_threaded(self, device)
}
}
/// A registration of a threaded IRQ handler for a given IRQ line.
///
/// Two callbacks are required: one to handle the IRQ, and one to handle any
/// other work in a separate thread.
///
/// The thread handler is only called if the IRQ handler returns
/// [`ThreadedIrqReturn::WakeThread`].
///
/// # Examples
///
/// The following is an example of using [`ThreadedRegistration`]. It uses a
/// [`Mutex`](kernel::sync::Mutex) to provide interior mutability.
///
/// ```
/// use kernel::c_str;
/// use kernel::device::{Bound, Device};
/// use kernel::irq::{
/// self, Flags, IrqRequest, IrqReturn, ThreadedHandler, ThreadedIrqReturn,
/// ThreadedRegistration,
/// };
/// use kernel::prelude::*;
/// use kernel::sync::{Arc, Mutex};
///
/// // Declare a struct that will be passed in when the interrupt fires. The u32
/// // merely serves as an example of some internal data.
/// //
/// // [`irq::ThreadedHandler::handle`] takes `&self`. This example
/// // illustrates how interior mutability can be used when sharing the data
/// // between process context and IRQ context.
/// #[pin_data]
/// struct Data {
/// #[pin]
/// value: Mutex<u32>,
/// }
///
/// impl ThreadedHandler for Data {
/// // This will run (in a separate kthread) if and only if
/// // [`ThreadedHandler::handle`] returns [`WakeThread`], which it does by
/// // default.
/// fn handle_threaded(&self, _dev: &Device<Bound>) -> IrqReturn {
/// let mut data = self.value.lock();
/// *data += 1;
/// IrqReturn::Handled
/// }
/// }
///
/// // Registers a threaded IRQ handler for the given [`IrqRequest`].
/// //
/// // This is executing in process context and assumes that `request` was
/// // previously acquired from a device.
/// fn register_threaded_irq(
/// handler: impl PinInit<Data, Error>,
/// request: IrqRequest<'_>,
/// ) -> Result<Arc<ThreadedRegistration<Data>>> {
/// let registration =
/// ThreadedRegistration::new(request, Flags::SHARED, c_str!("my_device"), handler);
///
/// let registration = Arc::pin_init(registration, GFP_KERNEL)?;
///
/// {
/// // The data can be accessed from process context too.
/// let mut data = registration.handler().value.lock();
/// *data += 1;
/// }
///
/// Ok(registration)
/// }
/// # Ok::<(), Error>(())
/// ```
///
/// # Invariants
///
/// * We own an irq handler whose cookie is a pointer to `Self`.
#[pin_data]
pub struct ThreadedRegistration<T: ThreadedHandler + 'static> {
#[pin]
inner: Devres<RegistrationInner>,
#[pin]
handler: T,
/// Pinned because we need address stability so that we can pass a pointer
/// to the callback.
#[pin]
_pin: PhantomPinned,
}
impl<T: ThreadedHandler + 'static> ThreadedRegistration<T> {
/// Registers the IRQ handler with the system for the given IRQ number.
pub fn new<'a>(
request: IrqRequest<'a>,
flags: Flags,
name: &'static CStr,
handler: impl PinInit<T, Error> + 'a,
) -> impl PinInit<Self, Error> + 'a {
try_pin_init!(&this in Self {
handler <- handler,
inner <- Devres::new(
request.dev,
try_pin_init!(RegistrationInner {
// INVARIANT: `this` is a valid pointer to the `ThreadedRegistration` instance.
cookie: this.as_ptr().cast::<c_void>(),
irq: {
// SAFETY:
// - The callbacks are valid for use with request_threaded_irq.
// - If this succeeds, the slot is guaranteed to be valid until the
// destructor of Self runs, which will deregister the callbacks
// before the memory location becomes invalid.
// - When request_threaded_irq is called, everything that the two callbacks
// will touch has already been initialized, so it's safe for the
// callbacks to be called immediately.
to_result(unsafe {
bindings::request_threaded_irq(
request.irq,
Some(handle_threaded_irq_callback::<T>),
Some(thread_fn_callback::<T>),
flags.into_inner(),
name.as_char_ptr(),
this.as_ptr().cast::<c_void>(),
)
})?;
request.irq
}
})
),
_pin: PhantomPinned,
})
}
/// Returns a reference to the handler that was registered with the system.
pub fn handler(&self) -> &T {
&self.handler
}
/// Wait for pending IRQ handlers on other CPUs.
///
/// This will attempt to access the inner [`Devres`] container.
pub fn try_synchronize(&self) -> Result {
let inner = self.inner.try_access().ok_or(ENODEV)?;
inner.synchronize();
Ok(())
}
/// Wait for pending IRQ handlers on other CPUs.
pub fn synchronize(&self, dev: &Device<Bound>) -> Result {
let inner = self.inner.access(dev)?;
inner.synchronize();
Ok(())
}
}
/// # Safety
///
/// This function should only be used as the callback in `request_threaded_irq`.
unsafe extern "C" fn handle_threaded_irq_callback<T: ThreadedHandler>(
_irq: i32,
ptr: *mut c_void,
) -> c_uint {
// SAFETY: `ptr` is a pointer to `ThreadedRegistration<T>` set in `ThreadedRegistration::new`
let registration = unsafe { &*(ptr as *const ThreadedRegistration<T>) };
// SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq
// callback is running implies that the device has not yet been unbound.
let device = unsafe { registration.inner.device().as_bound() };
T::handle(&registration.handler, device) as c_uint
}
/// # Safety
///
/// This function should only be used as the callback in `request_threaded_irq`.
unsafe extern "C" fn thread_fn_callback<T: ThreadedHandler>(_irq: i32, ptr: *mut c_void) -> c_uint {
// SAFETY: `ptr` is a pointer to `ThreadedRegistration<T>` set in `ThreadedRegistration::new`
let registration = unsafe { &*(ptr as *const ThreadedRegistration<T>) };
// SAFETY: The irq callback is removed before the device is unbound, so the fact that the irq
// callback is running implies that the device has not yet been unbound.
let device = unsafe { registration.inner.device().as_bound() };
T::handle_threaded(&registration.handler, device) as c_uint
}

rust/kernel/lib.rs

@ -78,6 +78,7 @@
pub mod cpufreq;
pub mod cpumask;
pub mod cred;
pub mod debugfs;
pub mod device;
pub mod device_id;
pub mod devres;
@ -94,6 +95,7 @@
pub mod init;
pub mod io;
pub mod ioctl;
pub mod irq;
pub mod jump_label;
#[cfg(CONFIG_KUNIT)]
pub mod kunit;
@ -112,6 +114,7 @@
pub mod platform;
pub mod prelude;
pub mod print;
pub mod processor;
pub mod ptr;
pub mod rbtree;
pub mod regulator;

rust/kernel/pci.rs

@ -10,10 +10,11 @@
devres::Devres,
driver,
error::{from_result, to_result, Result},
io::Io,
io::IoRaw,
io::{Io, IoRaw},
irq::{self, IrqRequest},
str::CStr,
types::{ARef, Opaque},
sync::aref::ARef,
types::Opaque,
ThisModule,
};
use core::{
@ -23,6 +24,10 @@
};
use kernel::prelude::*;
mod id;
pub use self::id::{Class, ClassMask, Vendor};
/// An adapter for the registration of PCI drivers.
pub struct Adapter<T: Driver>(T);
@ -60,7 +65,7 @@ impl<T: Driver + 'static> Adapter<T> {
extern "C" fn probe_callback(
pdev: *mut bindings::pci_dev,
id: *const bindings::pci_device_id,
) -> kernel::ffi::c_int {
) -> c_int {
// SAFETY: The PCI bus only ever calls the probe callback with a valid pointer to a
// `struct pci_dev`.
//
@ -128,10 +133,11 @@ impl DeviceId {
/// Equivalent to C's `PCI_DEVICE` macro.
///
/// Create a new `pci::DeviceId` from a vendor and device ID number.
pub const fn from_id(vendor: u32, device: u32) -> Self {
/// Create a new `pci::DeviceId` from a vendor and device ID.
#[inline]
pub const fn from_id(vendor: Vendor, device: u32) -> Self {
Self(bindings::pci_device_id {
vendor,
vendor: vendor.as_raw() as u32,
device,
subvendor: DeviceId::PCI_ANY_ID,
subdevice: DeviceId::PCI_ANY_ID,
@ -145,6 +151,7 @@ pub const fn from_id(vendor: u32, device: u32) -> Self {
/// Equivalent to C's `PCI_DEVICE_CLASS` macro.
///
/// Create a new `pci::DeviceId` from a class number and mask.
#[inline]
pub const fn from_class(class: u32, class_mask: u32) -> Self {
Self(bindings::pci_device_id {
vendor: DeviceId::PCI_ANY_ID,
@ -157,6 +164,29 @@ pub const fn from_class(class: u32, class_mask: u32) -> Self {
override_only: 0,
})
}
/// Create a new [`DeviceId`] from a class number, mask, and specific vendor.
///
/// This is more targeted than [`DeviceId::from_class`]: in addition to matching by [`Vendor`],
/// it also matches the PCI [`Class`] (up to the entire 24 bits, depending on the
/// [`ClassMask`]).
#[inline]
pub const fn from_class_and_vendor(
class: Class,
class_mask: ClassMask,
vendor: Vendor,
) -> Self {
Self(bindings::pci_device_id {
vendor: vendor.as_raw() as u32,
device: DeviceId::PCI_ANY_ID,
subvendor: DeviceId::PCI_ANY_ID,
subdevice: DeviceId::PCI_ANY_ID,
class: class.as_raw(),
class_mask: class_mask.as_raw(),
driver_data: 0,
override_only: 0,
})
}
}
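
For illustration only (an editorial sketch, not from the patch), a table entry built with the new helper that matches every function of one vendor whose base class and subclass identify a VGA display controller, ignoring the programming-interface byte; `Vendor::REDHAT` is just an example constant:

use kernel::pci::{self, Class, ClassMask, Vendor};

// Match base class + subclass only; the low 8 bits (prog-if) are ignored.
const VGA_ONLY: pci::DeviceId = pci::DeviceId::from_class_and_vendor(
    Class::DISPLAY_VGA,
    ClassMask::ClassSubclass,
    Vendor::REDHAT,
);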
// SAFETY: `DeviceId` is a `#[repr(transparent)]` wrapper of `pci_device_id` and does not add
@ -206,7 +236,7 @@ macro_rules! pci_device_table {
/// <MyDriver as pci::Driver>::IdInfo,
/// [
/// (
/// pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, bindings::PCI_ANY_ID as u32),
/// pci::DeviceId::from_id(pci::Vendor::REDHAT, bindings::PCI_ANY_ID as u32),
/// (),
/// )
/// ]
@ -240,11 +270,11 @@ pub trait Driver: Send {
/// PCI driver probe.
///
/// Called when a new platform device is added or discovered.
/// Implementers should attempt to initialize the device here.
/// Called when a new PCI device is added or discovered. Implementers should
/// attempt to initialize the device here.
fn probe(dev: &Device<device::Core>, id_info: &Self::IdInfo) -> Result<Pin<KBox<Self>>>;
/// Platform driver unbind.
/// PCI driver unbind.
///
/// Called when a [`Device`] is unbound from its bound [`Driver`]. Implementing this callback
/// is optional.
@ -347,7 +377,7 @@ unsafe fn do_release(pdev: &Device, ioptr: usize, num: i32) {
// `ioptr` is valid by the safety requirements.
// `num` is valid by the safety requirements.
unsafe {
bindings::pci_iounmap(pdev.as_raw(), ioptr as *mut kernel::ffi::c_void);
bindings::pci_iounmap(pdev.as_raw(), ioptr as *mut c_void);
bindings::pci_release_region(pdev.as_raw(), num);
}
}
@ -359,6 +389,7 @@ fn release(&self) {
}
impl Bar {
#[inline]
fn index_is_valid(index: u32) -> bool {
// A `struct pci_dev` owns an array of resources with at most `PCI_NUM_RESOURCES` entries.
index < bindings::PCI_NUM_RESOURCES
@ -381,24 +412,90 @@ fn deref(&self) -> &Self::Target {
}
impl<Ctx: device::DeviceContext> Device<Ctx> {
#[inline]
fn as_raw(&self) -> *mut bindings::pci_dev {
self.0.get()
}
}
impl Device {
/// Returns the PCI vendor ID.
pub fn vendor_id(&self) -> u16 {
/// Returns the PCI vendor ID as [`Vendor`].
///
/// # Examples
///
/// ```
/// # use kernel::{device::Core, pci::{self, Vendor}, prelude::*};
/// fn log_device_info(pdev: &pci::Device<Core>) -> Result {
/// // Get an instance of `Vendor`.
/// let vendor = pdev.vendor_id();
/// dev_info!(
/// pdev.as_ref(),
/// "Device: Vendor={}, Device=0x{:x}\n",
/// vendor,
/// pdev.device_id()
/// );
/// Ok(())
/// }
/// ```
#[inline]
pub fn vendor_id(&self) -> Vendor {
// SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
unsafe { (*self.as_raw()).vendor }
let vendor_id = unsafe { (*self.as_raw()).vendor };
Vendor::from_raw(vendor_id)
}
/// Returns the PCI device ID.
#[inline]
pub fn device_id(&self) -> u16 {
// SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
// SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
// `struct pci_dev`.
unsafe { (*self.as_raw()).device }
}
/// Returns the PCI revision ID.
#[inline]
pub fn revision_id(&self) -> u8 {
// SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
// `struct pci_dev`.
unsafe { (*self.as_raw()).revision }
}
/// Returns the PCI device ID, i.e. the bus number combined with the device/function (devfn).
#[inline]
pub fn dev_id(&self) -> u16 {
// SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
// `struct pci_dev`.
unsafe { bindings::pci_dev_id(self.as_raw()) }
}
/// Returns the PCI subsystem vendor ID.
#[inline]
pub fn subsystem_vendor_id(&self) -> u16 {
// SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
// `struct pci_dev`.
unsafe { (*self.as_raw()).subsystem_vendor }
}
/// Returns the PCI subsystem device ID.
#[inline]
pub fn subsystem_device_id(&self) -> u16 {
// SAFETY: By its type invariant `self.as_raw` is always a valid pointer to a
// `struct pci_dev`.
unsafe { (*self.as_raw()).subsystem_device }
}
/// Returns the start of the given PCI bar resource.
pub fn resource_start(&self, bar: u32) -> Result<bindings::resource_size_t> {
if !Bar::index_is_valid(bar) {
return Err(EINVAL);
}
// SAFETY:
// - `bar` is a valid bar number, as guaranteed by the above call to `Bar::index_is_valid`,
// - by its type invariant `self.as_raw` is always a valid pointer to a `struct pci_dev`.
Ok(unsafe { bindings::pci_resource_start(self.as_raw(), bar.try_into()?) })
}
/// Returns the size of the given PCI bar resource.
pub fn resource_len(&self, bar: u32) -> Result<bindings::resource_size_t> {
if !Bar::index_is_valid(bar) {
@ -410,6 +507,13 @@ pub fn resource_len(&self, bar: u32) -> Result<bindings::resource_size_t> {
// - by its type invariant `self.as_raw` is always a valid pointer to a `struct pci_dev`.
Ok(unsafe { bindings::pci_resource_len(self.as_raw(), bar.try_into()?) })
}
/// Returns the PCI class as a `Class` struct.
#[inline]
pub fn pci_class(&self) -> Class {
// SAFETY: `self.as_raw` is a valid pointer to a `struct pci_dev`.
Class::from_raw(unsafe { (*self.as_raw()).class })
}
}
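
An editorial sketch (not part of the patch) of how a driver might use the new resource accessors to sanity-check BAR 0 during probe; the function name and log message are illustrative only:

use kernel::{device, pci, prelude::*};

fn check_bar0(pdev: &pci::Device<device::Core>) -> Result {
    let start = pdev.resource_start(0)?;
    let len = pdev.resource_len(0)?;
    dev_info!(pdev.as_ref(), "BAR0: start=0x{:x} len=0x{:x}\n", start, len);
    if len == 0 {
        return Err(ENODEV);
    }
    Ok(())
}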
impl Device<device::Bound> {
@ -431,6 +535,47 @@ pub fn iomap_region<'a>(
) -> impl PinInit<Devres<Bar>, Error> + 'a {
self.iomap_region_sized::<0>(bar, name)
}
/// Returns an [`IrqRequest`] for the IRQ vector at the given index, if any.
pub fn irq_vector(&self, index: u32) -> Result<IrqRequest<'_>> {
// SAFETY: `self.as_raw` returns a valid pointer to a `struct pci_dev`.
let irq = unsafe { crate::bindings::pci_irq_vector(self.as_raw(), index) };
if irq < 0 {
return Err(crate::error::Error::from_errno(irq));
}
// SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
}
/// Returns a [`kernel::irq::Registration`] for the IRQ vector at the given
/// index.
pub fn request_irq<'a, T: crate::irq::Handler + 'static>(
&'a self,
index: u32,
flags: irq::Flags,
name: &'static CStr,
handler: impl PinInit<T, Error> + 'a,
) -> Result<impl PinInit<irq::Registration<T>, Error> + 'a> {
let request = self.irq_vector(index)?;
Ok(irq::Registration::<T>::new(request, flags, name, handler))
}
/// Returns a [`kernel::irq::ThreadedRegistration`] for the IRQ vector at
/// the given index.
pub fn request_threaded_irq<'a, T: crate::irq::ThreadedHandler + 'static>(
&'a self,
index: u32,
flags: irq::Flags,
name: &'static CStr,
handler: impl PinInit<T, Error> + 'a,
) -> Result<impl PinInit<irq::ThreadedRegistration<T>, Error> + 'a> {
let request = self.irq_vector(index)?;
Ok(irq::ThreadedRegistration::<T>::new(
request, flags, name, handler,
))
}
}
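
The following editorial sketch (not part of the patch) shows how the new per-device IRQ accessors might be wired together; `ExampleHandler` and the interrupt name are made up for illustration:

use kernel::{c_str, device, irq, pci, prelude::*, sync::Arc};

// A trivial handler; a real driver would keep shared state in this type.
struct ExampleHandler;

impl irq::Handler for ExampleHandler {
    fn handle(&self, _dev: &device::Device<device::Bound>) -> irq::IrqReturn {
        irq::IrqReturn::Handled
    }
}

fn setup_irq<'a>(
    pdev: &'a pci::Device<device::Bound>,
    handler: impl PinInit<ExampleHandler, Error> + 'a,
) -> Result<Arc<irq::Registration<ExampleHandler>>> {
    // Vector 0 must have been allocated beforehand (e.g. during MSI/MSI-X
    // setup); otherwise the underlying `irq_vector()` call fails.
    let init = pdev.request_irq(0, irq::Flags::SHARED, c_str!("example_pci_irq"), handler)?;
    // The returned value is only a pin-initializer; placing it into pinned,
    // reference-counted storage is what actually registers the handler.
    let registration = Arc::pin_init(init, GFP_KERNEL)?;
    Ok(registration)
}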
impl Device<device::Core> {
@ -441,6 +586,7 @@ pub fn enable_device_mem(&self) -> Result {
}
/// Enable bus-mastering for this device.
#[inline]
pub fn set_master(&self) {
// SAFETY: `self.as_raw` is guaranteed to be a pointer to a valid `struct pci_dev`.
unsafe { bindings::pci_set_master(self.as_raw()) };
@ -455,7 +601,7 @@ pub fn set_master(&self) {
impl crate::dma::Device for Device<device::Core> {}
// SAFETY: Instances of `Device` are always reference-counted.
unsafe impl crate::types::AlwaysRefCounted for Device {
unsafe impl crate::sync::aref::AlwaysRefCounted for Device {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
unsafe { bindings::pci_dev_get(self.as_raw()) };

rust/kernel/pci/id.rs (new file)

@ -0,0 +1,578 @@
// SPDX-License-Identifier: GPL-2.0
//! PCI device identifiers and related types.
//!
//! This module contains PCI class codes, Vendor IDs, and supporting types.
use crate::{bindings, error::code::EINVAL, error::Error, prelude::*};
use core::fmt;
/// PCI device class codes.
///
/// Each entry contains the full 24-bit PCI class code (base class in bits
/// 23-16, subclass in bits 15-8, programming interface in bits 7-0).
///
/// # Examples
///
/// ```
/// # use kernel::{device::Core, pci::{self, Class}, prelude::*};
/// fn probe_device(pdev: &pci::Device<Core>) -> Result {
/// let pci_class = pdev.pci_class();
/// dev_info!(
/// pdev.as_ref(),
/// "Detected PCI class: {}\n",
/// pci_class
/// );
/// Ok(())
/// }
/// ```
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(transparent)]
pub struct Class(u32);
/// PCI class mask constants for matching [`Class`] codes.
#[repr(u32)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ClassMask {
/// Match the full 24-bit class code.
Full = 0xffffff,
/// Match the upper 16 bits of the class code (base class and subclass only).
ClassSubclass = 0xffff00,
}
macro_rules! define_all_pci_classes {
(
$($variant:ident = $binding:expr,)+
) => {
impl Class {
$(
#[allow(missing_docs)]
pub const $variant: Self = Self(Self::to_24bit_class($binding));
)+
}
impl fmt::Display for Class {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
$(
&Self::$variant => write!(f, stringify!($variant)),
)+
_ => <Self as fmt::Debug>::fmt(self, f),
}
}
}
};
}
/// Once constructed, a [`Class`] contains a valid PCI class code.
impl Class {
/// Create a [`Class`] from a raw 24-bit class code.
#[inline]
pub(super) fn from_raw(class_code: u32) -> Self {
Self(class_code)
}
/// Get the raw 24-bit class code value.
#[inline]
pub const fn as_raw(self) -> u32 {
self.0
}
// Converts a PCI class constant to 24-bit format.
//
// Many device drivers use only the upper 16 bits (base class and subclass),
// but some use the full 24 bits. In order to support both cases, store the
// class code as a 24-bit value, where 16-bit values are shifted up 8 bits.
const fn to_24bit_class(val: u32) -> u32 {
if val > 0xFFFF {
val
} else {
val << 8
}
}
}
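
A quick editorial illustration (not from the patch) of the shifting rule implemented by `to_24bit_class`, using constants that appear in the table further below:

// 16-bit class constants are shifted up one byte; 24-bit ones are kept as-is.
fn class_code_examples() {
    assert_eq!(Class::DISPLAY_VGA.as_raw(), 0x030000); // PCI_CLASS_DISPLAY_VGA == 0x0300
    assert_eq!(Class::STORAGE_EXPRESS.as_raw(), 0x010802); // already a full 24-bit code
}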
impl fmt::Debug for Class {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "0x{:06x}", self.0)
}
}
impl ClassMask {
/// Get the raw mask value.
#[inline]
pub const fn as_raw(self) -> u32 {
self as u32
}
}
impl TryFrom<u32> for ClassMask {
type Error = Error;
fn try_from(value: u32) -> Result<Self, Self::Error> {
match value {
0xffffff => Ok(ClassMask::Full),
0xffff00 => Ok(ClassMask::ClassSubclass),
_ => Err(EINVAL),
}
}
}
/// PCI vendor IDs.
///
/// Each entry contains the 16-bit PCI vendor ID as assigned by the PCI SIG.
#[derive(Clone, Copy, PartialEq, Eq)]
#[repr(transparent)]
pub struct Vendor(u16);
macro_rules! define_all_pci_vendors {
(
$($variant:ident = $binding:expr,)+
) => {
impl Vendor {
$(
#[allow(missing_docs)]
pub const $variant: Self = Self($binding as u16);
)+
}
impl fmt::Display for Vendor {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
$(
&Self::$variant => write!(f, stringify!($variant)),
)+
_ => <Self as fmt::Debug>::fmt(self, f),
}
}
}
};
}
/// Once constructed, a `Vendor` contains a valid PCI Vendor ID.
impl Vendor {
/// Create a Vendor from a raw 16-bit vendor ID.
#[inline]
pub(super) fn from_raw(vendor_id: u16) -> Self {
Self(vendor_id)
}
/// Get the raw 16-bit vendor ID value.
#[inline]
pub const fn as_raw(self) -> u16 {
self.0
}
}
impl fmt::Debug for Vendor {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "0x{:04x}", self.0)
}
}
define_all_pci_classes! {
NOT_DEFINED = bindings::PCI_CLASS_NOT_DEFINED, // 0x000000
NOT_DEFINED_VGA = bindings::PCI_CLASS_NOT_DEFINED_VGA, // 0x000100
STORAGE_SCSI = bindings::PCI_CLASS_STORAGE_SCSI, // 0x010000
STORAGE_IDE = bindings::PCI_CLASS_STORAGE_IDE, // 0x010100
STORAGE_FLOPPY = bindings::PCI_CLASS_STORAGE_FLOPPY, // 0x010200
STORAGE_IPI = bindings::PCI_CLASS_STORAGE_IPI, // 0x010300
STORAGE_RAID = bindings::PCI_CLASS_STORAGE_RAID, // 0x010400
STORAGE_SATA = bindings::PCI_CLASS_STORAGE_SATA, // 0x010600
STORAGE_SATA_AHCI = bindings::PCI_CLASS_STORAGE_SATA_AHCI, // 0x010601
STORAGE_SAS = bindings::PCI_CLASS_STORAGE_SAS, // 0x010700
STORAGE_EXPRESS = bindings::PCI_CLASS_STORAGE_EXPRESS, // 0x010802
STORAGE_OTHER = bindings::PCI_CLASS_STORAGE_OTHER, // 0x018000
NETWORK_ETHERNET = bindings::PCI_CLASS_NETWORK_ETHERNET, // 0x020000
NETWORK_TOKEN_RING = bindings::PCI_CLASS_NETWORK_TOKEN_RING, // 0x020100
NETWORK_FDDI = bindings::PCI_CLASS_NETWORK_FDDI, // 0x020200
NETWORK_ATM = bindings::PCI_CLASS_NETWORK_ATM, // 0x020300
NETWORK_OTHER = bindings::PCI_CLASS_NETWORK_OTHER, // 0x028000
DISPLAY_VGA = bindings::PCI_CLASS_DISPLAY_VGA, // 0x030000
DISPLAY_XGA = bindings::PCI_CLASS_DISPLAY_XGA, // 0x030100
DISPLAY_3D = bindings::PCI_CLASS_DISPLAY_3D, // 0x030200
DISPLAY_OTHER = bindings::PCI_CLASS_DISPLAY_OTHER, // 0x038000
MULTIMEDIA_VIDEO = bindings::PCI_CLASS_MULTIMEDIA_VIDEO, // 0x040000
MULTIMEDIA_AUDIO = bindings::PCI_CLASS_MULTIMEDIA_AUDIO, // 0x040100
MULTIMEDIA_PHONE = bindings::PCI_CLASS_MULTIMEDIA_PHONE, // 0x040200
MULTIMEDIA_HD_AUDIO = bindings::PCI_CLASS_MULTIMEDIA_HD_AUDIO, // 0x040300
MULTIMEDIA_OTHER = bindings::PCI_CLASS_MULTIMEDIA_OTHER, // 0x048000
MEMORY_RAM = bindings::PCI_CLASS_MEMORY_RAM, // 0x050000
MEMORY_FLASH = bindings::PCI_CLASS_MEMORY_FLASH, // 0x050100
MEMORY_CXL = bindings::PCI_CLASS_MEMORY_CXL, // 0x050200
MEMORY_OTHER = bindings::PCI_CLASS_MEMORY_OTHER, // 0x058000
BRIDGE_HOST = bindings::PCI_CLASS_BRIDGE_HOST, // 0x060000
BRIDGE_ISA = bindings::PCI_CLASS_BRIDGE_ISA, // 0x060100
BRIDGE_EISA = bindings::PCI_CLASS_BRIDGE_EISA, // 0x060200
BRIDGE_MC = bindings::PCI_CLASS_BRIDGE_MC, // 0x060300
BRIDGE_PCI_NORMAL = bindings::PCI_CLASS_BRIDGE_PCI_NORMAL, // 0x060400
BRIDGE_PCI_SUBTRACTIVE = bindings::PCI_CLASS_BRIDGE_PCI_SUBTRACTIVE, // 0x060401
BRIDGE_PCMCIA = bindings::PCI_CLASS_BRIDGE_PCMCIA, // 0x060500
BRIDGE_NUBUS = bindings::PCI_CLASS_BRIDGE_NUBUS, // 0x060600
BRIDGE_CARDBUS = bindings::PCI_CLASS_BRIDGE_CARDBUS, // 0x060700
BRIDGE_RACEWAY = bindings::PCI_CLASS_BRIDGE_RACEWAY, // 0x060800
BRIDGE_OTHER = bindings::PCI_CLASS_BRIDGE_OTHER, // 0x068000
COMMUNICATION_SERIAL = bindings::PCI_CLASS_COMMUNICATION_SERIAL, // 0x070000
COMMUNICATION_PARALLEL = bindings::PCI_CLASS_COMMUNICATION_PARALLEL, // 0x070100
COMMUNICATION_MULTISERIAL = bindings::PCI_CLASS_COMMUNICATION_MULTISERIAL, // 0x070200
COMMUNICATION_MODEM = bindings::PCI_CLASS_COMMUNICATION_MODEM, // 0x070300
COMMUNICATION_OTHER = bindings::PCI_CLASS_COMMUNICATION_OTHER, // 0x078000
SYSTEM_PIC = bindings::PCI_CLASS_SYSTEM_PIC, // 0x080000
SYSTEM_PIC_IOAPIC = bindings::PCI_CLASS_SYSTEM_PIC_IOAPIC, // 0x080010
SYSTEM_PIC_IOXAPIC = bindings::PCI_CLASS_SYSTEM_PIC_IOXAPIC, // 0x080020
SYSTEM_DMA = bindings::PCI_CLASS_SYSTEM_DMA, // 0x080100
SYSTEM_TIMER = bindings::PCI_CLASS_SYSTEM_TIMER, // 0x080200
SYSTEM_RTC = bindings::PCI_CLASS_SYSTEM_RTC, // 0x080300
SYSTEM_PCI_HOTPLUG = bindings::PCI_CLASS_SYSTEM_PCI_HOTPLUG, // 0x080400
SYSTEM_SDHCI = bindings::PCI_CLASS_SYSTEM_SDHCI, // 0x080500
SYSTEM_RCEC = bindings::PCI_CLASS_SYSTEM_RCEC, // 0x080700
SYSTEM_OTHER = bindings::PCI_CLASS_SYSTEM_OTHER, // 0x088000
INPUT_KEYBOARD = bindings::PCI_CLASS_INPUT_KEYBOARD, // 0x090000
INPUT_PEN = bindings::PCI_CLASS_INPUT_PEN, // 0x090100
INPUT_MOUSE = bindings::PCI_CLASS_INPUT_MOUSE, // 0x090200
INPUT_SCANNER = bindings::PCI_CLASS_INPUT_SCANNER, // 0x090300
INPUT_GAMEPORT = bindings::PCI_CLASS_INPUT_GAMEPORT, // 0x090400
INPUT_OTHER = bindings::PCI_CLASS_INPUT_OTHER, // 0x098000
DOCKING_GENERIC = bindings::PCI_CLASS_DOCKING_GENERIC, // 0x0a0000
DOCKING_OTHER = bindings::PCI_CLASS_DOCKING_OTHER, // 0x0a8000
PROCESSOR_386 = bindings::PCI_CLASS_PROCESSOR_386, // 0x0b0000
PROCESSOR_486 = bindings::PCI_CLASS_PROCESSOR_486, // 0x0b0100
PROCESSOR_PENTIUM = bindings::PCI_CLASS_PROCESSOR_PENTIUM, // 0x0b0200
PROCESSOR_ALPHA = bindings::PCI_CLASS_PROCESSOR_ALPHA, // 0x0b1000
PROCESSOR_POWERPC = bindings::PCI_CLASS_PROCESSOR_POWERPC, // 0x0b2000
PROCESSOR_MIPS = bindings::PCI_CLASS_PROCESSOR_MIPS, // 0x0b3000
PROCESSOR_CO = bindings::PCI_CLASS_PROCESSOR_CO, // 0x0b4000
SERIAL_FIREWIRE = bindings::PCI_CLASS_SERIAL_FIREWIRE, // 0x0c0000
SERIAL_FIREWIRE_OHCI = bindings::PCI_CLASS_SERIAL_FIREWIRE_OHCI, // 0x0c0010
SERIAL_ACCESS = bindings::PCI_CLASS_SERIAL_ACCESS, // 0x0c0100
SERIAL_SSA = bindings::PCI_CLASS_SERIAL_SSA, // 0x0c0200
SERIAL_USB_UHCI = bindings::PCI_CLASS_SERIAL_USB_UHCI, // 0x0c0300
SERIAL_USB_OHCI = bindings::PCI_CLASS_SERIAL_USB_OHCI, // 0x0c0310
SERIAL_USB_EHCI = bindings::PCI_CLASS_SERIAL_USB_EHCI, // 0x0c0320
SERIAL_USB_XHCI = bindings::PCI_CLASS_SERIAL_USB_XHCI, // 0x0c0330
SERIAL_USB_CDNS = bindings::PCI_CLASS_SERIAL_USB_CDNS, // 0x0c0380
SERIAL_USB_DEVICE = bindings::PCI_CLASS_SERIAL_USB_DEVICE, // 0x0c03fe
SERIAL_FIBER = bindings::PCI_CLASS_SERIAL_FIBER, // 0x0c0400
SERIAL_SMBUS = bindings::PCI_CLASS_SERIAL_SMBUS, // 0x0c0500
SERIAL_IPMI_SMIC = bindings::PCI_CLASS_SERIAL_IPMI_SMIC, // 0x0c0700
SERIAL_IPMI_KCS = bindings::PCI_CLASS_SERIAL_IPMI_KCS, // 0x0c0701
SERIAL_IPMI_BT = bindings::PCI_CLASS_SERIAL_IPMI_BT, // 0x0c0702
WIRELESS_RF_CONTROLLER = bindings::PCI_CLASS_WIRELESS_RF_CONTROLLER, // 0x0d1000
WIRELESS_WHCI = bindings::PCI_CLASS_WIRELESS_WHCI, // 0x0d1010
INTELLIGENT_I2O = bindings::PCI_CLASS_INTELLIGENT_I2O, // 0x0e0000
SATELLITE_TV = bindings::PCI_CLASS_SATELLITE_TV, // 0x0f0000
SATELLITE_AUDIO = bindings::PCI_CLASS_SATELLITE_AUDIO, // 0x0f0100
SATELLITE_VOICE = bindings::PCI_CLASS_SATELLITE_VOICE, // 0x0f0300
SATELLITE_DATA = bindings::PCI_CLASS_SATELLITE_DATA, // 0x0f0400
CRYPT_NETWORK = bindings::PCI_CLASS_CRYPT_NETWORK, // 0x100000
CRYPT_ENTERTAINMENT = bindings::PCI_CLASS_CRYPT_ENTERTAINMENT, // 0x100100
CRYPT_OTHER = bindings::PCI_CLASS_CRYPT_OTHER, // 0x108000
SP_DPIO = bindings::PCI_CLASS_SP_DPIO, // 0x110000
SP_OTHER = bindings::PCI_CLASS_SP_OTHER, // 0x118000
ACCELERATOR_PROCESSING = bindings::PCI_CLASS_ACCELERATOR_PROCESSING, // 0x120000
OTHERS = bindings::PCI_CLASS_OTHERS, // 0xff0000
}
define_all_pci_vendors! {
PCI_SIG = bindings::PCI_VENDOR_ID_PCI_SIG, // 0x0001
LOONGSON = bindings::PCI_VENDOR_ID_LOONGSON, // 0x0014
SOLIDIGM = bindings::PCI_VENDOR_ID_SOLIDIGM, // 0x025e
TTTECH = bindings::PCI_VENDOR_ID_TTTECH, // 0x0357
DYNALINK = bindings::PCI_VENDOR_ID_DYNALINK, // 0x0675
UBIQUITI = bindings::PCI_VENDOR_ID_UBIQUITI, // 0x0777
BERKOM = bindings::PCI_VENDOR_ID_BERKOM, // 0x0871
ITTIM = bindings::PCI_VENDOR_ID_ITTIM, // 0x0b48
COMPAQ = bindings::PCI_VENDOR_ID_COMPAQ, // 0x0e11
LSI_LOGIC = bindings::PCI_VENDOR_ID_LSI_LOGIC, // 0x1000
ATI = bindings::PCI_VENDOR_ID_ATI, // 0x1002
VLSI = bindings::PCI_VENDOR_ID_VLSI, // 0x1004
ADL = bindings::PCI_VENDOR_ID_ADL, // 0x1005
NS = bindings::PCI_VENDOR_ID_NS, // 0x100b
TSENG = bindings::PCI_VENDOR_ID_TSENG, // 0x100c
WEITEK = bindings::PCI_VENDOR_ID_WEITEK, // 0x100e
DEC = bindings::PCI_VENDOR_ID_DEC, // 0x1011
CIRRUS = bindings::PCI_VENDOR_ID_CIRRUS, // 0x1013
IBM = bindings::PCI_VENDOR_ID_IBM, // 0x1014
UNISYS = bindings::PCI_VENDOR_ID_UNISYS, // 0x1018
COMPEX2 = bindings::PCI_VENDOR_ID_COMPEX2, // 0x101a
WD = bindings::PCI_VENDOR_ID_WD, // 0x101c
AMI = bindings::PCI_VENDOR_ID_AMI, // 0x101e
AMD = bindings::PCI_VENDOR_ID_AMD, // 0x1022
TRIDENT = bindings::PCI_VENDOR_ID_TRIDENT, // 0x1023
AI = bindings::PCI_VENDOR_ID_AI, // 0x1025
DELL = bindings::PCI_VENDOR_ID_DELL, // 0x1028
MATROX = bindings::PCI_VENDOR_ID_MATROX, // 0x102B
MOBILITY_ELECTRONICS = bindings::PCI_VENDOR_ID_MOBILITY_ELECTRONICS, // 0x14f2
CT = bindings::PCI_VENDOR_ID_CT, // 0x102c
MIRO = bindings::PCI_VENDOR_ID_MIRO, // 0x1031
NEC = bindings::PCI_VENDOR_ID_NEC, // 0x1033
FD = bindings::PCI_VENDOR_ID_FD, // 0x1036
SI = bindings::PCI_VENDOR_ID_SI, // 0x1039
HP = bindings::PCI_VENDOR_ID_HP, // 0x103c
HP_3PAR = bindings::PCI_VENDOR_ID_HP_3PAR, // 0x1590
PCTECH = bindings::PCI_VENDOR_ID_PCTECH, // 0x1042
ASUSTEK = bindings::PCI_VENDOR_ID_ASUSTEK, // 0x1043
DPT = bindings::PCI_VENDOR_ID_DPT, // 0x1044
OPTI = bindings::PCI_VENDOR_ID_OPTI, // 0x1045
ELSA = bindings::PCI_VENDOR_ID_ELSA, // 0x1048
STMICRO = bindings::PCI_VENDOR_ID_STMICRO, // 0x104A
BUSLOGIC = bindings::PCI_VENDOR_ID_BUSLOGIC, // 0x104B
TI = bindings::PCI_VENDOR_ID_TI, // 0x104c
SONY = bindings::PCI_VENDOR_ID_SONY, // 0x104d
WINBOND2 = bindings::PCI_VENDOR_ID_WINBOND2, // 0x1050
ANIGMA = bindings::PCI_VENDOR_ID_ANIGMA, // 0x1051
EFAR = bindings::PCI_VENDOR_ID_EFAR, // 0x1055
MOTOROLA = bindings::PCI_VENDOR_ID_MOTOROLA, // 0x1057
PROMISE = bindings::PCI_VENDOR_ID_PROMISE, // 0x105a
FOXCONN = bindings::PCI_VENDOR_ID_FOXCONN, // 0x105b
UMC = bindings::PCI_VENDOR_ID_UMC, // 0x1060
PICOPOWER = bindings::PCI_VENDOR_ID_PICOPOWER, // 0x1066
MYLEX = bindings::PCI_VENDOR_ID_MYLEX, // 0x1069
APPLE = bindings::PCI_VENDOR_ID_APPLE, // 0x106b
YAMAHA = bindings::PCI_VENDOR_ID_YAMAHA, // 0x1073
QLOGIC = bindings::PCI_VENDOR_ID_QLOGIC, // 0x1077
CYRIX = bindings::PCI_VENDOR_ID_CYRIX, // 0x1078
CONTAQ = bindings::PCI_VENDOR_ID_CONTAQ, // 0x1080
OLICOM = bindings::PCI_VENDOR_ID_OLICOM, // 0x108d
SUN = bindings::PCI_VENDOR_ID_SUN, // 0x108e
NI = bindings::PCI_VENDOR_ID_NI, // 0x1093
CMD = bindings::PCI_VENDOR_ID_CMD, // 0x1095
BROOKTREE = bindings::PCI_VENDOR_ID_BROOKTREE, // 0x109e
SGI = bindings::PCI_VENDOR_ID_SGI, // 0x10a9
WINBOND = bindings::PCI_VENDOR_ID_WINBOND, // 0x10ad
PLX = bindings::PCI_VENDOR_ID_PLX, // 0x10b5
MADGE = bindings::PCI_VENDOR_ID_MADGE, // 0x10b6
THREECOM = bindings::PCI_VENDOR_ID_3COM, // 0x10b7
AL = bindings::PCI_VENDOR_ID_AL, // 0x10b9
NEOMAGIC = bindings::PCI_VENDOR_ID_NEOMAGIC, // 0x10c8
TCONRAD = bindings::PCI_VENDOR_ID_TCONRAD, // 0x10da
ROHM = bindings::PCI_VENDOR_ID_ROHM, // 0x10db
NVIDIA = bindings::PCI_VENDOR_ID_NVIDIA, // 0x10de
IMS = bindings::PCI_VENDOR_ID_IMS, // 0x10e0
AMCC = bindings::PCI_VENDOR_ID_AMCC, // 0x10e8
AMPERE = bindings::PCI_VENDOR_ID_AMPERE, // 0x1def
INTERG = bindings::PCI_VENDOR_ID_INTERG, // 0x10ea
REALTEK = bindings::PCI_VENDOR_ID_REALTEK, // 0x10ec
XILINX = bindings::PCI_VENDOR_ID_XILINX, // 0x10ee
INIT = bindings::PCI_VENDOR_ID_INIT, // 0x1101
CREATIVE = bindings::PCI_VENDOR_ID_CREATIVE, // 0x1102
TTI = bindings::PCI_VENDOR_ID_TTI, // 0x1103
SIGMA = bindings::PCI_VENDOR_ID_SIGMA, // 0x1105
VIA = bindings::PCI_VENDOR_ID_VIA, // 0x1106
SIEMENS = bindings::PCI_VENDOR_ID_SIEMENS, // 0x110A
VORTEX = bindings::PCI_VENDOR_ID_VORTEX, // 0x1119
EF = bindings::PCI_VENDOR_ID_EF, // 0x111a
IDT = bindings::PCI_VENDOR_ID_IDT, // 0x111d
FORE = bindings::PCI_VENDOR_ID_FORE, // 0x1127
PHILIPS = bindings::PCI_VENDOR_ID_PHILIPS, // 0x1131
EICON = bindings::PCI_VENDOR_ID_EICON, // 0x1133
CISCO = bindings::PCI_VENDOR_ID_CISCO, // 0x1137
ZIATECH = bindings::PCI_VENDOR_ID_ZIATECH, // 0x1138
SYSKONNECT = bindings::PCI_VENDOR_ID_SYSKONNECT, // 0x1148
DIGI = bindings::PCI_VENDOR_ID_DIGI, // 0x114f
XIRCOM = bindings::PCI_VENDOR_ID_XIRCOM, // 0x115d
SERVERWORKS = bindings::PCI_VENDOR_ID_SERVERWORKS, // 0x1166
ALTERA = bindings::PCI_VENDOR_ID_ALTERA, // 0x1172
SBE = bindings::PCI_VENDOR_ID_SBE, // 0x1176
TOSHIBA = bindings::PCI_VENDOR_ID_TOSHIBA, // 0x1179
TOSHIBA_2 = bindings::PCI_VENDOR_ID_TOSHIBA_2, // 0x102f
ATTO = bindings::PCI_VENDOR_ID_ATTO, // 0x117c
RICOH = bindings::PCI_VENDOR_ID_RICOH, // 0x1180
DLINK = bindings::PCI_VENDOR_ID_DLINK, // 0x1186
ARTOP = bindings::PCI_VENDOR_ID_ARTOP, // 0x1191
ZEITNET = bindings::PCI_VENDOR_ID_ZEITNET, // 0x1193
FUJITSU_ME = bindings::PCI_VENDOR_ID_FUJITSU_ME, // 0x119e
MARVELL = bindings::PCI_VENDOR_ID_MARVELL, // 0x11ab
MARVELL_EXT = bindings::PCI_VENDOR_ID_MARVELL_EXT, // 0x1b4b
V3 = bindings::PCI_VENDOR_ID_V3, // 0x11b0
ATT = bindings::PCI_VENDOR_ID_ATT, // 0x11c1
SPECIALIX = bindings::PCI_VENDOR_ID_SPECIALIX, // 0x11cb
ANALOG_DEVICES = bindings::PCI_VENDOR_ID_ANALOG_DEVICES, // 0x11d4
ZORAN = bindings::PCI_VENDOR_ID_ZORAN, // 0x11de
COMPEX = bindings::PCI_VENDOR_ID_COMPEX, // 0x11f6
MICROSEMI = bindings::PCI_VENDOR_ID_MICROSEMI, // 0x11f8
RP = bindings::PCI_VENDOR_ID_RP, // 0x11fe
CYCLADES = bindings::PCI_VENDOR_ID_CYCLADES, // 0x120e
ESSENTIAL = bindings::PCI_VENDOR_ID_ESSENTIAL, // 0x120f
O2 = bindings::PCI_VENDOR_ID_O2, // 0x1217
THREEDX = bindings::PCI_VENDOR_ID_3DFX, // 0x121a
AVM = bindings::PCI_VENDOR_ID_AVM, // 0x1244
STALLION = bindings::PCI_VENDOR_ID_STALLION, // 0x124d
AT = bindings::PCI_VENDOR_ID_AT, // 0x1259
ASIX = bindings::PCI_VENDOR_ID_ASIX, // 0x125b
ESS = bindings::PCI_VENDOR_ID_ESS, // 0x125d
SATSAGEM = bindings::PCI_VENDOR_ID_SATSAGEM, // 0x1267
ENSONIQ = bindings::PCI_VENDOR_ID_ENSONIQ, // 0x1274
TRANSMETA = bindings::PCI_VENDOR_ID_TRANSMETA, // 0x1279
ROCKWELL = bindings::PCI_VENDOR_ID_ROCKWELL, // 0x127A
ITE = bindings::PCI_VENDOR_ID_ITE, // 0x1283
ALTEON = bindings::PCI_VENDOR_ID_ALTEON, // 0x12ae
NVIDIA_SGS = bindings::PCI_VENDOR_ID_NVIDIA_SGS, // 0x12d2
PERICOM = bindings::PCI_VENDOR_ID_PERICOM, // 0x12D8
AUREAL = bindings::PCI_VENDOR_ID_AUREAL, // 0x12eb
ELECTRONICDESIGNGMBH = bindings::PCI_VENDOR_ID_ELECTRONICDESIGNGMBH, // 0x12f8
ESDGMBH = bindings::PCI_VENDOR_ID_ESDGMBH, // 0x12fe
CB = bindings::PCI_VENDOR_ID_CB, // 0x1307
SIIG = bindings::PCI_VENDOR_ID_SIIG, // 0x131f
RADISYS = bindings::PCI_VENDOR_ID_RADISYS, // 0x1331
MICRO_MEMORY = bindings::PCI_VENDOR_ID_MICRO_MEMORY, // 0x1332
DOMEX = bindings::PCI_VENDOR_ID_DOMEX, // 0x134a
INTASHIELD = bindings::PCI_VENDOR_ID_INTASHIELD, // 0x135a
QUATECH = bindings::PCI_VENDOR_ID_QUATECH, // 0x135C
SEALEVEL = bindings::PCI_VENDOR_ID_SEALEVEL, // 0x135e
HYPERCOPE = bindings::PCI_VENDOR_ID_HYPERCOPE, // 0x1365
DIGIGRAM = bindings::PCI_VENDOR_ID_DIGIGRAM, // 0x1369
KAWASAKI = bindings::PCI_VENDOR_ID_KAWASAKI, // 0x136b
CNET = bindings::PCI_VENDOR_ID_CNET, // 0x1371
LMC = bindings::PCI_VENDOR_ID_LMC, // 0x1376
NETGEAR = bindings::PCI_VENDOR_ID_NETGEAR, // 0x1385
APPLICOM = bindings::PCI_VENDOR_ID_APPLICOM, // 0x1389
MOXA = bindings::PCI_VENDOR_ID_MOXA, // 0x1393
CCD = bindings::PCI_VENDOR_ID_CCD, // 0x1397
EXAR = bindings::PCI_VENDOR_ID_EXAR, // 0x13a8
MICROGATE = bindings::PCI_VENDOR_ID_MICROGATE, // 0x13c0
THREEWARE = bindings::PCI_VENDOR_ID_3WARE, // 0x13C1
IOMEGA = bindings::PCI_VENDOR_ID_IOMEGA, // 0x13ca
ABOCOM = bindings::PCI_VENDOR_ID_ABOCOM, // 0x13D1
SUNDANCE = bindings::PCI_VENDOR_ID_SUNDANCE, // 0x13f0
CMEDIA = bindings::PCI_VENDOR_ID_CMEDIA, // 0x13f6
ADVANTECH = bindings::PCI_VENDOR_ID_ADVANTECH, // 0x13fe
MEILHAUS = bindings::PCI_VENDOR_ID_MEILHAUS, // 0x1402
LAVA = bindings::PCI_VENDOR_ID_LAVA, // 0x1407
TIMEDIA = bindings::PCI_VENDOR_ID_TIMEDIA, // 0x1409
ICE = bindings::PCI_VENDOR_ID_ICE, // 0x1412
MICROSOFT = bindings::PCI_VENDOR_ID_MICROSOFT, // 0x1414
OXSEMI = bindings::PCI_VENDOR_ID_OXSEMI, // 0x1415
CHELSIO = bindings::PCI_VENDOR_ID_CHELSIO, // 0x1425
EDIMAX = bindings::PCI_VENDOR_ID_EDIMAX, // 0x1432
ADLINK = bindings::PCI_VENDOR_ID_ADLINK, // 0x144a
SAMSUNG = bindings::PCI_VENDOR_ID_SAMSUNG, // 0x144d
GIGABYTE = bindings::PCI_VENDOR_ID_GIGABYTE, // 0x1458
AMBIT = bindings::PCI_VENDOR_ID_AMBIT, // 0x1468
MYRICOM = bindings::PCI_VENDOR_ID_MYRICOM, // 0x14c1
MEDIATEK = bindings::PCI_VENDOR_ID_MEDIATEK, // 0x14c3
TITAN = bindings::PCI_VENDOR_ID_TITAN, // 0x14D2
PANACOM = bindings::PCI_VENDOR_ID_PANACOM, // 0x14d4
SIPACKETS = bindings::PCI_VENDOR_ID_SIPACKETS, // 0x14d9
AFAVLAB = bindings::PCI_VENDOR_ID_AFAVLAB, // 0x14db
AMPLICON = bindings::PCI_VENDOR_ID_AMPLICON, // 0x14dc
BCM_GVC = bindings::PCI_VENDOR_ID_BCM_GVC, // 0x14a4
BROADCOM = bindings::PCI_VENDOR_ID_BROADCOM, // 0x14e4
TOPIC = bindings::PCI_VENDOR_ID_TOPIC, // 0x151f
MAINPINE = bindings::PCI_VENDOR_ID_MAINPINE, // 0x1522
ENE = bindings::PCI_VENDOR_ID_ENE, // 0x1524
SYBA = bindings::PCI_VENDOR_ID_SYBA, // 0x1592
MORETON = bindings::PCI_VENDOR_ID_MORETON, // 0x15aa
VMWARE = bindings::PCI_VENDOR_ID_VMWARE, // 0x15ad
ZOLTRIX = bindings::PCI_VENDOR_ID_ZOLTRIX, // 0x15b0
MELLANOX = bindings::PCI_VENDOR_ID_MELLANOX, // 0x15b3
DFI = bindings::PCI_VENDOR_ID_DFI, // 0x15bd
QUICKNET = bindings::PCI_VENDOR_ID_QUICKNET, // 0x15e2
ADDIDATA = bindings::PCI_VENDOR_ID_ADDIDATA, // 0x15B8
PDC = bindings::PCI_VENDOR_ID_PDC, // 0x15e9
FARSITE = bindings::PCI_VENDOR_ID_FARSITE, // 0x1619
ARIMA = bindings::PCI_VENDOR_ID_ARIMA, // 0x161f
BROCADE = bindings::PCI_VENDOR_ID_BROCADE, // 0x1657
SIBYTE = bindings::PCI_VENDOR_ID_SIBYTE, // 0x166d
ATHEROS = bindings::PCI_VENDOR_ID_ATHEROS, // 0x168c
NETCELL = bindings::PCI_VENDOR_ID_NETCELL, // 0x169c
CENATEK = bindings::PCI_VENDOR_ID_CENATEK, // 0x16CA
SYNOPSYS = bindings::PCI_VENDOR_ID_SYNOPSYS, // 0x16c3
USR = bindings::PCI_VENDOR_ID_USR, // 0x16ec
VITESSE = bindings::PCI_VENDOR_ID_VITESSE, // 0x1725
LINKSYS = bindings::PCI_VENDOR_ID_LINKSYS, // 0x1737
ALTIMA = bindings::PCI_VENDOR_ID_ALTIMA, // 0x173b
CAVIUM = bindings::PCI_VENDOR_ID_CAVIUM, // 0x177d
TECHWELL = bindings::PCI_VENDOR_ID_TECHWELL, // 0x1797
BELKIN = bindings::PCI_VENDOR_ID_BELKIN, // 0x1799
RDC = bindings::PCI_VENDOR_ID_RDC, // 0x17f3
GLI = bindings::PCI_VENDOR_ID_GLI, // 0x17a0
LENOVO = bindings::PCI_VENDOR_ID_LENOVO, // 0x17aa
QCOM = bindings::PCI_VENDOR_ID_QCOM, // 0x17cb
CDNS = bindings::PCI_VENDOR_ID_CDNS, // 0x17cd
ARECA = bindings::PCI_VENDOR_ID_ARECA, // 0x17d3
S2IO = bindings::PCI_VENDOR_ID_S2IO, // 0x17d5
SITECOM = bindings::PCI_VENDOR_ID_SITECOM, // 0x182d
TOPSPIN = bindings::PCI_VENDOR_ID_TOPSPIN, // 0x1867
COMMTECH = bindings::PCI_VENDOR_ID_COMMTECH, // 0x18f7
SILAN = bindings::PCI_VENDOR_ID_SILAN, // 0x1904
RENESAS = bindings::PCI_VENDOR_ID_RENESAS, // 0x1912
SOLARFLARE = bindings::PCI_VENDOR_ID_SOLARFLARE, // 0x1924
TDI = bindings::PCI_VENDOR_ID_TDI, // 0x192E
NXP = bindings::PCI_VENDOR_ID_NXP, // 0x1957
PASEMI = bindings::PCI_VENDOR_ID_PASEMI, // 0x1959
ATTANSIC = bindings::PCI_VENDOR_ID_ATTANSIC, // 0x1969
JMICRON = bindings::PCI_VENDOR_ID_JMICRON, // 0x197B
KORENIX = bindings::PCI_VENDOR_ID_KORENIX, // 0x1982
HUAWEI = bindings::PCI_VENDOR_ID_HUAWEI, // 0x19e5
NETRONOME = bindings::PCI_VENDOR_ID_NETRONOME, // 0x19ee
QMI = bindings::PCI_VENDOR_ID_QMI, // 0x1a32
AZWAVE = bindings::PCI_VENDOR_ID_AZWAVE, // 0x1a3b
REDHAT_QUMRANET = bindings::PCI_VENDOR_ID_REDHAT_QUMRANET, // 0x1af4
ASMEDIA = bindings::PCI_VENDOR_ID_ASMEDIA, // 0x1b21
REDHAT = bindings::PCI_VENDOR_ID_REDHAT, // 0x1b36
WCHIC = bindings::PCI_VENDOR_ID_WCHIC, // 0x1c00
SILICOM_DENMARK = bindings::PCI_VENDOR_ID_SILICOM_DENMARK, // 0x1c2c
AMAZON_ANNAPURNA_LABS = bindings::PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, // 0x1c36
CIRCUITCO = bindings::PCI_VENDOR_ID_CIRCUITCO, // 0x1cc8
AMAZON = bindings::PCI_VENDOR_ID_AMAZON, // 0x1d0f
ZHAOXIN = bindings::PCI_VENDOR_ID_ZHAOXIN, // 0x1d17
ROCKCHIP = bindings::PCI_VENDOR_ID_ROCKCHIP, // 0x1d87
HYGON = bindings::PCI_VENDOR_ID_HYGON, // 0x1d94
META = bindings::PCI_VENDOR_ID_META, // 0x1d9b
FUNGIBLE = bindings::PCI_VENDOR_ID_FUNGIBLE, // 0x1dad
HXT = bindings::PCI_VENDOR_ID_HXT, // 0x1dbf
TEKRAM = bindings::PCI_VENDOR_ID_TEKRAM, // 0x1de1
RPI = bindings::PCI_VENDOR_ID_RPI, // 0x1de4
ALIBABA = bindings::PCI_VENDOR_ID_ALIBABA, // 0x1ded
CXL = bindings::PCI_VENDOR_ID_CXL, // 0x1e98
TEHUTI = bindings::PCI_VENDOR_ID_TEHUTI, // 0x1fc9
SUNIX = bindings::PCI_VENDOR_ID_SUNIX, // 0x1fd4
HINT = bindings::PCI_VENDOR_ID_HINT, // 0x3388
THREEDLABS = bindings::PCI_VENDOR_ID_3DLABS, // 0x3d3d
NETXEN = bindings::PCI_VENDOR_ID_NETXEN, // 0x4040
AKS = bindings::PCI_VENDOR_ID_AKS, // 0x416c
WCHCN = bindings::PCI_VENDOR_ID_WCHCN, // 0x4348
ACCESSIO = bindings::PCI_VENDOR_ID_ACCESSIO, // 0x494f
S3 = bindings::PCI_VENDOR_ID_S3, // 0x5333
DUNORD = bindings::PCI_VENDOR_ID_DUNORD, // 0x5544
DCI = bindings::PCI_VENDOR_ID_DCI, // 0x6666
GLENFLY = bindings::PCI_VENDOR_ID_GLENFLY, // 0x6766
INTEL = bindings::PCI_VENDOR_ID_INTEL, // 0x8086
WANGXUN = bindings::PCI_VENDOR_ID_WANGXUN, // 0x8088
SCALEMP = bindings::PCI_VENDOR_ID_SCALEMP, // 0x8686
COMPUTONE = bindings::PCI_VENDOR_ID_COMPUTONE, // 0x8e0e
KTI = bindings::PCI_VENDOR_ID_KTI, // 0x8e2e
ADAPTEC = bindings::PCI_VENDOR_ID_ADAPTEC, // 0x9004
ADAPTEC2 = bindings::PCI_VENDOR_ID_ADAPTEC2, // 0x9005
HOLTEK = bindings::PCI_VENDOR_ID_HOLTEK, // 0x9412
NETMOS = bindings::PCI_VENDOR_ID_NETMOS, // 0x9710
THREECOM_2 = bindings::PCI_VENDOR_ID_3COM_2, // 0xa727
SOLIDRUN = bindings::PCI_VENDOR_ID_SOLIDRUN, // 0xd063
DIGIUM = bindings::PCI_VENDOR_ID_DIGIUM, // 0xd161
TIGERJET = bindings::PCI_VENDOR_ID_TIGERJET, // 0xe159
XILINX_RME = bindings::PCI_VENDOR_ID_XILINX_RME, // 0xea60
XEN = bindings::PCI_VENDOR_ID_XEN, // 0x5853
OCZ = bindings::PCI_VENDOR_ID_OCZ, // 0x1b85
NCUBE = bindings::PCI_VENDOR_ID_NCUBE, // 0x10ff
}
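
The variants above are exposed through the dedicated pci::Vendor type, so driver code can refer to vendors by name instead of raw bindings:: values (the samples further down use pci::Vendor::REDHAT this way). As a rough sketch only, assuming the usual kernel::pci_device_table! wrapper (the hunks below show that macro only from its arguments on) and with a made-up MyDriver type and 0xabcd device ID:

```rust
// Hedged sketch, not part of this series: a PCI ID table entry built from the
// named vendor constant, plus a probe log line where `{}` uses the Display
// implementation (vendor name) while `{:?}` would print the raw ID number.
kernel::pci_device_table!(
    PCI_TABLE,
    MODULE_PCI_TABLE,
    <MyDriver as pci::Driver>::IdInfo,
    [(pci::DeviceId::from_id(pci::Vendor::REDHAT, 0xabcd), ())]
);

// Inside MyDriver::probe(), with `pdev: &pci::Device<Core>`:
dev_info!(
    pdev.as_ref(),
    "probing {} device {:#x}\n",
    pdev.vendor_id(),
    pdev.device_id()
);
```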

rust/kernel/platform.rs

@@ -10,6 +10,7 @@
driver,
error::{from_result, to_result, Result},
io::{mem::IoRequest, Resource},
irq::{self, IrqRequest},
of,
prelude::*,
types::Opaque,
@@ -284,6 +285,181 @@ pub fn io_request_by_name(&self, name: &CStr) -> Option<IoRequest<'_>> {
}
}
macro_rules! define_irq_accessor_by_index {
(
$(#[$meta:meta])* $fn_name:ident,
$request_fn:ident,
$reg_type:ident,
$handler_trait:ident
) => {
$(#[$meta])*
pub fn $fn_name<'a, T: irq::$handler_trait + 'static>(
&'a self,
flags: irq::Flags,
index: u32,
name: &'static CStr,
handler: impl PinInit<T, Error> + 'a,
) -> Result<impl PinInit<irq::$reg_type<T>, Error> + 'a> {
let request = self.$request_fn(index)?;
Ok(irq::$reg_type::<T>::new(
request,
flags,
name,
handler,
))
}
};
}
macro_rules! define_irq_accessor_by_name {
(
$(#[$meta:meta])* $fn_name:ident,
$request_fn:ident,
$reg_type:ident,
$handler_trait:ident
) => {
$(#[$meta])*
pub fn $fn_name<'a, T: irq::$handler_trait + 'static>(
&'a self,
flags: irq::Flags,
irq_name: &CStr,
name: &'static CStr,
handler: impl PinInit<T, Error> + 'a,
) -> Result<impl PinInit<irq::$reg_type<T>, Error> + 'a> {
let request = self.$request_fn(irq_name)?;
Ok(irq::$reg_type::<T>::new(
request,
flags,
name,
handler,
))
}
};
}
impl Device<Bound> {
/// Returns an [`IrqRequest`] for the IRQ at the given index, if any.
pub fn irq_by_index(&self, index: u32) -> Result<IrqRequest<'_>> {
// SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`.
let irq = unsafe { bindings::platform_get_irq(self.as_raw(), index) };
if irq < 0 {
return Err(Error::from_errno(irq));
}
// SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
}
/// Returns an [`IrqRequest`] for the IRQ at the given index, but does not
/// print an error if the IRQ cannot be obtained.
pub fn optional_irq_by_index(&self, index: u32) -> Result<IrqRequest<'_>> {
// SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`.
let irq = unsafe { bindings::platform_get_irq_optional(self.as_raw(), index) };
if irq < 0 {
return Err(Error::from_errno(irq));
}
// SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
}
/// Returns an [`IrqRequest`] for the IRQ with the given name, if any.
pub fn irq_by_name(&self, name: &CStr) -> Result<IrqRequest<'_>> {
// SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`.
let irq = unsafe { bindings::platform_get_irq_byname(self.as_raw(), name.as_char_ptr()) };
if irq < 0 {
return Err(Error::from_errno(irq));
}
// SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
}
/// Returns an [`IrqRequest`] for the IRQ with the given name, but does not
/// print an error if the IRQ cannot be obtained.
pub fn optional_irq_by_name(&self, name: &CStr) -> Result<IrqRequest<'_>> {
// SAFETY: `self.as_raw` returns a valid pointer to a `struct platform_device`.
let irq = unsafe {
bindings::platform_get_irq_byname_optional(self.as_raw(), name.as_char_ptr())
};
if irq < 0 {
return Err(Error::from_errno(irq));
}
// SAFETY: `irq` is guaranteed to be a valid IRQ number for `&self`.
Ok(unsafe { IrqRequest::new(self.as_ref(), irq as u32) })
}
define_irq_accessor_by_index!(
/// Returns an [`irq::Registration`] for the IRQ at the given index.
request_irq_by_index,
irq_by_index,
Registration,
Handler
);
define_irq_accessor_by_name!(
/// Returns an [`irq::Registration`] for the IRQ with the given name.
request_irq_by_name,
irq_by_name,
Registration,
Handler
);
define_irq_accessor_by_index!(
/// Does the same as [`Self::request_irq_by_index`], except that it does
/// not print an error message if the IRQ cannot be obtained.
request_optional_irq_by_index,
optional_irq_by_index,
Registration,
Handler
);
define_irq_accessor_by_name!(
/// Does the same as [`Self::request_irq_by_name`], except that it does
/// not print an error message if the IRQ cannot be obtained.
request_optional_irq_by_name,
optional_irq_by_name,
Registration,
Handler
);
define_irq_accessor_by_index!(
/// Returns an [`irq::ThreadedRegistration`] for the IRQ at the given index.
request_threaded_irq_by_index,
irq_by_index,
ThreadedRegistration,
ThreadedHandler
);
define_irq_accessor_by_name!(
/// Returns an [`irq::ThreadedRegistration`] for the IRQ with the given name.
request_threaded_irq_by_name,
irq_by_name,
ThreadedRegistration,
ThreadedHandler
);
define_irq_accessor_by_index!(
/// Does the same as [`Self::request_threaded_irq_by_index`], except
/// that it does not print an error message if the IRQ cannot be
/// obtained.
request_optional_threaded_irq_by_index,
optional_irq_by_index,
ThreadedRegistration,
ThreadedHandler
);
define_irq_accessor_by_name!(
/// Does the same as [`Self::request_threaded_irq_by_name`], except that
/// it does not print an error message if the IRQ cannot be obtained.
request_optional_threaded_irq_by_name,
optional_irq_by_name,
ThreadedRegistration,
ThreadedHandler
);
}
// SAFETY: `Device` is a transparent wrapper of a type that doesn't depend on `Device`'s generic
// argument.
kernel::impl_device_context_deref!(unsafe { Device });
@@ -292,7 +468,7 @@ pub fn io_request_by_name(&self, name: &CStr) -> Option<IoRequest<'_>> {
impl crate::dma::Device for Device<device::Core> {}
// SAFETY: Instances of `Device` are always reference-counted.
unsafe impl crate::types::AlwaysRefCounted for Device {
unsafe impl crate::sync::aref::AlwaysRefCounted for Device {
fn inc_ref(&self) {
// SAFETY: The existence of a shared reference guarantees that the refcount is non-zero.
unsafe { bindings::get_device(self.as_ref().as_raw()) };
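
The IRQ accessors added to platform::Device<Bound> above let a driver go from an IRQ index or name straight to a live registration during probe(). A rough usage sketch follows; it is not part of the patch, and the irq::Handler surface, the Flags constant and the way the handler value becomes a pin-initializer are assumptions, since only the accessor signatures appear in these hunks:

```rust
// Hedged sketch only; names marked "assumed" are not taken from this diff.
struct MyHandler;

impl irq::Handler for MyHandler {
    // Assumed signature; this series passes a &Device<Bound> cookie to handlers.
    fn handle(&self, _dev: &Device<Bound>) -> irq::IrqReturn {
        irq::IrqReturn::Handled
    }
}

// In platform::Driver::probe(), where `pdev: &platform::Device<Core>` also
// dereferences to Device<Bound>:
let registration = KBox::pin_init(
    pdev.request_irq_by_index(
        irq::Flags::SHARED,   // assumed flag constant
        0,                    // IRQ index
        c_str!("my_driver"),  // name shown in /proc/interrupts
        MyHandler,            // assumed to be accepted as `impl PinInit<MyHandler, Error>`
    )?,
    GFP_KERNEL,
)?;
// The registration must be kept alive (e.g. in the driver's pinned data) for
// as long as the interrupt may fire.
```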

rust/kernel/processor.rs (new file)

@@ -0,0 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
//! Processor related primitives.
//!
//! C header: [`include/linux/processor.h`](srctree/include/linux/processor.h)
/// Lower CPU power consumption or yield to a hyperthreaded twin processor.
///
/// It also happens to serve as a compiler barrier.
#[inline]
pub fn cpu_relax() {
// SAFETY: Always safe to call.
unsafe { bindings::cpu_relax() }
}
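
cpu_relax() is the building block for short busy-wait loops, such as the read_poll_timeout() helper added in io::poll by this series. A hand-rolled sketch of the pattern, with an illustrative status-read closure and the kernel::processor module path taken from the file above (a real driver would also bound the loop with a timeout):

```rust
// Illustrative busy-wait: spin until a hardware "ready" bit is observed,
// relaxing the CPU (and its SMT sibling) between reads.
fn wait_for_ready(read_status: impl Fn() -> u32) {
    while read_status() & 0x1 == 0 {
        kernel::processor::cpu_relax();
    }
}
```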

samples/rust/Kconfig

@@ -62,6 +62,28 @@ config SAMPLE_RUST_DMA
If unsure, say N.
config SAMPLE_RUST_DEBUGFS
tristate "DebugFS Test Module"
depends on DEBUG_FS
help
This option builds the Rust DebugFS Test module sample.
To compile this as a module, choose M here:
the module will be called rust_debugfs.
If unsure, say N.
config SAMPLE_RUST_DEBUGFS_SCOPED
tristate "Scoped DebugFS Test Module"
depends on DEBUG_FS
help
This option builds the Rust Scoped DebugFS Test module sample.
To compile this as a module, choose M here:
the module will be called rust_debugfs_scoped.
If unsure, say N.
config SAMPLE_RUST_DRIVER_PCI
tristate "PCI Driver"
depends on PCI

samples/rust/Makefile

@@ -4,6 +4,8 @@ ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_SAMPLE_RUST_MINIMAL) += rust_minimal.o
obj-$(CONFIG_SAMPLE_RUST_MISC_DEVICE) += rust_misc_device.o
obj-$(CONFIG_SAMPLE_RUST_PRINT) += rust_print.o
obj-$(CONFIG_SAMPLE_RUST_DEBUGFS) += rust_debugfs.o
obj-$(CONFIG_SAMPLE_RUST_DEBUGFS_SCOPED) += rust_debugfs_scoped.o
obj-$(CONFIG_SAMPLE_RUST_DMA) += rust_dma.o
obj-$(CONFIG_SAMPLE_RUST_DRIVER_PCI) += rust_driver_pci.o
obj-$(CONFIG_SAMPLE_RUST_DRIVER_PLATFORM) += rust_driver_platform.o

samples/rust/rust_debugfs.rs (new file)

@@ -0,0 +1,151 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Google LLC.
//! Sample DebugFS exporting platform driver
//!
//! To successfully probe this driver with ACPI, use an SSDT that looks like
//!
//! ```dsl
//! DefinitionBlock ("", "SSDT", 2, "TEST", "VIRTACPI", 0x00000001)
//! {
//! Scope (\_SB)
//! {
//! Device (T432)
//! {
//! Name (_HID, "LNUXBEEF") // ACPI hardware ID to match
//! Name (_UID, 1)
//! Name (_STA, 0x0F) // Device present, enabled
//! Name (_DSD, Package () { // Sample attribute
//! ToUUID("daffd814-6eba-4d8c-8a91-bc9bbf4aa301"),
//! Package() {
//! Package(2) {"compatible", "sample-debugfs"}
//! }
//! })
//! Name (_CRS, ResourceTemplate ()
//! {
//! Memory32Fixed (ReadWrite, 0xFED00000, 0x1000)
//! })
//! }
//! }
//! }
//! ```
use core::str::FromStr;
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering;
use kernel::c_str;
use kernel::debugfs::{Dir, File};
use kernel::new_mutex;
use kernel::prelude::*;
use kernel::sync::Mutex;
use kernel::{acpi, device::Core, of, platform, str::CString, types::ARef};
kernel::module_platform_driver! {
type: RustDebugFs,
name: "rust_debugfs",
authors: ["Matthew Maurer"],
description: "Rust DebugFS usage sample",
license: "GPL",
}
#[pin_data]
struct RustDebugFs {
pdev: ARef<platform::Device>,
// As we only hold these for drop effect (to remove the directory/files), we have a leading
// underscore to indicate to the compiler that we don't expect to use this field directly.
_debugfs: Dir,
#[pin]
_compatible: File<CString>,
#[pin]
counter: File<AtomicUsize>,
#[pin]
inner: File<Mutex<Inner>>,
}
#[derive(Debug)]
struct Inner {
x: u32,
y: u32,
}
impl FromStr for Inner {
type Err = Error;
fn from_str(s: &str) -> Result<Self> {
let mut parts = s.split_whitespace();
let x = parts
.next()
.ok_or(EINVAL)?
.parse::<u32>()
.map_err(|_| EINVAL)?;
let y = parts
.next()
.ok_or(EINVAL)?
.parse::<u32>()
.map_err(|_| EINVAL)?;
if parts.next().is_some() {
return Err(EINVAL);
}
Ok(Inner { x, y })
}
}
kernel::acpi_device_table!(
ACPI_TABLE,
MODULE_ACPI_TABLE,
<RustDebugFs as platform::Driver>::IdInfo,
[(acpi::DeviceId::new(c_str!("LNUXBEEF")), ())]
);
impl platform::Driver for RustDebugFs {
type IdInfo = ();
const OF_ID_TABLE: Option<of::IdTable<Self::IdInfo>> = None;
const ACPI_ID_TABLE: Option<acpi::IdTable<Self::IdInfo>> = Some(&ACPI_TABLE);
fn probe(
pdev: &platform::Device<Core>,
_info: Option<&Self::IdInfo>,
) -> Result<Pin<KBox<Self>>> {
let result = KBox::try_pin_init(RustDebugFs::new(pdev), GFP_KERNEL)?;
// We can still mutate fields through the files which are atomic or mutexed:
result.counter.store(91, Ordering::Relaxed);
{
let mut guard = result.inner.lock();
guard.x = guard.y;
guard.y = 42;
}
Ok(result)
}
}
impl RustDebugFs {
fn build_counter(dir: &Dir) -> impl PinInit<File<AtomicUsize>> + '_ {
dir.read_write_file(c_str!("counter"), AtomicUsize::new(0))
}
fn build_inner(dir: &Dir) -> impl PinInit<File<Mutex<Inner>>> + '_ {
dir.read_write_file(c_str!("pair"), new_mutex!(Inner { x: 3, y: 10 }))
}
fn new(pdev: &platform::Device<Core>) -> impl PinInit<Self, Error> + '_ {
let debugfs = Dir::new(c_str!("sample_debugfs"));
let dev = pdev.as_ref();
try_pin_init! {
Self {
_compatible <- debugfs.read_only_file(
c_str!("compatible"),
dev.fwnode()
.ok_or(ENOENT)?
.property_read::<CString>(c_str!("compatible"))
.required_by(dev)?,
),
counter <- Self::build_counter(&debugfs),
inner <- Self::build_inner(&debugfs),
_debugfs: debugfs,
pdev: pdev.into(),
}
}
}
}
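
Stripped of the platform-driver scaffolding, the File-type-based pattern used above boils down to: create a Dir, pin-initialize File<T> handles from it inside a #[pin_data] struct, and rely on Drop to remove the entries again. A condensed sketch mirroring the sample (the type and file names are illustrative):

```rust
// Condensed shape of the File-type-based API, mirroring the sample above.
#[pin_data]
struct MyDebugFs {
    _dir: Dir,                  // dropping this removes the directory
    #[pin]
    counter: File<AtomicUsize>, // read/write file backed by an atomic
}

fn new_debugfs() -> impl PinInit<MyDebugFs> {
    let dir = Dir::new(c_str!("my_sample"));
    pin_init! {
        MyDebugFs {
            counter <- dir.read_write_file(c_str!("counter"), AtomicUsize::new(0)),
            _dir: dir,
        }
    }
}
```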

samples/rust/rust_debugfs_scoped.rs (new file)

@@ -0,0 +1,134 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2025 Google LLC.
//! Sample DebugFS exporting platform driver that demonstrates the use of
//! `Scope::dir` to create a variety of files without the need to separately
//! track them all.
use core::sync::atomic::AtomicUsize;
use kernel::debugfs::{Dir, Scope};
use kernel::prelude::*;
use kernel::sync::Mutex;
use kernel::{c_str, new_mutex, str::CString};
module! {
type: RustScopedDebugFs,
name: "rust_debugfs_scoped",
authors: ["Matthew Maurer"],
description: "Rust Scoped DebugFS usage sample",
license: "GPL",
}
fn remove_file_write(
mod_data: &ModuleData,
reader: &mut kernel::uaccess::UserSliceReader,
) -> Result {
let mut buf = [0u8; 128];
if reader.len() >= buf.len() {
return Err(EINVAL);
}
let n = reader.len();
reader.read_slice(&mut buf[..n])?;
let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?.trim();
let nul_idx = s.len();
buf[nul_idx] = 0;
let to_remove = CStr::from_bytes_with_nul(&buf[..nul_idx + 1]).map_err(|_| EINVAL)?;
mod_data
.devices
.lock()
.retain(|device| device.name.as_bytes() != to_remove.as_bytes());
Ok(())
}
fn create_file_write(
mod_data: &ModuleData,
reader: &mut kernel::uaccess::UserSliceReader,
) -> Result {
let mut buf = [0u8; 128];
if reader.len() > buf.len() {
return Err(EINVAL);
}
let n = reader.len();
reader.read_slice(&mut buf[..n])?;
let mut nums = KVec::new();
let s = core::str::from_utf8(&buf[..n]).map_err(|_| EINVAL)?.trim();
let mut items = s.split_whitespace();
let name_str = items.next().ok_or(EINVAL)?;
let name = CString::try_from_fmt(fmt!("{name_str}"))?;
let file_name = CString::try_from_fmt(fmt!("{name_str}"))?;
for sub in items {
nums.push(
AtomicUsize::new(sub.parse().map_err(|_| EINVAL)?),
GFP_KERNEL,
)?;
}
let scope = KBox::pin_init(
mod_data
.device_dir
.scope(DeviceData { name, nums }, &file_name, |dev_data, dir| {
for (idx, val) in dev_data.nums.iter().enumerate() {
let Ok(name) = CString::try_from_fmt(fmt!("{idx}")) else {
return;
};
dir.read_write_file(&name, val);
}
}),
GFP_KERNEL,
)?;
(*mod_data.devices.lock()).push(scope, GFP_KERNEL)?;
Ok(())
}
struct RustScopedDebugFs {
_data: Pin<KBox<Scope<ModuleData>>>,
}
#[pin_data]
struct ModuleData {
device_dir: Dir,
#[pin]
devices: Mutex<KVec<Pin<KBox<Scope<DeviceData>>>>>,
}
impl ModuleData {
fn init(device_dir: Dir) -> impl PinInit<Self> {
pin_init! {
Self {
device_dir: device_dir,
devices <- new_mutex!(KVec::new())
}
}
}
}
struct DeviceData {
name: CString,
nums: KVec<AtomicUsize>,
}
fn init_control(base_dir: &Dir, dyn_dirs: Dir) -> impl PinInit<Scope<ModuleData>> + '_ {
base_dir.scope(
ModuleData::init(dyn_dirs),
c_str!("control"),
|data, dir| {
dir.write_only_callback_file(c_str!("create"), data, &create_file_write);
dir.write_only_callback_file(c_str!("remove"), data, &remove_file_write);
},
)
}
impl kernel::Module for RustScopedDebugFs {
fn init(_module: &'static kernel::ThisModule) -> Result<Self> {
let base_dir = Dir::new(c_str!("rust_scoped_debugfs"));
let dyn_dirs = base_dir.subdir(c_str!("dynamic"));
Ok(Self {
_data: KBox::pin_init(init_control(&base_dir, dyn_dirs), GFP_KERNEL)?,
})
}
}
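
The directory-scope-based API above inverts the ownership model: rather than holding one File handle per entry, the driver hands its data to a Scope and populates a whole directory from a closure; dropping the Scope tears the subtree down again. In miniature, as a hedged sketch that assumes an existing `dir: Dir`:

```rust
// Miniature of the directory-scope-based API: one pinned Scope owns the data
// and every debugfs file created for it inside the closure.
let scope = KBox::pin_init(
    dir.scope(AtomicUsize::new(0), c_str!("stats"), |value, dir| {
        // Files created here borrow `value`; they are removed when `scope` drops.
        dir.read_write_file(c_str!("count"), value);
    }),
    GFP_KERNEL,
)?;
```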

samples/rust/rust_dma.rs

@@ -5,7 +5,6 @@
//! To make this driver probe, QEMU must be run with `-device pci-testdev`.
use kernel::{
bindings,
device::Core,
dma::{CoherentAllocation, Device, DmaMask},
pci,
@@ -45,10 +44,7 @@ unsafe impl kernel::transmute::FromBytes for MyStruct {}
PCI_TABLE,
MODULE_PCI_TABLE,
<DmaSampleDriver as pci::Driver>::IdInfo,
[(
pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, 0x5),
()
)]
[(pci::DeviceId::from_id(pci::Vendor::REDHAT, 0x5), ())]
);
impl pci::Driver for DmaSampleDriver {

samples/rust/rust_driver_auxiliary.rs

@@ -5,7 +5,7 @@
//! To make this driver probe, QEMU must be run with `-device pci-testdev`.
use kernel::{
auxiliary, bindings, c_str, device::Core, driver, error::Error, pci, prelude::*, InPlaceModule,
auxiliary, c_str, device::Core, driver, error::Error, pci, prelude::*, InPlaceModule,
};
use pin_init::PinInit;
@@ -50,10 +50,7 @@ struct ParentDriver {
PCI_TABLE,
MODULE_PCI_TABLE,
<ParentDriver as pci::Driver>::IdInfo,
[(
pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, 0x5),
()
)]
[(pci::DeviceId::from_id(pci::Vendor::REDHAT, 0x5), ())]
);
impl pci::Driver for ParentDriver {
@@ -81,11 +78,12 @@ fn connect(adev: &auxiliary::Device) -> Result<()> {
let parent = adev.parent().ok_or(EINVAL)?;
let pdev: &pci::Device = parent.try_into()?;
let vendor = pdev.vendor_id();
dev_info!(
adev.as_ref(),
"Connect auxiliary {} with parent: VendorID={:#x}, DeviceID={:#x}\n",
"Connect auxiliary {} with parent: VendorID={}, DeviceID={:#x}\n",
adev.id(),
pdev.vendor_id(),
vendor,
pdev.device_id()
);

samples/rust/rust_driver_pci.rs

@@ -4,7 +4,7 @@
//!
//! To make this driver probe, QEMU must be run with `-device pci-testdev`.
use kernel::{bindings, c_str, device::Core, devres::Devres, pci, prelude::*, types::ARef};
use kernel::{c_str, device::Core, devres::Devres, pci, prelude::*, sync::aref::ARef};
struct Regs;
@@ -38,7 +38,7 @@ struct SampleDriver {
MODULE_PCI_TABLE,
<SampleDriver as pci::Driver>::IdInfo,
[(
pci::DeviceId::from_id(bindings::PCI_VENDOR_ID_REDHAT, 0x5),
pci::DeviceId::from_id(pci::Vendor::REDHAT, 0x5),
TestIndex::NO_EVENTFD
)]
);
@@ -66,10 +66,11 @@ impl pci::Driver for SampleDriver {
const ID_TABLE: pci::IdTable<Self::IdInfo> = &PCI_TABLE;
fn probe(pdev: &pci::Device<Core>, info: &Self::IdInfo) -> Result<Pin<KBox<Self>>> {
let vendor = pdev.vendor_id();
dev_dbg!(
pdev.as_ref(),
"Probe Rust PCI driver sample (PCI ID: 0x{:x}, 0x{:x}).\n",
pdev.vendor_id(),
"Probe Rust PCI driver sample (PCI ID: {}, 0x{:x}).\n",
vendor,
pdev.device_id()
);

samples/rust/rust_driver_platform.rs

@@ -72,7 +72,7 @@
of, platform,
prelude::*,
str::CString,
types::ARef,
sync::aref::ARef,
};
struct SampleDriver {