mirror of https://github.com/torvalds/linux.git
thunderbolt: Changes for v6.18 merge window
This includes following USB4/Thunderbolt changes for the v6.18 merge window: - HMAC hashing improvements - Switch to use Linux Foundation IDs for XDomain discovery - Use is_pciehp instead of is_hotplug_bridge - Fixes for various kernel-doc issues - Fix use-after-free in DP tunneling error path. I'm sending the UAF fix with this pull request because it came quite late and I would like to give it some exposure before it lands the mainline. All these except the UAF fix have been in linux-next with no reported issues. -----BEGIN PGP SIGNATURE----- iQJUBAABCgA+FiEEVTdhRGBbNzLrSUBaAP2fSd+ZWKAFAmjVCmAgHG1pa2Eud2Vz dGVyYmVyZ0BsaW51eC5pbnRlbC5jb20ACgkQAP2fSd+ZWKAVtw/7BySDtLjnahgk ca1b1ydoeRXNUYubnRxiHKOTN4+an4IbXMoBbmxt3Zl4BUF1W4AOvmTHCt0uifJX 085tPl5hmuA4Nhi5MsrcJ+dk/G7VoS4JOHm8YZvVlvs4c2/Mh5KK1/QjEQEd4r/s vZ8TIYqpkGxzDDYzgSAQMW0o0fILcysCH3o4L4G+5rHlDrhvwDH11MPjnOKqnOEu f1I6Dbn1OC9E48bNYw5mhdfGweC6Br3rM2ycjSIoKFPzlyzYnqD8FZgGsN5YrGvU BMZRbV30j2kjIZNJtZ3e1SuztQ/kTrWca1mn+E9FAQPou/IpYTDCL6A43sJmLAw0 qEMqq3JrsC4glzj5KpX/AnQnUNe+GSUmEtkDrYKuCSz8+9F1NBNqyn918fFOZ6fk JlSN9bOR57rCxCdL3HgXS6oJHGa74zxU2DTvEhjoqfbt5NsmPy82a961vhfjzRFW KdA2cTjrDOPruSYyQ0vvbWzDs/svUPYCdvQq1pFnzbp/ui6X/hqiU4Zqitp4t0NF sfKuKjGY5GwZVBtQlhXyFgJvI1DAupfLQqP80N2WO0EaUMzKsTw1o90XrP1ZO5ik pTmRTlBmg0GaJncW++e2ZjbiJ5RyD7zYN6GCFPlyA3uBVM3T0AOGrijhjleKqoBS cKgeR/5hZNndhPN/pJqv3rrGNRLC9po= =bZIH -----END PGP SIGNATURE----- Merge tag 'thunderbolt-for-v6.18-rc1' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt into usb-next Mika writes: thunderbolt: Changes for v6.18 merge window This includes following USB4/Thunderbolt changes for the v6.18 merge window: - HMAC hashing improvements - Switch to use Linux Foundation IDs for XDomain discovery - Use is_pciehp instead of is_hotplug_bridge - Fixes for various kernel-doc issues - Fix use-after-free in DP tunneling error path. I'm sending the UAF fix with this pull request because it came quite late and I would like to give it some exposure before it lands the mainline. All these except the UAF fix have been in linux-next with no reported issues. * tag 'thunderbolt-for-v6.18-rc1' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt: (33 commits) thunderbolt: Fix use-after-free in tb_dp_dprx_work thunderbolt: Update thunderbolt.h header file thunderbolt: Update xdomain.c function documentation thunderbolt: Update usb4_port.c function documentation thunderbolt: Update usb4.c function documentation thunderbolt: Update tunnel.h function documentation thunderbolt: Update tunnel.c function documentation thunderbolt: Update tmu.c function documentation thunderbolt: Add missing documentation in tb.h thunderbolt: Update tb.h function documentation thunderbolt: Update tb.c function documentation thunderbolt: Update switch.c function documentation thunderbolt: Update retimer.c function documentation thunderbolt: Update property.c function documentation thunderbolt: Update path.c function documentation thunderbolt: Update nvm.c function documentation thunderbolt: Add missing documentation in nhi_regs.h ring_desc structure thunderbolt: Update nhi.c function documentation thunderbolt: Update lc.c function documentation thunderbolt: Update eeprom.c function documentation ...
This commit is contained in:
commit
ef351f8e39
5
CREDITS
5
CREDITS
|
|
@ -1890,6 +1890,11 @@ S: Reading
|
|||
S: RG6 2NU
|
||||
S: United Kingdom
|
||||
|
||||
N: Michael Jamet
|
||||
E: michael.jamet@intel.com
|
||||
D: Thunderbolt/USB4 driver maintainer
|
||||
D: Thunderbolt/USB4 networking driver maintainer
|
||||
|
||||
N: Dave Jeffery
|
||||
E: dhjeffery@gmail.com
|
||||
D: SCSI hacks and IBM ServeRAID RAID driver maintenance
|
||||
|
|
|
|||
|
|
@ -25138,7 +25138,6 @@ F: drivers/thunderbolt/dma_test.c
|
|||
|
||||
THUNDERBOLT DRIVER
|
||||
M: Andreas Noever <andreas.noever@gmail.com>
|
||||
M: Michael Jamet <michael.jamet@intel.com>
|
||||
M: Mika Westerberg <westeri@kernel.org>
|
||||
M: Yehezkel Bernat <YehezkelShB@gmail.com>
|
||||
L: linux-usb@vger.kernel.org
|
||||
|
|
@ -25149,7 +25148,6 @@ F: drivers/thunderbolt/
|
|||
F: include/linux/thunderbolt.h
|
||||
|
||||
THUNDERBOLT NETWORK DRIVER
|
||||
M: Michael Jamet <michael.jamet@intel.com>
|
||||
M: Mika Westerberg <westeri@kernel.org>
|
||||
M: Yehezkel Bernat <YehezkelShB@gmail.com>
|
||||
L: netdev@vger.kernel.org
|
||||
|
|
|
|||
|
|
@ -3829,7 +3829,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, 0xcf80, quirk_no_pm_reset);
|
|||
*/
|
||||
static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
|
||||
{
|
||||
if (pdev->is_hotplug_bridge &&
|
||||
if (pdev->is_pciehp &&
|
||||
(pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
|
||||
pdev->revision <= 1))
|
||||
pdev->no_msi = 1;
|
||||
|
|
|
|||
|
|
@ -4,8 +4,8 @@ menuconfig USB4
|
|||
depends on PCI
|
||||
select APPLE_PROPERTIES if EFI_STUB && X86
|
||||
select CRC32
|
||||
select CRYPTO
|
||||
select CRYPTO_HASH
|
||||
select CRYPTO_LIB_SHA256
|
||||
select CRYPTO_LIB_UTILS
|
||||
select NVMEM
|
||||
help
|
||||
USB4 and Thunderbolt driver. USB4 is the public specification
|
||||
|
|
|
|||
|
|
@ -86,7 +86,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
|
|||
* @nhi ACPI node. For each reference a device link is added. The link
|
||||
* is automatically removed by the driver core.
|
||||
*
|
||||
* Returns %true if at least one link was created.
|
||||
* Returns %true if at least one link was created, %false otherwise.
|
||||
*/
|
||||
bool tb_acpi_add_links(struct tb_nhi *nhi)
|
||||
{
|
||||
|
|
@ -113,8 +113,10 @@ bool tb_acpi_add_links(struct tb_nhi *nhi)
|
|||
/**
|
||||
* tb_acpi_is_native() - Did the platform grant native TBT/USB4 control
|
||||
*
|
||||
* Returns %true if the platform granted OS native control over
|
||||
* TBT/USB4. In this case software based connection manager can be used,
|
||||
* Return: %true if the platform granted OS native control over
|
||||
* TBT/USB4, %false otherwise.
|
||||
*
|
||||
* When returned %true, software based connection manager can be used,
|
||||
* otherwise there is firmware based connection manager running.
|
||||
*/
|
||||
bool tb_acpi_is_native(void)
|
||||
|
|
@ -126,8 +128,8 @@ bool tb_acpi_is_native(void)
|
|||
/**
|
||||
* tb_acpi_may_tunnel_usb3() - Is USB3 tunneling allowed by the platform
|
||||
*
|
||||
* When software based connection manager is used, this function
|
||||
* returns %true if platform allows native USB3 tunneling.
|
||||
* Return: %true if software based connection manager is used and
|
||||
* platform allows native USB 3.x tunneling, %false otherwise.
|
||||
*/
|
||||
bool tb_acpi_may_tunnel_usb3(void)
|
||||
{
|
||||
|
|
@ -139,8 +141,8 @@ bool tb_acpi_may_tunnel_usb3(void)
|
|||
/**
|
||||
* tb_acpi_may_tunnel_dp() - Is DisplayPort tunneling allowed by the platform
|
||||
*
|
||||
* When software based connection manager is used, this function
|
||||
* returns %true if platform allows native DP tunneling.
|
||||
* Return: %true if software based connection manager is used and
|
||||
* platform allows native DP tunneling, %false otherwise.
|
||||
*/
|
||||
bool tb_acpi_may_tunnel_dp(void)
|
||||
{
|
||||
|
|
@ -152,8 +154,8 @@ bool tb_acpi_may_tunnel_dp(void)
|
|||
/**
|
||||
* tb_acpi_may_tunnel_pcie() - Is PCIe tunneling allowed by the platform
|
||||
*
|
||||
* When software based connection manager is used, this function
|
||||
* returns %true if platform allows native PCIe tunneling.
|
||||
* Return: %true if software based connection manager is used and
|
||||
* platform allows native PCIe tunneling, %false otherwise.
|
||||
*/
|
||||
bool tb_acpi_may_tunnel_pcie(void)
|
||||
{
|
||||
|
|
@ -165,8 +167,8 @@ bool tb_acpi_may_tunnel_pcie(void)
|
|||
/**
|
||||
* tb_acpi_is_xdomain_allowed() - Are XDomain connections allowed
|
||||
*
|
||||
* When software based connection manager is used, this function
|
||||
* returns %true if platform allows XDomain connections.
|
||||
* Return: %true if software based connection manager is used and
|
||||
* platform allows XDomain tunneling, %false otherwise.
|
||||
*/
|
||||
bool tb_acpi_is_xdomain_allowed(void)
|
||||
{
|
||||
|
|
@ -256,7 +258,7 @@ static int tb_acpi_retimer_set_power(struct tb_port *port, bool power)
|
|||
*
|
||||
* This should only be called if the USB4/TBT link is not up.
|
||||
*
|
||||
* Returns %0 on success.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_acpi_power_on_retimers(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -270,7 +272,7 @@ int tb_acpi_power_on_retimers(struct tb_port *port)
|
|||
* This is the opposite of tb_acpi_power_on_retimers(). After returning
|
||||
* successfully the normal operations with the @port can continue.
|
||||
*
|
||||
* Returns %0 on success.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_acpi_power_off_retimers(struct tb_port *port)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -64,10 +64,14 @@ static void tb_port_dummy_read(struct tb_port *port)
|
|||
* @port: Port to find the capability for
|
||||
* @offset: Previous capability offset (%0 for start)
|
||||
*
|
||||
* Returns dword offset of the next capability in port config space
|
||||
* capability list and returns it. Passing %0 returns the first entry in
|
||||
* the capability list. If no next capability is found returns %0. In case
|
||||
* of failure returns negative errno.
|
||||
* Finds dword offset of the next capability in port config space
|
||||
* capability list. When passed %0 in @offset parameter, first entry
|
||||
* will be returned, if it exists.
|
||||
*
|
||||
* Return:
|
||||
* * Double word offset of the first or next capability - On success.
|
||||
* * %0 - If no next capability is found.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int tb_port_next_cap(struct tb_port *port, unsigned int offset)
|
||||
{
|
||||
|
|
@ -112,9 +116,10 @@ static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
|
|||
* @port: Port to find the capability for
|
||||
* @cap: Capability to look
|
||||
*
|
||||
* Returns offset to start of capability or %-ENOENT if no such
|
||||
* capability was found. Negative errno is returned if there was an
|
||||
* error.
|
||||
* Return:
|
||||
* * Offset to the start of capability - On success.
|
||||
* * %-ENOENT - If no such capability was found.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
|
||||
{
|
||||
|
|
@ -137,10 +142,14 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
|
|||
* @sw: Switch to find the capability for
|
||||
* @offset: Previous capability offset (%0 for start)
|
||||
*
|
||||
* Finds dword offset of the next capability in router config space
|
||||
* capability list and returns it. Passing %0 returns the first entry in
|
||||
* the capability list. If no next capability is found returns %0. In case
|
||||
* of failure returns negative errno.
|
||||
* Finds dword offset of the next capability in port config space
|
||||
* capability list. When passed %0 in @offset parameter, first entry
|
||||
* will be returned, if it exists.
|
||||
*
|
||||
* Return:
|
||||
* * Double word offset of the first or next capability - On success.
|
||||
* * %0 - If no next capability is found.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset)
|
||||
{
|
||||
|
|
@ -181,9 +190,10 @@ int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset)
|
|||
* @sw: Switch to find the capability for
|
||||
* @cap: Capability to look
|
||||
*
|
||||
* Returns offset to start of capability or %-ENOENT if no such
|
||||
* capability was found. Negative errno is returned if there was an
|
||||
* error.
|
||||
* Return:
|
||||
* * Offset to the start of capability - On success.
|
||||
* * %-ENOENT - If no such capability was found.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
|
||||
{
|
||||
|
|
@ -213,10 +223,13 @@ int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
|
|||
* @sw: Switch to find the capability for
|
||||
* @vsec: Vendor specific capability to look
|
||||
*
|
||||
* Functions enumerates vendor specific capabilities (VSEC) of a switch
|
||||
* and returns offset when capability matching @vsec is found. If no
|
||||
* such capability is found returns %-ENOENT. In case of error returns
|
||||
* negative errno.
|
||||
* This function enumerates vendor specific capabilities (VSEC) of a
|
||||
* switch and returns offset when capability matching @vsec is found.
|
||||
*
|
||||
* Return:
|
||||
* * Offset of capability - On success.
|
||||
* * %-ENOENT - If capability was not found.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -167,7 +167,8 @@ static int tb_port_clx(struct tb_port *port)
|
|||
* @port: USB4 port to check
|
||||
* @clx: Mask of CL states to check
|
||||
*
|
||||
* Returns true if any of the given CL states is enabled for @port.
|
||||
* Return: %true if any of the given CL states is enabled for @port,
|
||||
* %false otherwise.
|
||||
*/
|
||||
bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
|
||||
{
|
||||
|
|
@ -177,6 +178,8 @@ bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
|
|||
/**
|
||||
* tb_switch_clx_is_supported() - Is CLx supported on this type of router
|
||||
* @sw: The router to check CLx support for
|
||||
*
|
||||
* Return: %true if CLx is supported, %false otherwise.
|
||||
*/
|
||||
static bool tb_switch_clx_is_supported(const struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -203,7 +206,7 @@ static bool tb_switch_clx_is_supported(const struct tb_switch *sw)
|
|||
* Can be called for any router. Initializes the current CL state by
|
||||
* reading it from the hardware.
|
||||
*
|
||||
* Returns %0 in case of success and negative errno in case of failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_clx_init(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -313,7 +316,7 @@ static bool validate_mask(unsigned int clx)
|
|||
* is not inter-domain link. The complete set of conditions is described in CM
|
||||
* Guide 1.0 section 8.1.
|
||||
*
|
||||
* Returns %0 on success or an error code on failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx)
|
||||
{
|
||||
|
|
@ -390,8 +393,7 @@ int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx)
|
|||
* Disables all CL states of the given router. Can be called on any
|
||||
* router and if the states were not enabled already does nothing.
|
||||
*
|
||||
* Returns the CL states that were disabled or negative errno in case of
|
||||
* failure.
|
||||
* Return: CL states that were disabled or negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_clx_disable(struct tb_switch *sw)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -82,6 +82,8 @@ static DEFINE_MUTEX(tb_cfg_request_lock);
|
|||
*
|
||||
* This is refcounted object so when you are done with this, call
|
||||
* tb_cfg_request_put() to it.
|
||||
*
|
||||
* Return: &struct tb_cfg_request on success, %NULL otherwise.
|
||||
*/
|
||||
struct tb_cfg_request *tb_cfg_request_alloc(void)
|
||||
{
|
||||
|
|
@ -359,7 +361,7 @@ static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
|
|||
*
|
||||
* len must be a multiple of four.
|
||||
*
|
||||
* Return: Returns 0 on success or an error code on failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
|
||||
enum tb_cfg_pkg_type type)
|
||||
|
|
@ -539,6 +541,8 @@ static void tb_cfg_request_work(struct work_struct *work)
|
|||
*
|
||||
* This queues @req on the given control channel without waiting for it
|
||||
* to complete. When the request completes @callback is called.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
|
||||
void (*callback)(void *), void *callback_data)
|
||||
|
|
@ -605,6 +609,9 @@ static void tb_cfg_request_complete(void *data)
|
|||
* triggers the request is canceled before function returns. Note the
|
||||
* caller needs to make sure only one message for given switch is active
|
||||
* at a time.
|
||||
*
|
||||
* Return: &struct tb_cfg_result with non-zero @err field if error
|
||||
* has occurred.
|
||||
*/
|
||||
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
|
||||
struct tb_cfg_request *req,
|
||||
|
|
@ -641,7 +648,7 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
|
|||
*
|
||||
* cb will be invoked once for every hot plug event.
|
||||
*
|
||||
* Return: Returns a pointer on success or NULL on failure.
|
||||
* Return: Pointer to &struct tb_ctl, %NULL on failure.
|
||||
*/
|
||||
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
|
||||
event_cb cb, void *cb_data)
|
||||
|
|
@ -764,8 +771,9 @@ void tb_ctl_stop(struct tb_ctl *ctl)
|
|||
* @route: Router that originated the event
|
||||
* @error: Pointer to the notification package
|
||||
*
|
||||
* Call this as response for non-plug notification to ack it. Returns
|
||||
* %0 on success or an error code on failure.
|
||||
* Call this as a response for non-plug notification to ack it.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
|
||||
const struct cfg_error_pkg *error)
|
||||
|
|
@ -827,8 +835,9 @@ int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
|
|||
* @port: Port where the hot plug/unplug happened
|
||||
* @unplug: Ack hot plug or unplug
|
||||
*
|
||||
* Call this as response for hot plug/unplug event to ack it.
|
||||
* Returns %0 on success or an error code on failure.
|
||||
* Call this as a response for hot plug/unplug event to ack it.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
|
||||
{
|
||||
|
|
@ -895,6 +904,9 @@ static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
|
|||
* If the switch at route is incorrectly configured then we will not receive a
|
||||
* reply (even though the switch will reset). The caller should check for
|
||||
* -ETIMEDOUT and attempt to reconfigure the switch.
|
||||
*
|
||||
* Return: &struct tb_cfg_result with non-zero @err field if error
|
||||
* has occurred.
|
||||
*/
|
||||
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
|
||||
{
|
||||
|
|
@ -937,6 +949,9 @@ struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
|
|||
* @timeout_msec: Timeout in ms how long to wait for the response
|
||||
*
|
||||
* Reads from router config space without translating the possible error.
|
||||
*
|
||||
* Return: &struct tb_cfg_result with non-zero @err field if error
|
||||
* has occurred.
|
||||
*/
|
||||
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
|
||||
u64 route, u32 port, enum tb_cfg_space space,
|
||||
|
|
@ -1008,6 +1023,9 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
|
|||
* @timeout_msec: Timeout in ms how long to wait for the response
|
||||
*
|
||||
* Writes to router config space without translating the possible error.
|
||||
*
|
||||
* Return: &struct tb_cfg_result with non-zero @err field if error
|
||||
* has occurred.
|
||||
*/
|
||||
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
|
||||
u64 route, u32 port, enum tb_cfg_space space,
|
||||
|
|
@ -1150,8 +1168,7 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
|
|||
* Reads the first dword from the switches TB_CFG_SWITCH config area and
|
||||
* returns the port number from which the reply originated.
|
||||
*
|
||||
* Return: Returns the upstream port number on success or an error code on
|
||||
* failure.
|
||||
* Return: Upstream port number on success or negative error code on failure.
|
||||
*/
|
||||
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -54,6 +54,7 @@ struct ctl_pkg {
|
|||
* @kref: Reference count
|
||||
* @ctl: Pointer to the control channel structure. Only set when the
|
||||
* request is queued.
|
||||
* @request: Request is stored here
|
||||
* @request_size: Size of the request packet (in bytes)
|
||||
* @request_type: Type of the request packet
|
||||
* @response: Response is stored here
|
||||
|
|
|
|||
|
|
@ -12,6 +12,7 @@
|
|||
#include <linux/debugfs.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/string_choices.h>
|
||||
#include <linux/uaccess.h>
|
||||
|
||||
#include "tb.h"
|
||||
|
|
@ -691,7 +692,7 @@ static int margining_caps_show(struct seq_file *s, void *not_used)
|
|||
seq_printf(s, "0x%08x\n", margining->caps[i]);
|
||||
|
||||
seq_printf(s, "# software margining: %s\n",
|
||||
supports_software(margining) ? "yes" : "no");
|
||||
str_yes_no(supports_software(margining)));
|
||||
if (supports_hardware(margining)) {
|
||||
seq_puts(s, "# hardware margining: yes\n");
|
||||
seq_puts(s, "# minimum BER level contour: ");
|
||||
|
|
|
|||
|
|
@ -197,6 +197,8 @@ static int dma_find_port(struct tb_switch *sw)
|
|||
*
|
||||
* The DMA control port is functional also when the switch is in safe
|
||||
* mode.
|
||||
*
|
||||
* Return: &struct tb_dma_port on success, %NULL otherwise.
|
||||
*/
|
||||
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -354,6 +356,8 @@ static int dma_port_flash_write_block(void *data, unsigned int dwaddress,
|
|||
* @address: Address relative to the start of active region
|
||||
* @buf: Buffer where the data is read
|
||||
* @size: Size of the buffer
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
|
||||
void *buf, size_t size)
|
||||
|
|
@ -372,6 +376,8 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
|
|||
* Writes block of data to the non-active flash region of the switch. If
|
||||
* the address is given as %DMA_PORT_CSS_ADDRESS the block is written
|
||||
* using CSS command.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
|
||||
const void *buf, size_t size)
|
||||
|
|
@ -393,6 +399,8 @@ int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
|
|||
* dma_port_flash_update_auth_status() to get status of this command.
|
||||
* This is because if the switch in question is root switch the
|
||||
* thunderbolt host controller gets reset as well.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int dma_port_flash_update_auth(struct tb_dma_port *dma)
|
||||
{
|
||||
|
|
@ -410,12 +418,13 @@ int dma_port_flash_update_auth(struct tb_dma_port *dma)
|
|||
* @status: Status code of the operation
|
||||
*
|
||||
* The function checks if there is status available from the last update
|
||||
* auth command. Returns %0 if there is no status and no further
|
||||
* action is required. If there is status, %1 is returned instead and
|
||||
* @status holds the failure code.
|
||||
* auth command.
|
||||
*
|
||||
* Negative return means there was an error reading status from the
|
||||
* switch.
|
||||
* Return:
|
||||
* * %0 - If there is no status and no further action is required.
|
||||
* * %1 - If there is some status. @status holds the failure code.
|
||||
* * Negative errno - An error occurred when reading status from the
|
||||
* switch.
|
||||
*/
|
||||
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
|
||||
{
|
||||
|
|
@ -446,6 +455,8 @@ int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
|
|||
* @dma: DMA control port
|
||||
*
|
||||
* Triggers power cycle to the switch.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int dma_port_power_cycle(struct tb_dma_port *dma)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -12,7 +12,8 @@
|
|||
#include <linux/pm_runtime.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/random.h>
|
||||
#include <crypto/hash.h>
|
||||
#include <crypto/sha2.h>
|
||||
#include <crypto/utils.h>
|
||||
|
||||
#include "tb.h"
|
||||
|
||||
|
|
@ -368,7 +369,7 @@ static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
|
|||
* Call tb_domain_put() to release the domain before it has been added
|
||||
* to the system.
|
||||
*
|
||||
* Return: allocated domain structure on %NULL in case of error
|
||||
* Return: Pointer to &struct tb or %NULL in case of error.
|
||||
*/
|
||||
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
|
||||
{
|
||||
|
|
@ -430,7 +431,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
|
|||
* and release the domain after this function has been called, call
|
||||
* tb_domain_remove().
|
||||
*
|
||||
* Return: %0 in case of success and negative errno in case of error
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_domain_add(struct tb *tb, bool reset)
|
||||
{
|
||||
|
|
@ -518,6 +519,8 @@ void tb_domain_remove(struct tb *tb)
|
|||
* @tb: Domain to suspend
|
||||
*
|
||||
* Suspends all devices in the domain and stops the control channel.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_domain_suspend_noirq(struct tb *tb)
|
||||
{
|
||||
|
|
@ -544,6 +547,8 @@ int tb_domain_suspend_noirq(struct tb *tb)
|
|||
*
|
||||
* Re-starts the control channel, and resumes all devices connected to
|
||||
* the domain.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_domain_resume_noirq(struct tb *tb)
|
||||
{
|
||||
|
|
@ -643,6 +648,8 @@ int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
|
|||
* This will approve switch by connection manager specific means. In
|
||||
* case of success the connection manager will create PCIe tunnel from
|
||||
* parent to @sw.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -708,8 +715,6 @@ int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
|
|||
u8 response[TB_SWITCH_KEY_SIZE];
|
||||
u8 hmac[TB_SWITCH_KEY_SIZE];
|
||||
struct tb_switch *parent_sw;
|
||||
struct crypto_shash *tfm;
|
||||
struct shash_desc *shash;
|
||||
int ret;
|
||||
|
||||
if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
|
||||
|
|
@ -725,45 +730,15 @@ int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
|
||||
if (IS_ERR(tfm))
|
||||
return PTR_ERR(tfm);
|
||||
|
||||
ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
|
||||
if (ret)
|
||||
goto err_free_tfm;
|
||||
|
||||
shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
|
||||
GFP_KERNEL);
|
||||
if (!shash) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free_tfm;
|
||||
}
|
||||
|
||||
shash->tfm = tfm;
|
||||
|
||||
memset(hmac, 0, sizeof(hmac));
|
||||
ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
|
||||
if (ret)
|
||||
goto err_free_shash;
|
||||
static_assert(sizeof(hmac) == SHA256_DIGEST_SIZE);
|
||||
hmac_sha256_usingrawkey(sw->key, TB_SWITCH_KEY_SIZE,
|
||||
challenge, sizeof(challenge), hmac);
|
||||
|
||||
/* The returned HMAC must match the one we calculated */
|
||||
if (memcmp(response, hmac, sizeof(hmac))) {
|
||||
ret = -EKEYREJECTED;
|
||||
goto err_free_shash;
|
||||
}
|
||||
|
||||
crypto_free_shash(tfm);
|
||||
kfree(shash);
|
||||
if (crypto_memneq(response, hmac, sizeof(hmac)))
|
||||
return -EKEYREJECTED;
|
||||
|
||||
return tb->cm_ops->approve_switch(tb, sw);
|
||||
|
||||
err_free_shash:
|
||||
kfree(shash);
|
||||
err_free_tfm:
|
||||
crypto_free_shash(tfm);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -773,7 +748,7 @@ int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
|
|||
* This needs to be called in preparation for NVM upgrade of the host
|
||||
* controller. Makes sure all PCIe paths are disconnected.
|
||||
*
|
||||
* Return %0 on success and negative errno in case of error.
|
||||
* Return: %0 on success and negative errno in case of error.
|
||||
*/
|
||||
int tb_domain_disconnect_pcie_paths(struct tb *tb)
|
||||
{
|
||||
|
|
@ -795,9 +770,11 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb)
|
|||
* Calls connection manager specific method to enable DMA paths to the
|
||||
* XDomain in question.
|
||||
*
|
||||
* Return: 0% in case of success and negative errno otherwise. In
|
||||
* particular returns %-ENOTSUPP if the connection manager
|
||||
* implementation does not support XDomains.
|
||||
* Return:
|
||||
* * %0 - On success.
|
||||
* * %-ENOTSUPP - If the connection manager implementation does not support
|
||||
* XDomains.
|
||||
* * Negative errno - An error occurred.
|
||||
*/
|
||||
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
|
||||
int transmit_path, int transmit_ring,
|
||||
|
|
@ -822,9 +799,11 @@ int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
|
|||
* Calls connection manager specific method to disconnect DMA paths to
|
||||
* the XDomain in question.
|
||||
*
|
||||
* Return: 0% in case of success and negative errno otherwise. In
|
||||
* particular returns %-ENOTSUPP if the connection manager
|
||||
* implementation does not support XDomains.
|
||||
* Return:
|
||||
* * %0 - On success.
|
||||
* * %-ENOTSUPP - If the connection manager implementation does not support
|
||||
* XDomains.
|
||||
* * Negative errno - An error occurred.
|
||||
*/
|
||||
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
|
||||
int transmit_path, int transmit_ring,
|
||||
|
|
|
|||
|
|
@ -298,6 +298,8 @@ struct tb_drom_entry_desc {
|
|||
*
|
||||
* Does not use the cached copy in sw->drom. Used during resume to check switch
|
||||
* identity.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
|
||||
{
|
||||
|
|
@ -709,7 +711,7 @@ static int tb_drom_device_read(struct tb_switch *sw)
|
|||
* populates the fields in @sw accordingly. Can be called for any router
|
||||
* generation.
|
||||
*
|
||||
* Returns %0 in case of success and negative errno otherwise.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_drom_read(struct tb_switch *sw)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -14,6 +14,8 @@
|
|||
* tb_lc_read_uuid() - Read switch UUID from link controller common register
|
||||
* @sw: Switch whose UUID is read
|
||||
* @uuid: UUID is placed here
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
|
||||
{
|
||||
|
|
@ -52,9 +54,10 @@ static int find_port_lc_cap(struct tb_port *port)
|
|||
* @port: Port that is reset
|
||||
*
|
||||
* Triggers downstream port reset through link controller registers.
|
||||
* Returns %0 in case of success negative errno otherwise. Only supports
|
||||
* non-USB4 routers with link controller (that's Thunderbolt 2 and
|
||||
* Thunderbolt 3).
|
||||
* Only supports non-USB4 routers with link controller (that's
|
||||
* Thunderbolt 2 and Thunderbolt 3).
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_lc_reset_port(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -132,6 +135,8 @@ static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
|
|||
* @port: Port that is set as configured
|
||||
*
|
||||
* Sets the port configured for power management purposes.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_lc_configure_port(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -143,6 +148,8 @@ int tb_lc_configure_port(struct tb_port *port)
|
|||
* @port: Port that is set as configured
|
||||
*
|
||||
* Sets the port unconfigured for power management purposes.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
void tb_lc_unconfigure_port(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -184,8 +191,10 @@ static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
|
|||
* tb_lc_configure_xdomain() - Inform LC that the link is XDomain
|
||||
* @port: Switch downstream port connected to another host
|
||||
*
|
||||
* Sets the lane configured for XDomain accordingly so that the LC knows
|
||||
* about this. Returns %0 in success and negative errno in failure.
|
||||
* Sets the lane configured for XDomain accordingly so that LC knows
|
||||
* about this.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_lc_configure_xdomain(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -211,7 +220,7 @@ void tb_lc_unconfigure_xdomain(struct tb_port *port)
|
|||
* sleep. Should be called for those downstream lane adapters that were
|
||||
* not connected (tb_lc_configure_port() was not called) before sleep.
|
||||
*
|
||||
* Returns %0 in success and negative errno in case of failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_lc_start_lane_initialization(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -244,6 +253,8 @@ int tb_lc_start_lane_initialization(struct tb_port *port)
|
|||
*
|
||||
* TB_LC_LINK_ATTR_CPS bit reflects if the link supports CLx including
|
||||
* active cables (if connected on the link).
|
||||
*
|
||||
* Return: %true if CLx is supported, %false otherwise.
|
||||
*/
|
||||
bool tb_lc_is_clx_supported(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -266,7 +277,8 @@ bool tb_lc_is_clx_supported(struct tb_port *port)
|
|||
* tb_lc_is_usb_plugged() - Is there USB device connected to port
|
||||
* @port: Device router lane 0 adapter
|
||||
*
|
||||
* Returns true if the @port has USB type-C device connected.
|
||||
* Return: %true if the @port has USB Type-C device connected, %false
|
||||
* otherwise.
|
||||
*/
|
||||
bool tb_lc_is_usb_plugged(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -292,7 +304,8 @@ bool tb_lc_is_usb_plugged(struct tb_port *port)
|
|||
* tb_lc_is_xhci_connected() - Is the internal xHCI connected
|
||||
* @port: Device router lane 0 adapter
|
||||
*
|
||||
* Returns true if the internal xHCI has been connected to @port.
|
||||
* Return: %true if the internal xHCI has been connected to
|
||||
* @port, %false otherwise.
|
||||
*/
|
||||
bool tb_lc_is_xhci_connected(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -343,9 +356,10 @@ static int __tb_lc_xhci_connect(struct tb_port *port, bool connect)
|
|||
* tb_lc_xhci_connect() - Connect internal xHCI
|
||||
* @port: Device router lane 0 adapter
|
||||
*
|
||||
* Tells LC to connect the internal xHCI to @port. Returns %0 on success
|
||||
* and negative errno in case of failure. Can be called for Thunderbolt 3
|
||||
* routers only.
|
||||
* Tells LC to connect the internal xHCI to @port. Can be called for
|
||||
* Thunderbolt 3 routers only.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_lc_xhci_connect(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -408,6 +422,8 @@ static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
|
|||
* @flags: Wakeup flags (%0 to disable)
|
||||
*
|
||||
* For each LC sets wake bits accordingly.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
|
||||
{
|
||||
|
|
@ -447,6 +463,8 @@ int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
|
|||
*
|
||||
* Let the switch link controllers know that the switch is going to
|
||||
* sleep.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_lc_set_sleep(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -491,6 +509,8 @@ int tb_lc_set_sleep(struct tb_switch *sw)
|
|||
*
|
||||
* Checks whether conditions for lane bonding from parent to @sw are
|
||||
* possible.
|
||||
*
|
||||
* Return: %true if lane bonding is possible, %false otherwise.
|
||||
*/
|
||||
bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -562,6 +582,8 @@ static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
|
|||
*
|
||||
* Queries through LC SNK_ALLOCATION registers whether DP sink is available
|
||||
* for the given DP IN port or not.
|
||||
*
|
||||
* Return: %true if DP sink is available, %false otherwise.
|
||||
*/
|
||||
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
|
||||
{
|
||||
|
|
@ -586,10 +608,12 @@ bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
|
|||
* @sw: Switch whose DP sink is allocated
|
||||
* @in: DP IN port the DP sink is allocated for
|
||||
*
|
||||
* Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
|
||||
* resource is available and allocation is successful returns %0. In all
|
||||
* other cases returs negative errno. In particular %-EBUSY is returned if
|
||||
* the resource was not available.
|
||||
* Allocate DP sink for @in via LC SNK_ALLOCATION registers.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - If the resource is available and allocation is successful.
|
||||
* * %-EBUSY - If resource is not available.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
|
||||
{
|
||||
|
|
@ -637,6 +661,8 @@ int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
|
|||
* @in: DP IN port whose DP sink is de-allocated
|
||||
*
|
||||
* De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
|
||||
{
|
||||
|
|
@ -680,6 +706,8 @@ int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
|
|||
*
|
||||
* This is useful to let authentication cycle pass even without
|
||||
* a Thunderbolt link present.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_lc_force_power(struct tb_switch *sw)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -19,6 +19,7 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/property.h>
|
||||
#include <linux/string_choices.h>
|
||||
#include <linux/string_helpers.h>
|
||||
|
||||
#include "nhi.h"
|
||||
|
|
@ -146,7 +147,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
|
|||
dev_WARN(&ring->nhi->pdev->dev,
|
||||
"interrupt for %s %d is already %s\n",
|
||||
RING_TYPE(ring), ring->hop,
|
||||
active ? "enabled" : "disabled");
|
||||
str_enabled_disabled(active));
|
||||
|
||||
if (active)
|
||||
iowrite32(new, ring->nhi->iobase + reg);
|
||||
|
|
@ -343,8 +344,10 @@ EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
|
|||
*
|
||||
* This function can be called when @start_poll callback of the @ring
|
||||
* has been called. It will read one completed frame from the ring and
|
||||
* return it to the caller. Returns %NULL if there is no more completed
|
||||
* frames.
|
||||
* return it to the caller.
|
||||
*
|
||||
* Return: Pointer to &struct ring_frame, %NULL if there is no more
|
||||
* completed frames.
|
||||
*/
|
||||
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
|
||||
{
|
||||
|
|
@ -639,6 +642,8 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
|
|||
* @hop: HopID (ring) to allocate
|
||||
* @size: Number of entries in the ring
|
||||
* @flags: Flags for the ring
|
||||
*
|
||||
* Return: Pointer to &struct tb_ring, %NULL otherwise.
|
||||
*/
|
||||
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
|
||||
unsigned int flags)
|
||||
|
|
@ -660,6 +665,8 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
|
|||
* interrupt is triggered and masked, instead of callback
|
||||
* in each Rx frame.
|
||||
* @poll_data: Optional data passed to @start_poll
|
||||
*
|
||||
* Return: Pointer to &struct tb_ring, %NULL otherwise.
|
||||
*/
|
||||
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
|
||||
unsigned int flags, int e2e_tx_hop,
|
||||
|
|
@ -853,8 +860,9 @@ EXPORT_SYMBOL_GPL(tb_ring_free);
|
|||
* @cmd: Command to send
|
||||
* @data: Data to be send with the command
|
||||
*
|
||||
* Sends mailbox command to the firmware running on NHI. Returns %0 in
|
||||
* case of success and negative errno in case of failure.
|
||||
* Sends mailbox command to the firmware running on NHI.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
|
||||
{
|
||||
|
|
@ -890,6 +898,8 @@ int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
|
|||
*
|
||||
* The function reads current firmware operation mode using NHI mailbox
|
||||
* registers and returns it to the caller.
|
||||
*
|
||||
* Return: &enum nhi_fw_mode.
|
||||
*/
|
||||
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -21,6 +21,12 @@ enum ring_flags {
|
|||
|
||||
/**
|
||||
* struct ring_desc - TX/RX ring entry
|
||||
* @phys: DMA mapped address of the frame
|
||||
* @length: Size of the ring
|
||||
* @eof: End of frame protocol defined field
|
||||
* @sof: Start of frame protocol defined field
|
||||
* @flags: Ring descriptor flags
|
||||
* @time: Fill with zero
|
||||
*
|
||||
* For TX set length/eof/sof.
|
||||
* For RX length/eof/sof are set by the NHI.
|
||||
|
|
|
|||
|
|
@ -278,9 +278,13 @@ static const struct tb_nvm_vendor retimer_nvm_vendors[] = {
|
|||
* tb_nvm_alloc() - Allocate new NVM structure
|
||||
* @dev: Device owning the NVM
|
||||
*
|
||||
* Allocates new NVM structure with unique @id and returns it. In case
|
||||
* of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the
|
||||
* NVM format of the @dev is not known by the kernel.
|
||||
* Allocates new NVM structure with unique @id and returns it.
|
||||
*
|
||||
* Return:
|
||||
* * Pointer to &struct tb_nvm - On success.
|
||||
* * %-EOPNOTSUPP - If the NVM format of the @dev is not known by the
|
||||
* kernel.
|
||||
* * %ERR_PTR - In case of failure.
|
||||
*/
|
||||
struct tb_nvm *tb_nvm_alloc(struct device *dev)
|
||||
{
|
||||
|
|
@ -347,9 +351,10 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
|
|||
* tb_nvm_read_version() - Read and populate NVM version
|
||||
* @nvm: NVM structure
|
||||
*
|
||||
* Uses vendor specific means to read out and fill in the existing
|
||||
* active NVM version. Returns %0 in case of success and negative errno
|
||||
* otherwise.
|
||||
* Uses vendor specific means to read and fill out the existing
|
||||
* active NVM version.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_nvm_read_version(struct tb_nvm *nvm)
|
||||
{
|
||||
|
|
@ -365,12 +370,11 @@ int tb_nvm_read_version(struct tb_nvm *nvm)
|
|||
* tb_nvm_validate() - Validate new NVM image
|
||||
* @nvm: NVM structure
|
||||
*
|
||||
* Runs vendor specific validation over the new NVM image and if all
|
||||
* checks pass returns %0. As side effect updates @nvm->buf_data_start
|
||||
* and @nvm->buf_data_size fields to match the actual data to be written
|
||||
* to the NVM.
|
||||
* Runs vendor specific validation over the new NVM image. As a
|
||||
* side effect, updates @nvm->buf_data_start and @nvm->buf_data_size
|
||||
* fields to match the actual data to be written to the NVM.
|
||||
*
|
||||
* If the validation does not pass then returns negative errno.
|
||||
* Return: %0 on successful validation, negative errno otherwise.
|
||||
*/
|
||||
int tb_nvm_validate(struct tb_nvm *nvm)
|
||||
{
|
||||
|
|
@ -405,7 +409,7 @@ int tb_nvm_validate(struct tb_nvm *nvm)
|
|||
* the image, this function does that. Can be called even if the device
|
||||
* does not need this.
|
||||
*
|
||||
* Returns %0 in case of success and negative errno otherwise.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_nvm_write_headers(struct tb_nvm *nvm)
|
||||
{
|
||||
|
|
@ -423,7 +427,8 @@ int tb_nvm_write_headers(struct tb_nvm *nvm)
|
|||
* Registers new active NVmem device for @nvm. The @reg_read is called
|
||||
* directly from NVMem so it must handle possible concurrent access if
|
||||
* needed. The first parameter passed to @reg_read is @nvm structure.
|
||||
* Returns %0 in success and negative errno otherwise.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
|
||||
{
|
||||
|
|
@ -461,6 +466,11 @@ int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
|
|||
* Helper function to cache the new NVM image before it is actually
|
||||
* written to the flash. Copies @bytes from @val to @nvm->buf starting
|
||||
* from @offset.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - On success.
|
||||
* * %-ENOMEM - If buffer allocation failed.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
|
||||
size_t bytes)
|
||||
|
|
@ -488,7 +498,7 @@ int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
|
|||
* needed. The first parameter passed to @reg_write is @nvm structure.
|
||||
* The size of the NVMem device is set to %NVM_MAX_SIZE.
|
||||
*
|
||||
* Returns %0 in success and negative errno otherwise.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write)
|
||||
{
|
||||
|
|
@ -545,7 +555,7 @@ void tb_nvm_free(struct tb_nvm *nvm)
|
|||
* This is a generic function that reads data from NVM or NVM like
|
||||
* device.
|
||||
*
|
||||
* Returns %0 on success and negative errno otherwise.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
|
||||
unsigned int retries, read_block_fn read_block,
|
||||
|
|
@ -592,7 +602,7 @@ int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
|
|||
*
|
||||
* This is generic function that writes data to NVM or NVM like device.
|
||||
*
|
||||
* Returns %0 on success and negative errno otherwise.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
|
||||
unsigned int retries, write_block_fn write_block,
|
||||
|
|
|
|||
|
|
@ -96,7 +96,7 @@ static int tb_path_find_src_hopid(struct tb_port *src,
|
|||
* that the @dst port is the expected one. If it is not, the path can be
|
||||
* cleaned up by calling tb_path_deactivate() before tb_path_free().
|
||||
*
|
||||
* Return: Discovered path on success, %NULL in case of failure
|
||||
* Return: Pointer to &struct tb_path, %NULL in case of failure.
|
||||
*/
|
||||
struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
|
||||
struct tb_port *dst, int dst_hopid,
|
||||
|
|
@ -233,7 +233,7 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
|
|||
* links on the path, prioritizes using @link_nr but takes into account
|
||||
* that the lanes may be bonded.
|
||||
*
|
||||
* Return: Returns a tb_path on success or NULL on failure.
|
||||
* Return: Pointer to &struct tb_path, %NULL in case of failure.
|
||||
*/
|
||||
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
|
||||
struct tb_port *dst, int dst_hopid, int link_nr,
|
||||
|
|
@ -452,7 +452,9 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
|
|||
* @hop_index: HopID of the path to be cleared
|
||||
*
|
||||
* This deactivates or clears a single path config space entry at
|
||||
* @hop_index. Returns %0 in success and negative errno otherwise.
|
||||
* @hop_index.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_path_deactivate_hop(struct tb_port *port, int hop_index)
|
||||
{
|
||||
|
|
@ -498,7 +500,7 @@ void tb_path_deactivate(struct tb_path *path)
|
|||
* Activate a path starting with the last hop and iterating backwards. The
|
||||
* caller must fill path->hops before calling tb_path_activate().
|
||||
*
|
||||
* Return: Returns 0 on success or an error code on failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_path_activate(struct tb_path *path)
|
||||
{
|
||||
|
|
@ -592,7 +594,7 @@ int tb_path_activate(struct tb_path *path)
|
|||
* tb_path_is_invalid() - check whether any ports on the path are invalid
|
||||
* @path: Path to check
|
||||
*
|
||||
* Return: Returns true if the path is invalid, false otherwise.
|
||||
* Return: %true if the path is invalid, %false otherwise.
|
||||
*/
|
||||
bool tb_path_is_invalid(struct tb_path *path)
|
||||
{
|
||||
|
|
@ -613,6 +615,8 @@ bool tb_path_is_invalid(struct tb_path *path)
|
|||
*
|
||||
* Goes over all hops on path and checks if @port is any of them.
|
||||
* Direction does not matter.
|
||||
*
|
||||
* Return: %true if port is on the path, %false otherwise.
|
||||
*/
|
||||
bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -211,11 +211,13 @@ static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
|
|||
*
|
||||
* This function parses the XDomain properties data block into format that
|
||||
* can be traversed using the helper functions provided by this module.
|
||||
* Upon success returns the parsed directory. In case of error returns
|
||||
* %NULL. The resulting &struct tb_property_dir needs to be released by
|
||||
*
|
||||
* The resulting &struct tb_property_dir needs to be released by
|
||||
* calling tb_property_free_dir() when not needed anymore.
|
||||
*
|
||||
* The @block is expected to be root directory.
|
||||
*
|
||||
* Return: Pointer to &struct tb_property_dir, %NULL in case of failure.
|
||||
*/
|
||||
struct tb_property_dir *tb_property_parse_dir(const u32 *block,
|
||||
size_t block_len)
|
||||
|
|
@ -238,6 +240,8 @@ struct tb_property_dir *tb_property_parse_dir(const u32 *block,
|
|||
*
|
||||
* Creates new, empty property directory. If @uuid is %NULL then the
|
||||
* directory is assumed to be root directory.
|
||||
*
|
||||
* Return: Pointer to &struct tb_property_dir, %NULL in case of failure.
|
||||
*/
|
||||
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid)
|
||||
{
|
||||
|
|
@ -481,9 +485,11 @@ static ssize_t __tb_property_format_dir(const struct tb_property_dir *dir,
|
|||
* @block_len: Length of the property block
|
||||
*
|
||||
* This function formats the directory to the packed format that can be
|
||||
* then send over the thunderbolt fabric to receiving host. Returns %0 in
|
||||
* case of success and negative errno on faulure. Passing %NULL in @block
|
||||
* returns number of entries the block takes.
|
||||
* then sent over the thunderbolt fabric to receiving host.
|
||||
*
|
||||
* Passing %NULL in @block returns number of entries the block takes.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
|
||||
size_t block_len)
|
||||
|
|
@ -505,9 +511,9 @@ ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
|
|||
* tb_property_copy_dir() - Take a deep copy of directory
|
||||
* @dir: Directory to copy
|
||||
*
|
||||
* This function takes a deep copy of @dir and returns back the copy. In
|
||||
* case of error returns %NULL. The resulting directory needs to be
|
||||
* released by calling tb_property_free_dir().
|
||||
* The resulting directory needs to be released by calling tb_property_free_dir().
|
||||
*
|
||||
* Return: Pointer to &struct tb_property_dir, %NULL in case of failure.
|
||||
*/
|
||||
struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir)
|
||||
{
|
||||
|
|
@ -577,6 +583,8 @@ struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir)
|
|||
* @parent: Directory to add the property
|
||||
* @key: Key for the property
|
||||
* @value: Immediate value to store with the property
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
|
||||
u32 value)
|
||||
|
|
@ -606,6 +614,8 @@ EXPORT_SYMBOL_GPL(tb_property_add_immediate);
|
|||
* @buflen: Number of bytes in the data buffer
|
||||
*
|
||||
* Function takes a copy of @buf and adds it to the directory.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
|
||||
const void *buf, size_t buflen)
|
||||
|
|
@ -642,6 +652,8 @@ EXPORT_SYMBOL_GPL(tb_property_add_data);
|
|||
* @text: String to add
|
||||
*
|
||||
* Function takes a copy of @text and adds it to the directory.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
|
||||
const char *text)
|
||||
|
|
@ -676,6 +688,8 @@ EXPORT_SYMBOL_GPL(tb_property_add_text);
|
|||
* @parent: Directory to add the property
|
||||
* @key: Key for the property
|
||||
* @dir: Directory to add
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
|
||||
struct tb_property_dir *dir)
|
||||
|
|
@ -716,8 +730,10 @@ EXPORT_SYMBOL_GPL(tb_property_remove);
|
|||
* @key: Key to look for
|
||||
* @type: Type of the property
|
||||
*
|
||||
* Finds and returns property from the given directory. Does not recurse
|
||||
* into sub-directories. Returns %NULL if the property was not found.
|
||||
* Finds and returns property from the given directory. Does not
|
||||
* recurse into sub-directories.
|
||||
*
|
||||
* Return: Pointer to &struct tb_property, %NULL if the property was not found.
|
||||
*/
|
||||
struct tb_property *tb_property_find(struct tb_property_dir *dir,
|
||||
const char *key, enum tb_property_type type)
|
||||
|
|
@ -737,6 +753,8 @@ EXPORT_SYMBOL_GPL(tb_property_find);
|
|||
* tb_property_get_next() - Get next property from directory
|
||||
* @dir: Directory holding properties
|
||||
* @prev: Previous property in the directory (%NULL returns the first)
|
||||
*
|
||||
* Return: Pointer to &struct tb_property, %NULL if property was not found.
|
||||
*/
|
||||
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
|
||||
struct tb_property *prev)
|
||||
|
|
|
|||
|
|
@ -27,8 +27,9 @@
|
|||
* @buf: Data read from NVM is stored here
|
||||
* @size: Number of bytes to read
|
||||
*
|
||||
* Reads retimer NVM and copies the contents to @buf. Returns %0 if the
|
||||
* read was successful and negative errno in case of failure.
|
||||
* Reads retimer NVM and copies the contents to @buf.
|
||||
*
|
||||
* Return: %0 if the read was successful, negative errno in case of failure.
|
||||
*/
|
||||
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
|
||||
size_t size)
|
||||
|
|
@ -503,6 +504,8 @@ static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
|
|||
* Then Tries to enumerate on-board retimers connected to @port. Found
|
||||
* retimers are registered as children of @port if @add is set. Does
|
||||
* not scan for cable retimers for now.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_retimer_scan(struct tb_port *port, bool add)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -290,8 +290,9 @@ static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
|
|||
* @size: Size of the buffer in bytes
|
||||
*
|
||||
* Reads from router NVM and returns the requested data in @buf. Locking
|
||||
* is up to the caller. Returns %0 in success and negative errno in case
|
||||
* of failure.
|
||||
* is up to the caller.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
|
||||
size_t size)
|
||||
|
|
@ -464,7 +465,7 @@ static void tb_dump_port(struct tb *tb, const struct tb_port *port)
|
|||
*
|
||||
* The port must have a TB_CAP_PHY (i.e. it should be a real port).
|
||||
*
|
||||
* Return: Returns an enum tb_port_state on success or an error code on failure.
|
||||
* Return: &enum tb_port_state or negative error code on failure.
|
||||
*/
|
||||
int tb_port_state(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -491,9 +492,11 @@ int tb_port_state(struct tb_port *port)
|
|||
* switch resume). Otherwise we only wait if a device is registered but the link
|
||||
* has not yet been established.
|
||||
*
|
||||
* Return: Returns an error code on failure. Returns 0 if the port is not
|
||||
* connected or failed to reach state TB_PORT_UP within one second. Returns 1
|
||||
* if the port is connected and in state TB_PORT_UP.
|
||||
* Return:
|
||||
* * %0 - If the port is not connected or failed to reach
|
||||
* state %TB_PORT_UP within one second.
|
||||
* * %1 - If the port is connected and in state %TB_PORT_UP.
|
||||
* * Negative errno - An error occurred.
|
||||
*/
|
||||
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
|
||||
{
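A sketch of how a caller might interpret the three possible results of tb_wait_for_port(); the example_port_ready() helper is made up and mapping the "not connected" case to -ENODEV is just one possible policy:

static int example_port_ready(struct tb_port *port)
{
        int ret = tb_wait_for_port(port, true);

        if (ret < 0)
                return ret;     /* a real error */
        if (ret == 0)
                return -ENODEV; /* not connected or never reached TB_PORT_UP */
        return 0;               /* ret == 1: connected and in TB_PORT_UP */
}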
|
||||
|
|
@ -562,7 +565,7 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
|
|||
* Change the number of NFC credits allocated to @port by @credits. To remove
|
||||
* NFC credits pass a negative amount of credits.
|
||||
*
|
||||
* Return: Returns 0 on success or an error code on failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
|
||||
{
|
||||
|
|
@ -599,7 +602,7 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
|
|||
* @port: Port whose counters to clear
|
||||
* @counter: Counter index to clear
|
||||
*
|
||||
* Return: Returns 0 on success or an error code on failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_port_clear_counter(struct tb_port *port, int counter)
|
||||
{
|
||||
|
|
@ -614,6 +617,8 @@ int tb_port_clear_counter(struct tb_port *port, int counter)
|
|||
*
|
||||
* Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
|
||||
* downstream router accessible for CM.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_port_unlock(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -659,6 +664,8 @@ static int __tb_port_enable(struct tb_port *port, bool enable)
|
|||
* @port: Port to enable (can be %NULL)
|
||||
*
|
||||
* This is used for lane 0 and 1 adapters to enable it.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_port_enable(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -670,6 +677,8 @@ int tb_port_enable(struct tb_port *port)
|
|||
* @port: Port to disable (can be %NULL)
|
||||
*
|
||||
* This is used for lane 0 and 1 adapters to disable it.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_port_disable(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -689,7 +698,7 @@ static int tb_port_reset(struct tb_port *port)
|
|||
* This is a helper method for tb_switch_alloc. Does not check or initialize
|
||||
* any downstream switches.
|
||||
*
|
||||
* Return: Returns 0 on success or an error code on failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
static int tb_init_port(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -847,9 +856,9 @@ static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
|
|||
* link port, the function follows that link and returns another end on
|
||||
* that same link.
|
||||
*
|
||||
* If the @end port has been reached, return %NULL.
|
||||
*
|
||||
* Domain tb->lock must be held when this function is called.
|
||||
*
|
||||
* Return: Pointer to &struct tb_port, %NULL if the @end port has been reached.
|
||||
*/
|
||||
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
|
||||
struct tb_port *prev)
|
||||
|
|
@ -894,7 +903,7 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
|
|||
* tb_port_get_link_speed() - Get current link speed
|
||||
* @port: Port to check (USB4 or CIO)
|
||||
*
|
||||
* Returns link speed in Gb/s or negative errno in case of failure.
|
||||
* Return: Link speed in Gb/s or negative errno in case of failure.
|
||||
*/
|
||||
int tb_port_get_link_speed(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -926,9 +935,11 @@ int tb_port_get_link_speed(struct tb_port *port)
|
|||
* tb_port_get_link_generation() - Returns link generation
|
||||
* @port: Lane adapter
|
||||
*
|
||||
* Returns link generation as number or negative errno in case of
|
||||
* failure. Does not distinguish between Thunderbolt 1 and Thunderbolt 2
|
||||
* links so for those always returns 2.
|
||||
* Return: Link generation as a number or negative errno in case of
|
||||
* failure.
|
||||
*
|
||||
* Does not distinguish between Thunderbolt 1 and Thunderbolt 2
|
||||
* links so for those always returns %2.
|
||||
*/
|
||||
int tb_port_get_link_generation(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -952,8 +963,8 @@ int tb_port_get_link_generation(struct tb_port *port)
|
|||
* tb_port_get_link_width() - Get current link width
|
||||
* @port: Port to check (USB4 or CIO)
|
||||
*
|
||||
* Returns link width. Return the link width as encoded in &enum
|
||||
* tb_link_width or negative errno in case of failure.
|
||||
* Return: Link width encoded in &enum tb_link_width or
|
||||
* negative errno in case of failure.
|
||||
*/
|
||||
int tb_port_get_link_width(struct tb_port *port)
|
||||
{
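As an illustrative aside, a small sketch that reads back the negotiated link parameters described above; it assumes the driver's tb_port_dbg() logging macro from its internal tb.h, and the example_dump_link() name is made up:

static void example_dump_link(struct tb_port *port)
{
        int speed = tb_port_get_link_speed(port);
        int width = tb_port_get_link_width(port);

        if (speed < 0 || width < 0)
                return;

        /* speed is in Gb/s, width is encoded as enum tb_link_width */
        tb_port_dbg(port, "link is %d Gb/s, width value %d\n", speed, width);
}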
|
||||
|
|
@ -979,7 +990,9 @@ int tb_port_get_link_width(struct tb_port *port)
|
|||
* @width: Widths to check (bitmask)
|
||||
*
|
||||
* Can be called to any lane adapter. Checks if given @width is
|
||||
* supported by the hardware and returns %true if it is.
|
||||
* supported by the hardware.
|
||||
*
|
||||
* Return: %true if link width is supported, %false otherwise.
|
||||
*/
|
||||
bool tb_port_width_supported(struct tb_port *port, unsigned int width)
|
||||
{
|
||||
|
|
@ -1016,7 +1029,7 @@ bool tb_port_width_supported(struct tb_port *port, unsigned int width)
|
|||
* Sets the target link width of the lane adapter to @width. Does not
|
||||
* enable/disable lane bonding. For that call tb_port_set_lane_bonding().
|
||||
*
|
||||
* Return: %0 in case of success and negative errno in case of error
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
|
||||
{
|
||||
|
|
@ -1070,7 +1083,7 @@ int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
|
|||
* cases one should use tb_port_lane_bonding_enable() instead to enable
|
||||
* lane bonding.
|
||||
*
|
||||
* Return: %0 in case of success and negative errno in case of error
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
|
||||
{
|
||||
|
|
@ -1104,7 +1117,7 @@ static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
|
|||
* tb_port_wait_for_link_width() before enabling any paths through the
|
||||
* link to make sure the link is in expected state.
|
||||
*
|
||||
* Return: %0 in case of success and negative errno in case of error
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_port_lane_bonding_enable(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1181,9 +1194,14 @@ void tb_port_lane_bonding_disable(struct tb_port *port)
|
|||
*
|
||||
* Should be used after both ends of the link have been bonded (or
|
||||
* bonding has been disabled) to wait until the link actually reaches
|
||||
* the expected state. Returns %-ETIMEDOUT if the width was not reached
|
||||
* within the given timeout, %0 if it did. Can be passed a mask of
|
||||
* expected widths and succeeds if any of the widths is reached.
|
||||
* the expected state.
|
||||
*
|
||||
* Can be passed a mask of expected widths.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - If link reaches any of the specified widths.
|
||||
* * %-ETIMEDOUT - If link does not reach specified width.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width,
|
||||
int timeout_msec)
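For illustration of the bond-then-wait sequence the comments describe, a minimal sketch; TB_LINK_WIDTH_DUAL is assumed to be one of the enum tb_link_width values, the 100 ms timeout is arbitrary, and example_bond_and_wait() is made up:

static int example_bond_and_wait(struct tb_port *port)
{
        int ret;

        ret = tb_port_lane_bonding_enable(port);
        if (ret)
                return ret;

        /* A mask of widths is accepted; reaching any of them ends the wait */
        ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_DUAL, 100);
        if (ret == -ETIMEDOUT)
                tb_port_lane_bonding_disable(port);
        return ret;
}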
|
||||
|
|
@ -1248,6 +1266,8 @@ static int tb_port_do_update_credits(struct tb_port *port)
|
|||
* After the link is bonded (or bonding was disabled) the port total
|
||||
* credits may change, so this function needs to be called to re-read
|
||||
* the credits. Updates also the second lane adapter.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_port_update_credits(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1303,6 +1323,8 @@ static bool tb_port_resume(struct tb_port *port)
|
|||
/**
|
||||
* tb_port_is_enabled() - Is the adapter port enabled
|
||||
* @port: Port to check
|
||||
*
|
||||
* Return: %true if port is enabled, %false otherwise.
|
||||
*/
|
||||
bool tb_port_is_enabled(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1327,6 +1349,8 @@ bool tb_port_is_enabled(struct tb_port *port)
|
|||
/**
|
||||
* tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
|
||||
* @port: USB3 adapter port to check
|
||||
*
|
||||
* Return: %true if port is enabled, %false otherwise.
|
||||
*/
|
||||
bool tb_usb3_port_is_enabled(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1343,6 +1367,8 @@ bool tb_usb3_port_is_enabled(struct tb_port *port)
|
|||
* tb_usb3_port_enable() - Enable USB3 adapter port
|
||||
* @port: USB3 adapter port to enable
|
||||
* @enable: Enable/disable the USB3 adapter
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_usb3_port_enable(struct tb_port *port, bool enable)
|
||||
{
|
||||
|
|
@ -1358,6 +1384,8 @@ int tb_usb3_port_enable(struct tb_port *port, bool enable)
|
|||
/**
|
||||
* tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
|
||||
* @port: PCIe port to check
|
||||
*
|
||||
* Return: %true if port is enabled, %false otherwise.
|
||||
*/
|
||||
bool tb_pci_port_is_enabled(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1374,6 +1402,8 @@ bool tb_pci_port_is_enabled(struct tb_port *port)
|
|||
* tb_pci_port_enable() - Enable PCIe adapter port
|
||||
* @port: PCIe port to enable
|
||||
* @enable: Enable/disable the PCIe adapter
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_pci_port_enable(struct tb_port *port, bool enable)
|
||||
{
|
||||
|
|
@ -1389,6 +1419,8 @@ int tb_pci_port_enable(struct tb_port *port, bool enable)
|
|||
* @port: DP out port to check
|
||||
*
|
||||
* Checks if the DP OUT adapter port has the HPD bit already set.
|
||||
*
|
||||
* Return: %1 if HPD is active, %0 otherwise.
|
||||
*/
|
||||
int tb_dp_port_hpd_is_active(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1408,6 +1440,8 @@ int tb_dp_port_hpd_is_active(struct tb_port *port)
|
|||
* @port: Port to clear HPD
|
||||
*
|
||||
* If the DP IN port has HPD set, this function can be used to clear it.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_dp_port_hpd_clear(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1434,6 +1468,8 @@ int tb_dp_port_hpd_clear(struct tb_port *port)
|
|||
* Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
|
||||
* router DP adapters too but does not program the values as the fields
|
||||
* are read-only.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
|
||||
unsigned int aux_tx, unsigned int aux_rx)
|
||||
|
|
@ -1466,6 +1502,8 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
|
|||
/**
|
||||
* tb_dp_port_is_enabled() - Is DP adapter port enabled
|
||||
* @port: DP adapter port to check
|
||||
*
|
||||
* Return: %true if DP port is enabled, %false otherwise.
|
||||
*/
|
||||
bool tb_dp_port_is_enabled(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1485,6 +1523,8 @@ bool tb_dp_port_is_enabled(struct tb_port *port)
|
|||
*
|
||||
* Once Hop IDs are programmed DP paths can be enabled or disabled by
|
||||
* calling this function.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_dp_port_enable(struct tb_port *port, bool enable)
|
||||
{
|
||||
|
|
@ -1634,7 +1674,7 @@ static bool tb_switch_enumerated(struct tb_switch *sw)
|
|||
*
|
||||
* If the router is not enumerated does nothing.
|
||||
*
|
||||
* Returns %0 on success or negative errno in case of failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_reset(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -1670,8 +1710,12 @@ int tb_switch_reset(struct tb_switch *sw)
|
|||
* @timeout_msec: Timeout in ms how long to wait
|
||||
*
|
||||
* Wait till the specified bits in specified offset reach specified value.
|
||||
* Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
|
||||
* within the given timeout or a negative errno in case of failure.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - On success.
|
||||
* * %-ETIMEDOUT - If the @value was not reached within
|
||||
* the given timeout.
|
||||
* * Negative errno - In case of failure.
|
||||
*/
|
||||
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
|
||||
u32 value, int timeout_msec)
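A hedged sketch of how this polling helper is typically used; EXAMPLE_OFFSET and EXAMPLE_READY are placeholders only, not real router registers, and the 250 ms timeout is arbitrary:

#define EXAMPLE_OFFSET  0x5             /* placeholder config space offset */
#define EXAMPLE_READY   BIT(7)          /* placeholder status bit */

static int example_wait_cleared(struct tb_switch *sw)
{
        /* Poll until the bit reads back as 0; -ETIMEDOUT after 250 ms */
        return tb_switch_wait_for_bit(sw, EXAMPLE_OFFSET, EXAMPLE_READY, 0, 250);
}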
|
||||
|
|
@ -1700,7 +1744,7 @@ int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
|
|||
*
|
||||
* Also configures a sane plug_events_delay of 255ms.
|
||||
*
|
||||
* Return: Returns 0 on success or an error code on failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
static int tb_plug_events_active(struct tb_switch *sw, bool active)
|
||||
{
|
||||
|
|
@ -2406,8 +2450,7 @@ static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
|
|||
* separately. The returned switch should be released by calling
|
||||
* tb_switch_put().
|
||||
*
|
||||
* Return: Pointer to the allocated switch or ERR_PTR() in case of
|
||||
* failure.
|
||||
* Return: Pointer to &struct tb_switch or ERR_PTR() in case of failure.
|
||||
*/
|
||||
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
|
||||
u64 route)
|
||||
|
|
@ -2526,7 +2569,7 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
|
|||
*
|
||||
* The returned switch must be released by calling tb_switch_put().
|
||||
*
|
||||
* Return: Pointer to the allocated switch or ERR_PTR() in case of failure
|
||||
* Return: Pointer to &struct tb_switch or ERR_PTR() in case of failure.
|
||||
*/
|
||||
struct tb_switch *
|
||||
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
|
||||
|
|
@ -2562,7 +2605,7 @@ tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
|
|||
* connection manager to use. Can be called to the switch again after
|
||||
* resume from low power states to re-initialize it.
|
||||
*
|
||||
* Return: %0 in case of success and negative errno in case of failure
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_configure(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -2625,7 +2668,7 @@ int tb_switch_configure(struct tb_switch *sw)
|
|||
* Needs to be called before any tunnels can be setup through the
|
||||
* router. Can be called to any router.
|
||||
*
|
||||
* Returns %0 in success and negative errno otherwise.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_configuration_valid(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -2900,6 +2943,8 @@ static void tb_switch_link_init(struct tb_switch *sw)
|
|||
* Connection manager can call this function to enable lane bonding of a
|
||||
* switch. If conditions are correct and both switches support the feature,
|
||||
* lanes are bonded. It is safe to call this to any switch.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
static int tb_switch_lane_bonding_enable(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -2950,6 +2995,8 @@ static int tb_switch_lane_bonding_enable(struct tb_switch *sw)
|
|||
*
|
||||
* Disables lane bonding between @sw and parent. This can be called even
|
||||
* if lanes were not bonded originally.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
static int tb_switch_lane_bonding_disable(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -3074,7 +3121,7 @@ static int tb_switch_asym_disable(struct tb_switch *sw)
|
|||
*
|
||||
* Does nothing for host router.
|
||||
*
|
||||
* Returns %0 in case of success, negative errno otherwise.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
|
||||
{
|
||||
|
|
@ -3145,7 +3192,7 @@ int tb_switch_set_link_width(struct tb_switch *sw, enum tb_link_width width)
|
|||
*
|
||||
* It is recommended that this is called after lane bonding is enabled.
|
||||
*
|
||||
* Returns %0 on success and negative errno in case of error.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_configure_link(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -3245,7 +3292,7 @@ static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
|
|||
* exposed to the userspace when this function successfully returns. To
|
||||
* remove and release the switch, call tb_switch_remove().
|
||||
*
|
||||
* Return: %0 in case of success and negative errno in case of failure
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_add(struct tb_switch *sw)
|
||||
{
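For illustration, a sketch of the alloc/configure/add lifecycle the comments above and around tb_switch_alloc() describe; example_add_router() is made up, and dropping the reference with tb_switch_put() on the failure path is an assumption about the usual cleanup, not something this patch states:

static struct tb_switch *example_add_router(struct tb *tb, struct device *parent,
                                            u64 route)
{
        struct tb_switch *sw;
        int ret;

        sw = tb_switch_alloc(tb, parent, route);
        if (IS_ERR(sw))
                return sw;

        ret = tb_switch_configure(sw);
        if (!ret)
                ret = tb_switch_add(sw);
        if (ret) {
                tb_switch_put(sw);
                return ERR_PTR(ret);
        }
        return sw;      /* later torn down with tb_switch_remove() */
}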
|
||||
|
|
@ -3467,6 +3514,8 @@ static void tb_switch_check_wakes(struct tb_switch *sw)
|
|||
* suspend. If this is resume from system sleep, notifies PM core about the
|
||||
* wakes that occurred during suspend. Disables all wakes, except USB4 wake of
|
||||
* upstream port for USB4 routers that shall be always enabled.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_resume(struct tb_switch *sw, bool runtime)
|
||||
{
|
||||
|
|
@ -3617,7 +3666,9 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
|
|||
* @in: DP IN port
|
||||
*
|
||||
* Queries availability of DP resource for DP tunneling using switch
|
||||
* specific means. Returns %true if resource is available.
|
||||
* specific means.
|
||||
*
|
||||
* Return: %true if resource is available, %false otherwise.
|
||||
*/
|
||||
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
|
||||
{
|
||||
|
|
@ -3633,7 +3684,8 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
|
|||
*
|
||||
* Allocates DP resource for DP tunneling. The resource must be
|
||||
* available for this to succeed (see tb_switch_query_dp_resource()).
|
||||
* Returns %0 in success and negative errno otherwise.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
|
||||
{
|
||||
|
|
@ -3718,6 +3770,8 @@ static int tb_switch_match(struct device *dev, const void *data)
|
|||
*
|
||||
* Returned switch has reference count increased so the caller needs to
|
||||
* call tb_switch_put() when done with the switch.
|
||||
*
|
||||
* Return: Pointer to &struct tb_switch, %NULL if not found.
|
||||
*/
|
||||
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
|
||||
{
|
||||
|
|
@ -3743,6 +3797,8 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
|
|||
*
|
||||
* Returned switch has reference count increased so the caller needs to
|
||||
* call tb_switch_put() when done with the switch.
|
||||
*
|
||||
* Return: Pointer to &struct tb_switch, %NULL if not found.
|
||||
*/
|
||||
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
|
||||
{
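A minimal sketch of the reference counting rule stated above (the lookup takes a reference that the caller must drop with tb_switch_put()); example_router_present() is a made-up helper:

static bool example_router_present(struct tb *tb, const uuid_t *uuid)
{
        struct tb_switch *sw = tb_switch_find_by_uuid(tb, uuid);

        if (!sw)
                return false;
        tb_switch_put(sw);      /* drop the reference taken by the lookup */
        return true;
}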
|
||||
|
|
@ -3767,6 +3823,8 @@ struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
|
|||
*
|
||||
* Returned switch has reference count increased so the caller needs to
|
||||
* call tb_switch_put() when done with the switch.
|
||||
*
|
||||
* Return: Pointer to &struct tb_switch, %NULL if not found.
|
||||
*/
|
||||
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
|
||||
{
|
||||
|
|
@ -3791,6 +3849,8 @@ struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
|
|||
* tb_switch_find_port() - return the first port of @type on @sw or NULL
|
||||
* @sw: Switch to find the port from
|
||||
* @type: Port type to look for
|
||||
*
|
||||
* Return: Pointer to &struct tb_port, %NULL if not found.
|
||||
*/
|
||||
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
|
||||
enum tb_port_type type)
|
||||
|
|
@ -3859,6 +3919,8 @@ static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge
|
|||
* entry to PCIe L1 state. Shall be called after the upstream PCIe tunnel
|
||||
* was configured. Due to an Intel platform limitation, shall be called only
|
||||
* for the first hop switch.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_pcie_l1_enable(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -3893,6 +3955,8 @@ int tb_switch_pcie_l1_enable(struct tb_switch *sw)
|
|||
* connected to the type-C port. Call only after PCIe tunnel has been
|
||||
* established. The function only does the connect if not done already
|
||||
* so can be called several times for the same router.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_xhci_connect(struct tb_switch *sw)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -225,14 +225,12 @@ static int tb_enable_clx(struct tb_switch *sw)
|
|||
return ret == -EOPNOTSUPP ? 0 : ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* tb_disable_clx() - Disable CL states up to host router
|
||||
* @sw: Router to start
|
||||
/*
|
||||
* Disables CL states from @sw up to the host router.
|
||||
*
|
||||
* Disables CL states from @sw up to the host router. Returns true if
|
||||
* any CL state were disabled. This can be used to figure out whether
|
||||
* the link was setup by us or the boot firmware so we don't
|
||||
* accidentally enable them if they were not enabled during discovery.
|
||||
* This can be used to figure out whether the link was set up by us or the
|
||||
* boot firmware so we don't accidentally enable them if they were not
|
||||
* enabled during discovery.
|
||||
*/
|
||||
static bool tb_disable_clx(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -456,10 +454,8 @@ static void tb_scan_xdomain(struct tb_port *port)
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* tb_find_unused_port() - return the first inactive port on @sw
|
||||
* @sw: Switch to find the port on
|
||||
* @type: Port type to look for
|
||||
/*
|
||||
* Returns the first inactive port on @sw.
|
||||
*/
|
||||
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
|
||||
enum tb_port_type type)
|
||||
|
|
@ -549,6 +545,8 @@ static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
|
|||
* from @src_port to @dst_port. Does not take USB3 tunnel starting from
|
||||
* @src_port and ending on @src_port into account because that bandwidth is
|
||||
* already included as part of the "first hop" USB3 tunnel.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
|
||||
struct tb_port *src_port,
|
||||
|
|
@ -601,6 +599,8 @@ static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
|
|||
* If there is bandwidth reserved for any of the groups between
|
||||
* @src_port and @dst_port (but not yet used) that is also taken into
|
||||
* account in the returned consumed bandwidth.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
static int tb_consumed_dp_bandwidth(struct tb *tb,
|
||||
struct tb_port *src_port,
|
||||
|
|
@ -701,6 +701,8 @@ static bool tb_asym_supported(struct tb_port *src_port, struct tb_port *dst_port
|
|||
* single link at @port. If @include_asym is set then includes the
|
||||
* additional bandwidth if the links are transitioned into asymmetric in the
|
||||
* direction from @src_port to @dst_port.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
|
||||
struct tb_port *dst_port, struct tb_port *port,
|
||||
|
|
@ -807,6 +809,8 @@ static int tb_maximum_bandwidth(struct tb *tb, struct tb_port *src_port,
|
|||
* If @include_asym is true then includes also bandwidth that can be
|
||||
* added when the links are transitioned into asymmetric (but does not
|
||||
* transition the links).
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
|
||||
struct tb_port *dst_port, int *available_up,
|
||||
|
|
@ -1029,6 +1033,8 @@ static int tb_create_usb3_tunnels(struct tb_switch *sw)
|
|||
* (requested + currently consumed) on that link exceed @asym_threshold.
|
||||
*
|
||||
* Must be called with available >= requested over all links.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
|
||||
struct tb_port *dst_port, int requested_up,
|
||||
|
|
@ -1135,6 +1141,8 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
|
|||
* Goes over each link from @src_port to @dst_port and tries to
|
||||
* transition the link to symmetric if the currently consumed bandwidth
|
||||
* allows and link asymmetric preference is ignored (if @keep_asym is %false).
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
|
||||
struct tb_port *dst_port, bool keep_asym)
|
||||
|
|
@ -3336,7 +3344,7 @@ static bool tb_apple_add_links(struct tb_nhi *nhi)
|
|||
if (!pci_is_pcie(pdev))
|
||||
continue;
|
||||
if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
|
||||
!pdev->is_hotplug_bridge)
|
||||
!pdev->is_pciehp)
|
||||
continue;
|
||||
|
||||
link = device_link_add(&pdev->dev, &nhi->pdev->dev,
|
||||
|
|
|
|||
|
|
@ -324,7 +324,7 @@ struct usb4_port {
|
|||
};
|
||||
|
||||
/**
|
||||
* tb_retimer: Thunderbolt retimer
|
||||
* struct tb_retimer - Thunderbolt retimer
|
||||
* @dev: Device for the retimer
|
||||
* @tb: Pointer to the domain the retimer belongs to
|
||||
* @index: Retimer index facing the router USB4 port
|
||||
|
|
@ -552,13 +552,14 @@ static inline void *tb_priv(struct tb *tb)
|
|||
|
||||
/**
|
||||
* tb_upstream_port() - return the upstream port of a switch
|
||||
* @sw: Router
|
||||
*
|
||||
* Every switch has an upstream port (for the root switch it is the NHI).
|
||||
*
|
||||
* During switch alloc/init tb_upstream_port()->remote may be NULL, even for
|
||||
* non root switches (on the NHI port remote is always NULL).
|
||||
*
|
||||
* Return: Returns the upstream port of the switch.
|
||||
* Return: Pointer to &struct tb_port.
|
||||
*/
|
||||
static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -569,8 +570,8 @@ static inline struct tb_port *tb_upstream_port(struct tb_switch *sw)
|
|||
* tb_is_upstream_port() - Is the port upstream facing
|
||||
* @port: Port to check
|
||||
*
|
||||
* Returns true if @port is upstream facing port. In case of dual link
|
||||
* ports both return true.
|
||||
* Return: %true if @port is an upstream facing port. In case of dual link
|
||||
* ports, both return %true.
|
||||
*/
|
||||
static inline bool tb_is_upstream_port(const struct tb_port *port)
|
||||
{
|
||||
|
|
@ -613,7 +614,7 @@ static inline const char *tb_width_name(enum tb_link_width width)
|
|||
* tb_port_has_remote() - Does the port have switch connected downstream
|
||||
* @port: Port to check
|
||||
*
|
||||
* Returns true only when the port is primary port and has remote set.
|
||||
* Return: %true only when the port is primary port and has remote set.
|
||||
*/
|
||||
static inline bool tb_port_has_remote(const struct tb_port *port)
|
||||
{
|
||||
|
|
@ -905,8 +906,9 @@ static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw)
|
|||
* tb_switch_downstream_port() - Return downstream facing port of parent router
|
||||
* @sw: Device router pointer
|
||||
*
|
||||
* Only call for device routers. Returns the downstream facing port of
|
||||
* the parent router.
|
||||
* Call only for device routers.
|
||||
*
|
||||
* Return: Pointer to &struct tb_port or %NULL in case of failure.
|
||||
*/
|
||||
static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -918,6 +920,8 @@ static inline struct tb_port *tb_switch_downstream_port(struct tb_switch *sw)
|
|||
/**
|
||||
* tb_switch_depth() - Returns depth of the connected router
|
||||
* @sw: Router
|
||||
*
|
||||
* Return: Router depth level as a number.
|
||||
*/
|
||||
static inline int tb_switch_depth(const struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -1010,6 +1014,9 @@ static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
|
|||
* is handling @sw this function can be called. It is valid to call this
|
||||
* after tb_switch_alloc() and tb_switch_configure() have been called
|
||||
* (latter only for SW CM case).
|
||||
*
|
||||
* Return: %true if switch is handled by ICM, %false if handled by
|
||||
* software CM.
|
||||
*/
|
||||
static inline bool tb_switch_is_icm(const struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -1037,6 +1044,8 @@ int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode);
|
|||
*
|
||||
* Checks if given router TMU mode is configured to @mode. Note the
|
||||
* router TMU might not be enabled to this mode.
|
||||
*
|
||||
* Return: %true if TMU mode is equal to @mode, %false otherwise.
|
||||
*/
|
||||
static inline bool tb_switch_tmu_is_configured(const struct tb_switch *sw,
|
||||
enum tb_switch_tmu_mode mode)
|
||||
|
|
@ -1048,8 +1057,8 @@ static inline bool tb_switch_tmu_is_configured(const struct tb_switch *sw,
|
|||
* tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled
|
||||
* @sw: Router whose TMU mode to check
|
||||
*
|
||||
* Return true if hardware TMU configuration matches the requested
|
||||
* configuration (and is not %TB_SWITCH_TMU_MODE_OFF).
|
||||
* Return: %true if hardware TMU configuration matches the requested
|
||||
* configuration (and is not %TB_SWITCH_TMU_MODE_OFF), %false otherwise.
|
||||
*/
|
||||
static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -1069,9 +1078,10 @@ int tb_switch_clx_disable(struct tb_switch *sw);
|
|||
* @clx: The CLx states to check for
|
||||
*
|
||||
* Checks if the specified CLx is enabled on the router upstream link.
|
||||
* Returns true if any of the given states is enabled.
|
||||
*
|
||||
* Not applicable for a host router.
|
||||
*
|
||||
* Return: %true if any of the given states is enabled, %false otherwise.
|
||||
*/
|
||||
static inline bool tb_switch_clx_is_enabled(const struct tb_switch *sw,
|
||||
unsigned int clx)
|
||||
|
|
@ -1103,7 +1113,7 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
|
|||
* @src: Source adapter
|
||||
* @dst: Destination adapter
|
||||
*
|
||||
* Returns %true only if the specified path from source adapter (@src)
|
||||
* Return: %true only if the specified path from source adapter (@src)
|
||||
* to destination adapter (@dst) is directed downstream.
|
||||
*/
|
||||
static inline bool
|
||||
|
|
@ -1232,10 +1242,11 @@ static inline int tb_route_length(u64 route)
|
|||
|
||||
/**
|
||||
* tb_downstream_route() - get route to downstream switch
|
||||
* @port: Port to check
|
||||
*
|
||||
* Port must not be the upstream port (otherwise a loop is created).
|
||||
*
|
||||
* Return: Returns a route to the switch behind @port.
|
||||
* Return: Route to the switch behind @port.
|
||||
*/
|
||||
static inline u64 tb_downstream_route(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1263,7 +1274,7 @@ static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd)
|
|||
* tb_xdomain_downstream_port() - Return downstream facing port of parent router
|
||||
* @xd: Xdomain pointer
|
||||
*
|
||||
* Returns the downstream port the XDomain is connected to.
|
||||
* Return: Pointer to &struct tb_port or %NULL in case of failure.
|
||||
*/
|
||||
static inline struct tb_port *tb_xdomain_downstream_port(struct tb_xdomain *xd)
|
||||
{
|
||||
|
|
@ -1291,7 +1302,7 @@ static inline struct tb_retimer *tb_to_retimer(struct device *dev)
|
|||
* usb4_switch_version() - Returns USB4 version of the router
|
||||
* @sw: Router to check
|
||||
*
|
||||
* Returns major version of USB4 router (%1 for v1, %2 for v2 and so
|
||||
* Return: Major version of USB4 router (%1 for v1, %2 for v2 and so
|
||||
* on). Can be called for a pre-USB4 router too and in that case returns %0.
|
||||
*/
|
||||
static inline unsigned int usb4_switch_version(const struct tb_switch *sw)
|
||||
|
|
@ -1303,7 +1314,7 @@ static inline unsigned int usb4_switch_version(const struct tb_switch *sw)
|
|||
* tb_switch_is_usb4() - Is the switch USB4 compliant
|
||||
* @sw: Switch to check
|
||||
*
|
||||
* Returns true if the @sw is USB4 compliant router, false otherwise.
|
||||
* Return: %true if the @sw is USB4 compliant router, %false otherwise.
|
||||
*/
|
||||
static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -1355,7 +1366,7 @@ int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width
|
|||
int usb4_port_asym_start(struct tb_port *port);
|
||||
|
||||
/**
|
||||
* enum tb_sb_target - Sideband transaction target
|
||||
* enum usb4_sb_target - Sideband transaction target
|
||||
* @USB4_SB_TARGET_ROUTER: Target is the router itself
|
||||
* @USB4_SB_TARGET_PARTNER: Target is partner
|
||||
* @USB4_SB_TARGET_RETIMER: Target is retimer
|
||||
|
|
@ -1400,6 +1411,8 @@ enum usb4_margining_lane {
|
|||
* @voltage_time_offset: Offset for voltage / time for software margining
|
||||
* @optional_voltage_offset_range: Enable optional extended voltage range
|
||||
* @right_high: %false if left/low margin test is performed, %true if right/high
|
||||
* @upper_eye: %true if margin test is done on upper eye, %false if done on
|
||||
* lower eye
|
||||
* @time: %true if time margining is used instead of voltage
|
||||
*/
|
||||
struct usb4_port_margining_params {
|
||||
|
|
|
|||
|
|
@ -405,6 +405,8 @@ static int tmu_mode_init(struct tb_switch *sw)
|
|||
* This function must be called before other TMU related functions to
|
||||
* make sure the internal structures are filled in correctly. Does not
|
||||
* change any hardware configuration.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_tmu_init(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -439,6 +441,8 @@ int tb_switch_tmu_init(struct tb_switch *sw)
|
|||
* @sw: Switch whose time to update
|
||||
*
|
||||
* Updates switch local time using time posting procedure.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_tmu_post_time(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -555,6 +559,8 @@ static int disable_enhanced(struct tb_port *up, struct tb_port *down)
|
|||
* @sw: Switch whose TMU to disable
|
||||
*
|
||||
* Turns off TMU of @sw if it is enabled. If not enabled does nothing.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_tmu_disable(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -938,6 +944,8 @@ static int tb_switch_tmu_change_mode(struct tb_switch *sw)
|
|||
* Enables TMU of a router to be in uni-directional Normal/HiFi or
|
||||
* bi-directional HiFi mode. Calling tb_switch_tmu_configure() is
|
||||
* required before calling this function.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_switch_tmu_enable(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -1017,9 +1025,11 @@ int tb_switch_tmu_enable(struct tb_switch *sw)
|
|||
* Selects the TMU mode that is enabled when tb_switch_tmu_enable() is
|
||||
* next called.
|
||||
*
|
||||
* Returns %0 in success and negative errno otherwise. Specifically
|
||||
* returns %-EOPNOTSUPP if the requested mode is not possible (not
|
||||
* supported by the router and/or topology).
|
||||
* Return:
|
||||
* * %0 - On success.
|
||||
* * %-EOPNOTSUPP - If the requested mode is not possible (not supported by
|
||||
* the router and/or topology).
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -121,6 +121,8 @@ static inline unsigned int tb_usable_credits(const struct tb_port *port)
|
|||
* @port: Lane adapter to check
|
||||
* @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
|
||||
* streams possible through this lane adapter
|
||||
*
|
||||
* Return: Number of available credits.
|
||||
*/
|
||||
static unsigned int tb_available_credits(const struct tb_port *port,
|
||||
size_t *max_dp_streams)
|
||||
|
|
@ -415,8 +417,9 @@ static int tb_pci_init_path(struct tb_path *path)
|
|||
* @alloc_hopid: Allocate HopIDs from visited ports
|
||||
*
|
||||
* If @down adapter is active, follows the tunnel to the PCIe upstream
|
||||
* adapter and back. Returns the discovered tunnel or %NULL if there was
|
||||
* no tunnel.
|
||||
* adapter and back.
|
||||
*
|
||||
* Return: Pointer to &struct tb_tunnel or %NULL if there was no tunnel.
|
||||
*/
|
||||
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
|
||||
bool alloc_hopid)
|
||||
|
|
@ -496,7 +499,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
|
|||
* Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
|
||||
* TB_TYPE_PCIE_DOWN.
|
||||
*
|
||||
* Return: Returns a tb_tunnel on success or NULL on failure.
|
||||
* Return: Pointer to &struct tb_tunnel or %NULL on failure.
|
||||
*/
|
||||
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
|
||||
struct tb_port *down)
|
||||
|
|
@ -543,9 +546,12 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
|
|||
*
|
||||
* Can be called to any connected lane 0 adapter to find out how much
|
||||
* bandwidth needs to be left in reserve for possible PCIe bulk traffic.
|
||||
* Returns true if there is something to be reserved and writes the
|
||||
* amount to @reserved_down/@reserved_up. Otherwise returns false and
|
||||
* does not touch the parameters.
|
||||
*
|
||||
* Return:
|
||||
* * %true - If there is something to be reserved. Writes the amount to
|
||||
* @reserved_down/@reserved_up.
|
||||
* * %false - Nothing to be reserved. Leaves @reserved_down/@reserved_up
|
||||
* unmodified.
|
||||
*/
|
||||
bool tb_tunnel_reserved_pci(struct tb_port *port, int *reserved_up,
|
||||
int *reserved_down)
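A small sketch of the %true/%false contract described above (the outputs are written only when something needs to be reserved); example_add_reserved() is a made-up helper:

static void example_add_reserved(struct tb_port *port, int *up, int *down)
{
        int r_up, r_down;

        /* On %false the outputs are left untouched, so only use them on %true */
        if (tb_tunnel_reserved_pci(port, &r_up, &r_down)) {
                *up += r_up;
                *down += r_down;
        }
}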
|
||||
|
|
@ -1073,6 +1079,7 @@ static void tb_dp_dprx_work(struct work_struct *work)
|
|||
|
||||
if (tunnel->callback)
|
||||
tunnel->callback(tunnel, tunnel->callback_data);
|
||||
tb_tunnel_put(tunnel);
|
||||
}
|
||||
|
||||
static int tb_dp_dprx_start(struct tb_tunnel *tunnel)
|
||||
|
|
@ -1100,8 +1107,8 @@ static void tb_dp_dprx_stop(struct tb_tunnel *tunnel)
|
|||
if (tunnel->dprx_started) {
|
||||
tunnel->dprx_started = false;
|
||||
tunnel->dprx_canceled = true;
|
||||
cancel_delayed_work(&tunnel->dprx_work);
|
||||
tb_tunnel_put(tunnel);
|
||||
if (cancel_delayed_work(&tunnel->dprx_work))
|
||||
tb_tunnel_put(tunnel);
|
||||
}
|
||||
}
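The shape of the fix above is the usual delayed-work plus reference-count pattern: drop the reference only when cancel_delayed_work() reports it actually dequeued pending work; otherwise the work function still owns that reference and drops it itself. A generic sketch of the same idea, with everything example_* being hypothetical and outside this driver:

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_obj {
        struct kref kref;
        struct delayed_work work;
};

static void example_obj_release(struct kref *kref)
{
        kfree(container_of(kref, struct example_obj, kref));
}

static void example_obj_put(struct example_obj *obj)
{
        kref_put(&obj->kref, example_obj_release);
}

static void example_stop(struct example_obj *obj)
{
        /*
         * cancel_delayed_work() returns true only when the work was still
         * pending and will now never run; only then is the work's reference
         * ours to drop.  If the work already started, the work function
         * drops it itself.
         */
        if (cancel_delayed_work(&obj->work))
                example_obj_put(obj);
}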
|
||||
|
||||
|
|
@ -1151,7 +1158,8 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
|
|||
* @tunnel: DP tunnel to check
|
||||
* @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
|
||||
*
|
||||
* Returns maximum possible bandwidth for this tunnel in Mb/s.
|
||||
* Return: Maximum possible bandwidth for this tunnel in Mb/s, negative errno
|
||||
* in case of failure.
|
||||
*/
|
||||
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
|
||||
int *max_bw_rounded)
|
||||
|
|
@ -1547,7 +1555,7 @@ static void tb_dp_dump(struct tb_tunnel *tunnel)
|
|||
* and back. Returns the discovered tunnel or %NULL if there was no
|
||||
* tunnel.
|
||||
*
|
||||
* Return: DP tunnel or %NULL if no tunnel found.
|
||||
* Return: Pointer to &struct tb_tunnel or %NULL if no tunnel found.
|
||||
*/
|
||||
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
|
||||
bool alloc_hopid)
|
||||
|
|
@ -1648,7 +1656,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
|
|||
* successful (or if it returns %false there was some sort of issue).
|
||||
* The @callback is called without @tb->lock held.
|
||||
*
|
||||
* Return: Returns a tb_tunnel on success or &NULL on failure.
|
||||
* Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
|
||||
*/
|
||||
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
|
||||
struct tb_port *out, int link_nr,
|
||||
|
|
@ -1861,7 +1869,7 @@ static void tb_dma_destroy(struct tb_tunnel *tunnel)
|
|||
* @receive_ring: NHI ring number used to receive packets from the
|
||||
* other domain. Set to %-1 if RX path is not needed.
|
||||
*
|
||||
* Return: Returns a tb_tunnel on success or NULL on failure.
|
||||
* Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
|
||||
*/
|
||||
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
|
||||
struct tb_port *dst, int transmit_path,
|
||||
|
|
@ -1938,7 +1946,8 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
|
|||
*
|
||||
* This function can be used to match specific DMA tunnel, if there are
|
||||
* multiple DMA tunnels going through the same XDomain connection.
|
||||
* Returns true if there is match and false otherwise.
|
||||
*
|
||||
* Return: %true if there is a match, %false otherwise.
|
||||
*/
|
||||
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
|
||||
int transmit_ring, int receive_path, int receive_ring)
|
||||
|
|
@ -2160,8 +2169,9 @@ static void tb_usb3_init_path(struct tb_path *path)
|
|||
* @alloc_hopid: Allocate HopIDs from visited ports
|
||||
*
|
||||
* If @down adapter is active, follows the tunnel to the USB3 upstream
|
||||
* adapter and back. Returns the discovered tunnel or %NULL if there was
|
||||
* no tunnel.
|
||||
* adapter and back.
|
||||
*
|
||||
* Return: Pointer to &struct tb_tunnel or %NULL if there was no tunnel.
|
||||
*/
|
||||
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
|
||||
bool alloc_hopid)
|
||||
|
|
@ -2266,7 +2276,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
|
|||
* Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and
|
||||
* @TB_TYPE_USB3_DOWN.
|
||||
*
|
||||
* Return: Returns a tb_tunnel on success or %NULL on failure.
|
||||
* Return: Pointer to &struct tb_tunnel or %NULL in case of failure.
|
||||
*/
|
||||
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
|
||||
struct tb_port *down, int max_up,
|
||||
|
|
@ -2337,6 +2347,8 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
|
|||
/**
|
||||
* tb_tunnel_is_invalid - check whether an activated path is still valid
|
||||
* @tunnel: Tunnel to check
|
||||
*
|
||||
* Return: %true if path is valid, %false otherwise.
|
||||
*/
|
||||
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
|
||||
{
|
||||
|
|
@ -2355,10 +2367,11 @@ bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
|
|||
* tb_tunnel_activate() - activate a tunnel
|
||||
* @tunnel: Tunnel to activate
|
||||
*
|
||||
* Return: 0 on success and negative errno in case if failure.
|
||||
* Specifically returns %-EINPROGRESS if the tunnel activation is still
|
||||
* in progress (that's for DP tunnels to complete DPRX capabilities
|
||||
* read).
|
||||
* Return:
|
||||
* * %0 - On success.
|
||||
* * %-EINPROGRESS - If the tunnel activation is still in progress (that's
|
||||
* for DP tunnels to complete DPRX capabilities read).
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int tb_tunnel_activate(struct tb_tunnel *tunnel)
|
||||
{
|
||||
|
|
@ -2438,8 +2451,8 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
|
|||
* @tunnel: Tunnel to check
|
||||
* @port: Port to check
|
||||
*
|
||||
* Returns true if @tunnel goes through @port (direction does not matter),
|
||||
* false otherwise.
|
||||
* Return: %true if @tunnel goes through @port (direction does not matter),
|
||||
* %false otherwise.
|
||||
*/
|
||||
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
|
||||
const struct tb_port *port)
|
||||
|
|
@ -2469,9 +2482,11 @@ static bool tb_tunnel_is_activated(const struct tb_tunnel *tunnel)
|
|||
* @max_up: Maximum upstream bandwidth in Mb/s
|
||||
* @max_down: Maximum downstream bandwidth in Mb/s
|
||||
*
|
||||
* Returns maximum possible bandwidth this tunnel can go if not limited
|
||||
* by other bandwidth clients. If the tunnel does not support this
|
||||
* returns %-EOPNOTSUPP.
|
||||
* Return:
|
||||
* * Maximum possible bandwidth this tunnel can support if not
|
||||
* limited by other bandwidth clients.
|
||||
* * %-EOPNOTSUPP - If the tunnel does not support this function.
|
||||
* * %-ENOTCONN - If the tunnel is not active.
|
||||
*/
|
||||
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
|
||||
int *max_down)
|
||||
|
|
@ -2491,8 +2506,12 @@ int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
|
|||
* @allocated_down: Currently allocated downstream bandwidth in Mb/s is
|
||||
* stored here
|
||||
*
|
||||
* Returns the bandwidth allocated for the tunnel. This may be higher
|
||||
* than what the tunnel actually consumes.
|
||||
* Return:
|
||||
* * Bandwidth allocated for the tunnel. This may be higher than what the
|
||||
* tunnel actually consumes.
|
||||
* * %-EOPNOTSUPP - If the tunnel does not support this function.
|
||||
* * %-ENOTCONN - If the tunnel is not active.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
|
||||
int *allocated_down)
|
||||
|
|
@ -2512,10 +2531,12 @@ int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
|
|||
* @alloc_up: New upstream bandwidth in Mb/s
|
||||
* @alloc_down: New downstream bandwidth in Mb/s
|
||||
*
|
||||
* Tries to change tunnel bandwidth allocation. If succeeds returns %0
|
||||
* and updates @alloc_up and @alloc_down to that was actually allocated
|
||||
* (it may not be the same as passed originally). Returns negative errno
|
||||
* in case of failure.
|
||||
* Tries to change tunnel bandwidth allocation.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - On success. Updates @alloc_up and @alloc_down to values that were
|
||||
* actually allocated (it may not be the same as passed originally).
|
||||
* * Negative errno - In case of failure.
|
||||
*/
|
||||
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
|
||||
int *alloc_down)
|
||||
|
|
@ -2546,8 +2567,9 @@ int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
|
|||
* Can be %NULL.
|
||||
*
|
||||
* Stores the amount of isochronous bandwidth @tunnel consumes in
|
||||
* @consumed_up and @consumed_down. In case of success returns %0,
|
||||
* negative errno otherwise.
|
||||
* @consumed_up and @consumed_down.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
|
||||
int *consumed_down)
|
||||
|
|
@ -2585,7 +2607,7 @@ int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
|
|||
* If tunnel supports dynamic bandwidth management (USB3 tunnels at the
|
||||
* moment) this function makes it to release all the unused bandwidth.
|
||||
*
|
||||
* Returns %0 in case of success and negative errno otherwise.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -142,10 +142,11 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
|
|||
* tb_tunnel_is_active() - Is tunnel fully activated
|
||||
* @tunnel: Tunnel to check
|
||||
*
|
||||
* Returns %true if @tunnel is fully activated. For other than DP
|
||||
* tunnels this is pretty much once tb_tunnel_activate() returns
|
||||
* successfully. However, for DP tunnels this returns %true only once the
|
||||
* DPRX capabilities read has been issued successfully.
|
||||
* Return: %true if @tunnel is fully activated.
|
||||
*
|
||||
* Note for DP tunnels this returns %true only once the DPRX capabilities
|
||||
* read has been issued successfully. For other tunnels, this function
|
||||
* returns %true pretty much once tb_tunnel_activate() returns successfully.
|
||||
*/
|
||||
static inline bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
|
||||
{
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@
|
|||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/ktime.h>
|
||||
#include <linux/string_choices.h>
|
||||
#include <linux/units.h>
|
||||
|
||||
#include "sb_regs.h"
|
||||
|
|
@ -172,8 +173,8 @@ void usb4_switch_check_wakes(struct tb_switch *sw)
|
|||
return;
|
||||
|
||||
tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
|
||||
(val & ROUTER_CS_6_WOPS) ? "yes" : "no",
|
||||
(val & ROUTER_CS_6_WOUS) ? "yes" : "no");
|
||||
str_yes_no(val & ROUTER_CS_6_WOPS),
|
||||
str_yes_no(val & ROUTER_CS_6_WOUS));
|
||||
|
||||
wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
|
||||
}
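The conversion above replaces the open-coded "yes"/"no" ternaries with str_yes_no() from <linux/string_choices.h> (the header the hunk adds earlier); a trivial sketch of the helper, with example_wake_str() made up:

#include <linux/string_choices.h>

static const char *example_wake_str(bool wakeup)
{
        return str_yes_no(wakeup);      /* "yes" or "no" */
}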
|
||||
|
|
@ -191,9 +192,9 @@ void usb4_switch_check_wakes(struct tb_switch *sw)
|
|||
break;
|
||||
|
||||
tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n",
|
||||
(val & PORT_CS_18_WOU4S) ? "yes" : "no",
|
||||
(val & PORT_CS_18_WOCS) ? "yes" : "no",
|
||||
(val & PORT_CS_18_WODS) ? "yes" : "no");
|
||||
str_yes_no(val & PORT_CS_18_WOU4S),
|
||||
str_yes_no(val & PORT_CS_18_WOCS),
|
||||
str_yes_no(val & PORT_CS_18_WODS));
|
||||
|
||||
wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS |
|
||||
PORT_CS_18_WODS);
|
||||
|
|
@ -236,6 +237,8 @@ static bool link_is_usb4(struct tb_port *port)
|
|||
*
|
||||
* This does not set the configuration valid bit of the router. To do
|
||||
* that call usb4_switch_configuration_valid().
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_switch_setup(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -260,7 +263,7 @@ int usb4_switch_setup(struct tb_switch *sw)
|
|||
tbt3 = !(val & ROUTER_CS_6_TNS);
|
||||
|
||||
tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
|
||||
tbt3 ? "yes" : "no", xhci ? "yes" : "no");
|
||||
str_yes_no(tbt3), str_yes_no(xhci));
|
||||
|
||||
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
|
||||
if (ret)
|
||||
|
|
@ -303,7 +306,7 @@ int usb4_switch_setup(struct tb_switch *sw)
|
|||
* usb4_switch_setup() has been called. Can be called to host and device
|
||||
* routers (does nothing for the latter).
|
||||
*
|
||||
* Returns %0 in success and negative errno otherwise.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_switch_configuration_valid(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -333,6 +336,8 @@ int usb4_switch_configuration_valid(struct tb_switch *sw)
|
|||
* @uid: UID is stored here
|
||||
*
|
||||
* Reads 64-bit UID from USB4 router config space.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
|
||||
{
|
||||
|
|
@ -370,6 +375,8 @@ static int usb4_switch_drom_read_block(void *data,
|
|||
* Uses USB4 router operations to read router DROM. For devices this
|
||||
* should always work but for hosts it may return %-EOPNOTSUPP in which
|
||||
* case the host router does not have DROM.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
|
||||
size_t size)
|
||||
|
|
@ -384,6 +391,8 @@ int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
|
|||
*
|
||||
* Checks whether conditions are met so that lane bonding can be
|
||||
* established with the upstream router. Call only for device routers.
|
||||
*
|
||||
* Return: %true if lane bonding is possible, %false otherwise.
|
||||
*/
|
||||
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -406,6 +415,8 @@ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
|
|||
* @runtime: Wake is being programmed during system runtime
|
||||
*
|
||||
* Enables/disables router to wake up from sleep.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
|
||||
{
|
||||
|
|
@ -483,8 +494,10 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
|
|||
* usb4_switch_set_sleep() - Prepare the router to enter sleep
|
||||
* @sw: USB4 router
|
||||
*
|
||||
* Sets sleep bit for the router. Returns when the router sleep ready
|
||||
* Sets sleep bit for the router and waits until router sleep ready
|
||||
* bit has been asserted.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_switch_set_sleep(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -510,9 +523,10 @@ int usb4_switch_set_sleep(struct tb_switch *sw)
|
|||
* usb4_switch_nvm_sector_size() - Return router NVM sector size
|
||||
* @sw: USB4 router
|
||||
*
|
||||
* If the router supports NVM operations this function returns the NVM
|
||||
* sector size in bytes. If NVM operations are not supported returns
|
||||
* %-EOPNOTSUPP.
|
||||
* Return:
|
||||
* * NVM sector size in bytes if router supports NVM operations.
|
||||
* * %-EOPNOTSUPP - If router does not support NVM operations.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -559,8 +573,12 @@ static int usb4_switch_nvm_read_block(void *data,
|
|||
* @buf: Read data is placed here
|
||||
* @size: How many bytes to read
|
||||
*
|
||||
* Reads NVM contents of the router. If NVM is not supported returns
|
||||
* %-EOPNOTSUPP.
|
||||
* Reads NVM contents of the router.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - Read completed successfully.
|
||||
* * %-EOPNOTSUPP - NVM not supported.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
|
||||
size_t size)
|
||||
|
|
@ -577,7 +595,7 @@ int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
|
|||
* Explicitly sets NVM write offset. Normally when writing to NVM this
|
||||
* is done automatically by usb4_switch_nvm_write().
|
||||
*
|
||||
* Returns %0 in success and negative errno if there was a failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
|
||||
{
|
||||
|
|
@ -619,8 +637,12 @@ static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
|
|||
* @buf: Pointer to the data to write
|
||||
* @size: Size of @buf in bytes
|
||||
*
|
||||
* Writes @buf to the router NVM using USB4 router operations. If NVM
|
||||
* write is not supported returns %-EOPNOTSUPP.
|
||||
* Writes @buf to the router NVM using USB4 router operations.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - Write completed successfully.
|
||||
* * %-EOPNOTSUPP - NVM write not supported.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
|
||||
const void *buf, size_t size)
|
||||
|
|
@ -642,11 +664,13 @@ int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
|
|||
* After the new NVM has been written via usb4_switch_nvm_write(), this
|
||||
* function triggers NVM authentication process. The router gets power
|
||||
* cycled and if the authentication is successful the new NVM starts
|
||||
* running. In case of failure returns negative errno.
|
||||
* running.
|
||||
*
|
||||
* The caller should call usb4_switch_nvm_authenticate_status() to read
|
||||
* the status of the authentication after power cycle. It should be the
|
||||
* first router operation to avoid the status being lost.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -674,11 +698,13 @@ int usb4_switch_nvm_authenticate(struct tb_switch *sw)
|
|||
* @status: Status code of the operation
|
||||
*
|
||||
* The function checks if there is status available from the last NVM
|
||||
* authenticate router operation. If there is status then %0 is returned
|
||||
* and the status code is placed in @status. Returns negative errno in case
|
||||
* of failure.
|
||||
* authenticate router operation.
|
||||
*
|
||||
* Must be called before any other router operation.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - If there is status. Status code is placed in @status.
|
||||
* * Negative errno - Failure occurred.
|
||||
*/
|
||||
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
|
||||
{
|
||||
|
|
@ -722,7 +748,7 @@ int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
|
|||
* allocation fields accordingly. Specifically @sw->credits_allocation
|
||||
* is set to %true if these parameters can be used in tunneling.
|
||||
*
|
||||
* Returns %0 on success and negative errno otherwise.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_switch_credits_init(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -861,8 +887,10 @@ int usb4_switch_credits_init(struct tb_switch *sw)
|
|||
* @in: DP IN adapter
|
||||
*
|
||||
* For DP tunneling this function can be used to query availability of
|
||||
* DP IN resource. Returns true if the resource is available for DP
|
||||
* tunneling, false otherwise.
|
||||
* DP IN resource.
|
||||
*
|
||||
* Return: %true if the resource is available for DP tunneling, %false
|
||||
* otherwise.
|
||||
*/
|
||||
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
|
||||
{
|
||||
|
|
@ -890,9 +918,12 @@ bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
|
|||
* @in: DP IN adapter
|
||||
*
|
||||
* Allocates DP IN resource for DP tunneling using USB4 router
|
||||
* operations. If the resource was allocated returns %0. Otherwise
|
||||
* returns negative errno, in particular %-EBUSY if the resource is
|
||||
* already allocated.
|
||||
* operations.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - Resource allocated successfully.
|
||||
* * %-EBUSY - Resource is already allocated.
|
||||
* * Negative errno - Other failure occurred.
|
||||
*/
|
||||
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
|
||||
{
|
||||
|
|
@ -916,6 +947,8 @@ int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
|
|||
* @in: DP IN adapter
|
||||
*
|
||||
* Releases the previously allocated DP IN resource.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
|
||||
{
|
||||
|
|
@ -971,6 +1004,8 @@ int usb4_port_index(const struct tb_switch *sw, const struct tb_port *port)
|
|||
* downstream adapters where the PCIe topology is extended. This
|
||||
* function returns the corresponding downstream PCIe adapter or %NULL
|
||||
* if no such mapping was possible.
|
||||
*
|
||||
* Return: Pointer to &struct tb_port or %NULL if not found.
|
||||
*/
|
||||
struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
|
||||
const struct tb_port *port)
|
||||
|
|
@ -1002,6 +1037,8 @@ struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
|
|||
* downstream adapters where the USB 3.x topology is extended. This
|
||||
* function returns the corresponding downstream USB 3.x adapter or
|
||||
* %NULL if no such mapping was possible.
|
||||
*
|
||||
* Return: Pointer to &struct tb_port or %NULL if not found.
|
||||
*/
|
||||
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
|
||||
const struct tb_port *port)
|
||||
|
|
@ -1031,7 +1068,7 @@ struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
|
|||
* For USB4 router finds all USB4 ports and registers devices for each.
|
||||
* Can be called to any router.
|
||||
*
|
||||
* Return %0 in case of success and negative errno in case of failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_switch_add_ports(struct tb_switch *sw)
|
||||
{
|
||||
|
|
@ -1084,6 +1121,8 @@ void usb4_switch_remove_ports(struct tb_switch *sw)
|
|||
*
|
||||
* Unlocks USB4 downstream port so that the connection manager can
|
||||
* access the router below this port.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_unlock(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1104,6 +1143,8 @@ int usb4_port_unlock(struct tb_port *port)
|
|||
*
|
||||
* Enables hot plug events on a given port. This is only intended
|
||||
* to be used on lane, DP-IN, and DP-OUT adapters.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_hotplug_enable(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1123,6 +1164,8 @@ int usb4_port_hotplug_enable(struct tb_port *port)
|
|||
* @port: USB4 port to reset
|
||||
*
|
||||
* Issues downstream port reset to @port.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_reset(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1184,6 +1227,8 @@ static int usb4_port_set_configured(struct tb_port *port, bool configured)
|
|||
* @port: USB4 router
|
||||
*
|
||||
* Sets the USB4 link to be configured for power management purposes.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_configure(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1195,6 +1240,8 @@ int usb4_port_configure(struct tb_port *port)
|
|||
* @port: USB4 router
|
||||
*
|
||||
* Sets the USB4 link to be unconfigured for power management purposes.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
void usb4_port_unconfigure(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1229,7 +1276,9 @@ static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
|
|||
* @xd: XDomain that is connected to the port
|
||||
*
|
||||
* Marks the USB4 port as being connected to another host and updates
|
||||
* the link type. Returns %0 in success and negative errno in failure.
|
||||
* the link type.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
|
||||
{
|
||||
|
|
@ -1299,7 +1348,8 @@ static int usb4_port_write_data(struct tb_port *port, const void *data,
|
|||
* @size: Size of @buf
|
||||
*
|
||||
* Reads data from sideband register @reg and copies it into @buf.
|
||||
* Returns %0 in case of success and negative errno in case of failure.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index,
|
||||
u8 reg, void *buf, u8 size)
|
||||
|
|
@ -1350,8 +1400,9 @@ int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, u8 index
|
|||
* @buf: Data to write
|
||||
* @size: Size of @buf
|
||||
*
|
||||
* Writes @buf to sideband register @reg. Returns %0 in case of success
|
||||
* and negative errno in case of failure.
|
||||
* Writes @buf to sideband register @reg.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
|
||||
u8 index, u8 reg, const void *buf, u8 size)
|
||||
|
|
@ -1468,8 +1519,7 @@ static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
|
|||
* port does not react on hotplug events anymore. This needs to be
|
||||
* called before retimer access is done when the USB4 links is not up.
|
||||
*
|
||||
* Returns %0 in case of success and negative errno if there was an
|
||||
* error.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_router_offline(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1481,6 +1531,8 @@ int usb4_port_router_offline(struct tb_port *port)
|
|||
* @port: USB4 port
|
||||
*
|
||||
* Makes the USB4 port functional again.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_router_online(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1492,8 +1544,9 @@ int usb4_port_router_online(struct tb_port *port)
|
|||
* @port: USB4 port
|
||||
*
|
||||
* This forces the USB4 port to send broadcast RT transaction which
|
||||
* makes the retimers on the link to assign index to themselves. Returns
|
||||
* %0 in case of success and negative errno if there was an error.
|
||||
* makes the retimers on the link assign index to themselves.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_enumerate_retimers(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1510,6 +1563,8 @@ int usb4_port_enumerate_retimers(struct tb_port *port)
|
|||
*
|
||||
* PORT_CS_18_CPS bit reflects if the link supports CLx including
|
||||
* active cables (if connected on the link).
|
||||
*
|
||||
* Return: %true if Clx is supported, %false otherwise.
|
||||
*/
|
||||
bool usb4_port_clx_supported(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1528,8 +1583,9 @@ bool usb4_port_clx_supported(struct tb_port *port)
|
|||
* usb4_port_asym_supported() - If the port supports asymmetric link
|
||||
* @port: USB4 port
|
||||
*
|
||||
* Checks if the port and the cable supports asymmetric link and returns
|
||||
* %true in that case.
|
||||
* Checks if the port and the cable support asymmetric link.
|
||||
*
|
||||
* Return: %true if asymmetric link is supported, %false otherwise.
|
||||
*/
|
||||
bool usb4_port_asym_supported(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1551,6 +1607,8 @@ bool usb4_port_asym_supported(struct tb_port *port)
|
|||
*
|
||||
* Sets USB4 port link width to @width. Can be called for widths where
|
||||
* usb4_port_asym_width_supported() returned @true.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width)
|
||||
{
|
||||
|
|
@ -1595,8 +1653,10 @@ int usb4_port_asym_set_link_width(struct tb_port *port, enum tb_link_width width
|
|||
* (according to what was previously set in tb_port_set_link_width().
|
||||
* Wait for completion of the change.
|
||||
*
|
||||
* Returns %0 in case of success, %-ETIMEDOUT if case of timeout or
|
||||
* a negative errno in case of a failure.
|
||||
* Return:
|
||||
* * %0 - Symmetry change was successful.
|
||||
* * %-ETIMEDOUT - Timeout occurred.
|
||||
* * Negative errno - Other failure occurred.
|
||||
*/
|
||||
int usb4_port_asym_start(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -1640,6 +1700,8 @@ int usb4_port_asym_start(struct tb_port *port)
|
|||
* @ncaps: Number of elements in the caps array
|
||||
*
|
||||
* Reads the USB4 port lane margining capabilities into @caps.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
|
||||
u8 index, u32 *caps, size_t ncaps)
|
||||
|
|
@ -1666,6 +1728,8 @@ int usb4_port_margining_caps(struct tb_port *port, enum usb4_sb_target target,
|
|||
*
|
||||
* Runs hardware lane margining on USB4 port and returns the result in
|
||||
* @results.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
|
||||
u8 index, const struct usb4_port_margining_params *params,
|
||||
|
|
@ -1710,8 +1774,9 @@ int usb4_port_hw_margin(struct tb_port *port, enum usb4_sb_target target,
|
|||
* @results: Data word for the operation completion data
|
||||
*
|
||||
* Runs software lane margining on USB4 port. Read back the error
|
||||
* counters by calling usb4_port_sw_margin_errors(). Returns %0 in
|
||||
* success and negative errno otherwise.
|
||||
* counters by calling usb4_port_sw_margin_errors().
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
|
||||
u8 index, const struct usb4_port_margining_params *params,
|
||||
|
|
@ -1758,7 +1823,8 @@ int usb4_port_sw_margin(struct tb_port *port, enum usb4_sb_target target,
|
|||
* @errors: Error metadata is copied here.
|
||||
*
|
||||
* This reads back the software margining error counters from the port.
|
||||
* Returns %0 in success and negative errno otherwise.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_sw_margin_errors(struct tb_port *port, enum usb4_sb_target target,
|
||||
u8 index, u32 *errors)
|
||||
|
|
@ -1789,6 +1855,8 @@ static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
|
|||
*
|
||||
* Enables sideband channel transations on SBTX. Can be used when USB4
|
||||
* link does not go up, for example if there is no device connected.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
|
||||
{
|
||||
|
|
@ -1816,6 +1884,8 @@ int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
|
|||
*
|
||||
* Disables sideband channel transations on SBTX. The reverse of
|
||||
* usb4_port_retimer_set_inbound_sbtx().
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
|
||||
{
|
||||
|
|
@ -1828,10 +1898,12 @@ int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
|
|||
* @port: USB4 port
|
||||
* @index: Retimer index
|
||||
*
|
||||
* If the retimer at @index is last one (connected directly to the
|
||||
* Type-C port) this function returns %1. If it is not returns %0. If
|
||||
* the retimer is not present returns %-ENODEV. Otherwise returns
|
||||
* negative errno.
|
||||
* Return:
|
||||
* * %1 - Retimer at @index is the last one (connected directly to the
|
||||
* Type-C port).
|
||||
* * %0 - Retimer at @index is not the last one.
|
||||
* * %-ENODEV - Retimer is not present.
|
||||
* * Negative errno - Other failure occurred.
|
||||
*/
|
||||
int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
|
||||
{
|
||||
|
|
@ -1853,9 +1925,11 @@ int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
|
|||
* @port: USB4 port
|
||||
* @index: Retimer index
|
||||
*
|
||||
* If the retimer at @index is last cable retimer this function returns
|
||||
* %1 and %0 if it is on-board retimer. In case a retimer is not present
|
||||
* at @index returns %-ENODEV. Otherwise returns negative errno.
|
||||
* Return:
|
||||
* * %1 - Retimer at @index is the last cable retimer.
|
||||
* * %0 - Retimer at @index is on-board retimer.
|
||||
* * %-ENODEV - Retimer is not present.
|
||||
* * Negative errno - Other failure occurred.
|
||||
*/
|
||||
int usb4_port_retimer_is_cable(struct tb_port *port, u8 index)
|
||||
{
|
||||
|
|
@ -1879,9 +1953,12 @@ int usb4_port_retimer_is_cable(struct tb_port *port, u8 index)
|
|||
*
|
||||
* Reads NVM sector size (in bytes) of a retimer at @index. This
|
||||
* operation can be used to determine whether the retimer supports NVM
|
||||
* upgrade for example. Returns sector size in bytes or negative errno
|
||||
* in case of error. Specifically returns %-ENODEV if there is no
|
||||
* retimer at @index.
|
||||
* upgrade for example.
|
||||
*
|
||||
* Return:
|
||||
* * Sector size in bytes.
|
||||
* * %-ENODEV - If there is no retimer at @index.
|
||||
* * Negative errno - In case of an error.
|
||||
*/
|
||||
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
|
||||
{
|
||||
|
|
@ -1907,7 +1984,7 @@ int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
|
|||
* Exlicitly sets NVM write offset. Normally when writing to NVM this is
|
||||
* done automatically by usb4_port_retimer_nvm_write().
|
||||
*
|
||||
* Returns %0 in success and negative errno if there was a failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
|
||||
unsigned int address)
|
||||
|
|
@ -1960,9 +2037,12 @@ static int usb4_port_retimer_nvm_write_next_block(void *data,
|
|||
* @size: Size in bytes how much to write
|
||||
*
|
||||
* Writes @size bytes from @buf to the retimer NVM. Used for NVM
|
||||
* upgrade. Returns %0 if the data was written successfully and negative
|
||||
* errno in case of failure. Specifically returns %-ENODEV if there is
|
||||
* no retimer at @index.
|
||||
* upgrade.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - If the data was written successfully.
|
||||
* * %-ENODEV - If there is no retimer at @index.
|
||||
* * Negative errno - In case of an error.
|
||||
*/
|
||||
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
|
||||
const void *buf, size_t size)
|
||||
|
|
@ -1988,6 +2068,8 @@ int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int add
|
|||
* successful the retimer restarts with the new NVM and may not have the
|
||||
* index set so one needs to call usb4_port_enumerate_retimers() to
|
||||
* force index to be assigned.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
|
||||
{
|
||||
|
|
@ -2012,9 +2094,9 @@ int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
|
|||
* This can be called after usb4_port_retimer_nvm_authenticate() and
|
||||
* usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
|
||||
*
|
||||
* Returns %0 if the authentication status was successfully read. The
|
||||
* Return: %0 if the authentication status was successfully read. The
|
||||
* completion metadata (the result) is then stored into @status. If
|
||||
* reading the status fails, returns negative errno.
|
||||
* status read fails, returns negative errno.
|
||||
*/
|
||||
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
|
||||
u32 *status)
|
||||
|
|
@ -2082,9 +2164,12 @@ static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
|
|||
* @buf: Data read from NVM is stored here
|
||||
* @size: Number of bytes to read
|
||||
*
|
||||
* Reads retimer NVM and copies the contents to @buf. Returns %0 if the
|
||||
* read was successful and negative errno in case of failure.
|
||||
* Specifically returns %-ENODEV if there is no retimer at @index.
|
||||
* Reads retimer NVM and copies the contents to @buf.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - If the read was successful.
|
||||
* * %-ENODEV - If there is no retimer at @index.
|
||||
* * Negative errno - In case of an error.
|
||||
*/
|
||||
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
|
||||
unsigned int address, void *buf, size_t size)
|
||||
|
|
@ -2108,8 +2193,8 @@ usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
|
|||
* usb4_usb3_port_max_link_rate() - Maximum support USB3 link rate
|
||||
* @port: USB3 adapter port
|
||||
*
|
||||
* Return maximum supported link rate of a USB3 adapter in Mb/s.
|
||||
* Negative errno in case of error.
|
||||
* Return: Maximum supported link rate of a USB3 adapter in Mb/s.
|
||||
* Negative errno in case of an error.
|
||||
*/
|
||||
int usb4_usb3_port_max_link_rate(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -2227,8 +2312,9 @@ static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
|
|||
* @downstream_bw: Allocated downstream bandwidth is stored here
|
||||
*
|
||||
* Stores currently allocated USB3 bandwidth into @upstream_bw and
|
||||
* @downstream_bw in Mb/s. Returns %0 in case of success and negative
|
||||
* errno in failure.
|
||||
* @downstream_bw in Mb/s.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
|
||||
int *downstream_bw)
|
||||
|
|
@ -2330,8 +2416,7 @@ static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
|
|||
* cannot be taken away by CM). The actual new values are returned in
|
||||
* @upstream_bw and @downstream_bw.
|
||||
*
|
||||
* Returns %0 in case of success and negative errno if there was a
|
||||
* failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
|
||||
int *downstream_bw)
|
||||
|
|
@ -2373,7 +2458,7 @@ int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
|
|||
* Releases USB3 allocated bandwidth down to what is actually consumed.
|
||||
* The new bandwidth is returned in @upstream_bw and @downstream_bw.
|
||||
*
|
||||
* Returns 0% in success and negative errno in case of failure.
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
|
||||
int *downstream_bw)
|
||||
|
|
@ -2425,9 +2510,12 @@ static bool is_usb4_dpin(const struct tb_port *port)
|
|||
* @port: DP IN adapter
|
||||
* @cm_id: CM ID to assign
|
||||
*
|
||||
* Sets CM ID for the @port. Returns %0 on success and negative errno
|
||||
* otherwise. Speficially returns %-EOPNOTSUPP if the @port does not
|
||||
* support this.
|
||||
* Sets CM ID for the @port.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - On success.
|
||||
* * %-EOPNOTSUPP - If the @port does not support this.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
|
||||
{
|
||||
|
|
@ -2454,8 +2542,10 @@ int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
|
|||
* supported
|
||||
* @port: DP IN adapter to check
|
||||
*
|
||||
* Can be called to any DP IN adapter. Returns true if the adapter
|
||||
* supports USB4 bandwidth allocation mode, false otherwise.
|
||||
* Can be called to any DP IN adapter.
|
||||
*
|
||||
* Return: %true if the adapter supports USB4 bandwidth allocation mode,
|
||||
* %false otherwise.
|
||||
*/
|
||||
bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -2478,8 +2568,10 @@ bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port)
|
|||
* enabled
|
||||
* @port: DP IN adapter to check
|
||||
*
|
||||
* Can be called to any DP IN adapter. Returns true if the bandwidth
|
||||
* allocation mode has been enabled, false otherwise.
|
||||
* Can be called to any DP IN adapter.
|
||||
*
|
||||
* Return: %true if the bandwidth allocation mode has been enabled,
|
||||
* %false otherwise.
|
||||
*/
|
||||
bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -2504,9 +2596,12 @@ bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port)
|
|||
* @supported: Does the CM support bandwidth allocation mode
|
||||
*
|
||||
* Can be called to any DP IN adapter. Sets or clears the CM support bit
|
||||
* of the DP IN adapter. Returns %0 in success and negative errno
|
||||
* otherwise. Specifically returns %-OPNOTSUPP if the passed in adapter
|
||||
* does not support this.
|
||||
* of the DP IN adapter.
|
||||
*
|
||||
* * Return:
|
||||
* * %0 - On success.
|
||||
* * %-EOPNOTSUPP - If the passed IN adapter does not support this.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
|
||||
bool supported)
|
||||
|
|
@ -2536,8 +2631,12 @@ int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
|
|||
* @port: DP IN adapter
|
||||
*
|
||||
* Reads bandwidth allocation Group ID from the DP IN adapter and
|
||||
* returns it. If the adapter does not support setting Group_ID
|
||||
* %-EOPNOTSUPP is returned.
|
||||
* returns it.
|
||||
*
|
||||
* Return:
|
||||
* * Group ID assigned to adapter @port.
|
||||
* * %-EOPNOTSUPP - If adapter does not support setting GROUP_ID.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int usb4_dp_port_group_id(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -2561,9 +2660,11 @@ int usb4_dp_port_group_id(struct tb_port *port)
|
|||
* @group_id: Group ID for the adapter
|
||||
*
|
||||
* Sets bandwidth allocation mode Group ID for the DP IN adapter.
|
||||
* Returns %0 in case of success and negative errno otherwise.
|
||||
* Specifically returns %-EOPNOTSUPP if the adapter does not support
|
||||
* this.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - On success.
|
||||
* * %-EOPNOTSUPP - If the adapter does not support this.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
|
||||
{
|
||||
|
|
@ -2591,9 +2692,12 @@ int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
|
|||
* @rate: Non-reduced rate in Mb/s is placed here
|
||||
* @lanes: Non-reduced lanes are placed here
|
||||
*
|
||||
* Reads the non-reduced rate and lanes from the DP IN adapter. Returns
|
||||
* %0 in success and negative errno otherwise. Specifically returns
|
||||
* %-EOPNOTSUPP if the adapter does not support this.
|
||||
* Reads the non-reduced rate and lanes from the DP IN adapter.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - On success.
|
||||
* * %-EOPNOTSUPP - If the adapter does not support this.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
|
||||
{
|
||||
|
|
@ -2646,10 +2750,13 @@ int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
|
|||
* @rate: Non-reduced rate in Mb/s
|
||||
* @lanes: Non-reduced lanes
|
||||
*
|
||||
* Before the capabilities reduction this function can be used to set
|
||||
* the non-reduced values for the DP IN adapter. Returns %0 in success
|
||||
* and negative errno otherwise. If the adapter does not support this
|
||||
* %-EOPNOTSUPP is returned.
|
||||
* Before the capabilities reduction, this function can be used to set
|
||||
* the non-reduced values for the DP IN adapter.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - On success.
|
||||
* * %-EOPNOTSUPP - If the adapter does not support this.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
|
||||
{
|
||||
|
|
@ -2708,9 +2815,13 @@ int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
|
|||
* usb4_dp_port_granularity() - Return granularity for the bandwidth values
|
||||
* @port: DP IN adapter
|
||||
*
|
||||
* Reads the programmed granularity from @port. If the DP IN adapter does
|
||||
* not support bandwidth allocation mode returns %-EOPNOTSUPP and negative
|
||||
* errno in other error cases.
|
||||
* Reads the programmed granularity from @port.
|
||||
*
|
||||
* Return:
|
||||
* * Granularity value of a @port.
|
||||
* * %-EOPNOTSUPP - If the DP IN adapter does not support bandwidth
|
||||
* allocation mode.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int usb4_dp_port_granularity(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -2746,8 +2857,12 @@ int usb4_dp_port_granularity(struct tb_port *port)
|
|||
* @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250.
|
||||
*
|
||||
* Sets the granularity used with the estimated, allocated and requested
|
||||
* bandwidth. Returns %0 in success and negative errno otherwise. If the
|
||||
* adapter does not support this %-EOPNOTSUPP is returned.
|
||||
* bandwidth.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - On success.
|
||||
* * %-EOPNOTSUPP - If the adapter does not support this.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
|
||||
{
|
||||
|
|
@ -2788,10 +2903,13 @@ int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
|
|||
* @bw: Estimated bandwidth in Mb/s.
|
||||
*
|
||||
* Sets the estimated bandwidth to @bw. Set the granularity by calling
|
||||
* usb4_dp_port_set_granularity() before calling this. The @bw is round
|
||||
* down to the closest granularity multiplier. Returns %0 in success
|
||||
* and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
|
||||
* the adapter does not support this.
|
||||
* usb4_dp_port_set_granularity() before calling this. The @bw is rounded
|
||||
* down to the closest granularity multiplier.
|
||||
*
|
||||
* Return:
|
||||
* * %0 - On success.
|
||||
* * %-EOPNOTSUPP - If the adapter does not support this.
|
||||
* * Negative errno - Another error occurred.
|
||||
*/
|
||||
int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
|
||||
{
|
||||
|
|
@ -2822,9 +2940,10 @@ int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
|
|||
* usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth
|
||||
* @port: DP IN adapter
|
||||
*
|
||||
* Reads and returns allocated bandwidth for @port in Mb/s (taking into
|
||||
* account the programmed granularity). Returns negative errno in case
|
||||
* of error.
|
||||
* Reads the allocated bandwidth for @port in Mb/s (taking into account
|
||||
* the programmed granularity).
|
||||
*
|
||||
* Return: Allocated bandwidth in Mb/s or negative errno in case of an error.
|
||||
*/
|
||||
int usb4_dp_port_allocated_bandwidth(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -2919,8 +3038,9 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
|
|||
* @bw: New allocated bandwidth in Mb/s
|
||||
*
|
||||
* Communicates the new allocated bandwidth with the DPCD (graphics
|
||||
* driver). Takes into account the programmed granularity. Returns %0 in
|
||||
* success and negative errno in case of error.
|
||||
* driver). Takes into account the programmed granularity.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
|
||||
{
|
||||
|
|
@ -2960,10 +3080,15 @@ int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
|
|||
* @port: DP IN adapter
|
||||
*
|
||||
* Reads the DPCD (graphics driver) requested bandwidth and returns it
|
||||
* in Mb/s. Takes the programmed granularity into account. In case of
|
||||
* error returns negative errno. Specifically returns %-EOPNOTSUPP if
|
||||
* the adapter does not support bandwidth allocation mode, and %ENODATA
|
||||
* if there is no active bandwidth request from the graphics driver.
|
||||
* in Mb/s. Takes the programmed granularity into account.
|
||||
*
|
||||
* Return:
|
||||
* * Requested bandwidth in Mb/s - On success.
|
||||
* * %-EOPNOTSUPP - If the adapter does not support bandwidth allocation
|
||||
* mode.
|
||||
* * %ENODATA - If there is no active bandwidth request from the graphics
|
||||
* driver.
|
||||
* * Negative errno - On failure.
|
||||
*/
|
||||
int usb4_dp_port_requested_bandwidth(struct tb_port *port)
|
||||
{
|
||||
|
|
@ -2995,8 +3120,9 @@ int usb4_dp_port_requested_bandwidth(struct tb_port *port)
|
|||
* @enable: Enable/disable extended encapsulation
|
||||
*
|
||||
* Enables or disables extended encapsulation used in PCIe tunneling. Caller
|
||||
* needs to make sure both adapters support this before enabling. Returns %0 on
|
||||
* success and negative errno otherwise.
|
||||
* needs to make sure both adapters support this before enabling.
|
||||
*
|
||||
* Return: %0 on success, negative errno otherwise.
|
||||
*/
|
||||
int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable)
|
||||
{
|
||||
|
|
|
|||
|
|
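The kernel-doc updates above standardize the USB4 router NVM helpers on explicit "Return:" sections. As a minimal illustrative sketch, not part of this commit, the following shows how an in-kernel caller might act on those documented return values; the function name and the 4 KiB read size are made up for the example, and it assumes the code would live inside drivers/thunderbolt where "tb.h" provides struct tb_switch and the usb4_* helpers.

/*
 * Illustrative sketch only -- not part of this commit. Assumes it lives in
 * drivers/thunderbolt; the function name and the 4 KiB size are arbitrary.
 */
static int example_dump_router_nvm(struct tb_switch *sw)
{
	void *buf;
	int ret;

	/* Per the kernel-doc above: sector size, %-EOPNOTSUPP or other errno */
	ret = usb4_switch_nvm_sector_size(sw);
	if (ret == -EOPNOTSUPP)
		return 0;	/* router has no NVM operations, nothing to dump */
	if (ret < 0)
		return ret;	/* some other failure */

	buf = kzalloc(SZ_4K, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* %0 on success, %-EOPNOTSUPP if NVM is not supported, or negative errno */
	ret = usb4_switch_nvm_read(sw, 0, buf, SZ_4K);

	kfree(buf);
	return ret;
}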
@@ -296,8 +296,9 @@ const struct device_type usb4_port_device_type = {
 * usb4_port_device_add() - Add USB4 port device
 * @port: Lane 0 adapter port to add the USB4 port
 *
 * Creates and registers a USB4 port device for @port. Returns the new
 * USB4 port device pointer or ERR_PTR() in case of error.
 * Creates and registers a USB4 port device for @port.
 *
 * Return: Pointer to &struct usb4_port or ERR_PTR() in case of an error.
 */
struct usb4_port *usb4_port_device_add(struct tb_port *port)
{

@@ -356,6 +357,8 @@ void usb4_port_device_remove(struct usb4_port *usb4)
 * @usb4: USB4 port device
 *
 * Used to resume USB4 port device after sleep state.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int usb4_port_device_resume(struct usb4_port *usb4)
{
@@ -160,7 +160,7 @@ static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
 * This can be used to send a XDomain response message to the other
 * domain. No response for the message is expected.
 *
 * Return: %0 in case of success and negative errno in case of failure
 * Return: %0 on success, negative errno otherwise.
 */
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
	size_t size, enum tb_cfg_pkg_type type)

@@ -212,7 +212,7 @@ static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
 * the other domain. The function waits until the response is received
 * or when timeout triggers. Whichever comes first.
 *
 * Return: %0 in case of success and negative errno in case of failure
 * Return: %0 on success, negative errno otherwise.
 */
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
	size_t request_size, enum tb_cfg_pkg_type request_type,

@@ -613,6 +613,8 @@ static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
 * messages. After this function is called the service driver needs to
 * be able to handle calls to callback whenever a package with the
 * registered protocol is received.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{

@@ -877,6 +879,8 @@ tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
 * @drv: Driver to register
 *
 * Registers new service driver from @drv to the bus.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_register_service_driver(struct tb_service_driver *drv)
{

@@ -1955,6 +1959,8 @@ static void tb_xdomain_link_exit(struct tb_xdomain *xd)
 *
 * Allocates new XDomain structure and returns pointer to that. The
 * object must be released by calling tb_xdomain_put().
 *
 * Return: Pointer to &struct tb_xdomain, %NULL in case of failure.
 */
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
	u64 route, const uuid_t *local_uuid,

@@ -2091,7 +2097,7 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
 * to enable bonding by first enabling the port and waiting for the CL0
 * state.
 *
 * Return: %0 in case of success and negative errno in case of error.
 * Return: %0 on success, negative errno otherwise.
 */
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{

@@ -2171,10 +2177,14 @@ EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
 * @xd: XDomain connection
 * @hopid: Preferred HopID or %-1 for next available
 *
 * Returns allocated HopID or negative errno. Specifically returns
 * %-ENOSPC if there are no more available HopIDs. Returned HopID is
 * guaranteed to be within range supported by the input lane adapter.
 * Returned HopID is guaranteed to be within range supported by the input
 * lane adapter.
 * Call tb_xdomain_release_in_hopid() to release the allocated HopID.
 *
 * Return:
 * * Allocated HopID - On success.
 * * %-ENOSPC - If there are no more available HopIDs.
 * * Negative errno - Another error occurred.
 */
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
{

@@ -2193,10 +2203,14 @@ EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
 * @xd: XDomain connection
 * @hopid: Preferred HopID or %-1 for next available
 *
 * Returns allocated HopID or negative errno. Specifically returns
 * %-ENOSPC if there are no more available HopIDs. Returned HopID is
 * guaranteed to be within range supported by the output lane adapter.
 * Call tb_xdomain_release_in_hopid() to release the allocated HopID.
 * Returned HopID is guaranteed to be within range supported by the
 * output lane adapter.
 * Call tb_xdomain_release_out_hopid() to release the allocated HopID.
 *
 * Return:
 * * Allocated HopID - On success.
 * * %-ENOSPC - If there are no more available HopIDs.
 * * Negative errno - Another error occurred.
 */
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
{

@@ -2245,7 +2259,7 @@ EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
 * path. If a transmit or receive path is not needed, pass %-1 for those
 * parameters.
 *
 * Return: %0 in case of success and negative errno in case of error
 * Return: %0 on success, negative errno otherwise.
 */
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
	int transmit_ring, int receive_path,

@@ -2270,7 +2284,7 @@ EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
 * as path/ring parameter means don't care. Normally the callers should
 * pass the same values here as they do when paths are enabled.
 *
 * Return: %0 in case of success and negative errno in case of error
 * Return: %0 on success, negative errno otherwise.
 */
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
	int transmit_ring, int receive_path,

@@ -2335,6 +2349,8 @@ static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 *
 * Return: Pointer to &struct tb_xdomain or %NULL if not found.
 */
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{

@@ -2364,6 +2380,8 @@ EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 *
 * Return: Pointer to &struct tb_xdomain or %NULL if not found.
 */
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
	u8 depth)

@@ -2393,6 +2411,8 @@ struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
 * to the bus (handshake is still in progress).
 *
 * The caller needs to hold @tb->lock.
 *
 * Return: Pointer to &struct tb_xdomain or %NULL if not found.
 */
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{

@@ -2491,7 +2511,7 @@ static bool remove_directory(const char *key, const struct tb_property_dir *dir)
 * notified so they can re-read properties of this host if they are
 * interested.
 *
 * Return: %0 on success and negative errno on failure
 * Return: %0 on success, negative errno otherwise.
 */
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{

@@ -2562,10 +2582,9 @@ int tb_xdomain_init(void)
	 * Rest of the properties are filled dynamically based on these
	 * when the P2P connection is made.
	 */
	tb_property_add_immediate(xdomain_property_dir, "vendorid",
		PCI_VENDOR_ID_INTEL);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
	tb_property_add_immediate(xdomain_property_dir, "vendorid", 0x1d6b);
	tb_property_add_text(xdomain_property_dir, "vendorid", "Linux");
	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x0004);
	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);

	xdomain_property_block_gen = get_random_u32();
@@ -213,7 +213,7 @@ enum tb_link_width {
 * queried first
 * @service_ids: Used to generate IDs for the services
 * @in_hopids: Input HopIDs for DMA tunneling
 * @out_hopids; Output HopIDs for DMA tunneling
 * @out_hopids: Output HopIDs for DMA tunneling
 * @local_property_block: Local block of properties
 * @local_property_block_gen: Generation of @local_property_block
 * @local_property_block_len: Length of the @local_property_block in dwords

@@ -356,7 +356,7 @@ int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
	unsigned int timeout_msec);

/**
 * tb_protocol_handler - Protocol specific handler
 * struct tb_protocol_handler - Protocol specific handler
 * @uuid: XDomain messages with this UUID are dispatched to this handler
 * @callback: Callback called with the XDomain message. Returning %1
 * here tells the XDomain core that the message was handled

@@ -437,7 +437,7 @@ static inline struct tb_service *tb_to_service(struct device *dev)
}

/**
 * tb_service_driver - Thunderbolt service driver
 * struct tb_service_driver - Thunderbolt service driver
 * @driver: Driver structure
 * @probe: Called when the driver is probed
 * @remove: Called when the driver is removed (optional)

@@ -519,6 +519,7 @@ struct tb_nhi {
 * @head: Head of the ring (write next descriptor here)
 * @tail: Tail of the ring (complete next descriptor here)
 * @descriptors: Allocated descriptors for this ring
 * @descriptors_dma: DMA address of descriptors for this ring
 * @queue: Queue holding frames to be transferred over this ring
 * @in_flight: Queue holding frames that are currently in flight
 * @work: Interrupt work structure

@@ -571,12 +572,12 @@ typedef void (*ring_cb)(struct tb_ring *, struct ring_frame *, bool canceled);

/**
 * enum ring_desc_flags - Flags for DMA ring descriptor
 * %RING_DESC_ISOCH: Enable isonchronous DMA (Tx only)
 * %RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * %RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * %RING_DESC_POSTED: Always set this
 * %RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * %RING_DESC_INTERRUPT: Request an interrupt on completion
 * @RING_DESC_ISOCH: Enable isonchronous DMA (Tx only)
 * @RING_DESC_CRC_ERROR: In frame mode CRC check failed for the frame (Rx only)
 * @RING_DESC_COMPLETED: Descriptor completed (set by NHI)
 * @RING_DESC_POSTED: Always set this
 * @RING_DESC_BUFFER_OVERRUN: RX buffer overrun
 * @RING_DESC_INTERRUPT: Request an interrupt on completion
 */
enum ring_desc_flags {
	RING_DESC_ISOCH = 0x1,

@@ -636,7 +637,7 @@ int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame);
 * If ring_stop() is called after the packet has been enqueued
 * @frame->callback will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if ring_stop has been called. Zero otherwise.
 * Return: %-ESHUTDOWN if ring_stop() has been called, %0 otherwise.
 */
static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
{

@@ -657,7 +658,7 @@ static inline int tb_ring_rx(struct tb_ring *ring, struct ring_frame *frame)
 * If ring_stop() is called after the packet has been enqueued @frame->callback
 * will be called with canceled set to true.
 *
 * Return: Returns %-ESHUTDOWN if ring_stop has been called. Zero otherwise.
 * Return: %-ESHUTDOWN if ring_stop has been called, %0 otherwise.
 */
static inline int tb_ring_tx(struct tb_ring *ring, struct ring_frame *frame)
{

@@ -675,6 +676,8 @@ void tb_ring_poll_complete(struct tb_ring *ring);
 *
 * Use this function when you are mapping DMA for buffers that are
 * passed to the ring for sending/receiving.
 *
 * Return: Pointer to device used for DMA mapping.
 */
static inline struct device *tb_ring_dma_device(struct tb_ring *ring)
{
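The tb_ring_rx()/tb_ring_tx() kernel-doc above now spells out the %-ESHUTDOWN convention. The following is a hedged sketch, not part of this commit, of how a Thunderbolt service driver might queue a receive frame and handle that return value; the function name is hypothetical and error handling is intentionally minimal.

/*
 * Illustrative sketch only -- not part of this commit. The service driver
 * is assumed to own @ring (created with tb_ring_alloc_rx()) and @frame.
 */
#include <linux/thunderbolt.h>

static int example_queue_rx_frame(struct tb_ring *ring, struct ring_frame *frame)
{
	int ret;

	/*
	 * As documented above: %-ESHUTDOWN once the ring has been stopped,
	 * %0 otherwise. Frames already enqueued get their callback invoked
	 * with canceled set to true.
	 */
	ret = tb_ring_rx(ring, frame);
	if (ret == -ESHUTDOWN)
		pr_debug("ring already stopped, frame not queued\n");

	return ret;
}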