mirror of https://github.com/torvalds/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Conflicts:

net/xdp/xsk.c
  0ebc27a4c6 ("xsk: avoid data corruption on cq descriptor number")
  8da7bea7db ("xsk: add indirect call for xsk_destruct_skb")
  30ed05adca ("xsk: use a smaller new lock for shared pool case")
https://lore.kernel.org/20251127105450.4a1665ec@canb.auug.org.au
https://lore.kernel.org/eb4eee14-7e24-4d1b-b312-e9ea738fefee@kernel.org

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit db4029859d

.mailmap | 2
@@ -691,6 +691,8 @@ Sachin Mokashi <sachin.mokashi@intel.com> <sachinx.mokashi@intel.com>
Sachin P Sant <ssant@in.ibm.com>
Sai Prakash Ranjan <quic_saipraka@quicinc.com> <saiprakash.ranjan@codeaurora.org>
Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
Sam Protsenko <semen.protsenko@linaro.org>
Sam Protsenko <semen.protsenko@linaro.org> <semen.protsenko@globallogic.com>
Sam Ravnborg <sam@mars.ravnborg.org>
Sankeerth Billakanti <quic_sbillaka@quicinc.com> <sbillaka@codeaurora.org>
Santosh Shilimkar <santosh.shilimkar@oracle.org>
@@ -50,7 +50,7 @@ patternProperties:
groups:
description:
Name of the pin group to use for the functions.
$ref: /schemas/types.yaml#/definitions/string
items:
enum: [i2c0_grp, i2c1_grp, i2c2_grp, i2c3_grp, i2c4_grp,
i2c5_grp, i2c6_grp, i2c7_grp, i2c8_grp,
spi0_grp, spi0_cs0_grp, spi0_cs1_grp, spi0_cs2_grp,
@@ -62,6 +62,8 @@ patternProperties:
pwm2_gpio10_grp, pwm2_gpio14_grp, pwm2_gpio18_grp,
pwm3_gpio7_grp, pwm3_gpio11_grp, pwm3_gpio15_grp,
pwm3_gpio19_grp, pcmif_out_grp, pcmif_in_grp]
minItems: 1
maxItems: 8

drive-strength:
enum: [2, 4, 6, 8, 16, 24, 32]
@@ -74,6 +74,7 @@ patternProperties:

'^conf':
type: object
unevaluatedProperties: false
description:
Pinctrl node's client devices use subnodes for pin configurations,
which in turn use the standard properties below.
@@ -400,19 +400,30 @@ can report through the rotational axes (absolute and/or relative rx, ry, rz).
All other axes retain their meaning. A device must not mix
regular directional axes and accelerometer axes on the same event node.

INPUT_PROP_HAPTIC_TOUCHPAD
--------------------------
INPUT_PROP_PRESSUREPAD
----------------------

The INPUT_PROP_PRESSUREPAD property indicates that the device provides
simulated haptic feedback (e.g. a vibrator motor situated below the surface)
instead of physical haptic feedback (e.g. a hinge). This property is only set
if the device:

The INPUT_PROP_HAPTIC_TOUCHPAD property indicates that device:
- supports simple haptic auto and manual triggering
- can differentiate between at least 5 fingers
- uses correct resolution for the X/Y (units and value)
- reports correct force per touch, and correct units for them (newtons or grams)
- follows the MT protocol type B

If the simulated haptic feedback is controllable by userspace the device must:

- support simple haptic auto and manual triggering, and
- report correct force per touch, and correct units for them (newtons or grams), and
- provide the EV_FF FF_HAPTIC force feedback effect.

Summing up, such devices follow the MS spec for input devices in
Win8 and Win8.1, and in addition support the Simple haptic controller HID table,
and report correct units for the pressure.
Win8 and Win8.1, and in addition may support the Simple haptic controller HID
table, and report correct units for the pressure.

Where applicable, this property is set in addition to INPUT_PROP_BUTTONPAD, it
does not replace that property.

Guidelines
==========
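As a concrete illustration of the requirements listed in the hunk above, a hypothetical touchpad driver advertising the pressure-pad property could set things up roughly as follows. This is a minimal sketch, not part of this commit: it assumes the INPUT_PROP_PRESSUREPAD constant from this series, and all ranges and resolutions are illustrative.

#include <linux/input.h>
#include <linux/input/mt.h>

/* Hypothetical setup helper for a pressure pad (illustrative values only). */
static int example_pressurepad_setup(struct input_dev *input)
{
	/* Mark the device as a pressure pad, in addition to BUTTONPAD. */
	__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
	__set_bit(INPUT_PROP_PRESSUREPAD, input->propbit);

	/* X/Y axes with a real resolution (units per mm). */
	input_set_abs_params(input, ABS_MT_POSITION_X, 0, 3200, 0, 0);
	input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 2198, 0, 0);
	input_abs_set_res(input, ABS_MT_POSITION_X, 31);
	input_abs_set_res(input, ABS_MT_POSITION_Y, 31);

	/* Per-touch force, reported in grams per the documentation above. */
	input_set_abs_params(input, ABS_MT_PRESSURE, 0, 512, 0, 0);

	/* MT protocol type B with at least five distinguishable contacts. */
	return input_mt_init_slots(input, 5, INPUT_MT_POINTER);
}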
@@ -54,6 +54,7 @@ to matching WMI devices using a struct wmi_device_id table:
::

static const struct wmi_device_id foo_id_table[] = {
	/* Only use uppercase letters! */
	{ "936DA01F-9ABD-4D9D-80C7-02AF85C822A8", NULL },
	{ }
};
MAINTAINERS | 36
@@ -3927,7 +3927,7 @@ F: crypto/async_tx/
F: include/linux/async_tx.h

AT24 EEPROM DRIVER
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-i2c@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -10678,7 +10678,7 @@ F: tools/gpio/gpio-sloppy-logic-analyzer.sh

GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-gpio@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -10695,7 +10695,7 @@ K: GPIOD_FLAGS_BIT_NONEXCLUSIVE
K: devm_gpiod_unhinge

GPIO UAPI
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
R: Kent Gibson <warthog618@gmail.com>
L: linux-gpio@vger.kernel.org
S: Maintained
@@ -13799,6 +13799,7 @@ F: Documentation/admin-guide/mm/kho.rst
F: Documentation/core-api/kho/*
F: include/linux/kexec_handover.h
F: kernel/kexec_handover.c
F: lib/test_kho.c
F: tools/testing/selftests/kho/

KEYS-ENCRYPTED
@@ -15310,7 +15311,7 @@ F: drivers/pwm/pwm-max7360.c
F: include/linux/mfd/max7360.h

MAXIM MAX77650 PMIC MFD DRIVER
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/*/*max77650.yaml
@@ -19919,7 +19920,7 @@ F: drivers/pci/p2pdma.c
F: include/linux/pci-p2pdma.h

PCI POWER CONTROL
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-pci@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pci/pci.git
@@ -20516,7 +20517,7 @@ F: include/linux/powercap.h
F: kernel/configs/nopm.config

POWER SEQUENCING
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-pm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -21319,7 +21320,7 @@ F: Documentation/tee/qtee.rst
F: drivers/tee/qcomtee/

QUALCOMM TRUST ZONE MEMORY ALLOCATOR
M: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: drivers/firmware/qcom/qcom_tzmem.c
@@ -22670,7 +22671,7 @@ F: arch/s390/mm

S390 NETWORK DRIVERS
M: Alexandra Winter <wintera@linux.ibm.com>
R: Aswin Karuvally <aswin@linux.ibm.com>
M: Aswin Karuvally <aswin@linux.ibm.com>
L: linux-s390@vger.kernel.org
L: netdev@vger.kernel.org
S: Supported
@@ -25687,7 +25688,7 @@ F: Documentation/devicetree/bindings/crypto/ti,am62l-dthev2.yaml
F: drivers/crypto/ti/

TI DAVINCI MACHINE SUPPORT
M: Bartosz Golaszewski <brgl@bgdev.pl>
M: Bartosz Golaszewski <brgl@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -27137,7 +27138,7 @@ S: Maintained
F: drivers/char/virtio_console.c
F: include/uapi/linux/virtio_console.h

VIRTIO CORE AND NET DRIVERS
VIRTIO CORE
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
R: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
@@ -27150,7 +27151,6 @@ F: Documentation/devicetree/bindings/virtio/
F: Documentation/driver-api/virtio/
F: drivers/block/virtio_blk.c
F: drivers/crypto/virtio/
F: drivers/net/virtio_net.c
F: drivers/vdpa/
F: drivers/virtio/
F: include/linux/vdpa.h
@@ -27159,7 +27159,6 @@ F: include/linux/vringh.h
F: include/uapi/linux/virtio_*.h
F: net/vmw_vsock/virtio*
F: tools/virtio/
F: tools/testing/selftests/drivers/net/virtio_net/

VIRTIO CRYPTO DRIVER
M: Gonglei <arei.gonglei@huawei.com>
@@ -27271,6 +27270,19 @@ W: https://virtio-mem.gitlab.io/
F: drivers/virtio/virtio_mem.c
F: include/uapi/linux/virtio_mem.h

VIRTIO NET DRIVER
M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
R: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
R: Eugenio Pérez <eperezma@redhat.com>
L: netdev@vger.kernel.org
L: virtualization@lists.linux.dev
S: Maintained
F: drivers/net/virtio_net.c
F: include/linux/virtio_net.h
F: include/uapi/linux/virtio_net.h
F: tools/testing/selftests/drivers/net/virtio_net/

VIRTIO PMEM DRIVER
M: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
L: virtualization@lists.linux.dev
Makefile | 2

@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 18
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Baby Opossum Posse

# *DOCUMENTATION*
@@ -351,16 +351,6 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
* as long as we take care not to create a writable
* mapping for executable code.
*/
fallthrough;

case EFI_ACPI_MEMORY_NVS:
/*
* ACPI NVS marks an area reserved for use by the
* firmware, even after exiting the boot service.
* This may be used by the firmware for sharing dynamic
* tables/data (e.g., ACPI CCEL) with the OS. Map it
* as read-only.
*/
prot = PAGE_KERNEL_RO;
break;
@@ -1032,6 +1032,8 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)

if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
/* No point mitigating Spectre-BHB alone. */
} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
/* Do nothing */
} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
state = SPECTRE_MITIGATED;
set_bit(BHB_HW, &system_bhb_mitigations);
@ -55,6 +55,27 @@ enum cpu_type_enum {
|
|||
CPU_LAST
|
||||
};
|
||||
|
||||
static inline char *id_to_core_name(unsigned int id)
|
||||
{
|
||||
if ((id & PRID_COMP_MASK) != PRID_COMP_LOONGSON)
|
||||
return "Unknown";
|
||||
|
||||
switch (id & PRID_SERIES_MASK) {
|
||||
case PRID_SERIES_LA132:
|
||||
return "LA132";
|
||||
case PRID_SERIES_LA264:
|
||||
return "LA264";
|
||||
case PRID_SERIES_LA364:
|
||||
return "LA364";
|
||||
case PRID_SERIES_LA464:
|
||||
return "LA464";
|
||||
case PRID_SERIES_LA664:
|
||||
return "LA664";
|
||||
default:
|
||||
return "Unknown";
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* !__ASSEMBLER__ */
|
||||
|
||||
/*
|
||||
|
|
|
|||
|
|
@ -10,10 +10,6 @@
|
|||
|
||||
#include <linux/types.h>
|
||||
|
||||
#ifndef __KERNEL__
|
||||
#include <stdint.h>
|
||||
#endif
|
||||
|
||||
/*
|
||||
* For PTRACE_{POKE,PEEK}USR. 0 - 31 are GPRs,
|
||||
* 32 is syscall's original ARG0, 33 is PC, 34 is BADVADDR.
|
||||
|
|
@ -41,44 +37,44 @@ struct user_pt_regs {
|
|||
} __attribute__((aligned(8)));
|
||||
|
||||
struct user_fp_state {
|
||||
uint64_t fpr[32];
|
||||
uint64_t fcc;
|
||||
uint32_t fcsr;
|
||||
__u64 fpr[32];
|
||||
__u64 fcc;
|
||||
__u32 fcsr;
|
||||
};
|
||||
|
||||
struct user_lsx_state {
|
||||
/* 32 registers, 128 bits width per register. */
|
||||
uint64_t vregs[32*2];
|
||||
__u64 vregs[32*2];
|
||||
};
|
||||
|
||||
struct user_lasx_state {
|
||||
/* 32 registers, 256 bits width per register. */
|
||||
uint64_t vregs[32*4];
|
||||
__u64 vregs[32*4];
|
||||
};
|
||||
|
||||
struct user_lbt_state {
|
||||
uint64_t scr[4];
|
||||
uint32_t eflags;
|
||||
uint32_t ftop;
|
||||
__u64 scr[4];
|
||||
__u32 eflags;
|
||||
__u32 ftop;
|
||||
};
|
||||
|
||||
struct user_watch_state {
|
||||
uint64_t dbg_info;
|
||||
__u64 dbg_info;
|
||||
struct {
|
||||
uint64_t addr;
|
||||
uint64_t mask;
|
||||
uint32_t ctrl;
|
||||
uint32_t pad;
|
||||
__u64 addr;
|
||||
__u64 mask;
|
||||
__u32 ctrl;
|
||||
__u32 pad;
|
||||
} dbg_regs[8];
|
||||
};
|
||||
|
||||
struct user_watch_state_v2 {
|
||||
uint64_t dbg_info;
|
||||
__u64 dbg_info;
|
||||
struct {
|
||||
uint64_t addr;
|
||||
uint64_t mask;
|
||||
uint32_t ctrl;
|
||||
uint32_t pad;
|
||||
__u64 addr;
|
||||
__u64 mask;
|
||||
__u32 ctrl;
|
||||
__u32 pad;
|
||||
} dbg_regs[14];
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -277,7 +277,7 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int
|
|||
uint32_t config;
|
||||
uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
|
||||
uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);
|
||||
const char *core_name = "Unknown";
|
||||
const char *core_name = id_to_core_name(c->processor_id);
|
||||
|
||||
switch (BIT(fls(c->isa_level) - 1)) {
|
||||
case LOONGARCH_CPU_ISA_LA32R:
|
||||
|
|
@ -291,35 +291,23 @@ static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int
|
|||
break;
|
||||
}
|
||||
|
||||
switch (c->processor_id & PRID_SERIES_MASK) {
|
||||
case PRID_SERIES_LA132:
|
||||
core_name = "LA132";
|
||||
break;
|
||||
case PRID_SERIES_LA264:
|
||||
core_name = "LA264";
|
||||
break;
|
||||
case PRID_SERIES_LA364:
|
||||
core_name = "LA364";
|
||||
break;
|
||||
case PRID_SERIES_LA464:
|
||||
core_name = "LA464";
|
||||
break;
|
||||
case PRID_SERIES_LA664:
|
||||
core_name = "LA664";
|
||||
break;
|
||||
}
|
||||
|
||||
pr_info("%s Processor probed (%s Core)\n", __cpu_family[cpu], core_name);
|
||||
|
||||
if (!cpu_has_iocsr)
|
||||
if (!cpu_has_iocsr) {
|
||||
__cpu_full_name[cpu] = "Unknown";
|
||||
return;
|
||||
|
||||
if (!__cpu_full_name[cpu])
|
||||
__cpu_full_name[cpu] = cpu_full_name;
|
||||
}
|
||||
|
||||
*vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
|
||||
*cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
|
||||
|
||||
if (!__cpu_full_name[cpu]) {
|
||||
if (((char *)vendor)[0] == 0)
|
||||
__cpu_full_name[cpu] = "Unknown";
|
||||
else
|
||||
__cpu_full_name[cpu] = cpu_full_name;
|
||||
}
|
||||
|
||||
config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
|
||||
if (config & IOCSRF_CSRIPI)
|
||||
c->options |= LOONGARCH_CPU_CSRIPI;
|
||||
|
|
|
|||
|
|
@ -237,6 +237,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
|
|||
#ifdef CONFIG_SMP
|
||||
crash_smp_send_stop();
|
||||
#endif
|
||||
machine_kexec_mask_interrupts();
|
||||
cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
|
||||
|
||||
pr_info("Starting crashdump kernel...\n");
|
||||
|
|
@ -274,6 +275,7 @@ void machine_kexec(struct kimage *image)
|
|||
|
||||
/* We do not want to be bothered. */
|
||||
local_irq_disable();
|
||||
machine_kexec_mask_interrupts();
|
||||
|
||||
pr_notice("EFI boot flag: 0x%lx\n", efi_boot);
|
||||
pr_notice("Command line addr: 0x%lx\n", cmdline_ptr);
|
||||
|
|
|
|||
|
|
@ -158,35 +158,9 @@ static void __init node_mem_init(unsigned int node)
|
|||
|
||||
#ifdef CONFIG_ACPI_NUMA
|
||||
|
||||
/*
|
||||
* add_numamem_region
|
||||
*
|
||||
* Add a uasable memory region described by BIOS. The
|
||||
* routine gets each intersection between BIOS's region
|
||||
* and node's region, and adds them into node's memblock
|
||||
* pool.
|
||||
*
|
||||
*/
|
||||
static void __init add_numamem_region(u64 start, u64 end, u32 type)
|
||||
{
|
||||
u32 node = pa_to_nid(start);
|
||||
u64 size = end - start;
|
||||
static unsigned long num_physpages;
|
||||
|
||||
if (start >= end) {
|
||||
pr_debug("Invalid region: %016llx-%016llx\n", start, end);
|
||||
return;
|
||||
}
|
||||
|
||||
num_physpages += (size >> PAGE_SHIFT);
|
||||
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
|
||||
node, type, start, size);
|
||||
pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
|
||||
start >> PAGE_SHIFT, end >> PAGE_SHIFT, num_physpages);
|
||||
memblock_set_node(start, size, &memblock.memory, node);
|
||||
}
|
||||
|
||||
static void __init init_node_memblock(void)
|
||||
static void __init info_node_memblock(void)
|
||||
{
|
||||
u32 mem_type;
|
||||
u64 mem_end, mem_start, mem_size;
|
||||
|
|
@ -206,12 +180,20 @@ static void __init init_node_memblock(void)
|
|||
case EFI_BOOT_SERVICES_DATA:
|
||||
case EFI_PERSISTENT_MEMORY:
|
||||
case EFI_CONVENTIONAL_MEMORY:
|
||||
add_numamem_region(mem_start, mem_end, mem_type);
|
||||
num_physpages += (mem_size >> PAGE_SHIFT);
|
||||
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
|
||||
(u32)pa_to_nid(mem_start), mem_type, mem_start, mem_size);
|
||||
pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
|
||||
mem_start >> PAGE_SHIFT, mem_end >> PAGE_SHIFT, num_physpages);
|
||||
break;
|
||||
case EFI_PAL_CODE:
|
||||
case EFI_UNUSABLE_MEMORY:
|
||||
case EFI_ACPI_RECLAIM_MEMORY:
|
||||
add_numamem_region(mem_start, mem_end, mem_type);
|
||||
num_physpages += (mem_size >> PAGE_SHIFT);
|
||||
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
|
||||
(u32)pa_to_nid(mem_start), mem_type, mem_start, mem_size);
|
||||
pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
|
||||
mem_start >> PAGE_SHIFT, mem_end >> PAGE_SHIFT, num_physpages);
|
||||
fallthrough;
|
||||
case EFI_RESERVED_TYPE:
|
||||
case EFI_RUNTIME_SERVICES_CODE:
|
||||
|
|
@ -249,22 +231,16 @@ int __init init_numa_memory(void)
|
|||
for (i = 0; i < NR_CPUS; i++)
|
||||
set_cpuid_to_node(i, NUMA_NO_NODE);
|
||||
|
||||
numa_reset_distance();
|
||||
nodes_clear(numa_nodes_parsed);
|
||||
nodes_clear(node_possible_map);
|
||||
nodes_clear(node_online_map);
|
||||
WARN_ON(memblock_clear_hotplug(0, PHYS_ADDR_MAX));
|
||||
|
||||
/* Parse SRAT and SLIT if provided by firmware. */
|
||||
ret = acpi_disabled ? fake_numa_init() : acpi_numa_init();
|
||||
if (!acpi_disabled)
|
||||
ret = numa_memblks_init(acpi_numa_init, false);
|
||||
else
|
||||
ret = numa_memblks_init(fake_numa_init, false);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
node_possible_map = numa_nodes_parsed;
|
||||
if (WARN_ON(nodes_empty(node_possible_map)))
|
||||
return -EINVAL;
|
||||
|
||||
init_node_memblock();
|
||||
info_node_memblock();
|
||||
if (!memblock_validate_numa_coverage(SZ_1M))
|
||||
return -EINVAL;
|
||||
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
|||
{
|
||||
unsigned long n = (unsigned long) v - 1;
|
||||
unsigned int isa = cpu_data[n].isa_level;
|
||||
unsigned int prid = cpu_data[n].processor_id;
|
||||
unsigned int version = cpu_data[n].processor_id & 0xff;
|
||||
unsigned int fp_version = cpu_data[n].fpu_vers;
|
||||
|
||||
|
|
@ -37,6 +38,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
|||
seq_printf(m, "global_id\t\t: %d\n", cpu_data[n].global_id);
|
||||
seq_printf(m, "CPU Family\t\t: %s\n", __cpu_family[n]);
|
||||
seq_printf(m, "Model Name\t\t: %s\n", __cpu_full_name[n]);
|
||||
seq_printf(m, "PRID\t\t\t: %s (%08x)\n", id_to_core_name(prid), prid);
|
||||
seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version);
|
||||
seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version);
|
||||
seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n",
|
||||
|
|
|
|||
|
|
@ -1624,6 +1624,9 @@ static int __arch_prepare_bpf_trampoline(struct jit_ctx *ctx, struct bpf_tramp_i
|
|||
/* Direct jump skips 5 NOP instructions */
|
||||
else if (is_bpf_text_address((unsigned long)orig_call))
|
||||
orig_call += LOONGARCH_BPF_FENTRY_NBYTES;
|
||||
/* Module tracing not supported - cause kernel lockups */
|
||||
else if (is_module_text_address((unsigned long)orig_call))
|
||||
return -ENOTSUPP;
|
||||
|
||||
if (flags & BPF_TRAMP_F_CALL_ORIG) {
|
||||
move_addr(ctx, LOONGARCH_GPR_A0, (const u64)im);
|
||||
|
|
|
|||
|
|
@ -50,11 +50,11 @@ static int __init pcibios_init(void)
|
|||
*/
|
||||
lsize = cpu_last_level_cache_line_size();
|
||||
|
||||
BUG_ON(!lsize);
|
||||
|
||||
if (lsize) {
|
||||
pci_dfl_cache_line_size = lsize >> 2;
|
||||
|
||||
pr_debug("PCI: pci_cache_line_size set to %d bytes\n", lsize);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ cpus: cpus {
|
|||
|
||||
cpu@0 {
|
||||
device_type = "cpu";
|
||||
compatible = "mips,mips24KEc";
|
||||
compatible = "mips,mips34Kc";
|
||||
reg = <0>;
|
||||
};
|
||||
};
|
||||
|
|
|
|||
|
|
@ -692,7 +692,7 @@ unsigned long mips_stack_top(void)
|
|||
/* Space for the VDSO, data page & GIC user page */
|
||||
if (current->thread.abi) {
|
||||
top -= PAGE_ALIGN(current->thread.abi->vdso->size);
|
||||
top -= PAGE_SIZE;
|
||||
top -= VDSO_NR_PAGES * PAGE_SIZE;
|
||||
top -= mips_gic_present() ? PAGE_SIZE : 0;
|
||||
|
||||
/* Space to randomize the VDSO base */
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/sort.h>
|
||||
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/cpu-type.h>
|
||||
|
|
@ -508,53 +509,77 @@ static int __init set_ntlb(char *str)
|
|||
|
||||
__setup("ntlb=", set_ntlb);
|
||||
|
||||
/* Initialise all TLB entries with unique values */
|
||||
|
||||
/* Comparison function for EntryHi VPN fields. */
|
||||
static int r4k_vpn_cmp(const void *a, const void *b)
|
||||
{
|
||||
long v = *(unsigned long *)a - *(unsigned long *)b;
|
||||
int s = sizeof(long) > sizeof(int) ? sizeof(long) * 8 - 1: 0;
|
||||
return s ? (v != 0) | v >> s : v;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialise all TLB entries with unique values that do not clash with
|
||||
* what we have been handed over and what we'll be using ourselves.
|
||||
*/
|
||||
static void r4k_tlb_uniquify(void)
|
||||
{
|
||||
int entry = num_wired_entries();
|
||||
unsigned long tlb_vpns[1 << MIPS_CONF1_TLBS_SIZE];
|
||||
int tlbsize = current_cpu_data.tlbsize;
|
||||
int start = num_wired_entries();
|
||||
unsigned long vpn_mask;
|
||||
int cnt, ent, idx, i;
|
||||
|
||||
vpn_mask = GENMASK(cpu_vmbits - 1, 13);
|
||||
vpn_mask |= IS_ENABLED(CONFIG_64BIT) ? 3ULL << 62 : 1 << 31;
|
||||
|
||||
htw_stop();
|
||||
|
||||
for (i = start, cnt = 0; i < tlbsize; i++, cnt++) {
|
||||
unsigned long vpn;
|
||||
|
||||
write_c0_index(i);
|
||||
mtc0_tlbr_hazard();
|
||||
tlb_read();
|
||||
tlb_read_hazard();
|
||||
vpn = read_c0_entryhi();
|
||||
vpn &= vpn_mask & PAGE_MASK;
|
||||
tlb_vpns[cnt] = vpn;
|
||||
|
||||
/* Prevent any large pages from overlapping regular ones. */
|
||||
write_c0_pagemask(read_c0_pagemask() & PM_DEFAULT_MASK);
|
||||
mtc0_tlbw_hazard();
|
||||
tlb_write_indexed();
|
||||
tlbw_use_hazard();
|
||||
}
|
||||
|
||||
sort(tlb_vpns, cnt, sizeof(tlb_vpns[0]), r4k_vpn_cmp, NULL);
|
||||
|
||||
write_c0_pagemask(PM_DEFAULT_MASK);
|
||||
write_c0_entrylo0(0);
|
||||
write_c0_entrylo1(0);
|
||||
|
||||
while (entry < current_cpu_data.tlbsize) {
|
||||
unsigned long asid_mask = cpu_asid_mask(¤t_cpu_data);
|
||||
unsigned long asid = 0;
|
||||
int idx;
|
||||
idx = 0;
|
||||
ent = tlbsize;
|
||||
for (i = start; i < tlbsize; i++)
|
||||
while (1) {
|
||||
unsigned long entryhi, vpn;
|
||||
|
||||
/* Skip wired MMID to make ginvt_mmid work */
|
||||
if (cpu_has_mmid)
|
||||
asid = MMID_KERNEL_WIRED + 1;
|
||||
entryhi = UNIQUE_ENTRYHI(ent);
|
||||
vpn = entryhi & vpn_mask & PAGE_MASK;
|
||||
|
||||
/* Check for match before using UNIQUE_ENTRYHI */
|
||||
do {
|
||||
if (cpu_has_mmid) {
|
||||
write_c0_memorymapid(asid);
|
||||
write_c0_entryhi(UNIQUE_ENTRYHI(entry));
|
||||
} else {
|
||||
write_c0_entryhi(UNIQUE_ENTRYHI(entry) | asid);
|
||||
}
|
||||
mtc0_tlbw_hazard();
|
||||
tlb_probe();
|
||||
tlb_probe_hazard();
|
||||
idx = read_c0_index();
|
||||
/* No match or match is on current entry */
|
||||
if (idx < 0 || idx == entry)
|
||||
break;
|
||||
/*
|
||||
* If we hit a match, we need to try again with
|
||||
* a different ASID.
|
||||
*/
|
||||
asid++;
|
||||
} while (asid < asid_mask);
|
||||
|
||||
if (idx >= 0 && idx != entry)
|
||||
panic("Unable to uniquify TLB entry %d", idx);
|
||||
|
||||
write_c0_index(entry);
|
||||
if (idx >= cnt || vpn < tlb_vpns[idx]) {
|
||||
write_c0_entryhi(entryhi);
|
||||
write_c0_index(i);
|
||||
mtc0_tlbw_hazard();
|
||||
tlb_write_indexed();
|
||||
entry++;
|
||||
ent++;
|
||||
break;
|
||||
} else if (vpn == tlb_vpns[idx]) {
|
||||
ent++;
|
||||
} else {
|
||||
idx++;
|
||||
}
|
||||
}
|
||||
|
||||
tlbw_use_hazard();
|
||||
|
|
@ -602,6 +627,7 @@ static void r4k_tlb_configure(void)
|
|||
|
||||
/* From this point on the ARC firmware is dead. */
|
||||
r4k_tlb_uniquify();
|
||||
local_flush_tlb_all();
|
||||
|
||||
/* Did I tell you that ARC SUCKS? */
|
||||
}
|
||||
|
|
|
|||
|
|
@ -241,16 +241,22 @@ void __init prom_init(void)
|
|||
#endif
|
||||
|
||||
/*
|
||||
* Setup the Malta max (2GB) memory for PCI DMA in host bridge
|
||||
* in transparent addressing mode.
|
||||
* Set up memory mapping in host bridge for PCI DMA masters,
|
||||
* in transparent addressing mode. For EVA use the Malta
|
||||
* maximum of 2 GiB memory in the alias space at 0x80000000
|
||||
* as per PHYS_OFFSET. Otherwise use 256 MiB of memory in
|
||||
* the regular space, avoiding mapping the PCI MMIO window
|
||||
* for DMA as it seems to confuse the system controller's
|
||||
* logic, causing PCI MMIO to stop working.
|
||||
*/
|
||||
mask = PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH;
|
||||
MSC_WRITE(MSC01_PCI_BAR0, mask);
|
||||
MSC_WRITE(MSC01_PCI_HEAD4, mask);
|
||||
mask = PHYS_OFFSET ? PHYS_OFFSET : 0xf0000000;
|
||||
MSC_WRITE(MSC01_PCI_BAR0,
|
||||
mask | PCI_BASE_ADDRESS_MEM_PREFETCH);
|
||||
MSC_WRITE(MSC01_PCI_HEAD4,
|
||||
PHYS_OFFSET | PCI_BASE_ADDRESS_MEM_PREFETCH);
|
||||
|
||||
mask &= MSC01_PCI_BAR0_SIZE_MSK;
|
||||
MSC_WRITE(MSC01_PCI_P2SCMSKL, mask);
|
||||
MSC_WRITE(MSC01_PCI_P2SCMAPL, mask);
|
||||
MSC_WRITE(MSC01_PCI_P2SCMAPL, PHYS_OFFSET);
|
||||
|
||||
/* Don't handle target retries indefinitely. */
|
||||
if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
|
||||
|
|
|
|||
|
|
@ -7,8 +7,8 @@
|
|||
|
||||
#define ANDES_VENDOR_ID 0x31e
|
||||
#define MICROCHIP_VENDOR_ID 0x029
|
||||
#define MIPS_VENDOR_ID 0x127
|
||||
#define SIFIVE_VENDOR_ID 0x489
|
||||
#define THEAD_VENDOR_ID 0x5b7
|
||||
#define MIPS_VENDOR_ID 0x722
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -648,9 +648,9 @@ int sbi_debug_console_read(char *bytes, unsigned int num_bytes)
|
|||
|
||||
void __init sbi_init(void)
|
||||
{
|
||||
bool srst_power_off = false;
|
||||
int ret;
|
||||
|
||||
sbi_set_power_off();
|
||||
ret = sbi_get_spec_version();
|
||||
if (ret > 0)
|
||||
sbi_spec_version = ret;
|
||||
|
|
@ -683,6 +683,7 @@ void __init sbi_init(void)
|
|||
sbi_probe_extension(SBI_EXT_SRST)) {
|
||||
pr_info("SBI SRST extension detected\n");
|
||||
register_platform_power_off(sbi_srst_power_off);
|
||||
srst_power_off = true;
|
||||
sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
|
||||
sbi_srst_reboot_nb.priority = 192;
|
||||
register_restart_handler(&sbi_srst_reboot_nb);
|
||||
|
|
@ -702,4 +703,7 @@ void __init sbi_init(void)
|
|||
__sbi_send_ipi = __sbi_send_ipi_v01;
|
||||
__sbi_rfence = __sbi_rfence_v01;
|
||||
}
|
||||
|
||||
if (!srst_power_off)
|
||||
sbi_set_power_off();
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1325,8 +1325,6 @@ static void uncore_pci_sub_driver_init(void)
|
|||
continue;
|
||||
|
||||
pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
|
||||
if (!pmu)
|
||||
continue;
|
||||
|
||||
if (uncore_pci_get_dev_die_info(pci_sub_dev, &die))
|
||||
continue;
|
||||
|
|
|
|||
|
|
@ -182,6 +182,7 @@ bool einj_initialized __ro_after_init;
|
|||
|
||||
static void __iomem *einj_param;
|
||||
static u32 v5param_size;
|
||||
static u32 v66param_size;
|
||||
static bool is_v2;
|
||||
|
||||
static void einj_exec_ctx_init(struct apei_exec_context *ctx)
|
||||
|
|
@ -283,6 +284,24 @@ static void check_vendor_extension(u64 paddr,
|
|||
acpi_os_unmap_iomem(p, sizeof(v));
|
||||
}
|
||||
|
||||
static u32 einjv2_init(struct einjv2_extension_struct *e)
|
||||
{
|
||||
if (e->revision != 1) {
|
||||
pr_info("Unknown v2 extension revision %u\n", e->revision);
|
||||
return 0;
|
||||
}
|
||||
if (e->length < sizeof(*e) || e->length > PAGE_SIZE) {
|
||||
pr_info(FW_BUG "Bad1 v2 extension length %u\n", e->length);
|
||||
return 0;
|
||||
}
|
||||
if ((e->length - sizeof(*e)) % sizeof(e->component_arr[0])) {
|
||||
pr_info(FW_BUG "Bad2 v2 extension length %u\n", e->length);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return (e->length - sizeof(*e)) / sizeof(e->component_arr[0]);
|
||||
}
|
||||
|
||||
static void __iomem *einj_get_parameter_address(void)
|
||||
{
|
||||
int i;
|
||||
|
|
@ -310,28 +329,21 @@ static void __iomem *einj_get_parameter_address(void)
|
|||
v5param_size = sizeof(v5param);
|
||||
p = acpi_os_map_iomem(pa_v5, sizeof(*p));
|
||||
if (p) {
|
||||
int offset, len;
|
||||
|
||||
memcpy_fromio(&v5param, p, v5param_size);
|
||||
acpi5 = 1;
|
||||
check_vendor_extension(pa_v5, &v5param);
|
||||
if (is_v2 && available_error_type & ACPI65_EINJV2_SUPP) {
|
||||
len = v5param.einjv2_struct.length;
|
||||
offset = offsetof(struct einjv2_extension_struct, component_arr);
|
||||
max_nr_components = (len - offset) /
|
||||
sizeof(v5param.einjv2_struct.component_arr[0]);
|
||||
/*
|
||||
* The first call to acpi_os_map_iomem above does not include the
|
||||
* component array, instead it is used to read and calculate maximum
|
||||
* number of components supported by the system. Below, the mapping
|
||||
* is expanded to include the component array.
|
||||
*/
|
||||
if (available_error_type & ACPI65_EINJV2_SUPP) {
|
||||
struct einjv2_extension_struct *e;
|
||||
|
||||
e = &v5param.einjv2_struct;
|
||||
max_nr_components = einjv2_init(e);
|
||||
|
||||
/* remap including einjv2_extension_struct */
|
||||
acpi_os_unmap_iomem(p, v5param_size);
|
||||
offset = offsetof(struct set_error_type_with_address, einjv2_struct);
|
||||
v5param_size = offset + struct_size(&v5param.einjv2_struct,
|
||||
component_arr, max_nr_components);
|
||||
p = acpi_os_map_iomem(pa_v5, v5param_size);
|
||||
v66param_size = v5param_size - sizeof(*e) + e->length;
|
||||
p = acpi_os_map_iomem(pa_v5, v66param_size);
|
||||
}
|
||||
|
||||
return p;
|
||||
}
|
||||
}
|
||||
|
|
@ -527,6 +539,7 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
|
|||
u64 param3, u64 param4)
|
||||
{
|
||||
struct apei_exec_context ctx;
|
||||
u32 param_size = is_v2 ? v66param_size : v5param_size;
|
||||
u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
|
||||
int i, rc;
|
||||
|
||||
|
|
@ -539,11 +552,11 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
|
|||
if (acpi5) {
|
||||
struct set_error_type_with_address *v5param;
|
||||
|
||||
v5param = kmalloc(v5param_size, GFP_KERNEL);
|
||||
v5param = kmalloc(param_size, GFP_KERNEL);
|
||||
if (!v5param)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy_fromio(v5param, einj_param, v5param_size);
|
||||
memcpy_fromio(v5param, einj_param, param_size);
|
||||
v5param->type = type;
|
||||
if (type & ACPI5_VENDOR_BIT) {
|
||||
switch (vendor_flags) {
|
||||
|
|
@ -601,7 +614,7 @@ static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
|
|||
break;
|
||||
}
|
||||
}
|
||||
memcpy_toio(einj_param, v5param, v5param_size);
|
||||
memcpy_toio(einj_param, v5param, param_size);
|
||||
kfree(v5param);
|
||||
} else {
|
||||
rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
|
||||
|
|
@ -1132,9 +1145,14 @@ static void einj_remove(struct faux_device *fdev)
|
|||
struct apei_exec_context ctx;
|
||||
|
||||
if (einj_param) {
|
||||
acpi_size size = (acpi5) ?
|
||||
v5param_size :
|
||||
sizeof(struct einj_parameter);
|
||||
acpi_size size;
|
||||
|
||||
if (v66param_size)
|
||||
size = v66param_size;
|
||||
else if (acpi5)
|
||||
size = v5param_size;
|
||||
else
|
||||
size = sizeof(struct einj_parameter);
|
||||
|
||||
acpi_os_unmap_iomem(einj_param, size);
|
||||
if (vendor_errors.size)
|
||||
|
|
|
|||
|
|
@ -430,10 +430,10 @@ static int __init gtdt_platform_timer_init(void)
|
|||
continue;
|
||||
|
||||
pdev = platform_device_register_data(NULL, "gtdt-arm-mmio-timer",
|
||||
gwdt_count, &atm,
|
||||
mmio_timer_count, &atm,
|
||||
sizeof(atm));
|
||||
if (IS_ERR(pdev)) {
|
||||
pr_err("Can't register timer %d\n", gwdt_count);
|
||||
pr_err("Can't register timer %d\n", mmio_timer_count);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -166,6 +166,7 @@ static int __acpi_processor_start(struct acpi_device *device)
|
|||
if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
|
||||
dev_dbg(&device->dev, "CPPC data invalid or not present\n");
|
||||
|
||||
if (cpuidle_get_driver() == &acpi_idle_driver)
|
||||
acpi_processor_power_init(pr);
|
||||
|
||||
acpi_pss_perf_init(pr);
|
||||
|
|
@ -262,8 +263,6 @@ static int __init acpi_processor_driver_init(void)
|
|||
if (result < 0)
|
||||
return result;
|
||||
|
||||
acpi_processor_register_idle_driver();
|
||||
|
||||
result = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
|
||||
"acpi/cpu-drv:online",
|
||||
acpi_soft_cpu_online, NULL);
|
||||
|
|
@ -302,7 +301,6 @@ static void __exit acpi_processor_driver_exit(void)
|
|||
|
||||
cpuhp_remove_state_nocalls(hp_online);
|
||||
cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
|
||||
acpi_processor_unregister_idle_driver();
|
||||
driver_unregister(&acpi_processor_driver);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -51,7 +51,7 @@ module_param(latency_factor, uint, 0644);
|
|||
|
||||
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
|
||||
|
||||
static struct cpuidle_driver acpi_idle_driver = {
|
||||
struct cpuidle_driver acpi_idle_driver = {
|
||||
.name = "acpi_idle",
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
|
@ -1357,102 +1357,79 @@ int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void acpi_processor_register_idle_driver(void)
|
||||
{
|
||||
struct acpi_processor *pr;
|
||||
int ret = -ENODEV;
|
||||
int cpu;
|
||||
|
||||
/*
|
||||
* Acpi idle driver is used by all possible CPUs.
|
||||
* Install the idle handler by the processor power info of one in them.
|
||||
* Note that we use previously set idle handler will be used on
|
||||
* platforms that only support C1.
|
||||
*/
|
||||
for_each_cpu(cpu, (struct cpumask *)cpu_possible_mask) {
|
||||
pr = per_cpu(processors, cpu);
|
||||
if (!pr)
|
||||
continue;
|
||||
|
||||
ret = acpi_processor_get_power_info(pr);
|
||||
if (!ret) {
|
||||
pr->flags.power_setup_done = 1;
|
||||
acpi_processor_setup_cpuidle_states(pr);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
pr_debug("No ACPI power information from any CPUs.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
ret = cpuidle_register_driver(&acpi_idle_driver);
|
||||
if (ret) {
|
||||
pr_debug("register %s failed.\n", acpi_idle_driver.name);
|
||||
return;
|
||||
}
|
||||
pr_debug("%s registered with cpuidle.\n", acpi_idle_driver.name);
|
||||
}
|
||||
|
||||
void acpi_processor_unregister_idle_driver(void)
|
||||
{
|
||||
cpuidle_unregister_driver(&acpi_idle_driver);
|
||||
}
|
||||
|
||||
void acpi_processor_power_init(struct acpi_processor *pr)
|
||||
static int acpi_processor_registered;
|
||||
|
||||
int acpi_processor_power_init(struct acpi_processor *pr)
|
||||
{
|
||||
int retval;
|
||||
struct cpuidle_device *dev;
|
||||
|
||||
/*
|
||||
* The code below only works if the current cpuidle driver is the ACPI
|
||||
* idle driver.
|
||||
*/
|
||||
if (cpuidle_get_driver() != &acpi_idle_driver)
|
||||
return;
|
||||
|
||||
if (disabled_by_idle_boot_param())
|
||||
return;
|
||||
return 0;
|
||||
|
||||
acpi_processor_cstate_first_run_checks();
|
||||
|
||||
if (!acpi_processor_get_power_info(pr))
|
||||
pr->flags.power_setup_done = 1;
|
||||
|
||||
if (!pr->flags.power)
|
||||
return;
|
||||
/*
|
||||
* Install the idle handler if processor power management is supported.
|
||||
* Note that we use previously set idle handler will be used on
|
||||
* platforms that only support C1.
|
||||
*/
|
||||
if (pr->flags.power) {
|
||||
/* Register acpi_idle_driver if not already registered */
|
||||
if (!acpi_processor_registered) {
|
||||
acpi_processor_setup_cpuidle_states(pr);
|
||||
retval = cpuidle_register_driver(&acpi_idle_driver);
|
||||
if (retval)
|
||||
return retval;
|
||||
pr_debug("%s registered with cpuidle\n",
|
||||
acpi_idle_driver.name);
|
||||
}
|
||||
|
||||
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
|
||||
if (!dev)
|
||||
return;
|
||||
|
||||
return -ENOMEM;
|
||||
per_cpu(acpi_cpuidle_device, pr->id) = dev;
|
||||
|
||||
acpi_processor_setup_cpuidle_dev(pr, dev);
|
||||
|
||||
/*
|
||||
* Register a cpuidle device for this CPU. The cpuidle driver using
|
||||
* this device is expected to be registered.
|
||||
/* Register per-cpu cpuidle_device. Cpuidle driver
|
||||
* must already be registered before registering device
|
||||
*/
|
||||
if (cpuidle_register_device(dev)) {
|
||||
retval = cpuidle_register_device(dev);
|
||||
if (retval) {
|
||||
if (acpi_processor_registered == 0)
|
||||
cpuidle_unregister_driver(&acpi_idle_driver);
|
||||
|
||||
per_cpu(acpi_cpuidle_device, pr->id) = NULL;
|
||||
kfree(dev);
|
||||
return retval;
|
||||
}
|
||||
acpi_processor_registered++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void acpi_processor_power_exit(struct acpi_processor *pr)
|
||||
int acpi_processor_power_exit(struct acpi_processor *pr)
|
||||
{
|
||||
struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
|
||||
|
||||
if (disabled_by_idle_boot_param())
|
||||
return;
|
||||
return 0;
|
||||
|
||||
if (pr->flags.power) {
|
||||
cpuidle_unregister_device(dev);
|
||||
acpi_processor_registered--;
|
||||
if (acpi_processor_registered == 0)
|
||||
cpuidle_unregister_driver(&acpi_idle_driver);
|
||||
|
||||
kfree(dev);
|
||||
}
|
||||
|
||||
pr->flags.power_setup_done = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");
|
||||
|
|
|
|||
|
|
@ -3006,6 +3006,16 @@ int ata_dev_configure(struct ata_device *dev)
|
|||
}
|
||||
|
||||
dev->n_sectors = ata_id_n_sectors(id);
|
||||
if (ata_id_is_locked(id)) {
|
||||
/*
|
||||
* If Security locked, set capacity to zero to prevent
|
||||
* any I/O, e.g. partition scanning, as any I/O to a
|
||||
* locked drive will result in user visible errors.
|
||||
*/
|
||||
ata_dev_info(dev,
|
||||
"Security locked, setting capacity to zero\n");
|
||||
dev->n_sectors = 0;
|
||||
}
|
||||
|
||||
/* get current R/W Multiple count setting */
|
||||
if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
|
||||
|
|
|
|||
|
|
@ -992,6 +992,13 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
|
|||
return;
|
||||
}
|
||||
|
||||
if (ata_id_is_locked(dev->id)) {
|
||||
/* Security locked */
|
||||
/* LOGICAL UNIT ACCESS NOT AUTHORIZED */
|
||||
ata_scsi_set_sense(dev, cmd, DATA_PROTECT, 0x74, 0x71);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!(qc->flags & ATA_QCFLAG_RTF_FILLED)) {
|
||||
ata_dev_dbg(dev,
|
||||
"Missing result TF: reporting aborted command\n");
|
||||
|
|
@ -4894,8 +4901,10 @@ void ata_scsi_dev_rescan(struct work_struct *work)
|
|||
spin_unlock_irqrestore(ap->lock, flags);
|
||||
if (do_resume) {
|
||||
ret = scsi_resume_device(sdev);
|
||||
if (ret == -EWOULDBLOCK)
|
||||
if (ret == -EWOULDBLOCK) {
|
||||
scsi_device_put(sdev);
|
||||
goto unlock_scan;
|
||||
}
|
||||
dev->flags &= ~ATA_DFLAG_RESUMING;
|
||||
}
|
||||
ret = scsi_rescan_device(sdev);
|
||||
|
|
|
|||
|
|
@ -1374,7 +1374,9 @@ fore200e_open(struct atm_vcc *vcc)
|
|||
|
||||
vcc->dev_data = NULL;
|
||||
|
||||
mutex_lock(&fore200e->rate_mtx);
|
||||
fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
|
||||
mutex_unlock(&fore200e->rate_mtx);
|
||||
|
||||
kfree(fore200e_vcc);
|
||||
return -EINVAL;
|
||||
|
|
|
|||
|
|
@ -888,12 +888,15 @@ static void device_resume_early(struct device *dev, pm_message_t state, bool asy
|
|||
TRACE_DEVICE(dev);
|
||||
TRACE_RESUME(0);
|
||||
|
||||
if (dev->power.syscore || dev->power.direct_complete)
|
||||
if (dev->power.direct_complete)
|
||||
goto Out;
|
||||
|
||||
if (!dev->power.is_late_suspended)
|
||||
goto Out;
|
||||
|
||||
if (dev->power.syscore)
|
||||
goto Skip;
|
||||
|
||||
if (!dpm_wait_for_superior(dev, async))
|
||||
goto Out;
|
||||
|
||||
|
|
@ -926,11 +929,11 @@ static void device_resume_early(struct device *dev, pm_message_t state, bool asy
|
|||
|
||||
Skip:
|
||||
dev->power.is_late_suspended = false;
|
||||
pm_runtime_enable(dev);
|
||||
|
||||
Out:
|
||||
TRACE_RESUME(error);
|
||||
|
||||
pm_runtime_enable(dev);
|
||||
complete_all(&dev->power.completion);
|
||||
|
||||
if (error) {
|
||||
|
|
@ -1615,12 +1618,6 @@ static void device_suspend_late(struct device *dev, pm_message_t state, bool asy
|
|||
TRACE_DEVICE(dev);
|
||||
TRACE_SUSPEND(0);
|
||||
|
||||
/*
|
||||
* Disable runtime PM for the device without checking if there is a
|
||||
* pending resume request for it.
|
||||
*/
|
||||
__pm_runtime_disable(dev, false);
|
||||
|
||||
dpm_wait_for_subordinate(dev, async);
|
||||
|
||||
if (READ_ONCE(async_error))
|
||||
|
|
@ -1631,9 +1628,18 @@ static void device_suspend_late(struct device *dev, pm_message_t state, bool asy
|
|||
goto Complete;
|
||||
}
|
||||
|
||||
if (dev->power.syscore || dev->power.direct_complete)
|
||||
if (dev->power.direct_complete)
|
||||
goto Complete;
|
||||
|
||||
/*
|
||||
* Disable runtime PM for the device without checking if there is a
|
||||
* pending resume request for it.
|
||||
*/
|
||||
__pm_runtime_disable(dev, false);
|
||||
|
||||
if (dev->power.syscore)
|
||||
goto Skip;
|
||||
|
||||
if (dev->pm_domain) {
|
||||
info = "late power domain ";
|
||||
callback = pm_late_early_op(&dev->pm_domain->ops, state);
|
||||
|
|
@ -1664,6 +1670,7 @@ static void device_suspend_late(struct device *dev, pm_message_t state, bool asy
|
|||
WRITE_ONCE(async_error, error);
|
||||
dpm_save_failed_dev(dev_name(dev));
|
||||
pm_dev_err(dev, state, async ? " async late" : " late", error);
|
||||
pm_runtime_enable(dev);
|
||||
goto Complete;
|
||||
}
|
||||
dpm_propagate_wakeup_to_parent(dev);
|
||||
|
|
|
|||
|
|
@ -2711,9 +2711,21 @@ static int btusb_recv_event_realtek(struct hci_dev *hdev, struct sk_buff *skb)
|
|||
|
||||
static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
|
||||
{
|
||||
struct btmtk_data *btmtk_data = hci_get_priv(data->hdev);
|
||||
struct btmtk_data *btmtk_data;
|
||||
int err;
|
||||
|
||||
if (!data->hdev)
|
||||
return;
|
||||
|
||||
btmtk_data = hci_get_priv(data->hdev);
|
||||
if (!btmtk_data)
|
||||
return;
|
||||
|
||||
if (!btmtk_data->isopkt_intf) {
|
||||
bt_dev_err(data->hdev, "Can't claim NULL iso interface");
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* The function usb_driver_claim_interface() is documented to need
|
||||
* locks held if it's not called from a probe routine. The code here
|
||||
|
|
@ -2735,17 +2747,30 @@ static void btusb_mtk_claim_iso_intf(struct btusb_data *data)
|
|||
|
||||
static void btusb_mtk_release_iso_intf(struct hci_dev *hdev)
|
||||
{
|
||||
struct btmtk_data *btmtk_data = hci_get_priv(hdev);
|
||||
struct btmtk_data *btmtk_data;
|
||||
|
||||
if (!hdev)
|
||||
return;
|
||||
|
||||
btmtk_data = hci_get_priv(hdev);
|
||||
if (!btmtk_data)
|
||||
return;
|
||||
|
||||
if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) {
|
||||
usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor);
|
||||
clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags);
|
||||
|
||||
if (btmtk_data->isopkt_skb) {
|
||||
dev_kfree_skb_irq(btmtk_data->isopkt_skb);
|
||||
btmtk_data->isopkt_skb = NULL;
|
||||
}
|
||||
|
||||
if (btmtk_data->isopkt_intf) {
|
||||
usb_set_intfdata(btmtk_data->isopkt_intf, NULL);
|
||||
usb_driver_release_interface(&btusb_driver,
|
||||
btmtk_data->isopkt_intf);
|
||||
btmtk_data->isopkt_intf = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags);
|
||||
|
|
|
|||
|
|
@ -121,11 +121,11 @@ static SUNXI_CCU_GATE_HW(bus_r_ir_rx_clk, "bus-r-ir-rx",
|
|||
&r_apb0_clk.common.hw, 0x1cc, BIT(0), 0);
|
||||
|
||||
static SUNXI_CCU_GATE_HW(bus_r_dma_clk, "bus-r-dma",
|
||||
&r_apb0_clk.common.hw, 0x1dc, BIT(0), 0);
|
||||
&r_apb0_clk.common.hw, 0x1dc, BIT(0), CLK_IS_CRITICAL);
|
||||
static SUNXI_CCU_GATE_HW(bus_r_rtc_clk, "bus-r-rtc",
|
||||
&r_apb0_clk.common.hw, 0x20c, BIT(0), 0);
|
||||
static SUNXI_CCU_GATE_HW(bus_r_cpucfg_clk, "bus-r-cpucfg",
|
||||
&r_apb0_clk.common.hw, 0x22c, BIT(0), 0);
|
||||
&r_apb0_clk.common.hw, 0x22c, BIT(0), CLK_IS_CRITICAL);
|
||||
|
||||
static struct ccu_common *sun55i_a523_r_ccu_clks[] = {
|
||||
&r_ahb_clk.common,
|
||||
|
|
|
|||
|
|
@ -300,7 +300,7 @@ static struct ccu_nm pll_audio0_4x_clk = {
|
|||
.m = _SUNXI_CCU_DIV(16, 6),
|
||||
.sdm = _SUNXI_CCU_SDM(pll_audio0_sdm_table, BIT(24),
|
||||
0x178, BIT(31)),
|
||||
.min_rate = 180000000U,
|
||||
.min_rate = 90000000U,
|
||||
.max_rate = 3000000000U,
|
||||
.common = {
|
||||
.reg = 0x078,
|
||||
|
|
|
|||
|
|
@ -2548,10 +2548,17 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
|
|||
container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb);
|
||||
struct lineinfo_changed_ctx *ctx;
|
||||
struct gpio_desc *desc = data;
|
||||
struct file *fp;
|
||||
|
||||
if (!test_bit(gpio_chip_hwgpio(desc), cdev->watched_lines))
|
||||
return NOTIFY_DONE;
|
||||
|
||||
/* Keep the file descriptor alive for the duration of the notification. */
|
||||
fp = get_file_active(&cdev->fp);
|
||||
if (!fp)
|
||||
/* Chardev file descriptor was or is being released. */
|
||||
return NOTIFY_DONE;
|
||||
|
||||
/*
|
||||
* If this is called from atomic context (for instance: with a spinlock
|
||||
* taken by the atomic notifier chain), any sleeping calls must be done
|
||||
|
|
@ -2575,8 +2582,6 @@ static int lineinfo_changed_notify(struct notifier_block *nb,
|
|||
/* Keep the GPIO device alive until we emit the event. */
|
||||
ctx->gdev = gpio_device_get(desc->gdev);
|
||||
ctx->cdev = cdev;
|
||||
/* Keep the file descriptor alive too. */
|
||||
get_file(ctx->cdev->fp);
|
||||
|
||||
INIT_WORK(&ctx->work, lineinfo_changed_func);
|
||||
queue_work(ctx->gdev->line_state_wq, &ctx->work);
|
||||
|
|
|
|||
|
|
@ -3414,10 +3414,11 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
|
|||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
|
||||
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
|
||||
continue;
|
||||
/* skip CG for VCE/UVD, it's handled specially */
|
||||
/* skip CG for VCE/UVD/VPE, it's handled specially */
|
||||
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
|
||||
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
|
||||
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
|
||||
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VPE &&
|
||||
adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
|
||||
adev->ip_blocks[i].version->funcs->set_powergating_state) {
|
||||
/* enable powergating to save power */
|
||||
|
|
|
|||
|
|
@ -1372,7 +1372,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
|
|||
mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
|
||||
flags |= AMDGPU_PTE_SYSTEM;
|
||||
|
||||
if (ttm->caching == ttm_cached)
|
||||
if (ttm && ttm->caching == ttm_cached)
|
||||
flags |= AMDGPU_PTE_SNOOPED;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -2078,7 +2078,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
|
|||
struct amdgpu_bo *bo = before->bo_va->base.bo;
|
||||
|
||||
amdgpu_vm_it_insert(before, &vm->va);
|
||||
if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
|
||||
if (before->flags & AMDGPU_VM_PAGE_PRT)
|
||||
amdgpu_vm_prt_get(adev);
|
||||
|
||||
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
|
||||
|
|
@ -2093,7 +2093,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
|
|||
struct amdgpu_bo *bo = after->bo_va->base.bo;
|
||||
|
||||
amdgpu_vm_it_insert(after, &vm->va);
|
||||
if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
|
||||
if (after->flags & AMDGPU_VM_PAGE_PRT)
|
||||
amdgpu_vm_prt_get(adev);
|
||||
|
||||
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
|
||||
|
|
|
|||
|
|
@ -5872,9 +5872,9 @@ static void gfx_v11_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
|
|||
if (flags & AMDGPU_IB_PREEMPTED)
|
||||
control |= INDIRECT_BUFFER_PRE_RESUME(1);
|
||||
|
||||
if (vmid)
|
||||
if (vmid && !ring->adev->gfx.rs64_enable)
|
||||
gfx_v11_0_ring_emit_de_meta(ring,
|
||||
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
|
||||
!amdgpu_sriov_vf(ring->adev) && (flags & AMDGPU_IB_PREEMPTED));
|
||||
}
|
||||
|
||||
amdgpu_ring_write(ring, header);
|
||||
|
|
|
|||
|
|
@ -141,7 +141,7 @@ static int vcn_v4_0_3_late_init(struct amdgpu_ip_block *ip_block)
|
|||
adev->vcn.supported_reset =
|
||||
amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]);
|
||||
|
||||
if (amdgpu_dpm_reset_vcn_is_supported(adev))
|
||||
if (amdgpu_dpm_reset_vcn_is_supported(adev) && !amdgpu_sriov_vf(adev))
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -122,7 +122,9 @@ static int vcn_v5_0_1_late_init(struct amdgpu_ip_block *ip_block)
|
|||
|
||||
switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
|
||||
case IP_VERSION(13, 0, 12):
|
||||
if ((adev->psp.sos.fw_version >= 0x00450025) && amdgpu_dpm_reset_vcn_is_supported(adev))
|
||||
if ((adev->psp.sos.fw_version >= 0x00450025) &&
|
||||
amdgpu_dpm_reset_vcn_is_supported(adev) &&
|
||||
!amdgpu_sriov_vf(adev))
|
||||
adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
|
||||
break;
|
||||
default:
|
||||
|
|
|
|||
|
|
@ -3859,6 +3859,97 @@ void amdgpu_dm_update_connector_after_detect(
|
|||
update_subconnector_property(aconnector);
|
||||
}
|
||||
|
||||
static bool are_sinks_equal(const struct dc_sink *sink1, const struct dc_sink *sink2)
|
||||
{
|
||||
if (!sink1 || !sink2)
|
||||
return false;
|
||||
if (sink1->sink_signal != sink2->sink_signal)
|
||||
return false;
|
||||
|
||||
if (sink1->dc_edid.length != sink2->dc_edid.length)
|
||||
return false;
|
||||
|
||||
if (memcmp(sink1->dc_edid.raw_edid, sink2->dc_edid.raw_edid,
|
||||
sink1->dc_edid.length) != 0)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* DOC: hdmi_hpd_debounce_work
|
||||
*
|
||||
* HDMI HPD debounce delay in milliseconds. When an HDMI display toggles HPD
|
||||
* (such as during power save transitions), this delay determines how long to
|
||||
* wait before processing the HPD event. This allows distinguishing between a
|
||||
* physical unplug (>hdmi_hpd_debounce_delay)
|
||||
* and a spontaneous RX HPD toggle (<hdmi_hpd_debounce_delay).
|
||||
*
|
||||
* If the toggle is less than this delay, the driver compares sink capabilities
|
||||
* and permits a hotplug event if they changed.
|
||||
*
|
||||
* The default value of 1500ms was chosen based on experimental testing with
|
||||
* various monitors that exhibit spontaneous HPD toggling behavior.
|
||||
*/
|
||||
static void hdmi_hpd_debounce_work(struct work_struct *work)
|
||||
{
|
||||
struct amdgpu_dm_connector *aconnector =
|
||||
container_of(to_delayed_work(work), struct amdgpu_dm_connector,
|
||||
hdmi_hpd_debounce_work);
|
||||
struct drm_connector *connector = &aconnector->base;
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct amdgpu_device *adev = drm_to_adev(dev);
|
||||
struct dc *dc = aconnector->dc_link->ctx->dc;
|
||||
bool fake_reconnect = false;
|
||||
bool reallow_idle = false;
|
||||
bool ret = false;
|
||||
guard(mutex)(&aconnector->hpd_lock);
|
||||
|
||||
/* Re-detect the display */
|
||||
scoped_guard(mutex, &adev->dm.dc_lock) {
|
||||
if (dc->caps.ips_support && dc->ctx->dmub_srv->idle_allowed) {
|
||||
dc_allow_idle_optimizations(dc, false);
|
||||
reallow_idle = true;
|
||||
}
|
||||
ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
/* Apply workaround delay for certain panels */
|
||||
apply_delay_after_dpcd_poweroff(adev, aconnector->dc_sink);
|
||||
/* Compare sinks to determine if this was a spontaneous HPD toggle */
|
||||
if (are_sinks_equal(aconnector->dc_link->local_sink, aconnector->hdmi_prev_sink)) {
|
||||
/*
|
||||
* Sinks match - this was a spontaneous HDMI HPD toggle.
|
||||
*/
|
||||
drm_dbg_kms(dev, "HDMI HPD: Sink unchanged after debounce, internal re-enable\n");
|
||||
fake_reconnect = true;
|
||||
}
|
||||
|
||||
/* Update connector state */
|
||||
amdgpu_dm_update_connector_after_detect(aconnector);
|
||||
|
||||
drm_modeset_lock_all(dev);
|
||||
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);

/* Only notify OS if sink actually changed */
if (!fake_reconnect && aconnector->base.force == DRM_FORCE_UNSPECIFIED)
drm_kms_helper_hotplug_event(dev);
}

/* Release the cached sink reference */
if (aconnector->hdmi_prev_sink) {
dc_sink_release(aconnector->hdmi_prev_sink);
aconnector->hdmi_prev_sink = NULL;
}

scoped_guard(mutex, &adev->dm.dc_lock) {
if (reallow_idle && dc->caps.ips_support)
dc_allow_idle_optimizations(dc, true);
}
}

static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;

@@ -3868,6 +3959,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
struct dc *dc = aconnector->dc_link->ctx->dc;
bool ret = false;
bool debounce_required = false;

if (adev->dm.disable_hpd_irq)
return;

@@ -3890,6 +3982,14 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
drm_err(adev_to_drm(adev), "KMS: Failed to detect connector\n");

/*
* Check for HDMI disconnect with debounce enabled.
*/
debounce_required = (aconnector->hdmi_hpd_debounce_delay_ms > 0 &&
dc_is_hdmi_signal(aconnector->dc_link->connector_signal) &&
new_connection_type == dc_connection_none &&
aconnector->dc_link->local_sink != NULL);

if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);

@@ -3899,7 +3999,34 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
drm_kms_helper_connector_hotplug_event(connector);
} else if (debounce_required) {
/*
* HDMI disconnect detected - schedule delayed work instead of
* processing immediately. This allows us to coalesce spurious
* HDMI signals from physical unplugs.
*/
drm_dbg_kms(dev, "HDMI HPD: Disconnect detected, scheduling debounce work (%u ms)\n",
aconnector->hdmi_hpd_debounce_delay_ms);

/* Cache the current sink for later comparison */
if (aconnector->hdmi_prev_sink)
dc_sink_release(aconnector->hdmi_prev_sink);
aconnector->hdmi_prev_sink = aconnector->dc_link->local_sink;
if (aconnector->hdmi_prev_sink)
dc_sink_retain(aconnector->hdmi_prev_sink);

/* Schedule delayed detection. */
if (mod_delayed_work(system_wq,
&aconnector->hdmi_hpd_debounce_work,
msecs_to_jiffies(aconnector->hdmi_hpd_debounce_delay_ms)))
drm_dbg_kms(dev, "HDMI HPD: Re-scheduled debounce work\n");

} else {

/* If the aconnector->hdmi_hpd_debounce_work is scheduled, exit early */
if (delayed_work_pending(&aconnector->hdmi_hpd_debounce_work))
return;

scoped_guard(mutex, &adev->dm.dc_lock) {
dc_exit_ips_for_hw_access(dc);
ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

@@ -7388,6 +7515,13 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
if (aconnector->mst_mgr.dev)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);

/* Cancel and flush any pending HDMI HPD debounce work */
cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work);
if (aconnector->hdmi_prev_sink) {
dc_sink_release(aconnector->hdmi_prev_sink);
aconnector->hdmi_prev_sink = NULL;
}

if (aconnector->bl_idx != -1) {
backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
dm->backlight_dev[aconnector->bl_idx] = NULL;

@@ -8549,6 +8683,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
mutex_init(&aconnector->hpd_lock);
mutex_init(&aconnector->handle_mst_msg_ready);

aconnector->hdmi_hpd_debounce_delay_ms = AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS;
INIT_DELAYED_WORK(&aconnector->hdmi_hpd_debounce_work, hdmi_hpd_debounce_work);
aconnector->hdmi_prev_sink = NULL;

/*
* configure support HPD hot plug connector_>polled default value is 0
* which means HPD hot plug not supported

@@ -59,6 +59,7 @@
#define AMDGPU_HDR_MULT_DEFAULT (0x100000000LL)

#define AMDGPU_DM_HDMI_HPD_DEBOUNCE_MS 1500
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"

@@ -819,6 +820,11 @@ struct amdgpu_dm_connector {
bool pack_sdp_v1_3;
enum adaptive_sync_type as_type;
struct amdgpu_hdmi_vsdb_info vsdb_info;

/* HDMI HPD debounce support */
unsigned int hdmi_hpd_debounce_delay_ms;
struct delayed_work hdmi_hpd_debounce_work;
struct dc_sink *hdmi_prev_sink;
};

static inline void amdgpu_dm_set_mst_status(uint8_t *status,
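The hunks above follow the usual delayed-work debounce pattern: a disconnect does not report a hotplug event immediately, it only (re-)arms a delayed work item, and the work handler re-detects the link after the delay and notifies userspace only if the sink really changed. A minimal sketch of that pattern, with hypothetical names and none of the amdgpu specifics:

```c
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct hpd_ctx {
	struct delayed_work debounce_work;
	unsigned int debounce_ms;
};

static void hpd_debounce_fn(struct work_struct *work)
{
	struct hpd_ctx *ctx = container_of(to_delayed_work(work),
					   struct hpd_ctx, debounce_work);
	/* re-detect the link here; notify userspace only on a real change */
	(void)ctx;
}

static void hpd_disconnect_seen(struct hpd_ctx *ctx)
{
	/* coalesce HPD bursts: re-arm the timer instead of queueing more work */
	mod_delayed_work(system_wq, &ctx->debounce_work,
			 msecs_to_jiffies(ctx->debounce_ms));
}
```

mod_delayed_work() is what makes repeated disconnect pulses collapse into one deferred detection, since each new pulse simply pushes the deadline out again.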
@@ -884,26 +884,28 @@ struct dsc_mst_fairness_params {
};

#if defined(CONFIG_DRM_AMD_DC_FP)
static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link)
static uint64_t kbps_to_pbn(int kbps, bool is_peak_pbn)
{
u8 link_coding_cap;
uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B;
uint64_t effective_kbps = (uint64_t)kbps;

link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link);
if (link_coding_cap == DP_128b_132b_ENCODING)
fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B;

return fec_overhead_multiplier_x1000;
if (is_peak_pbn) { // add 0.6% (1006/1000) overhead into effective kbps
effective_kbps *= 1006;
effective_kbps = div_u64(effective_kbps, 1000);
}

static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000)
{
u64 peak_kbps = kbps;
return (uint64_t) DIV64_U64_ROUND_UP(effective_kbps * 64, (54 * 8 * 1000));
}

peak_kbps *= 1006;
peak_kbps *= fec_overhead_multiplier_x1000;
peak_kbps = div_u64(peak_kbps, 1000 * 1000);
return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
static uint32_t pbn_to_kbps(unsigned int pbn, bool with_margin)
{
uint64_t pbn_effective = (uint64_t)pbn;

if (with_margin) // deduct 0.6% (994/1000) overhead from effective pbn
pbn_effective *= (1000000 / PEAK_FACTOR_X1000);
else
pbn_effective *= 1000;

return DIV_U64_ROUND_UP(pbn_effective * 8 * 54, 64);
}

static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,

@@ -974,7 +976,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options);
dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16;

kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
kbps = pbn_to_kbps(pbn, false);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,

@@ -1003,12 +1005,11 @@ static int increase_dsc_bpp(struct drm_atomic_state *state,
int link_timeslots_used;
int fair_pbn_alloc;
int ret = 0;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);

for (i = 0; i < count; i++) {
if (vars[i + k].dsc_enabled) {
initial_slack[i] =
kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn;
kbps_to_pbn(params[i].bw_range.max_kbps, false) - vars[i + k].pbn;
bpp_increased[i] = false;
remaining_to_increase += 1;
} else {

@@ -1104,7 +1105,6 @@ static int try_disable_dsc(struct drm_atomic_state *state,
int next_index;
int remaining_to_try = 0;
int ret;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
int var_pbn;

for (i = 0; i < count; i++) {

@@ -1137,7 +1137,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
var_pbn = vars[next_index].pbn;
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[next_index].pbn = kbps_to_pbn(params[next_index].bw_range.stream_kbps, true);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,

@@ -1197,7 +1197,6 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
int count = 0;
int i, k, ret;
bool debugfs_overwrite = false;
uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link);
struct drm_connector_state *new_conn_state;

memset(params, 0, sizeof(params));

@@ -1278,7 +1277,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port,

@@ -1300,7 +1299,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.min_kbps, false);
vars[i + k].dsc_enabled = true;
vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,

@@ -1308,7 +1307,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret < 0)
return ret;
} else {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
vars[i + k].pbn = kbps_to_pbn(params[i].bw_range.stream_kbps, false);
vars[i + k].dsc_enabled = false;
vars[i + k].bpp_x16 = 0;
ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr,

@@ -1763,18 +1762,6 @@ int pre_validate_dsc(struct drm_atomic_state *state,
return ret;
}

static uint32_t kbps_from_pbn(unsigned int pbn)
{
uint64_t kbps = (uint64_t)pbn;

kbps *= (1000000 / PEAK_FACTOR_X1000);
kbps *= 8;
kbps *= 54;
kbps /= 64;

return (uint32_t)kbps;
}

static bool is_dsc_common_config_possible(struct dc_stream_state *stream,
struct dc_dsc_bw_range *bw_range)
{

@@ -1873,7 +1860,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
dc_link_get_highest_encoding_format(stream->link));
cur_link_settings = stream->link->verified_link_cap;
root_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, &cur_link_settings);
virtual_channel_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
virtual_channel_bw_in_kbps = pbn_to_kbps(aconnector->mst_output_port->full_pbn, true);

/* pick the end to end bw bottleneck */
end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);

@@ -1926,7 +1913,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
immediate_upstream_port = aconnector->mst_output_port->parent->port_parent;

if (immediate_upstream_port) {
virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
virtual_channel_bw_in_kbps = pbn_to_kbps(immediate_upstream_port->full_pbn, true);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
} else {
/* For topology LCT 1 case - only one mstb*/
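The arithmetic behind the new helpers above is small enough to check by hand: PBN = ceil(kbps * 64 / (54 * 8 * 1000)), the reverse conversion multiplies back out, and the "peak" variant adds a 0.6% (1006/1000) margin first. A plain-C illustration of that relationship, not the driver code itself:

```c
#include <stdint.h>

/* Sketch of the kbps <-> PBN relationship used by the hunks above. */
static uint64_t sketch_kbps_to_pbn(uint64_t kbps, int with_margin)
{
	if (with_margin)
		kbps = kbps * 1006 / 1000;	/* add 0.6% overhead */
	/* round up: PBN = ceil(kbps * 64 / (54 * 8 * 1000)) */
	return (kbps * 64 + (54 * 8 * 1000) - 1) / (54 * 8 * 1000);
}

static uint64_t sketch_pbn_to_kbps(uint64_t pbn)
{
	/* no margin: kbps = pbn * 1000 * 8 * 54 / 64 */
	return pbn * 1000 * 8 * 54 / 64;
}
```

Folding the FEC-overhead multiplier out of the old kbps_to_peak_pbn() and into a single boolean-controlled margin is what lets the same helper serve both the fairness loop and the port bandwidth check.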
@@ -394,6 +394,8 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;
else if (!new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000)
new_clocks->ref_dtbclk_khz = 0;

/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything

@@ -435,7 +437,7 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
actual_dtbclk = REG_READ(CLK1_CLK4_CURRENT_CNT);

if (actual_dtbclk) {
if (actual_dtbclk > 590000) {
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
}

@@ -1411,7 +1411,7 @@ static void dccg35_set_dtbclk_dto(
__func__, params->otg_inst, params->pixclk_khz,
params->ref_dtbclk_khz, req_dtbclk_khz, phase, modulo);

} else {
} else if (!params->ref_dtbclk_khz && !req_dtbclk_khz) {
switch (params->otg_inst) {
case 0:
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
@@ -614,6 +614,14 @@ void dcn20_dpp_pg_control(
* DOMAIN11_PGFSM_PWR_STATUS, pwr_status,
* 1, 1000);
*/

/* Force disable cursor on plane powerdown on DPP 5 using dpp_force_disable_cursor */
if (!power_on) {
struct dpp *dpp5 = hws->ctx->dc->res_pool->dpps[dpp_inst];
if (dpp5 && dpp5->funcs->dpp_force_disable_cursor)
dpp5->funcs->dpp_force_disable_cursor(dpp5);
}

break;
default:
BREAK_TO_DEBUGGER();

@@ -1691,7 +1691,7 @@ static bool retrieve_link_cap(struct dc_link *link)
union edp_configuration_cap edp_config_cap;
union dp_downstream_port_present ds_port = { 0 };
enum dc_status status = DC_ERROR_UNEXPECTED;
uint32_t read_dpcd_retry_cnt = 3;
uint32_t read_dpcd_retry_cnt = 20;
int i;
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
const uint32_t post_oui_delay = 30; // 30ms

@@ -1734,12 +1734,13 @@ static bool retrieve_link_cap(struct dc_link *link)
}

dpcd_set_source_specific_data(link);
/* Sink may need to configure internals based on vendor, so allow some

for (i = 0; i < read_dpcd_retry_cnt; i++) {
/*
* Sink may need to configure internals based on vendor, so allow some
* time before proceeding with possibly vendor specific transactions
*/
msleep(post_oui_delay);

for (i = 0; i < read_dpcd_retry_cnt; i++) {
status = core_link_read_dpcd(
link,
DP_DPCD_REV,
@@ -210,7 +210,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
formats_size = sizeof(__u32) * plane->format_count;
if (WARN_ON(!formats_size)) {
/* 0 formats are never expected */
return 0;
return ERR_PTR(-EINVAL);
}

modifiers_size =

@@ -226,7 +226,7 @@ static struct drm_property_blob *create_in_format_blob(struct drm_device *dev,
blob = drm_property_create_blob(dev, blob_size, NULL);
if (IS_ERR(blob))
return NULL;
return blob;

blob_data = blob->data;
blob_data->version = FORMAT_BLOB_CURRENT;
@@ -39,14 +39,12 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder)
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);

/* PTL doesn't have a PHY connected to PORT B; as such,
* there will never be a case where PTL uses PHY B.
* WCL uses PORT A and B with the C10 PHY.
* Reusing the condition for WCL and extending it for PORT B
* should not cause any issues for PTL.
*/
if (display->platform.pantherlake && phy < PHY_C)
return true;
if (display->platform.pantherlake) {
if (display->platform.pantherlake_wildcatlake)
return phy <= PHY_B;
else
return phy == PHY_A;
}

if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C)
return true;

@@ -1404,8 +1404,20 @@ static const struct platform_desc bmg_desc = {
PLATFORM_GROUP(dgfx),
};

static const u16 wcl_ids[] = {
INTEL_WCL_IDS(ID),
0
};

static const struct platform_desc ptl_desc = {
PLATFORM(pantherlake),
.subplatforms = (const struct subplatform_desc[]) {
{
SUBPLATFORM(pantherlake, wildcatlake),
.pciidlist = wcl_ids,
},
{},
}
};

__diag_pop();

@@ -1482,6 +1494,7 @@ static const struct {
INTEL_LNL_IDS(INTEL_DISPLAY_DEVICE, &lnl_desc),
INTEL_BMG_IDS(INTEL_DISPLAY_DEVICE, &bmg_desc),
INTEL_PTL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
INTEL_WCL_IDS(INTEL_DISPLAY_DEVICE, &ptl_desc),
};

static const struct {

@@ -101,7 +101,9 @@ struct pci_dev;
/* Display ver 14.1 (based on GMD ID) */ \
func(battlemage) \
/* Display ver 30 (based on GMD ID) */ \
func(pantherlake)
func(pantherlake) \
func(pantherlake_wildcatlake)

#define __MEMBER(name) unsigned long name:1;
#define __COUNT(x) 1 +

@@ -127,6 +127,9 @@ static bool dmc_firmware_param_disabled(struct intel_display *display)
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE

#define XE3LPD_3002_DMC_PATH DMC_PATH(xe3lpd_3002)
MODULE_FIRMWARE(XE3LPD_3002_DMC_PATH);

#define XE3LPD_DMC_PATH DMC_PATH(xe3lpd)
MODULE_FIRMWARE(XE3LPD_DMC_PATH);

@@ -183,9 +186,10 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size
{
const char *fw_path = NULL;
u32 max_fw_size = 0;

if (DISPLAY_VERx100(display) == 3002 ||
DISPLAY_VERx100(display) == 3000) {
if (DISPLAY_VERx100(display) == 3002) {
fw_path = XE3LPD_3002_DMC_PATH;
max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VERx100(display) == 3000) {
fw_path = XE3LPD_DMC_PATH;
max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VERx100(display) == 2000) {
@@ -159,6 +159,8 @@ nvkm_falcon_fw_dtor(struct nvkm_falcon_fw *fw)
nvkm_memory_unref(&fw->inst);
nvkm_falcon_fw_dtor_sigs(fw);
nvkm_firmware_dtor(&fw->fw);
kfree(fw->boot);
fw->boot = NULL;
}

static const struct nvkm_firmware_func

@@ -360,13 +360,6 @@ static bool radeon_fence_is_signaled(struct dma_fence *f)
if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
return true;

if (down_read_trylock(&rdev->exclusive_lock)) {
radeon_fence_process(rdev, ring);
up_read(&rdev->exclusive_lock);

if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq)
return true;
}
return false;
}

@@ -3148,6 +3148,7 @@ static int tegra_dc_couple(struct tegra_dc *dc)
dc->client.parent = &parent->client;

dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
put_device(companion);
}

return 0;

@@ -913,15 +913,6 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
u32 value;
int err;

/* If the bootloader enabled DSI it needs to be disabled
* in order for the panel initialization commands to be
* properly sent.
*/
value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);

if (value & DSI_POWER_CONTROL_ENABLE)
tegra_dsi_disable(dsi);

err = tegra_dsi_prepare(dsi);
if (err < 0) {
dev_err(dsi->dev, "failed to prepare: %d\n", err);

@@ -114,9 +114,12 @@ int tegra_drm_ioctl_channel_open(struct drm_device *drm, void *data, struct drm_
if (err)
goto put_channel;

if (supported)
if (supported) {
struct pid *pid = get_task_pid(current, PIDTYPE_TGID);
context->memory_context = host1x_memory_context_alloc(
host, client->base.dev, get_task_pid(current, PIDTYPE_TGID));
host, client->base.dev, pid);
put_pid(pid);
}

if (IS_ERR(context->memory_context)) {
if (PTR_ERR(context->memory_context) != -EOPNOTSUPP) {
@@ -13,7 +13,6 @@ config DRM_XE
select TMPFS
select DRM_BUDDY
select DRM_CLIENT_SELECTION
select DRM_EXEC
select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS if DRM_XE_KUNIT_TEST != n
select DRM_PANEL

@@ -49,7 +49,7 @@ static void read_l3cc_table(struct xe_gt *gt,
fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) {
xe_force_wake_put(gt_to_fw(gt), fw_ref);
KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n");
KUNIT_FAIL_AND_ABORT(test, "Forcewake Failed.\n");
}

for (i = 0; i < info->num_mocs_regs; i++) {

@@ -847,22 +847,6 @@ static int xe_irq_msix_init(struct xe_device *xe)
return 0;
}

static irqreturn_t guc2host_irq_handler(int irq, void *arg)
{
struct xe_device *xe = arg;
struct xe_tile *tile;
u8 id;

if (!atomic_read(&xe->irq.enabled))
return IRQ_NONE;

for_each_tile(tile, xe, id)
xe_guc_irq_handler(&tile->primary_gt->uc.guc,
GUC_INTR_GUC2HOST);

return IRQ_HANDLED;
}

static irqreturn_t xe_irq_msix_default_hwe_handler(int irq, void *arg)
{
unsigned int tile_id, gt_id;

@@ -979,7 +963,7 @@ int xe_irq_msix_request_irqs(struct xe_device *xe)
u16 msix;

msix = GUC2HOST_MSIX;
err = xe_irq_msix_request_irq(xe, guc2host_irq_handler, xe,
err = xe_irq_msix_request_irq(xe, xe_irq_handler(xe), xe,
DRIVER_NAME "-guc2host", false, &msix);
if (err)
return err;

@@ -375,6 +375,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_LNL_IDS(INTEL_VGA_DEVICE, &lnl_desc),
INTEL_BMG_IDS(INTEL_VGA_DEVICE, &bmg_desc),
INTEL_PTL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
INTEL_WCL_IDS(INTEL_VGA_DEVICE, &ptl_desc),
{ }
};
MODULE_DEVICE_TABLE(pci, pciidlist);

@@ -3370,7 +3370,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
XE_IOCTL_DBG(xe, prefetch_region &&
op != DRM_XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC &&
!(BIT(prefetch_region) & xe->info.mem_region_mask))) ||
/* Guard against undefined shift in BIT(prefetch_region) */
(prefetch_region >= (sizeof(xe->info.mem_region_mask) * 8) ||
!(BIT(prefetch_region) & xe->info.mem_region_mask)))) ||
XE_IOCTL_DBG(xe, obj &&
op == DRM_XE_VM_BIND_OP_UNMAP) ||
XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) &&
@@ -86,7 +86,7 @@ int hid_haptic_input_configured(struct hid_device *hdev,
if (hi->application == HID_DG_TOUCHPAD) {
if (haptic->auto_trigger_report &&
haptic->manual_trigger_report) {
__set_bit(INPUT_PROP_HAPTIC_TOUCHPAD, hi->input->propbit);
__set_bit(INPUT_PROP_PRESSUREPAD, hi->input->propbit);
return 1;
}
return 0;

@@ -261,6 +261,12 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
case EC_MKBP_EVENT_KEY_MATRIX:
pm_wakeup_event(ckdev->dev, 0);

if (!ckdev->idev) {
dev_warn_once(ckdev->dev,
"Unexpected key matrix event\n");
return NOTIFY_OK;
}

if (ckdev->ec->event_size != ckdev->cols) {
dev_err(ckdev->dev,
"Discarded incomplete key matrix event.\n");

@@ -158,7 +158,7 @@ static int imx_sc_key_probe(struct platform_device *pdev)
return error;
}

error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, &priv);
error = devm_add_action_or_reset(&pdev->dev, imx_sc_key_action, priv);
if (error)
return error;
@@ -63,6 +63,9 @@
#define BUTTON_PRESSED 0xb5
#define COMMAND_VERSION 0xa9

/* 1 Status + 1 Color + 2 X + 2 Y = 6 bytes */
#define NOTETAKER_PACKET_SIZE 6

/* in xy data packet */
#define BATTERY_NO_REPORT 0x40
#define BATTERY_LOW 0x41

@@ -311,6 +314,12 @@ static int pegasus_probe(struct usb_interface *intf,
}

pegasus->data_len = usb_maxpacket(dev, pipe);
if (pegasus->data_len < NOTETAKER_PACKET_SIZE) {
dev_err(&intf->dev, "packet size is too small (%d)\n",
pegasus->data_len);
error = -EINVAL;
goto err_free_mem;
}

pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL,
&pegasus->data_dma);

@@ -796,17 +796,6 @@ int goodix_reset_no_int_sync(struct goodix_ts_data *ts)
usleep_range(6000, 10000); /* T4: > 5ms */

/*
* Put the reset pin back in to input / high-impedance mode to save
* power. Only do this in the non ACPI case since some ACPI boards
* don't have a pull-up, so there the reset pin must stay active-high.
*/
if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_GPIO) {
error = gpiod_direction_input(ts->gpiod_rst);
if (error)
goto error;
}

return 0;

error:

@@ -957,14 +946,6 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
return -EINVAL;
}

/*
* Normally we put the reset pin in input / high-impedance mode to save
* power. But some x86/ACPI boards don't have a pull-up, so for the ACPI
* case, leave the pin as is. This results in the pin not being touched
* at all on x86/ACPI boards, except when needed for error-recover.
*/
ts->gpiod_rst_flags = GPIOD_ASIS;

return devm_acpi_dev_add_driver_gpios(dev, gpio_mapping);
}
#else

@@ -989,12 +970,6 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
return -EINVAL;
dev = &ts->client->dev;

/*
* By default we request the reset pin as input, leaving it in
* high-impedance when not resetting the controller to save power.
*/
ts->gpiod_rst_flags = GPIOD_IN;

ts->avdd28 = devm_regulator_get(dev, "AVDD28");
if (IS_ERR(ts->avdd28))
return dev_err_probe(dev, PTR_ERR(ts->avdd28), "Failed to get AVDD28 regulator\n");

@@ -1019,7 +994,7 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
ts->gpiod_int = gpiod;

/* Get the reset line GPIO pin number */
gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, ts->gpiod_rst_flags);
gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_RST_NAME, GPIOD_ASIS);
if (IS_ERR(gpiod))
return dev_err_probe(dev, PTR_ERR(gpiod), "Failed to get %s GPIO\n",
GOODIX_GPIO_RST_NAME);

@@ -1557,6 +1532,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
static const struct acpi_device_id goodix_acpi_match[] = {
{ "GDIX1001", 0 },
{ "GDIX1002", 0 },
{ "GDIX1003", 0 },
{ "GDX9110", 0 },
{ }
};

@@ -88,7 +88,6 @@ struct goodix_ts_data {
struct gpio_desc *gpiod_rst;
int gpio_count;
int gpio_int_idx;
enum gpiod_flags gpiod_rst_flags;
char id[GOODIX_ID_MAX_LEN + 1];
char cfg_name[64];
u16 version;
@@ -161,8 +161,8 @@ int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
vevent = &veventq->lost_events_header;
goto out_set_header;
}
memcpy(vevent->event_data, event_data, data_len);
vevent->data_len = data_len;
memcpy(vevent->event_data, event_data, data_len);
veventq->num_events++;

out_set_header:

@@ -614,7 +614,6 @@ struct iommufd_veventq {
struct iommufd_eventq common;
struct iommufd_viommu *viommu;
struct list_head node; /* for iommufd_viommu::veventqs */
struct iommufd_vevent lost_events_header;

enum iommu_veventq_type type;
unsigned int depth;

@@ -622,6 +621,9 @@ struct iommufd_veventq {
/* Use common.lock for protection */
u32 num_events;
u32 sequence;

/* Must be last as it ends in a flexible-array member. */
struct iommufd_vevent lost_events_header;
};

static inline struct iommufd_veventq *
@@ -1,3 +1,3 @@
dm-pcache-y := dm_pcache.o cache_dev.o segment.o backing_dev.o cache.o cache_gc.o cache_writeback.o cache_segment.o cache_key.o cache_req.o

obj-m += dm-pcache.o
obj-$(CONFIG_DM_PCACHE) += dm-pcache.o

@@ -181,7 +181,7 @@ static void cache_info_init_default(struct pcache_cache *cache)
{
struct pcache_cache_info *cache_info = &cache->cache_info;

cache_info->header.seq = 0;
memset(cache_info, 0, sizeof(*cache_info));
cache_info->n_segs = cache->cache_dev->seg_num;
cache_info_set_gc_percent(cache_info, PCACHE_CACHE_GC_PERCENT_DEFAULT);
}

@@ -411,7 +411,7 @@ void pcache_cache_stop(struct dm_pcache *pcache)
{
struct pcache_cache *cache = &pcache->cache;

cache_flush(cache);
pcache_cache_flush(cache);

cancel_delayed_work_sync(&cache->gc_work);
flush_work(&cache->clean_work);

@@ -339,7 +339,7 @@ void cache_seg_put(struct pcache_cache_segment *cache_seg);
void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id);

/* cache request*/
int cache_flush(struct pcache_cache *cache);
int pcache_cache_flush(struct pcache_cache *cache);
void miss_read_end_work_fn(struct work_struct *work);
int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *pcache_req);

@@ -790,7 +790,7 @@ static int cache_write(struct pcache_cache *cache, struct pcache_request *pcache
}

/**
* cache_flush - Flush all ksets to persist any pending cache data
* pcache_cache_flush - Flush all ksets to persist any pending cache data
* @cache: Pointer to the cache structure
*
* This function iterates through all ksets associated with the provided `cache`

@@ -802,7 +802,7 @@ static int cache_write(struct pcache_cache *cache, struct pcache_request *pcache
* the respective error code, preventing the flush operation from proceeding to
* subsequent ksets.
*/
int cache_flush(struct pcache_cache *cache)
int pcache_cache_flush(struct pcache_cache *cache)
{
struct pcache_cache_kset *kset;
int ret;

@@ -827,7 +827,7 @@ int pcache_cache_handle_req(struct pcache_cache *cache, struct pcache_request *p
struct bio *bio = pcache_req->bio;

if (unlikely(bio->bi_opf & REQ_PREFLUSH))
return cache_flush(cache);
return pcache_cache_flush(cache);

if (bio_data_dir(bio) == READ)
return cache_read(cache, pcache_req);

@@ -99,7 +99,7 @@ static inline void __must_check *pcache_meta_find_latest(struct pcache_meta_head
/* Update latest if a more recent sequence is found */
if (!latest || pcache_meta_seq_after(meta->seq, seq_latest)) {
seq_latest = meta->seq;
latest = (void *)header + (i * meta_max_size);
latest = meta_addr;
}
}
@@ -320,11 +320,7 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
if (fio->bufs[n])
continue;

fio->bufs[n] = mempool_alloc(&v->fec->prealloc_pool, GFP_NOWAIT);
if (unlikely(!fio->bufs[n])) {
DMERR("failed to allocate FEC buffer");
return -ENOMEM;
}
fio->bufs[n] = mempool_alloc(&v->fec->prealloc_pool, GFP_NOIO);
}

/* try to allocate the maximum number of buffers */

@@ -2005,7 +2005,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
* linear target or multiple linear targets pointing to the same
* device), we can send the flush with data directly to it.
*/
if (map->flush_bypasses_map) {
if (bio->bi_iter.bi_size && map->flush_bypasses_map) {
struct list_head *devices = dm_table_get_devices(map);
if (devices->next == devices->prev)
goto send_preflush_with_data;
@@ -710,6 +710,11 @@ static void rcar_canfd_set_bit_reg(void __iomem *addr, u32 val)
rcar_canfd_update(val, val, addr);
}

static void rcar_canfd_clear_bit_reg(void __iomem *addr, u32 val)
{
rcar_canfd_update(val, 0, addr);
}

static void rcar_canfd_update_bit_reg(void __iomem *addr, u32 mask, u32 val)
{
rcar_canfd_update(mask, val, addr);

@@ -756,25 +761,6 @@ static void rcar_canfd_set_rnc(struct rcar_canfd_global *gpriv, unsigned int ch,
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(w), rnc);
}

static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
{
if (gpriv->info->ch_interface_mode) {
u32 ch, val = gpriv->fdmode ? RCANFD_GEN4_FDCFG_FDOE
: RCANFD_GEN4_FDCFG_CLOE;

for_each_set_bit(ch, &gpriv->channels_mask,
gpriv->info->max_channels)
rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg, val);
} else {
if (gpriv->fdmode)
rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
RCANFD_GRMCFG_RCMC);
else
rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
RCANFD_GRMCFG_RCMC);
}
}

static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
{
struct device *dev = &gpriv->pdev->dev;

@@ -807,6 +793,16 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
/* Reset Global error flags */
rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0);

/* Set the controller into appropriate mode */
if (!gpriv->info->ch_interface_mode) {
if (gpriv->fdmode)
rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
RCANFD_GRMCFG_RCMC);
else
rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG,
RCANFD_GRMCFG_RCMC);
}

/* Transition all Channels to reset mode */
for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) {
rcar_canfd_clear_bit(gpriv->base,

@@ -824,10 +820,23 @@ static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv)
dev_dbg(dev, "channel %u reset failed\n", ch);
return err;
}
}

/* Set the controller into appropriate mode */
rcar_canfd_set_mode(gpriv);
if (gpriv->info->ch_interface_mode) {
/* Do not set CLOE and FDOE simultaneously */
if (!gpriv->fdmode) {
rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
RCANFD_GEN4_FDCFG_FDOE);
rcar_canfd_set_bit_reg(&gpriv->fcbase[ch].cfdcfg,
RCANFD_GEN4_FDCFG_CLOE);
} else {
rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
RCANFD_GEN4_FDCFG_FDOE);
rcar_canfd_clear_bit_reg(&gpriv->fcbase[ch].cfdcfg,
RCANFD_GEN4_FDCFG_CLOE);
}
}
}

return 0;
}
@@ -548,8 +548,8 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF)
goto out;

while ((isrc = priv->read_reg(priv, SJA1000_IR)) &&
(n < SJA1000_MAX_IRQ)) {
while ((n < SJA1000_MAX_IRQ) &&
(isrc = priv->read_reg(priv, SJA1000_IR))) {

status = priv->read_reg(priv, SJA1000_SR);
/* check for absent controller due to hw unplug */

@@ -657,8 +657,8 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
u8 isrc, status;
int n = 0;

while ((isrc = readl(priv->base + SUN4I_REG_INT_ADDR)) &&
(n < SUN4I_CAN_MAX_IRQ)) {
while ((n < SUN4I_CAN_MAX_IRQ) &&
(isrc = readl(priv->base + SUN4I_REG_INT_ADDR))) {
n++;
status = readl(priv->base + SUN4I_REG_STA_ADDR);
@@ -261,7 +261,13 @@ struct canfd_quirk {
u8 quirk;
} __packed;

/* struct gs_host_frame::echo_id == GS_HOST_FRAME_ECHO_ID_RX indicates
* a regular RX'ed CAN frame
*/
#define GS_HOST_FRAME_ECHO_ID_RX 0xffffffff

struct gs_host_frame {
struct_group(header,
u32 echo_id;
__le32 can_id;

@@ -269,6 +275,7 @@ struct gs_host_frame {
u8 channel;
u8 flags;
u8 reserved;
);

union {
DECLARE_FLEX_ARRAY(struct classic_can, classic_can);

@@ -568,6 +575,37 @@ gs_usb_get_echo_skb(struct gs_can *dev, struct sk_buff *skb,
return len;
}

static unsigned int
gs_usb_get_minimum_rx_length(const struct gs_can *dev, const struct gs_host_frame *hf,
unsigned int *data_length_p)
{
unsigned int minimum_length, data_length = 0;

if (hf->flags & GS_CAN_FLAG_FD) {
if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX)
data_length = can_fd_dlc2len(hf->can_dlc);

if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
/* timestamp follows data field of max size */
minimum_length = struct_size(hf, canfd_ts, 1);
else
minimum_length = sizeof(hf->header) + data_length;
} else {
if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX &&
!(hf->can_id & cpu_to_le32(CAN_RTR_FLAG)))
data_length = can_cc_dlc2len(hf->can_dlc);

if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
/* timestamp follows data field of max size */
minimum_length = struct_size(hf, classic_can_ts, 1);
else
minimum_length = sizeof(hf->header) + data_length;
}

*data_length_p = data_length;
return minimum_length;
}

static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *parent = urb->context;

@@ -576,6 +614,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
int rc;
struct net_device_stats *stats;
struct gs_host_frame *hf = urb->transfer_buffer;
unsigned int minimum_length, data_length;
struct gs_tx_context *txc;
struct can_frame *cf;
struct canfd_frame *cfd;

@@ -594,6 +633,15 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
return;
}

minimum_length = sizeof(hf->header);
if (urb->actual_length < minimum_length) {
dev_err_ratelimited(&parent->udev->dev,
"short read (actual_length=%u, minimum_length=%u)\n",
urb->actual_length, minimum_length);

goto resubmit_urb;
}

/* device reports out of range channel id */
if (hf->channel >= parent->channel_cnt)
goto device_detach;

@@ -609,20 +657,33 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
if (!netif_running(netdev))
goto resubmit_urb;

if (hf->echo_id == -1) { /* normal rx */
minimum_length = gs_usb_get_minimum_rx_length(dev, hf, &data_length);
if (urb->actual_length < minimum_length) {
stats->rx_errors++;
stats->rx_length_errors++;

if (net_ratelimit())
netdev_err(netdev,
"short read (actual_length=%u, minimum_length=%u)\n",
urb->actual_length, minimum_length);

goto resubmit_urb;
}

if (hf->echo_id == GS_HOST_FRAME_ECHO_ID_RX) { /* normal rx */
if (hf->flags & GS_CAN_FLAG_FD) {
skb = alloc_canfd_skb(netdev, &cfd);
if (!skb)
return;

cfd->can_id = le32_to_cpu(hf->can_id);
cfd->len = can_fd_dlc2len(hf->can_dlc);
cfd->len = data_length;
if (hf->flags & GS_CAN_FLAG_BRS)
cfd->flags |= CANFD_BRS;
if (hf->flags & GS_CAN_FLAG_ESI)
cfd->flags |= CANFD_ESI;

memcpy(cfd->data, hf->canfd->data, cfd->len);
memcpy(cfd->data, hf->canfd->data, data_length);
} else {
skb = alloc_can_skb(netdev, &cf);
if (!skb)

@@ -631,7 +692,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
cf->can_id = le32_to_cpu(hf->can_id);
can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);

memcpy(cf->data, hf->classic_can->data, 8);
memcpy(cf->data, hf->classic_can->data, data_length);

/* ERROR frames tell us information about the controller */
if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)

@@ -687,7 +748,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, parent->udev,
parent->pipe_in,
hf, dev->parent->hf_size_rx,
hf, parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);

rc = usb_submit_urb(urb, GFP_ATOMIC);

@@ -750,8 +811,21 @@ static void gs_usb_xmit_callback(struct urb *urb)
struct gs_can *dev = txc->dev;
struct net_device *netdev = dev->netdev;

if (urb->status)
netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
if (!urb->status)
return;

if (urb->status != -ESHUTDOWN && net_ratelimit())
netdev_info(netdev, "failed to xmit URB %u: %pe\n",
txc->echo_id, ERR_PTR(urb->status));

netdev->stats.tx_dropped++;
netdev->stats.tx_errors++;

can_free_echo_skb(netdev, txc->echo_id, NULL);
gs_free_tx_context(txc);
atomic_dec(&dev->active_tx_urbs);

netif_wake_queue(netdev);
}

static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
@@ -685,7 +685,7 @@ static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id,
* for further details.
*/
if (tmp->len == 0) {
pos = round_up(pos,
pos = round_up(pos + 1,
le16_to_cpu
(dev->bulk_in->wMaxPacketSize));
continue;

@@ -1732,7 +1732,7 @@ static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev,
* number of events in case of a heavy rx load on the bus.
*/
if (cmd->len == 0) {
pos = round_up(pos, le16_to_cpu
pos = round_up(pos + 1, le16_to_cpu
(dev->bulk_in->wMaxPacketSize));
continue;
}
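The reason both hunks above bump the position before rounding: when pos already sits on a wMaxPacketSize boundary, round_up(pos, size) returns pos unchanged and the scan loop stalls on the same zero-length command forever; rounding up from pos + 1 always advances to the next packet boundary. A stand-alone illustration with an assumed 64-byte packet size:

```c
#include <assert.h>

/* same rounding behaviour as the kernel's round_up() for power-of-two sizes */
#define ROUND_UP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int pos = 128;               /* already aligned to 64 */

	assert(ROUND_UP(pos, 64) == 128);     /* no progress: loop would spin */
	assert(ROUND_UP(pos + 1, 64) == 192); /* advances to the next packet */
	return 0;
}
```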
@@ -2587,8 +2587,8 @@ static int ksz_irq_phy_setup(struct ksz_device *dev)
irq = irq_find_mapping(dev->ports[port].pirq.domain,
PORT_SRC_PHY_INT);
if (irq < 0) {
ret = irq;
if (!irq) {
ret = -EINVAL;
goto out;
}
ds->user_mii_bus->irq[phy] = irq;

@@ -2952,8 +2952,8 @@ static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);

pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
if (pirq->irq_num < 0)
return pirq->irq_num;
if (!pirq->irq_num)
return -EINVAL;

return ksz_irq_common_setup(dev, pirq);
}

@@ -3038,12 +3038,12 @@ static int ksz_setup(struct dsa_switch *ds)
dsa_switch_for_each_user_port(dp, dev->ds) {
ret = ksz_pirq_setup(dev, dp->index);
if (ret)
goto out_girq;
goto port_release;

if (dev->info->ptp_capable) {
ret = ksz_ptp_irq_setup(ds, dp->index);
if (ret)
goto out_pirq;
goto pirq_release;
}
}
}

@@ -3053,7 +3053,7 @@ static int ksz_setup(struct dsa_switch *ds)
if (ret) {
dev_err(dev->dev, "Failed to register PTP clock: %d\n",
ret);
goto out_ptpirq;
goto port_release;
}
}

@@ -3076,17 +3076,16 @@ static int ksz_setup(struct dsa_switch *ds)
out_ptp_clock_unregister:
if (dev->info->ptp_capable)
ksz_ptp_clock_unregister(ds);
out_ptpirq:
if (dev->irq > 0 && dev->info->ptp_capable)
dsa_switch_for_each_user_port(dp, dev->ds)
port_release:
if (dev->irq > 0) {
dsa_switch_for_each_user_port_continue_reverse(dp, dev->ds) {
if (dev->info->ptp_capable)
ksz_ptp_irq_free(ds, dp->index);
out_pirq:
if (dev->irq > 0)
dsa_switch_for_each_user_port(dp, dev->ds)
pirq_release:
ksz_irq_free(&dev->ports[dp->index].pirq);
out_girq:
if (dev->irq > 0)
}
ksz_irq_free(&dev->girq);
}

return ret;
}
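The error-path changes in this chunk and the ksz_ptp one that follows hinge on the return convention of irq_find_mapping(): it returns an unsigned virq number and yields 0, not a negative errno, when no mapping exists, so an `irq < 0` test can never fire. A hedged sketch of the corrected check, with an invented function name for the wrapper:

```c
#include <linux/irqdomain.h>
#include <linux/errno.h>

/* Illustrative helper only: map a hwirq and report failure as -EINVAL. */
static int sketch_lookup_virq(struct irq_domain *domain, irq_hw_number_t hwirq)
{
	unsigned int virq = irq_find_mapping(domain, hwirq);

	if (!virq)	/* 0 means "no mapping", never a negative errno */
		return -EINVAL;

	return virq;
}
```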
@@ -1093,19 +1093,19 @@ static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
static const char * const name[] = {"pdresp-msg", "xdreq-msg",
"sync-msg"};
const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
struct ksz_irq *ptpirq = &port->ptpirq;
struct ksz_ptp_irq *ptpmsg_irq;

ptpmsg_irq = &port->ptpmsg_irq[n];
ptpmsg_irq->num = irq_create_mapping(ptpirq->domain, n);
if (!ptpmsg_irq->num)
return -EINVAL;

ptpmsg_irq->port = port;
ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);

strscpy(ptpmsg_irq->name, name[n]);

ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
if (ptpmsg_irq->num < 0)
return ptpmsg_irq->num;

return request_threaded_irq(ptpmsg_irq->num, NULL,
ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
ptpmsg_irq->name, ptpmsg_irq);

@@ -1135,12 +1135,9 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
if (!ptpirq->domain)
return -ENOMEM;

for (irq = 0; irq < ptpirq->nirqs; irq++)
irq_create_mapping(ptpirq->domain, irq);

ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
if (ptpirq->irq_num < 0) {
ret = ptpirq->irq_num;
if (!ptpirq->irq_num) {
ret = -EINVAL;
goto out;
}

@@ -1159,12 +1156,11 @@ int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
out_ptp_msg:
free_irq(ptpirq->irq_num, ptpirq);
while (irq--)
while (irq--) {
free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
out:
for (irq = 0; irq < ptpirq->nirqs; irq++)
irq_dispose_mapping(port->ptpmsg_irq[irq].num);
}
out:
irq_domain_remove(ptpirq->domain);

return ret;
@@ -1302,14 +1302,7 @@ static int sja1105_set_port_speed(struct sja1105_private *priv, int port,
* table, since this will be used for the clocking setup, and we no
* longer need to store it in the static config (already told hardware
* we want auto during upload phase).
* Actually for the SGMII port, the MAC is fixed at 1 Gbps and
* we need to configure the PCS only (if even that).
*/
if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX)
speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];

mac[port].speed = speed;

return 0;

@@ -15,6 +15,7 @@
#include "aq_hw.h"
#include "aq_nic.h"
#include "hw_atl/hw_atl_llh.h"

void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
u32 shift, u32 val)

@@ -81,6 +82,27 @@ void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value)
lo_hi_writeq(value, hw->mmio + reg);
}

int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw)
{
int err;
u32 val;

/* Invalidate Descriptor Cache to prevent writing to the cached
* descriptors and to the data pointer of those descriptors
*/
hw_atl_rdm_rx_dma_desc_cache_init_tgl(hw);

err = aq_hw_err_from_flags(hw);
if (err)
goto err_exit;

readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
hw, val, val == 1, 1000U, 10000U);

err_exit:
return err;
}

int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;

@@ -35,6 +35,7 @@ u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg64(struct aq_hw_s *hw, u32 reg, u64 value);
int aq_hw_invalidate_descriptor_cache(struct aq_hw_s *hw);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
int aq_hw_num_tcs(struct aq_hw_s *hw);
int aq_hw_q_per_tc(struct aq_hw_s *hw);
@@ -547,6 +547,11 @@ static int __aq_ring_rx_clean(struct aq_ring_s *self, struct napi_struct *napi,
if (!buff->is_eop) {
unsigned int frag_cnt = 0U;

/* There will be an extra fragment */
if (buff->len > AQ_CFG_RX_HDR_SIZE)
frag_cnt++;

buff_ = buff;
do {
bool is_rsc_completed = true;

@@ -1198,26 +1198,9 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)

static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
{
int err;
u32 val;

hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);

/* Invalidate Descriptor Cache to prevent writing to the cached
* descriptors and to the data pointer of those descriptors
*/
hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);

err = aq_hw_err_from_flags(self);

if (err)
goto err_exit;

readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
self, val, val == 1, 1000U, 10000U);

err_exit:
return err;
return aq_hw_invalidate_descriptor_cache(self);
}

int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self, struct aq_ring_s *ring)

@@ -759,7 +759,7 @@ static int hw_atl2_hw_stop(struct aq_hw_s *self)
{
hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK);

return 0;
return aq_hw_invalidate_descriptor_cache(self);
}

static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self)
@@ -661,6 +661,7 @@ struct fec_enet_private {
unsigned int reload_period;
int pps_enable;
unsigned int next_counter;
bool perout_enable;
struct hrtimer perout_timer;
u64 perout_stime;

@@ -128,6 +128,12 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
spin_lock_irqsave(&fep->tmreg_lock, flags);

if (fep->perout_enable) {
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
dev_err(&fep->pdev->dev, "PEROUT is running");
return -EBUSY;
}

if (fep->pps_enable == enable) {
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return 0;

@@ -243,6 +249,7 @@ static int fec_ptp_pps_perout(struct fec_enet_private *fep)
* the FEC_TCCR register in time and missed the start time.
*/
if (fep->perout_stime < curr_time + 100 * NSEC_PER_MSEC) {
fep->perout_enable = false;
dev_err(&fep->pdev->dev, "Current time is too close to the start time!\n");
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return -1;

@@ -497,7 +504,10 @@ static int fec_ptp_pps_disable(struct fec_enet_private *fep, uint channel)
{
unsigned long flags;

hrtimer_cancel(&fep->perout_timer);

spin_lock_irqsave(&fep->tmreg_lock, flags);
fep->perout_enable = false;
writel(0, fep->hwp + FEC_TCSR(channel));
spin_unlock_irqrestore(&fep->tmreg_lock, flags);

@@ -529,6 +539,8 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return ret;
} else if (rq->type == PTP_CLK_REQ_PEROUT) {
u32 reload_period;

/* Reject requests with unsupported flags */
if (rq->perout.flags)
return -EOPNOTSUPP;

@@ -548,12 +560,14 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}

fep->reload_period = div_u64(period_ns, 2);
if (on && fep->reload_period) {
reload_period = div_u64(period_ns, 2);
if (on && reload_period) {
u64 perout_stime;

/* Convert 1588 timestamp to ns*/
start_time.tv_sec = rq->perout.start.sec;
start_time.tv_nsec = rq->perout.start.nsec;
fep->perout_stime = timespec64_to_ns(&start_time);
perout_stime = timespec64_to_ns(&start_time);

mutex_lock(&fep->ptp_clk_mutex);
if (!fep->ptp_clk_on) {

@@ -562,18 +576,41 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
spin_lock_irqsave(&fep->tmreg_lock, flags);

if (fep->pps_enable) {
dev_err(&fep->pdev->dev, "PPS is running");
ret = -EBUSY;
goto unlock;
}

if (fep->perout_enable) {
dev_err(&fep->pdev->dev,
"PEROUT has been enabled\n");
ret = -EBUSY;
goto unlock;
}

/* Read current timestamp */
curr_time = timecounter_read(&fep->tc);
if (perout_stime <= curr_time) {
dev_err(&fep->pdev->dev,
"Start time must be greater than current time\n");
ret = -EINVAL;
goto unlock;
}

/* Calculate time difference */
delta = perout_stime - curr_time;
fep->reload_period = reload_period;
fep->perout_stime = perout_stime;
fep->perout_enable = true;

unlock:
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
mutex_unlock(&fep->ptp_clk_mutex);

/* Calculate time difference */
delta = fep->perout_stime - curr_time;

if (fep->perout_stime <= curr_time) {
dev_err(&fep->pdev->dev, "Start time must larger than current time!\n");
return -EINVAL;
}
if (ret)
return ret;

/* Because the timer counter of FEC only has 31-bits, correspondingly,
* the time comparison register FEC_TCCR also only low 31 bits can be

@@ -681,8 +718,11 @@ static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
fep->next_counter = (fep->next_counter + fep->reload_period) &
fep->cc.mask;

if (fep->pps_enable) {
event.type = PTP_CLOCK_PPS;
ptp_clock_event(fep->ptp_clock, &event);
}

return IRQ_HANDLED;
}
@@ -627,7 +627,7 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
MLX5E_100MB);
max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
max_bw_unit[i] = MLX5_100_MBPS_UNIT;
} else if (max_bw_value[i] <= upper_limit_gbps) {
} else if (maxrate->tc_maxrate[i] <= upper_limit_gbps) {
max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
MLX5E_1GB);
max_bw_unit[i] = MLX5_GBPS_UNIT;

@@ -201,7 +201,7 @@ static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
return -ENODEV;

/* Fill all but 1 unused descriptors in the Rx queue. */
count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
count = (head - tail - 1) & (FBNIC_IPC_MBX_DESC_LEN - 1);
while (!err && count--) {
struct fbnic_tlv_msg *msg;

@@ -1,11 +1,14 @@
// SPDX-License-Identifier: GPL-2.0+

#include <linux/ptp_classify.h>
#include <linux/units.h>

#include "lan966x_main.h"
#include "vcap_api.h"
#include "vcap_api_client.h"

#define LAN9X66_CLOCK_RATE 165617754

#define LAN966X_MAX_PTP_ID 512

/* Represents 1ppm adjustment in 2^59 format with 6.037735849ns as reference

@@ -1126,5 +1129,5 @@ void lan966x_ptp_rxtstamp(struct lan966x *lan966x, struct sk_buff *skb,
u32 lan966x_ptp_get_period_ps(void)
{
/* This represents the system clock period in picoseconds */
return 15125;
return PICO / LAN9X66_CLOCK_RATE;
}
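A quick sanity check of the constant-to-computation change just above: with the 165617754 Hz clock rate named in the hunk, PICO divided by the rate is about 6038 ps per cycle (PICO being 10^12 in linux/units.h), which matches the "6.037735849ns" reference mentioned in the adjustment comment. Stand-alone arithmetic only:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t pico = 1000000000000ULL;	/* 10^12, like PICO */
	const uint64_t clock_rate = 165617754;	/* Hz, from the hunk above */

	/* integer division, as in the driver: prints 6038 */
	printf("period = %llu ps\n", (unsigned long long)(pico / clock_rate));
	return 0;
}
```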
@@ -1520,11 +1520,20 @@ static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)

static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
{
if (tp->mac_version >= RTL_GIGA_MAC_VER_25 &&
tp->mac_version != RTL_GIGA_MAC_VER_28 &&
tp->mac_version != RTL_GIGA_MAC_VER_31 &&
tp->mac_version != RTL_GIGA_MAC_VER_38)
r8169_mod_reg8_cond(tp, PMCH, D3_NO_PLL_DOWN, !enable);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
case RTL_GIGA_MAC_VER_38:
break;
case RTL_GIGA_MAC_VER_80:
r8169_mod_reg8_cond(tp, PMCH, D3_NO_PLL_DOWN, true);
break;
default:
r8169_mod_reg8_cond(tp, PMCH, D3HOT_NO_PLL_DOWN, true);
r8169_mod_reg8_cond(tp, PMCH, D3COLD_NO_PLL_DOWN, !enable);
break;
}
}

static void rtl_reset_packet_filter(struct rtl8169_private *tp)

@@ -1521,8 +1521,10 @@ static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)

skb = priv->rxq[qnum]->rx_skbuff[entry];

if (unlikely(!skb))
if (unlikely(!skb)) {
netdev_err(priv->dev, "rx descriptor is not consistent\n");
break;
}

prefetch(skb->data - NET_IP_ALIGN);
priv->rxq[qnum]->rx_skbuff[entry] = NULL;

@@ -582,7 +582,7 @@ static int gpy_update_interface(struct phy_device *phydev)
/* Interface mode is fixed for USXGMII and integrated PHY */
if (phydev->interface == PHY_INTERFACE_MODE_USXGMII ||
phydev->interface == PHY_INTERFACE_MODE_INTERNAL)
return -EINVAL;
return 0;

/* Automatically switch SERDES interface between SGMII and 2500-BaseX
* according to speed. Disable ANEG in 2500-BaseX mode.

@@ -620,13 +620,7 @@ static int gpy_update_interface(struct phy_device *phydev)
break;
}

if (phydev->speed == SPEED_2500 || phydev->speed == SPEED_1000) {
ret = genphy_read_master_slave(phydev);
if (ret < 0)
return ret;
}

return gpy_update_mdix(phydev);
return 0;
}

static int gpy_read_status(struct phy_device *phydev)

@@ -681,6 +675,16 @@ static int gpy_read_status(struct phy_device *phydev)
ret = gpy_update_interface(phydev);
if (ret < 0)
return ret;

if (phydev->speed == SPEED_2500 || phydev->speed == SPEED_1000) {
ret = genphy_read_master_slave(phydev);
if (ret < 0)
return ret;
}

ret = gpy_update_mdix(phydev);
if (ret < 0)
return ret;
}

return 0;