Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR (net-6.18-rc5).

Conflicts:

drivers/net/wireless/ath/ath12k/mac.c
  9222582ec5 ("Revert "wifi: ath12k: Fix missing station power save configuration"")
  6917e268c4 ("wifi: ath12k: Defer vdev bring-up until CSA finalize to avoid stale beacon")
https://lore.kernel.org/11cece9f7e36c12efd732baa5718239b1bf8c950.camel@sipsolutions.net

Adjacent changes:

drivers/net/ethernet/intel/Kconfig
  b1d16f7c00 ("libie: depend on DEBUG_FS when building LIBIE_FWLOG")
  93f53db9f9 ("ice: switch to Page Pool")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 1ec9871fbb
Jakub Kicinski <kuba@kernel.org>
2025-11-06 09:25:16 -08:00
259 changed files with 2356 additions and 950 deletions

@ -644,6 +644,7 @@ Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
Quentin Monnet <qmo@kernel.org> <quentin@isovalent.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
Rae Moar <raemoar63@gmail.com> <rmoar@google.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
Rajendra Nayak <quic_rjendra@quicinc.com> <rnayak@codeaurora.org>


@ -32,7 +32,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
maxItems: 4
items:
enum: [1, 2, 3, 4]
@ -48,7 +48,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32-array
minItems: 2
maxItems: 2
maxItems: 5
items:
enum: [1, 2, 3, 4, 5]


@ -4819,6 +4819,7 @@ F: drivers/net/dsa/b53/*
F: drivers/net/dsa/bcm_sf2*
F: include/linux/dsa/brcm.h
F: include/linux/platform_data/b53.h
F: net/dsa/tag_brcm.c
BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
M: Florian Fainelli <florian.fainelli@broadcom.com>
@ -12521,6 +12522,7 @@ F: include/linux/avf/virtchnl.h
F: include/linux/net/intel/*/
INTEL ETHERNET PROTOCOL DRIVER FOR RDMA
M: Krzysztof Czurylo <krzysztof.czurylo@intel.com>
M: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
L: linux-rdma@vger.kernel.org
S: Supported
@ -12861,7 +12863,8 @@ F: tools/testing/selftests/sgx/*
K: \bSGX_
INTEL SKYLAKE INT3472 ACPI DEVICE DRIVER
M: Daniel Scally <djrscally@gmail.com>
M: Daniel Scally <dan.scally@ideasonboard.com>
M: Sakari Ailus <sakari.ailus@linux.intel.com>
S: Maintained
F: drivers/platform/x86/intel/int3472/
F: include/linux/platform_data/x86/int3472.h
@ -13425,9 +13428,12 @@ F: mm/kasan/
F: scripts/Makefile.kasan
KCONFIG
M: Nathan Chancellor <nathan@kernel.org>
M: Nicolas Schier <nsc@kernel.org>
L: linux-kbuild@vger.kernel.org
S: Orphan
S: Odd Fixes
Q: https://patchwork.kernel.org/project/linux-kbuild/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux.git
F: Documentation/kbuild/kconfig*
F: scripts/Kconfig.include
F: scripts/kconfig/
@ -13612,7 +13618,7 @@ F: fs/smb/server/
KERNEL UNIT TESTING FRAMEWORK (KUnit)
M: Brendan Higgins <brendan.higgins@linux.dev>
M: David Gow <davidgow@google.com>
R: Rae Moar <rmoar@google.com>
R: Rae Moar <raemoar63@gmail.com>
L: linux-kselftest@vger.kernel.org
L: kunit-dev@googlegroups.com
S: Maintained


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 18
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Baby Opossum Posse
# *DOCUMENTATION*


@ -1213,6 +1213,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
const u8 tmp3 = bpf2a64[TMP_REG_3];
const u8 fp = bpf2a64[BPF_REG_FP];
const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
const u8 priv_sp = bpf2a64[PRIVATE_SP];
@ -1757,8 +1758,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
emit(A64_ADD(1, tmp2, dst, arena_vm_base), ctx);
dst = tmp2;
emit(A64_ADD(1, tmp3, dst, arena_vm_base), ctx);
dst = tmp3;
}
if (dst == fp) {
dst_adj = ctx->priv_sp_used ? priv_sp : A64_SP;


@ -109,7 +109,7 @@ endif
ifdef CONFIG_RUSTC_HAS_ANNOTATE_TABLEJUMP
KBUILD_RUSTFLAGS += -Cllvm-args=--loongarch-annotate-tablejump
else
KBUILD_RUSTFLAGS += -Zno-jump-tables # keep compatibility with older compilers
KBUILD_RUSTFLAGS += $(if $(call rustc-min-version,109300),-Cjump-tables=n,-Zno-jump-tables) # keep compatibility with older compilers
endif
ifdef CONFIG_LTO_CLANG
# The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.


@ -158,7 +158,6 @@ config S390
select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
select ARCH_WANT_KERNEL_PMD_MKWRITE
select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
select ARCH_WANTS_THP_SWAP
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS2


@ -101,6 +101,7 @@ CONFIG_SLUB_STATS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y
@ -123,12 +124,12 @@ CONFIG_TLS_DEVICE=y
CONFIG_TLS_TOE=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_DIBS=y
CONFIG_DIBS_LO=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_DIBS=y
CONFIG_DIBS_LO=y
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@ -472,6 +473,7 @@ CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LLBITMAP=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
@ -654,9 +656,12 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
CONFIG_XFS_FS=y
CONFIG_XFS_SUPPORT_V4=y
CONFIG_XFS_SUPPORT_ASCII_CI=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
# CONFIG_XFS_ONLINE_SCRUB is not set
CONFIG_XFS_DEBUG=y
CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
@ -666,7 +671,6 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
CONFIG_BTRFS_ASSERT=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y


@ -94,6 +94,7 @@ CONFIG_SLAB_BUCKETS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_PERSISTENT_HUGE_ZERO_FOLIO=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
@ -114,12 +115,12 @@ CONFIG_TLS_DEVICE=y
CONFIG_TLS_TOE=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_DIBS=y
CONFIG_DIBS_LO=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_DIBS=y
CONFIG_DIBS_LO=y
CONFIG_XDP_SOCKETS=y
CONFIG_XDP_SOCKETS_DIAG=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
@ -462,6 +463,7 @@ CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LLBITMAP=y
# CONFIG_MD_BITMAP_FILE is not set
CONFIG_MD_LINEAR=m
CONFIG_MD_CLUSTER=m
@ -644,16 +646,18 @@ CONFIG_JFS_POSIX_ACL=y
CONFIG_JFS_SECURITY=y
CONFIG_JFS_STATISTICS=y
CONFIG_XFS_FS=y
CONFIG_XFS_SUPPORT_V4=y
CONFIG_XFS_SUPPORT_ASCII_CI=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_XFS_RT=y
# CONFIG_XFS_ONLINE_SCRUB is not set
CONFIG_GFS2_FS=m
CONFIG_GFS2_FS_LOCKING_DLM=y
CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
CONFIG_FS_VERITY=y


@ -33,7 +33,6 @@ CONFIG_NET=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_SAFE=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_DCSSBLK is not set
# CONFIG_DASD is not set
CONFIG_ENCLOSURE_SERVICES=y
CONFIG_SCSI=y


@ -169,11 +169,18 @@ struct kmac_sha2_ctx {
u64 buflen[2];
};
enum async_op {
OP_NOP = 0,
OP_UPDATE,
OP_FINAL,
OP_FINUP,
};
/* phmac request context */
struct phmac_req_ctx {
struct hash_walk_helper hwh;
struct kmac_sha2_ctx kmac_ctx;
bool final;
enum async_op async_op;
};
/*
@ -610,6 +617,7 @@ static int phmac_update(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
req_ctx->async_op = OP_UPDATE;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@ -647,8 +655,7 @@ static int phmac_final(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
req->nbytes = 0;
req_ctx->final = true;
req_ctx->async_op = OP_FINAL;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@ -676,13 +683,16 @@ static int phmac_finup(struct ahash_request *req)
if (rc)
goto out;
req_ctx->async_op = OP_FINUP;
/* Try synchronous operations if no active engine usage */
if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_update(req, false);
if (rc == 0)
req->nbytes = 0;
req_ctx->async_op = OP_FINAL;
}
if (!rc && !req->nbytes && !atomic_read(&tfm_ctx->via_engine_ctr)) {
if (!rc && req_ctx->async_op == OP_FINAL &&
!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_final(req, false);
if (rc == 0)
goto out;
@ -694,7 +704,7 @@ static int phmac_finup(struct ahash_request *req)
* using engine to serialize requests.
*/
if (rc == 0 || rc == -EKEYEXPIRED) {
req_ctx->final = true;
/* req->async_op has been set to either OP_FINUP or OP_FINAL */
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req);
if (rc != -EINPROGRESS)
@ -855,15 +865,16 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
/*
* Three kinds of requests come in here:
* update when req->nbytes > 0 and req_ctx->final is false
* final when req->nbytes = 0 and req_ctx->final is true
* finup when req->nbytes > 0 and req_ctx->final is true
* For update and finup the hwh walk needs to be prepared and
* up to date but the actual nr of bytes in req->nbytes may be
* any non zero number. For final there is no hwh walk needed.
* 1. req->async_op == OP_UPDATE with req->nbytes > 0
* 2. req->async_op == OP_FINUP with req->nbytes > 0
* 3. req->async_op == OP_FINAL
* For update and finup the hwh walk has already been prepared
* by the caller. For final there is no hwh walk needed.
*/
if (req->nbytes) {
switch (req_ctx->async_op) {
case OP_UPDATE:
case OP_FINUP:
rc = phmac_kmac_update(req, true);
if (rc == -EKEYEXPIRED) {
/*
@ -880,10 +891,11 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
hwh_advance(hwh, rc);
goto out;
}
req->nbytes = 0;
}
if (req_ctx->final) {
if (req_ctx->async_op == OP_UPDATE)
break;
req_ctx->async_op = OP_FINAL;
fallthrough;
case OP_FINAL:
rc = phmac_kmac_final(req, true);
if (rc == -EKEYEXPIRED) {
/*
@ -897,10 +909,14 @@ static int phmac_do_one_request(struct crypto_engine *engine, void *areq)
cond_resched();
return -ENOSPC;
}
break;
default:
/* unknown/unsupported/unimplemented asynch op */
return -EOPNOTSUPP;
}
out:
if (rc || req_ctx->final)
if (rc || req_ctx->async_op == OP_FINAL)
memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
pr_debug("request complete with rc=%d\n", rc);
local_bh_disable();


@ -145,7 +145,6 @@ struct zpci_dev {
u8 has_resources : 1;
u8 is_physfn : 1;
u8 util_str_avail : 1;
u8 irqs_registered : 1;
u8 tid_avail : 1;
u8 rtr_avail : 1; /* Relaxed translation allowed */
unsigned int devfn; /* DEVFN part of the RID*/


@ -291,16 +291,14 @@ static int ptdump_cmp(const void *a, const void *b)
static int add_marker(unsigned long start, unsigned long end, const char *name)
{
size_t oldsize, newsize;
struct addr_marker *new;
size_t newsize;
oldsize = markers_cnt * sizeof(*markers);
newsize = oldsize + 2 * sizeof(*markers);
if (!oldsize)
markers = kvmalloc(newsize, GFP_KERNEL);
else
markers = kvrealloc(markers, newsize, GFP_KERNEL);
if (!markers)
goto error;
newsize = (markers_cnt + 2) * sizeof(*markers);
new = kvrealloc(markers, newsize, GFP_KERNEL);
if (!new)
return -ENOMEM;
markers = new;
markers[markers_cnt].is_start = 1;
markers[markers_cnt].start_address = start;
markers[markers_cnt].size = end - start;
@ -312,9 +310,6 @@ static int add_marker(unsigned long start, unsigned long end, const char *name)
markers[markers_cnt].name = name;
markers_cnt++;
return 0;
error:
markers_cnt = 0;
return -ENOMEM;
}
static int pt_dump_init(void)

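The add_marker() change adopts the standard realloc idiom: reallocate into a
temporary so the original pointer, and everything it already holds, survives
an allocation failure. A minimal sketch of the idiom, with illustrative names:

	void *tmp = kvrealloc(buf, newsize, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* buf and its contents are still valid */
	buf = tmp;		/* commit the new pointer only on success */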

@ -188,7 +188,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
* is unbound or probed and that userspace can't access its
* configuration space while we perform recovery.
*/
pci_dev_lock(pdev);
device_lock(&pdev->dev);
if (pdev->error_state == pci_channel_io_perm_failure) {
ers_res = PCI_ERS_RESULT_DISCONNECT;
goto out_unlock;
@ -257,7 +257,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
driver->err_handler->resume(pdev);
pci_uevent_ers(pdev, PCI_ERS_RESULT_RECOVERED);
out_unlock:
pci_dev_unlock(pdev);
device_unlock(&pdev->dev);
zpci_report_status(zdev, "recovery", status_str);
return ers_res;


@ -107,9 +107,6 @@ static int zpci_set_irq(struct zpci_dev *zdev)
else
rc = zpci_set_airq(zdev);
if (!rc)
zdev->irqs_registered = 1;
return rc;
}
@ -123,9 +120,6 @@ static int zpci_clear_irq(struct zpci_dev *zdev)
else
rc = zpci_clear_airq(zdev);
if (!rc)
zdev->irqs_registered = 0;
return rc;
}
@ -427,8 +421,7 @@ bool arch_restore_msi_irqs(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
if (!zdev->irqs_registered)
zpci_set_irq(zdev);
zpci_set_irq(zdev);
return true;
}


@ -75,7 +75,7 @@ export BITS
#
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
#
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx -mno-sse4a
KBUILD_RUSTFLAGS += --target=$(objtree)/scripts/target.json
KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
@ -98,7 +98,7 @@ ifeq ($(CONFIG_X86_KERNEL_IBT),y)
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=104816
#
KBUILD_CFLAGS += $(call cc-option,-fcf-protection=branch -fno-jump-tables)
KBUILD_RUSTFLAGS += -Zcf-protection=branch -Zno-jump-tables
KBUILD_RUSTFLAGS += -Zcf-protection=branch $(if $(call rustc-min-version,109300),-Cjump-tables=n,-Zno-jump-tables)
else
KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
endif


@ -7596,6 +7596,7 @@ __init int intel_pmu_init(void)
break;
case INTEL_PANTHERLAKE_L:
case INTEL_WILDCATLAKE_L:
pr_cont("Pantherlake Hybrid events, ");
name = "pantherlake_hybrid";
goto lnl_common;


@ -317,7 +317,8 @@ static u64 __grt_latency_data(struct perf_event *event, u64 status,
{
u64 val;
WARN_ON_ONCE(hybrid_pmu(event->pmu)->pmu_type == hybrid_big);
WARN_ON_ONCE(is_hybrid() &&
hybrid_pmu(event->pmu)->pmu_type == hybrid_big);
dse &= PERF_PEBS_DATA_SOURCE_GRT_MASK;
val = hybrid_var(event->pmu, pebs_data_source)[dse];


@ -1895,6 +1895,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &mtl_uncore_init),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &lnl_uncore_init),
X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &ptl_uncore_init),
X86_MATCH_VFM(INTEL_WILDCATLAKE_L, &ptl_uncore_init),
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &spr_uncore_init),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &spr_uncore_init),
X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, &gnr_uncore_init),


@ -150,12 +150,12 @@
#define INTEL_LUNARLAKE_M IFM(6, 0xBD) /* Lion Cove / Skymont */
#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Crestmont */
#define INTEL_PANTHERLAKE_L IFM(6, 0xCC) /* Cougar Cove / Darkmont */
#define INTEL_WILDCATLAKE_L IFM(6, 0xD5)
#define INTEL_NOVALAKE IFM(18, 0x01)
#define INTEL_NOVALAKE_L IFM(18, 0x03)
#define INTEL_NOVALAKE IFM(18, 0x01) /* Coyote Cove / Arctic Wolf */
#define INTEL_NOVALAKE_L IFM(18, 0x03) /* Coyote Cove / Arctic Wolf */
/* "Small Core" Processors (Atom/E-Core) */


@ -43,6 +43,9 @@ extern unsigned long __phys_addr_symbol(unsigned long);
void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);
KCFI_REFERENCE(clear_page_orig);
KCFI_REFERENCE(clear_page_rep);
KCFI_REFERENCE(clear_page_erms);
static inline void clear_page(void *page)
{


@ -2,6 +2,10 @@
#ifndef _ASM_RUNTIME_CONST_H
#define _ASM_RUNTIME_CONST_H
#ifdef MODULE
#error "Cannot use runtime-const infrastructure from modules"
#endif
#ifdef __ASSEMBLY__
.macro RUNTIME_CONST_PTR sym reg


@ -12,12 +12,12 @@
#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/runtime-const.h>
/*
* Virtual variable: there's no actual backing store for this,
* it can purely be used as 'runtime_const_ptr(USER_PTR_MAX)'
*/
#ifdef MODULE
#define runtime_const_ptr(sym) (sym)
#else
#include <asm/runtime-const.h>
#endif
extern unsigned long USER_PTR_MAX;
#ifdef CONFIG_ADDRESS_MASKING


@ -516,7 +516,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
setup_force_cpu_cap(X86_FEATURE_ZEN5);
break;
case 0x50 ... 0x5f:
case 0x90 ... 0xaf:
case 0x80 ... 0xaf:
case 0xc0 ... 0xcf:
setup_force_cpu_cap(X86_FEATURE_ZEN6);
break;
@ -1035,8 +1035,18 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
}
}
static const struct x86_cpu_id zen5_rdseed_microcode[] = {
ZEN_MODEL_STEP_UCODE(0x1a, 0x02, 0x1, 0x0b00215a),
ZEN_MODEL_STEP_UCODE(0x1a, 0x11, 0x0, 0x0b101054),
};
static void init_amd_zen5(struct cpuinfo_x86 *c)
{
if (!x86_match_min_microcode_rev(zen5_rdseed_microcode)) {
clear_cpu_cap(c, X86_FEATURE_RDSEED);
msr_clear_bit(MSR_AMD64_CPUID_FN_7, 18);
pr_emerg_once("RDSEED32 is broken. Disabling the corresponding CPUID bit.\n");
}
}
static void init_amd(struct cpuinfo_x86 *c)


@ -78,6 +78,10 @@
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
/* Used for modules: built-in code uses runtime constants */
unsigned long USER_PTR_MAX;
EXPORT_SYMBOL(USER_PTR_MAX);
u32 elf_hwcap2 __read_mostly;
/* Number of siblings per CPU package */
@ -2579,7 +2583,7 @@ void __init arch_cpu_finalize_init(void)
alternative_instructions();
if (IS_ENABLED(CONFIG_X86_64)) {
unsigned long USER_PTR_MAX = TASK_SIZE_MAX;
USER_PTR_MAX = TASK_SIZE_MAX;
/*
* Enable this when LAM is gated on LASS support


@ -233,13 +233,31 @@ static bool need_sha_check(u32 cur_rev)
return true;
}
static bool cpu_has_entrysign(void)
{
unsigned int fam = x86_family(bsp_cpuid_1_eax);
unsigned int model = x86_model(bsp_cpuid_1_eax);
if (fam == 0x17 || fam == 0x19)
return true;
if (fam == 0x1a) {
if (model <= 0x2f ||
(0x40 <= model && model <= 0x4f) ||
(0x60 <= model && model <= 0x6f))
return true;
}
return false;
}
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
struct patch_digest *pd = NULL;
u8 digest[SHA256_DIGEST_SIZE];
int i;
if (x86_family(bsp_cpuid_1_eax) < 0x17)
if (!cpu_has_entrysign())
return true;
if (!need_sha_check(cur_rev))


@ -825,6 +825,9 @@ void fpu__clear_user_states(struct fpu *fpu)
!fpregs_state_valid(fpu, smp_processor_id()))
os_xrstor_supervisor(fpu->fpstate);
/* Ensure XFD state is in sync before reloading XSTATE */
xfd_update_state(fpu->fpstate);
/* Reset user states in registers. */
restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE);


@ -2701,7 +2701,7 @@ st: if (is_imm8(insn->off))
/* Update cleanup_addr */
ctx->cleanup_addr = proglen;
if (bpf_prog_was_classic(bpf_prog) &&
!capable(CAP_SYS_ADMIN)) {
!ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {
u8 *ip = image + addrs[i - 1];
if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))


@ -292,7 +292,7 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
}
if (!bio_crypt_check_alignment(bio)) {
bio->bi_status = BLK_STS_IOERR;
bio->bi_status = BLK_STS_INVAL;
goto fail;
}


@ -63,6 +63,9 @@ static __init int acpi_parse_mrrm(struct acpi_table_header *table)
if (!mrrm)
return -ENODEV;
if (mrrm->header.revision != 1)
return -EINVAL;
if (mrrm->flags & ACPI_MRRM_FLAGS_REGION_ASSIGNMENT_OS)
return -EOPNOTSUPP;


@ -1959,8 +1959,10 @@ static void acpi_video_bus_remove_notify_handler(struct acpi_video_bus *video)
struct acpi_video_device *dev;
mutex_lock(&video->device_list_lock);
list_for_each_entry(dev, &video->video_device_list, entry)
list_for_each_entry(dev, &video->video_device_list, entry) {
acpi_video_dev_remove_notify_handler(dev);
cancel_delayed_work_sync(&dev->switch_brightness_work);
}
mutex_unlock(&video->device_list_lock);
acpi_video_bus_stop_devices(video);


@ -619,8 +619,10 @@ static int acpi_button_add(struct acpi_device *device)
input_set_drvdata(input, device);
error = input_register_device(input);
if (error)
if (error) {
input_free_device(input);
goto err_remove_fs;
}
switch (device->device_type) {
case ACPI_BUS_TYPE_POWER_BUTTON:

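The added braces follow the input core's ownership rule: a device whose
input_register_device() call failed must be released with
input_free_device(), whereas a successfully registered device may only be
torn down with input_unregister_device(). A sketch of the rule (error
handling simplified relative to the driver):

	error = input_register_device(input);
	if (error) {
		input_free_device(input);	/* registration failed: we still own it */
		return error;
	}
	/* from here on, only input_unregister_device() may release it */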

@ -49,6 +49,7 @@ struct acpi_fan_fst {
};
struct acpi_fan {
acpi_handle handle;
bool acpi4;
bool has_fst;
struct acpi_fan_fif fif;
@ -59,14 +60,14 @@ struct acpi_fan {
struct device_attribute fine_grain_control;
};
int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst);
int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst);
int acpi_fan_create_attributes(struct acpi_device *device);
void acpi_fan_delete_attributes(struct acpi_device *device);
#if IS_REACHABLE(CONFIG_HWMON)
int devm_acpi_fan_create_hwmon(struct acpi_device *device);
int devm_acpi_fan_create_hwmon(struct device *dev);
#else
static inline int devm_acpi_fan_create_hwmon(struct acpi_device *device) { return 0; };
static inline int devm_acpi_fan_create_hwmon(struct device *dev) { return 0; };
#endif
#endif


@ -55,7 +55,7 @@ static ssize_t show_fan_speed(struct device *dev, struct device_attribute *attr,
struct acpi_fan_fst fst;
int status;
status = acpi_fan_get_fst(acpi_dev, &fst);
status = acpi_fan_get_fst(acpi_dev->handle, &fst);
if (status)
return status;


@ -44,25 +44,30 @@ static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
return 0;
}
int acpi_fan_get_fst(struct acpi_device *device, struct acpi_fan_fst *fst)
int acpi_fan_get_fst(acpi_handle handle, struct acpi_fan_fst *fst)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
acpi_status status;
int ret = 0;
status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
if (ACPI_FAILURE(status)) {
dev_err(&device->dev, "Get fan state failed\n");
return -ENODEV;
}
status = acpi_evaluate_object(handle, "_FST", NULL, &buffer);
if (ACPI_FAILURE(status))
return -EIO;
obj = buffer.pointer;
if (!obj || obj->type != ACPI_TYPE_PACKAGE ||
obj->package.count != 3 ||
obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
dev_err(&device->dev, "Invalid _FST data\n");
ret = -EINVAL;
if (!obj)
return -ENODATA;
if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) {
ret = -EPROTO;
goto err;
}
if (obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
obj->package.elements[1].type != ACPI_TYPE_INTEGER ||
obj->package.elements[2].type != ACPI_TYPE_INTEGER) {
ret = -EPROTO;
goto err;
}
@ -81,7 +86,7 @@ static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
struct acpi_fan_fst fst;
int status, i;
status = acpi_fan_get_fst(device, &fst);
status = acpi_fan_get_fst(device->handle, &fst);
if (status)
return status;
@ -311,11 +316,16 @@ static int acpi_fan_probe(struct platform_device *pdev)
struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
char *name;
if (!device)
return -ENODEV;
fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
if (!fan) {
dev_err(&device->dev, "No memory for fan\n");
return -ENOMEM;
}
fan->handle = device->handle;
device->driver_data = fan;
platform_set_drvdata(pdev, fan);
@ -337,7 +347,7 @@ static int acpi_fan_probe(struct platform_device *pdev)
}
if (fan->has_fst) {
result = devm_acpi_fan_create_hwmon(device);
result = devm_acpi_fan_create_hwmon(&pdev->dev);
if (result)
return result;


@ -93,13 +93,12 @@ static umode_t acpi_fan_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_
static int acpi_fan_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
struct acpi_device *adev = to_acpi_device(dev->parent);
struct acpi_fan *fan = dev_get_drvdata(dev);
struct acpi_fan_fps *fps;
struct acpi_fan_fst fst;
int ret;
ret = acpi_fan_get_fst(adev, &fst);
ret = acpi_fan_get_fst(fan->handle, &fst);
if (ret < 0)
return ret;
@ -167,12 +166,12 @@ static const struct hwmon_chip_info acpi_fan_hwmon_chip_info = {
.info = acpi_fan_hwmon_info,
};
int devm_acpi_fan_create_hwmon(struct acpi_device *device)
int devm_acpi_fan_create_hwmon(struct device *dev)
{
struct acpi_fan *fan = acpi_driver_data(device);
struct acpi_fan *fan = dev_get_drvdata(dev);
struct device *hdev;
hdev = devm_hwmon_device_register_with_info(&device->dev, "acpi_fan", fan,
&acpi_fan_hwmon_chip_info, NULL);
hdev = devm_hwmon_device_register_with_info(dev, "acpi_fan", fan, &acpi_fan_hwmon_chip_info,
NULL);
return PTR_ERR_OR_ZERO(hdev);
}


@ -155,7 +155,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
* Baud Rate field. If this field is zero or not present, Configured
* Baud Rate is used.
*/
if (table->precise_baudrate)
if (table->header.revision >= 4 && table->precise_baudrate)
baud_rate = table->precise_baudrate;
else switch (table->baud_rate) {
case 0:


@ -48,8 +48,7 @@ struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
return __regmap_init(&slimbus->dev, bus, &slimbus->dev, config,
lock_key, lock_name);
return __regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_slimbus);
@ -63,8 +62,7 @@ struct regmap *__devm_regmap_init_slimbus(struct slim_device *slimbus,
if (IS_ERR(bus))
return ERR_CAST(bus);
return __devm_regmap_init(&slimbus->dev, bus, &slimbus, config,
lock_key, lock_name);
return __devm_regmap_init(&slimbus->dev, bus, slimbus, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_slimbus);


@ -1949,6 +1949,7 @@ static int null_add_dev(struct nullb_device *dev)
.logical_block_size = dev->blocksize,
.physical_block_size = dev->blocksize,
.max_hw_sectors = dev->max_sectors,
.dma_alignment = dev->blocksize - 1,
};
struct nullb *nullb;


@ -625,8 +625,10 @@ static int rtlbt_parse_firmware_v2(struct hci_dev *hdev,
len += entry->len;
}
if (!len)
if (!len) {
kvfree(ptr);
return -EPERM;
}
*_buf = ptr;
return len;


@ -318,10 +318,13 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
/*
* Use a physical idle state, not busy polling, unless a timer
* is going to trigger soon enough.
* is going to trigger soon enough or the exit latency of the
* idle state in question is greater than the predicted idle
* duration.
*/
if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
s->target_residency_ns <= data->next_timer_ns) {
s->target_residency_ns <= data->next_timer_ns &&
s->exit_latency_ns <= predicted_ns) {
predicted_ns = s->target_residency_ns;
idx = i;
break;

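A worked illustration of the added test, with made-up numbers: given
data->next_timer_ns = 50 us and predicted_ns = 5 us, a state with
target_residency_ns = 20 us and exit_latency_ns = 10 us passed the old check
(20 us <= 50 us) even though its exit latency alone exceeds the predicted
idle time; the new s->exit_latency_ns <= predicted_ns condition rejects it
and keeps the cheaper polling state.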

@ -787,7 +787,6 @@ static int aspeed_acry_probe(struct platform_device *pdev)
err_engine_rsa_start:
crypto_engine_exit(acry_dev->crypt_engine_rsa);
clk_exit:
clk_disable_unprepare(acry_dev->clk);
return rc;
}
@ -799,7 +798,6 @@ static void aspeed_acry_remove(struct platform_device *pdev)
aspeed_acry_unregister(acry_dev);
crypto_engine_exit(acry_dev->crypt_engine_rsa);
tasklet_kill(&acry_dev->done_task);
clk_disable_unprepare(acry_dev->clk);
}
MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);


@ -1141,7 +1141,7 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
"RCU protection is required for safe access to returned string");
if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return fence->ops->get_driver_name(fence);
return fence->ops->get_timeline_name(fence);
else
return "signaled-timeline";
}


@ -433,7 +433,7 @@ static void handle_error(struct mc_priv *priv, struct ecc_status *stat,
phys_addr_t pfn;
int err;
if (WARN_ON_ONCE(ctl_num > NUM_CONTROLLERS))
if (WARN_ON_ONCE(ctl_num >= NUM_CONTROLLERS))
return;
mci = priv->mci[ctl_num];


@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
// SPDX-License-Identifier: MIT
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*


@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*


@ -322,6 +322,26 @@ static int vpe_early_init(struct amdgpu_ip_block *ip_block)
return 0;
}
static bool vpe_need_dpm0_at_power_down(struct amdgpu_device *adev)
{
switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
case IP_VERSION(6, 1, 1):
return adev->pm.fw_version < 0x0a640500;
default:
return false;
}
}
static int vpe_get_dpm_level(struct amdgpu_device *adev)
{
struct amdgpu_vpe *vpe = &adev->vpe;
if (!adev->pm.dpm_enabled)
return 0;
return RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_lv));
}
static void vpe_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
@ -329,11 +349,17 @@ static void vpe_idle_work_handler(struct work_struct *work)
unsigned int fences = 0;
fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
if (fences)
goto reschedule;
if (fences == 0)
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
else
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
if (vpe_need_dpm0_at_power_down(adev) && vpe_get_dpm_level(adev) != 0)
goto reschedule;
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
return;
reschedule:
schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
}
static int vpe_common_init(struct amdgpu_vpe *vpe)


@ -1,4 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
// SPDX-License-Identifier: MIT
/*
* Copyright 2018 Advanced Micro Devices, Inc.
*


@ -248,6 +248,8 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
struct vblank_control_work *vblank_work =
container_of(work, struct vblank_control_work, work);
struct amdgpu_display_manager *dm = vblank_work->dm;
struct amdgpu_device *adev = drm_to_adev(dm->ddev);
int r;
mutex_lock(&dm->dc_lock);
@ -277,7 +279,16 @@ static void amdgpu_dm_crtc_vblank_control_worker(struct work_struct *work)
if (dm->active_vblank_irq_count == 0) {
dc_post_update_surfaces_to_stream(dm->dc);
r = amdgpu_dpm_pause_power_profile(adev, true);
if (r)
dev_warn(adev->dev, "failed to set default power profile mode\n");
dc_allow_idle_optimizations(dm->dc, true);
r = amdgpu_dpm_pause_power_profile(adev, false);
if (r)
dev_warn(adev->dev, "failed to restore the power profile mode\n");
}
mutex_unlock(&dm->dc_lock);
@ -297,8 +308,12 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
int irq_type;
int rc = 0;
if (acrtc->otg_inst == -1)
goto skip;
if (enable && !acrtc->base.enabled) {
drm_dbg_vbl(crtc->dev,
"Reject vblank enable on unconfigured CRTC %d (enabled=%d)\n",
acrtc->crtc_id, acrtc->base.enabled);
return -EINVAL;
}
irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
@ -383,7 +398,7 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable)
return rc;
}
#endif
skip:
if (amdgpu_in_reset(adev))
return 0;


@ -83,6 +83,7 @@ static void apply_edid_quirks(struct drm_device *dev, struct edid *edid, struct
edid_caps->panel_patch.remove_sink_ext_caps = true;
break;
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4154):
case drm_edid_encode_panel_id('S', 'D', 'C', 0x4171):
drm_dbg_driver(dev, "Disabling VSC on monitor with panel id %X\n", panel_id);
edid_caps->panel_patch.disable_colorimetry = true;
break;


@ -578,9 +578,6 @@ static void dpp3_power_on_blnd_lut(
dpp_base->ctx->dc->optimized_required = true;
dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
}
} else {
REG_SET(CM_MEM_PWR_CTRL, 0,
BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
}
}


@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* SPDX-License-Identifier: MIT */
/*
* Copyright 2025 Advanced Micro Devices, Inc.
*


@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* SPDX-License-Identifier: MIT */
/*
* Copyright 2024 Advanced Micro Devices, Inc. All rights reserved.


@ -2024,7 +2024,7 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/
table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
table->VRConfig = 0;


@ -2028,7 +2028,7 @@ static int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->VoltageResponseTime = 0;
table->PhaseResponseTime = 0;
table->MemoryThermThrottleEnable = 1;
table->PCIeBootLinkLevel = 0;
table->PCIeBootLinkLevel = (uint8_t) (data->dpm_table.pcie_speed_table.count);
table->PCIeGenInterval = 1;
result = iceland_populate_smc_svi2_config(hwmgr, table);


@ -969,7 +969,7 @@ int smu_cmn_update_table(struct smu_context *smu,
table_index);
uint32_t table_size;
int ret = 0;
if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
if (!table_data || table_index >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
table_size = smu_table->tables[table_index].size;


@ -282,13 +282,13 @@ static inline void __ast_write8_i(void __iomem *addr, u32 reg, u8 index, u8 val)
__ast_write8(addr, reg + 1, val);
}
static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 read_mask,
static inline void __ast_write8_i_masked(void __iomem *addr, u32 reg, u8 index, u8 preserve_mask,
u8 val)
{
u8 tmp = __ast_read8_i_masked(addr, reg, index, read_mask);
u8 tmp = __ast_read8_i_masked(addr, reg, index, preserve_mask);
tmp |= val;
__ast_write8_i(addr, reg, index, tmp);
val &= ~preserve_mask;
__ast_write8_i(addr, reg, index, tmp | val);
}
static inline u32 ast_read32(struct ast_device *ast, u32 reg)

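A worked example of the corrected __ast_write8_i_masked() (made-up values,
and assuming __ast_read8_i_masked() returns the current register value ANDed
with the mask, as its use here implies):

	/* preserve_mask = 0x0f, current register value = 0xa5, val = 0xf3 */
	u8 tmp  = 0xa5 & 0x0f;	/* 0x05: bits kept from the register */
	u8 bits = 0xf3 & ~0x0f;	/* 0xf0: caller bits outside the mask */

	/* written: 0xf0 | 0x05 = 0xf5; the old code wrote 0x05 | 0xf3 = 0xf7,
	 * letting val leak into the bits that were meant to be preserved. */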

@ -280,7 +280,7 @@ sanity:
GIT_STRATEGY: none
script:
# ci-fairy check-commits --junit-xml=check-commits.xml
- ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
# - ci-fairy check-merge-request --require-allow-collaboration --junit-xml=check-merge-request.xml
- |
set -eu
image_tags=(


@ -310,8 +310,12 @@ EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
void __drm_gem_reset_shadow_plane(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state)
{
__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
if (shadow_plane_state) {
__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
drm_format_conv_state_init(&shadow_plane_state->fmtcnv_state);
} else {
__drm_atomic_helper_plane_reset(plane, NULL);
}
}
EXPORT_SYMBOL(__drm_gem_reset_shadow_plane);


@ -347,7 +347,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
u32 link_target, link_dwords;
bool switch_context = gpu->exec_state != exec_state;
bool switch_mmu_context = gpu->mmu_context != mmu_context;
unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
unsigned int new_flush_seq = READ_ONCE(mmu_context->flush_seq);
bool need_flush = switch_mmu_context || gpu->flush_seq != new_flush_seq;
bool has_blt = !!(gpu->identity.minor_features5 &
chipMinorFeatures5_BLT_ENGINE);


@ -546,6 +546,36 @@ static bool is_event_handler(struct intel_display *display,
REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
}
static bool fixup_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg_ctl, u32 *data_ctl,
i915_reg_t reg_htp, u32 *data_htp)
{
if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl))
return false;
if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp))
return false;
/* make sure reg_ctl and reg_htp are for the same event */
if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) !=
i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0)))
return false;
/*
* On ADL-S the HRR event handler is not restored after DC6.
* Clear it to zero from the beginning to avoid mismatches later.
*/
if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN &&
is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
*data_ctl = 0;
*data_htp = 0;
return true;
}
return false;
}
static bool disable_dmc_evt(struct intel_display *display,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
@ -1064,9 +1094,32 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
}
for (i = 0; i < mmio_count - 1; i++) {
u32 orig_mmiodata[2] = {
dmc_info->mmiodata[i],
dmc_info->mmiodata[i+1],
};
if (!fixup_dmc_evt(display, dmc_id,
dmc_info->mmioaddr[i], &dmc_info->mmiodata[i],
dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1]))
continue;
drm_dbg_kms(display->drm,
" mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n",
i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]),
orig_mmiodata[0], dmc_info->mmiodata[i]);
drm_dbg_kms(display->drm,
" mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n",
i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]),
orig_mmiodata[1], dmc_info->mmiodata[i+1]);
}
for (i = 0; i < mmio_count; i++) {
drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
i, mmioaddr[i], mmiodata[i],
i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i],
is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i],


@ -25,19 +25,18 @@
struct imx_parallel_display_encoder {
struct drm_encoder encoder;
struct drm_bridge bridge;
struct imx_parallel_display *pd;
};
struct imx_parallel_display {
struct device *dev;
u32 bus_format;
struct drm_bridge *next_bridge;
struct drm_bridge bridge;
};
static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
{
return container_of(b, struct imx_parallel_display_encoder, bridge)->pd;
return container_of(b, struct imx_parallel_display, bridge);
}
static const u32 imx_pd_bus_fmts[] = {
@ -195,15 +194,13 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(imxpd_encoder))
return PTR_ERR(imxpd_encoder);
imxpd_encoder->pd = imxpd;
encoder = &imxpd_encoder->encoder;
bridge = &imxpd_encoder->bridge;
bridge = &imxpd->bridge;
ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
if (ret)
return ret;
bridge->funcs = &imx_pd_bridge_funcs;
drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR);
connector = drm_bridge_connector_init(drm, encoder);
@ -228,9 +225,10 @@ static int imx_pd_probe(struct platform_device *pdev)
u32 bus_format = 0;
const char *fmt;
imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
if (!imxpd)
return -ENOMEM;
imxpd = devm_drm_bridge_alloc(dev, struct imx_parallel_display, bridge,
&imx_pd_bridge_funcs);
if (IS_ERR(imxpd))
return PTR_ERR(imxpd);
/* port@1 is the output port */
imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0);
@ -258,6 +256,8 @@ static int imx_pd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imxpd);
devm_drm_bridge_add(dev, &imxpd->bridge);
return component_add(dev, &imx_pd_ops);
}


@ -686,10 +686,6 @@ static int mtk_drm_bind(struct device *dev)
for (i = 0; i < private->data->mmsys_dev_num; i++)
private->all_drm_private[i]->drm = NULL;
err_put_dev:
for (i = 0; i < private->data->mmsys_dev_num; i++) {
/* For device_find_child in mtk_drm_get_all_priv() */
put_device(private->all_drm_private[i]->dev);
}
put_device(private->mutex_dev);
return ret;
}
@ -697,18 +693,12 @@ static int mtk_drm_bind(struct device *dev)
static void mtk_drm_unbind(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
int i;
/* for multi mmsys dev, unregister drm dev in mmsys master */
if (private->drm_master) {
drm_dev_unregister(private->drm);
mtk_drm_kms_deinit(private->drm);
drm_dev_put(private->drm);
for (i = 0; i < private->data->mmsys_dev_num; i++) {
/* For device_find_child in mtk_drm_get_all_priv() */
put_device(private->all_drm_private[i]->dev);
}
put_device(private->mutex_dev);
}
private->mtk_drm_bound = false;


@ -780,6 +780,9 @@ static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
return true;
}
#define NEXT_BLK(blk) \
((const struct block_header *)((const char *)(blk) + sizeof(*(blk)) + (blk)->size))
static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
@ -811,7 +814,7 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
for (blk = (const struct block_header *) fw_image->data;
(const u8*) blk < fw_image->data + fw_image->size;
blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
blk = NEXT_BLK(blk)) {
if (blk->size == 0)
continue;


@ -348,13 +348,6 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
return 0;
}
static bool
adreno_smmu_has_prr(struct msm_gpu *gpu)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
return adreno_smmu && adreno_smmu->set_prr_addr;
}
int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
uint32_t param, uint64_t *value, uint32_t *len)
{


@ -1545,6 +1545,9 @@ static enum drm_mode_status dpu_crtc_mode_valid(struct drm_crtc *crtc,
adjusted_mode_clk = dpu_core_perf_adjusted_mode_clk(mode->clock,
dpu_kms->perf.perf_cfg);
if (dpu_kms->catalog->caps->has_3d_merge)
adjusted_mode_clk /= 2;
/*
* The given mode, adjusted for the perf clock factor, should not exceed
* the max core clock rate


@ -267,8 +267,8 @@ static const u32 wb2_formats_rgb_yuv[] = {
.base = 0x200, .len = 0xa0,}, \
.csc_blk = {.name = "csc", \
.base = 0x320, .len = 0x100,}, \
.format_list = plane_formats_yuv, \
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
.format_list = plane_formats, \
.num_formats = ARRAY_SIZE(plane_formats), \
.rotation_cfg = NULL, \
}


@ -500,13 +500,15 @@ static void _dpu_plane_setup_pixel_ext(struct dpu_hw_scaler3_cfg *scale_cfg,
int i;
for (i = 0; i < DPU_MAX_PLANES; i++) {
uint32_t w = src_w, h = src_h;
if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
src_w /= chroma_subsmpl_h;
src_h /= chroma_subsmpl_v;
w /= chroma_subsmpl_h;
h /= chroma_subsmpl_v;
}
pixel_ext->num_ext_pxls_top[i] = src_h;
pixel_ext->num_ext_pxls_left[i] = src_w;
pixel_ext->num_ext_pxls_top[i] = h;
pixel_ext->num_ext_pxls_left[i] = w;
}
}
@ -740,7 +742,7 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
* We already have verified scaling against platform limitations.
* Now check if the SSPP supports scaling at all.
*/
if (!sblk->scaler_blk.len &&
if (!(sblk->scaler_blk.len && pipe->sspp->ops.setup_scaler) &&
((drm_rect_width(&new_plane_state->src) >> 16 !=
drm_rect_width(&new_plane_state->dst)) ||
(drm_rect_height(&new_plane_state->src) >> 16 !=
@ -1278,7 +1280,7 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
state, plane_state,
prev_adjacent_plane_state);
if (ret)
break;
return ret;
prev_adjacent_plane_state = plane_state;
}


@ -842,7 +842,7 @@ struct dpu_hw_sspp *dpu_rm_reserve_sspp(struct dpu_rm *rm,
if (!reqs->scale && !reqs->yuv)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_DMA);
if (!hw_sspp && reqs->scale)
if (!hw_sspp && !reqs->yuv)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_RGB);
if (!hw_sspp)
hw_sspp = dpu_rm_try_sspp(rm, global_state, crtc, reqs, SSPP_TYPE_VIG);


@ -72,6 +72,9 @@ static int dpu_wb_conn_atomic_check(struct drm_connector *connector,
DPU_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
fb->width, dpu_wb_conn->maxlinewidth);
return -EINVAL;
} else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
DPU_ERROR("unsupported fb modifier:%#llx\n", fb->modifier);
return -EINVAL;
}
return drm_atomic_helper_check_wb_connector_state(conn_state->connector, conn_state->state);


@ -109,7 +109,6 @@ struct msm_dsi_phy {
struct msm_dsi_dphy_timing timing;
const struct msm_dsi_phy_cfg *cfg;
void *tuning_cfg;
void *pll_data;
enum msm_dsi_phy_usecase usecase;
bool regulator_ldo_mode;


@ -426,11 +426,8 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
u32 data;
spin_lock_irqsave(&pll->pll_enable_lock, flags);
if (pll->pll_enable_cnt++) {
spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
WARN_ON(pll->pll_enable_cnt == INT_MAX);
return;
}
pll->pll_enable_cnt++;
WARN_ON(pll->pll_enable_cnt == INT_MAX);
data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_0);
data |= DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
@ -876,7 +873,6 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
spin_lock_init(&pll_7nm->pll_enable_lock);
pll_7nm->phy = phy;
phy->pll_data = pll_7nm;
ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws);
if (ret) {
@ -965,10 +961,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
u32 const delay_us = 5;
u32 const timeout_us = 1000;
struct msm_dsi_dphy_timing *timing = &phy->timing;
struct dsi_pll_7nm *pll = phy->pll_data;
void __iomem *base = phy->base;
bool less_than_1500_mhz;
unsigned long flags;
u32 vreg_ctrl_0, vreg_ctrl_1, lane_ctrl0;
u32 glbl_pemph_ctrl_0;
u32 glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
@ -1090,13 +1084,10 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
glbl_rescode_bot_ctrl = 0x3c;
}
spin_lock_irqsave(&pll->pll_enable_lock, flags);
pll->pll_enable_cnt = 1;
/* de-assert digital and pll power down */
data = DSI_7nm_PHY_CMN_CTRL_0_DIGTOP_PWRDN_B |
DSI_7nm_PHY_CMN_CTRL_0_PLL_SHUTDOWNB;
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
/* Assert PLL core reset */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
@ -1209,9 +1200,7 @@ static bool dsi_7nm_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll = phy->pll_data;
void __iomem *base = phy->base;
unsigned long flags;
u32 data;
DBG("");
@ -1238,11 +1227,8 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
writel(0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);
spin_lock_irqsave(&pll->pll_enable_lock, flags);
pll->pll_enable_cnt = 0;
/* Turn off all PHY blocks */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
spin_unlock_irqrestore(&pll->pll_enable_lock, flags);
/* make sure phy is turned off */
wmb();


@ -1120,12 +1120,16 @@ static void msm_gem_free_object(struct drm_gem_object *obj)
put_pages(obj);
}
if (obj->resv != &obj->_resv) {
/*
* In error paths, we could end up here before msm_gem_new_handle()
* has changed obj->resv to point to the shared resv. In this case,
* we don't want to drop a ref to the shared r_obj that we haven't
* taken yet.
*/
if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {
struct drm_gem_object *r_obj =
container_of(obj->resv, struct drm_gem_object, _resv);
WARN_ON(!(msm_obj->flags & MSM_BO_NO_SHARE));
/* Drop reference we hold to shared resv obj: */
drm_gem_object_put(r_obj);
}


@ -414,6 +414,11 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
submit->user_fence,
DMA_RESV_USAGE_BOOKKEEP,
DMA_RESV_USAGE_BOOKKEEP);
last_fence = vm->last_fence;
vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
dma_fence_put(last_fence);
return;
}
@ -427,10 +432,6 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit)
dma_resv_add_fence(obj->resv, submit->user_fence,
DMA_RESV_USAGE_READ);
}
last_fence = vm->last_fence;
vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence);
dma_fence_put(last_fence);
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,


@ -971,6 +971,7 @@ static int
lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
{
struct drm_device *dev = job->vm->drm;
struct msm_drm_private *priv = dev->dev_private;
int i = job->nr_ops++;
int ret = 0;
@ -1017,6 +1018,11 @@ lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
break;
}
if ((op->op == MSM_VM_BIND_OP_MAP_NULL) &&
!adreno_smmu_has_prr(priv->gpu)) {
ret = UERR(EINVAL, dev, "PRR not supported\n");
}
return ret;
}
@ -1421,7 +1427,7 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
* Maybe we could allow just UNMAP ops? OTOH userspace should just
* immediately close the device file and all will be torn down.
*/
if (to_msm_vm(ctx->vm)->unusable)
if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)
return UERR(EPIPE, dev, "context is unusable");
/*


@ -299,6 +299,17 @@ static inline struct msm_gpu *dev_to_gpu(struct device *dev)
return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}
static inline bool
adreno_smmu_has_prr(struct msm_gpu *gpu)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
if (!adreno_smmu)
return false;
return adreno_smmu && adreno_smmu->set_prr_addr;
}
/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32


@ -338,6 +338,8 @@ msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_preall
ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
if (ret != p->count) {
kfree(p->pages);
p->pages = NULL;
p->count = ret;
return -ENOMEM;
}
@ -351,6 +353,9 @@ msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_preallo
struct kmem_cache *pt_cache = get_pt_cache(mmu);
uint32_t remaining_pt_count = p->count - p->ptr;
if (!p->pages)
return;
if (p->count > 0)
trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);


@ -482,6 +482,17 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm,
return 0;
}
static bool
nouveau_sched_job_list_empty(struct nouveau_sched *sched)
{
bool empty;
spin_lock(&sched->job.list.lock);
empty = list_empty(&sched->job.list.head);
spin_unlock(&sched->job.list.lock);
return empty;
}
static void
nouveau_sched_fini(struct nouveau_sched *sched)
@ -489,8 +500,7 @@ nouveau_sched_fini(struct nouveau_sched *sched)
struct drm_gpu_scheduler *drm_sched = &sched->base;
struct drm_sched_entity *entity = &sched->entity;
rmb(); /* for list_empty to work without lock */
wait_event(sched->job.wq, list_empty(&sched->job.list.head));
wait_event(sched->job.wq, nouveau_sched_job_list_empty(sched));
drm_sched_entity_fini(entity);
drm_sched_fini(drm_sched);


@ -359,7 +359,7 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM;
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base,
&kingdisplay_panel_funcs,


@ -249,6 +249,11 @@ static const struct drm_display_mode default_mode = {
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
/*
* The mode data for this panel has been reverse engineered without access
* to the panel datasheet / manual. Using DRM_MODE_FLAG_PHSYNC like all
* other panels results in garbage data on the display.
*/
static const struct drm_display_mode t28cp45tn89_mode = {
.clock = 6008,
.hdisplay = 240,
@ -261,7 +266,7 @@ static const struct drm_display_mode t28cp45tn89_mode = {
.vtotal = 320 + 8 + 4 + 4,
.width_mm = 43,
.height_mm = 57,
.flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC,
};
static const struct drm_display_mode et028013dma_mode = {


@ -314,17 +314,17 @@ static int radeon_pci_probe(struct pci_dev *pdev,
ret = pci_enable_device(pdev);
if (ret)
goto err_free;
return ret;
pci_set_drvdata(pdev, ddev);
ret = radeon_driver_load_kms(ddev, flags);
if (ret)
goto err_agp;
goto err;
ret = drm_dev_register(ddev, flags);
if (ret)
goto err_agp;
goto err;
if (rdev->mc.real_vram_size <= (8 * 1024 * 1024))
format = drm_format_info(DRM_FORMAT_C8);
@ -337,30 +337,14 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return 0;
err_agp:
err:
pci_disable_device(pdev);
err_free:
drm_dev_put(ddev);
return ret;
}
static void
radeon_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_put_dev(dev);
}
static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown
*/
if (radeon_device_is_virtual())
radeon_pci_remove(pdev);
#if defined(CONFIG_PPC64) || defined(CONFIG_MACH_LOONGSON64)
/*
* Some adapters need to be suspended before a
@ -613,7 +597,6 @@ static struct pci_driver radeon_kms_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = radeon_pci_probe,
.remove = radeon_pci_remove,
.shutdown = radeon_pci_shutdown,
.driver.pm = &radeon_pm_ops,
};


@ -84,7 +84,6 @@ void radeon_driver_unload_kms(struct drm_device *dev)
rdev->agp = NULL;
done_free:
kfree(rdev);
dev->dev_private = NULL;
}


@ -70,6 +70,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->guilty = guilty;
entity->num_sched_list = num_sched_list;
entity->priority = priority;
entity->last_user = current->group_leader;
/*
* It's perfectly valid to initialize an entity without having a valid
* scheduler attached. It's just not valid to use the scheduler before it
@ -302,7 +303,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
/* For a killed process disallow further enqueueing of jobs. */
last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
if ((!last_user || last_user == current->group_leader) &&
if (last_user == current->group_leader &&
(current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
drm_sched_entity_kill(entity);
@ -552,10 +553,11 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
}
spin_unlock(&entity->lock);
if (entity->num_sched_list == 1)
entity->sched_list = NULL;
spin_unlock(&entity->lock);
}
/**


@ -813,12 +813,16 @@ static int gt_reset(struct xe_gt *gt)
unsigned int fw_ref;
int err;
if (xe_device_wedged(gt_to_xe(gt)))
return -ECANCELED;
if (xe_device_wedged(gt_to_xe(gt))) {
err = -ECANCELED;
goto err_pm_put;
}
/* We only support GT resets with GuC submission */
if (!xe_device_uc_enabled(gt_to_xe(gt)))
return -ENODEV;
if (!xe_device_uc_enabled(gt_to_xe(gt))) {
err = -ENODEV;
goto err_pm_put;
}
xe_gt_info(gt, "reset started\n");
@ -826,8 +830,6 @@ static int gt_reset(struct xe_gt *gt)
if (!err)
xe_gt_warn(gt, "reset block failed to get lifted");
xe_pm_runtime_get(gt_to_xe(gt));
if (xe_fault_inject_gt_reset()) {
err = -ECANCELED;
goto err_fail;
@ -874,6 +876,7 @@ static int gt_reset(struct xe_gt *gt)
xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
xe_device_declare_wedged(gt_to_xe(gt));
err_pm_put:
xe_pm_runtime_put(gt_to_xe(gt));
return err;
@ -895,7 +898,9 @@ void xe_gt_reset_async(struct xe_gt *gt)
return;
xe_gt_info(gt, "reset queued\n");
queue_work(gt->ordered_wq, &gt->reset.worker);
xe_pm_runtime_get_noresume(gt_to_xe(gt));
if (!queue_work(gt->ordered_wq, &gt->reset.worker))
xe_pm_runtime_put(gt_to_xe(gt));
}
void xe_gt_suspend_prepare(struct xe_gt *gt)


@ -166,10 +166,10 @@ xe_validation_device_init(struct xe_validation_device *val)
*/
DEFINE_CLASS(xe_validation, struct xe_validation_ctx *,
if (_T) xe_validation_ctx_fini(_T);,
({_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
_ret ? NULL : _ctx; }),
({*_ret = xe_validation_ctx_init(_ctx, _val, _exec, _flags);
*_ret ? NULL : _ctx; }),
struct xe_validation_ctx *_ctx, struct xe_validation_device *_val,
struct drm_exec *_exec, const struct xe_val_flags _flags, int _ret);
struct drm_exec *_exec, const struct xe_val_flags _flags, int *_ret);
static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
{return *_T; }
#define class_xe_validation_is_conditional true
@ -186,7 +186,7 @@ static inline void *class_xe_validation_lock_ptr(class_xe_validation_t *_T)
* exhaustive eviction.
*/
#define xe_validation_guard(_ctx, _val, _exec, _flags, _ret) \
scoped_guard(xe_validation, _ctx, _val, _exec, _flags, _ret) \
scoped_guard(xe_validation, _ctx, _val, _exec, _flags, &_ret) \
drm_exec_until_all_locked(_exec)
#endif

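With _ret now passed by address, a failure from xe_validation_ctx_init()
reaches the caller's variable after the guarded scope instead of being lost
in the class constructor. A usage sketch (illustrative names, not taken from
the driver; lock_and_validate() is a hypothetical helper):

	struct xe_validation_ctx ctx;
	struct drm_exec exec;
	int ret = 0;

	xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, ret) {
		ret = lock_and_validate(&exec);
		drm_exec_retry_on_contention(&exec);
	}
	if (ret)	/* now also catches the ctx-init failure */
		return ret;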

@ -206,6 +206,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
return ret;
err_free:
ib_umem_release(umem);
rdma_restrack_put(&cq->res);
kfree(cq);
err_event_file:


@ -913,7 +913,7 @@ void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
}
static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
static void bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
{
struct bnxt_re_qp *gsi_sqp;
struct bnxt_re_ah *gsi_sah;
@ -933,10 +933,9 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
if (rc) {
if (rc)
ibdev_err(&rdev->ibdev, "Destroy Shadow QP failed");
goto fail;
}
bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
/* remove from active qp list */
@ -951,10 +950,6 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
rdev->gsi_ctx.gsi_sqp = NULL;
rdev->gsi_ctx.gsi_sah = NULL;
rdev->gsi_ctx.sqp_tbl = NULL;
return 0;
fail:
return rc;
}
static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)

@ -1216,13 +1216,13 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (umem->length < cq->size) {
ibdev_dbg(&dev->ibdev, "External memory too small\n");
err = -EINVAL;
goto err_free_mem;
goto err_out;
}
if (!ib_umem_is_contiguous(umem)) {
ibdev_dbg(&dev->ibdev, "Non contiguous CQ unsupported\n");
err = -EINVAL;
goto err_free_mem;
goto err_out;
}
cq->cpu_addr = NULL;
@ -1251,7 +1251,7 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
err = efa_com_create_cq(&dev->edev, &params, &result);
if (err)
goto err_free_mem;
goto err_free_mapped;
resp.db_off = result.db_off;
resp.cq_idx = result.cq_idx;
@ -1299,12 +1299,10 @@ int efa_create_cq_umem(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
efa_cq_user_mmap_entries_remove(cq);
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mem:
if (umem)
ib_umem_release(umem);
else
efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE);
err_free_mapped:
if (!umem)
efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
DMA_FROM_DEVICE);
err_out:
atomic64_inc(&dev->stats.create_cq_err);
return err;

@ -30,6 +30,7 @@
* SOFTWARE.
*/
#include <linux/pci.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
@ -37,6 +38,43 @@
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
{
struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
return;
mutex_lock(&cq_table->bank_mutex);
cq_table->ctx_num[uctx->cq_bank_id]--;
mutex_unlock(&cq_table->bank_mutex);
}
void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx)
{
struct hns_roce_dev *hr_dev = to_hr_dev(uctx->ibucontext.device);
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
u32 least_load = cq_table->ctx_num[0];
u8 bankid = 0;
u8 i;
if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
return;
mutex_lock(&cq_table->bank_mutex);
for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
if (cq_table->ctx_num[i] < least_load) {
least_load = cq_table->ctx_num[i];
bankid = i;
}
}
cq_table->ctx_num[bankid]++;
mutex_unlock(&cq_table->bank_mutex);
uctx->cq_bank_id = bankid;
}
static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
{
u32 least_load = bank[0].inuse;
@ -55,7 +93,21 @@ static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
return bankid;
}
static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
static u8 select_cq_bankid(struct hns_roce_dev *hr_dev,
struct hns_roce_bank *bank, struct ib_udata *udata)
{
struct hns_roce_ucontext *uctx = udata ?
rdma_udata_to_drv_context(udata, struct hns_roce_ucontext,
ibucontext) : NULL;
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
return uctx ? uctx->cq_bank_id : 0;
return get_least_load_bankid_for_cq(bank);
}
static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
struct ib_udata *udata)
{
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct hns_roce_bank *bank;
@ -63,7 +115,7 @@ static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
int id;
mutex_lock(&cq_table->bank_mutex);
bankid = get_least_load_bankid_for_cq(cq_table->bank);
bankid = select_cq_bankid(hr_dev, cq_table->bank, udata);
bank = &cq_table->bank[bankid];
id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
@ -396,7 +448,7 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
goto err_cq_buf;
}
ret = alloc_cqn(hr_dev, hr_cq);
ret = alloc_cqn(hr_dev, hr_cq, udata);
if (ret) {
ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
goto err_cq_db;

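The new hns_roce helpers balance CQ banks per user context instead of per CQ on HIP09: hns_roce_get_cq_bankid_for_uctx() pins a context to the bank with the fewest contexts, and the put side drops the count at teardown, all under bank_mutex. A minimal sketch of that least-loaded bookkeeping with simplified types (pthread locking stands in for the kernel mutex):

#include <pthread.h>
#include <stdio.h>

#define BANK_NUM 4

static unsigned int ctx_num[BANK_NUM];
static pthread_mutex_t bank_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned int get_bankid(void)  /* pick + charge the emptiest bank */
{
        unsigned int least, bankid = 0, i;

        pthread_mutex_lock(&bank_mutex);
        least = ctx_num[0];
        for (i = 1; i < BANK_NUM; i++) {
                if (ctx_num[i] < least) {
                        least = ctx_num[i];
                        bankid = i;
                }
        }
        ctx_num[bankid]++;
        pthread_mutex_unlock(&bank_mutex);
        return bankid;
}

static void put_bankid(unsigned int bankid)
{
        pthread_mutex_lock(&bank_mutex);
        ctx_num[bankid]--;
        pthread_mutex_unlock(&bank_mutex);
}

int main(void)
{
        unsigned int a = get_bankid(), b = get_bankid();

        printf("ctx A -> bank %u, ctx B -> bank %u\n", a, b);  /* 0, 1 */
        put_bankid(b);
        put_bankid(a);
        return 0;
}
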
@ -217,6 +217,7 @@ struct hns_roce_ucontext {
struct mutex page_mutex;
struct hns_user_mmap_entry *db_mmap_entry;
u32 config;
u8 cq_bank_id;
};
struct hns_roce_pd {
@ -495,6 +496,7 @@ struct hns_roce_cq_table {
struct hns_roce_hem_table table;
struct hns_roce_bank bank[HNS_ROCE_CQ_BANK_NUM];
struct mutex bank_mutex;
u32 ctx_num[HNS_ROCE_CQ_BANK_NUM];
};
struct hns_roce_srq_table {
@ -1305,5 +1307,7 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
size_t length,
enum hns_roce_mmap_type mmap_type);
bool check_sl_valid(struct hns_roce_dev *hr_dev, u8 sl);
void hns_roce_put_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
void hns_roce_get_cq_bankid_for_uctx(struct hns_roce_ucontext *uctx);
#endif /* _HNS_ROCE_DEVICE_H */

@ -165,6 +165,8 @@ static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
hr_reg_write(fseg, FRMR_PBL_BUF_PG_SZ,
to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift));
hr_reg_clear(fseg, FRMR_BLK_MODE);
hr_reg_clear(fseg, FRMR_BLOCK_SIZE);
hr_reg_clear(fseg, FRMR_ZBVA);
}
static void set_atomic_seg(const struct ib_send_wr *wr,
@ -339,9 +341,6 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
int j = 0;
int i;
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
(*sge_ind) & (qp->sge.sge_cnt - 1));
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_INLINE,
!!(wr->send_flags & IB_SEND_INLINE));
if (wr->send_flags & IB_SEND_INLINE)
@ -586,6 +585,9 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_CQE,
(wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
hr_reg_write(rc_sq_wqe, RC_SEND_WQE_MSG_START_SGE_IDX,
curr_idx & (qp->sge.sge_cnt - 1));
if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
if (msg_len != ATOMIC_WR_LEN)
@ -734,6 +736,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
owner_bit =
~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
/* RC and UD share the same DirectWQE field layout */
((struct hns_roce_v2_rc_send_wqe *)wqe)->byte_4 = 0;
/* Corresponding to the QP type, wqe process separately */
if (ibqp->qp_type == IB_QPT_RC)
ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);
@ -7048,7 +7053,6 @@ static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
goto error_failed_roce_init;
}
handle->priv = hr_dev;
return 0;

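The hns_roce_hw_v2 hunks read as defenses against stale state in reused send-queue slots: byte_4 is zeroed before each WQE is rebuilt in place (RC and UD share that field layout), and set_frmr_seg() now clears FRMR_BLOCK_SIZE and FRMR_ZBVA explicitly. Ring memory keeps whatever the previous WQE wrote, so any field that is only conditionally set can leak. A tiny illustration with an invented slot layout:

#include <stdio.h>

struct wqe { unsigned int flags; unsigned int len; };

static struct wqe ring[4];   /* slots are reused, never re-zeroed */

static void post(int slot, unsigned int len, int inline_flag)
{
        struct wqe *w = &ring[slot];

        w->flags = 0;         /* without this, the previous WQE's bits survive */
        if (inline_flag)
                w->flags |= 1u;
        w->len = len;
}

int main(void)
{
        post(0, 64, 1);       /* first posting sets the inline bit    */
        post(0, 128, 0);      /* reuse: the bit must not leak through */
        printf("flags = %u\n", ring[0].flags);   /* 0 */
        return 0;
}
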
@ -425,6 +425,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
if (ret)
goto error_fail_copy_to_udata;
hns_roce_get_cq_bankid_for_uctx(context);
return 0;
error_fail_copy_to_udata:
@ -447,6 +449,8 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
hns_roce_put_cq_bankid_for_uctx(context);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB)
mutex_destroy(&context->page_mutex);

@ -662,7 +662,6 @@ static int set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
hr_qp->sq.wqe_cnt = cnt;
cap->max_send_sge = hr_qp->sq.max_gs;
return 0;
}
@ -744,7 +743,6 @@ static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
/* sync the parameters of kernel QP to user's configuration */
cap->max_send_wr = cnt;
cap->max_send_sge = hr_qp->sq.max_gs;
return 0;
}

@ -71,7 +71,7 @@ int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct sd_pd_idx *idx)
{
idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
idx->sd_idx = pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}

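The irdma one-liner is a precedence fix: in (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE the cast binds to next_fpm_addr before the division, truncating the 64-bit FPM address to 32 bits first, so sd_idx went wrong once the address crossed 4 GB. A quick demonstration with illustrative constants:

#include <stdint.h>
#include <stdio.h>

#define BP_SIZE (2ULL * 1024 * 1024)   /* stand-in direct-BP size: 2 MB */

int main(void)
{
        uint64_t addr = 6ULL * 1024 * 1024 * 1024;   /* 6 GB into the FPM */

        uint64_t wrong = (uint32_t)addr / BP_SIZE;   /* truncate, then divide */
        uint64_t right = addr / BP_SIZE;             /* divide at full width  */

        printf("wrong = %llu, right = %llu\n",       /* 1024 vs 3072 */
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}
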
@ -706,7 +706,7 @@ struct irdma_sc_dev {
u32 vchnl_ver;
u16 num_vfs;
u16 hmc_fn_id;
u8 vf_id;
u16 vf_id;
bool privileged:1;
bool vchnl_up:1;
bool ceq_valid:1;

@ -2503,6 +2503,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
spin_lock_init(&iwcq->lock);
INIT_LIST_HEAD(&iwcq->resize_list);
INIT_LIST_HEAD(&iwcq->cmpl_generated);
iwcq->cq_num = cq_num;
info.dev = dev;
ukinfo->cq_size = max(entries, 4);
ukinfo->cq_id = cq_num;

@ -140,7 +140,7 @@ struct irdma_srq {
struct irdma_cq {
struct ib_cq ibcq;
struct irdma_sc_cq sc_cq;
u16 cq_num;
u32 cq_num;
bool user_mode;
atomic_t armed;
enum irdma_cmpl_notify last_notify;

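The remaining irdma hunks widen vf_id from u8 to u16 and cq_num from u16 to u32: values that exceed the old field width would otherwise wrap silently on assignment. A two-line illustration of the wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t cq_num = 70000;   /* plausible on a large device */
        uint16_t narrow = cq_num;  /* old width: wraps to 4464    */
        uint32_t wide = cq_num;    /* new width: preserved        */

        printf("narrow = %u, wide = %u\n", narrow, wide);
        return 0;
}
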
@ -1904,13 +1904,13 @@ setup_instance(struct hfcsusb *hw, struct device *parent)
mISDN_freebchannel(&hw->bch[1]);
mISDN_freebchannel(&hw->bch[0]);
mISDN_freedchannel(&hw->dch);
kfree(hw);
return err;
}
static int
hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
int err;
struct hfcsusb *hw;
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_host_interface *iface = intf->cur_altsetting;
@ -2101,20 +2101,28 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (!hw->ctrl_urb) {
pr_warn("%s: No memory for control urb\n",
driver_info->vend_name);
kfree(hw);
return -ENOMEM;
err = -ENOMEM;
goto err_free_hw;
}
pr_info("%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
hw->name, __func__, driver_info->vend_name,
conf_str[small_match], ifnum, alt_used);
if (setup_instance(hw, dev->dev.parent))
return -EIO;
if (setup_instance(hw, dev->dev.parent)) {
err = -EIO;
goto err_free_urb;
}
hw->intf = intf;
usb_set_intfdata(hw->intf, hw);
return 0;
err_free_urb:
usb_free_urb(hw->ctrl_urb);
err_free_hw:
kfree(hw);
return err;
}
/* function called when an active device is removed */

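The hfcsusb_probe() rework is the classic goto-unwind conversion: each failure point jumps to a label that releases exactly what was already acquired, in reverse order, rather than every branch hand-rolling its own cleanup. A generic sketch of the shape with invented resources:

#include <stdio.h>
#include <stdlib.h>

struct dev { void *hw; void *urb; };

static int probe(struct dev *d, int fail_at)
{
        int err;

        d->hw = malloc(64);
        if (!d->hw)
                return -12;                     /* -ENOMEM: nothing to undo */

        d->urb = (fail_at == 1) ? NULL : malloc(64);
        if (!d->urb) {
                err = -12;
                goto err_free_hw;
        }

        if (fail_at == 2) {                     /* e.g. setup failure */
                err = -5;                       /* -EIO */
                goto err_free_urb;
        }
        return 0;

err_free_urb:                                   /* reverse order of acquisition */
        free(d->urb);
err_free_hw:
        free(d->hw);
        return err;
}

int main(void)
{
        struct dev d;

        printf("fail_at=2 -> %d (both resources released)\n", probe(&d, 2));
        return 0;
}
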
@ -1010,6 +1010,11 @@ int vb2_ioctl_remove_bufs(struct file *file, void *priv,
if (vb2_queue_is_busy(vdev->queue, file))
return -EBUSY;
if (vb2_fileio_is_active(vdev->queue)) {
dprintk(vdev->queue, 1, "file io in progress\n");
return -EBUSY;
}
return vb2_core_remove_bufs(vdev->queue, d->index, d->count);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_remove_bufs);

@ -1136,11 +1136,8 @@ int cx18_init_on_first_open(struct cx18 *cx)
int video_input;
int fw_retry_count = 3;
struct v4l2_frequency vf;
struct cx18_open_id fh;
v4l2_std_id std;
fh.cx = cx;
if (test_bit(CX18_F_I_FAILED, &cx->i_flags))
return -ENXIO;
@ -1220,14 +1217,14 @@ int cx18_init_on_first_open(struct cx18 *cx)
video_input = cx->active_input;
cx->active_input++; /* Force update of input */
cx18_s_input(NULL, &fh, video_input);
cx18_do_s_input(cx, video_input);
/* Let the VIDIOC_S_STD ioctl do all the work, keeps the code
in one place. */
cx->std++; /* Force full standard initialization */
std = (cx->tuner_std == V4L2_STD_ALL) ? V4L2_STD_NTSC_M : cx->tuner_std;
cx18_s_std(NULL, &fh, std);
cx18_s_frequency(NULL, &fh, &vf);
cx18_do_s_std(cx, std);
cx18_do_s_frequency(cx, &vf);
return 0;
}
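
The cx18 change drops the dummy struct cx18_open_id that first-open initialization used to fake a file handle: the logic moves into cx18_do_s_input()/cx18_do_s_std()/cx18_do_s_frequency() helpers that take the device directly, and the ioctl entry points stay as thin wrappers. A hedged sketch of the refactor with hypothetical names mirroring that pattern:

#include <stdio.h>

struct dev { int input; };
struct open_id { struct dev *dev; };   /* per-open handle */

static int do_s_input(struct dev *dev, int input)   /* core logic */
{
        dev->input = input;
        return 0;
}

static int ioctl_s_input(struct open_id *fh, int input)  /* thin wrapper */
{
        return do_s_input(fh->dev, input);
}

int main(void)
{
        struct dev dev = { .input = -1 };
        struct open_id fh = { .dev = &dev };

        ioctl_s_input(&fh, 2);   /* normal ioctl path              */
        do_s_input(&dev, 0);     /* init path needs no fake handle */
        printf("input = %d\n", dev.input);
        return 0;
}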

Some files were not shown because too many files have changed in this diff.