mirror of https://github.com/torvalds/linux.git

commit a9af709fda
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

    Cross-merge networking fixes after downstream PR (net-6.17-rc3).

    No conflicts or adjacent changes.

    Signed-off-by: Jakub Kicinski <kuba@kernel.org>

@@ -731,7 +731,7 @@ Contact: linux-block@vger.kernel.org
Description:
		[RW] If the device is registered for writeback throttling, then
		this file shows the target minimum read latency. If this latency
		is exceeded in a given window of time (see wb_window_usec), then
		is exceeded in a given window of time (see curr_win_nsec), then
		the writeback throttling will start scaling back writes. Writing
		a value of '0' to this file disables the feature. Writing a
		value of '-1' to this file resets the value to the default
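
The semantics above (write a latency target, '0' to disable throttling, '-1' to
restore the default) can be driven from userspace. A minimal sketch follows; it
assumes the attribute is exposed as /sys/block/<disk>/queue/wbt_lat_usec and
takes microseconds (the exact path and unit are not shown in this hunk)::

    /* Hedged sketch, not part of the patch above. */
    #include <stdio.h>

    /* Write "0", "-1", or a latency target to the wbt attribute. */
    static int set_wbt_lat(const char *disk, const char *value)
    {
        char path[256];
        FILE *f;
        int ret;

        snprintf(path, sizeof(path), "/sys/block/%s/queue/wbt_lat_usec", disk);
        f = fopen(path, "w");
        if (!f)
            return -1;
        ret = fprintf(f, "%s\n", value) < 0 ? -1 : 0;
        fclose(f);
        return ret;
    }

    int main(void)
    {
        /* "sda" and the 75000 usec (75 ms) target are example values. */
        return set_wbt_lat("sda", "75000") ? 1 : 0;
    }
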
@@ -79,7 +79,7 @@ zone_capacity_mb	Device zone capacity (must always be equal to or lower than
		the zone size. Default: zone size.
conv_zones	Total number of conventioanl zones starting from sector 0.
		Default: 8.
base_dir	Path to the base directoy where to create the directory
base_dir	Path to the base directory where to create the directory
		containing the zone files of the device.
		Default=/var/local/zloop.
The device directory containing the zone files is always
@@ -214,7 +214,7 @@ Spectre_v1 X
Spectre_v2 X X
Spectre_v2_user X X * (Note 1)
SRBDS X X X X
SRSO X X
SRSO X X X X
SSB (Note 4)
TAA X X X X * (Note 2)
TSA X X X X
@@ -76,20 +76,21 @@ unit as preprocessor statement. The above example would then read::
within the corresponding compilation unit before the #include for
<linux/export.h>. Typically it's placed before the first #include statement.

Using the EXPORT_SYMBOL_GPL_FOR_MODULES() macro
-----------------------------------------------
Using the EXPORT_SYMBOL_FOR_MODULES() macro
-------------------------------------------

Symbols exported using this macro are put into a module namespace. This
namespace cannot be imported.
namespace cannot be imported. These exports are GPL-only as they are only
intended for in-tree modules.

The macro takes a comma separated list of module names, allowing only those
modules to access this symbol. Simple tail-globs are supported.

For example::

	EXPORT_SYMBOL_GPL_FOR_MODULES(preempt_notifier_inc, "kvm,kvm-*")
	EXPORT_SYMBOL_FOR_MODULES(preempt_notifier_inc, "kvm,kvm-*")

will limit usage of this symbol to modules whoes name matches the given
will limit usage of this symbol to modules whose name matches the given
patterns.
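
As a hedged illustration of the renamed macro (not part of the patch above;
the helper name and file are made up, only EXPORT_SYMBOL_FOR_MODULES() and the
"kvm,kvm-*" pattern come from the text)::

    /* Hypothetical in-tree helper restricted to the kvm family of modules. */
    #include <linux/export.h>
    #include <linux/module.h>

    int example_core_helper(void)
    {
        return 0;
    }
    /* Only modules named "kvm" or matching the tail-glob "kvm-*" may use
     * this symbol; per the text above the export is also GPL-only. */
    EXPORT_SYMBOL_FOR_MODULES(example_core_helper, "kvm,kvm-*");
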
How to use Symbols exported in Namespaces
@@ -12,6 +12,8 @@ add_addr_timeout - INTEGER (seconds)
	resent to an MPTCP peer that has not acknowledged a previous
	ADD_ADDR message.

	Do not retransmit if set to 0.

	The default value matches TCP_RTO_MAX. This is a per-namespace
	sysctl.
@@ -22183,7 +22183,7 @@ F:	arch/s390/mm

S390 NETWORK DRIVERS
M:	Alexandra Winter <wintera@linux.ibm.com>
M:	Thorsten Winkler <twinkler@linux.ibm.com>
R:	Aswin Karuvally <aswin@linux.ibm.com>
L:	linux-s390@vger.kernel.org
L:	netdev@vger.kernel.org
S:	Supported
Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 17
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Baby Opossum Posse

# *DOCUMENTATION*
@@ -102,7 +102,13 @@ KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)

ifdef CONFIG_OBJTOOL
ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP
# The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.
# Ensure it is aware of linker with LTO, '--loongarch-annotate-tablejump' also needs to
# be passed via '-mllvm' to ld.lld.
KBUILD_CFLAGS += -mannotate-tablejump
ifdef CONFIG_LTO_CLANG
KBUILD_LDFLAGS += -mllvm --loongarch-annotate-tablejump
endif
else
KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers
endif
@@ -58,7 +58,7 @@
	.endm

	.macro STACKLEAK_ERASE
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
#ifdef CONFIG_KSTACK_ERASE
	bl	stackleak_erase_on_task_stack
#endif
	.endm
@@ -0,0 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */

#ifndef _UAPI_ASM_LOONGARCH_SETUP_H
#define _UAPI_ASM_LOONGARCH_SETUP_H

#define COMMAND_LINE_SIZE 4096

#endif /* _UAPI_ASM_LOONGARCH_SETUP_H */
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/ftrace.h>
#include <linux/sort.h>

Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
{
@@ -61,39 +62,38 @@ Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr v
	return (Elf_Addr)&plt[nr];
}

static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))

static int compare_rela(const void *x, const void *y)
{
	return x->r_info == y->r_info && x->r_addend == y->r_addend;
}
	int ret;
	const Elf_Rela *rela_x = x, *rela_y = y;

static bool duplicate_rela(const Elf_Rela *rela, int idx)
{
	int i;
	ret = cmp_3way(rela_x->r_info, rela_y->r_info);
	if (ret == 0)
		ret = cmp_3way(rela_x->r_addend, rela_y->r_addend);

	for (i = 0; i < idx; i++) {
		if (is_rela_equal(&rela[i], &rela[idx]))
			return true;
	}

	return false;
	return ret;
}

static void count_max_entries(Elf_Rela *relas, int num,
			      unsigned int *plts, unsigned int *gots)
{
	unsigned int i, type;
	unsigned int i;

	sort(relas, num, sizeof(Elf_Rela), compare_rela, NULL);

	for (i = 0; i < num; i++) {
		type = ELF_R_TYPE(relas[i].r_info);
		switch (type) {
		if (i && !compare_rela(&relas[i-1], &relas[i]))
			continue;

		switch (ELF_R_TYPE(relas[i].r_info)) {
		case R_LARCH_SOP_PUSH_PLT_PCREL:
		case R_LARCH_B26:
			if (!duplicate_rela(relas, i))
				(*plts)++;
			(*plts)++;
			break;
		case R_LARCH_GOT_PC_HI20:
			if (!duplicate_rela(relas, i))
				(*gots)++;
			(*gots)++;
			break;
		default:
			break; /* Do nothing. */
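
For reference, a userspace-only sketch of the pattern the new code switches to
(sort once, then skip entries equal to their predecessor) instead of the old
quadratic duplicate_rela() scan; struct fake_rela and the sample values are
invented for illustration::

    #include <stdio.h>
    #include <stdlib.h>

    struct fake_rela {
        unsigned long r_info;
        long r_addend;
    };

    #define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))

    static int compare_rela(const void *x, const void *y)
    {
        const struct fake_rela *a = x, *b = y;
        int ret = cmp_3way(a->r_info, b->r_info);

        return ret ? ret : cmp_3way(a->r_addend, b->r_addend);
    }

    /* Count distinct (r_info, r_addend) pairs in O(n log n). */
    static unsigned int count_unique(struct fake_rela *relas, int num)
    {
        unsigned int count = 0;
        int i;

        qsort(relas, num, sizeof(*relas), compare_rela);
        for (i = 0; i < num; i++) {
            if (i && !compare_rela(&relas[i - 1], &relas[i]))
                continue;       /* same as the previous entry */
            count++;
        }
        return count;
    }

    int main(void)
    {
        struct fake_rela r[] = { {1, 0}, {2, 4}, {1, 0}, {2, 8} };

        printf("%u unique entries\n", count_unique(r, 4));  /* prints 3 */
        return 0;
    }
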
@ -677,6 +677,11 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
|
|||
for (i = 1; i < 32; i++)
|
||||
err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_LBT
|
||||
if (extctx->lbt.addr)
|
||||
err |= protected_save_lbt_context(extctx);
|
||||
#endif
|
||||
|
||||
if (extctx->lasx.addr)
|
||||
err |= protected_save_lasx_context(extctx);
|
||||
else if (extctx->lsx.addr)
|
||||
|
|
@ -684,11 +689,6 @@ static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
|
|||
else if (extctx->fpu.addr)
|
||||
err |= protected_save_fpu_context(extctx);
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_LBT
|
||||
if (extctx->lbt.addr)
|
||||
err |= protected_save_lbt_context(extctx);
|
||||
#endif
|
||||
|
||||
/* Set the "end" magic */
|
||||
info = (struct sctx_info *)extctx->end.addr;
|
||||
err |= __put_user(0, &info->magic);
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@
|
|||
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
|
||||
*/
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/cpuhotplug.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/init.h>
|
||||
|
|
@ -102,6 +103,23 @@ static int constant_timer_next_event(unsigned long delta, struct clock_event_dev
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int arch_timer_starting(unsigned int cpu)
|
||||
{
|
||||
set_csr_ecfg(ECFGF_TIMER);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int arch_timer_dying(unsigned int cpu)
|
||||
{
|
||||
constant_set_state_shutdown(this_cpu_ptr(&constant_clockevent_device));
|
||||
|
||||
/* Clear Timer Interrupt */
|
||||
write_csr_tintclear(CSR_TINTCLR_TI);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned long get_loops_per_jiffy(void)
|
||||
{
|
||||
unsigned long lpj = (unsigned long)const_clock_freq;
|
||||
|
|
@ -172,6 +190,10 @@ int constant_clockevent_init(void)
|
|||
lpj_fine = get_loops_per_jiffy();
|
||||
pr_info("Constant clock event device register\n");
|
||||
|
||||
cpuhp_setup_state(CPUHP_AP_LOONGARCH_ARCH_TIMER_STARTING,
|
||||
"clockevents/loongarch/timer:starting",
|
||||
arch_timer_starting, arch_timer_dying);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -45,7 +45,12 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
|
|||
}
|
||||
|
||||
cpu = s->sw_coremap[irq];
|
||||
vcpu = kvm_get_vcpu(s->kvm, cpu);
|
||||
vcpu = kvm_get_vcpu_by_id(s->kvm, cpu);
|
||||
if (unlikely(vcpu == NULL)) {
|
||||
kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
|
||||
return;
|
||||
}
|
||||
|
||||
if (level) {
|
||||
/* if not enable return false */
|
||||
if (!test_bit(irq, (unsigned long *)s->enable.reg_u32))
|
||||
|
|
|
|||
|
|
@ -99,7 +99,7 @@ static void write_mailbox(struct kvm_vcpu *vcpu, int offset, uint64_t data, int
|
|||
static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
|
||||
{
|
||||
int i, idx, ret;
|
||||
uint32_t val = 0, mask = 0;
|
||||
uint64_t val = 0, mask = 0;
|
||||
|
||||
/*
|
||||
* Bit 27-30 is mask for byte writing.
|
||||
|
|
@ -108,7 +108,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
|
|||
if ((data >> 27) & 0xf) {
|
||||
/* Read the old val */
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
|
||||
ret = kvm_io_bus_read(vcpu, KVM_IOCSR_BUS, addr, 4, &val);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
if (unlikely(ret)) {
|
||||
kvm_err("%s: : read data from addr %llx failed\n", __func__, addr);
|
||||
|
|
@ -124,7 +124,7 @@ static int send_ipi_data(struct kvm_vcpu *vcpu, gpa_t addr, uint64_t data)
|
|||
}
|
||||
val |= ((uint32_t)(data >> 32) & ~mask);
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, sizeof(val), &val);
|
||||
ret = kvm_io_bus_write(vcpu, KVM_IOCSR_BUS, addr, 4, &val);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
if (unlikely(ret))
|
||||
kvm_err("%s: : write data to addr %llx failed\n", __func__, addr);
|
||||
|
|
@ -298,7 +298,7 @@ static int kvm_ipi_regs_access(struct kvm_device *dev,
|
|||
cpu = (attr->attr >> 16) & 0x3ff;
|
||||
addr = attr->attr & 0xff;
|
||||
|
||||
vcpu = kvm_get_vcpu(dev->kvm, cpu);
|
||||
vcpu = kvm_get_vcpu_by_id(dev->kvm, cpu);
|
||||
if (unlikely(vcpu == NULL)) {
|
||||
kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
|
||||
return -EINVAL;
|
||||
|
|
|
|||
|
|
@ -195,6 +195,11 @@ static int kvm_pch_pic_read(struct kvm_vcpu *vcpu,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (addr & (len - 1)) {
|
||||
kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* statistics of pch pic reading */
|
||||
vcpu->stat.pch_pic_read_exits++;
|
||||
ret = loongarch_pch_pic_read(s, addr, len, val);
|
||||
|
|
@ -302,6 +307,11 @@ static int kvm_pch_pic_write(struct kvm_vcpu *vcpu,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (addr & (len - 1)) {
|
||||
kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* statistics of pch pic writing */
|
||||
vcpu->stat.pch_pic_write_exits++;
|
||||
ret = loongarch_pch_pic_write(s, addr, len, val);
|
||||
|
|
|
|||
|
|
@ -1283,9 +1283,11 @@ int kvm_own_lbt(struct kvm_vcpu *vcpu)
|
|||
return -EINVAL;
|
||||
|
||||
preempt_disable();
|
||||
set_csr_euen(CSR_EUEN_LBTEN);
|
||||
_restore_lbt(&vcpu->arch.lbt);
|
||||
vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
|
||||
if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) {
|
||||
set_csr_euen(CSR_EUEN_LBTEN);
|
||||
_restore_lbt(&vcpu->arch.lbt);
|
||||
vcpu->arch.aux_inuse |= KVM_LARCH_LBT;
|
||||
}
|
||||
preempt_enable();
|
||||
|
||||
return 0;
|
||||
|
|
|
|||
|
|
@ -785,6 +785,7 @@ static void __head svsm_pval_4k_page(unsigned long paddr, bool validate)
|
|||
pc->entry[0].page_size = RMP_PG_SIZE_4K;
|
||||
pc->entry[0].action = validate;
|
||||
pc->entry[0].ignore_cf = 0;
|
||||
pc->entry[0].rsvd = 0;
|
||||
pc->entry[0].pfn = paddr >> PAGE_SHIFT;
|
||||
|
||||
/* Protocol 0, Call ID 1 */
|
||||
|
|
|
|||
|
|
@ -227,6 +227,7 @@ static u64 svsm_build_ca_from_pfn_range(u64 pfn, u64 pfn_end, bool action,
|
|||
pe->page_size = RMP_PG_SIZE_4K;
|
||||
pe->action = action;
|
||||
pe->ignore_cf = 0;
|
||||
pe->rsvd = 0;
|
||||
pe->pfn = pfn;
|
||||
|
||||
pe++;
|
||||
|
|
@ -257,6 +258,7 @@ static int svsm_build_ca_from_psc_desc(struct snp_psc_desc *desc, unsigned int d
|
|||
pe->page_size = e->pagesize ? RMP_PG_SIZE_2M : RMP_PG_SIZE_4K;
|
||||
pe->action = e->operation == SNP_PAGE_STATE_PRIVATE;
|
||||
pe->ignore_cf = 0;
|
||||
pe->rsvd = 0;
|
||||
pe->pfn = e->gfn;
|
||||
|
||||
pe++;
|
||||
|
|
|
|||
|
|
@ -371,29 +371,30 @@ static enum es_result __vc_handle_msr_caa(struct pt_regs *regs, bool write)
|
|||
* executing with Secure TSC enabled, so special handling is required for
|
||||
* accesses of MSR_IA32_TSC and MSR_AMD64_GUEST_TSC_FREQ.
|
||||
*/
|
||||
static enum es_result __vc_handle_secure_tsc_msrs(struct pt_regs *regs, bool write)
|
||||
static enum es_result __vc_handle_secure_tsc_msrs(struct es_em_ctxt *ctxt, bool write)
|
||||
{
|
||||
struct pt_regs *regs = ctxt->regs;
|
||||
u64 tsc;
|
||||
|
||||
/*
|
||||
* GUEST_TSC_FREQ should not be intercepted when Secure TSC is enabled.
|
||||
* Terminate the SNP guest when the interception is enabled.
|
||||
* Writing to MSR_IA32_TSC can cause subsequent reads of the TSC to
|
||||
* return undefined values, and GUEST_TSC_FREQ is read-only. Generate
|
||||
* a #GP on all writes.
|
||||
*/
|
||||
if (write) {
|
||||
ctxt->fi.vector = X86_TRAP_GP;
|
||||
ctxt->fi.error_code = 0;
|
||||
return ES_EXCEPTION;
|
||||
}
|
||||
|
||||
/*
|
||||
* GUEST_TSC_FREQ read should not be intercepted when Secure TSC is
|
||||
* enabled. Terminate the guest if a read is attempted.
|
||||
*/
|
||||
if (regs->cx == MSR_AMD64_GUEST_TSC_FREQ)
|
||||
return ES_VMM_ERROR;
|
||||
|
||||
/*
|
||||
* Writes: Writing to MSR_IA32_TSC can cause subsequent reads of the TSC
|
||||
* to return undefined values, so ignore all writes.
|
||||
*
|
||||
* Reads: Reads of MSR_IA32_TSC should return the current TSC value, use
|
||||
* the value returned by rdtsc_ordered().
|
||||
*/
|
||||
if (write) {
|
||||
WARN_ONCE(1, "TSC MSR writes are verboten!\n");
|
||||
return ES_OK;
|
||||
}
|
||||
|
||||
/* Reads of MSR_IA32_TSC should return the current TSC value. */
|
||||
tsc = rdtsc_ordered();
|
||||
regs->ax = lower_32_bits(tsc);
|
||||
regs->dx = upper_32_bits(tsc);
|
||||
|
|
@ -416,7 +417,7 @@ static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
|
|||
case MSR_IA32_TSC:
|
||||
case MSR_AMD64_GUEST_TSC_FREQ:
|
||||
if (sev_status & MSR_AMD64_SNP_SECURE_TSC)
|
||||
return __vc_handle_secure_tsc_msrs(regs, write);
|
||||
return __vc_handle_secure_tsc_msrs(ctxt, write);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
|
|
|||
|
|
@ -1,8 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
|
||||
#ifndef _ASM_X86_CPUID_H
|
||||
#define _ASM_X86_CPUID_H
|
||||
|
||||
#include <asm/cpuid/api.h>
|
||||
|
||||
#endif /* _ASM_X86_CPUID_H */
|
||||
|
|
@ -386,7 +386,6 @@ static bool __init should_mitigate_vuln(unsigned int bug)
|
|||
|
||||
case X86_BUG_SPECTRE_V2:
|
||||
case X86_BUG_RETBLEED:
|
||||
case X86_BUG_SRSO:
|
||||
case X86_BUG_L1TF:
|
||||
case X86_BUG_ITS:
|
||||
return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) ||
|
||||
|
|
@ -3184,8 +3183,18 @@ static void __init srso_select_mitigation(void)
|
|||
}
|
||||
|
||||
if (srso_mitigation == SRSO_MITIGATION_AUTO) {
|
||||
if (should_mitigate_vuln(X86_BUG_SRSO)) {
|
||||
/*
|
||||
* Use safe-RET if user->kernel or guest->host protection is
|
||||
* required. Otherwise the 'microcode' mitigation is sufficient
|
||||
* to protect the user->user and guest->guest vectors.
|
||||
*/
|
||||
if (cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_HOST) ||
|
||||
(cpu_attack_vector_mitigated(CPU_MITIGATE_USER_KERNEL) &&
|
||||
!boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))) {
|
||||
srso_mitigation = SRSO_MITIGATION_SAFE_RET;
|
||||
} else if (cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
|
||||
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST)) {
|
||||
srso_mitigation = SRSO_MITIGATION_MICROCODE;
|
||||
} else {
|
||||
srso_mitigation = SRSO_MITIGATION_NONE;
|
||||
return;
|
||||
|
|
|
|||
|
|
@ -1881,19 +1881,20 @@ long fpu_xstate_prctl(int option, unsigned long arg2)
|
|||
#ifdef CONFIG_PROC_PID_ARCH_STATUS
|
||||
/*
|
||||
* Report the amount of time elapsed in millisecond since last AVX512
|
||||
* use in the task.
|
||||
* use in the task. Report -1 if no AVX-512 usage.
|
||||
*/
|
||||
static void avx512_status(struct seq_file *m, struct task_struct *task)
|
||||
{
|
||||
unsigned long timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);
|
||||
long delta;
|
||||
unsigned long timestamp;
|
||||
long delta = -1;
|
||||
|
||||
if (!timestamp) {
|
||||
/*
|
||||
* Report -1 if no AVX512 usage
|
||||
*/
|
||||
delta = -1;
|
||||
} else {
|
||||
/* AVX-512 usage is not tracked for kernel threads. Don't report anything. */
|
||||
if (task->flags & (PF_KTHREAD | PF_USER_WORKER))
|
||||
return;
|
||||
|
||||
timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);
|
||||
|
||||
if (timestamp) {
|
||||
delta = (long)(jiffies - timestamp);
|
||||
/*
|
||||
* Cap to LONG_MAX if time difference > LONG_MAX
|
||||
|
|
|
|||
|
|
@ -5847,8 +5847,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
|
|||
goto out;
|
||||
}
|
||||
|
||||
bfqq = kmem_cache_alloc_node(bfq_pool,
|
||||
GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
|
||||
bfqq = kmem_cache_alloc_node(bfq_pool, GFP_NOWAIT | __GFP_ZERO,
|
||||
bfqd->queue->node);
|
||||
|
||||
if (bfqq) {
|
||||
|
|
|
|||
|
|
@ -394,7 +394,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
|
|||
|
||||
/* allocate */
|
||||
if (!new_blkg) {
|
||||
new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN);
|
||||
new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT);
|
||||
if (unlikely(!new_blkg)) {
|
||||
ret = -ENOMEM;
|
||||
goto err_put_css;
|
||||
|
|
@ -1467,7 +1467,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
|
|||
|
||||
spin_lock_init(&blkcg->lock);
|
||||
refcount_set(&blkcg->online_pin, 1);
|
||||
INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
|
||||
INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
|
||||
INIT_HLIST_HEAD(&blkcg->blkg_list);
|
||||
#ifdef CONFIG_CGROUP_WRITEBACK
|
||||
INIT_LIST_HEAD(&blkcg->cgwb_list);
|
||||
|
|
@ -1630,7 +1630,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
|
|||
pd_prealloc = NULL;
|
||||
} else {
|
||||
pd = pol->pd_alloc_fn(disk, blkg->blkcg,
|
||||
GFP_NOWAIT | __GFP_NOWARN);
|
||||
GFP_NOWAIT);
|
||||
}
|
||||
|
||||
if (!pd) {
|
||||
|
|
|
|||
|
|
@ -847,7 +847,7 @@ static void blk_queue_release(struct kobject *kobj)
|
|||
/* nothing to do here, all data is associated with the parent gendisk */
|
||||
}
|
||||
|
||||
static const struct kobj_type blk_queue_ktype = {
|
||||
const struct kobj_type blk_queue_ktype = {
|
||||
.default_groups = blk_queue_attr_groups,
|
||||
.sysfs_ops = &queue_sysfs_ops,
|
||||
.release = blk_queue_release,
|
||||
|
|
@ -875,15 +875,14 @@ int blk_register_queue(struct gendisk *disk)
|
|||
struct request_queue *q = disk->queue;
|
||||
int ret;
|
||||
|
||||
kobject_init(&disk->queue_kobj, &blk_queue_ktype);
|
||||
ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
|
||||
if (ret < 0)
|
||||
goto out_put_queue_kobj;
|
||||
return ret;
|
||||
|
||||
if (queue_is_mq(q)) {
|
||||
ret = blk_mq_sysfs_register(disk);
|
||||
if (ret)
|
||||
goto out_put_queue_kobj;
|
||||
goto out_del_queue_kobj;
|
||||
}
|
||||
mutex_lock(&q->sysfs_lock);
|
||||
|
||||
|
|
@ -903,9 +902,9 @@ int blk_register_queue(struct gendisk *disk)
|
|||
|
||||
if (queue_is_mq(q))
|
||||
elevator_set_default(q);
|
||||
wbt_enable_default(disk);
|
||||
|
||||
blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
|
||||
wbt_enable_default(disk);
|
||||
|
||||
/* Now everything is ready and send out KOBJ_ADD uevent */
|
||||
kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
|
||||
|
|
@ -934,8 +933,8 @@ int blk_register_queue(struct gendisk *disk)
|
|||
mutex_unlock(&q->sysfs_lock);
|
||||
if (queue_is_mq(q))
|
||||
blk_mq_sysfs_unregister(disk);
|
||||
out_put_queue_kobj:
|
||||
kobject_put(&disk->queue_kobj);
|
||||
out_del_queue_kobj:
|
||||
kobject_del(&disk->queue_kobj);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
@ -986,5 +985,4 @@ void blk_unregister_queue(struct gendisk *disk)
|
|||
elevator_set_none(q);
|
||||
|
||||
blk_debugfs_remove(disk);
|
||||
kobject_put(&disk->queue_kobj);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -85,8 +85,8 @@ struct rq_wb {
|
|||
u64 sync_issue;
|
||||
void *sync_cookie;
|
||||
|
||||
unsigned long last_issue; /* last non-throttled issue */
|
||||
unsigned long last_comp; /* last non-throttled comp */
|
||||
unsigned long last_issue; /* issue time of last read rq */
|
||||
unsigned long last_comp; /* completion time of last read rq */
|
||||
unsigned long min_lat_nsec;
|
||||
struct rq_qos rqos;
|
||||
struct rq_wait rq_wait[WBT_NUM_RWQ];
|
||||
|
|
@ -248,13 +248,14 @@ static void wbt_done(struct rq_qos *rqos, struct request *rq)
|
|||
struct rq_wb *rwb = RQWB(rqos);
|
||||
|
||||
if (!wbt_is_tracked(rq)) {
|
||||
if (rwb->sync_cookie == rq) {
|
||||
rwb->sync_issue = 0;
|
||||
rwb->sync_cookie = NULL;
|
||||
}
|
||||
if (wbt_is_read(rq)) {
|
||||
if (rwb->sync_cookie == rq) {
|
||||
rwb->sync_issue = 0;
|
||||
rwb->sync_cookie = NULL;
|
||||
}
|
||||
|
||||
if (wbt_is_read(rq))
|
||||
wb_timestamp(rwb, &rwb->last_comp);
|
||||
}
|
||||
} else {
|
||||
WARN_ON_ONCE(rq == rwb->sync_cookie);
|
||||
__wbt_done(rqos, wbt_flags(rq));
|
||||
|
|
|
|||
|
|
@ -29,6 +29,7 @@ struct elevator_tags;
|
|||
/* Max future timer expiry for timeouts */
|
||||
#define BLK_MAX_TIMEOUT (5 * HZ)
|
||||
|
||||
extern const struct kobj_type blk_queue_ktype;
|
||||
extern struct dentry *blk_debugfs_root;
|
||||
|
||||
struct blk_flush_queue {
|
||||
|
|
|
|||
|
|
@ -1303,6 +1303,7 @@ static void disk_release(struct device *dev)
|
|||
disk_free_zone_resources(disk);
|
||||
xa_destroy(&disk->part_tbl);
|
||||
|
||||
kobject_put(&disk->queue_kobj);
|
||||
disk->queue->disk = NULL;
|
||||
blk_put_queue(disk->queue);
|
||||
|
||||
|
|
@ -1486,6 +1487,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
|
|||
INIT_LIST_HEAD(&disk->slave_bdevs);
|
||||
#endif
|
||||
mutex_init(&disk->rqos_state_mutex);
|
||||
kobject_init(&disk->queue_kobj, &blk_queue_ktype);
|
||||
return disk;
|
||||
|
||||
out_erase_part0:
|
||||
|
|
|
|||
|
|
@ -2033,7 +2033,7 @@ void __init acpi_ec_ecdt_probe(void)
|
|||
goto out;
|
||||
}
|
||||
|
||||
if (!strstarts(ecdt_ptr->id, "\\")) {
|
||||
if (!strlen(ecdt_ptr->id)) {
|
||||
/*
|
||||
* The ECDT table on some MSI notebooks contains invalid data, together
|
||||
* with an empty ID string ("").
|
||||
|
|
@ -2042,9 +2042,13 @@ void __init acpi_ec_ecdt_probe(void)
|
|||
* a "fully qualified reference to the (...) embedded controller device",
|
||||
* so this string always has to start with a backslash.
|
||||
*
|
||||
* By verifying this we can avoid such faulty ECDT tables in a safe way.
|
||||
* However some ThinkBook machines have a ECDT table with a valid EC
|
||||
* description but an invalid ID string ("_SB.PC00.LPCB.EC0").
|
||||
*
|
||||
* Because of this we only check if the ID string is empty in order to
|
||||
* avoid the obvious cases.
|
||||
*/
|
||||
pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id);
|
||||
pr_err(FW_BUG "Ignoring ECDT due to empty ID string\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -180,7 +180,7 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
|
|||
struct acpi_processor *pr = per_cpu(processors, cpu);
|
||||
int ret;
|
||||
|
||||
if (!pr || !pr->performance)
|
||||
if (!pr)
|
||||
continue;
|
||||
|
||||
/*
|
||||
|
|
@ -197,6 +197,9 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
|
|||
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
|
||||
cpu, ret);
|
||||
|
||||
if (!pr->performance)
|
||||
continue;
|
||||
|
||||
ret = acpi_processor_get_platform_limit(pr);
|
||||
if (ret)
|
||||
pr_err("Failed to update freq constraint for CPU%d (%d)\n",
|
||||
|
|
|
|||
|
|
@ -2075,7 +2075,7 @@ static void ata_eh_get_success_sense(struct ata_link *link)
|
|||
* Check if a link is established. This is a relaxed version of
|
||||
* ata_phys_link_online() which accounts for the fact that this is potentially
|
||||
* called after changing the link power management policy, which may not be
|
||||
* reflected immediately in the SSTAUS register (e.g., we may still be seeing
|
||||
* reflected immediately in the SStatus register (e.g., we may still be seeing
|
||||
* the PHY in partial, slumber or devsleep Partial power management state.
|
||||
* So check that:
|
||||
* - A device is still present, that is, DET is 1h (Device presence detected
|
||||
|
|
@ -2089,8 +2089,13 @@ static bool ata_eh_link_established(struct ata_link *link)
|
|||
u32 sstatus;
|
||||
u8 det, ipm;
|
||||
|
||||
/*
|
||||
* For old IDE/PATA adapters that do not have a valid scr_read method,
|
||||
* or if reading the SStatus register fails, assume that the device is
|
||||
* present. Device probe will determine if that is really the case.
|
||||
*/
|
||||
if (sata_scr_read(link, SCR_STATUS, &sstatus))
|
||||
return false;
|
||||
return true;
|
||||
|
||||
det = sstatus & 0x0f;
|
||||
ipm = (sstatus >> 8) & 0x0f;
|
||||
|
|
|
|||
|
|
@ -3904,21 +3904,16 @@ static int ata_mselect_control_ata_feature(struct ata_queued_cmd *qc,
|
|||
/* Check cdl_ctrl */
|
||||
switch (buf[0] & 0x03) {
|
||||
case 0:
|
||||
/* Disable CDL if it is enabled */
|
||||
if (!(dev->flags & ATA_DFLAG_CDL_ENABLED))
|
||||
return 0;
|
||||
/* Disable CDL */
|
||||
ata_dev_dbg(dev, "Disabling CDL\n");
|
||||
cdl_action = 0;
|
||||
dev->flags &= ~ATA_DFLAG_CDL_ENABLED;
|
||||
break;
|
||||
case 0x02:
|
||||
/*
|
||||
* Enable CDL if not already enabled. Since this is mutually
|
||||
* exclusive with NCQ priority, allow this only if NCQ priority
|
||||
* is disabled.
|
||||
* Enable CDL. Since CDL is mutually exclusive with NCQ
|
||||
* priority, allow this only if NCQ priority is disabled.
|
||||
*/
|
||||
if (dev->flags & ATA_DFLAG_CDL_ENABLED)
|
||||
return 0;
|
||||
if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED) {
|
||||
ata_dev_err(dev,
|
||||
"NCQ priority must be disabled to enable CDL\n");
|
||||
|
|
|
|||
|
|
@ -380,6 +380,9 @@ enum {
|
|||
/* this is/was a write request */
|
||||
__EE_WRITE,
|
||||
|
||||
/* hand back using mempool_free(e, drbd_buffer_page_pool) */
|
||||
__EE_RELEASE_TO_MEMPOOL,
|
||||
|
||||
/* this is/was a write same request */
|
||||
__EE_WRITE_SAME,
|
||||
|
||||
|
|
@ -402,6 +405,7 @@ enum {
|
|||
#define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
|
||||
#define EE_SUBMITTED (1<<__EE_SUBMITTED)
|
||||
#define EE_WRITE (1<<__EE_WRITE)
|
||||
#define EE_RELEASE_TO_MEMPOOL (1<<__EE_RELEASE_TO_MEMPOOL)
|
||||
#define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
|
||||
#define EE_APPLICATION (1<<__EE_APPLICATION)
|
||||
#define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
|
||||
|
|
@ -858,7 +862,6 @@ struct drbd_device {
|
|||
struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
|
||||
struct list_head done_ee; /* need to send P_WRITE_ACK */
|
||||
struct list_head read_ee; /* [RS]P_DATA_REQUEST being read */
|
||||
struct list_head net_ee; /* zero-copy network send in progress */
|
||||
|
||||
struct list_head resync_reads;
|
||||
atomic_t pp_in_use; /* allocated from page pool */
|
||||
|
|
@ -1329,24 +1332,6 @@ extern struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
|
|||
extern mempool_t drbd_request_mempool;
|
||||
extern mempool_t drbd_ee_mempool;
|
||||
|
||||
/* drbd's page pool, used to buffer data received from the peer,
|
||||
* or data requested by the peer.
|
||||
*
|
||||
* This does not have an emergency reserve.
|
||||
*
|
||||
* When allocating from this pool, it first takes pages from the pool.
|
||||
* Only if the pool is depleted will try to allocate from the system.
|
||||
*
|
||||
* The assumption is that pages taken from this pool will be processed,
|
||||
* and given back, "quickly", and then can be recycled, so we can avoid
|
||||
* frequent calls to alloc_page(), and still will be able to make progress even
|
||||
* under memory pressure.
|
||||
*/
|
||||
extern struct page *drbd_pp_pool;
|
||||
extern spinlock_t drbd_pp_lock;
|
||||
extern int drbd_pp_vacant;
|
||||
extern wait_queue_head_t drbd_pp_wait;
|
||||
|
||||
/* We also need a standard (emergency-reserve backed) page pool
|
||||
* for meta data IO (activity log, bitmap).
|
||||
* We can keep it global, as long as it is used as "N pages at a time".
|
||||
|
|
@ -1354,6 +1339,7 @@ extern wait_queue_head_t drbd_pp_wait;
|
|||
*/
|
||||
#define DRBD_MIN_POOL_PAGES 128
|
||||
extern mempool_t drbd_md_io_page_pool;
|
||||
extern mempool_t drbd_buffer_page_pool;
|
||||
|
||||
/* We also need to make sure we get a bio
|
||||
* when we need it for housekeeping purposes */
|
||||
|
|
@ -1488,10 +1474,7 @@ extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *,
|
|||
sector_t, unsigned int,
|
||||
unsigned int,
|
||||
gfp_t) __must_hold(local);
|
||||
extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
|
||||
int);
|
||||
#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
|
||||
#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
|
||||
extern void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *req);
|
||||
extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
|
||||
extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
|
||||
extern int drbd_connected(struct drbd_peer_device *);
|
||||
|
|
@ -1610,16 +1593,6 @@ static inline struct page *page_chain_next(struct page *page)
|
|||
for (; page && ({ n = page_chain_next(page); 1; }); page = n)
|
||||
|
||||
|
||||
static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
|
||||
{
|
||||
struct page *page = peer_req->pages;
|
||||
page_chain_for_each(page) {
|
||||
if (page_count(page) > 1)
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline union drbd_state drbd_read_state(struct drbd_device *device)
|
||||
{
|
||||
struct drbd_resource *resource = device->resource;
|
||||
|
|
|
|||
|
|
@ -114,20 +114,10 @@ struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
|
|||
mempool_t drbd_request_mempool;
|
||||
mempool_t drbd_ee_mempool;
|
||||
mempool_t drbd_md_io_page_pool;
|
||||
mempool_t drbd_buffer_page_pool;
|
||||
struct bio_set drbd_md_io_bio_set;
|
||||
struct bio_set drbd_io_bio_set;
|
||||
|
||||
/* I do not use a standard mempool, because:
|
||||
1) I want to hand out the pre-allocated objects first.
|
||||
2) I want to be able to interrupt sleeping allocation with a signal.
|
||||
Note: This is a single linked list, the next pointer is the private
|
||||
member of struct page.
|
||||
*/
|
||||
struct page *drbd_pp_pool;
|
||||
DEFINE_SPINLOCK(drbd_pp_lock);
|
||||
int drbd_pp_vacant;
|
||||
wait_queue_head_t drbd_pp_wait;
|
||||
|
||||
DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
|
||||
|
||||
static const struct block_device_operations drbd_ops = {
|
||||
|
|
@ -1611,6 +1601,7 @@ static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *b
|
|||
static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
|
||||
struct drbd_peer_request *peer_req)
|
||||
{
|
||||
bool use_sendpage = !(peer_req->flags & EE_RELEASE_TO_MEMPOOL);
|
||||
struct page *page = peer_req->pages;
|
||||
unsigned len = peer_req->i.size;
|
||||
int err;
|
||||
|
|
@ -1619,8 +1610,13 @@ static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
|
|||
page_chain_for_each(page) {
|
||||
unsigned l = min_t(unsigned, len, PAGE_SIZE);
|
||||
|
||||
err = _drbd_send_page(peer_device, page, 0, l,
|
||||
page_chain_next(page) ? MSG_MORE : 0);
|
||||
if (likely(use_sendpage))
|
||||
err = _drbd_send_page(peer_device, page, 0, l,
|
||||
page_chain_next(page) ? MSG_MORE : 0);
|
||||
else
|
||||
err = _drbd_no_send_page(peer_device, page, 0, l,
|
||||
page_chain_next(page) ? MSG_MORE : 0);
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
len -= l;
|
||||
|
|
@ -1962,7 +1958,6 @@ void drbd_init_set_defaults(struct drbd_device *device)
|
|||
INIT_LIST_HEAD(&device->sync_ee);
|
||||
INIT_LIST_HEAD(&device->done_ee);
|
||||
INIT_LIST_HEAD(&device->read_ee);
|
||||
INIT_LIST_HEAD(&device->net_ee);
|
||||
INIT_LIST_HEAD(&device->resync_reads);
|
||||
INIT_LIST_HEAD(&device->resync_work.list);
|
||||
INIT_LIST_HEAD(&device->unplug_work.list);
|
||||
|
|
@ -2043,7 +2038,6 @@ void drbd_device_cleanup(struct drbd_device *device)
|
|||
D_ASSERT(device, list_empty(&device->sync_ee));
|
||||
D_ASSERT(device, list_empty(&device->done_ee));
|
||||
D_ASSERT(device, list_empty(&device->read_ee));
|
||||
D_ASSERT(device, list_empty(&device->net_ee));
|
||||
D_ASSERT(device, list_empty(&device->resync_reads));
|
||||
D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
|
||||
D_ASSERT(device, list_empty(&device->resync_work.list));
|
||||
|
|
@ -2055,19 +2049,11 @@ void drbd_device_cleanup(struct drbd_device *device)
|
|||
|
||||
static void drbd_destroy_mempools(void)
|
||||
{
|
||||
struct page *page;
|
||||
|
||||
while (drbd_pp_pool) {
|
||||
page = drbd_pp_pool;
|
||||
drbd_pp_pool = (struct page *)page_private(page);
|
||||
__free_page(page);
|
||||
drbd_pp_vacant--;
|
||||
}
|
||||
|
||||
/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
|
||||
|
||||
bioset_exit(&drbd_io_bio_set);
|
||||
bioset_exit(&drbd_md_io_bio_set);
|
||||
mempool_exit(&drbd_buffer_page_pool);
|
||||
mempool_exit(&drbd_md_io_page_pool);
|
||||
mempool_exit(&drbd_ee_mempool);
|
||||
mempool_exit(&drbd_request_mempool);
|
||||
|
|
@ -2086,9 +2072,8 @@ static void drbd_destroy_mempools(void)
|
|||
|
||||
static int drbd_create_mempools(void)
|
||||
{
|
||||
struct page *page;
|
||||
const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
|
||||
int i, ret;
|
||||
int ret;
|
||||
|
||||
/* caches */
|
||||
drbd_request_cache = kmem_cache_create(
|
||||
|
|
@ -2125,6 +2110,10 @@ static int drbd_create_mempools(void)
|
|||
if (ret)
|
||||
goto Enomem;
|
||||
|
||||
ret = mempool_init_page_pool(&drbd_buffer_page_pool, number, 0);
|
||||
if (ret)
|
||||
goto Enomem;
|
||||
|
||||
ret = mempool_init_slab_pool(&drbd_request_mempool, number,
|
||||
drbd_request_cache);
|
||||
if (ret)
|
||||
|
|
@ -2134,15 +2123,6 @@ static int drbd_create_mempools(void)
|
|||
if (ret)
|
||||
goto Enomem;
|
||||
|
||||
for (i = 0; i < number; i++) {
|
||||
page = alloc_page(GFP_HIGHUSER);
|
||||
if (!page)
|
||||
goto Enomem;
|
||||
set_page_private(page, (unsigned long)drbd_pp_pool);
|
||||
drbd_pp_pool = page;
|
||||
}
|
||||
drbd_pp_vacant = number;
|
||||
|
||||
return 0;
|
||||
|
||||
Enomem:
|
||||
|
|
@ -2169,10 +2149,6 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
|
|||
rr = drbd_free_peer_reqs(device, &device->done_ee);
|
||||
if (rr)
|
||||
drbd_err(device, "%d EEs in done list found!\n", rr);
|
||||
|
||||
rr = drbd_free_peer_reqs(device, &device->net_ee);
|
||||
if (rr)
|
||||
drbd_err(device, "%d EEs in net list found!\n", rr);
|
||||
}
|
||||
|
||||
/* caution. no locking. */
|
||||
|
|
@ -2863,11 +2839,6 @@ static int __init drbd_init(void)
|
|||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
* allocate all necessary structs
|
||||
*/
|
||||
init_waitqueue_head(&drbd_pp_wait);
|
||||
|
||||
drbd_proc = NULL; /* play safe for drbd_cleanup */
|
||||
idr_init(&drbd_devices);
|
||||
|
||||
|
|
|
|||
|
|
@ -33,6 +33,7 @@
|
|||
#include <linux/string.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/part_stat.h>
|
||||
#include <linux/mempool.h>
|
||||
#include "drbd_int.h"
|
||||
#include "drbd_protocol.h"
|
||||
#include "drbd_req.h"
|
||||
|
|
@ -63,182 +64,31 @@ static int e_end_block(struct drbd_work *, int);
|
|||
|
||||
#define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN)
|
||||
|
||||
/*
|
||||
* some helper functions to deal with single linked page lists,
|
||||
* page->private being our "next" pointer.
|
||||
*/
|
||||
|
||||
/* If at least n pages are linked at head, get n pages off.
|
||||
* Otherwise, don't modify head, and return NULL.
|
||||
* Locking is the responsibility of the caller.
|
||||
*/
|
||||
static struct page *page_chain_del(struct page **head, int n)
|
||||
{
|
||||
struct page *page;
|
||||
struct page *tmp;
|
||||
|
||||
BUG_ON(!n);
|
||||
BUG_ON(!head);
|
||||
|
||||
page = *head;
|
||||
|
||||
if (!page)
|
||||
return NULL;
|
||||
|
||||
while (page) {
|
||||
tmp = page_chain_next(page);
|
||||
if (--n == 0)
|
||||
break; /* found sufficient pages */
|
||||
if (tmp == NULL)
|
||||
/* insufficient pages, don't use any of them. */
|
||||
return NULL;
|
||||
page = tmp;
|
||||
}
|
||||
|
||||
/* add end of list marker for the returned list */
|
||||
set_page_private(page, 0);
|
||||
/* actual return value, and adjustment of head */
|
||||
page = *head;
|
||||
*head = tmp;
|
||||
return page;
|
||||
}
|
||||
|
||||
/* may be used outside of locks to find the tail of a (usually short)
|
||||
* "private" page chain, before adding it back to a global chain head
|
||||
* with page_chain_add() under a spinlock. */
|
||||
static struct page *page_chain_tail(struct page *page, int *len)
|
||||
{
|
||||
struct page *tmp;
|
||||
int i = 1;
|
||||
while ((tmp = page_chain_next(page))) {
|
||||
++i;
|
||||
page = tmp;
|
||||
}
|
||||
if (len)
|
||||
*len = i;
|
||||
return page;
|
||||
}
|
||||
|
||||
static int page_chain_free(struct page *page)
|
||||
{
|
||||
struct page *tmp;
|
||||
int i = 0;
|
||||
page_chain_for_each_safe(page, tmp) {
|
||||
put_page(page);
|
||||
++i;
|
||||
}
|
||||
return i;
|
||||
}
|
||||
|
||||
static void page_chain_add(struct page **head,
|
||||
struct page *chain_first, struct page *chain_last)
|
||||
{
|
||||
#if 1
|
||||
struct page *tmp;
|
||||
tmp = page_chain_tail(chain_first, NULL);
|
||||
BUG_ON(tmp != chain_last);
|
||||
#endif
|
||||
|
||||
/* add chain to head */
|
||||
set_page_private(chain_last, (unsigned long)*head);
|
||||
*head = chain_first;
|
||||
}
|
||||
|
||||
static struct page *__drbd_alloc_pages(struct drbd_device *device,
|
||||
unsigned int number)
|
||||
static struct page *__drbd_alloc_pages(unsigned int number)
|
||||
{
|
||||
struct page *page = NULL;
|
||||
struct page *tmp = NULL;
|
||||
unsigned int i = 0;
|
||||
|
||||
/* Yes, testing drbd_pp_vacant outside the lock is racy.
|
||||
* So what. It saves a spin_lock. */
|
||||
if (drbd_pp_vacant >= number) {
|
||||
spin_lock(&drbd_pp_lock);
|
||||
page = page_chain_del(&drbd_pp_pool, number);
|
||||
if (page)
|
||||
drbd_pp_vacant -= number;
|
||||
spin_unlock(&drbd_pp_lock);
|
||||
if (page)
|
||||
return page;
|
||||
}
|
||||
|
||||
/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
|
||||
* "criss-cross" setup, that might cause write-out on some other DRBD,
|
||||
* which in turn might block on the other node at this very place. */
|
||||
for (i = 0; i < number; i++) {
|
||||
tmp = alloc_page(GFP_TRY);
|
||||
tmp = mempool_alloc(&drbd_buffer_page_pool, GFP_TRY);
|
||||
if (!tmp)
|
||||
break;
|
||||
goto fail;
|
||||
set_page_private(tmp, (unsigned long)page);
|
||||
page = tmp;
|
||||
}
|
||||
|
||||
if (i == number)
|
||||
return page;
|
||||
|
||||
/* Not enough pages immediately available this time.
|
||||
* No need to jump around here, drbd_alloc_pages will retry this
|
||||
* function "soon". */
|
||||
if (page) {
|
||||
tmp = page_chain_tail(page, NULL);
|
||||
spin_lock(&drbd_pp_lock);
|
||||
page_chain_add(&drbd_pp_pool, page, tmp);
|
||||
drbd_pp_vacant += i;
|
||||
spin_unlock(&drbd_pp_lock);
|
||||
return page;
|
||||
fail:
|
||||
page_chain_for_each_safe(page, tmp) {
|
||||
set_page_private(page, 0);
|
||||
mempool_free(page, &drbd_buffer_page_pool);
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void reclaim_finished_net_peer_reqs(struct drbd_device *device,
|
||||
struct list_head *to_be_freed)
|
||||
{
|
||||
struct drbd_peer_request *peer_req, *tmp;
|
||||
|
||||
/* The EEs are always appended to the end of the list. Since
|
||||
they are sent in order over the wire, they have to finish
|
||||
in order. As soon as we see the first not finished we can
|
||||
stop to examine the list... */
|
||||
|
||||
list_for_each_entry_safe(peer_req, tmp, &device->net_ee, w.list) {
|
||||
if (drbd_peer_req_has_active_page(peer_req))
|
||||
break;
|
||||
list_move(&peer_req->w.list, to_be_freed);
|
||||
}
|
||||
}
|
||||
|
||||
static void drbd_reclaim_net_peer_reqs(struct drbd_device *device)
|
||||
{
|
||||
LIST_HEAD(reclaimed);
|
||||
struct drbd_peer_request *peer_req, *t;
|
||||
|
||||
spin_lock_irq(&device->resource->req_lock);
|
||||
reclaim_finished_net_peer_reqs(device, &reclaimed);
|
||||
spin_unlock_irq(&device->resource->req_lock);
|
||||
list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
|
||||
drbd_free_net_peer_req(device, peer_req);
|
||||
}
|
||||
|
||||
static void conn_reclaim_net_peer_reqs(struct drbd_connection *connection)
|
||||
{
|
||||
struct drbd_peer_device *peer_device;
|
||||
int vnr;
|
||||
|
||||
rcu_read_lock();
|
||||
idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
|
||||
struct drbd_device *device = peer_device->device;
|
||||
if (!atomic_read(&device->pp_in_use_by_net))
|
||||
continue;
|
||||
|
||||
kref_get(&device->kref);
|
||||
rcu_read_unlock();
|
||||
drbd_reclaim_net_peer_reqs(device);
|
||||
kref_put(&device->kref, drbd_destroy_device);
|
||||
rcu_read_lock();
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
* drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
|
||||
* @peer_device: DRBD device.
|
||||
|
|
@ -263,9 +113,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
|
|||
bool retry)
|
||||
{
|
||||
struct drbd_device *device = peer_device->device;
|
||||
struct page *page = NULL;
|
||||
struct page *page;
|
||||
struct net_conf *nc;
|
||||
DEFINE_WAIT(wait);
|
||||
unsigned int mxb;
|
||||
|
||||
rcu_read_lock();
|
||||
|
|
@ -273,37 +122,9 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
|
|||
mxb = nc ? nc->max_buffers : 1000000;
|
||||
rcu_read_unlock();
|
||||
|
||||
if (atomic_read(&device->pp_in_use) < mxb)
|
||||
page = __drbd_alloc_pages(device, number);
|
||||
|
||||
/* Try to keep the fast path fast, but occasionally we need
|
||||
* to reclaim the pages we lended to the network stack. */
|
||||
if (page && atomic_read(&device->pp_in_use_by_net) > 512)
|
||||
drbd_reclaim_net_peer_reqs(device);
|
||||
|
||||
while (page == NULL) {
|
||||
prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
|
||||
|
||||
drbd_reclaim_net_peer_reqs(device);
|
||||
|
||||
if (atomic_read(&device->pp_in_use) < mxb) {
|
||||
page = __drbd_alloc_pages(device, number);
|
||||
if (page)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!retry)
|
||||
break;
|
||||
|
||||
if (signal_pending(current)) {
|
||||
drbd_warn(device, "drbd_alloc_pages interrupted!\n");
|
||||
break;
|
||||
}
|
||||
|
||||
if (schedule_timeout(HZ/10) == 0)
|
||||
mxb = UINT_MAX;
|
||||
}
|
||||
finish_wait(&drbd_pp_wait, &wait);
|
||||
if (atomic_read(&device->pp_in_use) >= mxb)
|
||||
schedule_timeout_interruptible(HZ / 10);
|
||||
page = __drbd_alloc_pages(number);
|
||||
|
||||
if (page)
|
||||
atomic_add(number, &device->pp_in_use);
|
||||
|
|
@ -314,29 +135,25 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int
|
|||
* Is also used from inside an other spin_lock_irq(&resource->req_lock);
|
||||
* Either links the page chain back to the global pool,
|
||||
* or returns all pages to the system. */
|
||||
static void drbd_free_pages(struct drbd_device *device, struct page *page, int is_net)
|
||||
static void drbd_free_pages(struct drbd_device *device, struct page *page)
|
||||
{
|
||||
atomic_t *a = is_net ? &device->pp_in_use_by_net : &device->pp_in_use;
|
||||
int i;
|
||||
struct page *tmp;
|
||||
int i = 0;
|
||||
|
||||
if (page == NULL)
|
||||
return;
|
||||
|
||||
if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
|
||||
i = page_chain_free(page);
|
||||
else {
|
||||
struct page *tmp;
|
||||
tmp = page_chain_tail(page, &i);
|
||||
spin_lock(&drbd_pp_lock);
|
||||
page_chain_add(&drbd_pp_pool, page, tmp);
|
||||
drbd_pp_vacant += i;
|
||||
spin_unlock(&drbd_pp_lock);
|
||||
page_chain_for_each_safe(page, tmp) {
|
||||
set_page_private(page, 0);
|
||||
if (page_count(page) == 1)
|
||||
mempool_free(page, &drbd_buffer_page_pool);
|
||||
else
|
||||
put_page(page);
|
||||
i++;
|
||||
}
|
||||
i = atomic_sub_return(i, a);
|
||||
i = atomic_sub_return(i, &device->pp_in_use);
|
||||
if (i < 0)
|
||||
drbd_warn(device, "ASSERTION FAILED: %s: %d < 0\n",
|
||||
is_net ? "pp_in_use_by_net" : "pp_in_use", i);
|
||||
wake_up(&drbd_pp_wait);
|
||||
drbd_warn(device, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -380,6 +197,8 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
|
|||
gfpflags_allow_blocking(gfp_mask));
|
||||
if (!page)
|
||||
goto fail;
|
||||
if (!mempool_is_saturated(&drbd_buffer_page_pool))
|
||||
peer_req->flags |= EE_RELEASE_TO_MEMPOOL;
|
||||
}
|
||||
|
||||
memset(peer_req, 0, sizeof(*peer_req));
|
||||
|
|
@ -403,13 +222,12 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto
|
|||
return NULL;
|
||||
}
|
||||
|
||||
void __drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req,
|
||||
int is_net)
|
||||
void drbd_free_peer_req(struct drbd_device *device, struct drbd_peer_request *peer_req)
|
||||
{
|
||||
might_sleep();
|
||||
if (peer_req->flags & EE_HAS_DIGEST)
|
||||
kfree(peer_req->digest);
|
||||
drbd_free_pages(device, peer_req->pages, is_net);
|
||||
drbd_free_pages(device, peer_req->pages);
|
||||
D_ASSERT(device, atomic_read(&peer_req->pending_bios) == 0);
|
||||
D_ASSERT(device, drbd_interval_empty(&peer_req->i));
|
||||
if (!expect(device, !(peer_req->flags & EE_CALL_AL_COMPLETE_IO))) {
|
||||
|
|
@ -424,14 +242,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
|
|||
LIST_HEAD(work_list);
|
||||
struct drbd_peer_request *peer_req, *t;
|
||||
int count = 0;
|
||||
int is_net = list == &device->net_ee;
|
||||
|
||||
spin_lock_irq(&device->resource->req_lock);
|
||||
list_splice_init(list, &work_list);
|
||||
spin_unlock_irq(&device->resource->req_lock);
|
||||
|
||||
list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
|
||||
__drbd_free_peer_req(device, peer_req, is_net);
|
||||
drbd_free_peer_req(device, peer_req);
|
||||
count++;
|
||||
}
|
||||
return count;
|
||||
|
|
@ -443,18 +260,13 @@ int drbd_free_peer_reqs(struct drbd_device *device, struct list_head *list)
|
|||
static int drbd_finish_peer_reqs(struct drbd_device *device)
|
||||
{
|
||||
LIST_HEAD(work_list);
|
||||
LIST_HEAD(reclaimed);
|
||||
struct drbd_peer_request *peer_req, *t;
|
||||
int err = 0;
|
||||
|
||||
spin_lock_irq(&device->resource->req_lock);
|
||||
reclaim_finished_net_peer_reqs(device, &reclaimed);
|
||||
list_splice_init(&device->done_ee, &work_list);
|
||||
spin_unlock_irq(&device->resource->req_lock);
|
||||
|
||||
list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
|
||||
drbd_free_net_peer_req(device, peer_req);
|
||||
|
||||
/* possible callbacks here:
|
||||
* e_end_block, and e_end_resync_block, e_send_superseded.
|
||||
* all ignore the last argument.
|
||||
|
|
@ -1975,7 +1787,7 @@ static int drbd_drain_block(struct drbd_peer_device *peer_device, int data_size)
|
|||
data_size -= len;
|
||||
}
|
||||
kunmap(page);
|
||||
drbd_free_pages(peer_device->device, page, 0);
|
||||
drbd_free_pages(peer_device->device, page);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
@ -5224,16 +5036,6 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device)
|
|||
put_ldev(device);
|
||||
}
|
||||
|
||||
/* tcp_close and release of sendpage pages can be deferred. I don't
|
||||
* want to use SO_LINGER, because apparently it can be deferred for
|
||||
* more than 20 seconds (longest time I checked).
|
||||
*
|
||||
* Actually we don't care for exactly when the network stack does its
|
||||
* put_page(), but release our reference on these pages right here.
|
||||
*/
|
||||
i = drbd_free_peer_reqs(device, &device->net_ee);
|
||||
if (i)
|
||||
drbd_info(device, "net_ee not empty, killed %u entries\n", i);
|
||||
i = atomic_read(&device->pp_in_use_by_net);
|
||||
if (i)
|
||||
drbd_info(device, "pp_in_use_by_net = %d, expected 0\n", i);
|
||||
|
|
@ -5980,8 +5782,6 @@ int drbd_ack_receiver(struct drbd_thread *thi)
|
|||
while (get_t_state(thi) == RUNNING) {
|
||||
drbd_thread_current_set_cpu(thi);
|
||||
|
||||
conn_reclaim_net_peer_reqs(connection);
|
||||
|
||||
if (test_and_clear_bit(SEND_PING, &connection->flags)) {
|
||||
if (drbd_send_ping(connection)) {
|
||||
drbd_err(connection, "drbd_send_ping has failed\n");
|
||||
|
|
|
|||
|
|
@ -1030,22 +1030,6 @@ int drbd_resync_finished(struct drbd_peer_device *peer_device)
|
|||
return 1;
|
||||
}
|
||||
|
||||
/* helper */
|
||||
static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_request *peer_req)
|
||||
{
|
||||
if (drbd_peer_req_has_active_page(peer_req)) {
|
||||
/* This might happen if sendpage() has not finished */
|
||||
int i = PFN_UP(peer_req->i.size);
|
||||
atomic_add(i, &device->pp_in_use_by_net);
|
||||
atomic_sub(i, &device->pp_in_use);
|
||||
spin_lock_irq(&device->resource->req_lock);
|
||||
list_add_tail(&peer_req->w.list, &device->net_ee);
|
||||
spin_unlock_irq(&device->resource->req_lock);
|
||||
wake_up(&drbd_pp_wait);
|
||||
} else
|
||||
drbd_free_peer_req(device, peer_req);
|
||||
}
|
||||
|
||||
/**
|
||||
* w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
|
||||
* @w: work object.
|
||||
|
|
@ -1059,9 +1043,8 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
|
|||
int err;
|
||||
|
||||
if (unlikely(cancel)) {
|
||||
drbd_free_peer_req(device, peer_req);
|
||||
dec_unacked(device);
|
||||
return 0;
|
||||
err = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
|
||||
|
|
@ -1074,12 +1057,12 @@ int w_e_end_data_req(struct drbd_work *w, int cancel)
|
|||
err = drbd_send_ack(peer_device, P_NEG_DREPLY, peer_req);
|
||||
}
|
||||
|
||||
dec_unacked(device);
|
||||
|
||||
move_to_net_ee_or_free(device, peer_req);
|
||||
|
||||
if (unlikely(err))
|
||||
drbd_err(device, "drbd_send_block() failed\n");
|
||||
out:
|
||||
dec_unacked(device);
|
||||
drbd_free_peer_req(device, peer_req);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
@ -1120,9 +1103,8 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
|
|||
int err;
|
||||
|
||||
if (unlikely(cancel)) {
|
||||
drbd_free_peer_req(device, peer_req);
|
||||
dec_unacked(device);
|
||||
return 0;
|
||||
err = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (get_ldev_if_state(device, D_FAILED)) {
|
||||
|
|
@ -1155,13 +1137,12 @@ int w_e_end_rsdata_req(struct drbd_work *w, int cancel)
|
|||
/* update resync data with failure */
|
||||
drbd_rs_failed_io(peer_device, peer_req->i.sector, peer_req->i.size);
|
||||
}
|
||||
|
||||
dec_unacked(device);
|
||||
|
||||
move_to_net_ee_or_free(device, peer_req);
|
||||
|
||||
if (unlikely(err))
|
||||
drbd_err(device, "drbd_send_block() failed\n");
|
||||
out:
|
||||
dec_unacked(device);
|
||||
drbd_free_peer_req(device, peer_req);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
@ -1176,9 +1157,8 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
|
|||
int err, eq = 0;
|
||||
|
||||
if (unlikely(cancel)) {
|
||||
drbd_free_peer_req(device, peer_req);
|
||||
dec_unacked(device);
|
||||
return 0;
|
||||
err = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (get_ldev(device)) {
|
||||
|
|
@ -1220,12 +1200,12 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
|
|||
if (drbd_ratelimit())
|
||||
drbd_err(device, "Sending NegDReply. I guess it gets messy.\n");
|
||||
}
|
||||
|
||||
dec_unacked(device);
|
||||
move_to_net_ee_or_free(device, peer_req);
|
||||
|
||||
if (unlikely(err))
|
||||
drbd_err(device, "drbd_send_block/ack() failed\n");
|
||||
out:
|
||||
dec_unacked(device);
|
||||
drbd_free_peer_req(device, peer_req);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -235,7 +235,7 @@ struct ublk_device {
struct completion completion;
unsigned int nr_queues_ready;
unsigned int nr_privileged_daemon;
bool unprivileged_daemons;
struct mutex cancel_mutex;
bool canceling;
pid_t ublksrv_tgid;

@@ -1389,7 +1389,7 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
{
blk_status_t res;

if (unlikely(ubq->fail_io))
if (unlikely(READ_ONCE(ubq->fail_io)))
return BLK_STS_TARGET;

/* With recovery feature enabled, force_abort is set in

@@ -1401,7 +1401,8 @@ static blk_status_t ublk_prep_req(struct ublk_queue *ubq, struct request *rq,
* Note: force_abort is guaranteed to be seen because it is set
* before request queue is unqiuesced.
*/
if (ublk_nosrv_should_queue_io(ubq) && unlikely(ubq->force_abort))
if (ublk_nosrv_should_queue_io(ubq) &&
unlikely(READ_ONCE(ubq->force_abort)))
return BLK_STS_IOERR;

if (check_cancel && unlikely(ubq->canceling))

@@ -1550,7 +1551,7 @@ static void ublk_reset_ch_dev(struct ublk_device *ub)
/* set to NULL, otherwise new tasks cannot mmap io_cmd_buf */
ub->mm = NULL;
ub->nr_queues_ready = 0;
ub->nr_privileged_daemon = 0;
ub->unprivileged_daemons = false;
ub->ublksrv_tgid = -1;
}

@@ -1644,7 +1645,6 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* Transition the device to the nosrv state. What exactly this
* means depends on the recovery flags
*/
blk_mq_quiesce_queue(disk->queue);
if (ublk_nosrv_should_stop_dev(ub)) {
/*
* Allow any pending/future I/O to pass through quickly

@@ -1652,8 +1652,7 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
* waits for all pending I/O to complete
*/
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_get_queue(ub, i)->force_abort = true;
blk_mq_unquiesce_queue(disk->queue);
WRITE_ONCE(ublk_get_queue(ub, i)->force_abort, true);

ublk_stop_dev_unlocked(ub);
} else {

@@ -1663,9 +1662,8 @@ static int ublk_ch_release(struct inode *inode, struct file *filp)
} else {
ub->dev_info.state = UBLK_S_DEV_FAIL_IO;
for (i = 0; i < ub->dev_info.nr_hw_queues; i++)
ublk_get_queue(ub, i)->fail_io = true;
WRITE_ONCE(ublk_get_queue(ub, i)->fail_io, true);
}
blk_mq_unquiesce_queue(disk->queue);
}
unlock:
mutex_unlock(&ub->mutex);

@@ -1980,12 +1978,10 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
__must_hold(&ub->mutex)
{
ubq->nr_io_ready++;
if (ublk_queue_ready(ubq)) {
if (ublk_queue_ready(ubq))
ub->nr_queues_ready++;

if (capable(CAP_SYS_ADMIN))
ub->nr_privileged_daemon++;
}
if (!ub->unprivileged_daemons && !capable(CAP_SYS_ADMIN))
ub->unprivileged_daemons = true;

if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues) {
/* now we are ready for handling ublk io request */

@@ -2880,8 +2876,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,

ublk_apply_params(ub);

/* don't probe partitions if any one ubq daemon is un-trusted */
if (ub->nr_privileged_daemon != ub->nr_queues_ready)
/* don't probe partitions if any daemon task is un-trusted */
if (ub->unprivileged_daemons)
set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);

ublk_get_device(ub);
@@ -642,12 +642,7 @@ static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev,
* WMT command.
*/
err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT,
TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
if (err == -EINTR) {
bt_dev_err(hdev, "Execution of wmt command interrupted");
clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
goto err_free_wc;
}
TASK_UNINTERRUPTIBLE, HCI_INIT_TIMEOUT);

if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
@@ -543,10 +543,10 @@ static int ps_setup(struct hci_dev *hdev)
}

if (psdata->wakeup_source) {
ret = devm_request_irq(&serdev->dev, psdata->irq_handler,
ps_host_wakeup_irq_handler,
IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
dev_name(&serdev->dev), nxpdev);
ret = devm_request_threaded_irq(&serdev->dev, psdata->irq_handler,
NULL, ps_host_wakeup_irq_handler,
IRQF_ONESHOT,
dev_name(&serdev->dev), nxpdev);
if (ret)
bt_dev_info(hdev, "error setting wakeup IRQ handler, ignoring\n");
disable_irq(psdata->irq_handler);
@@ -2793,6 +2793,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
X86_MATCH(INTEL_GRANITERAPIDS_X, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT, core_funcs),
X86_MATCH(INTEL_ATOM_CRESTMONT_X, core_funcs),
X86_MATCH(INTEL_ATOM_DARKMONT_X, core_funcs),
{}
};
#endif
@@ -97,6 +97,14 @@ static inline int which_bucket(u64 duration_ns)

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update_intervals(struct menu_device *data, unsigned int interval_us)
{
/* Update the repeating-pattern data. */
data->intervals[data->interval_ptr++] = interval_us;
if (data->interval_ptr >= INTERVALS)
data->interval_ptr = 0;
}

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/*

@@ -222,6 +230,14 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (data->needs_update) {
menu_update(drv, dev);
data->needs_update = 0;
} else if (!dev->last_residency_ns) {
/*
* This happens when the driver rejects the previously selected
* idle state and returns an error, so update the recent
* intervals table to prevent invalid information from being
* used going forward.
*/
menu_update_intervals(data, UINT_MAX);
}

/* Find the shortest expected idle interval. */

@@ -482,10 +498,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)

data->correction_factor[data->bucket] = new_factor;

/* update the repeating-pattern data */
data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
if (data->interval_ptr >= INTERVALS)
data->interval_ptr = 0;
menu_update_intervals(data, ktime_to_us(measured_ns));
}

/**
@@ -550,6 +550,23 @@ const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */

static void complete_address_handler(struct kref *kref)
{
struct fw_address_handler *handler = container_of(kref, struct fw_address_handler, kref);

complete(&handler->done);
}

static void get_address_handler(struct fw_address_handler *handler)
{
kref_get(&handler->kref);
}

static int put_address_handler(struct fw_address_handler *handler)
{
return kref_put(&handler->kref, complete_address_handler);
}

/**
* fw_core_add_address_handler() - register for incoming requests
* @handler: callback

@@ -596,6 +613,8 @@ int fw_core_add_address_handler(struct fw_address_handler *handler,
if (other != NULL) {
handler->offset += other->length;
} else {
init_completion(&handler->done);
kref_init(&handler->kref);
list_add_tail_rcu(&handler->link, &address_handler_list);
ret = 0;
break;

@@ -621,6 +640,9 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler)
list_del_rcu(&handler->link);

synchronize_rcu();

if (!put_address_handler(handler))
wait_for_completion(&handler->done);
}
EXPORT_SYMBOL(fw_core_remove_address_handler);

@@ -914,22 +936,31 @@ static void handle_exclusive_region_request(struct fw_card *card,
handler = lookup_enclosing_address_handler(&address_handler_list, offset,
request->length);
if (handler)
handler->address_callback(card, request, tcode, destination, source,
p->generation, offset, request->data,
request->length, handler->callback_data);
get_address_handler(handler);
}

if (!handler)
if (!handler) {
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
return;
}

// Outside the RCU read-side critical section. Without spinlock. With reference count.
handler->address_callback(card, request, tcode, destination, source, p->generation, offset,
request->data, request->length, handler->callback_data);
put_address_handler(handler);
}

// To use kmalloc allocator efficiently, this should be power of two.
#define BUFFER_ON_KERNEL_STACK_SIZE 4

static void handle_fcp_region_request(struct fw_card *card,
struct fw_packet *p,
struct fw_request *request,
unsigned long long offset)
{
struct fw_address_handler *handler;
int tcode, destination, source;
struct fw_address_handler *buffer_on_kernel_stack[BUFFER_ON_KERNEL_STACK_SIZE];
struct fw_address_handler *handler, **handlers;
int tcode, destination, source, i, count, buffer_size;

if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||

@@ -950,15 +981,55 @@ static void handle_fcp_region_request(struct fw_card *card,
return;
}

count = 0;
handlers = buffer_on_kernel_stack;
buffer_size = ARRAY_SIZE(buffer_on_kernel_stack);
scoped_guard(rcu) {
list_for_each_entry_rcu(handler, &address_handler_list, link) {
if (is_enclosing_handler(handler, offset, request->length))
handler->address_callback(card, request, tcode, destination, source,
p->generation, offset, request->data,
request->length, handler->callback_data);
if (is_enclosing_handler(handler, offset, request->length)) {
if (count >= buffer_size) {
int next_size = buffer_size * 2;
struct fw_address_handler **buffer_on_kernel_heap;

if (handlers == buffer_on_kernel_stack)
buffer_on_kernel_heap = NULL;
else
buffer_on_kernel_heap = handlers;

buffer_on_kernel_heap =
krealloc_array(buffer_on_kernel_heap, next_size,
sizeof(*buffer_on_kernel_heap), GFP_ATOMIC);
// FCP is used for purposes unrelated to significant system
// resources (e.g. storage or networking), so allocation
// failures are not considered so critical.
if (!buffer_on_kernel_heap)
break;

if (handlers == buffer_on_kernel_stack) {
memcpy(buffer_on_kernel_heap, buffer_on_kernel_stack,
sizeof(buffer_on_kernel_stack));
}

handlers = buffer_on_kernel_heap;
buffer_size = next_size;
}
get_address_handler(handler);
handlers[count++] = handler;
}
}
}

for (i = 0; i < count; ++i) {
handler = handlers[i];
handler->address_callback(card, request, tcode, destination, source,
p->generation, offset, request->data,
request->length, handler->callback_data);
put_address_handler(handler);
}

if (handlers != buffer_on_kernel_stack)
kfree(handlers);

fw_send_response(card, request, RCODE_COMPLETE);
}
@@ -190,9 +190,7 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
struct mlxbf3_gpio_context *gs;
struct gpio_irq_chip *girq;
struct gpio_chip *gc;
char *colon_ptr;
int ret, irq;
long num;

gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL);
if (!gs)

@@ -229,39 +227,25 @@ static int mlxbf3_gpio_probe(struct platform_device *pdev)
gc->owner = THIS_MODULE;
gc->add_pin_ranges = mlxbf3_gpio_add_pin_ranges;

colon_ptr = strchr(dev_name(dev), ':');
if (!colon_ptr) {
dev_err(dev, "invalid device name format\n");
return -EINVAL;
}
irq = platform_get_irq_optional(pdev, 0);
if (irq >= 0) {
girq = &gs->gc.irq;
gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
girq->default_type = IRQ_TYPE_NONE;
/* This will let us handle the parent IRQ in the driver */
girq->num_parents = 0;
girq->parents = NULL;
girq->parent_handler = NULL;
girq->handler = handle_bad_irq;

ret = kstrtol(++colon_ptr, 16, &num);
if (ret) {
dev_err(dev, "invalid device instance\n");
return ret;
}

if (!num) {
irq = platform_get_irq(pdev, 0);
if (irq >= 0) {
girq = &gs->gc.irq;
gpio_irq_chip_set_chip(girq, &gpio_mlxbf3_irqchip);
girq->default_type = IRQ_TYPE_NONE;
/* This will let us handle the parent IRQ in the driver */
girq->num_parents = 0;
girq->parents = NULL;
girq->parent_handler = NULL;
girq->handler = handle_bad_irq;

/*
* Directly request the irq here instead of passing
* a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
IRQF_SHARED, dev_name(dev), gs);
if (ret)
return dev_err_probe(dev, ret, "failed to request IRQ");
}
/*
* Directly request the irq here instead of passing
* a flow-handler because the irq is shared.
*/
ret = devm_request_irq(dev, irq, mlxbf3_gpio_irq_handler,
IRQF_SHARED, dev_name(dev), gs);
if (ret)
return dev_err_probe(dev, ret, "failed to request IRQ");
}

platform_set_drvdata(pdev, gs);
@@ -1139,6 +1139,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
}
}

if (!amdgpu_vm_ready(vm))
return -EINVAL;

r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -88,8 +88,8 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}

r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
AMDGPU_PTE_EXECUTABLE);
AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
AMDGPU_VM_PAGE_EXECUTABLE);

if (r) {
DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
@@ -1039,15 +1039,28 @@ int psp_update_fw_reservation(struct psp_context *psp)
{
int ret;
uint64_t reserv_addr, reserv_addr_ext;
uint32_t reserv_size, reserv_size_ext;
uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
struct amdgpu_device *adev = psp->adev;

mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);

if (amdgpu_sriov_vf(psp->adev))
return 0;

if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 2)) &&
(amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(14, 0, 3)))
switch (mp0_ip_ver) {
case IP_VERSION(14, 0, 2):
if (adev->psp.sos.fw_version < 0x3b0e0d)
return 0;
break;

case IP_VERSION(14, 0, 3):
if (adev->psp.sos.fw_version < 0x3a0e14)
return 0;
break;

default:
return 0;
}

ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
if (ret)
@@ -654,11 +654,10 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* Check if all VM PDs/PTs are ready for updates
*
* Returns:
* True if VM is not evicting.
* True if VM is not evicting and all VM entities are not stopped
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
bool empty;
bool ret;

amdgpu_vm_eviction_lock(vm);

@@ -666,10 +665,18 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
amdgpu_vm_eviction_unlock(vm);

spin_lock(&vm->status_lock);
empty = list_empty(&vm->evicted);
ret &= list_empty(&vm->evicted);
spin_unlock(&vm->status_lock);

return ret && empty;
spin_lock(&vm->immediate.lock);
ret &= !vm->immediate.stopped;
spin_unlock(&vm->immediate.lock);

spin_lock(&vm->delayed.lock);
ret &= !vm->delayed.stopped;
spin_unlock(&vm->delayed.lock);

return ret;
}

/**
@@ -648,9 +648,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
list_for_each_entry(block, &vres->blocks, link)
vis_usage += amdgpu_vram_mgr_vis_size(adev, block);

amdgpu_vram_mgr_do_reserve(man);

drm_buddy_free_list(mm, &vres->blocks, vres->flags);
amdgpu_vram_mgr_do_reserve(man);
mutex_unlock(&mgr->lock);

atomic64_sub(vis_usage, &mgr->vis_usage);
@@ -18,6 +18,7 @@ static void drm_aux_bridge_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);

of_node_put(dev->of_node);
ida_free(&drm_aux_bridge_ida, adev->id);

kfree(adev);

@@ -65,6 +66,7 @@ int drm_aux_bridge_register(struct device *parent)

ret = auxiliary_device_init(adev);
if (ret) {
of_node_put(adev->dev.of_node);
ida_free(&drm_aux_bridge_ida, adev->id);
kfree(adev);
return ret;
@@ -1227,6 +1227,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
/**
* drm_bridge_detect - check if anything is attached to the bridge output
* @bridge: bridge control structure
* @connector: attached connector
*
* If the bridge supports output detection, as reported by the
* DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
@@ -552,10 +552,6 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc)
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);

/* wa_18038517565 Enable DPFC clock gating after FBC disable */
if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
fbc_compressor_clkgate_disable_wa(fbc, false);
}
}

@@ -1710,6 +1706,10 @@ static void __intel_fbc_disable(struct intel_fbc *fbc)

__intel_fbc_cleanup_cfb(fbc);

/* wa_18038517565 Enable DPFC clock gating after FBC disable */
if (display->platform.dg2 || DISPLAY_VER(display) >= 14)
fbc_compressor_clkgate_disable_wa(fbc, false);

fbc->state.plane = NULL;
fbc->flip_pending = false;
fbc->busy_bits = 0;
@@ -3275,7 +3275,9 @@ static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp)

static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
if (intel_dp->psr.psr2_sel_fetch_enabled) {
struct intel_display *display = to_intel_display(intel_dp);

if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) {
intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
intel_psr_configure_full_frame_update(intel_dp);

@@ -3361,7 +3363,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);

if (intel_dp->psr.psr2_sel_fetch_enabled) {
if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
if (intel_dp->psr.busy_frontbuffer_bits == 0)

@@ -3378,11 +3380,13 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
* existing SU configuration
*/
intel_psr_configure_full_frame_update(intel_dp);

intel_psr_force_update(intel_dp);
} else {
intel_psr_exit(intel_dp);
}

intel_psr_force_update(intel_dp);

if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active &&
if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) &&
!intel_dp->psr.busy_frontbuffer_bits)
queue_work(display->wq.unordered, &intel_dp->psr.work);
}
@@ -253,6 +253,7 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,

int
nouveau_framebuffer_new(struct drm_device *dev,
const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb)

@@ -260,7 +261,6 @@ nouveau_framebuffer_new(struct drm_device *dev,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
struct drm_framebuffer *fb;
const struct drm_format_info *info;
unsigned int height, i;
uint32_t tile_mode;
uint8_t kind;

@@ -295,9 +295,6 @@ nouveau_framebuffer_new(struct drm_device *dev,
kind = nvbo->kind;
}

info = drm_get_format_info(dev, mode_cmd->pixel_format,
mode_cmd->modifier[0]);

for (i = 0; i < info->num_planes; i++) {
height = drm_format_info_plane_height(info,
mode_cmd->height,

@@ -321,7 +318,7 @@ nouveau_framebuffer_new(struct drm_device *dev,
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;

drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
fb->obj[0] = gem;

ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);

@@ -344,7 +341,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
if (!gem)
return ERR_PTR(-ENOENT);

ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
ret = nouveau_framebuffer_new(dev, info, mode_cmd, gem, &fb);
if (ret == 0)
return fb;
@@ -8,8 +8,11 @@

#include <drm/drm_framebuffer.h>

struct drm_format_info;

int
nouveau_framebuffer_new(struct drm_device *dev,
const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *gem,
struct drm_framebuffer **pfb);
@ -351,7 +351,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
|
|||
}
|
||||
}
|
||||
|
||||
fb = omap_framebuffer_init(dev, mode_cmd, bos);
|
||||
fb = omap_framebuffer_init(dev, info, mode_cmd, bos);
|
||||
if (IS_ERR(fb))
|
||||
goto error;
|
||||
|
||||
|
|
@ -365,9 +365,9 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
|
|||
}
|
||||
|
||||
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
|
||||
const struct drm_format_info *info,
|
||||
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
|
||||
{
|
||||
const struct drm_format_info *format = NULL;
|
||||
struct omap_framebuffer *omap_fb = NULL;
|
||||
struct drm_framebuffer *fb = NULL;
|
||||
unsigned int pitch = mode_cmd->pitches[0];
|
||||
|
|
@ -377,15 +377,12 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
|
|||
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
|
||||
(char *)&mode_cmd->pixel_format);
|
||||
|
||||
format = drm_get_format_info(dev, mode_cmd->pixel_format,
|
||||
mode_cmd->modifier[0]);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(formats); i++) {
|
||||
if (formats[i] == mode_cmd->pixel_format)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!format || i == ARRAY_SIZE(formats)) {
|
||||
if (i == ARRAY_SIZE(formats)) {
|
||||
dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n",
|
||||
(char *)&mode_cmd->pixel_format);
|
||||
ret = -EINVAL;
|
||||
|
|
@ -399,7 +396,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
|
|||
}
|
||||
|
||||
fb = &omap_fb->base;
|
||||
omap_fb->format = format;
|
||||
omap_fb->format = info;
|
||||
mutex_init(&omap_fb->lock);
|
||||
|
||||
/*
|
||||
|
|
@ -407,23 +404,23 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
|
|||
* that the two planes of multiplane formats need the same number of
|
||||
* bytes per pixel.
|
||||
*/
|
||||
if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
|
||||
if (info->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
|
||||
dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n");
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (pitch % format->cpp[0]) {
|
||||
if (pitch % info->cpp[0]) {
|
||||
dev_dbg(dev->dev,
|
||||
"buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n",
|
||||
pitch, format->cpp[0]);
|
||||
pitch, info->cpp[0]);
|
||||
ret = -EINVAL;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
for (i = 0; i < format->num_planes; i++) {
|
||||
for (i = 0; i < info->num_planes; i++) {
|
||||
struct plane *plane = &omap_fb->planes[i];
|
||||
unsigned int vsub = i == 0 ? 1 : format->vsub;
|
||||
unsigned int vsub = i == 0 ? 1 : info->vsub;
|
||||
unsigned int size;
|
||||
|
||||
size = pitch * mode_cmd->height / vsub;
|
||||
|
|
@ -440,7 +437,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
|
|||
plane->dma_addr = 0;
|
||||
}
|
||||
|
||||
drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
|
||||
drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
|
||||
|
||||
ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
|
||||
if (ret) {
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ struct drm_connector;
|
|||
struct drm_device;
|
||||
struct drm_file;
|
||||
struct drm_framebuffer;
|
||||
struct drm_format_info;
|
||||
struct drm_gem_object;
|
||||
struct drm_mode_fb_cmd2;
|
||||
struct drm_plane_state;
|
||||
|
|
@ -23,6 +24,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
|
|||
struct drm_file *file, const struct drm_format_info *info,
|
||||
const struct drm_mode_fb_cmd2 *mode_cmd);
|
||||
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
|
||||
const struct drm_format_info *info,
|
||||
const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
|
||||
int omap_framebuffer_pin(struct drm_framebuffer *fb);
|
||||
void omap_framebuffer_unpin(struct drm_framebuffer *fb);
|
||||
|
|
|
|||
|
|
@ -197,7 +197,10 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
|
|||
goto fail;
|
||||
}
|
||||
|
||||
fb = omap_framebuffer_init(dev, &mode_cmd, &bo);
|
||||
fb = omap_framebuffer_init(dev,
|
||||
drm_get_format_info(dev, mode_cmd.pixel_format,
|
||||
mode_cmd.modifier[0]),
|
||||
&mode_cmd, &bo);
|
||||
if (IS_ERR(fb)) {
|
||||
dev_err(dev->dev, "failed to allocate fb\n");
|
||||
/* note: if fb creation failed, we can't rely on fb destroy
|
||||
|
|
|
|||
|
|
@ -432,7 +432,7 @@ static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo,
|
|||
if (!refcount)
|
||||
return;
|
||||
|
||||
resident_size = bo->base.pages ? bo->base.base.size : 0;
|
||||
resident_size = panfrost_gem_rss(&bo->base.base);
|
||||
|
||||
snprintf(creator_info, sizeof(creator_info),
|
||||
"%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
|
||||
|
|
|
|||
|
|
@ -1297,12 +1297,13 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
|
|||
int
|
||||
radeon_framebuffer_init(struct drm_device *dev,
|
||||
struct drm_framebuffer *fb,
|
||||
const struct drm_format_info *info,
|
||||
const struct drm_mode_fb_cmd2 *mode_cmd,
|
||||
struct drm_gem_object *obj)
|
||||
{
|
||||
int ret;
|
||||
fb->obj[0] = obj;
|
||||
drm_helper_mode_fill_fb_struct(dev, fb, NULL, mode_cmd);
|
||||
drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd);
|
||||
ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs);
|
||||
if (ret) {
|
||||
fb->obj[0] = NULL;
|
||||
|
|
@ -1341,7 +1342,7 @@ radeon_user_framebuffer_create(struct drm_device *dev,
|
|||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj);
|
||||
ret = radeon_framebuffer_init(dev, fb, info, mode_cmd, obj);
|
||||
if (ret) {
|
||||
kfree(fb);
|
||||
drm_gem_object_put(obj);
|
||||
|
|
|
|||
|
|
@ -53,10 +53,10 @@ static void radeon_fbdev_destroy_pinned_object(struct drm_gem_object *gobj)
|
|||
}
|
||||
|
||||
static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
|
||||
const struct drm_format_info *info,
|
||||
struct drm_mode_fb_cmd2 *mode_cmd,
|
||||
struct drm_gem_object **gobj_p)
|
||||
{
|
||||
const struct drm_format_info *info;
|
||||
struct radeon_device *rdev = fb_helper->dev->dev_private;
|
||||
struct drm_gem_object *gobj = NULL;
|
||||
struct radeon_bo *rbo = NULL;
|
||||
|
|
@ -67,8 +67,6 @@ static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper,
|
|||
int height = mode_cmd->height;
|
||||
u32 cpp;
|
||||
|
||||
info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd->pixel_format,
|
||||
mode_cmd->modifier[0]);
|
||||
cpp = info->cpp[0];
|
||||
|
||||
/* need to align pitch with crtc limits */
|
||||
|
|
@ -206,6 +204,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
|
|||
struct drm_fb_helper_surface_size *sizes)
|
||||
{
|
||||
struct radeon_device *rdev = fb_helper->dev->dev_private;
|
||||
const struct drm_format_info *format_info;
|
||||
struct drm_mode_fb_cmd2 mode_cmd = { };
|
||||
struct fb_info *info;
|
||||
struct drm_gem_object *gobj;
|
||||
|
|
@ -224,7 +223,9 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
|
|||
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
|
||||
sizes->surface_depth);
|
||||
|
||||
ret = radeon_fbdev_create_pinned_object(fb_helper, &mode_cmd, &gobj);
|
||||
format_info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd.pixel_format,
|
||||
mode_cmd.modifier[0]);
|
||||
ret = radeon_fbdev_create_pinned_object(fb_helper, format_info, &mode_cmd, &gobj);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to create fbcon object %d\n", ret);
|
||||
return ret;
|
||||
|
|
@ -236,7 +237,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
|
|||
ret = -ENOMEM;
|
||||
goto err_radeon_fbdev_destroy_pinned_object;
|
||||
}
|
||||
ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, &mode_cmd, gobj);
|
||||
ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, format_info, &mode_cmd, gobj);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
|
||||
goto err_kfree;
|
||||
|
|
|
|||
|
|
@ -40,6 +40,7 @@
|
|||
|
||||
struct drm_fb_helper;
|
||||
struct drm_fb_helper_surface_size;
|
||||
struct drm_format_info;
|
||||
|
||||
struct edid;
|
||||
struct drm_edid;
|
||||
|
|
@ -890,6 +891,7 @@ extern void
|
|||
radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on);
|
||||
int radeon_framebuffer_init(struct drm_device *dev,
|
||||
struct drm_framebuffer *rfb,
|
||||
const struct drm_format_info *info,
|
||||
const struct drm_mode_fb_cmd2 *mode_cmd,
|
||||
struct drm_gem_object *obj);
|
||||
|
||||
|
|
|
|||
|
|
@ -7,5 +7,6 @@
|
|||
|
||||
#define GTTMMADR_BAR 0 /* MMIO + GTT */
|
||||
#define LMEM_BAR 2 /* VRAM */
|
||||
#define VF_LMEM_BAR 9 /* VF VRAM */
|
||||
|
||||
#endif
|
||||
|
|
|
|||
|
|
@ -332,6 +332,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
|
|||
int ret = 0;
|
||||
u32 reg_val, max;
|
||||
struct xe_reg rapl_limit;
|
||||
u64 max_supp_power_limit = 0;
|
||||
|
||||
mutex_lock(&hwmon->hwmon_lock);
|
||||
|
||||
|
|
@ -356,6 +357,20 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
|
|||
goto unlock;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the sysfs value exceeds the maximum pcode supported power limit value, clamp it to
|
||||
* the supported maximum (U12.3 format).
|
||||
* This is to avoid truncation during reg_val calculation below and ensure the valid
|
||||
* power limit is sent for pcode which would clamp it to card-supported value.
|
||||
*/
|
||||
max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER;
|
||||
if (value > max_supp_power_limit) {
|
||||
value = max_supp_power_limit;
|
||||
drm_info(&hwmon->xe->drm,
|
||||
"Power limit clamped as selected %s exceeds channel %d limit\n",
|
||||
PWR_ATTR_TO_STR(attr), channel);
|
||||
}
|
||||
|
||||
/* Computation in 64-bits to avoid overflow. Round to nearest. */
|
||||
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
|
||||
|
||||
|
|
@ -739,9 +754,23 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
|
|||
{
|
||||
int ret;
|
||||
u32 uval;
|
||||
u64 max_crit_power_curr = 0;
|
||||
|
||||
mutex_lock(&hwmon->hwmon_lock);
|
||||
|
||||
/*
|
||||
* If the sysfs value exceeds the pcode mailbox cmd POWER_SETUP_SUBCOMMAND_WRITE_I1
|
||||
* max supported value, clamp it to the command's max (U10.6 format).
|
||||
* This is to avoid truncation during uval calculation below and ensure the valid power
|
||||
* limit is sent for pcode which would clamp it to card-supported value.
|
||||
*/
|
||||
max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor;
|
||||
if (value > max_crit_power_curr) {
|
||||
value = max_crit_power_curr;
|
||||
drm_info(&hwmon->xe->drm,
|
||||
"Power limit clamped as selected exceeds channel %d limit\n",
|
||||
channel);
|
||||
}
|
||||
uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
|
||||
ret = xe_hwmon_pcode_write_i1(hwmon, uval);
|
||||
|
||||
|
|
|
|||
|
|
@ -1820,15 +1820,19 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
|
|||
if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
|
||||
!IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
|
||||
int buf_offset = 0;
|
||||
void *bounce;
|
||||
int err;
|
||||
|
||||
BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES));
|
||||
bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL);
|
||||
if (!bounce)
|
||||
return -ENOMEM;
|
||||
|
||||
/*
|
||||
* Less than ideal for large unaligned access but this should be
|
||||
* fairly rare, can fixup if this becomes common.
|
||||
*/
|
||||
do {
|
||||
u8 bounce[XE_CACHELINE_BYTES];
|
||||
void *ptr = (void *)bounce;
|
||||
int err;
|
||||
int copy_bytes = min_t(int, bytes_left,
|
||||
XE_CACHELINE_BYTES -
|
||||
(offset & XE_CACHELINE_MASK));
|
||||
|
|
@ -1837,22 +1841,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
|
|||
err = xe_migrate_access_memory(m, bo,
|
||||
offset &
|
||||
~XE_CACHELINE_MASK,
|
||||
(void *)ptr,
|
||||
sizeof(bounce), 0);
|
||||
bounce,
|
||||
XE_CACHELINE_BYTES, 0);
|
||||
if (err)
|
||||
return err;
|
||||
break;
|
||||
|
||||
if (write) {
|
||||
memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);
|
||||
memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes);
|
||||
|
||||
err = xe_migrate_access_memory(m, bo,
|
||||
offset & ~XE_CACHELINE_MASK,
|
||||
(void *)ptr,
|
||||
sizeof(bounce), write);
|
||||
bounce,
|
||||
XE_CACHELINE_BYTES, write);
|
||||
if (err)
|
||||
return err;
|
||||
break;
|
||||
} else {
|
||||
memcpy(buf + buf_offset, ptr + ptr_offset,
|
||||
memcpy(buf + buf_offset, bounce + ptr_offset,
|
||||
copy_bytes);
|
||||
}
|
||||
|
||||
|
|
@ -1861,7 +1865,8 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
|
|||
offset += copy_bytes;
|
||||
} while (bytes_left);
|
||||
|
||||
return 0;
|
||||
kfree(bounce);
|
||||
return err;
|
||||
}
|
||||
|
||||
dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
|
||||
|
|
@ -1882,8 +1887,11 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
|
|||
else
|
||||
current_bytes = min_t(int, bytes_left, cursor.size);
|
||||
|
||||
if (fence)
|
||||
dma_fence_put(fence);
|
||||
if (current_bytes & ~PAGE_MASK) {
|
||||
int pitch = 4;
|
||||
|
||||
current_bytes = min_t(int, current_bytes, S16_MAX * pitch);
|
||||
}
|
||||
|
||||
__fence = xe_migrate_vram(m, current_bytes,
|
||||
(unsigned long)buf & ~PAGE_MASK,
|
||||
|
|
@ -1892,11 +1900,15 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
|
|||
XE_MIGRATE_COPY_TO_VRAM :
|
||||
XE_MIGRATE_COPY_TO_SRAM);
|
||||
if (IS_ERR(__fence)) {
|
||||
if (fence)
|
||||
if (fence) {
|
||||
dma_fence_wait(fence, false);
|
||||
dma_fence_put(fence);
|
||||
}
|
||||
fence = __fence;
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
dma_fence_put(fence);
|
||||
fence = __fence;
|
||||
|
||||
buf += current_bytes;
|
||||
|
|
|
|||
|
|
@ -3,6 +3,10 @@
|
|||
* Copyright © 2023-2024 Intel Corporation
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
#include "regs/xe_bars.h"
|
||||
#include "xe_assert.h"
|
||||
#include "xe_device.h"
|
||||
#include "xe_gt_sriov_pf_config.h"
|
||||
|
|
@ -128,6 +132,18 @@ static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs,
|
|||
}
|
||||
}
|
||||
|
||||
static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
|
||||
u32 sizes;
|
||||
|
||||
sizes = pci_iov_vf_bar_get_sizes(pdev, VF_LMEM_BAR, num_vfs);
|
||||
if (!sizes)
|
||||
return 0;
|
||||
|
||||
return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes));
|
||||
}
|
||||
|
||||
static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
|
||||
|
|
@ -158,6 +174,12 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs)
|
|||
if (err < 0)
|
||||
goto failed;
|
||||
|
||||
if (IS_DGFX(xe)) {
|
||||
err = resize_vf_vram_bar(xe, num_vfs);
|
||||
if (err)
|
||||
xe_sriov_info(xe, "Failed to set VF LMEM BAR size: %d\n", err);
|
||||
}
|
||||
|
||||
err = pci_enable_sriov(pdev, num_vfs);
|
||||
if (err < 0)
|
||||
goto failed;
|
||||
|
|
|
|||
|
|
@ -54,10 +54,10 @@ xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgea
|
|||
write_unlock(&shrinker->lock);
|
||||
}
|
||||
|
||||
static s64 xe_shrinker_walk(struct xe_device *xe,
|
||||
struct ttm_operation_ctx *ctx,
|
||||
const struct xe_bo_shrink_flags flags,
|
||||
unsigned long to_scan, unsigned long *scanned)
|
||||
static s64 __xe_shrinker_walk(struct xe_device *xe,
|
||||
struct ttm_operation_ctx *ctx,
|
||||
const struct xe_bo_shrink_flags flags,
|
||||
unsigned long to_scan, unsigned long *scanned)
|
||||
{
|
||||
unsigned int mem_type;
|
||||
s64 freed = 0, lret;
|
||||
|
|
@ -93,6 +93,48 @@ static s64 xe_shrinker_walk(struct xe_device *xe,
|
|||
return freed;
|
||||
}
|
||||
|
||||
/*
|
||||
* Try shrinking idle objects without writeback first, then if not sufficient,
|
||||
* try also non-idle objects and finally if that's not sufficient either,
|
||||
* add writeback. This avoids stalls and explicit writebacks with light or
|
||||
* moderate memory pressure.
|
||||
*/
|
||||
static s64 xe_shrinker_walk(struct xe_device *xe,
|
||||
struct ttm_operation_ctx *ctx,
|
||||
const struct xe_bo_shrink_flags flags,
|
||||
unsigned long to_scan, unsigned long *scanned)
|
||||
{
|
||||
bool no_wait_gpu = true;
|
||||
struct xe_bo_shrink_flags save_flags = flags;
|
||||
s64 lret, freed;
|
||||
|
||||
swap(no_wait_gpu, ctx->no_wait_gpu);
|
||||
save_flags.writeback = false;
|
||||
lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
|
||||
swap(no_wait_gpu, ctx->no_wait_gpu);
|
||||
if (lret < 0 || *scanned >= to_scan)
|
||||
return lret;
|
||||
|
||||
freed = lret;
|
||||
if (!ctx->no_wait_gpu) {
|
||||
lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned);
|
||||
if (lret < 0)
|
||||
return lret;
|
||||
freed += lret;
|
||||
if (*scanned >= to_scan)
|
||||
return freed;
|
||||
}
|
||||
|
||||
if (flags.writeback) {
|
||||
lret = __xe_shrinker_walk(xe, ctx, flags, to_scan, scanned);
|
||||
if (lret < 0)
|
||||
return lret;
|
||||
freed += lret;
|
||||
}
|
||||
|
||||
return freed;
|
||||
}
|
||||
|
||||
static unsigned long
|
||||
xe_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
|
||||
{
|
||||
|
|
@ -199,6 +241,7 @@ static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_con
|
|||
runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup);
|
||||
|
||||
shrink_flags.purge = false;
|
||||
|
||||
lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags,
|
||||
nr_to_scan, &nr_scanned);
|
||||
if (lret >= 0)
|
||||
|
|
|
|||
|
|
@ -1679,7 +1679,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
|
|||
};
|
||||
|
||||
static const struct x86_cpu_id intel_mwait_ids[] __initconst = {
|
||||
X86_MATCH_VENDOR_FAM_FEATURE(INTEL, 6, X86_FEATURE_MWAIT, NULL),
|
||||
X86_MATCH_VENDOR_FAM_FEATURE(INTEL, X86_FAMILY_ANY, X86_FEATURE_MWAIT, NULL),
|
||||
{}
|
||||
};
|
||||
|
||||
|
|
|
|||
|
|
@ -95,13 +95,13 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker);
|
|||
static void ad_mux_machine(struct port *port, bool *update_slave_arr);
|
||||
static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
|
||||
static void ad_tx_machine(struct port *port);
|
||||
static void ad_periodic_machine(struct port *port, struct bond_params *bond_params);
|
||||
static void ad_periodic_machine(struct port *port);
|
||||
static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
|
||||
static void ad_agg_selection_logic(struct aggregator *aggregator,
|
||||
bool *update_slave_arr);
|
||||
static void ad_clear_agg(struct aggregator *aggregator);
|
||||
static void ad_initialize_agg(struct aggregator *aggregator);
|
||||
static void ad_initialize_port(struct port *port, int lacp_fast);
|
||||
static void ad_initialize_port(struct port *port, const struct bond_params *bond_params);
|
||||
static void ad_enable_collecting(struct port *port);
|
||||
static void ad_disable_distributing(struct port *port,
|
||||
bool *update_slave_arr);
|
||||
|
|
@ -1307,10 +1307,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
|
|||
* case of EXPIRED even if LINK_DOWN didn't arrive for
|
||||
* the port.
|
||||
*/
|
||||
port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
|
||||
port->sm_vars &= ~AD_PORT_MATCHED;
|
||||
/* Based on IEEE 8021AX-2014, Figure 6-18 - Receive
|
||||
* machine state diagram, the statue should be
|
||||
* Partner_Oper_Port_State.Synchronization = FALSE;
|
||||
* Partner_Oper_Port_State.LACP_Timeout = Short Timeout;
|
||||
* start current_while_timer(Short Timeout);
|
||||
* Actor_Oper_Port_State.Expired = TRUE;
|
||||
*/
|
||||
port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
|
||||
port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
|
||||
port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
|
||||
port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
|
||||
port->actor_oper_port_state |= LACP_STATE_EXPIRED;
|
||||
port->sm_vars |= AD_PORT_CHURNED;
|
||||
|
|
@ -1417,11 +1423,10 @@ static void ad_tx_machine(struct port *port)
|
|||
/**
|
||||
* ad_periodic_machine - handle a port's periodic state machine
|
||||
* @port: the port we're looking at
|
||||
* @bond_params: bond parameters we will use
|
||||
*
|
||||
* Turn ntt flag on priodically to perform periodic transmission of lacpdu's.
|
||||
*/
|
||||
static void ad_periodic_machine(struct port *port, struct bond_params *bond_params)
|
||||
static void ad_periodic_machine(struct port *port)
|
||||
{
|
||||
periodic_states_t last_state;
|
||||
|
||||
|
|
@ -1430,8 +1435,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params *bond_para
|
|||
|
||||
/* check if port was reinitialized */
|
||||
if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
|
||||
(!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
|
||||
!bond_params->lacp_active) {
|
||||
(!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) {
|
||||
port->sm_periodic_state = AD_NO_PERIODIC;
|
||||
}
|
||||
/* check if state machine should change state */
|
||||
|
|
@ -1955,16 +1959,16 @@ static void ad_initialize_agg(struct aggregator *aggregator)
|
|||
/**
|
||||
* ad_initialize_port - initialize a given port's parameters
|
||||
* @port: the port we're looking at
|
||||
* @lacp_fast: boolean. whether fast periodic should be used
|
||||
* @bond_params: bond parameters we will use
|
||||
*/
|
||||
static void ad_initialize_port(struct port *port, int lacp_fast)
|
||||
static void ad_initialize_port(struct port *port, const struct bond_params *bond_params)
|
||||
{
|
||||
static const struct port_params tmpl = {
|
||||
.system_priority = 0xffff,
|
||||
.key = 1,
|
||||
.port_number = 1,
|
||||
.port_priority = 0xff,
|
||||
.port_state = 1,
|
||||
.port_state = 0,
|
||||
};
|
||||
static const struct lacpdu lacpdu = {
|
||||
.subtype = 0x01,
|
||||
|
|
@ -1982,12 +1986,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
|
|||
port->actor_port_priority = 0xff;
|
||||
port->actor_port_aggregator_identifier = 0;
|
||||
port->ntt = false;
|
||||
port->actor_admin_port_state = LACP_STATE_AGGREGATION |
|
||||
LACP_STATE_LACP_ACTIVITY;
|
||||
port->actor_oper_port_state = LACP_STATE_AGGREGATION |
|
||||
LACP_STATE_LACP_ACTIVITY;
|
||||
port->actor_admin_port_state = LACP_STATE_AGGREGATION;
|
||||
port->actor_oper_port_state = LACP_STATE_AGGREGATION;
|
||||
if (bond_params->lacp_active) {
|
||||
port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY;
|
||||
port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
|
||||
}
|
||||
|
||||
if (lacp_fast)
|
||||
if (bond_params->lacp_fast)
|
||||
port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
|
||||
|
||||
memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
|
||||
|
|
@ -2201,7 +2207,7 @@ void bond_3ad_bind_slave(struct slave *slave)
|
|||
/* port initialization */
|
||||
port = &(SLAVE_AD_INFO(slave)->port);
|
||||
|
||||
ad_initialize_port(port, bond->params.lacp_fast);
|
||||
ad_initialize_port(port, &bond->params);
|
||||
|
||||
port->slave = slave;
|
||||
port->actor_port_number = SLAVE_AD_INFO(slave)->id;
|
||||
|
|
@ -2513,7 +2519,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
|
|||
}
|
||||
|
||||
ad_rx_machine(NULL, port);
|
||||
ad_periodic_machine(port, &bond->params);
|
||||
ad_periodic_machine(port);
|
||||
ad_port_selection_logic(port, &update_slave_arr);
|
||||
ad_mux_machine(port, &update_slave_arr);
|
||||
ad_tx_machine(port);
|
||||
|
|
@ -2883,6 +2889,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
|
|||
spin_unlock_bh(&bond->mode_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* bond_3ad_update_lacp_active - change the lacp active
|
||||
* @bond: bonding struct
|
||||
*
|
||||
* Update actor_oper_port_state when lacp_active is modified.
|
||||
*/
|
||||
void bond_3ad_update_lacp_active(struct bonding *bond)
|
||||
{
|
||||
struct port *port = NULL;
|
||||
struct list_head *iter;
|
||||
struct slave *slave;
|
||||
int lacp_active;
|
||||
|
||||
lacp_active = bond->params.lacp_active;
|
||||
spin_lock_bh(&bond->mode_lock);
|
||||
bond_for_each_slave(bond, slave, iter) {
|
||||
port = &(SLAVE_AD_INFO(slave)->port);
|
||||
if (lacp_active)
|
||||
port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
|
||||
else
|
||||
port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY;
|
||||
}
|
||||
spin_unlock_bh(&bond->mode_lock);
|
||||
}
|
||||
|
||||
size_t bond_3ad_stats_size(void)
|
||||
{
|
||||
return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
|
||||
|
|
|
|||
|
|
@ -1660,6 +1660,7 @@ static int bond_option_lacp_active_set(struct bonding *bond,
|
|||
netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
|
||||
newval->string, newval->value);
|
||||
bond->params.lacp_active = newval->value;
|
||||
bond_3ad_update_lacp_active(bond);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -2078,7 +2078,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
|
|||
|
||||
/* Start search operation */
|
||||
reg = ARL_SRCH_STDN;
|
||||
b53_write8(priv, offset, B53_ARL_SRCH_CTL, reg);
|
||||
b53_write8(priv, B53_ARLIO_PAGE, offset, reg);
|
||||
|
||||
do {
|
||||
ret = b53_arl_search_wait(priv);
|
||||
|
|
|
|||
|
|
@ -2457,6 +2457,12 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
|
|||
dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
|
||||
}
|
||||
|
||||
/* HSR ports are setup once so need to use the assigned membership
|
||||
* when the port is enabled.
|
||||
*/
|
||||
if (!port_member && p->stp_state == BR_STATE_FORWARDING &&
|
||||
(dev->hsr_ports & BIT(port)))
|
||||
port_member = dev->hsr_ports;
|
||||
dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -781,10 +781,8 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
|
|||
continue;
|
||||
}
|
||||
|
||||
if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
|
||||
e->hash = 0xffff;
|
||||
if (!airoha_ppe_foe_compare_entry(e, hwe))
|
||||
continue;
|
||||
}
|
||||
|
||||
airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
|
||||
commit_done = true;
|
||||
|
|
|
|||
|
|
@ -5336,7 +5336,7 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
|
|||
{
|
||||
int i;
|
||||
|
||||
netdev_assert_locked(bp->dev);
|
||||
netdev_assert_locked_or_invisible(bp->dev);
|
||||
|
||||
/* Under netdev instance lock and all our NAPIs have been disabled.
|
||||
* It's safe to delete the hash table.
|
||||
|
|
|
|||
|
|
@ -5349,7 +5349,8 @@ static const struct macb_config sama7g5_gem_config = {
|
|||
|
||||
static const struct macb_config sama7g5_emac_config = {
|
||||
.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
|
||||
MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP,
|
||||
MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII |
|
||||
MACB_CAPS_GEM_HAS_PTP,
|
||||
.dma_burst_length = 16,
|
||||
.clk_init = macb_clk_init,
|
||||
.init = macb_init,
|
||||
|
|
|
|||
|
|
@ -2870,6 +2870,8 @@ static void gve_shutdown(struct pci_dev *pdev)
|
|||
struct gve_priv *priv = netdev_priv(netdev);
|
||||
bool was_up = netif_running(priv->dev);
|
||||
|
||||
netif_device_detach(netdev);
|
||||
|
||||
rtnl_lock();
|
||||
netdev_lock(netdev);
|
||||
if (was_up && gve_close(priv->dev)) {
|
||||
|
|
|
|||
|
|
@ -7149,6 +7149,13 @@ static int igc_probe(struct pci_dev *pdev,
|
|||
adapter->port_num = hw->bus.func;
|
||||
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
|
||||
|
||||
/* PCI config space info */
|
||||
hw->vendor_id = pdev->vendor;
|
||||
hw->device_id = pdev->device;
|
||||
hw->revision_id = pdev->revision;
|
||||
hw->subsystem_vendor_id = pdev->subsystem_vendor;
|
||||
hw->subsystem_device_id = pdev->subsystem_device;
|
||||
|
||||
/* Disable ASPM L1.2 on I226 devices to avoid packet loss */
|
||||
if (igc_is_device_id_i226(hw))
|
||||
pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
|
||||
|
|
@ -7175,13 +7182,6 @@ static int igc_probe(struct pci_dev *pdev,
|
|||
netdev->mem_start = pci_resource_start(pdev, 0);
|
||||
netdev->mem_end = pci_resource_end(pdev, 0);
|
||||
|
||||
/* PCI config space info */
|
||||
hw->vendor_id = pdev->vendor;
|
||||
hw->device_id = pdev->device;
|
||||
hw->revision_id = pdev->revision;
|
||||
hw->subsystem_vendor_id = pdev->subsystem_vendor;
|
||||
hw->subsystem_device_id = pdev->subsystem_device;
|
||||
|
||||
/* Copy the default MAC and PHY function pointers */
|
||||
memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
|
||||
memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
|
||||
|
|
|
|||
|
|
@ -968,10 +968,6 @@ static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter)
|
|||
for (i = 0; i < adapter->num_tx_queues; i++)
|
||||
clear_bit(__IXGBE_HANG_CHECK_ARMED,
|
||||
&adapter->tx_ring[i]->state);
|
||||
|
||||
for (i = 0; i < adapter->num_xdp_queues; i++)
|
||||
clear_bit(__IXGBE_HANG_CHECK_ARMED,
|
||||
&adapter->xdp_ring[i]->state);
|
||||
}
|
||||
|
||||
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
|
||||
|
|
@ -1214,7 +1210,7 @@ static void ixgbe_pf_handle_tx_hang(struct ixgbe_ring *tx_ring,
|
|||
struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
|
||||
struct ixgbe_hw *hw = &adapter->hw;
|
||||
|
||||
e_err(drv, "Detected Tx Unit Hang%s\n"
|
||||
e_err(drv, "Detected Tx Unit Hang\n"
|
||||
" Tx Queue <%d>\n"
|
||||
" TDH, TDT <%x>, <%x>\n"
|
||||
" next_to_use <%x>\n"
|
||||
|
|
@ -1222,16 +1218,14 @@ static void ixgbe_pf_handle_tx_hang(struct ixgbe_ring *tx_ring,
|
|||
"tx_buffer_info[next_to_clean]\n"
|
||||
" time_stamp <%lx>\n"
|
||||
" jiffies <%lx>\n",
|
||||
ring_is_xdp(tx_ring) ? " (XDP)" : "",
|
||||
tx_ring->queue_index,
|
||||
IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
|
||||
IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
|
||||
tx_ring->next_to_use, next,
|
||||
tx_ring->tx_buffer_info[next].time_stamp, jiffies);
|
||||
|
||||
if (!ring_is_xdp(tx_ring))
|
||||
netif_stop_subqueue(tx_ring->netdev,
|
||||
tx_ring->queue_index);
|
||||
netif_stop_subqueue(tx_ring->netdev,
|
||||
tx_ring->queue_index);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -1451,6 +1445,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
|
|||
total_bytes);
|
||||
adapter->tx_ipsec += total_ipsec;
|
||||
|
||||
if (ring_is_xdp(tx_ring))
|
||||
return !!budget;
|
||||
|
||||
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
|
||||
if (adapter->hw.mac.type == ixgbe_mac_e610)
|
||||
ixgbe_handle_mdd_event(adapter, tx_ring);
|
||||
|
|
@ -1468,9 +1465,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
|
|||
return true;
|
||||
}
|
||||
|
||||
if (ring_is_xdp(tx_ring))
|
||||
return !!budget;
|
||||
|
||||
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
|
||||
txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
|
||||
if (!__netif_txq_completed_wake(txq, total_packets, total_bytes,
|
||||
|
|
@ -7974,12 +7968,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
|
|||
return;
|
||||
|
||||
/* Force detection of hung controller */
|
||||
if (netif_carrier_ok(adapter->netdev)) {
|
||||
if (netif_carrier_ok(adapter->netdev))
|
||||
for (i = 0; i < adapter->num_tx_queues; i++)
|
||||
set_check_for_tx_hang(adapter->tx_ring[i]);
|
||||
for (i = 0; i < adapter->num_xdp_queues; i++)
|
||||
set_check_for_tx_hang(adapter->xdp_ring[i]);
|
||||
}
|
||||
|
||||
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
|
||||
/*
|
||||
|
|
@ -8199,13 +8190,6 @@ static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter)
|
|||
return true;
|
||||
}
|
||||
|
||||
for (i = 0; i < adapter->num_xdp_queues; i++) {
|
||||
struct ixgbe_ring *ring = adapter->xdp_ring[i];
|
||||
|
||||
if (ring->next_to_use != ring->next_to_clean)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
@ -11005,6 +10989,10 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
|
|||
if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
|
||||
return -ENETDOWN;
|
||||
|
||||
if (!netif_carrier_ok(adapter->netdev) ||
|
||||
!netif_running(adapter->netdev))
|
||||
return -ENETDOWN;
|
||||
|
||||
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
|
||||
return -EINVAL;
|
||||
|
||||
|
|
|
|||
|
|
@ -398,7 +398,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
|
|||
dma_addr_t dma;
|
||||
u32 cmd_type;
|
||||
|
||||
while (budget-- > 0) {
|
||||
while (likely(budget)) {
|
||||
if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
|
||||
work_done = false;
|
||||
break;
|
||||
|
|
@ -433,6 +433,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
|
|||
xdp_ring->next_to_use++;
|
||||
if (xdp_ring->next_to_use == xdp_ring->count)
|
||||
xdp_ring->next_to_use = 0;
|
||||
|
||||
budget--;
|
||||
}
|
||||
|
||||
if (tx_desc) {
|
||||
|
|
|
|||
|
|
@ -606,8 +606,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
|
|||
if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
|
||||
*features &= ~BIT_ULL(NPC_OUTER_VID);
|
||||
|
||||
/* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
|
||||
if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
|
||||
/* Allow extracting SPI field from AH and ESP headers at same offset */
|
||||
if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) &&
|
||||
(*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
|
||||
*features |= BIT_ULL(NPC_IPSEC_SPI);
|
||||
|
||||
|
|
|
|||
|
|
@ -101,7 +101,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
|
|||
if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
|
||||
return -1;
|
||||
|
||||
rcu_read_lock();
|
||||
err = dev_fill_forward_path(dev, addr, &stack);
|
||||
rcu_read_unlock();
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
|
|
|||
|
|
@ -26,7 +26,6 @@ struct mlx5e_dcbx {
|
|||
u8 cap;
|
||||
|
||||
/* Buffer configuration */
|
||||
bool manual_buffer;
|
||||
u32 cable_len;
|
||||
u32 xoff;
|
||||
u16 port_buff_cell_sz;
|
||||
|
|
|
|||
|
|
@@ -272,8 +272,8 @@ static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
/* Total shared buffer size is split in a ratio of 3:1 between
* lossy and lossless pools respectively.
*/
lossy_epool_size = (shared_buffer_size / 4) * 3;
lossless_ipool_size = shared_buffer_size / 4;
lossy_epool_size = shared_buffer_size - lossless_ipool_size;
mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
lossy_epool_size);

@@ -288,14 +288,12 @@ static int port_set_buffer(struct mlx5e_priv *priv,
u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
u32 new_headroom_size = 0;
u32 current_headroom_size;
u32 current_headroom_cells = 0;
u32 new_headroom_cells = 0;
void *in;
int err;
int i;
current_headroom_size = port_buffer->headroom_size;
in = kzalloc(sz, GFP_KERNEL);
if (!in)
return -ENOMEM;

@@ -306,12 +304,14 @@ static int port_set_buffer(struct mlx5e_priv *priv,
for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size);
u64 size = port_buffer->buffer[i].size;
u64 xoff = port_buffer->buffer[i].xoff;
u64 xon = port_buffer->buffer[i].xon;
new_headroom_size += size;
do_div(size, port_buff_cell_sz);
new_headroom_cells += size;
do_div(xoff, port_buff_cell_sz);
do_div(xon, port_buff_cell_sz);
MLX5_SET(bufferx_reg, buffer, size, size);

@@ -320,10 +320,8 @@ static int port_set_buffer(struct mlx5e_priv *priv,
MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
}
new_headroom_size /= port_buff_cell_sz;
current_headroom_size /= port_buff_cell_sz;
err = port_update_shared_buffer(priv->mdev, current_headroom_size,
new_headroom_size);
err = port_update_shared_buffer(priv->mdev, current_headroom_cells,
new_headroom_cells);
if (err)
goto out;

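The two port buffer hunks above are, at bottom, arithmetic fixes: the lossless pool takes a quarter of the shared buffer and the lossy pool must take exactly the remainder, and headroom has to be compared in buffer cells rather than bytes. A self-contained sketch of that arithmetic, using made-up sizes and a hypothetical cell size purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Example values only; real sizes come from the device. */
            uint32_t shared_buffer_size = 1000003;  /* not divisible by 4 */
            uint32_t port_buff_cell_sz = 128;

            /* 3:1 lossy:lossless split; computing the lossy pool as the
             * remainder keeps the two pools summing to the whole buffer.
             */
            uint32_t lossless = shared_buffer_size / 4;
            uint32_t lossy = shared_buffer_size - lossless;
            uint32_t lossy_old = (shared_buffer_size / 4) * 3;      /* old formula */

            printf("lossless=%u lossy=%u sum=%u\n",
                   lossless, lossy, lossless + lossy);
            printf("old lossy=%u sum=%u (leaves bytes unaccounted)\n",
                   lossy_old, lossless + lossy_old);

            /* Headroom comparison in cells: round a byte size down to
             * whole cells before accumulating, as the loop above does.
             */
            uint32_t size_bytes = 9999;
            uint32_t size_cells = size_bytes / port_buff_cell_sz;
            printf("%u bytes -> %u cells of %u bytes\n",
                   size_bytes, size_cells, port_buff_cell_sz);
            return 0;
    }
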
@@ -173,6 +173,8 @@ static void mlx5_ct_fs_hmfs_fill_rule_actions(struct mlx5_ct_fs_hmfs *fs_hmfs,
memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions));
rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter);
rule_actions[0].counter.offset =
attr->counter->id - attr->counter->bulk->base_id;
/* Modify header is special, it may require extra arguments outside the action itself. */
if (mh_action->mh_data) {
rule_actions[1].modify_header.offset = mh_action->mh_data->offset;

@@ -362,6 +362,7 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
u32 old_cable_len = priv->dcbx.cable_len;

@@ -389,7 +390,14 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
if (MLX5_BUFFER_SUPPORTED(mdev)) {
pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
if (priv->dcbx.manual_buffer)
ret = mlx5_query_port_buffer_ownership(mdev,
&buffer_ownership);
if (ret)
netdev_err(dev,
"%s, Failed to get buffer ownership: %d\n",
__func__, ret);
if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED)
ret = mlx5e_port_manual_buffer_config(priv, changed,
dev->mtu, &pfc_new,
NULL, NULL);

@@ -982,7 +990,6 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
if (!changed)
return 0;
priv->dcbx.manual_buffer = true;
err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
buffer_size, prio2buffer);
return err;

@@ -1252,7 +1259,6 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
priv->dcbx.manual_buffer = false;
priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
mlx5e_ets_init(priv);

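The setpfc hunk above swaps the driver-side manual_buffer flag for an explicit firmware query: the buffer configuration is only pushed manually when the device reports it as software-owned, and a failed query is logged and treated as unknown ownership. A compressed sketch of that decision flow, with hypothetical stand-ins for the mlx5 calls:

    #include <stdio.h>

    enum buf_ownership {
            BUF_OWNERSHIP_UNKNOWN,
            BUF_OWNERSHIP_FW_OWNED,
            BUF_OWNERSHIP_SW_OWNED,
    };

    /* Hypothetical stand-in for mlx5_query_port_buffer_ownership(). */
    static int query_buffer_ownership(enum buf_ownership *ownership)
    {
            *ownership = BUF_OWNERSHIP_SW_OWNED;    /* pretend FW says SW-owned */
            return 0;
    }

    int main(void)
    {
            enum buf_ownership ownership = BUF_OWNERSHIP_UNKNOWN;
            int ret = query_buffer_ownership(&ownership);

            if (ret)
                    fprintf(stderr, "failed to get buffer ownership: %d\n", ret);

            /* Only reconfigure the buffer when it is software-owned;
             * unknown or FW-owned configurations are left alone.
             */
            if (ownership == BUF_OWNERSHIP_SW_OWNED)
                    printf("applying manual buffer configuration\n");
            else
                    printf("buffer not SW-owned, skipping manual config\n");
            return 0;
    }
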
@@ -47,10 +47,12 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
vport_num - 1, external);
} else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
u16 base_vport = mlx5_core_ec_vf_vport_base(dev);
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
vport_num - 1, false);
vport_num - base_vport, false);
}
}

@@ -102,6 +102,8 @@ struct mlx5_esw_sched_node {
u8 level;
/* Valid only when this node represents a traffic class. */
u8 tc;
/* Valid only for a TC arbiter node or vport TC arbiter. */
u32 tc_bw[DEVLINK_RATE_TCS_MAX];
};
static void esw_qos_node_attach_to_parent(struct mlx5_esw_sched_node *node)

@@ -462,6 +464,7 @@ static int
esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *parent = vport_node->parent;
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_core_dev *dev = vport_node->esw->dev;
void *attr;

@@ -477,7 +480,7 @@ esw_qos_vport_create_sched_element(struct mlx5_esw_sched_node *vport_node,
attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
MLX5_SET(vport_element, attr, vport_number, vport_node->vport->vport);
MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
vport_node->parent->ix);
parent ? parent->ix : vport_node->esw->qos.root_tsar_ix);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
vport_node->max_rate);

@@ -608,10 +611,7 @@ static void
esw_qos_tc_arbiter_get_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
u32 *tc_bw)
{
struct mlx5_esw_sched_node *vports_tc_node;
list_for_each_entry(vports_tc_node, &tc_arbiter_node->children, entry)
tc_bw[vports_tc_node->tc] = vports_tc_node->bw_share;
memcpy(tc_bw, tc_arbiter_node->tc_bw, sizeof(tc_arbiter_node->tc_bw));
}
static void

@@ -628,6 +628,7 @@ esw_qos_set_tc_arbiter_bw_shares(struct mlx5_esw_sched_node *tc_arbiter_node,
u8 tc = vports_tc_node->tc;
u32 bw_share;
tc_arbiter_node->tc_bw[tc] = tc_bw[tc];
bw_share = tc_bw[tc] * fw_max_bw_share;
bw_share = esw_qos_calc_bw_share(bw_share, divider,
fw_max_bw_share);

@@ -786,48 +787,15 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
return err;
}
if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) {
esw->qos.node0 = __esw_qos_create_vports_sched_node(esw, NULL, extack);
} else {
/* The eswitch doesn't support scheduling nodes.
* Create a software-only node0 using the root TSAR to attach vport QoS to.
*/
if (!__esw_qos_alloc_node(esw,
esw->qos.root_tsar_ix,
SCHED_NODE_TYPE_VPORTS_TSAR,
NULL))
esw->qos.node0 = ERR_PTR(-ENOMEM);
else
list_add_tail(&esw->qos.node0->entry,
&esw->qos.domain->nodes);
}
if (IS_ERR(esw->qos.node0)) {
err = PTR_ERR(esw->qos.node0);
esw_warn(dev, "E-Switch create rate node 0 failed (%d)\n", err);
goto err_node0;
}
refcount_set(&esw->qos.refcnt, 1);
return 0;
err_node0:
if (mlx5_destroy_scheduling_element_cmd(esw->dev, SCHEDULING_HIERARCHY_E_SWITCH,
esw->qos.root_tsar_ix))
esw_warn(esw->dev, "E-Switch destroy root TSAR failed.\n");
return err;
}
static void esw_qos_destroy(struct mlx5_eswitch *esw)
{
int err;
if (esw->qos.node0->ix != esw->qos.root_tsar_ix)
__esw_qos_destroy_node(esw->qos.node0, NULL);
else
__esw_qos_free_node(esw->qos.node0);
esw->qos.node0 = NULL;
err = mlx5_destroy_scheduling_element_cmd(esw->dev,
SCHEDULING_HIERARCHY_E_SWITCH,
esw->qos.root_tsar_ix);

@@ -990,13 +958,16 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
int err, new_level, max_level;
struct mlx5_esw_sched_node *parent = vport_node->parent;
int err;
if (type == SCHED_NODE_TYPE_TC_ARBITER_TSAR) {
int new_level, max_level;
/* Increase the parent's level by 2 to account for both the
* TC arbiter and the vports TC scheduling element.
*/
new_level = vport_node->parent->level + 2;
new_level = (parent ? parent->level : 2) + 2;
max_level = 1 << MLX5_CAP_QOS(vport_node->esw->dev,
log_esw_max_sched_depth);
if (new_level > max_level) {

@@ -1033,9 +1004,7 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
err_sched_nodes:
if (type == SCHED_NODE_TYPE_RATE_LIMITER) {
esw_qos_node_destroy_sched_element(vport_node, NULL);
list_add_tail(&vport_node->entry,
&vport_node->parent->children);
vport_node->level = vport_node->parent->level + 1;
esw_qos_node_attach_to_parent(vport_node);
} else {
esw_qos_tc_arbiter_scheduling_teardown(vport_node, NULL);
}

@@ -1083,7 +1052,6 @@ static int esw_qos_set_vport_tcs_min_rate(struct mlx5_vport *vport,
static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
struct mlx5_esw_sched_node *parent = vport_node->parent;
enum sched_node_type curr_type = vport_node->type;
if (curr_type == SCHED_NODE_TYPE_VPORT)

@@ -1092,8 +1060,9 @@ static void esw_qos_vport_disable(struct mlx5_vport *vport, struct netlink_ext_a
esw_qos_vport_tc_disable(vport, extack);
vport_node->bw_share = 0;
memset(vport_node->tc_bw, 0, sizeof(vport_node->tc_bw));
list_del_init(&vport_node->entry);
esw_qos_normalize_min_rate(parent->esw, parent, extack);
esw_qos_normalize_min_rate(vport_node->esw, vport_node->parent, extack);
trace_mlx5_esw_vport_qos_destroy(vport_node->esw->dev, vport);
}

@@ -1103,25 +1072,23 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport,
struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
int err;
esw_assert_qos_lock_held(vport->dev->priv.eswitch);
esw_qos_node_set_parent(vport->qos.sched_node, parent);
if (type == SCHED_NODE_TYPE_VPORT) {
err = esw_qos_vport_create_sched_element(vport->qos.sched_node,
extack);
} else {
esw_qos_node_set_parent(vport_node, parent);
if (type == SCHED_NODE_TYPE_VPORT)
err = esw_qos_vport_create_sched_element(vport_node, extack);
else
err = esw_qos_vport_tc_enable(vport, type, extack);
}
if (err)
return err;
vport->qos.sched_node->type = type;
esw_qos_normalize_min_rate(parent->esw, parent, extack);
trace_mlx5_esw_vport_qos_create(vport->dev, vport,
vport->qos.sched_node->max_rate,
vport->qos.sched_node->bw_share);
vport_node->type = type;
esw_qos_normalize_min_rate(vport_node->esw, parent, extack);
trace_mlx5_esw_vport_qos_create(vport->dev, vport, vport_node->max_rate,
vport_node->bw_share);
return 0;
}

@@ -1132,6 +1099,7 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
struct mlx5_esw_sched_node *sched_node;
struct mlx5_eswitch *parent_esw;
int err;
esw_assert_qos_lock_held(esw);

@@ -1139,10 +1107,14 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
if (err)
return err;
parent = parent ?: esw->qos.node0;
sched_node = __esw_qos_alloc_node(parent->esw, 0, type, parent);
if (!sched_node)
parent_esw = parent ? parent->esw : esw;
sched_node = __esw_qos_alloc_node(parent_esw, 0, type, parent);
if (!sched_node) {
esw_qos_put(esw);
return -ENOMEM;
}
if (!parent)
list_add_tail(&sched_node->entry, &esw->qos.domain->nodes);
sched_node->max_rate = max_rate;
sched_node->min_rate = min_rate;

@@ -1150,6 +1122,7 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
vport->qos.sched_node = sched_node;
err = esw_qos_vport_enable(vport, type, parent, extack);
if (err) {
__esw_qos_free_node(sched_node);
esw_qos_put(esw);
vport->qos.sched_node = NULL;
}

@@ -1157,6 +1130,19 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
return err;
}
static void mlx5_esw_qos_vport_disable_locked(struct mlx5_vport *vport)
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
esw_assert_qos_lock_held(esw);
if (!vport->qos.sched_node)
return;
esw_qos_vport_disable(vport, NULL);
mlx5_esw_qos_vport_qos_free(vport);
esw_qos_put(esw);
}
void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
{
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

@@ -1168,11 +1154,9 @@ void mlx5_esw_qos_vport_disable(struct mlx5_vport *vport)
goto unlock;
parent = vport->qos.sched_node->parent;
WARN(parent != esw->qos.node0, "Disabling QoS on port before detaching it from node");
WARN(parent, "Disabling QoS on port before detaching it from node");
esw_qos_vport_disable(vport, NULL);
mlx5_esw_qos_vport_qos_free(vport);
esw_qos_put(esw);
mlx5_esw_qos_vport_disable_locked(vport);
unlock:
esw_qos_unlock(esw);
}

@@ -1262,13 +1246,13 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
struct mlx5_esw_sched_node *parent,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *curr_parent = vport->qos.sched_node->parent;
enum sched_node_type curr_type = vport->qos.sched_node->type;
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
struct mlx5_esw_sched_node *curr_parent = vport_node->parent;
enum sched_node_type curr_type = vport_node->type;
u32 curr_tc_bw[DEVLINK_RATE_TCS_MAX] = {0};
int err;
esw_assert_qos_lock_held(vport->dev->priv.eswitch);
parent = parent ?: curr_parent;
if (curr_type == type && curr_parent == parent)
return 0;

@@ -1276,10 +1260,8 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
if (err)
return err;
if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
esw_qos_tc_arbiter_get_bw_shares(vport->qos.sched_node,
curr_tc_bw);
}
if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type)
esw_qos_tc_arbiter_get_bw_shares(vport_node, curr_tc_bw);
esw_qos_vport_disable(vport, extack);

@@ -1290,8 +1272,8 @@ static int esw_qos_vport_update(struct mlx5_vport *vport,
}
if (curr_type == SCHED_NODE_TYPE_TC_ARBITER_TSAR && curr_type == type) {
esw_qos_set_tc_arbiter_bw_shares(vport->qos.sched_node,
curr_tc_bw, extack);
esw_qos_set_tc_arbiter_bw_shares(vport_node, curr_tc_bw,
extack);
}
return err;

@@ -1306,16 +1288,16 @@ static int esw_qos_vport_update_parent(struct mlx5_vport *vport, struct mlx5_esw
esw_assert_qos_lock_held(esw);
curr_parent = vport->qos.sched_node->parent;
parent = parent ?: esw->qos.node0;
if (curr_parent == parent)
return 0;
/* Set vport QoS type based on parent node type if different from
* default QoS; otherwise, use the vport's current QoS type.
*/
if (parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
if (parent && parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
type = SCHED_NODE_TYPE_RATE_LIMITER;
else if (curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
else if (curr_parent &&
curr_parent->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
type = SCHED_NODE_TYPE_VPORT;
else
type = vport->qos.sched_node->type;

@@ -1654,9 +1636,10 @@ static bool esw_qos_validate_unsupported_tc_bw(struct mlx5_eswitch *esw,
static bool esw_qos_vport_validate_unsupported_tc_bw(struct mlx5_vport *vport,
u32 *tc_bw)
{
struct mlx5_eswitch *esw = vport->qos.sched_node ?
vport->qos.sched_node->parent->esw :
vport->dev->priv.eswitch;
struct mlx5_esw_sched_node *node = vport->qos.sched_node;
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
esw = (node && node->parent) ? node->parent->esw : esw;
return esw_qos_validate_unsupported_tc_bw(esw, tc_bw);
}

@@ -1673,6 +1656,21 @@ static bool esw_qos_tc_bw_disabled(u32 *tc_bw)
return true;
}
static void esw_vport_qos_prune_empty(struct mlx5_vport *vport)
{
struct mlx5_esw_sched_node *vport_node = vport->qos.sched_node;
esw_assert_qos_lock_held(vport->dev->priv.eswitch);
if (!vport_node)
return;
if (vport_node->parent || vport_node->max_rate ||
vport_node->min_rate || !esw_qos_tc_bw_disabled(vport_node->tc_bw))
return;
mlx5_esw_qos_vport_disable_locked(vport);
}
int mlx5_esw_qos_init(struct mlx5_eswitch *esw)
{
if (esw->qos.domain)

@@ -1706,6 +1704,10 @@ int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void
esw_qos_lock(esw);
err = mlx5_esw_qos_set_vport_min_rate(vport, tx_share, extack);
if (err)
goto out;
esw_vport_qos_prune_empty(vport);
out:
esw_qos_unlock(esw);
return err;
}

@@ -1727,6 +1729,10 @@ int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *
esw_qos_lock(esw);
err = mlx5_esw_qos_set_vport_max_rate(vport, tx_max, extack);
if (err)
goto out;
esw_vport_qos_prune_empty(vport);
out:
esw_qos_unlock(esw);
return err;
}

@@ -1763,7 +1769,8 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
if (disable) {
if (vport_node->type == SCHED_NODE_TYPE_TC_ARBITER_TSAR)
err = esw_qos_vport_update(vport, SCHED_NODE_TYPE_VPORT,
NULL, extack);
vport_node->parent, extack);
esw_vport_qos_prune_empty(vport);
goto unlock;
}

@@ -1775,7 +1782,7 @@ int mlx5_esw_devlink_rate_leaf_tc_bw_set(struct devlink_rate *rate_leaf,
} else {
err = esw_qos_vport_update(vport,
SCHED_NODE_TYPE_TC_ARBITER_TSAR,
NULL, extack);
vport_node->parent, extack);
}
if (!err)
esw_qos_set_tc_arbiter_bw_shares(vport_node, tc_bw, extack);

@@ -1924,14 +1931,20 @@ int mlx5_esw_devlink_rate_leaf_parent_set(struct devlink_rate *devlink_rate,
void *priv, void *parent_priv,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_sched_node *node;
struct mlx5_esw_sched_node *node = parent ? parent_priv : NULL;
struct mlx5_vport *vport = priv;
int err;
if (!parent)
return mlx5_esw_qos_vport_update_parent(vport, NULL, extack);
err = mlx5_esw_qos_vport_update_parent(vport, node, extack);
if (!err) {
struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
node = parent_priv;
return mlx5_esw_qos_vport_update_parent(vport, node, extack);
esw_qos_lock(esw);
esw_vport_qos_prune_empty(vport);
esw_qos_unlock(esw);
}
return err;
}
static bool esw_qos_is_node_empty(struct mlx5_esw_sched_node *node)

@@ -380,11 +380,6 @@ struct mlx5_eswitch {
refcount_t refcnt;
u32 root_tsar_ix;
struct mlx5_qos_domain *domain;
/* Contains all vports with QoS enabled but no explicit node.
* Cannot be NULL if QoS is enabled, but may be a fake node
* referencing the root TSAR if the esw doesn't support nodes.
*/
struct mlx5_esw_sched_node *node0;
} qos;
struct mlx5_esw_bridge_offloads *br_offloads;

@@ -367,6 +367,8 @@ int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
u8 *buffer_ownership);
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);

@@ -968,6 +968,26 @@ int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
return err;
}
int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
u8 *buffer_ownership)
{
u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {};
int err;
if (!MLX5_CAP_PCAM_FEATURE(mdev, buffer_ownership)) {
*buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
return 0;
}
err = mlx5_query_pfcc_reg(mdev, out, sizeof(out));
if (err)
return err;
*buffer_ownership = MLX5_GET(pfcc_reg, out, buf_ownership);
return 0;
}
int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
{
int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);

@@ -74,9 +74,9 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
static int
hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
{
bool move_error = false, poll_error = false, drain_error = false;
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
int drain_error = 0, move_error = 0, poll_error = 0;
u16 bwc_queues = mlx5hws_bwc_queues(ctx);
struct mlx5hws_rule_attr rule_attr;
struct mlx5hws_bwc_rule *bwc_rule;

@@ -84,6 +84,7 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
struct list_head *rules_list;
u32 pending_rules;
int i, ret = 0;
bool drain;
mlx5hws_bwc_rule_fill_attr(bwc_matcher, 0, 0, &rule_attr);

@@ -99,23 +100,37 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
ret = mlx5hws_matcher_resize_rule_move(matcher,
bwc_rule->rule,
&rule_attr);
if (unlikely(ret && !move_error)) {
mlx5hws_err(ctx,
"Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
ret);
move_error = true;
if (unlikely(ret)) {
if (!move_error) {
mlx5hws_err(ctx,
"Moving BWC rule: move failed (%d), attempting to move rest of the rules\n",
ret);
move_error = ret;
}
/* Rule wasn't queued, no need to poll */
continue;
}
pending_rules++;
drain = pending_rules >=
hws_bwc_get_burst_th(ctx, rule_attr.queue_id);
ret = mlx5hws_bwc_queue_poll(ctx,
rule_attr.queue_id,
&pending_rules,
false);
if (unlikely(ret && !poll_error)) {
mlx5hws_err(ctx,
"Moving BWC rule: poll failed (%d), attempting to move rest of the rules\n",
ret);
poll_error = true;
drain);
if (unlikely(ret)) {
if (ret == -ETIMEDOUT) {
mlx5hws_err(ctx,
"Moving BWC rule: timeout polling for completions (%d), aborting rehash\n",
ret);
return ret;
}
if (!poll_error) {
mlx5hws_err(ctx,
"Moving BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n",
ret);
poll_error = ret;
}
}
}

@@ -126,17 +141,30 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
rule_attr.queue_id,
&pending_rules,
true);
if (unlikely(ret && !drain_error)) {
mlx5hws_err(ctx,
"Moving BWC rule: drain failed (%d), attempting to move rest of the rules\n",
ret);
drain_error = true;
if (unlikely(ret)) {
if (ret == -ETIMEDOUT) {
mlx5hws_err(ctx,
"Moving bwc rule: timeout draining completions (%d), aborting rehash\n",
ret);
return ret;
}
if (!drain_error) {
mlx5hws_err(ctx,
"Moving bwc rule: drain failed (%d), attempting to move rest of the rules\n",
ret);
drain_error = ret;
}
}
}
}
if (move_error || poll_error || drain_error)
ret = -EINVAL;
/* Return the first error that happened */
if (unlikely(move_error))
return move_error;
if (unlikely(poll_error))
return poll_error;
if (unlikely(drain_error))
return drain_error;
return ret;
}

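The rework above replaces the boolean move/poll/drain flags with the first error code actually seen: ordinary failures are logged once, the loop keeps moving the remaining rules, a timeout aborts immediately, and the function finally returns the first recorded error instead of a generic -EINVAL. The skeleton of that pattern, reduced to a standalone sketch (hypothetical move_one() helper, not the mlx5hws code):

    #include <stdio.h>
    #include <errno.h>

    /* Hypothetical per-item operation that may fail. */
    static int move_one(int i)
    {
            return (i == 2) ? -EINVAL : 0;  /* pretend item 2 fails */
    }

    /* Keep going on ordinary failures, remember the first error code,
     * and return it at the end; a timeout aborts the whole pass.
     */
    static int move_all(int n)
    {
            int move_error = 0;

            for (int i = 0; i < n; i++) {
                    int ret = move_one(i);

                    if (ret) {
                            if (ret == -ETIMEDOUT)
                                    return ret;     /* fatal: abort */
                            if (!move_error)
                                    move_error = ret;       /* first error */
                            continue;
                    }
                    /* ... poll for completions here ... */
            }
            return move_error;      /* 0 on full success, first error otherwise */
    }

    int main(void)
    {
            printf("move_all() -> %d\n", move_all(5));
            return 0;
    }
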
@@ -1035,6 +1063,21 @@ int mlx5hws_bwc_rule_create_simple(struct mlx5hws_bwc_rule *bwc_rule,
return 0; /* rule inserted successfully */
}
/* Rule insertion could fail due to queue being full, timeout, or
* matcher in resize. In such cases, no point in trying to rehash.
*/
if (ret == -EBUSY || ret == -ETIMEDOUT || ret == -EAGAIN) {
mutex_unlock(queue_lock);
mlx5hws_err(ctx,
"BWC rule insertion failed - %s (%d)\n",
ret == -EBUSY ? "queue is full" :
ret == -ETIMEDOUT ? "timeout" :
ret == -EAGAIN ? "matcher in resize" : "N/A",
ret);
hws_bwc_rule_cnt_dec(bwc_rule);
return ret;
}
/* At this point the rule wasn't added.
* It could be because there was collision, or some other problem.
* Try rehash by size and insert rule again - last chance.

@@ -1328,11 +1328,11 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
{
struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
bool move_error = false, poll_error = false;
u16 bwc_queues = mlx5hws_bwc_queues(ctx);
struct mlx5hws_bwc_rule *tmp_bwc_rule;
struct mlx5hws_rule_attr rule_attr;
struct mlx5hws_table *isolated_tbl;
int move_error = 0, poll_error = 0;
struct mlx5hws_rule *tmp_rule;
struct list_head *rules_list;
u32 expected_completions = 1;

@@ -1391,11 +1391,15 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
ret = mlx5hws_matcher_resize_rule_move(matcher,
tmp_rule,
&rule_attr);
if (unlikely(ret && !move_error)) {
mlx5hws_err(ctx,
"Moving complex BWC rule failed (%d), attempting to move rest of the rules\n",
ret);
move_error = true;
if (unlikely(ret)) {
if (!move_error) {
mlx5hws_err(ctx,
"Moving complex BWC rule: move failed (%d), attempting to move rest of the rules\n",
ret);
move_error = ret;
}
/* Rule wasn't queued, no need to poll */
continue;
}
expected_completions = 1;

@@ -1403,11 +1407,19 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
rule_attr.queue_id,
&expected_completions,
true);
if (unlikely(ret && !poll_error)) {
mlx5hws_err(ctx,
"Moving complex BWC rule: poll failed (%d), attempting to move rest of the rules\n",
ret);
poll_error = true;
if (unlikely(ret)) {
if (ret == -ETIMEDOUT) {
mlx5hws_err(ctx,
"Moving complex BWC rule: timeout polling for completions (%d), aborting rehash\n",
ret);
return ret;
}
if (!poll_error) {
mlx5hws_err(ctx,
"Moving complex BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n",
ret);
poll_error = ret;
}
}
/* Done moving the rule to the new matcher,

@@ -1422,8 +1434,11 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
}
}
if (move_error || poll_error)
ret = -EINVAL;
/* Return the first error that happened */
if (unlikely(move_error))
return move_error;
if (unlikely(poll_error))
return poll_error;
return ret;
}

@@ -55,6 +55,7 @@ int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);

@@ -36,6 +36,7 @@ struct mlx5hws_cmd_set_fte_attr {
struct mlx5hws_cmd_ft_create_attr {
u8 type;
u8 level;
u16 uid;
bool rtc_valid;
bool decap_en;
bool reformat_en;

@@ -267,6 +267,7 @@ static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
tbl_attr.level = ft_attr->level;
tbl_attr.uid = ft_attr->uid;
tbl = mlx5hws_table_create(ctx, &tbl_attr);
if (!tbl) {
mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");

@@ -85,6 +85,7 @@ static int hws_matcher_create_end_ft_isolated(struct mlx5hws_matcher *matcher)
ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
tbl,
0,
&matcher->end_ft_id);
if (ret) {
mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n");

@@ -112,7 +113,9 @@ static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
if (mlx5hws_matcher_is_isolated(matcher))
ret = hws_matcher_create_end_ft_isolated(matcher);
else
ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl,
ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
tbl,
0,
&matcher->end_ft_id);
if (ret) {

@@ -75,6 +75,7 @@ struct mlx5hws_context_attr {
struct mlx5hws_table_attr {
enum mlx5hws_table_type type;
u32 level;
u16 uid;
};
enum mlx5hws_matcher_flow_src {

@@ -964,7 +964,6 @@ static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
return -ENOMEM;
MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries);
MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));
err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);

@@ -9,6 +9,7 @@ u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
}
static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
u16 uid,
struct mlx5hws_cmd_ft_create_attr *ft_attr)
{
ft_attr->type = tbl->fw_ft_type;

@@ -16,7 +17,9 @@ static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1;
else
ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1;
ft_attr->rtc_valid = true;
ft_attr->uid = uid;
}
static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,

@@ -119,12 +122,12 @@ static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32
int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
struct mlx5hws_table *tbl,
u32 *ft_id)
u16 uid, u32 *ft_id)
{
struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
int ret;
hws_table_init_next_ft_attr(tbl, &ft_attr);
hws_table_init_next_ft_attr(tbl, uid, &ft_attr);
hws_table_set_cap_attr(tbl, &ft_attr);
ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id);

@@ -189,7 +192,10 @@ static int hws_table_init(struct mlx5hws_table *tbl)
}
mutex_lock(&ctx->ctrl_lock);
ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id);
ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
tbl,
tbl->uid,
&tbl->ft_id);
if (ret) {
mlx5hws_err(tbl->ctx, "Failed to create flow table object\n");
mutex_unlock(&ctx->ctrl_lock);

@@ -239,6 +245,7 @@ struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx,
tbl->ctx = ctx;
tbl->type = attr->type;
tbl->level = attr->level;
tbl->uid = attr->uid;
ret = hws_table_init(tbl);
if (ret) {

@@ -18,6 +18,7 @@ struct mlx5hws_table {
enum mlx5hws_table_type type;
u32 fw_ft_type;
u32 level;
u16 uid;
struct list_head matchers_list;
struct list_head tbl_list_node;
struct mlx5hws_default_miss default_miss;

@@ -47,7 +48,7 @@ u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type,
int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
struct mlx5hws_table *tbl,
u32 *ft_id);
u16 uid, u32 *ft_id);
void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
u32 ft_id);