mirror of https://github.com/torvalds/linux.git
Merge branch 'slab/for-6.19/freelist_aba_t_cleanups' into slab/for-next
Merge series "slab: cmpxchg cleanups enabled by -fms-extensions" From the cover letter [1]: After learning about -fms-extensions being enabled for 6.19, I realized there is some cleanup potential in slub code by extending the definition and usage of freelist_aba_t, as it can now become an unnamed member of struct slab. This series performs the cleanup, with no functional changes intended. Additionally we turn freelist_aba_t to struct freelist_counters as it doesn't meet any criteria for being a typedef, per Documentation/process/coding-style.rst Based on the tag kbuild-ms-extensions-6.19 from git://git.kernel.org/pub/scm/linux/kernel/git/kbuild/linux Link: https://lore.kernel.org/all/20251107-slab-fms-cleanup-v1-0-650b1491ac9e@suse.cz/#t [1]
This commit is contained in:
commit
ed80cc758b
3
Makefile
3
Makefile
|
|
@ -1061,6 +1061,9 @@ NOSTDINC_FLAGS += -nostdinc
|
|||
# perform bounds checking.
|
||||
KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
|
||||
|
||||
# Allow including a tagged struct or union anonymously in another struct/union.
|
||||
KBUILD_CFLAGS += -fms-extensions
|
||||
|
||||
# disable invalid "can't wrap" optimizations for signed / pointers
|
||||
KBUILD_CFLAGS += -fno-strict-overflow
|
||||
|
||||
|
|
|
|||
|
|
@ -63,7 +63,7 @@ VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
|
|||
$(filter -Werror,$(KBUILD_CPPFLAGS)) \
|
||||
-Werror-implicit-function-declaration \
|
||||
-Wno-format-security \
|
||||
-std=gnu11
|
||||
-std=gnu11 -fms-extensions
|
||||
VDSO_CFLAGS += -O2
|
||||
# Some useful compiler-dependent flags from top-level Makefile
|
||||
VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign)
|
||||
|
|
@ -71,6 +71,7 @@ VDSO_CFLAGS += -fno-strict-overflow
|
|||
VDSO_CFLAGS += $(call cc32-option,-Werror=strict-prototypes)
|
||||
VDSO_CFLAGS += -Werror=date-time
|
||||
VDSO_CFLAGS += $(call cc32-option,-Werror=incompatible-pointer-types)
|
||||
VDSO_CFLAGS += $(if $(CONFIG_CC_IS_CLANG),-Wno-microsoft-anon-tag)
|
||||
|
||||
# Compile as THUMB2 or ARM. Unwinding via frame-pointers in THUMB2 is
|
||||
# unreliable.
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ ccflags-vdso := \
|
|||
cflags-vdso := $(ccflags-vdso) \
|
||||
-isystem $(shell $(CC) -print-file-name=include) \
|
||||
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
|
||||
-std=gnu11 -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \
|
||||
-std=gnu11 -fms-extensions -O2 -g -fno-strict-aliasing -fno-common -fno-builtin \
|
||||
-fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
|
||||
$(call cc-option, -fno-asynchronous-unwind-tables) \
|
||||
$(call cc-option, -fno-stack-protector)
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ KBUILD_CFLAGS += -fno-PIE -mno-space-regs -mdisable-fpregs -Os
|
|||
ifndef CONFIG_64BIT
|
||||
KBUILD_CFLAGS += -mfast-indirect-calls
|
||||
endif
|
||||
KBUILD_CFLAGS += -std=gnu11
|
||||
KBUILD_CFLAGS += -std=gnu11 -fms-extensions
|
||||
|
||||
LDFLAGS_vmlinux := -X -e startup --as-needed -T
|
||||
$(obj)/vmlinux: $(obj)/vmlinux.lds $(addprefix $(obj)/, $(OBJECTS)) $(LIBGCC) FORCE
|
||||
|
|
|
|||
|
|
@ -70,7 +70,7 @@ BOOTCPPFLAGS := -nostdinc $(LINUXINCLUDE)
|
|||
BOOTCPPFLAGS += -isystem $(shell $(BOOTCC) -print-file-name=include)
|
||||
|
||||
BOOTCFLAGS := $(BOOTTARGETFLAGS) \
|
||||
-std=gnu11 \
|
||||
-std=gnu11 -fms-extensions \
|
||||
-Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
|
||||
-fno-strict-aliasing -O2 \
|
||||
-msoft-float -mno-altivec -mno-vsx \
|
||||
|
|
@ -86,6 +86,7 @@ BOOTARFLAGS := -crD
|
|||
|
||||
ifdef CONFIG_CC_IS_CLANG
|
||||
BOOTCFLAGS += $(CLANG_FLAGS)
|
||||
BOOTCFLAGS += -Wno-microsoft-anon-tag
|
||||
BOOTAFLAGS += $(CLANG_FLAGS)
|
||||
endif
|
||||
|
||||
|
|
|
|||
|
|
@ -22,7 +22,7 @@ KBUILD_AFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -D__ASSEMBLY__
|
|||
ifndef CONFIG_AS_IS_LLVM
|
||||
KBUILD_AFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),$(aflags_dwarf))
|
||||
endif
|
||||
KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11
|
||||
KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11 -fms-extensions
|
||||
KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
|
||||
KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR
|
||||
KBUILD_CFLAGS_DECOMPRESSOR += -Wno-pointer-sign
|
||||
|
|
@ -35,6 +35,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-membe
|
|||
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
|
||||
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
|
||||
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_CC_NO_ARRAY_BOUNDS),-Wno-array-bounds)
|
||||
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_CC_IS_CLANG),-Wno-microsoft-anon-tag)
|
||||
|
||||
UTS_MACHINE := s390x
|
||||
STACK_SIZE := $(if $(CONFIG_KASAN),65536,$(if $(CONFIG_KMSAN),65536,16384))
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ CFLAGS_sha256.o := -D__NO_FORTIFY
|
|||
$(obj)/mem.o: $(srctree)/arch/s390/lib/mem.S FORCE
|
||||
$(call if_changed_rule,as_o_S)
|
||||
|
||||
KBUILD_CFLAGS := -std=gnu11 -fno-strict-aliasing -Wall -Wstrict-prototypes
|
||||
KBUILD_CFLAGS := -std=gnu11 -fms-extensions -fno-strict-aliasing -Wall -Wstrict-prototypes
|
||||
KBUILD_CFLAGS += -Wno-pointer-sign -Wno-sign-compare
|
||||
KBUILD_CFLAGS += -fno-zero-initialized-in-bss -fno-builtin -ffreestanding
|
||||
KBUILD_CFLAGS += -Os -m64 -msoft-float -fno-common
|
||||
|
|
@ -21,6 +21,7 @@ KBUILD_CFLAGS += -fno-stack-protector
|
|||
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
|
||||
KBUILD_CFLAGS += -D__DISABLE_EXPORTS
|
||||
KBUILD_CFLAGS += $(CLANG_FLAGS)
|
||||
KBUILD_CFLAGS += $(if $(CONFIG_CC_IS_CLANG),-Wno-microsoft-anon-tag)
|
||||
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
|
||||
KBUILD_AFLAGS := $(filter-out -DCC_USING_EXPOLINE,$(KBUILD_AFLAGS))
|
||||
KBUILD_AFLAGS += -D__DISABLE_EXPORTS
|
||||
|
|
|
|||
|
|
@ -48,7 +48,8 @@ endif
|
|||
|
||||
# How to compile the 16-bit code. Note we always compile for -march=i386;
|
||||
# that way we can complain to the user if the CPU is insufficient.
|
||||
REALMODE_CFLAGS := -std=gnu11 -m16 -g -Os -DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
|
||||
REALMODE_CFLAGS := -std=gnu11 -fms-extensions -m16 -g -Os \
|
||||
-DDISABLE_BRANCH_PROFILING -D__DISABLE_EXPORTS \
|
||||
-Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
|
||||
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
|
||||
-mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
|
||||
|
|
@ -60,6 +61,7 @@ REALMODE_CFLAGS += $(cc_stack_align4)
|
|||
REALMODE_CFLAGS += $(CLANG_FLAGS)
|
||||
ifdef CONFIG_CC_IS_CLANG
|
||||
REALMODE_CFLAGS += -Wno-gnu
|
||||
REALMODE_CFLAGS += -Wno-microsoft-anon-tag
|
||||
endif
|
||||
export REALMODE_CFLAGS
|
||||
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \
|
|||
# avoid errors with '-march=i386', and future flags may depend on the target to
|
||||
# be valid.
|
||||
KBUILD_CFLAGS := -m$(BITS) -O2 $(CLANG_FLAGS)
|
||||
KBUILD_CFLAGS += -std=gnu11
|
||||
KBUILD_CFLAGS += -std=gnu11 -fms-extensions
|
||||
KBUILD_CFLAGS += -fno-strict-aliasing -fPIE
|
||||
KBUILD_CFLAGS += -Wundef
|
||||
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
|
||||
|
|
@ -36,7 +36,10 @@ KBUILD_CFLAGS += -mno-mmx -mno-sse
|
|||
KBUILD_CFLAGS += -ffreestanding -fshort-wchar
|
||||
KBUILD_CFLAGS += -fno-stack-protector
|
||||
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
|
||||
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
|
||||
ifdef CONFIG_CC_IS_CLANG
|
||||
KBUILD_CFLAGS += -Wno-gnu
|
||||
KBUILD_CFLAGS += -Wno-microsoft-anon-tag
|
||||
endif
|
||||
KBUILD_CFLAGS += -Wno-pointer-sign
|
||||
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
|
||||
KBUILD_CFLAGS += -D__DISABLE_EXPORTS
|
||||
|
|
|
|||
|
|
@ -11,12 +11,12 @@ cflags-y := $(KBUILD_CFLAGS)
|
|||
|
||||
cflags-$(CONFIG_X86_32) := -march=i386
|
||||
cflags-$(CONFIG_X86_64) := -mcmodel=small
|
||||
cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 \
|
||||
cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ -std=gnu11 -fms-extensions \
|
||||
-fPIC -fno-strict-aliasing -mno-red-zone \
|
||||
-mno-mmx -mno-sse -fshort-wchar \
|
||||
-Wno-pointer-sign \
|
||||
$(call cc-disable-warning, address-of-packed-member) \
|
||||
$(call cc-disable-warning, gnu) \
|
||||
$(if $(CONFIG_CC_IS_CLANG),-Wno-gnu -Wno-microsoft-anon-tag) \
|
||||
-fno-asynchronous-unwind-tables \
|
||||
$(CLANG_FLAGS)
|
||||
|
||||
|
|
|
|||
|
|
@ -76,14 +76,14 @@ struct jfs_inode_info {
|
|||
struct {
|
||||
unchar _unused[16]; /* 16: */
|
||||
dxd_t _dxd; /* 16: */
|
||||
/* _inline may overflow into _inline_ea when needed */
|
||||
/* _inline_sym may overflow into _inline_ea when needed */
|
||||
/* _inline_ea may overlay the last part of
|
||||
* file._xtroot if maxentry = XTROOTINITSLOT
|
||||
*/
|
||||
union {
|
||||
struct {
|
||||
/* 128: inline symlink */
|
||||
unchar _inline[128];
|
||||
unchar _inline_sym[128];
|
||||
/* 128: inline extended attr */
|
||||
unchar _inline_ea[128];
|
||||
};
|
||||
|
|
@ -101,7 +101,7 @@ struct jfs_inode_info {
|
|||
#define i_imap u.file._imap
|
||||
#define i_dirtable u.dir._table
|
||||
#define i_dtroot u.dir._dtroot
|
||||
#define i_inline u.link._inline
|
||||
#define i_inline u.link._inline_sym
|
||||
#define i_inline_ea u.link._inline_ea
|
||||
#define i_inline_all u.link._inline_all
|
||||
|
||||
|
|
|
|||
52
mm/slab.h
52
mm/slab.h
|
|
@ -40,13 +40,29 @@ typedef u64 freelist_full_t;
|
|||
* Freelist pointer and counter to cmpxchg together, avoids the typical ABA
|
||||
* problems with cmpxchg of just a pointer.
|
||||
*/
|
||||
typedef union {
|
||||
struct {
|
||||
void *freelist;
|
||||
unsigned long counter;
|
||||
struct freelist_counters {
|
||||
union {
|
||||
struct {
|
||||
void *freelist;
|
||||
union {
|
||||
unsigned long counters;
|
||||
struct {
|
||||
unsigned inuse:16;
|
||||
unsigned objects:15;
|
||||
/*
|
||||
* If slab debugging is enabled then the
|
||||
* frozen bit can be reused to indicate
|
||||
* that the slab was corrupted
|
||||
*/
|
||||
unsigned frozen:1;
|
||||
};
|
||||
};
|
||||
};
|
||||
#ifdef system_has_freelist_aba
|
||||
freelist_full_t freelist_counters;
|
||||
#endif
|
||||
};
|
||||
freelist_full_t full;
|
||||
} freelist_aba_t;
|
||||
};
|
||||
|
||||
/* Reuses the bits in struct page */
|
||||
struct slab {
|
||||
|
|
@ -69,27 +85,7 @@ struct slab {
|
|||
#endif
|
||||
};
|
||||
/* Double-word boundary */
|
||||
union {
|
||||
struct {
|
||||
void *freelist; /* first free object */
|
||||
union {
|
||||
unsigned long counters;
|
||||
struct {
|
||||
unsigned inuse:16;
|
||||
unsigned objects:15;
|
||||
/*
|
||||
* If slab debugging is enabled then the
|
||||
* frozen bit can be reused to indicate
|
||||
* that the slab was corrupted
|
||||
*/
|
||||
unsigned frozen:1;
|
||||
};
|
||||
};
|
||||
};
|
||||
#ifdef system_has_freelist_aba
|
||||
freelist_aba_t freelist_counter;
|
||||
#endif
|
||||
};
|
||||
struct freelist_counters;
|
||||
};
|
||||
struct rcu_head rcu_head;
|
||||
};
|
||||
|
|
@ -114,7 +110,7 @@ SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
|
|||
#undef SLAB_MATCH
|
||||
static_assert(sizeof(struct slab) <= sizeof(struct page));
|
||||
#if defined(system_has_freelist_aba)
|
||||
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
|
||||
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(struct freelist_counters)));
|
||||
#endif
|
||||
|
||||
/**
|
||||
|
|
|
|||
155
mm/slub.c
155
mm/slub.c
|
|
@ -410,18 +410,22 @@ enum stat_item {
|
|||
NR_SLUB_STAT_ITEMS
|
||||
};
|
||||
|
||||
struct freelist_tid {
|
||||
union {
|
||||
struct {
|
||||
void *freelist; /* Pointer to next available object */
|
||||
unsigned long tid; /* Globally unique transaction id */
|
||||
};
|
||||
freelist_full_t freelist_tid;
|
||||
};
|
||||
};
|
||||
|
||||
/*
|
||||
* When changing the layout, make sure freelist and tid are still compatible
|
||||
* with this_cpu_cmpxchg_double() alignment requirements.
|
||||
*/
|
||||
struct kmem_cache_cpu {
|
||||
union {
|
||||
struct {
|
||||
void **freelist; /* Pointer to next available object */
|
||||
unsigned long tid; /* Globally unique transaction id */
|
||||
};
|
||||
freelist_aba_t freelist_tid;
|
||||
};
|
||||
struct freelist_tid;
|
||||
struct slab *slab; /* The slab from which we are allocating */
|
||||
#ifdef CONFIG_SLUB_CPU_PARTIAL
|
||||
struct slab *partial; /* Partially allocated slabs */
|
||||
|
|
@ -756,32 +760,29 @@ static __always_inline void slab_unlock(struct slab *slab)
|
|||
}
|
||||
|
||||
static inline bool
|
||||
__update_freelist_fast(struct slab *slab,
|
||||
void *freelist_old, unsigned long counters_old,
|
||||
void *freelist_new, unsigned long counters_new)
|
||||
__update_freelist_fast(struct slab *slab, struct freelist_counters *old,
|
||||
struct freelist_counters *new)
|
||||
{
|
||||
#ifdef system_has_freelist_aba
|
||||
freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
|
||||
freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };
|
||||
|
||||
return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
|
||||
return try_cmpxchg_freelist(&slab->freelist_counters,
|
||||
&old->freelist_counters,
|
||||
new->freelist_counters);
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline bool
|
||||
__update_freelist_slow(struct slab *slab,
|
||||
void *freelist_old, unsigned long counters_old,
|
||||
void *freelist_new, unsigned long counters_new)
|
||||
__update_freelist_slow(struct slab *slab, struct freelist_counters *old,
|
||||
struct freelist_counters *new)
|
||||
{
|
||||
bool ret = false;
|
||||
|
||||
slab_lock(slab);
|
||||
if (slab->freelist == freelist_old &&
|
||||
slab->counters == counters_old) {
|
||||
slab->freelist = freelist_new;
|
||||
slab->counters = counters_new;
|
||||
if (slab->freelist == old->freelist &&
|
||||
slab->counters == old->counters) {
|
||||
slab->freelist = new->freelist;
|
||||
slab->counters = new->counters;
|
||||
ret = true;
|
||||
}
|
||||
slab_unlock(slab);
|
||||
|
|
@ -797,22 +798,18 @@ __update_freelist_slow(struct slab *slab,
|
|||
* interrupt the operation.
|
||||
*/
|
||||
static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
|
||||
void *freelist_old, unsigned long counters_old,
|
||||
void *freelist_new, unsigned long counters_new,
|
||||
const char *n)
|
||||
struct freelist_counters *old, struct freelist_counters *new, const char *n)
|
||||
{
|
||||
bool ret;
|
||||
|
||||
if (USE_LOCKLESS_FAST_PATH())
|
||||
lockdep_assert_irqs_disabled();
|
||||
|
||||
if (s->flags & __CMPXCHG_DOUBLE) {
|
||||
ret = __update_freelist_fast(slab, freelist_old, counters_old,
|
||||
freelist_new, counters_new);
|
||||
} else {
|
||||
ret = __update_freelist_slow(slab, freelist_old, counters_old,
|
||||
freelist_new, counters_new);
|
||||
}
|
||||
if (s->flags & __CMPXCHG_DOUBLE)
|
||||
ret = __update_freelist_fast(slab, old, new);
|
||||
else
|
||||
ret = __update_freelist_slow(slab, old, new);
|
||||
|
||||
if (likely(ret))
|
||||
return true;
|
||||
|
||||
|
|
@ -827,21 +824,17 @@ static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *sla
|
|||
}
|
||||
|
||||
static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
|
||||
void *freelist_old, unsigned long counters_old,
|
||||
void *freelist_new, unsigned long counters_new,
|
||||
const char *n)
|
||||
struct freelist_counters *old, struct freelist_counters *new, const char *n)
|
||||
{
|
||||
bool ret;
|
||||
|
||||
if (s->flags & __CMPXCHG_DOUBLE) {
|
||||
ret = __update_freelist_fast(slab, freelist_old, counters_old,
|
||||
freelist_new, counters_new);
|
||||
ret = __update_freelist_fast(slab, old, new);
|
||||
} else {
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
ret = __update_freelist_slow(slab, freelist_old, counters_old,
|
||||
freelist_new, counters_new);
|
||||
ret = __update_freelist_slow(slab, old, new);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
if (likely(ret))
|
||||
|
|
@ -3740,8 +3733,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
|
|||
void *nextfree, *freelist_iter, *freelist_tail;
|
||||
int tail = DEACTIVATE_TO_HEAD;
|
||||
unsigned long flags = 0;
|
||||
struct slab new;
|
||||
struct slab old;
|
||||
struct freelist_counters old, new;
|
||||
|
||||
if (READ_ONCE(slab->freelist)) {
|
||||
stat(s, DEACTIVATE_REMOTE_FREES);
|
||||
|
|
@ -3790,10 +3782,7 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
|
|||
} else {
|
||||
new.freelist = old.freelist;
|
||||
}
|
||||
} while (!slab_update_freelist(s, slab,
|
||||
old.freelist, old.counters,
|
||||
new.freelist, new.counters,
|
||||
"unfreezing slab"));
|
||||
} while (!slab_update_freelist(s, slab, &old, &new, "unfreezing slab"));
|
||||
|
||||
/*
|
||||
* Stage three: Manipulate the slab list based on the updated state.
|
||||
|
|
@ -4381,11 +4370,11 @@ __update_cpu_freelist_fast(struct kmem_cache *s,
|
|||
void *freelist_old, void *freelist_new,
|
||||
unsigned long tid)
|
||||
{
|
||||
freelist_aba_t old = { .freelist = freelist_old, .counter = tid };
|
||||
freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) };
|
||||
struct freelist_tid old = { .freelist = freelist_old, .tid = tid };
|
||||
struct freelist_tid new = { .freelist = freelist_new, .tid = next_tid(tid) };
|
||||
|
||||
return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full,
|
||||
&old.full, new.full);
|
||||
return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid,
|
||||
&old.freelist_tid, new.freelist_tid);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -4398,27 +4387,24 @@ __update_cpu_freelist_fast(struct kmem_cache *s,
|
|||
*/
|
||||
static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
|
||||
{
|
||||
struct slab new;
|
||||
unsigned long counters;
|
||||
void *freelist;
|
||||
struct freelist_counters old, new;
|
||||
|
||||
lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
|
||||
|
||||
do {
|
||||
freelist = slab->freelist;
|
||||
counters = slab->counters;
|
||||
old.freelist = slab->freelist;
|
||||
old.counters = slab->counters;
|
||||
|
||||
new.counters = counters;
|
||||
new.freelist = NULL;
|
||||
new.counters = old.counters;
|
||||
|
||||
new.inuse = slab->objects;
|
||||
new.frozen = freelist != NULL;
|
||||
new.inuse = old.objects;
|
||||
new.frozen = old.freelist != NULL;
|
||||
|
||||
} while (!__slab_update_freelist(s, slab,
|
||||
freelist, counters,
|
||||
NULL, new.counters,
|
||||
"get_freelist"));
|
||||
|
||||
return freelist;
|
||||
} while (!__slab_update_freelist(s, slab, &old, &new, "get_freelist"));
|
||||
|
||||
return old.freelist;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -4426,26 +4412,22 @@ static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
|
|||
*/
|
||||
static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab)
|
||||
{
|
||||
struct slab new;
|
||||
unsigned long counters;
|
||||
void *freelist;
|
||||
struct freelist_counters old, new;
|
||||
|
||||
do {
|
||||
freelist = slab->freelist;
|
||||
counters = slab->counters;
|
||||
old.freelist = slab->freelist;
|
||||
old.counters = slab->counters;
|
||||
|
||||
new.counters = counters;
|
||||
new.freelist = NULL;
|
||||
new.counters = old.counters;
|
||||
VM_BUG_ON(new.frozen);
|
||||
|
||||
new.inuse = slab->objects;
|
||||
new.inuse = old.objects;
|
||||
new.frozen = 1;
|
||||
|
||||
} while (!slab_update_freelist(s, slab,
|
||||
freelist, counters,
|
||||
NULL, new.counters,
|
||||
"freeze_slab"));
|
||||
} while (!slab_update_freelist(s, slab, &old, &new, "freeze_slab"));
|
||||
|
||||
return freelist;
|
||||
return old.freelist;
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
@ -5877,10 +5859,8 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
|
|||
unsigned long addr)
|
||||
|
||||
{
|
||||
void *old_head;
|
||||
bool was_frozen, was_full;
|
||||
struct slab new;
|
||||
unsigned long counters;
|
||||
struct freelist_counters old, new;
|
||||
struct kmem_cache_node *n = NULL;
|
||||
unsigned long flags;
|
||||
bool on_node_partial;
|
||||
|
|
@ -5904,13 +5884,19 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
|
|||
spin_unlock_irqrestore(&n->list_lock, flags);
|
||||
n = NULL;
|
||||
}
|
||||
old_head = slab->freelist;
|
||||
counters = slab->counters;
|
||||
set_freepointer(s, tail, old_head);
|
||||
new.counters = counters;
|
||||
was_frozen = !!new.frozen;
|
||||
was_full = (old_head == NULL);
|
||||
|
||||
old.freelist = slab->freelist;
|
||||
old.counters = slab->counters;
|
||||
|
||||
was_full = (old.freelist == NULL);
|
||||
was_frozen = old.frozen;
|
||||
|
||||
set_freepointer(s, tail, old.freelist);
|
||||
|
||||
new.freelist = head;
|
||||
new.counters = old.counters;
|
||||
new.inuse -= cnt;
|
||||
|
||||
/*
|
||||
* Might need to be taken off (due to becoming empty) or added
|
||||
* to (due to not being full anymore) the partial list.
|
||||
|
|
@ -5939,10 +5925,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
|
|||
}
|
||||
}
|
||||
|
||||
} while (!slab_update_freelist(s, slab,
|
||||
old_head, counters,
|
||||
head, new.counters,
|
||||
"__slab_free"));
|
||||
} while (!slab_update_freelist(s, slab, &old, &new, "__slab_free"));
|
||||
|
||||
if (likely(!n)) {
|
||||
|
||||
|
|
|
|||
|
|
@ -28,8 +28,10 @@ endif
|
|||
KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds
|
||||
|
||||
ifdef CONFIG_CC_IS_CLANG
|
||||
# The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable.
|
||||
# The kernel builds with '-std=gnu11' and '-fms-extensions' so use of GNU and
|
||||
# Microsoft extensions is acceptable.
|
||||
KBUILD_CFLAGS += -Wno-gnu
|
||||
KBUILD_CFLAGS += -Wno-microsoft-anon-tag
|
||||
|
||||
# Clang checks for overflow/truncation with '%p', while GCC does not:
|
||||
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=111219
|
||||
|
|
|
|||
Loading…
Reference in New Issue