commit 8e2baac0f2
Merge tag 'cache-for-v6.19' of https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux into soc/drivers-late

standalone cache drivers for v6.19

ccache:
Add a compatible for the pic64gx SoC. No driver change is needed, as it
falls back to the PolarFire SoC compatible.

hisi hha/generic cpu cache maintenance:
Add support for a non-architectural mechanism for invalidating memory
regions, needed for some CXL implementations on arm64 (and probably
elsewhere in the future). The HiSilicon Hydra Home Agent is the first
driver to provide this support.

Signed-off-by: Conor Dooley <conor.dooley@microchip.com>

* tag 'cache-for-v6.19' of https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux:
  MAINTAINERS: refer to intended file in STANDALONE CACHE CONTROLLER DRIVERS
  cache: Support cache maintenance for HiSilicon SoC Hydra Home Agent
  cache: Make top level Kconfig menu a boolean dependent on RISCV
  MAINTAINERS: Add Jonathan Cameron to drivers/cache and add lib/cache_maint.c + header
  arm64: Select GENERIC_CPU_CACHE_MAINTENANCE
  lib: Support ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
  memregion: Support fine grained invalidate by cpu_cache_invalidate_memregion()
  memregion: Drop unused IORES_DESC_* parameter from cpu_cache_invalidate_memregion()
  dt-bindings: cache: sifive,ccache0: add a pic64gx compatible

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
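As orientation for the diffs below: a driver implements the wbinv()/done() callbacks, embeds a cache_coherency_ops_inst as the first member of its private structure, and registers that instance with the generic framework. A minimal sketch follows (hypothetical demo_* names; the real consumer is drivers/cache/hisi_soc_hha.c in this merge):

#include <linux/cache_coherency.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/io.h>

struct demo_agent {
	struct cache_coherency_ops_inst cci;	/* must be the first member */
	void __iomem *base;
};

static int demo_wbinv(struct cache_coherency_ops_inst *cci,
		      struct cc_inval_params *invp)
{
	struct demo_agent *agent = container_of(cci, struct demo_agent, cci);

	if (!agent->base)
		return -ENXIO;
	/* kick off write-back + invalidate of [invp->addr, invp->addr + invp->size) */
	return 0;
}

static int demo_done(struct cache_coherency_ops_inst *cci)
{
	/* poll the hardware until the operation started by demo_wbinv() completes */
	return 0;
}

static const struct cache_coherency_ops demo_ops = {
	.wbinv = demo_wbinv,
	.done = demo_done,
};

static int demo_register(void)
{
	struct demo_agent *agent;

	agent = cache_coherency_ops_instance_alloc(&demo_ops,
						   struct demo_agent, cci);
	if (!agent)
		return -ENOMEM;

	return cache_coherency_ops_instance_register(&agent->cci);
}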
--- a/Documentation/devicetree/bindings/cache/sifive,ccache0.yaml
+++ b/Documentation/devicetree/bindings/cache/sifive,ccache0.yaml
@@ -48,6 +48,11 @@ properties:
           - const: microchip,mpfs-ccache
           - const: sifive,fu540-c000-ccache
           - const: cache
+      - items:
+          - const: microchip,pic64gx-ccache
+          - const: microchip,mpfs-ccache
+          - const: sifive,fu540-c000-ccache
+          - const: cache
 
   cache-block-size:
     const: 64
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -24451,10 +24451,13 @@ F:	drivers/staging/
 
 STANDALONE CACHE CONTROLLER DRIVERS
 M:	Conor Dooley <conor@kernel.org>
+M:	Jonathan Cameron <jonathan.cameron@huawei.com>
 S:	Maintained
 T:	git https://git.kernel.org/pub/scm/linux/kernel/git/conor/linux.git/
 F:	Documentation/devicetree/bindings/cache/
 F:	drivers/cache
+F:	include/linux/cache_coherency.h
+F:	lib/cache_maint.c
 
 STARFIRE/DURALAN NETWORK DRIVER
 M:	Ion Badulescu <ionut@badula.org>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -21,6 +21,7 @@ config ARM64
 	select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
 	select ARCH_HAS_CACHE_LINE_SIZE
 	select ARCH_HAS_CC_PLATFORM
+	select ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
 	select ARCH_HAS_CURRENT_STACK_POINTER
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEBUG_VM_PGTABLE
@@ -149,6 +150,7 @@ config ARM64
 	select GENERIC_ARCH_TOPOLOGY
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select GENERIC_CPU_AUTOPROBE
+	select GENERIC_CPU_CACHE_MAINTENANCE
 	select GENERIC_CPU_DEVICES
 	select GENERIC_CPU_VULNERABILITIES
 	select GENERIC_EARLY_IOREMAP
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -368,7 +368,7 @@ bool cpu_cache_has_invalidate_memregion(void)
 }
 EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM");
 
-int cpu_cache_invalidate_memregion(int res_desc)
+int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
 {
 	if (WARN_ON_ONCE(!cpu_cache_has_invalidate_memregion()))
 		return -ENXIO;
--- a/drivers/cache/Kconfig
+++ b/drivers/cache/Kconfig
@@ -1,9 +1,17 @@
 # SPDX-License-Identifier: GPL-2.0
-menu "Cache Drivers"
+menuconfig CACHEMAINT_FOR_DMA
+	bool "Cache management for noncoherent DMA"
+	depends on RISCV
+	default y
+	help
+	  These drivers implement support for noncoherent DMA master devices
+	  on platforms that lack the standard CPU interfaces for this.
+
+if CACHEMAINT_FOR_DMA
 
 config AX45MP_L2_CACHE
 	bool "Andes Technology AX45MP L2 Cache controller"
 	depends on RISCV
 	select RISCV_NONSTANDARD_CACHE_OPS
 	help
 	  Support for the L2 cache controller on Andes Technology AX45MP platforms.
@@ -16,7 +24,6 @@ config SIFIVE_CCACHE
 
 config STARFIVE_STARLINK_CACHE
 	bool "StarFive StarLink Cache controller"
-	depends on RISCV
 	depends on ARCH_STARFIVE
 	depends on 64BIT
 	select RISCV_DMA_NONCOHERENT
@@ -24,4 +31,26 @@ config STARFIVE_STARLINK_CACHE
 	help
 	  Support for the StarLink cache controller IP from StarFive.
 
-endmenu
+endif #CACHEMAINT_FOR_DMA
+
+menuconfig CACHEMAINT_FOR_HOTPLUG
+	bool "Cache management for memory hot plug like operations"
+	depends on GENERIC_CPU_CACHE_MAINTENANCE
+	help
+	  These drivers implement cache management for flows where it is necessary
+	  to flush data from all host caches.
+
+if CACHEMAINT_FOR_HOTPLUG
+
+config HISI_SOC_HHA
+	tristate "HiSilicon Hydra Home Agent (HHA) device driver"
+	depends on (ARM64 && ACPI) || COMPILE_TEST
+	help
+	  The Hydra Home Agent (HHA) is responsible for cache coherency
+	  on the SoC. This driver enables the cache maintenance functions of
+	  the HHA.
+
+	  This driver can be built as a module. If so, the module will be
+	  called hisi_soc_hha.
+
+endif #CACHEMAINT_FOR_HOTPLUG
--- a/drivers/cache/Makefile
+++ b/drivers/cache/Makefile
@@ -3,3 +3,5 @@
 obj-$(CONFIG_AX45MP_L2_CACHE)		+= ax45mp_cache.o
 obj-$(CONFIG_SIFIVE_CCACHE)		+= sifive_ccache.o
 obj-$(CONFIG_STARFIVE_STARLINK_CACHE)	+= starfive_starlink_cache.o
+
+obj-$(CONFIG_HISI_SOC_HHA)		+= hisi_soc_hha.o
--- /dev/null
+++ b/drivers/cache/hisi_soc_hha.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for HiSilicon Hydra Home Agent (HHA).
+ *
+ * Copyright (c) 2025 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang <yangyicong@hisilicon.com>
+ *         Yushan Wang <wangyushan12@huawei.com>
+ *
+ * A system typically contains multiple HHAs. Each is responsible for a subset
+ * of the physical addresses in the system, but interleave can make the mapping
+ * from a particular cache line to a responsible HHA complex. As such no
+ * filtering is done in the driver, with the hardware being responsible for
+ * responding with success even if it was not responsible for any addresses
+ * in the range on which the operation was requested.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cache_coherency.h>
+#include <linux/dev_printk.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/memregion.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+
+#define HISI_HHA_CTRL			0x5004
+#define   HISI_HHA_CTRL_EN		BIT(0)
+#define   HISI_HHA_CTRL_RANGE		BIT(1)
+#define   HISI_HHA_CTRL_TYPE		GENMASK(3, 2)
+#define HISI_HHA_START_L		0x5008
+#define HISI_HHA_START_H		0x500c
+#define HISI_HHA_LEN_L			0x5010
+#define HISI_HHA_LEN_H			0x5014
+
+/* The maintenance operation is performed at a 128 byte granularity */
+#define HISI_HHA_MAINT_ALIGN		128
+
+#define HISI_HHA_POLL_GAP_US		10
+#define HISI_HHA_POLL_TIMEOUT_US	50000
+
+struct hisi_soc_hha {
+	/* Must be first element */
+	struct cache_coherency_ops_inst cci;
+	/* Locks HHA instance to forbid overlapping access. */
+	struct mutex lock;
+	void __iomem *base;
+};
+
+static bool hisi_hha_cache_maintain_wait_finished(struct hisi_soc_hha *soc_hha)
+{
+	u32 val;
+
+	return !readl_poll_timeout_atomic(soc_hha->base + HISI_HHA_CTRL, val,
+					  !(val & HISI_HHA_CTRL_EN),
+					  HISI_HHA_POLL_GAP_US,
+					  HISI_HHA_POLL_TIMEOUT_US);
+}
+
+static int hisi_soc_hha_wbinv(struct cache_coherency_ops_inst *cci,
+			      struct cc_inval_params *invp)
+{
+	struct hisi_soc_hha *soc_hha =
+		container_of(cci, struct hisi_soc_hha, cci);
+	phys_addr_t top, addr = invp->addr;
+	size_t size = invp->size;
+	u32 reg;
+
+	if (!size)
+		return -EINVAL;
+
+	addr = ALIGN_DOWN(addr, HISI_HHA_MAINT_ALIGN);
+	top = ALIGN(addr + size, HISI_HHA_MAINT_ALIGN);
+	size = top - addr;
+
+	guard(mutex)(&soc_hha->lock);
+
+	if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
+		return -EBUSY;
+
+	/*
+	 * Hardware will search for addresses ranging [addr, addr + size - 1],
+	 * last byte included, and perform maintenance in 128 byte granules
+	 * on those cachelines which contain the addresses. If a given instance
+	 * is either not responsible for a cacheline or that cacheline is not
+	 * currently present then the search will fail, no operation will be
+	 * necessary and the device will report success.
+	 */
+	size -= 1;
+
+	writel(lower_32_bits(addr), soc_hha->base + HISI_HHA_START_L);
+	writel(upper_32_bits(addr), soc_hha->base + HISI_HHA_START_H);
+	writel(lower_32_bits(size), soc_hha->base + HISI_HHA_LEN_L);
+	writel(upper_32_bits(size), soc_hha->base + HISI_HHA_LEN_H);
+
+	reg = FIELD_PREP(HISI_HHA_CTRL_TYPE, 1); /* Clean Invalid */
+	reg |= HISI_HHA_CTRL_RANGE | HISI_HHA_CTRL_EN;
+	writel(reg, soc_hha->base + HISI_HHA_CTRL);
+
+	return 0;
+}
+
+static int hisi_soc_hha_done(struct cache_coherency_ops_inst *cci)
+{
+	struct hisi_soc_hha *soc_hha =
+		container_of(cci, struct hisi_soc_hha, cci);
+
+	guard(mutex)(&soc_hha->lock);
+	if (!hisi_hha_cache_maintain_wait_finished(soc_hha))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static const struct cache_coherency_ops hha_ops = {
+	.wbinv = hisi_soc_hha_wbinv,
+	.done = hisi_soc_hha_done,
+};
+
+static int hisi_soc_hha_probe(struct platform_device *pdev)
+{
+	struct hisi_soc_hha *soc_hha;
+	struct resource *mem;
+	int ret;
+
+	soc_hha = cache_coherency_ops_instance_alloc(&hha_ops,
+						     struct hisi_soc_hha, cci);
+	if (!soc_hha)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, soc_hha);
+
+	mutex_init(&soc_hha->lock);
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		ret = -ENOMEM;
+		goto err_free_cci;
+	}
+
+	soc_hha->base = ioremap(mem->start, resource_size(mem));
+	if (!soc_hha->base) {
+		ret = dev_err_probe(&pdev->dev, -ENOMEM,
+				    "failed to remap io memory");
+		goto err_free_cci;
+	}
+
+	ret = cache_coherency_ops_instance_register(&soc_hha->cci);
+	if (ret)
+		goto err_iounmap;
+
+	return 0;
+
+err_iounmap:
+	iounmap(soc_hha->base);
+err_free_cci:
+	cache_coherency_ops_instance_put(&soc_hha->cci);
+	return ret;
+}
+
+static void hisi_soc_hha_remove(struct platform_device *pdev)
+{
+	struct hisi_soc_hha *soc_hha = platform_get_drvdata(pdev);
+
+	cache_coherency_ops_instance_unregister(&soc_hha->cci);
+	iounmap(soc_hha->base);
+	cache_coherency_ops_instance_put(&soc_hha->cci);
+}
+
+static const struct acpi_device_id hisi_soc_hha_ids[] = {
+	{ "HISI0511", },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, hisi_soc_hha_ids);
+
+static struct platform_driver hisi_soc_hha_driver = {
+	.driver = {
+		.name = "hisi_soc_hha",
+		.acpi_match_table = hisi_soc_hha_ids,
+	},
+	.probe = hisi_soc_hha_probe,
+	.remove = hisi_soc_hha_remove,
+};
+
+module_platform_driver(hisi_soc_hha_driver);
+
+MODULE_IMPORT_NS("CACHE_COHERENCY");
+MODULE_DESCRIPTION("HiSilicon Hydra Home Agent driver supporting cache maintenance");
+MODULE_AUTHOR("Yicong Yang <yangyicong@hisilicon.com>");
+MODULE_AUTHOR("Yushan Wang <wangyushan12@huawei.com>");
+MODULE_LICENSE("GPL");
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -236,7 +236,10 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
 		return -ENXIO;
 	}
 
-	cpu_cache_invalidate_memregion(IORES_DESC_CXL);
+	if (!cxlr->params.res)
+		return -ENXIO;
+	cpu_cache_invalidate_memregion(cxlr->params.res->start,
+				       resource_size(cxlr->params.res));
 	return 0;
 }
 
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -110,7 +110,7 @@ static void nd_region_remove(struct device *dev)
 	 * here is ok.
 	 */
 	if (cpu_cache_has_invalidate_memregion())
-		cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
+		cpu_cache_invalidate_all();
 }
 
 static int child_notify(struct device *dev, void *data)
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -90,7 +90,7 @@ static int nd_region_invalidate_memregion(struct nd_region *nd_region)
 		}
 	}
 
-	cpu_cache_invalidate_memregion(IORES_DESC_PERSISTENT_MEMORY);
+	cpu_cache_invalidate_all();
 out:
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
--- /dev/null
+++ b/include/linux/cache_coherency.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cache coherency maintenance operation device drivers
+ *
+ * Copyright Huawei 2025
+ */
+#ifndef _LINUX_CACHE_COHERENCY_H_
+#define _LINUX_CACHE_COHERENCY_H_
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+
+struct cc_inval_params {
+	phys_addr_t addr;
+	size_t size;
+};
+
+struct cache_coherency_ops_inst;
+
+struct cache_coherency_ops {
+	int (*wbinv)(struct cache_coherency_ops_inst *cci,
+		     struct cc_inval_params *invp);
+	int (*done)(struct cache_coherency_ops_inst *cci);
+};
+
+struct cache_coherency_ops_inst {
+	struct kref kref;
+	struct list_head node;
+	const struct cache_coherency_ops *ops;
+};
+
+int cache_coherency_ops_instance_register(struct cache_coherency_ops_inst *cci);
+void cache_coherency_ops_instance_unregister(struct cache_coherency_ops_inst *cci);
+
+struct cache_coherency_ops_inst *
+_cache_coherency_ops_instance_alloc(const struct cache_coherency_ops *ops,
+				    size_t size);
+/**
+ * cache_coherency_ops_instance_alloc - Allocate cache coherency ops instance
+ * @ops: Cache maintenance operations
+ * @drv_struct: structure that contains the struct cache_coherency_ops_inst
+ * @member: Name of the struct cache_coherency_ops_inst member in @drv_struct.
+ *
+ * This allocates a driver specific structure and initializes the
+ * cache_coherency_ops_inst embedded in the drv_struct. Upon success the
+ * pointer must be freed via cache_coherency_ops_instance_put().
+ *
+ * Returns a &drv_struct * on success, %NULL on error.
+ */
+#define cache_coherency_ops_instance_alloc(ops, drv_struct, member)	\
+({									\
+	static_assert(__same_type(struct cache_coherency_ops_inst,	\
+				  ((drv_struct *)NULL)->member));	\
+	static_assert(offsetof(drv_struct, member) == 0);		\
+	(drv_struct *)_cache_coherency_ops_instance_alloc(ops,		\
+					sizeof(drv_struct));		\
+})
+void cache_coherency_ops_instance_put(struct cache_coherency_ops_inst *cci);
+
+#endif
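The two static_asserts in the macro above encode its layout contract: the allocation returns a cache_coherency_ops_inst that is cast directly to the driver structure, so the named member must have the right type and sit at offset 0. For illustration, a hypothetical layout that fails to compile:

struct bad_agent {
	void __iomem *base;
	struct cache_coherency_ops_inst cci;	/* not at offset 0 */
};

/*
 * cache_coherency_ops_instance_alloc(&some_ops, struct bad_agent, cci)
 * trips static_assert(offsetof(drv_struct, member) == 0), because the
 * (drv_struct *) cast is only valid when the instance is the first member.
 */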
--- a/include/linux/memregion.h
+++ b/include/linux/memregion.h
@@ -26,8 +26,10 @@ static inline void memregion_free(int id)
 
 /**
  * cpu_cache_invalidate_memregion - drop any CPU cached data for
- *	memregions described by @res_desc
- * @res_desc: one of the IORES_DESC_* types
+ *	memregion
+ * @start: start physical address of the target memory region.
+ * @len: length of the target memory region. -1 for all the regions of
+ *	the target type.
  *
  * Perform cache maintenance after a memory event / operation that
  * changes the contents of physical memory in a cache-incoherent manner.
@@ -46,7 +48,7 @@ static inline void memregion_free(int id)
  * the cache maintenance.
  */
 #ifdef CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
-int cpu_cache_invalidate_memregion(int res_desc);
+int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len);
 bool cpu_cache_has_invalidate_memregion(void);
 #else
 static inline bool cpu_cache_has_invalidate_memregion(void)
@@ -54,10 +56,16 @@ static inline bool cpu_cache_has_invalidate_memregion(void)
 	return false;
 }
 
-static inline int cpu_cache_invalidate_memregion(int res_desc)
+static inline int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
 {
 	WARN_ON_ONCE("CPU cache invalidation required");
 	return -ENXIO;
 }
 #endif
+
+static inline int cpu_cache_invalidate_all(void)
+{
+	return cpu_cache_invalidate_memregion(0, -1);
+}
+
 #endif /* _MEMREGION_H_ */
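With the IORES_DESC_* selector gone, callers pass an explicit physical range, or use cpu_cache_invalidate_all() when no single range applies. A sketch of a hot-unplug style caller (hypothetical demo_* name, mirroring the CXL and nvdimm call sites above):

#include <linux/ioport.h>
#include <linux/memregion.h>

static int demo_drop_cpu_caches(struct resource *res)
{
	/* advisory only: instance registration can race with this check */
	if (!cpu_cache_has_invalidate_memregion())
		return -ENXIO;

	return cpu_cache_invalidate_memregion(res->start, resource_size(res));
}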
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -542,6 +542,9 @@ config MEMREGION
 config ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION
 	bool
 
+config GENERIC_CPU_CACHE_MAINTENANCE
+	bool
+
 config ARCH_HAS_MEMREMAP_COMPAT_ALIGN
 	bool
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -127,6 +127,8 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 
+obj-$(CONFIG_GENERIC_CPU_CACHE_MAINTENANCE) += cache_maint.o
+
 lib-y += logic_pio.o
 
 lib-$(CONFIG_INDIRECT_IOMEM) += logic_iomem.o
--- /dev/null
+++ b/lib/cache_maint.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic support for Memory System Cache Maintenance operations.
+ *
+ * Coherency maintenance drivers register with this simple framework that will
+ * iterate over each registered instance to first kick off invalidation and
+ * then to wait until it is complete.
+ *
+ * If no implementations are registered yet, cpu_cache_has_invalidate_memregion()
+ * will return false. If this runs concurrently with unregistration then a
+ * race exists, but this is no worse than the case where the operations instance
+ * responsible for a given memory region has not yet registered.
+ */
+#include <linux/cache_coherency.h>
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/export.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/memregion.h>
+#include <linux/module.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+
+static LIST_HEAD(cache_ops_instance_list);
+static DECLARE_RWSEM(cache_ops_instance_list_lock);
+
+static void __cache_coherency_ops_instance_free(struct kref *kref)
+{
+	struct cache_coherency_ops_inst *cci =
+		container_of(kref, struct cache_coherency_ops_inst, kref);
+	kfree(cci);
+}
+
+void cache_coherency_ops_instance_put(struct cache_coherency_ops_inst *cci)
+{
+	kref_put(&cci->kref, __cache_coherency_ops_instance_free);
+}
+EXPORT_SYMBOL_GPL(cache_coherency_ops_instance_put);
+
+static int cache_inval_one(struct cache_coherency_ops_inst *cci, void *data)
+{
+	if (!cci->ops)
+		return -EINVAL;
+
+	return cci->ops->wbinv(cci, data);
+}
+
+static int cache_inval_done_one(struct cache_coherency_ops_inst *cci)
+{
+	if (!cci->ops)
+		return -EINVAL;
+
+	if (!cci->ops->done)
+		return 0;
+
+	return cci->ops->done(cci);
+}
+
+static int cache_invalidate_memregion(phys_addr_t addr, size_t size)
+{
+	int ret;
+	struct cache_coherency_ops_inst *cci;
+	struct cc_inval_params params = {
+		.addr = addr,
+		.size = size,
+	};
+
+	guard(rwsem_read)(&cache_ops_instance_list_lock);
+	list_for_each_entry(cci, &cache_ops_instance_list, node) {
+		ret = cache_inval_one(cci, &params);
+		if (ret)
+			return ret;
+	}
+	list_for_each_entry(cci, &cache_ops_instance_list, node) {
+		ret = cache_inval_done_one(cci);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+struct cache_coherency_ops_inst *
+_cache_coherency_ops_instance_alloc(const struct cache_coherency_ops *ops,
+				    size_t size)
+{
+	struct cache_coherency_ops_inst *cci;
+
+	if (!ops || !ops->wbinv)
+		return NULL;
+
+	cci = kzalloc(size, GFP_KERNEL);
+	if (!cci)
+		return NULL;
+
+	cci->ops = ops;
+	INIT_LIST_HEAD(&cci->node);
+	kref_init(&cci->kref);
+
+	return cci;
+}
+EXPORT_SYMBOL_NS_GPL(_cache_coherency_ops_instance_alloc, "CACHE_COHERENCY");
+
+int cache_coherency_ops_instance_register(struct cache_coherency_ops_inst *cci)
+{
+	guard(rwsem_write)(&cache_ops_instance_list_lock);
+	list_add(&cci->node, &cache_ops_instance_list);
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cache_coherency_ops_instance_register, "CACHE_COHERENCY");
+
+void cache_coherency_ops_instance_unregister(struct cache_coherency_ops_inst *cci)
+{
+	guard(rwsem_write)(&cache_ops_instance_list_lock);
+	list_del(&cci->node);
+}
+EXPORT_SYMBOL_NS_GPL(cache_coherency_ops_instance_unregister, "CACHE_COHERENCY");
+
+int cpu_cache_invalidate_memregion(phys_addr_t start, size_t len)
+{
+	return cache_invalidate_memregion(start, len);
+}
+EXPORT_SYMBOL_NS_GPL(cpu_cache_invalidate_memregion, "DEVMEM");
+
+/*
+ * Used for optimization / debug purposes only as removal can race
+ *
+ * Machines that do not support invalidation, e.g. VMs, will not have any
+ * operations instance to register and so this will always return false.
+ */
+bool cpu_cache_has_invalidate_memregion(void)
+{
+	guard(rwsem_read)(&cache_ops_instance_list_lock);
+	return !list_empty(&cache_ops_instance_list);
+}
+EXPORT_SYMBOL_NS_GPL(cpu_cache_has_invalidate_memregion, "DEVMEM");