mirror of https://github.com/torvalds/linux.git

Compare commits: 30f09200cc...4941a17751 (26 commits)

| SHA1 |
|---|
| 4941a17751 |
| 9eb220eddd |
| ad8cccc248 |
| 32115734c0 |
| 430c89fe3a |
| a10d15a08f |
| d041e5e748 |
| b042fdf18e |
| 43ff36c4a5 |
| d26e9f669c |
| 1a8b350182 |
| e6889323c2 |
| 66e600a26e |
| 34fa09c698 |
| 7c9580f44f |
| cff47b9e39 |
| 6c96c6bd2c |
| 52ac3f5829 |
| f0bb6dba3d |
| 270065f514 |
| de8798965f |
| f5e31a196e |
| be4c9abdf0 |
| 5719a189c9 |
| c83fc13960 |
| d4371c266b |
.mailmap (2 changed lines)

@@ -691,6 +691,8 @@ Sachin Mokashi <sachin.mokashi@intel.com> <sachinx.mokashi@intel.com>
 Sachin P Sant <ssant@in.ibm.com>
 Sai Prakash Ranjan <quic_saipraka@quicinc.com> <saiprakash.ranjan@codeaurora.org>
 Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
+Sam Protsenko <semen.protsenko@linaro.org>
+Sam Protsenko <semen.protsenko@linaro.org> <semen.protsenko@globallogic.com>
 Sam Ravnborg <sam@mars.ravnborg.org>
 Sankeerth Billakanti <quic_sbillaka@quicinc.com> <sbillaka@codeaurora.org>
 Santosh Shilimkar <santosh.shilimkar@oracle.org>

MAINTAINERS

@@ -13799,6 +13799,7 @@ F: Documentation/admin-guide/mm/kho.rst
 F: Documentation/core-api/kho/*
 F: include/linux/kexec_handover.h
 F: kernel/kexec_handover.c
+F: lib/test_kho.c
 F: tools/testing/selftests/kho/
 
 KEYS-ENCRYPTED

drivers/acpi/processor_driver.c

@@ -166,7 +166,8 @@ static int __acpi_processor_start(struct acpi_device *device)
     if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
         dev_dbg(&device->dev, "CPPC data invalid or not present\n");
 
-    acpi_processor_power_init(pr);
+    if (cpuidle_get_driver() == &acpi_idle_driver)
+        acpi_processor_power_init(pr);
 
     acpi_pss_perf_init(pr);
 

@@ -262,8 +263,6 @@ static int __init acpi_processor_driver_init(void)
     if (result < 0)
         return result;
 
-    acpi_processor_register_idle_driver();
-
     result = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                "acpi/cpu-drv:online",
                                acpi_soft_cpu_online, NULL);

@@ -302,7 +301,6 @@ static void __exit acpi_processor_driver_exit(void)
 
     cpuhp_remove_state_nocalls(hp_online);
     cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
-    acpi_processor_unregister_idle_driver();
     driver_unregister(&acpi_processor_driver);
 }
 
drivers/acpi/processor_idle.c

@@ -51,7 +51,7 @@ module_param(latency_factor, uint, 0644);
 
 static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
 
-static struct cpuidle_driver acpi_idle_driver = {
+struct cpuidle_driver acpi_idle_driver = {
     .name = "acpi_idle",
     .owner = THIS_MODULE,
 };

@@ -1357,102 +1357,79 @@ int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
     return 0;
 }
 
-void acpi_processor_register_idle_driver(void)
-{
-    struct acpi_processor *pr;
-    int ret = -ENODEV;
-    int cpu;
-
-    /*
-     * Acpi idle driver is used by all possible CPUs.
-     * Install the idle handler by the processor power info of one in them.
-     * Note that we use previously set idle handler will be used on
-     * platforms that only support C1.
-     */
-    for_each_cpu(cpu, (struct cpumask *)cpu_possible_mask) {
-        pr = per_cpu(processors, cpu);
-        if (!pr)
-            continue;
-
-        ret = acpi_processor_get_power_info(pr);
-        if (!ret) {
-            pr->flags.power_setup_done = 1;
-            acpi_processor_setup_cpuidle_states(pr);
-            break;
-        }
-    }
-
-    if (ret) {
-        pr_debug("No ACPI power information from any CPUs.\n");
-        return;
-    }
-
-    ret = cpuidle_register_driver(&acpi_idle_driver);
-    if (ret) {
-        pr_debug("register %s failed.\n", acpi_idle_driver.name);
-        return;
-    }
-    pr_debug("%s registered with cpuidle.\n", acpi_idle_driver.name);
-}
-
-void acpi_processor_unregister_idle_driver(void)
-{
-    cpuidle_unregister_driver(&acpi_idle_driver);
-}
-
-void acpi_processor_power_init(struct acpi_processor *pr)
+static int acpi_processor_registered;
+
+int acpi_processor_power_init(struct acpi_processor *pr)
 {
+    int retval;
     struct cpuidle_device *dev;
 
-    /*
-     * The code below only works if the current cpuidle driver is the ACPI
-     * idle driver.
-     */
-    if (cpuidle_get_driver() != &acpi_idle_driver)
-        return;
-
     if (disabled_by_idle_boot_param())
-        return;
+        return 0;
 
     acpi_processor_cstate_first_run_checks();
 
     if (!acpi_processor_get_power_info(pr))
         pr->flags.power_setup_done = 1;
 
-    if (!pr->flags.power)
-        return;
-
-    dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-    if (!dev)
-        return;
-
-    per_cpu(acpi_cpuidle_device, pr->id) = dev;
-
-    acpi_processor_setup_cpuidle_dev(pr, dev);
-
     /*
-     * Register a cpuidle device for this CPU. The cpuidle driver using
-     * this device is expected to be registered.
+     * Install the idle handler if processor power management is supported.
+     * Note that we use previously set idle handler will be used on
+     * platforms that only support C1.
      */
-    if (cpuidle_register_device(dev)) {
-        per_cpu(acpi_cpuidle_device, pr->id) = NULL;
-        kfree(dev);
+    if (pr->flags.power) {
+        /* Register acpi_idle_driver if not already registered */
+        if (!acpi_processor_registered) {
+            acpi_processor_setup_cpuidle_states(pr);
+            retval = cpuidle_register_driver(&acpi_idle_driver);
+            if (retval)
+                return retval;
+            pr_debug("%s registered with cpuidle\n",
+                     acpi_idle_driver.name);
+        }
+
+        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+        if (!dev)
+            return -ENOMEM;
+        per_cpu(acpi_cpuidle_device, pr->id) = dev;
+
+        acpi_processor_setup_cpuidle_dev(pr, dev);
+
+        /* Register per-cpu cpuidle_device. Cpuidle driver
+         * must already be registered before registering device
+         */
+        retval = cpuidle_register_device(dev);
+        if (retval) {
+            if (acpi_processor_registered == 0)
+                cpuidle_unregister_driver(&acpi_idle_driver);
+
+            per_cpu(acpi_cpuidle_device, pr->id) = NULL;
+            kfree(dev);
+            return retval;
+        }
+        acpi_processor_registered++;
     }
+    return 0;
 }
 
-void acpi_processor_power_exit(struct acpi_processor *pr)
+int acpi_processor_power_exit(struct acpi_processor *pr)
 {
     struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
 
     if (disabled_by_idle_boot_param())
-        return;
+        return 0;
 
     if (pr->flags.power) {
         cpuidle_unregister_device(dev);
+        acpi_processor_registered--;
+        if (acpi_processor_registered == 0)
+            cpuidle_unregister_driver(&acpi_idle_driver);
+
         kfree(dev);
     }
 
     pr->flags.power_setup_done = 0;
+    return 0;
 }
 
 MODULE_IMPORT_NS("ACPI_PROCESSOR_IDLE");

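The processor_idle.c hunk above is, in effect, a return to the long-standing registration scheme: acpi_idle_driver becomes globally visible again, the first CPU whose power information checks out registers it with cpuidle, the static acpi_processor_registered counter tracks how many CPUs hold a cpuidle device, and the last acpi_processor_power_exit() call unregisters the driver. Below is a minimal userspace sketch of that register-on-first-use / unregister-on-last-use idiom; the function names and the stub register/unregister calls are illustrative stand-ins, not kernel APIs.

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the cpuidle driver/device registration calls. */
static int registered_cpus;    /* plays the role of acpi_processor_registered */

static int register_driver(void)        { puts("shared driver registered");   return 0; }
static void unregister_driver(void)     { puts("shared driver unregistered"); }
static int register_device(int cpu)     { printf("device for cpu %d registered\n", cpu); return 0; }
static void unregister_device(int cpu)  { printf("device for cpu %d unregistered\n", cpu); }

/* First caller in registers the shared driver; every caller gets a device. */
static int power_init(int cpu)
{
    int err;

    if (!registered_cpus) {
        err = register_driver();
        if (err)
            return err;
    }

    err = register_device(cpu);
    if (err) {
        /* Tear the shared driver down only if nobody else is using it yet. */
        if (registered_cpus == 0)
            unregister_driver();
        return err;
    }

    registered_cpus++;
    return 0;
}

/* Last caller out unregisters the shared driver. */
static void power_exit(int cpu)
{
    unregister_device(cpu);
    if (--registered_cpus == 0)
        unregister_driver();
}

int main(void)
{
    for (int cpu = 0; cpu < 4; cpu++)
        if (power_init(cpu))
            return EXIT_FAILURE;
    for (int cpu = 3; cpu >= 0; cpu--)
        power_exit(cpu);
    return 0;
}
```

The error path mirrors the ordering in the hunk: if device setup fails before the counter was incremented, the shared driver is torn down only when no earlier caller still depends on it.
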
@@ -127,7 +127,8 @@ struct dc_ti_battery_chip {
 static int dc_ti_battery_get_voltage_and_current_now(struct power_supply *psy, int *volt, int *curr)
 {
     struct dc_ti_battery_chip *chip = power_supply_get_drvdata(psy);
-    s64 cnt_start_usec, now_usec, sleep_usec;
+    ktime_t ktime;
+    s64 sleep_usec;
     unsigned int reg_val;
     s32 acc, smpl_ctr;
     int ret;

@@ -141,16 +142,17 @@ static int dc_ti_battery_get_voltage_and_current_now(struct power_supply *psy, i
     if (ret)
         goto out_err;
 
-    cnt_start_usec = ktime_get_ns() / NSEC_PER_USEC;
+    ktime = ktime_get();
 
     /* Read Vbat, convert IIO mV to power-supply µV */
     ret = iio_read_channel_processed_scale(chip->vbat_channel, volt, 1000);
     if (ret < 0)
         goto out_err;
 
+    ktime = ktime_sub(ktime_get(), ktime);
+
     /* Sleep at least 3 sample-times + slack to get 3+ CC samples */
-    now_usec = ktime_get_ns() / NSEC_PER_USEC;
-    sleep_usec = 3 * SMPL_INTVL_US + SLEEP_SLACK_US - (now_usec - cnt_start_usec);
+    sleep_usec = 3 * SMPL_INTVL_US + SLEEP_SLACK_US - ktime_to_us(ktime);
     if (sleep_usec > 0 && sleep_usec < 1000000)
         usleep_range(sleep_usec, sleep_usec + SLEEP_SLACK_US);
 
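The battery hunks swap open-coded ktime_get_ns() / NSEC_PER_USEC arithmetic for ktime_get(), ktime_sub() and ktime_to_us() without changing the logic: measure how long the voltage read took, then sleep only for whatever remains of three coulomb-counter sample intervals plus some slack. A rough userspace equivalent using clock_gettime(); SMPL_INTVL_US and SLEEP_SLACK_US below are illustrative values, not the driver's constants.

```c
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define SMPL_INTVL_US   3750    /* illustrative sample interval */
#define SLEEP_SLACK_US   500    /* illustrative slack */

/* Monotonic timestamp in microseconds. */
static int64_t monotonic_us(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

int main(void)
{
    int64_t start = monotonic_us();

    /* Stand-in for the Vbat read, which takes an unknown amount of time. */
    usleep(1000);

    int64_t elapsed = monotonic_us() - start;
    int64_t sleep_us = 3 * SMPL_INTVL_US + SLEEP_SLACK_US - elapsed;

    /* Sleep only for the remainder, and never for an absurd amount. */
    if (sleep_us > 0 && sleep_us < 1000000)
        usleep(sleep_us);

    printf("read took %lld us, remaining wait was %lld us\n",
           (long long)elapsed, (long long)(sleep_us > 0 ? sleep_us : 0));
    return 0;
}
```
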
include/acpi/processor.h

@@ -417,15 +417,32 @@ static inline void acpi_processor_throttling_init(void) {}
 #endif /* CONFIG_ACPI_CPU_FREQ_PSS */
 
 /* in processor_idle.c */
+extern struct cpuidle_driver acpi_idle_driver;
 #ifdef CONFIG_ACPI_PROCESSOR_IDLE
-void acpi_processor_power_init(struct acpi_processor *pr);
-void acpi_processor_power_exit(struct acpi_processor *pr);
+int acpi_processor_power_init(struct acpi_processor *pr);
+int acpi_processor_power_exit(struct acpi_processor *pr);
 int acpi_processor_power_state_has_changed(struct acpi_processor *pr);
 int acpi_processor_hotplug(struct acpi_processor *pr);
-void acpi_processor_register_idle_driver(void);
-void acpi_processor_unregister_idle_driver(void);
-int acpi_processor_ffh_lpi_probe(unsigned int cpu);
-int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
+#else
+static inline int acpi_processor_power_init(struct acpi_processor *pr)
+{
+    return -ENODEV;
+}
+
+static inline int acpi_processor_power_exit(struct acpi_processor *pr)
+{
+    return -ENODEV;
+}
+
+static inline int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
+{
+    return -ENODEV;
+}
+
+static inline int acpi_processor_hotplug(struct acpi_processor *pr)
+{
+    return -ENODEV;
+}
 #endif /* CONFIG_ACPI_PROCESSOR_IDLE */
 
 /* in processor_thermal.c */

@@ -448,6 +465,11 @@ static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
 }
 #endif /* CONFIG_CPU_FREQ */
 
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
+extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
+extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
+#endif
+
 void acpi_processor_init_invariance_cppc(void);
 
 #endif

kernel/trace/trace.c

@@ -8781,8 +8781,18 @@ static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
     put_snapshot_map(iter->tr);
 }
 
+static int tracing_buffers_may_split(struct vm_area_struct *vma, unsigned long addr)
+{
+    /*
+     * Trace buffer mappings require the complete buffer including
+     * the meta page. Partial mappings are not supported.
+     */
+    return -EINVAL;
+}
+
 static const struct vm_operations_struct tracing_buffers_vmops = {
     .close      = tracing_buffers_mmap_close,
+    .may_split  = tracing_buffers_may_split,
 };
 
 static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)

lib/Kconfig.debug

@@ -445,8 +445,7 @@ config FRAME_WARN
     default 2048 if GCC_PLUGIN_LATENT_ENTROPY
     default 2048 if PARISC
     default 1536 if (!64BIT && XTENSA)
-    default 1280 if KASAN && !64BIT
-    default 1024 if !64BIT
+    default 1280 if !64BIT
     default 2048 if 64BIT
     help
       Tell the compiler to warn at build time for stack frames larger than this.

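CONFIG_FRAME_WARN is handed to the compiler as -Wframe-larger-than=, so collapsing the 32-bit defaults into a single 1280-byte value only changes the threshold at which warnings start firing. A tiny illustration, compiled with something like `gcc -c -Wframe-larger-than=1280 frame.c`:

```c
/* frame.c - trips -Wframe-larger-than=1280 because of the large local buffer. */
#include <string.h>

int big_frame(void)
{
    char buf[2048];    /* larger than the 1280-byte warning threshold */

    memset(buf, 0, sizeof(buf));
    return buf[0];
}
```
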
mm/filemap.c (27 changed lines)

@@ -3682,8 +3682,9 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
             struct folio *folio, unsigned long start,
             unsigned long addr, unsigned int nr_pages,
             unsigned long *rss, unsigned short *mmap_miss,
-            bool can_map_large)
+            pgoff_t file_end)
 {
+    struct address_space *mapping = folio->mapping;
     unsigned int ref_from_caller = 1;
     vm_fault_t ret = 0;
     struct page *page = folio_page(folio, start);

@@ -3692,12 +3693,16 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
         unsigned long addr0;
 
         /*
-         * Map the large folio fully where possible.
+         * Map the large folio fully where possible:
          *
-         * The folio must not cross VMA or page table boundary.
+         * - The folio is fully within size of the file or belong
+         *   to shmem/tmpfs;
+         * - The folio doesn't cross VMA boundary;
+         * - The folio doesn't cross page table boundary;
          */
         addr0 = addr - start * PAGE_SIZE;
-        if (can_map_large && folio_within_vma(folio, vmf->vma) &&
+        if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+            folio_within_vma(folio, vmf->vma) &&
             (addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {
             vmf->pte -= start;
             page -= start;

@@ -3812,7 +3817,6 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
     unsigned long rss = 0;
     unsigned int nr_pages = 0, folio_type;
     unsigned short mmap_miss = 0, mmap_miss_saved;
-    bool can_map_large;
 
     rcu_read_lock();
     folio = next_uptodate_folio(&xas, mapping, end_pgoff);

@@ -3823,16 +3827,14 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
     end_pgoff = min(end_pgoff, file_end);
 
     /*
-     * Do not allow to map with PTEs beyond i_size and with PMD
-     * across i_size to preserve SIGBUS semantics.
+     * Do not allow to map with PMD across i_size to preserve
+     * SIGBUS semantics.
      *
      * Make an exception for shmem/tmpfs that for long time
      * intentionally mapped with PMDs across i_size.
      */
-    can_map_large = shmem_mapping(mapping) ||
-        file_end >= folio_next_index(folio);
-
-    if (can_map_large && filemap_map_pmd(vmf, folio, start_pgoff)) {
+    if ((file_end >= folio_next_index(folio) || shmem_mapping(mapping)) &&
+        filemap_map_pmd(vmf, folio, start_pgoff)) {
         ret = VM_FAULT_NOPAGE;
         goto out;
     }

@@ -3861,8 +3863,7 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf,
         else
             ret |= filemap_map_folio_range(vmf, folio,
                     xas.xa_index - folio->index, addr,
-                    nr_pages, &rss, &mmap_miss,
-                    can_map_large);
+                    nr_pages, &rss, &mmap_miss, file_end);
 
         folio_unlock(folio);
     } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);

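Both groups of filemap.c changes enforce the same rule by different means: a page-cache folio may be mapped with a PMD, or mapped in full as a large folio, only if it does not extend past i_size (shmem/tmpfs being the deliberate exception), because an access to a mapping beyond EOF must raise SIGBUS. The userspace-visible semantics being preserved can be demonstrated with a short program; the temp-file path and the signal-handling style are incidental choices.

```c
#include <fcntl.h>
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static sigjmp_buf env;

static void on_sigbus(int sig)
{
    (void)sig;
    siglongjmp(env, 1);
}

int main(void)
{
    long page = sysconf(_SC_PAGESIZE);
    char path[] = "/tmp/sigbus-demo-XXXXXX";
    int fd = mkstemp(path);

    if (fd < 0)
        return EXIT_FAILURE;
    unlink(path);
    if (ftruncate(fd, page) < 0)        /* file is exactly one page long */
        return EXIT_FAILURE;

    /* Map two pages: the second lies entirely beyond i_size. */
    char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED)
        return EXIT_FAILURE;

    signal(SIGBUS, on_sigbus);
    p[0] = 'a';                         /* within i_size: fine */

    if (sigsetjmp(env, 1) == 0) {
        p[page] = 'b';                  /* beyond i_size: should raise SIGBUS */
        puts("no SIGBUS (unexpected)");
    } else {
        puts("SIGBUS past EOF, as expected");
    }

    munmap(p, 2 * page);
    close(fd);
    return 0;
}
```

Mapping two pages over a one-page file is allowed; it is the access to the second page that has to fault, which is exactly what pre-mapping PTEs beyond i_size would silently break.
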
mm/huge_memory.c

@@ -3619,6 +3619,16 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
     if (folio != page_folio(split_at) || folio != page_folio(lock_at))
         return -EINVAL;
 
+    /*
+     * Folios that just got truncated cannot get split. Signal to the
+     * caller that there was a race.
+     *
+     * TODO: this will also currently refuse shmem folios that are in the
+     * swapcache.
+     */
+    if (!is_anon && !folio->mapping)
+        return -EBUSY;
+
     if (new_order >= folio_order(folio))
         return -EINVAL;
 

@@ -3659,18 +3669,6 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
         gfp_t gfp;
 
         mapping = folio->mapping;
 
-        /* Truncated ? */
-        /*
-         * TODO: add support for large shmem folio in swap cache.
-         * When shmem is in swap cache, mapping is NULL and
-         * folio_test_swapcache() is true.
-         */
-        if (!mapping) {
-            ret = -EBUSY;
-            goto out;
-        }
-
         min_order = mapping_min_folio_order(folio->mapping);
         if (new_order < min_order) {
             ret = -EINVAL;

mm/memfd.c (27 changed lines)

@@ -96,9 +96,36 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
                         NULL,
                         gfp_mask);
         if (folio) {
+            u32 hash;
+
+            /*
+             * Zero the folio to prevent information leaks to userspace.
+             * Use folio_zero_user() which is optimized for huge/gigantic
+             * pages. Pass 0 as addr_hint since this is not a faulting path
+             * and we don't have a user virtual address yet.
+             */
+            folio_zero_user(folio, 0);
+
+            /*
+             * Mark the folio uptodate before adding to page cache,
+             * as required by filemap.c and other hugetlb paths.
+             */
+            __folio_mark_uptodate(folio);
+
+            /*
+             * Serialize hugepage allocation and instantiation to prevent
+             * races with concurrent allocations, as required by all other
+             * callers of hugetlb_add_to_page_cache().
+             */
+            hash = hugetlb_fault_mutex_hash(memfd->f_mapping, idx);
+            mutex_lock(&hugetlb_fault_mutex_table[hash]);
+
             err = hugetlb_add_to_page_cache(folio,
                                             memfd->f_mapping,
                                             idx);
+
+            mutex_unlock(&hugetlb_fault_mutex_table[hash]);
+
             if (err) {
                 folio_put(folio);
                 goto err_unresv;

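The memfd.c hunk zeroes freshly allocated hugetlb folios, marks them uptodate and wraps hugetlb_add_to_page_cache() in the hugetlb fault mutex, per its own comments to prevent information leaks and races on this allocation path. The contract visible from userspace is simply that hugetlb memfd memory always reads back as zeroes; a quick check, assuming MFD_HUGETLB support and at least one free huge page of the default size (2 MiB is assumed below):

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/memfd.h>

int main(void)
{
    size_t len = 2 * 1024 * 1024;      /* assume 2 MiB default huge pages */
    int fd = syscall(SYS_memfd_create, "huge-demo", MFD_CLOEXEC | MFD_HUGETLB);

    if (fd < 0 || ftruncate(fd, len) < 0) {
        perror("memfd_create/ftruncate");
        return EXIT_FAILURE;
    }

    unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) {
        perror("mmap (are huge pages reserved?)");
        return EXIT_FAILURE;
    }

    /* Newly instantiated hugetlb pages must read back as zeroes. */
    for (size_t i = 0; i < len; i++) {
        if (p[i]) {
            fprintf(stderr, "non-zero byte at offset %zu\n", i);
            return EXIT_FAILURE;
        }
    }
    puts("hugetlb memfd page is zero-filled");

    munmap(p, len);
    close(fd);
    return 0;
}
```
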
@@ -241,6 +241,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
         if (PTR_ERR(vma) == -EAGAIN) {
             count_vm_vma_lock_event(VMA_LOCK_MISS);
             /* The area was replaced with another one */
+            mas_set(&mas, address);
             goto retry;
         }
 
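The single added mas_set(&mas, address) line matters because the maple tree iterator is stateful: after the failed lookup that produced -EAGAIN it may no longer point at the faulting address, so retrying without re-seeding it can search from the wrong place. A self-contained sketch of that "reset the cursor before goto retry" idiom; the toy iterator below only models the shape of the problem, not the maple tree API.

```c
#include <errno.h>
#include <stdio.h>

#define NENT 8

/* Toy stateful cursor, standing in for the maple tree state ("mas"). */
struct iter {
    const int *tbl;
    int idx;
};

static void iter_set(struct iter *it, int start)
{
    it->idx = start;
}

/*
 * Look for `key` starting at the cursor's current position.  The first call
 * simulates the -EAGAIN case: the walk aborts and leaves the cursor pointing
 * somewhere unrelated.
 */
static int iter_find(struct iter *it, int key)
{
    static int fail_once = 1;

    if (fail_once) {
        fail_once = 0;
        it->idx = NENT - 1;    /* cursor is now past the entry we want */
        return -EAGAIN;
    }
    for (; it->idx < NENT; it->idx++)
        if (it->tbl[it->idx] == key)
            return it->idx;
    return -ENOENT;
}

int main(void)
{
    const int tbl[NENT] = { 10, 11, 12, 13, 14, 15, 16, 17 };
    struct iter it = { .tbl = tbl, .idx = 0 };
    int pos;

retry:
    pos = iter_find(&it, 12);
    if (pos == -EAGAIN) {
        /*
         * Re-seed the cursor before retrying; without this (the bug the
         * added mas_set(&mas, address) line fixes) the retry would resume
         * from the stale position and miss the entry.
         */
        iter_set(&it, 0);
        goto retry;
    }
    printf("found key at index %d\n", pos);
    return pos < 0;
}
```
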
mm/swapfile.c

@@ -2005,10 +2005,8 @@ swp_entry_t get_swap_page_of_type(int type)
         local_lock(&percpu_swap_cluster.lock);
         offset = cluster_alloc_swap_entry(si, 0, 1);
         local_unlock(&percpu_swap_cluster.lock);
-        if (offset) {
+        if (offset)
             entry = swp_entry(si->type, offset);
-            atomic_long_dec(&nr_swap_pages);
-        }
     }
     put_swap_device(si);
 }

sound/pci/hda/patch_cirrus.c

@@ -585,6 +585,7 @@ static const struct hda_quirk cs4208_mac_fixup_tbl[] = {
     SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
     SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
     SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
+    SND_PCI_QUIRK(0x106b, 0x7800, "MacPro 6,1", CS4208_MACMINI),
     SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
     {} /* terminator */
 };

sound/pci/hda/patch_realtek.c

@@ -6525,6 +6525,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
     SND_PCI_QUIRK(0x103c, 0x8a4f, "HP Victus 15-fa0xxx (MB 8A4F)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
     SND_PCI_QUIRK(0x103c, 0x8a6e, "HP EDNA 360", ALC287_FIXUP_CS35L41_I2C_4),
     SND_PCI_QUIRK(0x103c, 0x8a74, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+    SND_PCI_QUIRK(0x103c, 0x8a75, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
     SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
     SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
     SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),

@@ -6572,6 +6573,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
     SND_PCI_QUIRK(0x103c, 0x8bc8, "HP Victus 15-fa1xxx", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
     SND_PCI_QUIRK(0x103c, 0x8bcd, "HP Omen 16-xd0xxx", ALC245_FIXUP_HP_MUTE_LED_V1_COEFBIT),
     SND_PCI_QUIRK(0x103c, 0x8bd4, "HP Victus 16-s0xxx (MB 8BD4)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
+    SND_PCI_QUIRK(0x103c, 0x8bd6, "HP Pavilion Aero Laptop 13z-be200", ALC287_FIXUP_HP_GPIO_LED),
     SND_PCI_QUIRK(0x103c, 0x8bdd, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
     SND_PCI_QUIRK(0x103c, 0x8bde, "HP Envy 17", ALC287_FIXUP_CS35L41_I2C_2),
     SND_PCI_QUIRK(0x103c, 0x8bdf, "HP Envy 15", ALC287_FIXUP_CS35L41_I2C_2),

sound/pci/au88x0/au88x0.c

@@ -280,11 +280,11 @@ __snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
 
     // (5)
     err = pci_read_config_word(pci, PCI_DEVICE_ID, &chip->device);
-    if (err < 0)
-        return err;
+    if (err)
+        return pcibios_err_to_errno(err);
     err = pci_read_config_word(pci, PCI_VENDOR_ID, &chip->vendor);
-    if (err < 0)
-        return err;
+    if (err)
+        return pcibios_err_to_errno(err);
     chip->rev = pci->revision;
 #ifdef CHIP_AU8830
     if ((chip->rev) != 0xfe && (chip->rev) != 0xfa) {

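pci_read_config_word() reports failure with a zero-or-positive status code rather than a negative errno, so the old `if (err < 0)` test could never fire; the fix checks for non-zero and converts the status with pcibios_err_to_errno(). A sketch of that translate-positive-status-to-negative-errno pattern, using made-up status values rather than the real PCIBIOS_* constants:

```c
#include <errno.h>
#include <stdio.h>

/* Made-up positive status codes, standing in for PCIBIOS_*-style values. */
enum { STATUS_OK = 0, STATUS_DEVICE_NOT_FOUND = 1, STATUS_BAD_REGISTER = 2 };

/* Translate a positive status into the negative errno callers expect. */
static int status_to_errno(int status)
{
    switch (status) {
    case STATUS_OK:               return 0;
    case STATUS_DEVICE_NOT_FOUND: return -ENODEV;
    case STATUS_BAD_REGISTER:     return -EINVAL;
    default:                      return -EIO;
    }
}

int main(void)
{
    /* `if (err < 0)` would miss this; `if (err)` plus translation works. */
    int err = STATUS_DEVICE_NOT_FOUND;

    if (err)
        printf("translated to %d\n", status_to_errno(err));
    return 0;
}
```
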
sound/usb/mixer.c

@@ -921,7 +921,7 @@ static int parse_term_uac2_clock_source(struct mixer_build *state,
 {
     struct uac_clock_source_descriptor *d = p1;
 
-    term->type = UAC3_CLOCK_SOURCE << 16; /* virtual type */
+    term->type = UAC2_CLOCK_SOURCE << 16; /* virtual type */
     term->id = id;
     term->name = d->iClockSource;
     return 0;

sound/usb/quirks.c

@@ -2030,6 +2030,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
     case USB_ID(0x249c, 0x9326): /* M2Tech Young MkIII */
     case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
     case USB_ID(0x2622, 0x0041): /* Audiolab M-DAC+ */
+    case USB_ID(0x2622, 0x0061): /* LEAK Stereo 230 */
     case USB_ID(0x278b, 0x5100): /* Rotel RC-1590 */
     case USB_ID(0x27f7, 0x3002): /* W4S DAC-2v2SE */
     case USB_ID(0x29a2, 0x0086): /* Mutec MC3+ USB */

@@ -2428,6 +2429,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
            QUIRK_FLAG_DSD_RAW),
     VENDOR_FLG(0x25ce, /* Mytek devices */
            QUIRK_FLAG_DSD_RAW),
+    VENDOR_FLG(0x2622, /* IAG Limited devices */
+           QUIRK_FLAG_DSD_RAW),
     VENDOR_FLG(0x278b, /* Rotel? */
            QUIRK_FLAG_DSD_RAW),
     VENDOR_FLG(0x292b, /* Gustard/Ess based devices */

tools/testing/selftests/mm/uffd-unit-tests.c

@@ -1758,10 +1758,15 @@ int main(int argc, char *argv[])
         uffd_test_ops = mem_type->mem_ops;
         uffd_test_case_ops = test->test_case_ops;
 
-        if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB))
+        if (mem_type->mem_flag & (MEM_HUGETLB_PRIVATE | MEM_HUGETLB)) {
             gopts.page_size = default_huge_page_size();
-        else
+            if (gopts.page_size == 0) {
+                uffd_test_skip("huge page size is 0, feature missing?");
+                continue;
+            }
+        } else {
             gopts.page_size = psize();
+        }
 
         /* Ensure we have at least 2 pages */
         gopts.nr_pages = MAX(UFFD_TEST_MEM_SIZE, gopts.page_size * 2)

@@ -1776,12 +1781,6 @@ int main(int argc, char *argv[])
             continue;
 
         uffd_test_start("%s on %s", test->name, mem_type->name);
-        if ((mem_type->mem_flag == MEM_HUGETLB ||
-            mem_type->mem_flag == MEM_HUGETLB_PRIVATE) &&
-            (default_huge_page_size() == 0)) {
-            uffd_test_skip("huge page size is 0, feature missing?");
-            continue;
-        }
         if (!uffd_feature_supported(test)) {
             uffd_test_skip("feature missing");
             continue;

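The selftest hunks fold the "no huge page size available" check into the place where gopts.page_size is chosen, so hugetlb configurations are skipped before any sizing math runs. The probe presumably boils down to reading Hugepagesize from /proc/meminfo; a standalone version of that check:

```c
#include <stdio.h>
#include <stdlib.h>

/* Return the default huge page size in bytes, or 0 if none is available. */
static unsigned long default_huge_page_size(void)
{
    unsigned long kb = 0;
    char line[256];
    FILE *f = fopen("/proc/meminfo", "r");

    if (!f)
        return 0;
    while (fgets(line, sizeof(line), f)) {
        if (sscanf(line, "Hugepagesize: %lu kB", &kb) == 1)
            break;
    }
    fclose(f);
    return kb * 1024;
}

int main(void)
{
    unsigned long sz = default_huge_page_size();

    if (sz == 0) {
        /* Mirrors the selftest: skip hugetlb cases instead of failing. */
        puts("SKIP: huge page size is 0, feature missing?");
        return 0;
    }
    printf("default huge page size: %lu bytes\n", sz);
    return 0;
}
```
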