diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 6d84a02cfa5d..fc43f2703ae0 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -226,7 +226,6 @@ static int memory_block_online(struct memory_block *mem)
 	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
 	unsigned long nr_vmemmap_pages = 0;
-	struct memory_notify arg;
 	struct zone *zone;
 	int ret;
 
@@ -246,19 +245,9 @@ static int memory_block_online(struct memory_block *mem)
 	if (mem->altmap)
 		nr_vmemmap_pages = mem->altmap->free;
 
-	arg.altmap_start_pfn = start_pfn;
-	arg.altmap_nr_pages = nr_vmemmap_pages;
-	arg.start_pfn = start_pfn + nr_vmemmap_pages;
-	arg.nr_pages = nr_pages - nr_vmemmap_pages;
 	mem_hotplug_begin();
-	ret = memory_notify(MEM_PREPARE_ONLINE, &arg);
-	ret = notifier_to_errno(ret);
-	if (ret)
-		goto out_notifier;
-
 	if (nr_vmemmap_pages) {
-		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,
-						zone, mem->altmap->inaccessible);
+		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
 		if (ret)
 			goto out;
 	}
@@ -280,11 +269,7 @@ static int memory_block_online(struct memory_block *mem)
 					  nr_vmemmap_pages);
 
 	mem->zone = zone;
-	mem_hotplug_done();
-	return ret;
 out:
-	memory_notify(MEM_FINISH_OFFLINE, &arg);
-out_notifier:
 	mem_hotplug_done();
 	return ret;
 }
@@ -297,7 +282,6 @@ static int memory_block_offline(struct memory_block *mem)
 	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
 	unsigned long nr_vmemmap_pages = 0;
-	struct memory_notify arg;
 	int ret;
 
 	if (!mem->zone)
@@ -329,11 +313,6 @@ static int memory_block_offline(struct memory_block *mem)
 		mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
 
 	mem->zone = NULL;
-	arg.altmap_start_pfn = start_pfn;
-	arg.altmap_nr_pages = nr_vmemmap_pages;
-	arg.start_pfn = start_pfn + nr_vmemmap_pages;
-	arg.nr_pages = nr_pages - nr_vmemmap_pages;
-	memory_notify(MEM_FINISH_OFFLINE, &arg);
 out:
 	mem_hotplug_done();
 	return ret;
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 0c214256216f..ba1515160894 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -96,17 +96,8 @@ int set_memory_block_size_order(unsigned int order);
 #define MEM_GOING_ONLINE	(1<<3)
 #define MEM_CANCEL_ONLINE	(1<<4)
 #define MEM_CANCEL_OFFLINE	(1<<5)
-#define MEM_PREPARE_ONLINE	(1<<6)
-#define MEM_FINISH_OFFLINE	(1<<7)
 
 struct memory_notify {
-	/*
-	 * The altmap_start_pfn and altmap_nr_pages fields are designated for
-	 * specifying the altmap range and are exclusively intended for use in
-	 * MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE notifiers.
-	 */
-	unsigned long altmap_start_pfn;
-	unsigned long altmap_nr_pages;
 	unsigned long start_pfn;
 	unsigned long nr_pages;
 };
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 23f038a16231..f2f16cdd73ee 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -58,22 +58,6 @@ typedef int __bitwise mhp_t;
  * implies the node id (nid).
  */
 #define MHP_NID_IS_MGID		((__force mhp_t)BIT(2))
-/*
- * The hotplugged memory is completely inaccessible while the memory is
- * offline. The memory provider will handle MEM_PREPARE_ONLINE /
- * MEM_FINISH_OFFLINE notifications and make the memory accessible.
- *
- * This flag is only relevant when used along with MHP_MEMMAP_ON_MEMORY,
- * because the altmap cannot be written (e.g., poisoned) when adding
- * memory -- before it is set online.
- *
- * This allows for adding memory with an altmap that is not currently
- * made available by a hypervisor. When onlining that memory, the
- * hypervisor can be instructed to make that memory available, and
- * the onlining phase will not require any memory allocations, which is
- * helpful in low-memory situations.
- */
-#define MHP_OFFLINE_INACCESSIBLE	((__force mhp_t)BIT(3))
 
 /*
  * Extended parameters for memory hotplug:
@@ -123,7 +107,7 @@ extern void adjust_present_page_count(struct page *page,
 				      long nr_pages);
 /* VM interface that may be used by firmware interface */
 extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
-				      struct zone *zone, bool mhp_off_inaccessible);
+				      struct zone *zone);
 extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages);
 extern int online_pages(unsigned long pfn, unsigned long nr_pages,
 			struct zone *zone, struct memory_group *group);
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index e5951ba12a28..30c7aecbd245 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -25,7 +25,6 @@ struct vmem_altmap {
 	unsigned long free;
 	unsigned long align;
 	unsigned long alloc;
-	bool inaccessible;
 };
 
 /*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0be83039c3b5..238a6712738e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1088,7 +1088,7 @@ void adjust_present_page_count(struct page *page, struct memory_group *group,
 }
 
 int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
-			      struct zone *zone, bool mhp_off_inaccessible)
+			      struct zone *zone)
 {
 	unsigned long end_pfn = pfn + nr_pages;
 	int ret, i;
@@ -1097,15 +1097,6 @@ int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages,
 	if (ret)
 		return ret;
 
-	/*
-	 * Memory block is accessible at this stage and hence poison the struct
-	 * pages now. If the memory block is accessible during memory hotplug
-	 * addition phase, then page poisining is already performed in
-	 * sparse_add_section().
-	 */
-	if (mhp_off_inaccessible)
-		page_init_poison(pfn_to_page(pfn), sizeof(struct page) * nr_pages);
-
 	move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE,
 			       false);
 
@@ -1444,7 +1435,7 @@ static void remove_memory_blocks_and_altmaps(u64 start, u64 size)
 }
 
 static int create_altmaps_and_memory_blocks(int nid, struct memory_group *group,
-					    u64 start, u64 size, mhp_t mhp_flags)
+					    u64 start, u64 size)
 {
 	unsigned long memblock_size = memory_block_size_bytes();
 	u64 cur_start;
@@ -1460,8 +1451,6 @@ static int create_altmaps_and_memory_blocks(int nid, struct memory_group *group,
 		};
 
 		mhp_altmap.free = memory_block_memmap_on_memory_pages();
-		if (mhp_flags & MHP_OFFLINE_INACCESSIBLE)
-			mhp_altmap.inaccessible = true;
 		params.altmap = kmemdup(&mhp_altmap, sizeof(struct vmem_altmap),
 					GFP_KERNEL);
 		if (!params.altmap) {
@@ -1555,7 +1544,7 @@ int add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
 	 */
 	if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) &&
 	    mhp_supports_memmap_on_memory()) {
-		ret = create_altmaps_and_memory_blocks(nid, group, start, size, mhp_flags);
+		ret = create_altmaps_and_memory_blocks(nid, group, start, size);
 		if (ret)
 			goto error;
 	} else {
diff --git a/mm/sparse.c b/mm/sparse.c
index 17c50a6415c2..b5b2b6f7041b 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -951,8 +951,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
 	 * Poison uninitialized struct pages in order to catch invalid flags
 	 * combinations.
 	 */
-	if (!altmap || !altmap->inaccessible)
-		page_init_poison(memmap, sizeof(struct page) * nr_pages);
+	page_init_poison(memmap, sizeof(struct page) * nr_pages);
 
 	ms = __nr_to_section(section_nr);
 	set_section_nid(section_nr, nid);
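
Note (illustration, not part of the patch): with MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE gone, a memory hotplug notifier only sees the surviving MEM_* events, and struct memory_notify carries just start_pfn and nr_pages. The sketch below is a minimal, hypothetical consumer of that remaining API (names such as example_mem_notifier are made up); it only uses identifiers that still exist after this change and is meant to show the shape of a notifier, not code from this series.

/* Minimal sketch of a memory hotplug notifier after this removal. */
#include <linux/init.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int example_mem_notifier(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	switch (action) {
	case MEM_GOING_ONLINE:
		/*
		 * arg->start_pfn / arg->nr_pages describe the range being
		 * onlined; the altmap_start_pfn/altmap_nr_pages fields no
		 * longer exist. Return notifier_from_errno(-Exxx) to veto.
		 */
		break;
	case MEM_OFFLINE:
		/* Range has been offlined. */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_mem_nb = {
	.notifier_call = example_mem_notifier,
};

static int __init example_mem_init(void)
{
	return register_memory_notifier(&example_mem_nb);
}
module_init(example_mem_init);
MODULE_LICENSE("GPL");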