drm/pagemap: DMA map folios when possible

If the page is part of a folio, DMA map the whole folio at once instead of
mapping individual pages one after the other. For example, if 2MB folios
are used instead of 4KB pages, this reduces the number of DMA mappings by
a factor of 512.
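
As an illustration (a minimal sketch, not part of the patch), mapping the
head page of a 2MB folio then comes down to a single, larger mapping:

  struct folio *folio = page_folio(page);   /* folio backing this page */
  unsigned int order = folio_order(folio);  /* 9 for a 2MB folio of 4KB pages */

  /* One mapping now covers NR_PAGES(order) == 1 << order pages. */
  dma_addr_t dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);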

The folio order (and, consequently, the size) is stored in struct
drm_pagemap_addr so that it is available at the time of unmapping.
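
Sketching the round trip with the helpers touched below (the .addr and
.order fields come from the diff; the snippet itself is illustrative):

  /* Map: record the folio order next to the DMA address. */
  pagemap_addr[i] = drm_pagemap_addr_encode(dma_addr,
                                            DRM_INTERCONNECT_SYSTEM,
                                            order, dir);

  /* Unmap: the stored order recovers the size that was mapped. */
  dma_unmap_page(dev, pagemap_addr[i].addr,
                 PAGE_SIZE << pagemap_addr[i].order, dir);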

v2:
- Initialize order variable (Matthew Brost)
- Set proto and dir for completeness (Matthew Brost)
- Do not populate drm_pagemap_addr, document it (Matthew Brost)
- Add and use macro NR_PAGES(order) (Matthew Brost)

Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://lore.kernel.org/r/20250805140028.599361-4-francois.dugast@intel.com
Signed-off-by: Francois Dugast <francois.dugast@intel.com>

@@ -222,24 +222,32 @@ static int drm_pagemap_migrate_map_pages(struct device *dev,
 {
         unsigned long i;
 
-        for (i = 0; i < npages; ++i) {
+        for (i = 0; i < npages;) {
                 struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
                 dma_addr_t dma_addr;
+                struct folio *folio;
+                unsigned int order = 0;
 
                 if (!page)
-                        continue;
+                        goto next;
 
                 if (WARN_ON_ONCE(is_zone_device_page(page)))
                         return -EFAULT;
 
-                dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
+                folio = page_folio(page);
+                order = folio_order(folio);
+
+                dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
                 if (dma_mapping_error(dev, dma_addr))
                         return -EFAULT;
 
                 pagemap_addr[i] =
                         drm_pagemap_addr_encode(dma_addr,
                                                 DRM_INTERCONNECT_SYSTEM,
-                                                0, dir);
+                                                order, dir);
+
+next:
+                i += NR_PAGES(order);
         }
 
         return 0;
@@ -263,11 +271,14 @@ static void drm_pagemap_migrate_unmap_pages(struct device *dev,
 {
         unsigned long i;
 
-        for (i = 0; i < npages; ++i) {
+        for (i = 0; i < npages;) {
                 if (!pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
-                        continue;
+                        goto next;
 
-                dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE, dir);
+                dma_unmap_page(dev, pagemap_addr[i].addr, PAGE_SIZE << pagemap_addr[i].order, dir);
+
+next:
+                i += NR_PAGES(pagemap_addr[i].order);
         }
 }

@@ -6,6 +6,8 @@
 #include <linux/hmm.h>
 #include <linux/types.h>
 
+#define NR_PAGES(order) (1U << (order))
+
 struct drm_pagemap;
 struct drm_pagemap_zdd;
 struct device;
@@ -173,7 +175,9 @@ struct drm_pagemap_devmem_ops {
          * @pagemap_addr: Pointer to array of DMA information (source)
          * @npages: Number of pages to copy
          *
-         * Copy pages to device memory.
+         * Copy pages to device memory. If the order of a @pagemap_addr entry
+         * is greater than 0, the entry is populated but subsequent entries
+         * within the range of that order are not populated.
          *
          * Return: 0 on success, a negative error code on failure.
          */
@@ -187,7 +191,9 @@ struct drm_pagemap_devmem_ops {
          * @pagemap_addr: Pointer to array of DMA information (destination)
          * @npages: Number of pages to copy
          *
-         * Copy pages to system RAM.
+         * Copy pages to system RAM. If the order of a @pagemap_addr entry
+         * is greater than 0, the entry is populated but subsequent entries
+         * within the range of that order are not populated.
          *
          * Return: 0 on success, a negative error code on failure.
          */
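
For illustration only, a copy_to_devmem()/copy_to_ram() implementation
would therefore walk the array in order-sized steps; copy_chunk() and dst
are placeholders here, not an existing API:

  unsigned long i;

  for (i = 0; i < npages;) {
          unsigned int order = pagemap_addr[i].order;

          /*
           * pagemap_addr[i] describes NR_PAGES(order) contiguous pages;
           * entries i + 1 .. i + NR_PAGES(order) - 1 are left unpopulated.
           */
          copy_chunk(dst + i * PAGE_SIZE, pagemap_addr[i].addr,
                     PAGE_SIZE << order);

          i += NR_PAGES(order);
  }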