| author | Simona Vetter <simona.vetter@ffwll.ch> | 2025-07-11 11:08:52 +0200 |
|---|---|---|
| committer | Simona Vetter <simona.vetter@ffwll.ch> | 2025-07-11 11:08:53 +0200 |
| commit | 9800bf6fae3bcee42c14bf172f7f6342c3d3d305 | |
| tree | 8a0bab0fd4999030d227d12d3988daf9869e8ab5 /include | |
| parent | Merge tag 'drm-misc-next-2025-07-10' of https://gitlab.freedesktop.org/drm/mi... | |
| parent | drm/xe/guc: Cancel ongoing H2G requests when stopping CT | |
Merge tag 'drm-xe-next-2025-07-10' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next

UAPI Changes:
- Documentation fixes (Shuicheng)

Cross-subsystem Changes:
- MTD intel-dg driver for dgfx non-volatile memory device (Sasha)
- i2c: designware changes to allow i2c integration with BMG (Heikki)

Core Changes:
- Restructure migration in preparation for multi-device (Brost, Thomas)
- Expose fan control and voltage regulator version via sysfs (Raag)

Driver Changes:
- Add WildCat Lake support (Roper)
- Add aux bus child device driver for NVM on DGFX (Sasha)
- Some refactoring and fixes to allow cleaner BMG w/a (Lucas, Maarten, Auld)
- BMG w/a (Vinay)
- Improve handling of aborted probe (Michal)
- Do not wedge device on killed exec queues (Brost)
- Init changes for flicker-free boot (Maarten)
- Fix out-of-bounds field write in MI_STORE_DATA_IMM (Jia)
- Enable the GuC Dynamic Inhibit Context Switch optimization (Daniele)
- Drop bo->size (Brost)
- Build and Kconfig fixes (Harry, Maarten)
- Consolidate LRC offset calculations (Tvrtko)
- Fix potential leak in hw_engine_group (Michal)
- Future-proof for multi-tile + multi-GT cases (Roper)
- Validate gt in pmu event (Riana)
- SRIOV PF: Clear all LMTT pages on alloc (Michal)
- Allocate PF queue size on pow2 boundary (Brost)
- SRIOV VF: Make multi-GT migration less error prone (Tomasz)
- Revert indirect ring state patch to fix random LRC context switch failures (Brost)
- Fix compressed VRAM handling (Auld)
- Add one additional BMG PCI ID (Ravi)
- Recommend GuC v70.46.2 for BMG, LNL, DG2 (Julia)
- Add GuC and HuC to PTL (Daniele)
- Drop PTL force_probe requirement (Atwood)
- Fix error flow in display suspend (Shuicheng)
- Disable GuC communication on hardware initialization error (Zhanjun)
- Devcoredump fixes and clean up (Shuicheng)
- SRIOV PF: Downgrade some info to debug (Michal)
- Don't allocate temporary GuC policies object (Michal)
- Support for I2C attached MCUs (Heikki, Raag, Riana)
- Add GPU memory bo trace points (Juston)
- SRIOV VF: Skip some W/a (Michal)
- Correct comment of xe_pm_set_vram_threshold (Shuicheng)
- Cancel ongoing H2G requests when stopping CT (Michal)
Signed-off-by: Simona Vetter <simona.vetter@ffwll.ch>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/aHA7184UnWlONORU@intel.com

Diffstat (limited to 'include')
| Mode | Path | Lines changed |
|---|---|---|
| -rw-r--r-- | include/drm/drm_gpusvm.h | 96 |
| -rw-r--r-- | include/drm/drm_pagemap.h | 135 |
| -rw-r--r-- | include/drm/intel/pciids.h | 5 |
| -rw-r--r-- | include/linux/intel_dg_nvm_aux.h | 32 |
| -rw-r--r-- | include/uapi/drm/xe_drm.h | 8 |
5 files changed, 175 insertions, 101 deletions
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index 6a5156476bf4..4aedc5423aff 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -16,92 +16,10 @@ struct drm_gpusvm;
 struct drm_gpusvm_notifier;
 struct drm_gpusvm_ops;
 struct drm_gpusvm_range;
-struct drm_gpusvm_devmem;
 struct drm_pagemap;
 struct drm_pagemap_device_addr;
 
 /**
- * struct drm_gpusvm_devmem_ops - Operations structure for GPU SVM device memory
- *
- * This structure defines the operations for GPU Shared Virtual Memory (SVM)
- * device memory. These operations are provided by the GPU driver to manage device memory
- * allocations and perform operations such as migration between device memory and system
- * RAM.
- */
-struct drm_gpusvm_devmem_ops {
-	/**
-	 * @devmem_release: Release device memory allocation (optional)
-	 * @devmem_allocation: device memory allocation
-	 *
-	 * Release device memory allocation and drop a reference to device
-	 * memory allocation.
-	 */
-	void (*devmem_release)(struct drm_gpusvm_devmem *devmem_allocation);
-
-	/**
-	 * @populate_devmem_pfn: Populate device memory PFN (required for migration)
-	 * @devmem_allocation: device memory allocation
-	 * @npages: Number of pages to populate
-	 * @pfn: Array of page frame numbers to populate
-	 *
-	 * Populate device memory page frame numbers (PFN).
-	 *
-	 * Return: 0 on success, a negative error code on failure.
-	 */
-	int (*populate_devmem_pfn)(struct drm_gpusvm_devmem *devmem_allocation,
-				   unsigned long npages, unsigned long *pfn);
-
-	/**
-	 * @copy_to_devmem: Copy to device memory (required for migration)
-	 * @pages: Pointer to array of device memory pages (destination)
-	 * @dma_addr: Pointer to array of DMA addresses (source)
-	 * @npages: Number of pages to copy
-	 *
-	 * Copy pages to device memory.
-	 *
-	 * Return: 0 on success, a negative error code on failure.
-	 */
-	int (*copy_to_devmem)(struct page **pages,
-			      dma_addr_t *dma_addr,
-			      unsigned long npages);
-
-	/**
-	 * @copy_to_ram: Copy to system RAM (required for migration)
-	 * @pages: Pointer to array of device memory pages (source)
-	 * @dma_addr: Pointer to array of DMA addresses (destination)
-	 * @npages: Number of pages to copy
-	 *
-	 * Copy pages to system RAM.
-	 *
-	 * Return: 0 on success, a negative error code on failure.
-	 */
-	int (*copy_to_ram)(struct page **pages,
-			   dma_addr_t *dma_addr,
-			   unsigned long npages);
-};
-
-/**
- * struct drm_gpusvm_devmem - Structure representing a GPU SVM device memory allocation
- *
- * @dev: Pointer to the device structure which device memory allocation belongs to
- * @mm: Pointer to the mm_struct for the address space
- * @detached: device memory allocations is detached from device pages
- * @ops: Pointer to the operations structure for GPU SVM device memory
- * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
- * @size: Size of device memory allocation
- * @timeslice_expiration: Timeslice expiration in jiffies
- */
-struct drm_gpusvm_devmem {
-	struct device *dev;
-	struct mm_struct *mm;
-	struct completion detached;
-	const struct drm_gpusvm_devmem_ops *ops;
-	struct drm_pagemap *dpagemap;
-	size_t size;
-	u64 timeslice_expiration;
-};
-
-/**
  * struct drm_gpusvm_ops - Operations structure for GPU SVM
  *
  * This structure defines the operations for GPU Shared Virtual Memory (SVM).
@@ -361,15 +279,6 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
 				  struct drm_gpusvm_range *range,
 				  const struct drm_gpusvm_ctx *ctx);
 
-int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
-				 struct drm_gpusvm_range *range,
-				 struct drm_gpusvm_devmem *devmem_allocation,
-				 const struct drm_gpusvm_ctx *ctx);
-
-int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation);
-
-const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void);
-
 bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
 			    unsigned long end);
 
@@ -380,11 +289,6 @@ drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
 void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
 				   const struct mmu_notifier_range *mmu_range);
 
-void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
-			    struct device *dev, struct mm_struct *mm,
-			    const struct drm_gpusvm_devmem_ops *ops,
-			    struct drm_pagemap *dpagemap, size_t size);
-
 #ifdef CONFIG_LOCKDEP
 /**
  * drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index 202c157ff4d7..e5f20a1235be 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -7,6 +7,7 @@
 #include <linux/types.h>
 
 struct drm_pagemap;
+struct drm_pagemap_zdd;
 struct device;
 
 /**
@@ -91,6 +92,35 @@ struct drm_pagemap_ops {
 			    struct device *dev,
 			    struct drm_pagemap_device_addr addr);
 
+	/**
+	 * @populate_mm: Populate part of the mm with @dpagemap memory,
+	 * migrating existing data.
+	 * @dpagemap: The struct drm_pagemap managing the memory.
+	 * @start: The virtual start address in @mm
+	 * @end: The virtual end address in @mm
+	 * @mm: Pointer to a live mm. The caller must have an mmget()
+	 * reference.
+	 *
+	 * The caller will have the mm lock at least in read mode.
+	 * Note that there is no guarantee that the memory is resident
+	 * after the function returns, it's best effort only.
+	 * When the mm is not using the memory anymore,
+	 * it will be released. The struct drm_pagemap might have a
+	 * mechanism in place to reclaim the memory and the data will
+	 * then be migrated. Typically to system memory.
+	 * The implementation should hold sufficient runtime power-
+	 * references while pages are used in an address space and
+	 * should ideally guard against hardware device unbind in
+	 * a way such that device pages are migrated back to system
+	 * followed by device page removal. The implementation should
+	 * return -ENODEV after device removal.
+	 *
+	 * Return: 0 if successful. Negative error code on error.
+	 */
+	int (*populate_mm)(struct drm_pagemap *dpagemap,
+			   unsigned long start, unsigned long end,
+			   struct mm_struct *mm,
+			   unsigned long timeslice_ms);
 };
 
 /**
@@ -104,4 +134,109 @@ struct drm_pagemap {
 	struct device *dev;
 };
 
+struct drm_pagemap_devmem;
+
+/**
+ * struct drm_pagemap_devmem_ops - Operations structure for GPU SVM device memory
+ *
+ * This structure defines the operations for GPU Shared Virtual Memory (SVM)
+ * device memory. These operations are provided by the GPU driver to manage device memory
+ * allocations and perform operations such as migration between device memory and system
+ * RAM.
+ */
+struct drm_pagemap_devmem_ops {
+	/**
+	 * @devmem_release: Release device memory allocation (optional)
+	 * @devmem_allocation: device memory allocation
+	 *
+	 * Release device memory allocation and drop a reference to device
+	 * memory allocation.
+	 */
+	void (*devmem_release)(struct drm_pagemap_devmem *devmem_allocation);
+
+	/**
+	 * @populate_devmem_pfn: Populate device memory PFN (required for migration)
+	 * @devmem_allocation: device memory allocation
+	 * @npages: Number of pages to populate
+	 * @pfn: Array of page frame numbers to populate
+	 *
+	 * Populate device memory page frame numbers (PFN).
+	 *
+	 * Return: 0 on success, a negative error code on failure.
+	 */
+	int (*populate_devmem_pfn)(struct drm_pagemap_devmem *devmem_allocation,
+				   unsigned long npages, unsigned long *pfn);
+
+	/**
+	 * @copy_to_devmem: Copy to device memory (required for migration)
+	 * @pages: Pointer to array of device memory pages (destination)
+	 * @dma_addr: Pointer to array of DMA addresses (source)
+	 * @npages: Number of pages to copy
+	 *
+	 * Copy pages to device memory.
+	 *
+	 * Return: 0 on success, a negative error code on failure.
+	 */
+	int (*copy_to_devmem)(struct page **pages,
+			      dma_addr_t *dma_addr,
+			      unsigned long npages);
+
+	/**
+	 * @copy_to_ram: Copy to system RAM (required for migration)
+	 * @pages: Pointer to array of device memory pages (source)
+	 * @dma_addr: Pointer to array of DMA addresses (destination)
+	 * @npages: Number of pages to copy
+	 *
+	 * Copy pages to system RAM.
+	 *
+	 * Return: 0 on success, a negative error code on failure.
+	 */
+	int (*copy_to_ram)(struct page **pages,
+			   dma_addr_t *dma_addr,
+			   unsigned long npages);
+};
+
+/**
+ * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
+ *
+ * @dev: Pointer to the device structure which device memory allocation belongs to
+ * @mm: Pointer to the mm_struct for the address space
+ * @detached: device memory allocations is detached from device pages
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
+ * @size: Size of device memory allocation
+ * @timeslice_expiration: Timeslice expiration in jiffies
+ */
+struct drm_pagemap_devmem {
+	struct device *dev;
+	struct mm_struct *mm;
+	struct completion detached;
+	const struct drm_pagemap_devmem_ops *ops;
+	struct drm_pagemap *dpagemap;
+	size_t size;
+	u64 timeslice_expiration;
+};
+
+int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
+				  struct mm_struct *mm,
+				  unsigned long start, unsigned long end,
+				  unsigned long timeslice_ms,
+				  void *pgmap_owner);
+
+int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);
+
+const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void);
+
+struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
+
+void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
+			     struct device *dev, struct mm_struct *mm,
+			     const struct drm_pagemap_devmem_ops *ops,
+			     struct drm_pagemap *dpagemap, size_t size);
+
+int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+			    unsigned long start, unsigned long end,
+			    struct mm_struct *mm,
+			    unsigned long timeslice_ms);
+
 #endif
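
For orientation, here is a minimal, hypothetical sketch of how a GPU driver might plug into the drm_pagemap device-memory interface declared above. The `drm_pagemap_devmem_ops`, `drm_pagemap_devmem_init()` and `drm_pagemap_migrate_to_devmem()` signatures are taken from the header; every `my_*` name is invented for illustration, the callback bodies are placeholders, and error unwinding is elided.

```c
/*
 * Hypothetical driver-side sketch only: all my_* names are invented and the
 * bodies are placeholders. It shows the shape of the new drm_pagemap devmem
 * interface, not the actual Xe implementation.
 */
#include <drm/drm_pagemap.h>
#include <linux/container_of.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct my_vram_alloc {
	struct drm_pagemap_devmem base;	/* recovered via container_of() below */
	/* driver-private VRAM bookkeeping would live here */
};

static void my_devmem_release(struct drm_pagemap_devmem *devmem)
{
	struct my_vram_alloc *alloc =
		container_of(devmem, struct my_vram_alloc, base);

	kfree(alloc);		/* a real driver would also return the VRAM */
}

static int my_populate_devmem_pfn(struct drm_pagemap_devmem *devmem,
				  unsigned long npages, unsigned long *pfn)
{
	/* Fill @pfn with the device-memory PFNs backing this allocation. */
	return -ENODEV;		/* placeholder */
}

static int my_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
			     unsigned long npages)
{
	/* Copy @npages from the DMA-mapped system pages into device memory. */
	return 0;		/* placeholder */
}

static int my_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
			  unsigned long npages)
{
	/* Copy @npages from device memory back into the DMA-mapped pages. */
	return 0;		/* placeholder */
}

static const struct drm_pagemap_devmem_ops my_devmem_ops = {
	.devmem_release		= my_devmem_release,
	.populate_devmem_pfn	= my_populate_devmem_pfn,
	.copy_to_devmem		= my_copy_to_devmem,
	.copy_to_ram		= my_copy_to_ram,
};

/* Migrate the CPU range [start, end) of @mm into memory owned by @dpagemap. */
static int my_migrate_range(struct device *dev, struct drm_pagemap *dpagemap,
			    struct mm_struct *mm, unsigned long start,
			    unsigned long end, void *pgmap_owner)
{
	struct my_vram_alloc *alloc = kzalloc(sizeof(*alloc), GFP_KERNEL);

	if (!alloc)
		return -ENOMEM;

	drm_pagemap_devmem_init(&alloc->base, dev, mm, &my_devmem_ops,
				dpagemap, end - start);

	/* timeslice_ms = 0: no minimum residency before eviction back to RAM. */
	return drm_pagemap_migrate_to_devmem(&alloc->base, mm, start, end,
					     0, pgmap_owner);
}
```

The reverse direction, judging from the names and kernel-doc above, is what `drm_pagemap_evict_to_ram()` drives through the `copy_to_ram()` callback.
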
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index 129c3172aa5f..76f8d26f9cc9 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -846,6 +846,7 @@
 /* BMG */
 #define INTEL_BMG_IDS(MACRO__, ...) \
 	MACRO__(0xE202, ## __VA_ARGS__), \
+	MACRO__(0xE209, ## __VA_ARGS__), \
 	MACRO__(0xE20B, ## __VA_ARGS__), \
 	MACRO__(0xE20C, ## __VA_ARGS__), \
 	MACRO__(0xE20D, ## __VA_ARGS__), \
@@ -871,6 +872,8 @@
 	MACRO__(0xB08F, ## __VA_ARGS__), \
 	MACRO__(0xB090, ## __VA_ARGS__), \
 	MACRO__(0xB0A0, ## __VA_ARGS__), \
-	MACRO__(0xB0B0, ## __VA_ARGS__)
+	MACRO__(0xB0B0, ## __VA_ARGS__), \
+	MACRO__(0xFD80, ## __VA_ARGS__), \
+	MACRO__(0xFD81, ## __VA_ARGS__)
 
 #endif /* __PCIIDS_H__ */
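
The pciids.h macros are consumed by the drivers' PCI ID tables, so a new `MACRO__(0x....)` entry is all it takes for an ID to start probing. A generic, illustrative expansion is sketched below; `MY_XE_DEVICE`, `my_platform_desc` and `my_pci_ids` are made-up names, not the real Xe table.

```c
/*
 * Illustration only: how an INTEL_*_IDS() macro from pciids.h is typically
 * expanded into a PCI ID table. The MY_XE_DEVICE/my_* names are invented.
 */
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/intel/pciids.h>

struct my_platform_desc {
	const char *name;
};

static const struct my_platform_desc bmg_desc = { .name = "BMG" };

#define MY_XE_DEVICE(id, info) \
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, (id)), \
	  .driver_data = (kernel_ulong_t)(info) }

static const struct pci_device_id my_pci_ids[] = {
	/* Expands to one table entry per BMG device ID, now including 0xE209. */
	INTEL_BMG_IDS(MY_XE_DEVICE, &bmg_desc),
	{ }
};
MODULE_DEVICE_TABLE(pci, my_pci_ids);
```

The 0xFD80/0xFD81 additions at the end of the header extend the last ID list there in exactly the same way.
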
diff --git a/include/linux/intel_dg_nvm_aux.h b/include/linux/intel_dg_nvm_aux.h
new file mode 100644
index 000000000000..625d46a6b96e
--- /dev/null
+++ b/include/linux/intel_dg_nvm_aux.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_DG_NVM_AUX_H__
+#define __INTEL_DG_NVM_AUX_H__
+
+#include <linux/auxiliary_bus.h>
+#include <linux/container_of.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+
+#define INTEL_DG_NVM_REGIONS 13
+
+struct intel_dg_nvm_region {
+	const char *name;
+};
+
+struct intel_dg_nvm_dev {
+	struct auxiliary_device aux_dev;
+	bool writable_override;
+	bool non_posted_erase;
+	struct resource bar;
+	struct resource bar2;
+	const struct intel_dg_nvm_region *regions;
+};
+
+#define auxiliary_dev_to_intel_dg_nvm_dev(auxiliary_dev) \
+	container_of(auxiliary_dev, struct intel_dg_nvm_dev, aux_dev)
+
+#endif /* __INTEL_DG_NVM_AUX_H__ */
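
The new intel_dg_nvm_aux.h header is the contract between the GPU driver, which registers the auxiliary child device, and the MTD driver mentioned in the changelog, which binds to it. A hypothetical consumer could look like the sketch below; the "xe.nvm" id string and the `my_*` names are assumptions for illustration and are not taken from this series.

```c
/*
 * Hypothetical consumer of the intel_dg_nvm auxiliary device. The id string
 * "xe.nvm" and the my_* names are assumptions for illustration only.
 */
#include <linux/auxiliary_bus.h>
#include <linux/intel_dg_nvm_aux.h>
#include <linux/module.h>

static int my_nvm_probe(struct auxiliary_device *aux_dev,
			const struct auxiliary_device_id *id)
{
	struct intel_dg_nvm_dev *nvm = auxiliary_dev_to_intel_dg_nvm_dev(aux_dev);

	/* The parent GPU driver filled in the BAR resources and region table. */
	dev_info(&aux_dev->dev, "NVM at %pR, non-posted erase: %d\n",
		 &nvm->bar, nvm->non_posted_erase);

	return 0;
}

static void my_nvm_remove(struct auxiliary_device *aux_dev)
{
	/* Undo whatever probe set up. */
}

static const struct auxiliary_device_id my_nvm_id_table[] = {
	{ .name = "xe.nvm" },	/* assumed <module>.<device> match string */
	{ }
};
MODULE_DEVICE_TABLE(auxiliary, my_nvm_id_table);

static struct auxiliary_driver my_nvm_driver = {
	.probe = my_nvm_probe,
	.remove = my_nvm_remove,
	.id_table = my_nvm_id_table,
};
module_auxiliary_driver(my_nvm_driver);

MODULE_DESCRIPTION("Illustrative intel_dg_nvm auxiliary device consumer");
MODULE_LICENSE("GPL");
```
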
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 8e8bbdec8c5c..e2426413488f 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -925,9 +925,9 @@ struct drm_xe_gem_mmap_offset {
  * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
  *   exec submissions to its exec_queues that don't have an upper time
  *   limit on the job execution time. But exec submissions to these
- *   don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ,
- *   DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF,
- *   used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
+ *   don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ,
+ *   DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is,
+ *   together with sync flag DRM_XE_SYNC_FLAG_SIGNAL.
  *   LR VMs can be created in recoverable page-fault mode using
  *   DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
  *   If that flag is omitted, the UMD can not rely on the slightly
@@ -1394,7 +1394,7 @@ struct drm_xe_sync {
 
 	/**
 	 * @timeline_value: Input for the timeline sync object. Needs to be
-	 * different than 0 when used with %DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ.
+	 * different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
	 */
 	__u64 timeline_value;
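
The documentation fix above only renames the constants the LR-mode text refers to: out-syncs are selected by the sync @type combined with DRM_XE_SYNC_FLAG_SIGNAL, not by dedicated flags. A small userspace sketch of such an out-sync entry follows, using field names from the xe uapi header; treat the exact struct layout as an assumption if your header version differs.

```c
/*
 * Userspace sketch: build a drm_xe_sync out-sync entry that signals a
 * timeline syncobj point on completion. Valid for ordinary VMs, but per the
 * updated LR-mode text it is rejected as an out-sync on LR VM exec queues.
 */
#include <stdint.h>
#include <string.h>

#include <drm/xe_drm.h>

static struct drm_xe_sync make_timeline_out_sync(uint32_t syncobj_handle,
						 uint64_t point)
{
	struct drm_xe_sync sync;

	memset(&sync, 0, sizeof(sync));
	sync.type = DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ;
	sync.flags = DRM_XE_SYNC_FLAG_SIGNAL;	/* out-sync: signalled when the job completes */
	sync.handle = syncobj_handle;
	/* Must be non-zero for timeline syncobjs, per the comment fixed above. */
	sync.timeline_value = point;

	return sync;
}
```
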
