author    Dave Airlie <airlied@redhat.com>  2025-09-02 10:05:55 +1000
committer Dave Airlie <airlied@redhat.com>  2025-09-02 11:23:17 +1000
commit    83631c7b1f5bafb8c6f976c3aab63f2cac962c8e
tree      31d560557ea09214648b5434e2d1d7560c6935e7 /include
parent    Merge tag 'amd-drm-next-6.18-2025-08-29' of https://gitlab.freedesktop.org/ag...
parent    drm/xe: Fix incorrect migration of backed-up object to VRAM
Merge tag 'drm-xe-next-2025-08-29' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-next
UAPI Changes:
- Add madvise interface (Himal Prasad Ghimiray)
- Add DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS to query VMA count and memory attributes (Himal Prasad Ghimiray)
- Handle firmware-reported hardware errors, notifying userspace with the device wedged uevent (Riana Tauro)

Cross-subsystem Changes:
- Add a vendor-specific recovery method to the drm device wedged uevent (Riana Tauro)

Driver Changes:
- Use the same directory structure in debugfs as in sysfs (Michal Wajdeczko)
- Clean up and future-proof VRAM region initialization (Piotr Piórkowski)
- Add G-states and PCIe link states to debugfs (Soham Purkait)
- Clean up eustall debug messages (Harish Chegondi)
- Add SR-IOV support to restore the Compression Control Surface (CCS) on Xe2 and later (Satyanarayana K V P)
- Enable SR-IOV PF mode by default on supported platforms without requiring CONFIG_DRM_XE_DEBUG, and mark some platforms behind force_probe as supported (Michal Wajdeczko)
- More targeted log messages (Michal Wajdeczko)
- Clean up STEER_SEMAPHORE/MCFG_MCR_SELECTOR usage (Nitin Gote)
- Use common code to emit flush (Tvrtko Ursulin)
- Add and extend HW workarounds and tunings for Xe2 and Xe3 (Sk Anirban, Tangudu Tilak Tirumalesh, Nitin Gote, Chaitanya Kumar Borah)
- Add a generic dependency scheduler to help with TLB invalidations and future scenarios (Matthew Brost)
- Use the DRM scheduler for delayed GT TLB invalidations (Matthew Brost)
- Error out on incorrect device use in configfs (Michal Wajdeczko, Lucas De Marchi)
- Refactor configfs attributes (Michal Wajdeczko, Lucas De Marchi)
- Allow configuring future VF devices via configfs (Michal Wajdeczko)
- Implement some missing XeLP workarounds (Tvrtko Ursulin)
- Generalize WA BB setup/emission and add support for the mid-context restore BB, aka indirect context (Tvrtko Ursulin)
- Prepare the driver to expose mmio regions to userspace in the future (Ilia Levi)
- Add more GuC load error status codes (John Harrison)
- Document DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (Priyanka Dandamudi)
- Disable CSC and RPM on VFs (Lukasz Laguna, Satyanarayana K V P)
- Fix oops in xe_gem_fault with PREEMPT_RT (Maarten Lankhorst)
- Skip the LMTT update if no LMEM was provisioned (Michal Wajdeczko)
- Add support for VF migration (Tomasz Lis)
- Use a helper for the guc_waklv_enable functions (Jonathan Cavitt)
- Prepare GPU SVM for migration of THP (Francois Dugast)
- Program the LMTT directory pointer on all GTs within a tile (Piotr Piórkowski)
- Rename XE_WA to XE_GT_WA to better convey its scope vs the device WAs (Matt Atwood)
- Allow matching devices on PCI devid/vendorid only (Lucas De Marchi)
- Improve PDE PAT index selection (Matthew Brost)
- Consolidate ASID allocation in xe_vm_create() vs xe_vm_create_ioctl() (Piotr Piórkowski)
- Resize VF BARs to the maximum possible size according to the number of VFs (Michał Winiarski)
- Untangle vm_bind_ioctl cleanup order (Christoph Manszewski)
- Start fixing usage of XE_PAGE_SIZE vs PAGE_SIZE to improve compatibility with non-x86 arches (Simon Richter)
- Improve tile vs GT initialization order and accounting (Gustavo Sousa)
- Extend the WA kunit test to PTL
- Ensure data is initialized before transferring it to pcode (Stuart Summers)
- Add PSMI support for HW validation (Lucas De Marchi, Vinay Belgaumkar, Badal Nilawar)
- Improve the xe_dma_buf test (Thomas Hellström, Marcin Bernatowicz)
- Fix basename() usage in the generator with !glibc (Carlos Llamas)
- Ensure the GT is in C0 during resumes (Xin Wang)
- Add a TLB invalidation abstraction (Matt Brost, Stuart Summers)
- Make MI_TLB_INVALIDATE conditional on migrate (Matthew Auld)
- Prepare xe_nvm to be initialized early for future use cases (Riana Tauro)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://lore.kernel.org/r/nuejxdhnalyok7tzwkrj67dwjgdafwp4mhdejpyyqnrh4f2epq@nlldovuflnbx
Diffstat (limited to 'include')
-rw-r--r--  include/drm/drm_device.h      4
-rw-r--r--  include/drm/drm_gpusvm.h      4
-rw-r--r--  include/drm/drm_pagemap.h    50
-rw-r--r--  include/drm/intel/pciids.h    5
-rw-r--r--  include/uapi/drm/xe_drm.h   282
5 files changed, 320 insertions(+), 25 deletions(-)
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index a33aedd5e9ec..59fd3f4d5995 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -26,10 +26,14 @@ struct pci_controller;
* Recovery methods for wedged device in order of less to more side-effects.
* To be used with drm_dev_wedged_event() as recovery @method. Callers can
* use any one, multiple (or'd) or none depending on their needs.
+ *
+ * Refer to "Device Wedging" chapter in Documentation/gpu/drm-uapi.rst for more
+ * details.
*/
#define DRM_WEDGE_RECOVERY_NONE BIT(0) /* optional telemetry collection */
#define DRM_WEDGE_RECOVERY_REBIND BIT(1) /* unbind + bind driver */
#define DRM_WEDGE_RECOVERY_BUS_RESET BIT(2) /* unbind + reset bus device + bind */
+#define DRM_WEDGE_RECOVERY_VENDOR BIT(3) /* vendor specific recovery method */
/**
* struct drm_wedge_task_info - information about the guilty task of a wedge dev
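For orientation, a minimal sketch (not part of this diff) of a driver advertising the new vendor-specific method alongside rebind when declaring a device wedged; it assumes the drm_dev_wedged_event(dev, method, info) signature with an optional guilty-task info pointer, and that drm points at the device's struct drm_device:

	/* Hedged sketch: report a wedged device, offering vendor-specific
	 * recovery with driver rebind as a fallback; no guilty-task info. */
	drm_dev_wedged_event(drm,
			     DRM_WEDGE_RECOVERY_VENDOR | DRM_WEDGE_RECOVERY_REBIND,
			     NULL);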
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index 142fc2af1716..0e336148309d 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -17,7 +17,7 @@ struct drm_gpusvm_notifier;
struct drm_gpusvm_ops;
struct drm_gpusvm_range;
struct drm_pagemap;
-struct drm_pagemap_device_addr;
+struct drm_pagemap_addr;
/**
* struct drm_gpusvm_ops - Operations structure for GPU SVM
@@ -154,7 +154,7 @@ struct drm_gpusvm_range {
struct interval_tree_node itree;
struct list_head entry;
unsigned long notifier_seq;
- struct drm_pagemap_device_addr *dma_addr;
+ struct drm_pagemap_addr *dma_addr;
struct drm_pagemap *dpagemap;
struct drm_gpusvm_range_flags flags;
};
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index e5f20a1235be..f6e7e234c089 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -6,6 +6,8 @@
#include <linux/hmm.h>
#include <linux/types.h>
+#define NR_PAGES(order) (1U << (order))
+
struct drm_pagemap;
struct drm_pagemap_zdd;
struct device;
@@ -23,7 +25,7 @@ enum drm_interconnect_protocol {
};
/**
- * struct drm_pagemap_device_addr - Device address representation.
+ * struct drm_pagemap_addr - Address representation.
* @addr: The dma address or driver-defined address for driver private interconnects.
* @proto: The interconnect protocol.
* @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
@@ -32,7 +34,7 @@ enum drm_interconnect_protocol {
* Note: There is room for improvement here. We should be able to pack into
* 64 bits.
*/
-struct drm_pagemap_device_addr {
+struct drm_pagemap_addr {
dma_addr_t addr;
u64 proto : 54;
u64 order : 8;
@@ -40,21 +42,21 @@ struct drm_pagemap_device_addr {
};
/**
- * drm_pagemap_device_addr_encode() - Encode a dma address with metadata
+ * drm_pagemap_addr_encode() - Encode a dma address with metadata
* @addr: The dma address or driver-defined address for driver private interconnects.
* @proto: The interconnect protocol.
* @order: The page order of the dma mapping. (Size is PAGE_SIZE << order).
* @dir: The DMA direction.
*
- * Return: A struct drm_pagemap_device_addr encoding the above information.
+ * Return: A struct drm_pagemap_addr encoding the above information.
*/
-static inline struct drm_pagemap_device_addr
-drm_pagemap_device_addr_encode(dma_addr_t addr,
- enum drm_interconnect_protocol proto,
- unsigned int order,
- enum dma_data_direction dir)
+static inline struct drm_pagemap_addr
+drm_pagemap_addr_encode(dma_addr_t addr,
+ enum drm_interconnect_protocol proto,
+ unsigned int order,
+ enum dma_data_direction dir)
{
- return (struct drm_pagemap_device_addr) {
+ return (struct drm_pagemap_addr) {
.addr = addr,
.proto = proto,
.order = order,
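As a usage note (not from this diff), a driver's @device_map hook might wrap a streaming DMA mapping in the renamed encoder; dma_map_page() usage and the DRM_INTERCONNECT_SYSTEM protocol value are assumptions here, and error handling is elided:

	static struct drm_pagemap_addr
	example_device_map(struct drm_pagemap *dpagemap, struct device *dev,
			   struct page *page, unsigned int order,
			   enum dma_data_direction dir)
	{
		/* Map PAGE_SIZE << order bytes, then tag the address with
		 * protocol, order and direction via the renamed helper. */
		dma_addr_t addr = dma_map_page(dev, page, 0,
					       PAGE_SIZE << order, dir);

		return drm_pagemap_addr_encode(addr, DRM_INTERCONNECT_SYSTEM,
					       order, dir);
	}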
@@ -75,11 +77,11 @@ struct drm_pagemap_ops {
* @order: The page order of the device mapping. (Size is PAGE_SIZE << order).
* @dir: The transfer direction.
*/
- struct drm_pagemap_device_addr (*device_map)(struct drm_pagemap *dpagemap,
- struct device *dev,
- struct page *page,
- unsigned int order,
- enum dma_data_direction dir);
+ struct drm_pagemap_addr (*device_map)(struct drm_pagemap *dpagemap,
+ struct device *dev,
+ struct page *page,
+ unsigned int order,
+ enum dma_data_direction dir);
/**
* @device_unmap: Unmap a device address previously obtained using @device_map.
@@ -90,7 +92,7 @@ struct drm_pagemap_ops {
*/
void (*device_unmap)(struct drm_pagemap *dpagemap,
struct device *dev,
- struct drm_pagemap_device_addr addr);
+ struct drm_pagemap_addr addr);
/**
* @populate_mm: Populate part of the mm with @dpagemap memory,
@@ -170,29 +172,33 @@ struct drm_pagemap_devmem_ops {
/**
* @copy_to_devmem: Copy to device memory (required for migration)
* @pages: Pointer to array of device memory pages (destination)
- * @dma_addr: Pointer to array of DMA addresses (source)
+ * @pagemap_addr: Pointer to array of DMA information (source)
* @npages: Number of pages to copy
*
- * Copy pages to device memory.
+ * Copy pages to device memory. If the order of a @pagemap_addr entry
+ * is greater than 0, the entry is populated but subsequent entries
+ * within the range of that order are not populated.
*
* Return: 0 on success, a negative error code on failure.
*/
int (*copy_to_devmem)(struct page **pages,
- dma_addr_t *dma_addr,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages);
/**
* @copy_to_ram: Copy to system RAM (required for migration)
* @pages: Pointer to array of device memory pages (source)
- * @dma_addr: Pointer to array of DMA addresses (destination)
+ * @pagemap_addr: Pointer to array of DMA information (destination)
* @npages: Number of pages to copy
*
- * Copy pages to system RAM.
+ * Copy pages to system RAM. If the order of a @pagemap_addr entry
+ * is greater than 0, the entry is populated but subsequent entries
+ * within the range of that order are not populated.
*
* Return: 0 on success, a negative error code on failure.
*/
int (*copy_to_ram)(struct page **pages,
- dma_addr_t *dma_addr,
+ struct drm_pagemap_addr *pagemap_addr,
unsigned long npages);
};
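A hedged illustration of the population rule documented above: for a higher-order entry only the first slot is filled, so a @copy_to_devmem / @copy_to_ram implementation would step through the array by NR_PAGES(order):

	/* Sketch only: pages, pagemap_addr and npages as in the op signature. */
	unsigned long i = 0;

	while (i < npages) {
		struct drm_pagemap_addr addr = pagemap_addr[i];

		/* ... copy NR_PAGES(addr.order) pages starting at pages[i]
		 * to or from addr.addr ... */

		i += NR_PAGES(addr.order); /* covered slots are unpopulated */
	}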
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index 76f8d26f9cc9..da6301a6fcea 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -26,6 +26,11 @@
#define __PCIIDS_H__
#ifdef __KERNEL__
+#define INTEL_PCI_DEVICE(_id, _info) { \
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, (_id)), \
+ .driver_data = (kernel_ulong_t)(_info), \
+}
+
#define INTEL_VGA_DEVICE(_id, _info) { \
PCI_DEVICE(PCI_VENDOR_ID_INTEL, (_id)), \
.class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \
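The new INTEL_PCI_DEVICE() matches on vendor/device ID alone, without the display-class filter INTEL_VGA_DEVICE() applies. A hypothetical ID table using it (0x1234 and example_info are placeholders):

	static const struct pci_device_id example_pci_ids[] = {
		INTEL_PCI_DEVICE(0x1234, &example_info),
		{ } /* sentinel */
	};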
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index e2426413488f..40ff19f52a8d 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -81,6 +81,8 @@ extern "C" {
* - &DRM_IOCTL_XE_EXEC
* - &DRM_IOCTL_XE_WAIT_USER_FENCE
* - &DRM_IOCTL_XE_OBSERVATION
+ * - &DRM_IOCTL_XE_MADVISE
+ * - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
*/
/*
@@ -102,6 +104,8 @@ extern "C" {
#define DRM_XE_EXEC 0x09
#define DRM_XE_WAIT_USER_FENCE 0x0a
#define DRM_XE_OBSERVATION 0x0b
+#define DRM_XE_MADVISE 0x0c
+#define DRM_XE_VM_QUERY_MEM_RANGE_ATTRS 0x0d
/* Must be kept compact -- no holes */
@@ -117,6 +121,8 @@ extern "C" {
#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
+#define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
+#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_RANGE_ATTRS, struct drm_xe_vm_query_mem_range_attr)
/**
* DOC: Xe IOCTL Extensions
@@ -760,7 +766,11 @@ struct drm_xe_device_query {
* gem creation
*
* The @flags can be:
- * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
+ * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING - Modify the GEM object
+ * allocation strategy by deferring physical memory allocation
+ * until the object is either bound to a virtual memory region via
+ * VM_BIND or accessed by the CPU. As a result, no backing memory is
+ * reserved at the time of GEM object creation.
* - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
* - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
* possible placement, ensure that the corresponding VRAM allocation
@@ -1003,6 +1013,10 @@ struct drm_xe_vm_destroy {
* valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address
* mirror flag is only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
* handle MBZ, and the BO offset MBZ.
+ *
+ * The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be:
+ * - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in
+ * the memory region advised by madvise.
*/
struct drm_xe_vm_bind_op {
/** @extensions: Pointer to the first extension struct, if any */
@@ -1108,6 +1122,7 @@ struct drm_xe_vm_bind_op {
/** @flags: Bind flags */
__u32 flags;
+#define DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC -1
/**
* @prefetch_mem_region_instance: Memory region to prefetch VMA to.
* It is a region instance, not a mask.
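A hedged userspace sketch (not from this diff) of a prefetch that consults an earlier madvise, assuming a fault-mode VM with an existing CPU-address-mirror mapping over the range:

	struct drm_xe_vm_bind bind = {
		.vm_id = vm_id,
		.num_binds = 1,
		.bind = {
			.op = DRM_XE_VM_BIND_OP_PREFETCH,
			.addr = 0x100000,
			.range = 0x2000,
			/* Prefetch to the region advised via DRM_IOCTL_XE_MADVISE. */
			.prefetch_mem_region_instance = DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC,
		},
	};

	ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);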
@@ -1974,6 +1989,271 @@ struct drm_xe_query_eu_stall {
__u64 sampling_rates[];
};
+/**
+ * struct drm_xe_madvise - Input of &DRM_IOCTL_XE_MADVISE
+ *
+ * This structure is used to set memory attributes for a virtual address range
+ * in a VM. The type of attribute is specified by @type, and the corresponding
+ * union member is used to provide additional parameters for @type.
+ *
+ * Supported attribute types:
+ * - DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC: Set preferred memory location.
+ * - DRM_XE_MEM_RANGE_ATTR_ATOMIC: Set atomic access policy.
+ * - DRM_XE_MEM_RANGE_ATTR_PAT: Set page attribute table index.
+ *
+ * Example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_madvise madvise = {
+ * .vm_id = vm_id,
+ * .start = 0x100000,
+ * .range = 0x2000,
+ * .type = DRM_XE_MEM_RANGE_ATTR_ATOMIC,
+ * .atomic_val = DRM_XE_ATOMIC_DEVICE,
+ * };
+ *
+ * ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);
+ *
+ */
+struct drm_xe_madvise {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+#define DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC 0
+#define DRM_XE_MEM_RANGE_ATTR_ATOMIC 1
+#define DRM_XE_MEM_RANGE_ATTR_PAT 2
+ /** @type: type of attribute */
+ __u32 type;
+
+ union {
+ /**
+ * @preferred_mem_loc: preferred memory location
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC
+ *
+ * Supported values for @preferred_mem_loc.devmem_fd:
+	 * - DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE: set VRAM of the faulting tile as the preferred location
+	 * - DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM: set system memory as the preferred location
+ *
+ * Supported values for @preferred_mem_loc.migration_policy:
+ * - DRM_XE_MIGRATE_ALL_PAGES
+ * - DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES
+ */
+ struct {
+#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0
+#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
+ /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ __u32 devmem_fd;
+
+#define DRM_XE_MIGRATE_ALL_PAGES 0
+#define DRM_XE_MIGRATE_ONLY_SYSTEM_PAGES 1
+ /** @preferred_mem_loc.migration_policy: Page migration policy */
+ __u16 migration_policy;
+
+	/** @preferred_mem_loc.pad: MBZ */
+	__u16 pad;
+
+	/** @preferred_mem_loc.reserved: Reserved */
+ __u64 reserved;
+ } preferred_mem_loc;
+
+ /**
+ * @atomic: Atomic access policy
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_ATOMIC.
+ *
+ * Supported values for @atomic.val:
+ * - DRM_XE_ATOMIC_UNDEFINED: Undefined or default behaviour.
+	 *   Support both GPU and CPU atomic operations for the system allocator.
+	 *   Support GPU atomic operations for the normal (BO) allocator.
+ * - DRM_XE_ATOMIC_DEVICE: Support GPU atomic operations.
+ * - DRM_XE_ATOMIC_GLOBAL: Support both GPU and CPU atomic operations.
+ * - DRM_XE_ATOMIC_CPU: Support CPU atomic only, no GPU atomics supported.
+ */
+ struct {
+#define DRM_XE_ATOMIC_UNDEFINED 0
+#define DRM_XE_ATOMIC_DEVICE 1
+#define DRM_XE_ATOMIC_GLOBAL 2
+#define DRM_XE_ATOMIC_CPU 3
+ /** @atomic.val: value of atomic operation */
+ __u32 val;
+
+ /** @atomic.pad: MBZ */
+ __u32 pad;
+
+ /** @atomic.reserved: Reserved */
+ __u64 reserved;
+ } atomic;
+
+ /**
+ * @pat_index: Page attribute table index
+ *
+ * Used when @type == DRM_XE_MEM_RANGE_ATTR_PAT.
+ */
+ struct {
+ /** @pat_index.val: PAT index value */
+ __u32 val;
+
+ /** @pat_index.pad: MBZ */
+ __u32 pad;
+
+ /** @pat_index.reserved: Reserved */
+ __u64 reserved;
+ } pat_index;
+ };
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
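Complementing the DRM_XE_ATOMIC_DEVICE example in the kernel-doc above, a hedged sketch of a preferred-location madvise through the union's preferred_mem_loc member (same placeholder vm_id and range):

	struct drm_xe_madvise madvise = {
		.vm_id = vm_id,
		.start = 0x100000,
		.range = 0x2000,
		.type = DRM_XE_MEM_RANGE_ATTR_PREFERRED_LOC,
		.preferred_mem_loc = {
			/* -1 selects system memory over any device VRAM. */
			.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM,
			.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
		},
	};

	ioctl(fd, DRM_IOCTL_XE_MADVISE, &madvise);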
+
+/**
+ * struct drm_xe_mem_range_attr - Output of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
+ *
+ * This structure is provided by userspace and filled by the KMD in response to
+ * the DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS ioctl. It describes the memory
+ * attributes of memory ranges within a user-specified address range in a VM.
+ *
+ * The structure includes information such as atomic access policy,
+ * page attribute table (PAT) index, and preferred memory location.
+ * Userspace allocates an array of these structures and passes a pointer to the
+ * ioctl to retrieve attributes for each memory range.
+ *
+ * @extensions: Pointer to the first extension struct, if any
+ * @start: Start address of the memory range
+ * @end: End address of the memory range
+ *
+ */
+struct drm_xe_mem_range_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @start: start of the memory range */
+ __u64 start;
+
+ /** @end: end of the memory range */
+ __u64 end;
+
+ /** @preferred_mem_loc: preferred memory location */
+ struct {
+ /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ __u32 devmem_fd;
+
+ /** @preferred_mem_loc.migration_policy: Page migration policy */
+ __u32 migration_policy;
+ } preferred_mem_loc;
+
+ /** @atomic: Atomic access policy */
+ struct {
+ /** @atomic.val: atomic attribute */
+ __u32 val;
+
+ /** @atomic.reserved: Reserved */
+ __u32 reserved;
+ } atomic;
+
+ /** @pat_index: Page attribute table index */
+ struct {
+ /** @pat_index.val: PAT index */
+ __u32 val;
+
+ /** @pat_index.reserved: Reserved */
+ __u32 reserved;
+ } pat_index;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_query_mem_range_attr - Input of &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS
+ *
+ * This structure is used to query the memory attributes of memory ranges
+ * within a user-specified address range in a VM. It provides detailed
+ * information about each memory range, including atomic access policy,
+ * page attribute table (PAT) index, and preferred memory location.
+ *
+ * Userspace first calls the ioctl with @num_mem_ranges = 0,
+ * @sizeof_mem_range_attr = 0 and @vector_of_mem_attr = NULL to retrieve
+ * the number of memory ranges and the size of each memory range attribute.
+ * Then, it allocates a buffer of that size and calls the ioctl again to fill
+ * the buffer with memory range attributes.
+ *
+ * If the second call fails with -ENOSPC, the memory ranges changed between
+ * the first call and now; retry by repeating the size query
+ * (@num_mem_ranges = 0, @sizeof_mem_range_attr = 0 and
+ * @vector_of_mem_attr = NULL) followed by the second ioctl call.
+ *
+ * Example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_vm_query_mem_range_attr query = {
+ * .vm_id = vm_id,
+ * .start = 0x100000,
+ * .range = 0x2000,
+ * };
+ *
+ *     // First ioctl call to get the number of memory ranges and the size of each attribute
+ * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
+ *
+ *     // Allocate a buffer for the memory range attributes
+ *     char *ptr = malloc(query.num_mem_ranges * query.sizeof_mem_range_attr);
+ *     char *ptr_start = ptr;
+ *
+ * query.vector_of_mem_attr = (uintptr_t)ptr;
+ *
+ * // Second ioctl call to actually fill the memory attributes
+ * ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
+ *
+ *     // Iterate over the returned memory range attributes
+ * for (unsigned int i = 0; i < query.num_mem_ranges; ++i) {
+ * struct drm_xe_mem_range_attr *attr = (struct drm_xe_mem_range_attr *)ptr;
+ *
+ * // Do something with attr
+ *
+ * // Move pointer by one entry
+ * ptr += query.sizeof_mem_range_attr;
+ * }
+ *
+ * free(ptr_start);
+ */
+struct drm_xe_vm_query_mem_range_attr {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: vm_id of the virtual range */
+ __u32 vm_id;
+
+	/** @num_mem_ranges: number of memory ranges in the address range */
+ __u32 num_mem_ranges;
+
+ /** @start: start of the virtual address range */
+ __u64 start;
+
+ /** @range: size of the virtual address range */
+ __u64 range;
+
+ /** @sizeof_mem_range_attr: size of struct drm_xe_mem_range_attr */
+ __u64 sizeof_mem_range_attr;
+
+ /** @vector_of_mem_attr: userptr to array of struct drm_xe_mem_range_attr */
+ __u64 vector_of_mem_attr;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+
+};
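Tying together the two-call flow and the -ENOSPC note above, a hedged continuation of the kernel-doc example (query as declared there; errno and realloc come from libc, and allocation-failure handling is elided):

	int ret = -1;
	char *buf = NULL;

	do {
		/* Size query: the kernel fills num_mem_ranges and
		 * sizeof_mem_range_attr. */
		query.num_mem_ranges = 0;
		query.sizeof_mem_range_attr = 0;
		query.vector_of_mem_attr = 0;
		if (ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query))
			break;

		buf = realloc(buf, query.num_mem_ranges *
				   query.sizeof_mem_range_attr);
		query.vector_of_mem_attr = (uintptr_t)buf;
		ret = ioctl(fd, DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS, &query);
	} while (ret == -1 && errno == ENOSPC); /* ranges changed: redo both calls */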
+
#if defined(__cplusplus)
}
#endif