From 341a2c99c87ce6f62c6f4423fa641a39f0966bff Mon Sep 17 00:00:00 2001
From: Nitin Gote
Date: Thu, 12 Mar 2026 21:32:45 +0530
Subject: drm/xe/uapi: Fix kernel-doc for DRM_XE_VM_BIND_FLAG_DECOMPRESS

There is a kernel-doc warning for DRM_XE_VM_BIND_FLAG_DECOMPRESS:

./include/uapi/drm/xe_drm.h:1060: WARNING: Block quote ends without a blank line; unexpected unindent.

Fix the warning by adding the missing '%' prefix to
DRM_XE_VM_BIND_FLAG_DECOMPRESS in the kernel-doc list entry for
struct drm_xe_vm_bind_op.

Fixes: 2270bd7124f4 ("drm/xe: add VM_BIND DECOMPRESS uapi flag")
Reported-by: kernel test robot
Closes: https://lore.kernel.org/oe-kbuild-all/202603121515.gEMrFlTL-lkp@intel.com/
Cc: Matthew Auld
Signed-off-by: Nitin Gote
Reviewed-by: Matthew Auld
Link: https://patch.msgid.link/20260312160244.809849-2-nitin.r.gote@intel.com
Signed-off-by: Tejas Upadhyay
---
 include/uapi/drm/xe_drm.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 0497b85fa12a..f8b2afb20540 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -1057,7 +1057,7 @@ struct drm_xe_vm_destroy {
  * not invoke autoreset. Neither will stack variables going out of scope.
  * Therefore it's recommended to always explicitly reset the madvises when
  * freeing the memory backing a region used in a &DRM_IOCTL_XE_MADVISE call.
- * - DRM_XE_VM_BIND_FLAG_DECOMPRESS - Request on-device decompression for a MAP.
+ * - %DRM_XE_VM_BIND_FLAG_DECOMPRESS - Request on-device decompression for a MAP.
  *   When set on a MAP bind operation, request the driver schedule an on-device
  *   in-place decompression (via the migrate/resolve path) for the GPU mapping
  *   created by this bind. Only valid for DRM_XE_VM_BIND_OP_MAP; usage on
-- 
cgit v1.2.3


From 2e03c0c5c59a086df534e15ddde03cb33bc475c4 Mon Sep 17 00:00:00 2001
From: Francois Dugast
Date: Thu, 12 Mar 2026 20:20:12 +0100
Subject: drm/pagemap: Add helper to access zone_device_data

This new helper helps ensure that all accesses to zone_device_data use
the correct API, whether the page is part of a folio or not.

v2:
- Move to drm_pagemap.h, stick to folio_zone_device_data (Matthew Brost)
- Return struct drm_pagemap_zdd * (Matthew Brost)
v3:
- Add stub for !CONFIG_ZONE_DEVICE (CI)

Cc: Andrew Morton
Cc: David Hildenbrand
Cc: Lorenzo Stoakes
Cc: Liam R.
Howlett Cc: Vlastimil Babka Cc: Mike Rapoport Cc: Suren Baghdasaryan Cc: Michal Hocko Cc: Zi Yan Cc: Alistair Popple Cc: Balbir Singh Cc: linux-mm@kvack.org Suggested-by: Matthew Brost Reviewed-by: Matthew Brost Signed-off-by: Francois Dugast Signed-off-by: Matthew Brost Link: https://patch.msgid.link/20260312192126.2024853-3-francois.dugast@intel.com --- drivers/gpu/drm/drm_gpusvm.c | 7 +++++-- drivers/gpu/drm/drm_pagemap.c | 21 ++++++++++++--------- include/drm/drm_pagemap.h | 21 +++++++++++++++++++++ 3 files changed, 38 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index 35dd07297dd0..4b928fda5b12 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -1488,12 +1488,15 @@ map_pages: order = drm_gpusvm_hmm_pfn_to_order(pfns[i], i, npages); if (is_device_private_page(page) || is_device_coherent_page(page)) { + struct drm_pagemap_zdd *__zdd = + drm_pagemap_page_zone_device_data(page); + if (!ctx->allow_mixed && - zdd != page->zone_device_data && i > 0) { + zdd != __zdd && i > 0) { err = -EOPNOTSUPP; goto err_unmap; } - zdd = page->zone_device_data; + zdd = __zdd; if (pagemap != page_pgmap(page)) { if (pagemap) { err = -EOPNOTSUPP; diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c index f453a12b6a8e..733a3857947c 100644 --- a/drivers/gpu/drm/drm_pagemap.c +++ b/drivers/gpu/drm/drm_pagemap.c @@ -252,7 +252,7 @@ static int drm_pagemap_migrate_map_pages(struct device *dev, order = folio_order(folio); if (is_device_private_page(page)) { - struct drm_pagemap_zdd *zdd = page->zone_device_data; + struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page); struct drm_pagemap *dpagemap = zdd->dpagemap; struct drm_pagemap_addr addr; @@ -323,7 +323,7 @@ static void drm_pagemap_migrate_unmap_pages(struct device *dev, goto next; if (is_zone_device_page(page)) { - struct drm_pagemap_zdd *zdd = page->zone_device_data; + struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page); struct drm_pagemap *dpagemap = zdd->dpagemap; dpagemap->ops->device_unmap(dpagemap, dev, &pagemap_addr[i]); @@ -601,7 +601,8 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation, pages[i] = NULL; if (src_page && is_device_private_page(src_page)) { - struct drm_pagemap_zdd *src_zdd = src_page->zone_device_data; + struct drm_pagemap_zdd *src_zdd = + drm_pagemap_page_zone_device_data(src_page); if (page_pgmap(src_page) == pagemap && !mdetails->can_migrate_same_pagemap) { @@ -723,8 +724,8 @@ static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas, goto next; if (fault_page) { - if (src_page->zone_device_data != - fault_page->zone_device_data) + if (drm_pagemap_page_zone_device_data(src_page) != + drm_pagemap_page_zone_device_data(fault_page)) goto next; } @@ -1065,7 +1066,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas, void *buf; int i, err = 0; - zdd = page->zone_device_data; + zdd = drm_pagemap_page_zone_device_data(page); if (time_before64(get_jiffies_64(), zdd->devmem_allocation->timeslice_expiration)) return 0; @@ -1148,7 +1149,9 @@ err_out: */ static void drm_pagemap_folio_free(struct folio *folio) { - drm_pagemap_zdd_put(folio->page.zone_device_data); + struct page *page = folio_page(folio, 0); + + drm_pagemap_zdd_put(drm_pagemap_page_zone_device_data(page)); } /** @@ -1164,7 +1167,7 @@ static void drm_pagemap_folio_free(struct folio *folio) */ static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault 
*vmf)
 {
-	struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;
+	struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(vmf->page);
 	int err;
 
 	err = __drm_pagemap_migrate_to_ram(vmf->vma,
@@ -1230,7 +1233,7 @@ EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
  */
 struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
 {
-	struct drm_pagemap_zdd *zdd = page->zone_device_data;
+	struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
 
 	return zdd->devmem_allocation->dpagemap;
 }
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index c848f578e3da..75e6ca58922d 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -4,6 +4,7 @@
 #include <linux/dma-direction.h>
 #include <linux/hmm.h>
+#include <linux/memremap.h>
 #include <linux/types.h>
 
 #define NR_PAGES(order) (1U << (order))
 
@@ -367,6 +368,26 @@ void drm_pagemap_destroy(struct drm_pagemap *dpagemap, bool is_atomic_or_reclaim
 
 int drm_pagemap_reinit(struct drm_pagemap *dpagemap);
 
+/**
+ * drm_pagemap_page_zone_device_data() - Page to zone_device_data
+ * @page: Pointer to the page
+ *
+ * Return: Page's zone_device_data
+ */
+static inline struct drm_pagemap_zdd *drm_pagemap_page_zone_device_data(struct page *page)
+{
+	struct folio *folio = page_folio(page);
+
+	return folio_zone_device_data(folio);
+}
+
+#else
+
+static inline struct drm_pagemap_zdd *drm_pagemap_page_zone_device_data(struct page *page)
+{
+	return NULL;
+}
+
 #endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
 
 #endif
-- 
cgit v1.2.3


From 4f39a194d41e6b8cb61a91a7bb01b17be59a7d73 Mon Sep 17 00:00:00 2001
From: Tejas Upadhyay
Date: Thu, 5 Mar 2026 17:49:06 +0530
Subject: drm/xe/xe3p_lpg: Restrict UAPI to enable L2 flush optimization
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Starting with xe3p_lpg, when this flag is set, the L2 flush optimization
feature controls whether L2 is in Persistent or Transient mode by
monitoring media activity. To enable the L2 flush optimization, include
the new feature flag GUC_CTL_ENABLE_L2FLUSH_OPT on Novalake platforms
when a media GT is detected.
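In condensed form, the gating condition (a restatement of the
guc_ctl_feature_flags() hunk below with each check annotated; all names
are as in the driver):

	if (GRAPHICS_VER(xe) >= 35 &&			/* Xe3p or newer graphics IP */
	    !IS_DGFX(xe) &&				/* integrated parts only */
	    xe_gt_is_media_type(guc_to_gt(guc)))	/* the media GT carries the flag */
		flags |= GUC_CTL_ENABLE_L2FLUSH_OPT;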
Tighten UAPI validation to restrict userptr, svm and dmabuf mappings to be either 2WAY or XA+1WAY V5(Thomas): logic correction V4(MattA): Modify uapi doc and commit V3(MattA): check valid op and pat_index value V2(MattA): validate dma-buf bos and madvise pat-index Acked-by: José Roberto de Souza Acked-by: Michal Mrozek Acked-by: Carl Zhang Reviewed-by: Thomas Hellström Reviewed-by: Matthew Auld Link: https://patch.msgid.link/20260305121902.1892593-9-tejas.upadhyay@intel.com Signed-off-by: Tejas Upadhyay --- drivers/gpu/drm/xe/xe_guc.c | 3 +++ drivers/gpu/drm/xe/xe_guc_fwif.h | 1 + drivers/gpu/drm/xe/xe_vm.c | 8 ++++++++ drivers/gpu/drm/xe/xe_vm_madvise.c | 23 +++++++++++++++++++++++ include/uapi/drm/xe_drm.h | 4 +++- 5 files changed, 38 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 576f3d500390..ccebb437e37f 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -98,6 +98,9 @@ static u32 guc_ctl_feature_flags(struct xe_guc *guc) if (xe_guc_using_main_gamctrl_queues(guc)) flags |= GUC_CTL_MAIN_GAMCTRL_QUEUES; + if (GRAPHICS_VER(xe) >= 35 && !IS_DGFX(xe) && xe_gt_is_media_type(guc_to_gt(guc))) + flags |= GUC_CTL_ENABLE_L2FLUSH_OPT; + return flags; } diff --git a/drivers/gpu/drm/xe/xe_guc_fwif.h b/drivers/gpu/drm/xe/xe_guc_fwif.h index bb8f71d38611..b73fae063fac 100644 --- a/drivers/gpu/drm/xe/xe_guc_fwif.h +++ b/drivers/gpu/drm/xe/xe_guc_fwif.h @@ -67,6 +67,7 @@ struct guc_update_exec_queue_policy { #define GUC_CTL_ENABLE_PSMI_LOGGING BIT(7) #define GUC_CTL_MAIN_GAMCTRL_QUEUES BIT(9) #define GUC_CTL_DISABLE_SCHEDULER BIT(14) +#define GUC_CTL_ENABLE_L2FLUSH_OPT BIT(15) #define GUC_CTL_DEBUG 3 #define GUC_LOG_VERBOSITY REG_GENMASK(1, 0) diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index c0d8f5db019d..e24436287786 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -3492,6 +3492,10 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm, op == DRM_XE_VM_BIND_OP_MAP_USERPTR) || XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE && op == DRM_XE_VM_BIND_OP_MAP_USERPTR) || + XE_IOCTL_DBG(xe, xe_device_is_l2_flush_optimized(xe) && + (op == DRM_XE_VM_BIND_OP_MAP_USERPTR || + is_cpu_addr_mirror) && + (pat_index != 19 && coh_mode != XE_COH_2WAY)) || XE_IOCTL_DBG(xe, comp_en && op == DRM_XE_VM_BIND_OP_MAP_USERPTR) || XE_IOCTL_DBG(xe, op == DRM_XE_VM_BIND_OP_MAP_USERPTR && @@ -3633,6 +3637,10 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo, if (XE_IOCTL_DBG(xe, bo->ttm.base.import_attach && comp_en)) return -EINVAL; + if (XE_IOCTL_DBG(xe, bo->ttm.base.import_attach && xe_device_is_l2_flush_optimized(xe) && + (pat_index != 19 && coh_mode != XE_COH_2WAY))) + return -EINVAL; + /* If a BO is protected it can only be mapped if the key is still valid */ if ((bind_flags & DRM_XE_VM_BIND_FLAG_CHECK_PXP) && xe_bo_is_protected(bo) && op != DRM_XE_VM_BIND_OP_UNMAP && op != DRM_XE_VM_BIND_OP_UNMAP_ALL) diff --git a/drivers/gpu/drm/xe/xe_vm_madvise.c b/drivers/gpu/drm/xe/xe_vm_madvise.c index 431be53be56f..e564b12c02d9 100644 --- a/drivers/gpu/drm/xe/xe_vm_madvise.c +++ b/drivers/gpu/drm/xe/xe_vm_madvise.c @@ -419,6 +419,7 @@ int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *fil struct xe_vmas_in_madvise_range madvise_range = {.addr = args->start, .range = args->range, }; struct xe_madvise_details details; + u16 pat_index, coh_mode; struct xe_vm *vm; struct drm_exec exec; int err, attr_type; @@ 
-455,6 +456,17 @@ int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
 	if (err || !madvise_range.num_vmas)
 		goto madv_fini;
 
+	if (args->type == DRM_XE_MEM_RANGE_ATTR_PAT) {
+		pat_index = array_index_nospec(args->pat_index.val, xe->pat.n_entries);
+		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
+		if (XE_IOCTL_DBG(xe, madvise_range.has_svm_userptr_vmas &&
+				 xe_device_is_l2_flush_optimized(xe) &&
+				 (pat_index != 19 && coh_mode != XE_COH_2WAY))) {
+			err = -EINVAL;
+			goto madv_fini;
+		}
+	}
+
 	if (madvise_range.has_bo_vmas) {
 		if (args->type == DRM_XE_MEM_RANGE_ATTR_ATOMIC) {
 			if (!check_bo_args_are_sane(vm, madvise_range.vmas,
@@ -472,6 +484,17 @@ int xe_vm_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
 
 			if (!bo)
 				continue;
+
+			if (args->type == DRM_XE_MEM_RANGE_ATTR_PAT) {
+				if (XE_IOCTL_DBG(xe, bo->ttm.base.import_attach &&
+						 xe_device_is_l2_flush_optimized(xe) &&
+						 (pat_index != 19 &&
+						  coh_mode != XE_COH_2WAY))) {
+					err = -EINVAL;
+					goto err_fini;
+				}
+			}
+
 			err = drm_exec_lock_obj(&exec, &bo->ttm.base);
 			drm_exec_retry_on_contention(&exec);
 			if (err)
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index f8b2afb20540..7014dde1c9c4 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -1114,7 +1114,9 @@ struct drm_xe_vm_bind_op {
	 * incoherent GT access is possible.
	 *
	 * Note: For userptr and externally imported dma-buf the kernel expects
-	 * either 1WAY or 2WAY for the @pat_index.
+	 * either 1WAY or 2WAY for the @pat_index. Starting from NVL-P, for
+	 * userptr, svm, madvise and externally imported dma-buf the kernel
+	 * expects either 2WAY, or 1WAY combined with XA, for the @pat_index.
	 *
	 * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
	 * on the @pat_index. For such mappings there is no actual memory being
-- 
cgit v1.2.3


From 2de36e3f72dae2035b2742ffe3355e43067a81ad Mon Sep 17 00:00:00 2001
From: Piotr Piórkowski
Date: Mon, 9 Mar 2026 16:24:48 +0100
Subject: drm/xe/pf: Add FLR_PREPARE state to VF control flow
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Our xe-vfio-pci component relies on confirmation from the PF that VF FLR
processing has finished, but due to notification latency on the HW/FW
side, the PF might not yet be aware of an already triggered VF FLR.

Update the VF state machine with a new FLR_PREPARE state that indicates
an imminent VF FLR notification, and treat it as the beginning of the
FLR sequence.

Also introduce a function that xe-vfio-pci should call to guarantee
correct synchronization.
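A minimal sketch of the call order expected from xe-vfio-pci
(hypothetical caller; the two helpers are the ones exported below,
error handling elided):

	/* Mark the FLR as pending before the PF learns of it via GuC. */
	xe_sriov_vfio_flr_prepare(xe, vfid);

	/* ... trigger FLR on the VF PCI function ... */

	/* Wait until the PF confirms that FLR processing has finished. */
	xe_sriov_vfio_wait_flr_done(xe, vfid);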
v2: move PREPARE into WIP, update commit msg (Michal) Signed-off-by: Piotr Piórkowski Co-developed-by: Michal Wajdeczko Signed-off-by: Michal Wajdeczko Reviewed-by: Michał Winiarski Link: https://patch.msgid.link/20260309152449.910636-2-piotr.piorkowski@intel.com Signed-off-by: Michał Winiarski --- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 78 ++++++++++++++++++----- drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h | 1 + drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h | 2 + drivers/gpu/drm/xe/xe_sriov_pf_control.c | 24 +++++++ drivers/gpu/drm/xe/xe_sriov_pf_control.h | 1 + drivers/gpu/drm/xe/xe_sriov_vfio.c | 1 + include/drm/intel/xe_sriov_vfio.h | 11 ++++ 7 files changed, 102 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index 5cb705c7ee7a..058585f063a9 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -171,6 +171,7 @@ static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit) case XE_GT_SRIOV_STATE_##_X: return #_X CASE2STR(WIP); CASE2STR(FLR_WIP); + CASE2STR(FLR_PREPARE); CASE2STR(FLR_SEND_START); CASE2STR(FLR_WAIT_GUC); CASE2STR(FLR_GUC_DONE); @@ -1486,11 +1487,15 @@ int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid) * The VF FLR state machine looks like:: * * (READY,PAUSED,STOPPED)<------------<--------------o - * | \ - * flr \ - * | \ - * ....V..........................FLR_WIP........... \ - * : \ : \ + * | | \ + * flr prepare \ + * | | \ + * ....V.............V............FLR_WIP........... \ + * : | | : \ + * : | FLR_PREPARE : | + * : | / : | + * : \ flr : | + * : \ / : | * : \ o----<----busy : | * : \ / / : | * : FLR_SEND_START---failed----->-----------o--->(FLR_FAILED)<---o @@ -1539,20 +1544,28 @@ static void pf_enter_vf_flr_send_start(struct xe_gt *gt, unsigned int vfid) pf_queue_vf(gt, vfid); } -static void pf_enter_vf_flr_wip(struct xe_gt *gt, unsigned int vfid) +static bool pf_exit_vf_flr_prepare(struct xe_gt *gt, unsigned int vfid) { - if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP)) { - xe_gt_sriov_dbg(gt, "VF%u FLR is already in progress\n", vfid); - return; - } + if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_PREPARE)) + return false; - pf_enter_vf_wip(gt, vfid); pf_enter_vf_flr_send_start(gt, vfid); + return true; +} + +static bool pf_enter_vf_flr_wip(struct xe_gt *gt, unsigned int vfid) +{ + if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP)) + return false; + + pf_enter_vf_wip(gt, vfid); + return true; } static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid) { if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP)) { + pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_PREPARE); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_FINISH); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO); pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA); @@ -1760,21 +1773,54 @@ static void pf_enter_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid) } /** - * xe_gt_sriov_pf_control_trigger_flr - Start a VF FLR sequence. + * xe_gt_sriov_pf_control_prepare_flr() - Notify PF that VF FLR request was issued. * @gt: the &xe_gt * @vfid: the VF identifier * + * This is an optional early notification path used to mark pending FLR before + * the GuC notifies the PF with a FLR event. + * * This function is for PF only. * * Return: 0 on success or a negative error code on failure. 
*/ -int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid) +int xe_gt_sriov_pf_control_prepare_flr(struct xe_gt *gt, unsigned int vfid) { - pf_enter_vf_flr_wip(gt, vfid); + if (!pf_enter_vf_flr_wip(gt, vfid)) + return -EALREADY; + pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_PREPARE); return 0; } +static int pf_begin_vf_flr(struct xe_gt *gt, unsigned int vfid) +{ + if (pf_enter_vf_flr_wip(gt, vfid)) { + pf_enter_vf_flr_send_start(gt, vfid); + return 0; + } + + if (pf_exit_vf_flr_prepare(gt, vfid)) + return 0; + + xe_gt_sriov_dbg(gt, "VF%u FLR is already in progress\n", vfid); + return -EALREADY; +} + +/** + * xe_gt_sriov_pf_control_trigger_flr - Start a VF FLR sequence. + * @gt: the &xe_gt + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid) +{ + return pf_begin_vf_flr(gt, vfid); +} + /** * xe_gt_sriov_pf_control_sync_flr() - Synchronize on the VF FLR checkpoint. * @gt: the &xe_gt @@ -1879,9 +1925,9 @@ static void pf_handle_vf_flr(struct xe_gt *gt, u32 vfid) if (needs_dispatch_flr(xe)) { for_each_gt(gtit, xe, gtid) - pf_enter_vf_flr_wip(gtit, vfid); + pf_begin_vf_flr(gtit, vfid); } else { - pf_enter_vf_flr_wip(gt, vfid); + pf_begin_vf_flr(gt, vfid); } } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h index c36c8767f3ad..23182a5c5fb8 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h @@ -27,6 +27,7 @@ int xe_gt_sriov_pf_control_process_restore_data(struct xe_gt *gt, unsigned int v int xe_gt_sriov_pf_control_trigger_restore_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_finish_restore_vf(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid); +int xe_gt_sriov_pf_control_prepare_flr(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid); int xe_gt_sriov_pf_control_sync_flr(struct xe_gt *gt, unsigned int vfid, bool sync); int xe_gt_sriov_pf_control_wait_flr(struct xe_gt *gt, unsigned int vfid); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h index 6027ba05a7f2..e78c59e08adf 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h @@ -15,6 +15,7 @@ * * @XE_GT_SRIOV_STATE_WIP: indicates that some operations are in progress. * @XE_GT_SRIOV_STATE_FLR_WIP: indicates that a VF FLR is in progress. + * @XE_GT_SRIOV_STATE_FLR_PREPARE: indicates that the PF received early VF FLR prepare notification. * @XE_GT_SRIOV_STATE_FLR_SEND_START: indicates that the PF wants to send a FLR START command. * @XE_GT_SRIOV_STATE_FLR_WAIT_GUC: indicates that the PF awaits for a response from the GuC. * @XE_GT_SRIOV_STATE_FLR_GUC_DONE: indicates that the PF has received a response from the GuC. 
@@ -56,6 +57,7 @@ enum xe_gt_sriov_control_bits { XE_GT_SRIOV_STATE_WIP = 1, XE_GT_SRIOV_STATE_FLR_WIP, + XE_GT_SRIOV_STATE_FLR_PREPARE, XE_GT_SRIOV_STATE_FLR_SEND_START, XE_GT_SRIOV_STATE_FLR_WAIT_GUC, XE_GT_SRIOV_STATE_FLR_GUC_DONE, diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_sriov_pf_control.c index ed4b9820b06e..15b4341d7f12 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.c @@ -123,6 +123,30 @@ int xe_sriov_pf_control_reset_vf(struct xe_device *xe, unsigned int vfid) return result; } +/** + * xe_sriov_pf_control_prepare_flr() - Notify PF that VF FLR prepare has started. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * This function is for PF only. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_control_prepare_flr(struct xe_device *xe, unsigned int vfid) +{ + struct xe_gt *gt; + unsigned int id; + int result = 0; + int err; + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_control_prepare_flr(gt, vfid); + result = result ? -EUCLEAN : err; + } + + return result; +} + /** * xe_sriov_pf_control_wait_flr() - Wait for a VF reset (FLR) to complete. * @xe: the &xe_device diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_sriov_pf_control.h index ef9f219b2109..74981a67db88 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf_control.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf_control.h @@ -12,6 +12,7 @@ int xe_sriov_pf_control_pause_vf(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_resume_vf(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_stop_vf(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_reset_vf(struct xe_device *xe, unsigned int vfid); +int xe_sriov_pf_control_prepare_flr(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_wait_flr(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_sync_flr(struct xe_device *xe, unsigned int vfid); int xe_sriov_pf_control_trigger_save_vf(struct xe_device *xe, unsigned int vfid); diff --git a/drivers/gpu/drm/xe/xe_sriov_vfio.c b/drivers/gpu/drm/xe/xe_sriov_vfio.c index 3da81af97b8b..00f96b0976d1 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vfio.c +++ b/drivers/gpu/drm/xe/xe_sriov_vfio.c @@ -42,6 +42,7 @@ _type xe_sriov_vfio_##_func(struct xe_device *xe, unsigned int vfid) \ EXPORT_SYMBOL_FOR_MODULES(xe_sriov_vfio_##_func, "xe-vfio-pci") DEFINE_XE_SRIOV_VFIO_FUNCTION(int, wait_flr_done, control_wait_flr); +DEFINE_XE_SRIOV_VFIO_FUNCTION(int, flr_prepare, control_prepare_flr); DEFINE_XE_SRIOV_VFIO_FUNCTION(int, suspend_device, control_pause_vf); DEFINE_XE_SRIOV_VFIO_FUNCTION(int, resume_device, control_resume_vf); DEFINE_XE_SRIOV_VFIO_FUNCTION(int, stop_copy_enter, control_trigger_save_vf); diff --git a/include/drm/intel/xe_sriov_vfio.h b/include/drm/intel/xe_sriov_vfio.h index e9814e8149fd..27c224a70e6f 100644 --- a/include/drm/intel/xe_sriov_vfio.h +++ b/include/drm/intel/xe_sriov_vfio.h @@ -27,6 +27,17 @@ struct xe_device *xe_sriov_vfio_get_pf(struct pci_dev *pdev); */ bool xe_sriov_vfio_migration_supported(struct xe_device *xe); +/** + * xe_sriov_vfio_flr_prepare() - Notify PF that VF FLR prepare has started. + * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf() + * @vfid: the VF identifier (can't be 0) + * + * This function marks VF FLR as pending before PF receives GuC FLR event. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_sriov_vfio_flr_prepare(struct xe_device *xe, unsigned int vfid); + /** * xe_sriov_vfio_wait_flr_done() - Wait for VF FLR completion. * @xe: the PF &xe_device obtained by calling xe_sriov_vfio_get_pf() -- cgit v1.2.3 From 716c11d462c598328edca2565e8d25457c6ee3d6 Mon Sep 17 00:00:00 2001 From: Jonathan Cavitt Date: Tue, 24 Mar 2026 15:29:38 +0000 Subject: drm/xe/uapi: Define drm_xe_vm_get_property MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add initial declarations for the drm_xe_vm_get_property ioctl. v2: - Expand kernel docs for drm_xe_vm_get_property (Jianxun) v3: - Remove address type external definitions (Jianxun) - Add fault type to xe_drm_fault struct (Jianxun) v4: - Remove engine class and instance (Ivan) v5: - Add declares for fault type, access type, and fault level (Matt Brost, Ivan) v6: - Fix inconsistent use of whitespace in defines v7: - Rebase and refactor (jcavitt) v8: - Rebase (jcavitt) v9: - Clarify address is canonical (José) v10: - s/uAPI/Link in the commit log links Link: https://github.com/intel/compute-runtime/pull/878 Signed-off-by: Jonathan Cavitt Reviewed-by: Shuicheng Lin Acked-by: Matthew Brost Acked-by: Ivan Briano Acked-by: José Roberto de Souza Cc: Zhang Jianxun Cc: Ivan Briano Cc: Matthew Brost Signed-off-by: Matthew Brost Link: https://patch.msgid.link/20260324152935.72444-8-jonathan.cavitt@intel.com --- include/uapi/drm/xe_drm.h | 86 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) (limited to 'include') diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index 7014dde1c9c4..6c99514a85e1 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -83,6 +83,7 @@ extern "C" { * - &DRM_IOCTL_XE_OBSERVATION * - &DRM_IOCTL_XE_MADVISE * - &DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS + * - &DRM_IOCTL_XE_VM_GET_PROPERTY */ /* @@ -107,6 +108,7 @@ extern "C" { #define DRM_XE_MADVISE 0x0c #define DRM_XE_VM_QUERY_MEM_RANGE_ATTRS 0x0d #define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x0e +#define DRM_XE_VM_GET_PROPERTY 0x0f /* Must be kept compact -- no holes */ @@ -125,6 +127,7 @@ extern "C" { #define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise) #define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_RANGE_ATTRS, struct drm_xe_vm_query_mem_range_attr) #define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property) +#define DRM_IOCTL_XE_VM_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_GET_PROPERTY, struct drm_xe_vm_get_property) /** * DOC: Xe IOCTL Extensions @@ -1263,6 +1266,89 @@ struct drm_xe_vm_bind { __u64 reserved[2]; }; +/** struct xe_vm_fault - Describes faults for %DRM_XE_VM_GET_PROPERTY_FAULTS */ +struct xe_vm_fault { + /** @address: Canonical address of the fault */ + __u64 address; + /** @address_precision: Precision of faulted address */ + __u32 address_precision; + /** @access_type: Type of address access that resulted in fault */ +#define FAULT_ACCESS_TYPE_READ 0 +#define FAULT_ACCESS_TYPE_WRITE 1 +#define FAULT_ACCESS_TYPE_ATOMIC 2 + __u8 access_type; + /** @fault_type: Type of fault reported */ +#define FAULT_TYPE_NOT_PRESENT 0 +#define FAULT_TYPE_WRITE_ACCESS 1 +#define FAULT_TYPE_ATOMIC_ACCESS 2 + __u8 fault_type; + /** @fault_level: fault level of the fault */ +#define FAULT_LEVEL_PTE 0 +#define FAULT_LEVEL_PDE 1 +#define FAULT_LEVEL_PDP 2 +#define FAULT_LEVEL_PML4 3 +#define 
FAULT_LEVEL_PML5 4
+	__u8 fault_level;
+	/** @pad: MBZ */
+	__u8 pad;
+	/** @reserved: MBZ */
+	__u64 reserved[4];
+};
+
+/**
+ * struct drm_xe_vm_get_property - Input of &DRM_IOCTL_XE_VM_GET_PROPERTY
+ *
+ * The user provides a VM and a property to query among
+ * DRM_XE_VM_GET_PROPERTY_*, setting them in the vm_id and property
+ * members, respectively. This determines both the VM to query and the
+ * property to report.
+ *
+ * If size is set to 0, the driver fills it with the required size for the
+ * requested property; the user is then expected to allocate that much
+ * memory and provide a pointer to it in the data member. For some
+ * properties the required size may be zero, in which case the value of
+ * the property is saved to the value member and size remains zero on
+ * return.
+ *
+ * If size is not zero, then the IOCTL will attempt to copy the requested
+ * property into the data member.
+ *
+ * The IOCTL will return -ENOENT if the VM could not be identified from
+ * the provided VM ID, or -EINVAL if the IOCTL fails for any other reason,
+ * such as an invalid size for the given property or a failure to copy the
+ * property data to the memory pointed to by the data member.
+ *
+ * The property member can be:
+ * - %DRM_XE_VM_GET_PROPERTY_FAULTS
+ */
+struct drm_xe_vm_get_property {
+	/** @extensions: Pointer to the first extension struct, if any */
+	__u64 extensions;
+
+	/** @vm_id: The ID of the VM to query the properties of */
+	__u32 vm_id;
+
+#define DRM_XE_VM_GET_PROPERTY_FAULTS	0
+	/** @property: property to get */
+	__u32 property;
+
+	/** @size: Size to allocate for @data */
+	__u32 size;
+
+	/** @pad: MBZ */
+	__u32 pad;
+
+	union {
+		/** @data: Pointer to user-defined array of flexible size and type */
+		__u64 data;
+		/** @value: Return value for scalar queries */
+		__u64 value;
+	};
+
+	/** @reserved: MBZ */
+	__u64 reserved[3];
+};
+
 /**
  * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
  *
-- 
cgit v1.2.3
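As an illustration of the two-call pattern described in the
drm_xe_vm_get_property kernel-doc above, a userspace sketch (hypothetical
code, not part of the series; assumes fd is an open Xe DRM file
descriptor and vm_id a valid VM ID, error handling elided):

	struct drm_xe_vm_get_property args = {
		.vm_id = vm_id,
		.property = DRM_XE_VM_GET_PROPERTY_FAULTS,
		.size = 0,	/* first call: query the required size */
	};

	ioctl(fd, DRM_IOCTL_XE_VM_GET_PROPERTY, &args);	/* driver fills args.size */

	struct xe_vm_fault *faults = malloc(args.size);

	args.data = (__u64)(uintptr_t)faults;	/* second call: copy out the faults */
	ioctl(fd, DRM_IOCTL_XE_VM_GET_PROPERTY, &args);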