Diffstat (limited to 'include/uapi/drm')
-rw-r--r--  include/uapi/drm/xe_drm.h  29
1 file changed, 28 insertions, 1 deletion
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index f62689ca861a..cac607a30f6d 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -811,6 +811,32 @@ struct drm_xe_gem_create {
/**
* struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
+ *
+ * The @flags can be:
+ * - %DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER - For the user to query a special
+ * offset for use in the mmap call. Writing to the returned mmap address
+ * generates a PCI memory barrier with low overhead (avoiding an IOCTL call
+ * as well as a write to VRAM, both of which would add overhead), acting
+ * like an MI_MEM_FENCE instruction.
+ *
+ * Note: The mmap size can be at most 4K, due to HW limitations. As a result,
+ * this interface is only supported on CPU architectures that support a 4K
+ * page size. The mmap_offset ioctl will detect this and gracefully return an
+ * error, in which case userspace is expected to fall back to a different
+ * method of triggering a barrier.
+ *
+ * Roughly the usage would be as follows:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_gem_mmap_offset mmo = {
+ * .handle = 0, // must be set to 0
+ * .flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
+ * };
+ *
+ * err = ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
+ * map = mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, mmo.offset);
+ * map[0] = 0xdeadbeef; // the write issues the barrier
*/
struct drm_xe_gem_mmap_offset {
/** @extensions: Pointer to the first extension struct, if any */
@@ -819,7 +845,8 @@ struct drm_xe_gem_mmap_offset {
/** @handle: Handle for the object being mapped. */
__u32 handle;
- /** @flags: Must be zero */
+#define DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER (1 << 0)
+ /** @flags: Flags, see above for possible values */
__u32 flags;
/** @offset: The fake offset to use for subsequent mmap call */
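
Beyond the kernel-doc snippet above, the full user-space flow, including the
error path where a fallback is expected, would look roughly like the sketch
below. It is not part of the patch: the render-node path, the <drm/xe_drm.h>
include location and the 4K mapping size are assumptions, and the choice of
fallback barrier mechanism is left to the application.

/*
 * Hypothetical user-space sketch (not part of the patch): query the PCI
 * barrier offset, map it, and issue a barrier by writing to the mapping.
 */
#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <drm/xe_drm.h> /* assumed install location of the uapi header */

int main(void)
{
	/* Assumed xe render node; real code would discover the device. */
	int fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct drm_xe_gem_mmap_offset mmo = {
		.handle = 0, /* must be 0 for the PCI barrier query */
		.flags = DRM_XE_MMAP_OFFSET_FLAG_PCI_BARRIER,
	};

	if (ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo)) {
		/*
		 * Unsupported (e.g. page size other than 4K): userspace is
		 * expected to fall back to another barrier mechanism here.
		 */
		perror("DRM_IOCTL_XE_GEM_MMAP_OFFSET");
		close(fd);
		return 1;
	}

	/* The barrier mapping is at most 4K; map a single 4K page. */
	volatile uint32_t *map = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED,
				      fd, mmo.offset);
	if (map == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	map[0] = 0xdeadbeef; /* the write itself issues the PCI barrier */

	munmap((void *)map, 4096);
	close(fd);
	return 0;
}

Using drmIoctl() from libdrm instead of the raw ioctl() call would add
automatic EINTR restart; the raw call is kept here only to stay close to the
kernel-doc example.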