summaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorThomas Hellström <thomas.hellstrom@linux.intel.com>2025-12-19 12:33:03 +0100
committerThomas Hellström <thomas.hellstrom@linux.intel.com>2025-12-23 09:37:33 +0100
commit77f14f2f2d73f6955b856e7c91ca9fb7046da191 (patch)
tree6612a0aa4e84b81e9a8b5e6f0dc2fd8f431f2de3 /include
parenta26084328ac40c12096ef01482a7520346379453 (diff)
downloadlinux-77f14f2f2d73f6955b856e7c91ca9fb7046da191.tar.gz
linux-77f14f2f2d73f6955b856e7c91ca9fb7046da191.zip
drm/pagemap: Add a drm_pagemap cache and shrinker
Pagemaps are costly to set up and tear down, and they consume a lot of system memory for the struct pages. Ideally they should be created only when needed. Add a caching mechanism to allow doing just that: Create the drm_pagemaps when needed for migration. Keep them around to avoid destruction and re-creation latencies and destroy inactive/unused drm_pagemaps on memory pressure using a shrinker. Only add the helper functions. They will be hooked up to the xe driver in the upcoming patch. v2: - Add lockdep checking for drm_pagemap_put(). (Matt Brost) - Add a copyright notice. (Matt Brost) Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Reviewed-by: Matthew Brost <matthew.brost@intel.com> Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> # For merging through drm-xe. Link: https://patch.msgid.link/20251219113320.183860-8-thomas.hellstrom@linux.intel.com
Diffstat (limited to 'include')
-rw-r--r--include/drm/drm_pagemap.h53
-rw-r--r--include/drm/drm_pagemap_util.h42
2 files changed, 92 insertions, 3 deletions
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index b3edcdde4454..78cc9011829f 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -10,6 +10,7 @@
struct dma_fence;
struct drm_pagemap;
+struct drm_pagemap_cache;
struct drm_pagemap_dev_hold;
struct drm_pagemap_zdd;
struct device;
@@ -125,6 +126,25 @@ struct drm_pagemap_ops {
unsigned long start, unsigned long end,
struct mm_struct *mm,
unsigned long timeslice_ms);
+ /**
+ * @destroy: Destroy the drm_pagemap and associated resources.
+ * @dpagemap: The drm_pagemap to destroy.
+ * @is_atomic_or_reclaim: The function may be called from
+ * atomic or reclaim context.
+ *
+ * The implementation should take care not to attempt to
+ * destroy resources that may already have been destroyed
+ * using devm_ callbacks, since this function may be called
+ * after the underlying struct device has been unbound.
+ * If the implementation defers the execution to a work item
+ * to avoid locking issues, then it must make sure the work
+ * items are flushed before module exit. If the destroy call
+ * happens after the provider's pci_remove() callback has
+ * been executed, a module reference and a drm device reference are
+ * held across the destroy callback.
+ */
+ void (*destroy)(struct drm_pagemap *dpagemap,
+ bool is_atomic_or_reclaim);
};
/**
@@ -136,6 +156,10 @@ struct drm_pagemap_ops {
* @pagemap: Pointer to the underlying dev_pagemap.
* @dev_hold: Pointer to a struct drm_pagemap_dev_hold for
* device referencing.
+ * @cache: Back-pointer to the &struct drm_pagemap_cache used for this
+ * &struct drm_pagemap. May be NULL if no cache is used.
+ * @shrink_link: Link into the shrinker's list of drm_pagemaps. Only
+ * used if also using a pagemap cache.
*/
struct drm_pagemap {
const struct drm_pagemap_ops *ops;
@@ -143,6 +167,8 @@ struct drm_pagemap {
struct drm_device *drm;
struct dev_pagemap *pagemap;
struct drm_pagemap_dev_hold *dev_hold;
+ struct drm_pagemap_cache *cache;
+ struct list_head shrink_link;
};
struct drm_pagemap_devmem;
@@ -217,6 +243,11 @@ struct drm_pagemap_devmem_ops {
struct dma_fence *pre_migrate_fence);
};
+int drm_pagemap_init(struct drm_pagemap *dpagemap,
+ struct dev_pagemap *pagemap,
+ struct drm_device *drm,
+ const struct drm_pagemap_ops *ops);
+
struct drm_pagemap *drm_pagemap_create(struct drm_device *drm,
struct dev_pagemap *pagemap,
const struct drm_pagemap_ops *ops);
@@ -235,9 +266,9 @@ static inline void drm_pagemap_put(struct drm_pagemap *dpagemap)
/**
* drm_pagemap_get() - Obtain a reference on a struct drm_pagemap
- * @dpagemap: Pointer to the struct drm_pagemap.
+ * @dpagemap: Pointer to the struct drm_pagemap, or NULL.
*
- * Return: Pointer to the struct drm_pagemap.
+ * Return: Pointer to the struct drm_pagemap, or NULL.
*/
static inline struct drm_pagemap *
drm_pagemap_get(struct drm_pagemap *dpagemap)
@@ -249,6 +280,20 @@ drm_pagemap_get(struct drm_pagemap *dpagemap)
}
/**
+ * drm_pagemap_get_unless_zero() - Obtain a reference on a struct drm_pagemap
+ * unless the current reference count is zero.
+ * @dpagemap: Pointer to the drm_pagemap or NULL.
+ *
+ * Return: A pointer to @dpagemap if the reference count was successfully
+ * incremented. NULL if @dpagemap was NULL, or its refcount was 0.
+ */
+static inline struct drm_pagemap * __must_check
+drm_pagemap_get_unless_zero(struct drm_pagemap *dpagemap)
+{
+ return (dpagemap && kref_get_unless_zero(&dpagemap->ref)) ? dpagemap : NULL;
+}
+
+/**
* struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
*
* @dev: Pointer to the device structure which device memory allocation belongs to
@@ -295,5 +340,7 @@ int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
struct mm_struct *mm,
unsigned long timeslice_ms);
-#endif
+void drm_pagemap_destroy(struct drm_pagemap *dpagemap, bool is_atomic_or_reclaim);
+int drm_pagemap_reinit(struct drm_pagemap *dpagemap);
+#endif
diff --git a/include/drm/drm_pagemap_util.h b/include/drm/drm_pagemap_util.h
new file mode 100644
index 000000000000..924244d5b899
--- /dev/null
+++ b/include/drm/drm_pagemap_util.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _DRM_PAGEMAP_UTIL_H_
+#define _DRM_PAGEMAP_UTIL_H_
+
+struct drm_device;
+struct drm_pagemap;
+struct drm_pagemap_cache;
+struct drm_pagemap_shrinker;
+
+void drm_pagemap_shrinker_add(struct drm_pagemap *dpagemap);
+
+int drm_pagemap_cache_lock_lookup(struct drm_pagemap_cache *cache);
+
+void drm_pagemap_cache_unlock_lookup(struct drm_pagemap_cache *cache);
+
+struct drm_pagemap_shrinker *drm_pagemap_shrinker_create_devm(struct drm_device *drm);
+
+struct drm_pagemap_cache *drm_pagemap_cache_create_devm(struct drm_pagemap_shrinker *shrinker);
+
+struct drm_pagemap *drm_pagemap_get_from_cache(struct drm_pagemap_cache *cache);
+
+void drm_pagemap_cache_set_pagemap(struct drm_pagemap_cache *cache, struct drm_pagemap *dpagemap);
+
+struct drm_pagemap *drm_pagemap_get_from_cache_if_active(struct drm_pagemap_cache *cache);
+
+#ifdef CONFIG_PROVE_LOCKING
+
+void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap);
+
+#else
+
+static inline void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap)
+{
+}
+
+#endif /* CONFIG_PROVE_LOCKING */
+
+#endif