// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
* Copyright © 2025 Intel Corporation
*/
#include <linux/slab.h>
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_pagemap.h>
#include <drm/drm_pagemap_util.h>
#include <drm/drm_print.h>
/**
* struct drm_pagemap_cache - Lookup structure for pagemaps
*
 * Structure to keep track of active (refcount > 0) and inactive
 * (refcount == 0) pagemaps. An inactive pagemap can be made active
 * again by waiting for the @queued completion (indicating that the
 * pagemap has been put on the @shrinker's list of shrinkable
 * pagemaps), and then successfully removing it from the @shrinker's
* list. The latter may fail if the shrinker is already in the
* process of freeing the pagemap. A struct drm_pagemap_cache can
* hold a single struct drm_pagemap.
*/
struct drm_pagemap_cache {
/** @lookup_mutex: Mutex making the lookup process atomic */
struct mutex lookup_mutex;
/** @lock: Lock protecting the @dpagemap pointer */
spinlock_t lock;
/** @shrinker: Pointer to the shrinker used for this cache. Immutable. */
struct drm_pagemap_shrinker *shrinker;
/** @dpagemap: Non-refcounted pointer to the drm_pagemap */
struct drm_pagemap *dpagemap;
/**
* @queued: Signals when an inactive drm_pagemap has been put on
* @shrinker's list.
*/
struct completion queued;
};
/**
* struct drm_pagemap_shrinker - Shrinker to remove unused pagemaps
*/
struct drm_pagemap_shrinker {
/** @drm: Pointer to the drm device. */
struct drm_device *drm;
/** @lock: Spinlock to protect the @dpagemaps list. */
spinlock_t lock;
/** @dpagemaps: List of unused dpagemaps. */
struct list_head dpagemaps;
/** @num_dpagemaps: Number of unused dpagemaps in @dpagemaps. */
atomic_t num_dpagemaps;
/** @shrink: Pointer to the struct shrinker. */
struct shrinker *shrink;
};
static bool drm_pagemap_shrinker_cancel(struct drm_pagemap *dpagemap);
static void drm_pagemap_cache_fini(void *arg)
{
struct drm_pagemap_cache *cache = arg;
struct drm_pagemap *dpagemap;
drm_dbg(cache->shrinker->drm, "Destroying dpagemap cache.\n");
spin_lock(&cache->lock);
dpagemap = cache->dpagemap;
if (!dpagemap) {
spin_unlock(&cache->lock);
goto out;
}
	if (drm_pagemap_shrinker_cancel(dpagemap)) {
		cache->dpagemap = NULL;
		spin_unlock(&cache->lock);
		drm_pagemap_destroy(dpagemap, false);
	} else {
		/* The shrinker is already freeing the pagemap. */
		spin_unlock(&cache->lock);
	}
out:
mutex_destroy(&cache->lookup_mutex);
kfree(cache);
}
/**
* drm_pagemap_cache_create_devm() - Create a drm_pagemap_cache
* @shrinker: Pointer to a struct drm_pagemap_shrinker.
*
* Create a device-managed drm_pagemap cache. The cache is automatically
* destroyed on struct device removal, at which point any *inactive*
 * drm_pagemaps are destroyed.
*
* Return: Pointer to a struct drm_pagemap_cache on success. Error pointer
* on failure.
*/
struct drm_pagemap_cache *drm_pagemap_cache_create_devm(struct drm_pagemap_shrinker *shrinker)
{
	struct drm_pagemap_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
int err;
if (!cache)
return ERR_PTR(-ENOMEM);
mutex_init(&cache->lookup_mutex);
spin_lock_init(&cache->lock);
cache->shrinker = shrinker;
init_completion(&cache->queued);
err = devm_add_action_or_reset(shrinker->drm->dev, drm_pagemap_cache_fini, cache);
if (err)
return ERR_PTR(err);
return cache;
}
EXPORT_SYMBOL(drm_pagemap_cache_create_devm);
/**
* DOC: Cache lookup
*
 * Cache lookup should be done under a locked mutex, so that a
 * failed drm_pagemap_get_from_cache() and a following
 * drm_pagemap_cache_set_pagemap() are carried out as an atomic
 * operation WRT other lookups. Otherwise, racing lookups may
 * unnecessarily create pagemaps concurrently to fulfill a
 * failed lookup. The API provides two functions to take and release
 * this lock, drm_pagemap_cache_lock_lookup() and
 * drm_pagemap_cache_unlock_lookup(), and they should be used in the
 * following way:
*
* .. code-block:: c
*
 *	err = drm_pagemap_cache_lock_lookup(cache);
 *	if (err)
 *		return ERR_PTR(err);
 *
 *	dpagemap = drm_pagemap_get_from_cache(cache);
 *	if (dpagemap)
 *		goto out_unlock;
 *
 *	dpagemap = driver_create_new_dpagemap();
 *	if (!IS_ERR(dpagemap))
 *		drm_pagemap_cache_set_pagemap(cache, dpagemap);
 *
 * out_unlock:
 *	drm_pagemap_cache_unlock_lookup(cache);
*/
/**
* drm_pagemap_cache_lock_lookup() - Lock a drm_pagemap_cache for lookup.
* @cache: The drm_pagemap_cache to lock.
*
* Return: %-EINTR if interrupted while blocking. %0 otherwise.
*/
int drm_pagemap_cache_lock_lookup(struct drm_pagemap_cache *cache)
{
return mutex_lock_interruptible(&cache->lookup_mutex);
}
EXPORT_SYMBOL(drm_pagemap_cache_lock_lookup);
/**
* drm_pagemap_cache_unlock_lookup() - Unlock a drm_pagemap_cache after lookup.
* @cache: The drm_pagemap_cache to unlock.
*/
void drm_pagemap_cache_unlock_lookup(struct drm_pagemap_cache *cache)
{
mutex_unlock(&cache->lookup_mutex);
}
EXPORT_SYMBOL(drm_pagemap_cache_unlock_lookup);
/**
* drm_pagemap_get_from_cache() - Lookup of drm_pagemaps.
* @cache: The cache used for lookup.
*
* If an active pagemap is present in the cache, it is immediately returned.
* If an inactive pagemap is present, it's removed from the shrinker list and
* an attempt is made to make it active.
 * If no pagemap is present, or the attempt to make it active fails, %NULL is returned
* to indicate to the caller to create a new drm_pagemap and insert it into
* the cache.
*
* Return: A reference-counted pointer to a drm_pagemap if successful. An error
* pointer if an error occurred, or %NULL if no drm_pagemap was found and
* the caller should insert a new one.
*/
struct drm_pagemap *drm_pagemap_get_from_cache(struct drm_pagemap_cache *cache)
{
struct drm_pagemap *dpagemap;
int err;
lockdep_assert_held(&cache->lookup_mutex);
retry:
spin_lock(&cache->lock);
dpagemap = cache->dpagemap;
if (drm_pagemap_get_unless_zero(dpagemap)) {
spin_unlock(&cache->lock);
return dpagemap;
}
if (!dpagemap) {
spin_unlock(&cache->lock);
return NULL;
}
if (!try_wait_for_completion(&cache->queued)) {
spin_unlock(&cache->lock);
err = wait_for_completion_interruptible(&cache->queued);
if (err)
return ERR_PTR(err);
goto retry;
}
if (drm_pagemap_shrinker_cancel(dpagemap)) {
cache->dpagemap = NULL;
spin_unlock(&cache->lock);
err = drm_pagemap_reinit(dpagemap);
if (err) {
drm_pagemap_destroy(dpagemap, false);
return ERR_PTR(err);
}
drm_pagemap_cache_set_pagemap(cache, dpagemap);
} else {
cache->dpagemap = NULL;
spin_unlock(&cache->lock);
dpagemap = NULL;
}
return dpagemap;
}
EXPORT_SYMBOL(drm_pagemap_get_from_cache);
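
/*
 * Example (a sketch, not part of the API): a complete lookup helper
 * following the pattern from the "Cache lookup" DOC section above, with
 * error handling included. driver_get_dpagemap() and
 * driver_create_new_dpagemap() are hypothetical driver functions:
 *
 *	struct drm_pagemap *driver_get_dpagemap(struct drm_pagemap_cache *cache)
 *	{
 *		struct drm_pagemap *dpagemap;
 *		int err;
 *
 *		err = drm_pagemap_cache_lock_lookup(cache);
 *		if (err)
 *			return ERR_PTR(err);
 *
 *		dpagemap = drm_pagemap_get_from_cache(cache);
 *		if (!dpagemap) {
 *			dpagemap = driver_create_new_dpagemap();
 *			if (!IS_ERR(dpagemap))
 *				drm_pagemap_cache_set_pagemap(cache, dpagemap);
 *		}
 *
 *		drm_pagemap_cache_unlock_lookup(cache);
 *		return dpagemap;
 *	}
 *
 * Note that drm_pagemap_get_from_cache() may also return an error pointer,
 * which the helper above passes straight through to the caller.
 */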
/**
* drm_pagemap_cache_set_pagemap() - Assign a drm_pagemap to a drm_pagemap_cache
* @cache: The cache to assign the drm_pagemap to.
* @dpagemap: The drm_pagemap to assign.
*
 * This function must only be called to populate a drm_pagemap_cache
 * after a call to drm_pagemap_get_from_cache() has returned %NULL.
*/
void drm_pagemap_cache_set_pagemap(struct drm_pagemap_cache *cache, struct drm_pagemap *dpagemap)
{
struct drm_device *drm = dpagemap->drm;
lockdep_assert_held(&cache->lookup_mutex);
spin_lock(&cache->lock);
dpagemap->cache = cache;
swap(cache->dpagemap, dpagemap);
reinit_completion(&cache->queued);
spin_unlock(&cache->lock);
drm_WARN_ON(drm, !!dpagemap);
}
EXPORT_SYMBOL(drm_pagemap_cache_set_pagemap);
/**
* drm_pagemap_get_from_cache_if_active() - Quick lookup of active drm_pagemaps
* @cache: The cache to lookup from.
*
 * Function that should be used to look up a drm_pagemap that is already
 * active (refcount > 0).
*
* Return: A pointer to the cache's drm_pagemap if it's active; %NULL otherwise.
*/
struct drm_pagemap *drm_pagemap_get_from_cache_if_active(struct drm_pagemap_cache *cache)
{
struct drm_pagemap *dpagemap;
spin_lock(&cache->lock);
dpagemap = drm_pagemap_get_unless_zero(cache->dpagemap);
spin_unlock(&cache->lock);
return dpagemap;
}
EXPORT_SYMBOL(drm_pagemap_get_from_cache_if_active);
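
/*
 * Example (a sketch under stated assumptions): a caller that only wants a
 * peer's pagemap when using it is cheap, i.e. when it doesn't require
 * creating or reviving one. driver_migrate_to() is a hypothetical driver
 * function; drm_pagemap_put() is the regular drm_pagemap reference drop:
 *
 *	dpagemap = drm_pagemap_get_from_cache_if_active(peer_cache);
 *	if (!dpagemap)
 *		return -EBUSY;
 *
 *	err = driver_migrate_to(dpagemap, start, end);
 *	drm_pagemap_put(dpagemap);
 *	return err;
 */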
static bool drm_pagemap_shrinker_cancel(struct drm_pagemap *dpagemap)
{
struct drm_pagemap_cache *cache = dpagemap->cache;
struct drm_pagemap_shrinker *shrinker = cache->shrinker;
spin_lock(&shrinker->lock);
if (list_empty(&dpagemap->shrink_link)) {
spin_unlock(&shrinker->lock);
return false;
}
list_del_init(&dpagemap->shrink_link);
atomic_dec(&shrinker->num_dpagemaps);
spin_unlock(&shrinker->lock);
return true;
}
#ifdef CONFIG_PROVE_LOCKING
/**
* drm_pagemap_shrinker_might_lock() - lockdep test for drm_pagemap_shrinker_add()
* @dpagemap: The drm pagemap.
*
 * The drm_pagemap_shrinker_add() function takes the shrinker's spinlock.
 * This function can be called in code-paths that might end up calling
 * drm_pagemap_shrinker_add(), to make lockdep aware of the lock dependency
 * and detect any locking problems early.
*/
void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap)
{
int idx;
if (drm_dev_enter(dpagemap->drm, &idx)) {
struct drm_pagemap_cache *cache = dpagemap->cache;
if (cache)
might_lock(&cache->shrinker->lock);
drm_dev_exit(idx);
}
}
#endif
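
/*
 * Example (a sketch): calling drm_pagemap_shrinker_might_lock() early in a
 * path whose final drm_pagemap_put() may end up in drm_pagemap_shrinker_add(),
 * so that lockdep sees the dependency even when the last reference isn't
 * actually dropped here. driver_vm_fini() and driver_vm_teardown() are
 * hypothetical driver functions, and a no-op stub is assumed when
 * CONFIG_PROVE_LOCKING is disabled:
 *
 *	static void driver_vm_fini(struct driver_vm *vm)
 *	{
 *		drm_pagemap_shrinker_might_lock(vm->dpagemap);
 *		driver_vm_teardown(vm);
 *		drm_pagemap_put(vm->dpagemap);
 *	}
 */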
/**
 * drm_pagemap_shrinker_add() - Add a drm_pagemap to the shrinker list or destroy it
* @dpagemap: The drm_pagemap.
*
* If @dpagemap is associated with a &struct drm_pagemap_cache AND the
* struct device backing the drm device is still alive, add @dpagemap to
* the &struct drm_pagemap_shrinker list of shrinkable drm_pagemaps.
*
* Otherwise destroy the pagemap directly using drm_pagemap_destroy().
*
* This is an internal function which is not intended to be exposed to drivers.
*/
void drm_pagemap_shrinker_add(struct drm_pagemap *dpagemap)
{
struct drm_pagemap_cache *cache;
struct drm_pagemap_shrinker *shrinker;
int idx;
/*
* The pagemap cache and shrinker are disabled at
 * PCI device remove time. After that, dpagemaps
* are freed directly.
*/
if (!drm_dev_enter(dpagemap->drm, &idx))
goto out_no_cache;
cache = dpagemap->cache;
if (!cache) {
drm_dev_exit(idx);
goto out_no_cache;
}
shrinker = cache->shrinker;
spin_lock(&shrinker->lock);
list_add_tail(&dpagemap->shrink_link, &shrinker->dpagemaps);
atomic_inc(&shrinker->num_dpagemaps);
spin_unlock(&shrinker->lock);
complete_all(&cache->queued);
drm_dev_exit(idx);
return;
out_no_cache:
drm_pagemap_destroy(dpagemap, true);
}
static unsigned long
drm_pagemap_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
struct drm_pagemap_shrinker *shrinker = shrink->private_data;
unsigned long count = atomic_read(&shrinker->num_dpagemaps);
return count ? : SHRINK_EMPTY;
}
static unsigned long
drm_pagemap_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct drm_pagemap_shrinker *shrinker = shrink->private_data;
struct drm_pagemap *dpagemap;
struct drm_pagemap_cache *cache;
unsigned long nr_freed = 0;
sc->nr_scanned = 0;
spin_lock(&shrinker->lock);
do {
dpagemap = list_first_entry_or_null(&shrinker->dpagemaps, typeof(*dpagemap),
shrink_link);
if (!dpagemap)
break;
atomic_dec(&shrinker->num_dpagemaps);
list_del_init(&dpagemap->shrink_link);
spin_unlock(&shrinker->lock);
sc->nr_scanned++;
nr_freed++;
cache = dpagemap->cache;
spin_lock(&cache->lock);
cache->dpagemap = NULL;
spin_unlock(&cache->lock);
drm_dbg(dpagemap->drm, "Shrinking dpagemap %p.\n", dpagemap);
drm_pagemap_destroy(dpagemap, true);
spin_lock(&shrinker->lock);
} while (sc->nr_scanned < sc->nr_to_scan);
spin_unlock(&shrinker->lock);
return sc->nr_scanned ? nr_freed : SHRINK_STOP;
}
static void drm_pagemap_shrinker_fini(void *arg)
{
struct drm_pagemap_shrinker *shrinker = arg;
drm_dbg(shrinker->drm, "Destroying dpagemap shrinker.\n");
drm_WARN_ON(shrinker->drm, !!atomic_read(&shrinker->num_dpagemaps));
shrinker_free(shrinker->shrink);
kfree(shrinker);
}
/**
* drm_pagemap_shrinker_create_devm() - Create and register a pagemap shrinker
* @drm: The drm device
*
* Create and register a pagemap shrinker that shrinks unused pagemaps
* and thereby reduces memory footprint.
* The shrinker is drm_device managed and unregisters itself when
* the drm device is removed.
*
 * Return: Pointer to a struct drm_pagemap_shrinker on success. Error
 * pointer on failure.
*/
struct drm_pagemap_shrinker *drm_pagemap_shrinker_create_devm(struct drm_device *drm)
{
struct drm_pagemap_shrinker *shrinker;
struct shrinker *shrink;
int err;
	shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);
if (!shrinker)
return ERR_PTR(-ENOMEM);
shrink = shrinker_alloc(0, "drm-drm_pagemap:%s", drm->unique);
if (!shrink) {
kfree(shrinker);
return ERR_PTR(-ENOMEM);
}
spin_lock_init(&shrinker->lock);
INIT_LIST_HEAD(&shrinker->dpagemaps);
shrinker->drm = drm;
shrinker->shrink = shrink;
shrink->count_objects = drm_pagemap_shrinker_count;
shrink->scan_objects = drm_pagemap_shrinker_scan;
shrink->private_data = shrinker;
shrinker_register(shrink);
err = devm_add_action_or_reset(drm->dev, drm_pagemap_shrinker_fini, shrinker);
if (err)
return ERR_PTR(err);
return shrinker;
}
EXPORT_SYMBOL(drm_pagemap_shrinker_create_devm);
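
/*
 * Example (a sketch, assuming a hypothetical driver structure my_device with
 * an embedded drm_device): device-managed setup of the shrinker and a cache
 * at driver init. Both are torn down automatically on device removal, in
 * reverse order of creation:
 *
 *	static int my_pagemap_init(struct my_device *mydev)
 *	{
 *		struct drm_pagemap_shrinker *shrinker;
 *
 *		shrinker = drm_pagemap_shrinker_create_devm(&mydev->drm);
 *		if (IS_ERR(shrinker))
 *			return PTR_ERR(shrinker);
 *
 *		mydev->dpagemap_cache = drm_pagemap_cache_create_devm(shrinker);
 *
 *		return PTR_ERR_OR_ZERO(mydev->dpagemap_cache);
 *	}
 */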
/**
* struct drm_pagemap_owner - Device interconnect group
* @kref: Reference count.
*
* A struct drm_pagemap_owner identifies a device interconnect group.
*/
struct drm_pagemap_owner {
struct kref kref;
};
static void drm_pagemap_owner_release(struct kref *kref)
{
kfree(container_of(kref, struct drm_pagemap_owner, kref));
}
/**
* drm_pagemap_release_owner() - Stop participating in an interconnect group
* @peer: Pointer to the struct drm_pagemap_peer used when joining the group
*
* Stop participating in an interconnect group. This function is typically
 * called when a pagemap is removed, to indicate that the pagemap no longer
 * needs to be considered when determining interconnect groups.
*/
void drm_pagemap_release_owner(struct drm_pagemap_peer *peer)
{
struct drm_pagemap_owner_list *owner_list = peer->list;
if (!owner_list)
return;
mutex_lock(&owner_list->lock);
list_del(&peer->link);
kref_put(&peer->owner->kref, drm_pagemap_owner_release);
peer->owner = NULL;
mutex_unlock(&owner_list->lock);
}
EXPORT_SYMBOL(drm_pagemap_release_owner);
/**
* typedef interconnect_fn - Callback function to identify fast interconnects
* @peer1: First endpoint.
 * @peer2: Second endpoint.
*
 * The function returns %true iff @peer1 and @peer2 have a fast interconnect.
 * Note that this relation is symmetrical; the function has no notion of
 * client and provider, which may not be sufficient in some cases. However,
 * since the callback is intended to guide the assignment of common pagemap
 * owners, supporting an asymmetric notion would also require changing the
 * convention that a common owner indicates a fast interconnect.
 *
 * Return: %true iff @peer1 and @peer2 have a fast interconnect. %false otherwise.
*/
typedef bool (*interconnect_fn)(struct drm_pagemap_peer *peer1, struct drm_pagemap_peer *peer2);
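
/*
 * Example (a sketch): an interconnect callback, assuming a hypothetical
 * struct my_peer that embeds the struct drm_pagemap_peer and a hypothetical
 * my_fast_link_exists() query:
 *
 *	static bool my_has_interconnect(struct drm_pagemap_peer *peer1,
 *					struct drm_pagemap_peer *peer2)
 *	{
 *		struct my_peer *p1 = container_of(peer1, struct my_peer, peer);
 *		struct my_peer *p2 = container_of(peer2, struct my_peer, peer);
 *
 *		return my_fast_link_exists(p1->dev, p2->dev);
 *	}
 */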
/**
* drm_pagemap_acquire_owner() - Join an interconnect group
* @peer: A struct drm_pagemap_peer keeping track of the device interconnect
* @owner_list: Pointer to the owner_list, keeping track of all interconnects
* @has_interconnect: Callback function to determine whether two peers have a
* fast local interconnect.
*
* Repeatedly calls @has_interconnect for @peer and other peers on @owner_list to
* determine a set of peers for which @peer has a fast interconnect. That set will
* have common &struct drm_pagemap_owner, and upon successful return, @peer::owner
* will point to that struct, holding a reference, and @peer will be registered in
 * @owner_list. If @peer doesn't have any fast interconnects to other peers, a
* new unique &struct drm_pagemap_owner will be allocated for it, and that
* may be shared with other peers that, at a later point, are determined to have
* a fast interconnect with @peer.
*
* When @peer no longer participates in an interconnect group,
* drm_pagemap_release_owner() should be called to drop the reference on the
* struct drm_pagemap_owner.
*
* Return: %0 on success, negative error code on failure.
*/
int drm_pagemap_acquire_owner(struct drm_pagemap_peer *peer,
struct drm_pagemap_owner_list *owner_list,
interconnect_fn has_interconnect)
{
struct drm_pagemap_peer *cur_peer;
struct drm_pagemap_owner *owner = NULL;
bool interconnect = false;
mutex_lock(&owner_list->lock);
might_alloc(GFP_KERNEL);
list_for_each_entry(cur_peer, &owner_list->peers, link) {
if (cur_peer->owner != owner) {
if (owner && interconnect)
break;
owner = cur_peer->owner;
interconnect = true;
}
if (interconnect && !has_interconnect(peer, cur_peer))
interconnect = false;
}
if (!interconnect) {
		owner = kmalloc(sizeof(*owner), GFP_KERNEL);
if (!owner) {
mutex_unlock(&owner_list->lock);
return -ENOMEM;
}
kref_init(&owner->kref);
list_add_tail(&peer->link, &owner_list->peers);
} else {
kref_get(&owner->kref);
list_add_tail(&peer->link, &cur_peer->link);
}
peer->owner = owner;
peer->list = owner_list;
mutex_unlock(&owner_list->lock);
return 0;
}
EXPORT_SYMBOL(drm_pagemap_acquire_owner);
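
/*
 * Example (a sketch): pairing drm_pagemap_acquire_owner() at pagemap creation
 * with drm_pagemap_release_owner() at removal, using a driver-wide owner list.
 * struct my_peer and my_has_interconnect() are the hypothetical helpers from
 * the sketch above:
 *
 *	static struct drm_pagemap_owner_list my_owner_list = {
 *		.lock = __MUTEX_INITIALIZER(my_owner_list.lock),
 *		.peers = LIST_HEAD_INIT(my_owner_list.peers),
 *	};
 *
 *	static int my_pagemap_create(struct my_peer *p)
 *	{
 *		return drm_pagemap_acquire_owner(&p->peer, &my_owner_list,
 *						 my_has_interconnect);
 *	}
 *
 *	static void my_pagemap_remove(struct my_peer *p)
 *	{
 *		drm_pagemap_release_owner(&p->peer);
 *	}
 *
 * Peers grouped together by my_has_interconnect() then share the same
 * &struct drm_pagemap_owner, which can be used as a common pagemap owner
 * to identify pages with fast local access.
 */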