author     Matthew Wilcox <willy@infradead.org>  2018-05-17 00:13:27 -0400
committer  Matthew Wilcox <willy@infradead.org>  2018-10-21 10:46:35 -0400
commit     a332125fc3223e1092f765db442b7afb9fd4ecde (patch)
tree       f4b896bcbe096e3522506869f01c2fe4bd9e40a6 /tools/testing/radix-tree
parent     page cache: Convert filemap_map_pages to XArray (diff)
download   linux-a332125fc3223e1092f765db442b7afb9fd4ecde.tar.gz
           linux-a332125fc3223e1092f765db442b7afb9fd4ecde.zip
radix tree test suite: Convert regression1 to XArray
Now that the page cache lookup uses the XArray, convert this regression test from the radix tree API to the XArray as well, so that it continues to test roughly the same thing it was testing before.

Signed-off-by: Matthew Wilcox <willy@infradead.org>
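[Editor's note] For readers who know the old radix tree API but not the XArray, here is a minimal sketch of the lookup idiom the patch adopts. The array, entry, and function names are hypothetical; XA_STATE(), xas_for_each(), xas_retry(), xas_reload() and xas_reset() are the real XArray calls used in the conversion below.

	#include <linux/xarray.h>

	/* Sketch: collect up to @nr live entries from @xa starting at @start.
	 * The control flow mirrors the converted find_get_pages(): iterate
	 * under RCU, skip transient retry entries, and re-check each slot
	 * with xas_reload() before trusting the entry.
	 */
	static unsigned int lookup_sketch(struct xarray *xa, unsigned long start,
					  unsigned int nr, void **results)
	{
		XA_STATE(xas, xa, start);	/* iteration cursor */
		void *entry;
		unsigned int ret = 0;

		rcu_read_lock();
		xas_for_each(&xas, entry, ULONG_MAX) {
			if (xas_retry(&xas, entry))
				continue;	/* transient entry left by a concurrent modifier */
			/* (The real test takes page->lock here before re-checking.) */
			if (entry != xas_reload(&xas)) {
				xas_reset(&xas);	/* lost a race: re-walk from the root */
				continue;
			}
			results[ret++] = entry;
			if (ret == nr)
				break;
		}
		rcu_read_unlock();
		return ret;
	}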
Diffstat (limited to 'tools/testing/radix-tree')
-rw-r--r--  tools/testing/radix-tree/regression1.c  58
1 file changed, 19 insertions(+), 39 deletions(-)
diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c
index 0aece092f40e..b4a4a7168986 100644
--- a/tools/testing/radix-tree/regression1.c
+++ b/tools/testing/radix-tree/regression1.c
@@ -53,12 +53,12 @@ struct page {
unsigned long index;
};
-static struct page *page_alloc(void)
+static struct page *page_alloc(int index)
{
struct page *p;
p = malloc(sizeof(struct page));
p->count = 1;
- p->index = 1;
+ p->index = index;
pthread_mutex_init(&p->lock, NULL);
return p;
@@ -80,53 +80,33 @@ static void page_free(struct page *p)
static unsigned find_get_pages(unsigned long start,
unsigned int nr_pages, struct page **pages)
{
- unsigned int i;
- unsigned int ret;
- unsigned int nr_found;
+ XA_STATE(xas, &mt_tree, start);
+ struct page *page;
+ unsigned int ret = 0;
rcu_read_lock();
-restart:
- nr_found = radix_tree_gang_lookup_slot(&mt_tree,
- (void ***)pages, NULL, start, nr_pages);
- ret = 0;
- for (i = 0; i < nr_found; i++) {
- struct page *page;
-repeat:
- page = radix_tree_deref_slot((void **)pages[i]);
- if (unlikely(!page))
+ xas_for_each(&xas, page, ULONG_MAX) {
+ if (xas_retry(&xas, page))
continue;
- if (radix_tree_exception(page)) {
- if (radix_tree_deref_retry(page)) {
- /*
- * Transient condition which can only trigger
- * when entry at index 0 moves out of or back
- * to root: none yet gotten, safe to restart.
- */
- assert((start | i) == 0);
- goto restart;
- }
- /*
- * No exceptional entries are inserted in this test.
- */
- assert(0);
- }
-
pthread_mutex_lock(&page->lock);
- if (!page->count) {
- pthread_mutex_unlock(&page->lock);
- goto repeat;
- }
+ if (!page->count)
+ goto unlock;
+
/* don't actually update page refcount */
pthread_mutex_unlock(&page->lock);
/* Has the page moved? */
- if (unlikely(page != *((void **)pages[i]))) {
- goto repeat;
- }
+ if (unlikely(page != xas_reload(&xas)))
+ goto put_page;
pages[ret] = page;
ret++;
+ continue;
+unlock:
+ pthread_mutex_unlock(&page->lock);
+put_page:
+ xas_reset(&xas);
}
rcu_read_unlock();
return ret;
@@ -145,12 +125,12 @@ static void *regression1_fn(void *arg)
for (j = 0; j < 1000000; j++) {
struct page *p;
- p = page_alloc();
+ p = page_alloc(0);
pthread_mutex_lock(&mt_lock);
radix_tree_insert(&mt_tree, 0, p);
pthread_mutex_unlock(&mt_lock);
- p = page_alloc();
+ p = page_alloc(1);
pthread_mutex_lock(&mt_lock);
radix_tree_insert(&mt_tree, 1, p);
pthread_mutex_unlock(&mt_lock);
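[Editor's note] On the new control flow in find_get_pages(): the old code's goto repeat spun on a single slot until it stabilised, and the assert((start | i) == 0) encoded the radix tree quirk that a deref retry could only trigger when the entry at index 0 moved out of, or back into, the root. The XArray version needs neither: xas_retry() skips transient entries inline, and when the page->count check or the xas_reload() comparison loses a race, both the unlock and put_page paths fall through to xas_reset(), which makes the next xas_for_each() iteration re-walk from the root instead of looping in place. The page_alloc() change is a related cleanup: every page now records the index it is actually inserted at, where previously both pages claimed index 1.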