Diffstat (limited to 'lib')
-rw-r--r--  lib/alloc_tag.c   249
-rw-r--r--  lib/codetag.c     100
2 files changed, 322 insertions, 27 deletions
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 435aa837e550..5f9cd1642d58 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 #include <linux/alloc_tag.h>
+#include <linux/execmem.h>
 #include <linux/fs.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
@@ -9,6 +10,7 @@
 #include <linux/seq_file.h>
 
 #define ALLOCINFO_FILE_NAME	"allocinfo"
+#define MODULE_ALLOC_TAG_VMAP_SIZE	(100000UL * sizeof(struct alloc_tag))
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
 static bool mem_profiling_support __meminitdata = true;
@@ -174,31 +176,226 @@ static void __init procfs_init(void)
 	}
 }
 
-static bool alloc_tag_module_unload(struct codetag_type *cttype,
-				    struct codetag_module *cmod)
+#ifdef CONFIG_MODULES
+
+static struct maple_tree mod_area_mt = MTREE_INIT(mod_area_mt, MT_FLAGS_ALLOC_RANGE);
+/* A dummy object used to indicate an unloaded module */
+static struct module unloaded_mod;
+/* A dummy object used to indicate a module prepended area */
+static struct module prepend_mod;
+
+static struct alloc_tag_module_section module_tags;
+
+static bool needs_section_mem(struct module *mod, unsigned long size)
 {
-	struct codetag_iterator iter = codetag_get_ct_iter(cttype);
-	struct alloc_tag_counters counter;
-	bool module_unused = true;
-	struct alloc_tag *tag;
-	struct codetag *ct;
+	return size >= sizeof(struct alloc_tag);
+}
+
+static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to)
+{
+	while (from <= to) {
+		struct alloc_tag_counters counter;
 
-	for (ct = codetag_next_ct(&iter); ct; ct = codetag_next_ct(&iter)) {
-		if (iter.cmod != cmod)
+		counter = alloc_tag_read(from);
+		if (counter.bytes)
+			return from;
+		from++;
+	}
+
+	return NULL;
+}
+
+/* Called with mod_area_mt locked */
+static void clean_unused_module_areas_locked(void)
+{
+	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
+	struct module *val;
+
+	mas_for_each(&mas, val, module_tags.size) {
+		if (val != &unloaded_mod)
 			continue;
 
-		tag = ct_to_alloc_tag(ct);
-		counter = alloc_tag_read(tag);
+		/* Release area if all tags are unused */
+		if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
+				   (struct alloc_tag *)(module_tags.start_addr + mas.last)))
+			mas_erase(&mas);
+	}
+}
+
+/* Called with mod_area_mt locked */
+static bool find_aligned_area(struct ma_state *mas, unsigned long section_size,
+			      unsigned long size, unsigned int prepend, unsigned long align)
+{
+	bool cleanup_done = false;
+
+repeat:
+	/* Try finding exact size and hope the start is aligned */
+	if (!mas_empty_area(mas, 0, section_size - 1, prepend + size)) {
+		if (IS_ALIGNED(mas->index + prepend, align))
+			return true;
+
+		/* Try finding larger area to align later */
+		mas_reset(mas);
+		if (!mas_empty_area(mas, 0, section_size - 1,
+				    size + prepend + align - 1))
+			return true;
+	}
+
+	/* No free area, try cleanup stale data and repeat the search once */
+	if (!cleanup_done) {
+		clean_unused_module_areas_locked();
+		cleanup_done = true;
+		mas_reset(mas);
+		goto repeat;
+	}
+
+	return false;
+}
+
+static void *reserve_module_tags(struct module *mod, unsigned long size,
+				 unsigned int prepend, unsigned long align)
+{
+	unsigned long section_size = module_tags.end_addr - module_tags.start_addr;
+	MA_STATE(mas, &mod_area_mt, 0, section_size - 1);
+	unsigned long offset;
+	void *ret = NULL;
+
+	/* If no tags return error */
+	if (size < sizeof(struct alloc_tag))
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * align is always power of 2, so we can use IS_ALIGNED and ALIGN.
+	 * align 0 or 1 means no alignment, to simplify set to 1.
+	 */
+	if (!align)
+		align = 1;
+
+	mas_lock(&mas);
+	if (!find_aligned_area(&mas, section_size, size, prepend, align)) {
+		ret = ERR_PTR(-ENOMEM);
+		goto unlock;
+	}
+
+	/* Mark found area as reserved */
+	offset = mas.index;
+	offset += prepend;
+	offset = ALIGN(offset, align);
+	if (offset != mas.index) {
+		unsigned long pad_start = mas.index;
+
+		mas.last = offset - 1;
+		mas_store(&mas, &prepend_mod);
+		if (mas_is_err(&mas)) {
+			ret = ERR_PTR(xa_err(mas.node));
+			goto unlock;
+		}
+		mas.index = offset;
+		mas.last = offset + size - 1;
+		mas_store(&mas, mod);
+		if (mas_is_err(&mas)) {
+			mas.index = pad_start;
+			mas_erase(&mas);
+			ret = ERR_PTR(xa_err(mas.node));
+		}
+	} else {
+		mas.last = offset + size - 1;
+		mas_store(&mas, mod);
+		if (mas_is_err(&mas))
+			ret = ERR_PTR(xa_err(mas.node));
+	}
+unlock:
+	mas_unlock(&mas);
+
+	if (IS_ERR(ret))
+		return ret;
 
-		if (WARN(counter.bytes,
-			 "%s:%u module %s func:%s has %llu allocated at module unload",
-			 ct->filename, ct->lineno, ct->modname, ct->function, counter.bytes))
-			module_unused = false;
+	if (module_tags.size < offset + size)
+		module_tags.size = offset + size;
+
+	return (struct alloc_tag *)(module_tags.start_addr + offset);
+}
+
+static void release_module_tags(struct module *mod, bool used)
+{
+	MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size);
+	struct alloc_tag *tag;
+	struct module *val;
+
+	mas_lock(&mas);
+	mas_for_each_rev(&mas, val, 0)
+		if (val == mod)
+			break;
+
+	if (!val) /* module not found */
+		goto out;
+
+	if (!used)
+		goto release_area;
+
+	/* Find out if the area is used */
+	tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index),
+			    (struct alloc_tag *)(module_tags.start_addr + mas.last));
+	if (tag) {
+		struct alloc_tag_counters counter = alloc_tag_read(tag);
+
+		pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n",
+			tag->ct.filename, tag->ct.lineno, tag->ct.modname,
+			tag->ct.function, counter.bytes);
+	} else {
+		used = false;
+	}
+release_area:
+	mas_store(&mas, used ? &unloaded_mod : NULL);
+	val = mas_prev_range(&mas, 0);
+	if (val == &prepend_mod)
+		mas_store(&mas, NULL);
+out:
+	mas_unlock(&mas);
+}
+
+static void replace_module(struct module *mod, struct module *new_mod)
+{
+	MA_STATE(mas, &mod_area_mt, 0, module_tags.size);
+	struct module *val;
+
+	mas_lock(&mas);
+	mas_for_each(&mas, val, module_tags.size) {
+		if (val != mod)
+			continue;
+
+		mas_store_gfp(&mas, new_mod, GFP_KERNEL);
+		break;
 	}
+	mas_unlock(&mas);
+}
+
+static int __init alloc_mod_tags_mem(void)
+{
+	/* Allocate space to copy allocation tags */
+	module_tags.start_addr = (unsigned long)execmem_alloc(EXECMEM_MODULE_DATA,
+							      MODULE_ALLOC_TAG_VMAP_SIZE);
+	if (!module_tags.start_addr)
+		return -ENOMEM;
+
+	module_tags.end_addr = module_tags.start_addr + MODULE_ALLOC_TAG_VMAP_SIZE;
+
+	return 0;
+}
 
-	return module_unused;
+static void __init free_mod_tags_mem(void)
+{
+	execmem_free((void *)module_tags.start_addr);
+	module_tags.start_addr = 0;
 }
 
+#else /* CONFIG_MODULES */
+
+static inline int alloc_mod_tags_mem(void) { return 0; }
+static inline void free_mod_tags_mem(void) {}
+
+#endif /* CONFIG_MODULES */
+
 static int __init setup_early_mem_profiling(char *str)
 {
 	bool enable;
@@ -274,14 +471,26 @@ static inline void sysctl_init(void) {}
 static int __init alloc_tag_init(void)
 {
 	const struct codetag_type_desc desc = {
-		.section	= "alloc_tags",
-		.tag_size	= sizeof(struct alloc_tag),
-		.module_unload	= alloc_tag_module_unload,
+		.section		= ALLOC_TAG_SECTION_NAME,
+		.tag_size		= sizeof(struct alloc_tag),
+#ifdef CONFIG_MODULES
+		.needs_section_mem	= needs_section_mem,
+		.alloc_section_mem	= reserve_module_tags,
+		.free_section_mem	= release_module_tags,
+		.module_replaced	= replace_module,
+#endif
 	};
+	int res;
+
+	res = alloc_mod_tags_mem();
+	if (res)
+		return res;
 
 	alloc_tag_cttype = codetag_register_type(&desc);
-	if (IS_ERR(alloc_tag_cttype))
+	if (IS_ERR(alloc_tag_cttype)) {
+		free_mod_tags_mem();
 		return PTR_ERR(alloc_tag_cttype);
+	}
 
 	sysctl_init();
 	procfs_init();
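
The gap search in find_aligned_area() above rests on a small arithmetic guarantee: when no gap of exactly prepend + size bytes happens to start at a suitably aligned offset, a gap of size + prepend + align - 1 bytes is always large enough to hold the prepended bytes and still leave an aligned start for the tags, because align is a power of two. The standalone sketch below is illustrative only and not part of the patch; ALIGN_UP is a local stand-in for the kernel's ALIGN() macro.

	/* Illustrative userspace check, not part of the patch. */
	#include <assert.h>
	#include <stdio.h>

	/* same rounding the kernel's ALIGN() does; a must be a power of two */
	#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

	int main(void)
	{
		unsigned long size = 40, prepend = 8, align = 64;
		unsigned long gap_len = size + prepend + align - 1;

		/* try every possible start offset of the free gap */
		for (unsigned long gap_start = 0; gap_start < 4 * align; gap_start++) {
			/* skip the prepended bytes, then round up to the alignment */
			unsigned long obj = ALIGN_UP(gap_start + prepend, align);

			/* the aligned object never overruns the end of the gap */
			assert(obj + size <= gap_start + gap_len);
		}
		printf("a gap of size + prepend + align - 1 bytes always fits\n");
		return 0;
	}
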
diff --git a/lib/codetag.c b/lib/codetag.c
index d1fbbb7c2ec3..7455b966cae4 100644
--- a/lib/codetag.c
+++ b/lib/codetag.c
@@ -207,6 +207,94 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod)
 }
 
 #ifdef CONFIG_MODULES
+#define CODETAG_SECTION_PREFIX	".codetag."
+
+/* Some codetag types need a separate module section */
+bool codetag_needs_module_section(struct module *mod, const char *name,
+				  unsigned long size)
+{
+	const char *type_name;
+	struct codetag_type *cttype;
+	bool ret = false;
+
+	if (strncmp(name, CODETAG_SECTION_PREFIX, strlen(CODETAG_SECTION_PREFIX)))
+		return false;
+
+	type_name = name + strlen(CODETAG_SECTION_PREFIX);
+	mutex_lock(&codetag_lock);
+	list_for_each_entry(cttype, &codetag_types, link) {
+		if (strcmp(type_name, cttype->desc.section) == 0) {
+			if (!cttype->desc.needs_section_mem)
+				break;
+
+			down_write(&cttype->mod_lock);
+			ret = cttype->desc.needs_section_mem(mod, size);
+			up_write(&cttype->mod_lock);
+			break;
+		}
+	}
+	mutex_unlock(&codetag_lock);
+
+	return ret;
+}
+
+void *codetag_alloc_module_section(struct module *mod, const char *name,
+				   unsigned long size, unsigned int prepend,
+				   unsigned long align)
+{
+	const char *type_name = name + strlen(CODETAG_SECTION_PREFIX);
+	struct codetag_type *cttype;
+	void *ret = ERR_PTR(-EINVAL);
+
+	mutex_lock(&codetag_lock);
+	list_for_each_entry(cttype, &codetag_types, link) {
+		if (strcmp(type_name, cttype->desc.section) == 0) {
+			if (WARN_ON(!cttype->desc.alloc_section_mem))
+				break;
+
+			down_write(&cttype->mod_lock);
+			ret = cttype->desc.alloc_section_mem(mod, size, prepend, align);
+			up_write(&cttype->mod_lock);
+			break;
+		}
+	}
+	mutex_unlock(&codetag_lock);
+
+	return ret;
+}
+
+void codetag_free_module_sections(struct module *mod)
+{
+	struct codetag_type *cttype;
+
+	mutex_lock(&codetag_lock);
+	list_for_each_entry(cttype, &codetag_types, link) {
+		if (!cttype->desc.free_section_mem)
+			continue;
+
+		down_write(&cttype->mod_lock);
+		cttype->desc.free_section_mem(mod, false);
+		up_write(&cttype->mod_lock);
+	}
+	mutex_unlock(&codetag_lock);
+}
+
+void codetag_module_replaced(struct module *mod, struct module *new_mod)
+{
+	struct codetag_type *cttype;
+
+	mutex_lock(&codetag_lock);
+	list_for_each_entry(cttype, &codetag_types, link) {
+		if (!cttype->desc.module_replaced)
+			continue;
+
+		down_write(&cttype->mod_lock);
+		cttype->desc.module_replaced(mod, new_mod);
+		up_write(&cttype->mod_lock);
+	}
+	mutex_unlock(&codetag_lock);
+}
+
 void codetag_load_module(struct module *mod)
 {
 	struct codetag_type *cttype;
@@ -220,13 +308,12 @@ void codetag_load_module(struct module *mod)
 	mutex_unlock(&codetag_lock);
 }
 
-bool codetag_unload_module(struct module *mod)
+void codetag_unload_module(struct module *mod)
 {
 	struct codetag_type *cttype;
-	bool unload_ok = true;
 
 	if (!mod)
-		return true;
+		return;
 
 	/* await any module's kfree_rcu() operations to complete */
 	kvfree_rcu_barrier();
@@ -246,18 +333,17 @@ bool codetag_unload_module(struct module *mod)
 		}
 		if (found) {
 			if (cttype->desc.module_unload)
-				if (!cttype->desc.module_unload(cttype, cmod))
-					unload_ok = false;
+				cttype->desc.module_unload(cttype, cmod);
 
 			cttype->count -= range_size(cttype, &cmod->range);
 			idr_remove(&cttype->mod_idr, mod_id);
 			kfree(cmod);
 		}
 		up_write(&cttype->mod_lock);
+		if (found && cttype->desc.free_section_mem)
+			cttype->desc.free_section_mem(mod, true);
 	}
 	mutex_unlock(&codetag_lock);
-
-	return unload_ok;
 }
 #endif /* CONFIG_MODULES */
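
The new entry points in lib/codetag.c (codetag_needs_module_section(), codetag_alloc_module_section(), codetag_free_module_sections(), codetag_module_replaced()) are driven by the module loader, which lives outside lib/ and is therefore not visible in this diffstat-limited view. The sketch below only illustrates the intended call order; it is not part of the patch, example_layout_codetag_section() and its parameters are hypothetical, and only the codetag_* calls come from this series.

	/* Hypothetical loader-side sketch, not part of the patch. */
	static int example_layout_codetag_section(struct module *mod,
						  const char *sect_name,
						  unsigned long sect_size,
						  unsigned long sect_align)
	{
		void *dst;

		/* ask the codetag core whether this ".codetag.*" section wants
		 * its own memory instead of the regular module data layout */
		if (!codetag_needs_module_section(mod, sect_name, sect_size))
			return 0;

		/* reserve room in the shared codetag area (for alloc_tag, the
		 * execmem range set up by alloc_mod_tags_mem() earlier) */
		dst = codetag_alloc_module_section(mod, sect_name, sect_size,
						   0 /* prepend */, sect_align);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		/* ... the loader would copy the section payload to dst here ... */
		return 0;
	}

	/*
	 * Later in the module's life, still from the loader side:
	 *
	 *	codetag_module_replaced(old_mod, new_mod);  // layout moved to the final struct module
	 *	codetag_free_module_sections(mod);          // load failed, drop the reservation
	 *	codetag_unload_module(mod);                 // normal unload path
	 */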