From 49b786ea146f69c371df18e81ce0a2d5839f865c Mon Sep 17 00:00:00 2001 From: Aleksa Sarai Date: Tue, 9 Jun 2015 21:32:10 +1000 Subject: cgroup: implement the PIDs subsystem Adds a new single-purpose PIDs subsystem to limit the number of tasks that can be forked inside a cgroup. Essentially this is an implementation of RLIMIT_NPROC that applies to a cgroup rather than a process tree. However, it should be noted that organisational operations (adding and removing tasks from a PIDs hierarchy) will *not* be prevented. Rather, the number of tasks in the hierarchy cannot exceed the limit through forking. This is due to the fact that, in the unified hierarchy, attach cannot fail (and it is not possible for a task to overcome its PIDs cgroup policy limit by attaching to a child cgroup -- even if migrating mid-fork it must be able to fork in the parent first). PIDs are fundamentally a global resource, and it is possible to reach PID exhaustion inside a cgroup without hitting any reasonable kmemcg policy. Once you've hit PID exhaustion, you're only in a marginally better state than OOM. This subsystem allows PID exhaustion inside a cgroup to be prevented. Signed-off-by: Aleksa Sarai Signed-off-by: Tejun Heo --- kernel/Makefile | 1 + 1 file changed, 1 insertion(+) (limited to 'kernel/Makefile') diff --git a/kernel/Makefile b/kernel/Makefile index 43c4c920f30a..718fb8afab7a 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -55,6 +55,7 @@ obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o obj-$(CONFIG_COMPAT) += compat.o obj-$(CONFIG_CGROUPS) += cgroup.o obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o +obj-$(CONFIG_CGROUP_PIDS) += cgroup_pids.o obj-$(CONFIG_CPUSETS) += cpuset.o obj-$(CONFIG_UTS_NS) += utsname.o obj-$(CONFIG_USER_NS) += user_namespace.o -- cgit v1.2.3 From 7f49294282c49ef426ed05eb4959728524ba140c Mon Sep 17 00:00:00 2001 From: Richard Guy Briggs Date: Wed, 5 Aug 2015 16:29:36 -0400 Subject: audit: clean simple fsnotify implementation This is to be used to audit by executable path rules, but audit watches should be able to share this code eventually. At the moment the audit watch code is a lot more complex. That code only creates one fsnotify watch per parent directory. That 'audit_parent' in turn has a list of 'audit_watches' which contain the name, ino, dev of the specific object we care about. This just creates one fsnotify watch per object we care about. So if you watch 100 inodes in /etc this code will create 100 fsnotify watches on /etc. The audit_watch code will instead create 1 fsnotify watch on /etc (the audit_parent) and then 100 individual watches chained from that fsnotify mark. We should be able to convert the audit_watch code to do one fsnotify mark per watch and simplify things/remove a whole lot of code. After that conversion we should be able to convert the audit_fsnotify code to support that hierarchy if the optimization is necessary. Move the access to the entry for audit_match_signal() to the beginning of the audit_del_rule() function in case the entry found is the same one passed in. This will enable it to be used by audit_autoremove_mark_rule(), kill_rules() and audit_remove_parent_watches(). This is a heavily modified and merged version of two patches originally submitted by Eric Paris. 
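The pids controller added by the first patch above is easiest to observe from userspace: once a task sits in a group whose pids.max has been reached, fork(2) fails with EAGAIN. A minimal demonstration sketch; the legacy mount point /sys/fs/cgroup/pids and the pre-created, pre-limited "demo" group are assumptions, not part of the patch:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <unistd.h>

    int main(void)
    {
        /* Assumed to exist, e.g. created beforehand with pids.max = 8. */
        FILE *procs = fopen("/sys/fs/cgroup/pids/demo/cgroup.procs", "w");

        if (!procs)
            return 1;
        fprintf(procs, "%d\n", getpid());   /* join the limited cgroup */
        fclose(procs);

        for (int i = 0; ; i++) {
            pid_t pid = fork();

            if (pid < 0) {
                /* The pids controller vetoed the fork: errno is EAGAIN. */
                printf("fork #%d failed: %s\n", i, strerror(errno));
                return 0;
            }
            if (pid == 0)
                pause();    /* child: keep the pid slot occupied */
        }
    }
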
Cc: Peter Moody Cc: Eric Paris Signed-off-by: Richard Guy Briggs [PM: added a space after a declaration to keep ./scripts/checkpatch happy] Signed-off-by: Paul Moore --- kernel/Makefile | 2 +- kernel/audit.h | 14 ++++ kernel/audit_fsnotify.c | 216 ++++++++++++++++++++++++++++++++++++++++++++++++ kernel/auditfilter.c | 2 +- 4 files changed, 232 insertions(+), 2 deletions(-) create mode 100644 kernel/audit_fsnotify.c (limited to 'kernel/Makefile') diff --git a/kernel/Makefile b/kernel/Makefile index 1408b3353a3c..d7657f5535c9 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -62,7 +62,7 @@ obj-$(CONFIG_SMP) += stop_machine.o obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o obj-$(CONFIG_AUDIT) += audit.o auditfilter.o obj-$(CONFIG_AUDITSYSCALL) += auditsc.o -obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o +obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o audit_fsnotify.o obj-$(CONFIG_AUDIT_TREE) += audit_tree.o obj-$(CONFIG_GCOV_KERNEL) += gcov/ obj-$(CONFIG_KPROBES) += kprobes.o diff --git a/kernel/audit.h b/kernel/audit.h index 1caa0d345d90..7102d538737b 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -50,6 +50,7 @@ enum audit_state { /* Rule lists */ struct audit_watch; +struct audit_fsnotify_mark; struct audit_tree; struct audit_chunk; @@ -252,6 +253,7 @@ struct audit_net { extern int selinux_audit_rule_update(void); extern struct mutex audit_filter_mutex; +extern int audit_del_rule(struct audit_entry *); extern void audit_free_rule_rcu(struct rcu_head *); extern struct list_head audit_filter_list[]; @@ -266,6 +268,13 @@ extern int audit_add_watch(struct audit_krule *krule, struct list_head **list); extern void audit_remove_watch_rule(struct audit_krule *krule); extern char *audit_watch_path(struct audit_watch *watch); extern int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev); + +extern struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pathname, int len); +extern char *audit_mark_path(struct audit_fsnotify_mark *mark); +extern void audit_remove_mark(struct audit_fsnotify_mark *audit_mark); +extern void audit_remove_mark_rule(struct audit_krule *krule); +extern int audit_mark_compare(struct audit_fsnotify_mark *mark, unsigned long ino, dev_t dev); + #else #define audit_put_watch(w) {} #define audit_get_watch(w) {} @@ -275,6 +284,11 @@ extern int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev #define audit_watch_path(w) "" #define audit_watch_compare(w, i, d) 0 +#define audit_alloc_mark(k, p, l) (ERR_PTR(-EINVAL)) +#define audit_mark_path(m) "" +#define audit_remove_mark(m) +#define audit_remove_mark_rule(k) +#define audit_mark_compare(m, i, d) 0 #endif /* CONFIG_AUDIT_WATCH */ #ifdef CONFIG_AUDIT_TREE diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c new file mode 100644 index 000000000000..27c6046c2c3d --- /dev/null +++ b/kernel/audit_fsnotify.c @@ -0,0 +1,216 @@ +/* audit_fsnotify.c -- tracking inodes + * + * Copyright 2003-2009,2014-2015 Red Hat, Inc. + * Copyright 2005 Hewlett-Packard Development Company, L.P. + * Copyright 2005 IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include <linux/file.h> +#include <linux/kernel.h> +#include <linux/audit.h> +#include <linux/kthread.h> +#include <linux/mutex.h> +#include <linux/fs.h> +#include <linux/fsnotify_backend.h> +#include <linux/namei.h> +#include <linux/netlink.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/security.h> +#include "audit.h" + +/* + * this mark lives on the parent directory of the inode in question. + * but dev, ino, and path are about the child + */ +struct audit_fsnotify_mark { + dev_t dev; /* associated superblock device */ + unsigned long ino; /* associated inode number */ + char *path; /* insertion path */ + struct fsnotify_mark mark; /* fsnotify mark on the inode */ + struct audit_krule *rule; +}; + +/* fsnotify handle. */ +static struct fsnotify_group *audit_fsnotify_group; + +/* fsnotify events we care about. */ +#define AUDIT_FS_EVENTS (FS_MOVE | FS_CREATE | FS_DELETE | FS_DELETE_SELF |\ + FS_MOVE_SELF | FS_EVENT_ON_CHILD) + +static void audit_fsnotify_mark_free(struct audit_fsnotify_mark *audit_mark) +{ + kfree(audit_mark->path); + kfree(audit_mark); +} + +static void audit_fsnotify_free_mark(struct fsnotify_mark *mark) +{ + struct audit_fsnotify_mark *audit_mark; + + audit_mark = container_of(mark, struct audit_fsnotify_mark, mark); + audit_fsnotify_mark_free(audit_mark); +} + +char *audit_mark_path(struct audit_fsnotify_mark *mark) +{ + return mark->path; +} + +int audit_mark_compare(struct audit_fsnotify_mark *mark, unsigned long ino, dev_t dev) +{ + if (mark->ino == AUDIT_INO_UNSET) + return 0; + return (mark->ino == ino) && (mark->dev == dev); +} + +static void audit_update_mark(struct audit_fsnotify_mark *audit_mark, + struct inode *inode) +{ + audit_mark->dev = inode ? inode->i_sb->s_dev : AUDIT_DEV_UNSET; + audit_mark->ino = inode ? inode->i_ino : AUDIT_INO_UNSET; +} + +struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pathname, int len) +{ + struct audit_fsnotify_mark *audit_mark; + struct path path; + struct dentry *dentry; + struct inode *inode; + int ret; + + if (pathname[0] != '/' || pathname[len-1] == '/') + return ERR_PTR(-EINVAL); + + dentry = kern_path_locked(pathname, &path); + if (IS_ERR(dentry)) + return (void *)dentry; /* returning an error */ + inode = path.dentry->d_inode; + mutex_unlock(&inode->i_mutex); + + audit_mark = kzalloc(sizeof(*audit_mark), GFP_KERNEL); + if (unlikely(!audit_mark)) { + audit_mark = ERR_PTR(-ENOMEM); + goto out; + } + + fsnotify_init_mark(&audit_mark->mark, audit_fsnotify_free_mark); + audit_mark->mark.mask = AUDIT_FS_EVENTS; + audit_mark->path = pathname; + audit_update_mark(audit_mark, dentry->d_inode); + audit_mark->rule = krule; + + ret = fsnotify_add_mark(&audit_mark->mark, audit_fsnotify_group, inode, NULL, true); + if (ret < 0) { + audit_fsnotify_mark_free(audit_mark); + audit_mark = ERR_PTR(ret); + } +out: + dput(dentry); + path_put(&path); + return audit_mark; +} + +static void audit_mark_log_rule_change(struct audit_fsnotify_mark *audit_mark, char *op) +{ + struct audit_buffer *ab; + struct audit_krule *rule = audit_mark->rule; + + if (!audit_enabled) + return; + ab = audit_log_start(NULL, GFP_NOFS, AUDIT_CONFIG_CHANGE); + if (unlikely(!ab)) + return; + audit_log_format(ab, "auid=%u ses=%u op=", + from_kuid(&init_user_ns, audit_get_loginuid(current)), + audit_get_sessionid(current)); + audit_log_string(ab, op); + audit_log_format(ab, " path="); + audit_log_untrustedstring(ab, audit_mark->path); + audit_log_key(ab, rule->filterkey); + audit_log_format(ab, " list=%d res=1", rule->listnr); + audit_log_end(ab); +} + +void audit_remove_mark(struct audit_fsnotify_mark *audit_mark) +{ + fsnotify_destroy_mark(&audit_mark->mark,
audit_fsnotify_group); + fsnotify_put_mark(&audit_mark->mark); +} + +void audit_remove_mark_rule(struct audit_krule *krule) +{ + struct audit_fsnotify_mark *mark = krule->exe; + + audit_remove_mark(mark); +} + +static void audit_autoremove_mark_rule(struct audit_fsnotify_mark *audit_mark) +{ + struct audit_krule *rule = audit_mark->rule; + struct audit_entry *entry = container_of(rule, struct audit_entry, rule); + + audit_mark_log_rule_change(audit_mark, "autoremove_rule"); + audit_del_rule(entry); +} + +/* Update mark data in audit rules based on fsnotify events. */ +static int audit_mark_handle_event(struct fsnotify_group *group, + struct inode *to_tell, + struct fsnotify_mark *inode_mark, + struct fsnotify_mark *vfsmount_mark, + u32 mask, void *data, int data_type, + const unsigned char *dname, u32 cookie) +{ + struct audit_fsnotify_mark *audit_mark; + struct inode *inode = NULL; + + audit_mark = container_of(inode_mark, struct audit_fsnotify_mark, mark); + + BUG_ON(group != audit_fsnotify_group); + + switch (data_type) { + case (FSNOTIFY_EVENT_PATH): + inode = ((struct path *)data)->dentry->d_inode; + break; + case (FSNOTIFY_EVENT_INODE): + inode = (struct inode *)data; + break; + default: + BUG(); + return 0; + }; + + if (mask & (FS_CREATE|FS_MOVED_TO|FS_DELETE|FS_MOVED_FROM)) { + if (audit_compare_dname_path(dname, audit_mark->path, AUDIT_NAME_FULL)) + return 0; + audit_update_mark(audit_mark, inode); + } else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF)) + audit_autoremove_mark_rule(audit_mark); + + return 0; +} + +static const struct fsnotify_ops audit_mark_fsnotify_ops = { + .handle_event = audit_mark_handle_event, +}; + +static int __init audit_fsnotify_init(void) +{ + audit_fsnotify_group = fsnotify_alloc_group(&audit_mark_fsnotify_ops); + if (IS_ERR(audit_fsnotify_group)) { + audit_fsnotify_group = NULL; + audit_panic("cannot create audit fsnotify group"); + } + return 0; +} +device_initcall(audit_fsnotify_init); diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 7ca7d3b5aca2..b4d8c366ec30 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -935,7 +935,7 @@ static inline int audit_add_rule(struct audit_entry *entry) } /* Remove an existing rule from filterlist. */ -static inline int audit_del_rule(struct audit_entry *entry) +int audit_del_rule(struct audit_entry *entry) { struct audit_entry *e; struct audit_tree *tree = entry->rule.tree; -- cgit v1.2.3 From 19e91b69d77bab16405cc284b451378e89a4110c Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 20 Jul 2015 21:16:29 +0100 Subject: modsign: Allow external signing key to be specified Signed-off-by: David Woodhouse Signed-off-by: David Howells --- Documentation/module-signing.txt | 31 ++++++++++++++++++++++++++----- Makefile | 2 +- init/Kconfig | 14 ++++++++++++++ kernel/Makefile | 5 +++++ 4 files changed, 46 insertions(+), 6 deletions(-) (limited to 'kernel/Makefile') diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt index faaa6ea002f7..84597c7ea175 100644 --- a/Documentation/module-signing.txt +++ b/Documentation/module-signing.txt @@ -88,6 +88,22 @@ This has a number of options available: than being a module) so that modules signed with that algorithm can have their signatures checked without causing a dependency loop. 
+ (4) "File name or PKCS#11 URI of module signing key" (CONFIG_MODULE_SIG_KEY) + + Setting this option to something other than its default of + "signing_key.priv" will disable the autogeneration of signing keys and + allow the kernel modules to be signed with a key of your choosing. + The string provided should identify a file containing a private key + in PEM form, or — on systems where the OpenSSL ENGINE_pkcs11 is + appropriately installed — a PKCS#11 URI as defined by RFC7512. + + If the PEM file containing the private key is encrypted, or if the + PKCS#11 token requries a PIN, this can be provided at build time by + means of the KBUILD_SIGN_PIN variable. + + The corresponding X.509 certificate in DER form should still be placed + in a file named signing_key.x509 in the top-level build directory. + ======================= GENERATING SIGNING KEYS @@ -100,8 +116,9 @@ it can be deleted or stored securely. The public key gets built into the kernel so that it can be used to check the signatures as the modules are loaded. -Under normal conditions, the kernel build will automatically generate a new -keypair using openssl if one does not exist in the files: +Under normal conditions, when CONFIG_MODULE_SIG_KEY is unchanged from its +default of "signing_key.priv", the kernel build will automatically generate +a new keypair using openssl if one does not exist in the files: signing_key.priv signing_key.x509 @@ -135,8 +152,12 @@ kernel sources tree and the openssl command. The following is an example to generate the public/private key files: openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \ - -config x509.genkey -outform DER -out signing_key.x509 \ - -keyout signing_key.priv + -config x509.genkey -outform PEM -out kernel_key.pem \ + -keyout kernel_key.pem + +The full pathname for the resulting kernel_key.pem file can then be specified +in the CONFIG_MODULE_SIG_KEY option, and the certificate and key therein will +be used instead of an autogenerated keypair. ========================= @@ -181,7 +202,7 @@ To manually sign a module, use the scripts/sign-file tool available in the Linux kernel source tree. The script requires 4 arguments: 1. The hash algorithm (e.g., sha256) - 2. The private key filename + 2. The private key filename or PKCS#11 URI 3. The public key filename 4. The kernel module to be signed diff --git a/Makefile b/Makefile index dc87ec280fbc..531dd16c9751 100644 --- a/Makefile +++ b/Makefile @@ -870,7 +870,7 @@ INITRD_COMPRESS-$(CONFIG_RD_LZ4) := lz4 # export INITRD_COMPRESS := $(INITRD_COMPRESS-y) ifdef CONFIG_MODULE_SIG_ALL -MODSECKEY = ./signing_key.priv +MODSECKEY = $(CONFIG_MODULE_SIG_KEY) MODPUBKEY = ./signing_key.x509 export MODPUBKEY mod_sign_cmd = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(MODSECKEY) $(MODPUBKEY) diff --git a/init/Kconfig b/init/Kconfig index 14b3d8422502..1b1148e9181b 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1948,6 +1948,20 @@ config MODULE_SIG_HASH default "sha384" if MODULE_SIG_SHA384 default "sha512" if MODULE_SIG_SHA512 +config MODULE_SIG_KEY + string "File name or PKCS#11 URI of module signing key" + default "signing_key.priv" + depends on MODULE_SIG + help + Provide the file name of a private key in PKCS#8 PEM format, or + a PKCS#11 URI according to RFC7512. The corresponding X.509 + certificate in DER form should be present in signing_key.x509 + in the top-level build directory. 
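Note that the option above deliberately overloads one string for two different key sources; everything downstream tells them apart by nothing more than the RFC 7512 scheme prefix. A toy version of that dispatch (standalone sketch, example strings invented):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* RFC 7512: every PKCS#11 URI begins with the "pkcs11:" scheme. */
    static bool is_pkcs11_uri(const char *key)
    {
        return strncmp(key, "pkcs11:", 7) == 0;
    }

    int main(void)
    {
        const char *examples[] = {
            "signing_key.priv",
            "pkcs11:token=kernel;object=modsign",
        };

        for (size_t i = 0; i < sizeof(examples) / sizeof(examples[0]); i++)
            printf("%s -> %s\n", examples[i],
                   is_pkcs11_uri(examples[i]) ? "PKCS#11 token" : "key file");
        return 0;
    }
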
+ + If this option is unchanged from its default "signing_key.priv", + then the kernel will automatically generate the private key and + certificate as described in Documentation/module-signing.txt + config MODULE_COMPRESS bool "Compress modules on installation" depends on MODULES diff --git a/kernel/Makefile b/kernel/Makefile index 43c4c920f30a..2c937ace292e 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -170,6 +170,10 @@ ifndef CONFIG_MODULE_SIG_HASH $(error Could not determine digest type to use from kernel config) endif +# We do it this way rather than having a boolean option for enabling an +# external private key, because 'make randconfig' might enable such a +# boolean option and we unfortunately can't make it depend on !RANDCONFIG. +ifeq ($(CONFIG_MODULE_SIG_KEY),"signing_key.priv") signing_key.priv signing_key.x509: x509.genkey @echo "###" @echo "### Now generating an X.509 key pair to be used for signing modules." @@ -207,3 +211,4 @@ x509.genkey: @echo >>x509.genkey "subjectKeyIdentifier=hash" @echo >>x509.genkey "authorityKeyIdentifier=keyid" endif +endif -- cgit v1.2.3 From 1329e8cc69b93a0b1bc6d197b30dcff628c18dbf Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 20 Jul 2015 21:16:30 +0100 Subject: modsign: Extract signing cert from CONFIG_MODULE_SIG_KEY if needed Where an external PEM file or PKCS#11 URI is given, we can get the cert from it for ourselves instead of making the user drop signing_key.x509 in place for us. Signed-off-by: David Woodhouse Signed-off-by: David Howells --- Documentation/module-signing.txt | 11 ++-- init/Kconfig | 8 +-- kernel/Makefile | 38 +++++++++++ scripts/Makefile | 3 +- scripts/extract-cert.c | 132 +++++++++++++++++++++++++++++++++++++++ 5 files changed, 181 insertions(+), 11 deletions(-) create mode 100644 scripts/extract-cert.c (limited to 'kernel/Makefile') diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt index 84597c7ea175..693001920890 100644 --- a/Documentation/module-signing.txt +++ b/Documentation/module-signing.txt @@ -93,17 +93,16 @@ This has a number of options available: Setting this option to something other than its default of "signing_key.priv" will disable the autogeneration of signing keys and allow the kernel modules to be signed with a key of your choosing. - The string provided should identify a file containing a private key - in PEM form, or — on systems where the OpenSSL ENGINE_pkcs11 is - appropriately installed — a PKCS#11 URI as defined by RFC7512. + The string provided should identify a file containing both a private + key and its corresponding X.509 certificate in PEM form, or — on + systems where the OpenSSL ENGINE_pkcs11 is functional — a PKCS#11 URI + as defined by RFC7512. In the latter case, the PKCS#11 URI should + reference both a certificate and a private key. If the PEM file containing the private key is encrypted, or if the PKCS#11 token requries a PIN, this can be provided at build time by means of the KBUILD_SIGN_PIN variable. - The corresponding X.509 certificate in DER form should still be placed - in a file named signing_key.x509 in the top-level build directory. - ======================= GENERATING SIGNING KEYS diff --git a/init/Kconfig b/init/Kconfig index 1b1148e9181b..e2e0a1d27886 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1953,10 +1953,10 @@ config MODULE_SIG_KEY default "signing_key.priv" depends on MODULE_SIG help - Provide the file name of a private key in PKCS#8 PEM format, or - a PKCS#11 URI according to RFC7512. 
The corresponding X.509 - certificate in DER form should be present in signing_key.x509 - in the top-level build directory. + Provide the file name of a private key/certificate in PEM format, + or a PKCS#11 URI according to RFC7512. The file should contain, or + the URI should identify, both the certificate and its corresponding + private key. If this option is unchanged from its default "signing_key.priv", then the kernel will automatically generate the private key and diff --git a/kernel/Makefile b/kernel/Makefile index 2c937ace292e..fa2f8b84b18a 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -210,5 +210,43 @@ x509.genkey: @echo >>x509.genkey "keyUsage=digitalSignature" @echo >>x509.genkey "subjectKeyIdentifier=hash" @echo >>x509.genkey "authorityKeyIdentifier=keyid" +else +# For external (PKCS#11 or PEM) key, we need to obtain the certificate from +# CONFIG_MODULE_SIG_KEY automatically. +quiet_cmd_extract_der = CERT_DER $(2) + cmd_extract_der = scripts/extract-cert "$(2)" signing_key.x509 + +# CONFIG_MODULE_SIG_KEY is either a PKCS#11 URI or a filename. It is +# surrounded by quotes, and may contain spaces. To strip the quotes +# with $(patsubst) we need to turn the spaces into something else. +# And if it's a filename, those spaces need to be escaped as '\ ' in +# order to use it in dependencies or $(wildcard). +space := +space += +space_escape := %%%SPACE%%% +X509_SOURCE_temp := $(subst $(space),$(space_escape),$(CONFIG_MODULE_SIG_KEY)) +# We need this to check for absolute paths or PKCS#11 URIs. +X509_SOURCE_ONEWORD := $(patsubst "%",%,$(X509_SOURCE_temp)) +# This is the actual source filename/URI without the quotes +X509_SOURCE := $(subst $(space_escape),$(space),$(X509_SOURCE_ONEWORD)) +# This\ version\ with\ spaces\ escaped\ for\ $(wildcard)\ and\ dependencies +X509_SOURCE_ESCAPED := $(subst $(space_escape),\$(space),$(X509_SOURCE_ONEWORD)) + +ifeq ($(patsubst pkcs11:%,%,$(X509_SOURCE_ONEWORD)),$(X509_SOURCE_ONEWORD)) +# If it's a filename, depend on it. +X509_DEP := $(X509_SOURCE_ESCAPED) +ifeq ($(patsubst /%,%,$(X509_SOURCE_ONEWORD)),$(X509_SOURCE_ONEWORD)) +ifeq ($(wildcard $(X509_SOURCE_ESCAPED)),) +ifneq ($(wildcard $(srctree)/$(X509_SOURCE_ESCAPED)),) +# Non-absolute filename, found in source tree and not build tree +X509_SOURCE := $(srctree)/$(X509_SOURCE) +X509_DEP := $(srctree)/$(X509_SOURCE_ESCAPED) +endif +endif +endif +endif + +signing_key.x509: scripts/extract-cert include/config/module/sig/key.h $(X509_DEP) + $(call cmd,extract_der,$(X509_SOURCE)) endif endif diff --git a/scripts/Makefile b/scripts/Makefile index b12fe020664d..236f683510bd 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -16,11 +16,12 @@ hostprogs-$(CONFIG_VT) += conmakehash hostprogs-$(BUILD_C_RECORDMCOUNT) += recordmcount hostprogs-$(CONFIG_BUILDTIME_EXTABLE_SORT) += sortextable hostprogs-$(CONFIG_ASN1) += asn1_compiler -hostprogs-$(CONFIG_MODULE_SIG) += sign-file +hostprogs-$(CONFIG_MODULE_SIG) += sign-file extract-cert HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include HOSTLOADLIBES_sign-file = -lcrypto +HOSTLOADLIBES_extract-cert = -lcrypto always := $(hostprogs-y) $(hostprogs-m) diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c new file mode 100644 index 000000000000..4fd5b2f07b45 --- /dev/null +++ b/scripts/extract-cert.c @@ -0,0 +1,132 @@ +/* Extract X.509 certificate in DER form from PKCS#11 or PEM. + * + * Copyright © 2014 Red Hat, Inc. All Rights Reserved. + * Copyright © 2015 Intel Corporation. 
+ * + * Authors: David Howells <dhowells@redhat.com> + * David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ +#define _GNU_SOURCE +#include <stdio.h> +#include <stdlib.h> +#include <stdint.h> +#include <stdbool.h> +#include <string.h> +#include <getopt.h> +#include <err.h> +#include <arpa/inet.h> +#include <openssl/bio.h> +#include <openssl/evp.h> +#include <openssl/pem.h> +#include <openssl/pkcs7.h> +#include <openssl/err.h> +#include <openssl/engine.h> + +#define PKEY_ID_PKCS7 2 + +static __attribute__((noreturn)) +void format(void) +{ + fprintf(stderr, + "Usage: scripts/extract-cert <source> <dest>\n"); + exit(2); +} + +static void display_openssl_errors(int l) +{ + const char *file; + char buf[120]; + int e, line; + + if (ERR_peek_error() == 0) + return; + fprintf(stderr, "At main.c:%d:\n", l); + + while ((e = ERR_get_error_line(&file, &line))) { + ERR_error_string(e, buf); + fprintf(stderr, "- SSL %s: %s:%d\n", buf, file, line); + } +} + +static void drain_openssl_errors(void) +{ + const char *file; + int line; + + if (ERR_peek_error() == 0) + return; + while (ERR_get_error_line(&file, &line)) {} +} + +#define ERR(cond, fmt, ...) \ + do { \ + bool __cond = (cond); \ + display_openssl_errors(__LINE__); \ + if (__cond) { \ + err(1, fmt, ## __VA_ARGS__); \ + } \ + } while(0) + +static const char *key_pass; + +int main(int argc, char **argv) +{ + char *cert_src, *cert_dst; + X509 *x509; + BIO *b; + + OpenSSL_add_all_algorithms(); + ERR_load_crypto_strings(); + ERR_clear_error(); + + key_pass = getenv("KBUILD_SIGN_PIN"); + + if (argc != 3) + format(); + + cert_src = argv[1]; + cert_dst = argv[2]; + + if (!strncmp(cert_src, "pkcs11:", 7)) { + ENGINE *e; + struct { + const char *cert_id; + X509 *cert; + } parms; + + parms.cert_id = cert_src; + parms.cert = NULL; + + ENGINE_load_builtin_engines(); + drain_openssl_errors(); + e = ENGINE_by_id("pkcs11"); + ERR(!e, "Load PKCS#11 ENGINE"); + if (ENGINE_init(e)) + drain_openssl_errors(); + else + ERR(1, "ENGINE_init"); + if (key_pass) + ERR(!ENGINE_ctrl_cmd_string(e, "PIN", key_pass, 0), "Set PKCS#11 PIN"); + ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1); + ERR(!parms.cert, "Get X.509 from PKCS#11"); + x509 = parms.cert; + } else { + b = BIO_new_file(cert_src, "rb"); + ERR(!b, "%s", cert_src); + x509 = PEM_read_bio_X509(b, NULL, NULL, NULL); + ERR(!x509, "%s", cert_src); + BIO_free(b); + } + + b = BIO_new_file(cert_dst, "wb"); + ERR(!b, "%s", cert_dst); + ERR(!i2d_X509_bio(b, x509), cert_dst); + BIO_free(b); + + return 0; +} -- cgit v1.2.3 From fb1179499134bc718dc7557c7a6a95dc72f224cb Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 20 Jul 2015 21:16:30 +0100 Subject: modsign: Use single PEM file for autogenerated key The current rule for generating signing_key.priv and signing_key.x509 is a classic example of a bad rule which has a tendency to break parallel make. When invoked to create *either* target, it generates the other target as a side-effect that make didn't predict. So let's switch to using a single file signing_key.pem which contains both key and certificate. That matches what we do in the case of an external key specified by CONFIG_MODULE_SIG_KEY anyway, so it's also slightly cleaner.
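Keeping the key and the certificate in one PEM file works because OpenSSL's PEM readers skip blocks whose header does not match, so both objects can be pulled out of the combined file in sequence. A rough sketch of that read pattern, assuming the generated layout (private key block first, certificate second); build with -lcrypto:

    #include <stdio.h>
    #include <openssl/bio.h>
    #include <openssl/pem.h>
    #include <openssl/x509.h>

    int main(void)
    {
        BIO *b = BIO_new_file("signing_key.pem", "rb");
        EVP_PKEY *key;
        X509 *cert;
        int ok;

        if (!b)
            return 1;
        key = PEM_read_bio_PrivateKey(b, NULL, NULL, NULL);
        cert = PEM_read_bio_X509(b, NULL, NULL, NULL); /* next PEM block */
        ok = key && cert;
        printf("key: %s, certificate: %s\n",
               key ? "found" : "missing", cert ? "found" : "missing");

        EVP_PKEY_free(key);
        X509_free(cert);
        BIO_free(b);
        return !ok;
    }
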
Signed-off-by: David Woodhouse Signed-off-by: David Howells --- .gitignore | 1 + Documentation/module-signing.txt | 9 ++++----- Makefile | 4 ++-- init/Kconfig | 4 ++-- kernel/Makefile | 15 +++++++-------- 5 files changed, 16 insertions(+), 17 deletions(-) (limited to 'kernel/Makefile') diff --git a/.gitignore b/.gitignore index 4ad4a98b884b..17fa24dd7e46 100644 --- a/.gitignore +++ b/.gitignore @@ -97,6 +97,7 @@ GTAGS # Leavings from module signing # extra_certificates +signing_key.pem signing_key.priv signing_key.x509 x509.genkey diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt index 693001920890..5d5e4e32dc26 100644 --- a/Documentation/module-signing.txt +++ b/Documentation/module-signing.txt @@ -91,7 +91,7 @@ This has a number of options available: (4) "File name or PKCS#11 URI of module signing key" (CONFIG_MODULE_SIG_KEY) Setting this option to something other than its default of - "signing_key.priv" will disable the autogeneration of signing keys and + "signing_key.pem" will disable the autogeneration of signing keys and allow the kernel modules to be signed with a key of your choosing. The string provided should identify a file containing both a private key and its corresponding X.509 certificate in PEM form, or — on @@ -116,11 +116,10 @@ kernel so that it can be used to check the signatures as the modules are loaded. Under normal conditions, when CONFIG_MODULE_SIG_KEY is unchanged from its -default of "signing_key.priv", the kernel build will automatically generate -a new keypair using openssl if one does not exist in the files: +default, the kernel build will automatically generate a new keypair using +openssl if one does not exist in the file: - signing_key.priv - signing_key.x509 + signing_key.pem during the building of vmlinux (the public part of the key needs to be built into vmlinux) using parameters in the: diff --git a/Makefile b/Makefile index 531dd16c9751..6ab99d8cc23c 100644 --- a/Makefile +++ b/Makefile @@ -1173,8 +1173,8 @@ MRPROPER_DIRS += include/config usr/include include/generated \ arch/*/include/generated .tmp_objdiff MRPROPER_FILES += .config .config.old .version .old_version \ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \ - signing_key.priv signing_key.x509 x509.genkey \ - extra_certificates signing_key.x509.keyid \ + signing_key.pem signing_key.priv signing_key.x509 \ + x509.genkey extra_certificates signing_key.x509.keyid \ signing_key.x509.signer vmlinux-gdb.py # clean - Delete most, but leave enough to build external modules diff --git a/init/Kconfig b/init/Kconfig index e2e0a1d27886..2b119850784b 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1950,7 +1950,7 @@ config MODULE_SIG_HASH config MODULE_SIG_KEY string "File name or PKCS#11 URI of module signing key" - default "signing_key.priv" + default "signing_key.pem" depends on MODULE_SIG help Provide the file name of a private key/certificate in PEM format, @@ -1958,7 +1958,7 @@ config MODULE_SIG_KEY the URI should identify, both the certificate and its corresponding private key. 
- If this option is unchanged from its default "signing_key.priv", + If this option is unchanged from its default "signing_key.pem", then the kernel will automatically generate the private key and certificate as described in Documentation/module-signing.txt diff --git a/kernel/Makefile b/kernel/Makefile index fa2f8b84b18a..7453283981ca 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -173,8 +173,8 @@ endif # We do it this way rather than having a boolean option for enabling an # external private key, because 'make randconfig' might enable such a # boolean option and we unfortunately can't make it depend on !RANDCONFIG. -ifeq ($(CONFIG_MODULE_SIG_KEY),"signing_key.priv") -signing_key.priv signing_key.x509: x509.genkey +ifeq ($(CONFIG_MODULE_SIG_KEY),"signing_key.pem") +signing_key.pem: x509.genkey @echo "###" @echo "### Now generating an X.509 key pair to be used for signing modules." @echo "###" @@ -185,8 +185,8 @@ signing_key.priv signing_key.x509: x509.genkey @echo "###" openssl req -new -nodes -utf8 -$(CONFIG_MODULE_SIG_HASH) -days 36500 \ -batch -x509 -config x509.genkey \ - -outform DER -out signing_key.x509 \ - -keyout signing_key.priv 2>&1 + -outform PEM -out signing_key.pem \ + -keyout signing_key.pem 2>&1 @echo "###" @echo "### Key pair generated." @echo "###" @@ -210,9 +210,9 @@ x509.genkey: @echo >>x509.genkey "keyUsage=digitalSignature" @echo >>x509.genkey "subjectKeyIdentifier=hash" @echo >>x509.genkey "authorityKeyIdentifier=keyid" -else -# For external (PKCS#11 or PEM) key, we need to obtain the certificate from -# CONFIG_MODULE_SIG_KEY automatically. +endif + +# We need to obtain the certificate from CONFIG_MODULE_SIG_KEY. quiet_cmd_extract_der = CERT_DER $(2) cmd_extract_der = scripts/extract-cert "$(2)" signing_key.x509 @@ -249,4 +249,3 @@ endif signing_key.x509: scripts/extract-cert include/config/module/sig/key.h $(X509_DEP) $(call cmd,extract_der,$(X509_SOURCE)) endif -endif -- cgit v1.2.3 From 99d27b1b52bd5cdf9bd9f7661ca8641e9a1b55e6 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 20 Jul 2015 21:16:31 +0100 Subject: modsign: Add explicit CONFIG_SYSTEM_TRUSTED_KEYS option Let the user explicitly provide a file containing trusted keys, instead of just automatically finding files matching *.x509 in the build tree and trusting whatever we find. This really ought to be an *explicit* configuration, and the build rules for dealing with the files were fairly painful too. Fix applied from James Morris that removes an '=' from a macro definition in kernel/Makefile as this is a feature that only exists from GNU make 3.82 onwards. Signed-off-by: David Woodhouse Signed-off-by: David Howells --- Documentation/module-signing.txt | 15 +++-- init/Kconfig | 13 ++++ kernel/Makefile | 125 ++++++++++++++++++++------------------- 3 files changed, 89 insertions(+), 64 deletions(-) (limited to 'kernel/Makefile') diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt index 5d5e4e32dc26..4e62bc29666e 100644 --- a/Documentation/module-signing.txt +++ b/Documentation/module-signing.txt @@ -88,6 +88,7 @@ This has a number of options available: than being a module) so that modules signed with that algorithm can have their signatures checked without causing a dependency loop. + (4) "File name or PKCS#11 URI of module signing key" (CONFIG_MODULE_SIG_KEY) Setting this option to something other than its default of @@ -104,6 +105,13 @@ This has a number of options available: means of the KBUILD_SIGN_PIN variable. 
+ (5) "Additional X.509 keys for default system keyring" (CONFIG_SYSTEM_TRUSTED_KEYS) + + This option can be set to the filename of a PEM-encoded file containing + additional certificates which will be included in the system keyring by + default. + + ======================= GENERATING SIGNING KEYS ======================= @@ -171,10 +179,9 @@ in a keyring called ".system_keyring" that can be seen by: 302d2d52 I------ 1 perm 1f010000 0 0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 [] ... -Beyond the public key generated specifically for module signing, any file -placed in the kernel source root directory or the kernel build root directory -whose name is suffixed with ".x509" will be assumed to be an X.509 public key -and will be added to the keyring. +Beyond the public key generated specifically for module signing, additional +trusted certificates can be provided in a PEM-encoded file referenced by the +CONFIG_SYSTEM_TRUSTED_KEYS configuration option. Further, the architecture code may take public keys from a hardware store and add those in also (e.g. from the UEFI key database). diff --git a/init/Kconfig b/init/Kconfig index 2b119850784b..62b725653c36 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1752,6 +1752,19 @@ config SYSTEM_TRUSTED_KEYRING Keys in this keyring are used by module signature checking. +config SYSTEM_TRUSTED_KEYS + string "Additional X.509 keys for default system keyring" + depends on SYSTEM_TRUSTED_KEYRING + help + If set, this option should be the filename of a PEM-formatted file + containing trusted X.509 certificates to be included in the default + system keyring. Any certificate used for module signing is implicitly + also trusted. + + NOTE: If you previously provided keys for the system keyring in the + form of DER-encoded *.x509 files in the top-level build directory, + those are no longer used. You will need to set this option instead. + config SYSTEM_DATA_VERIFICATION def_bool n select SYSTEM_TRUSTED_KEYRING diff --git a/kernel/Makefile b/kernel/Makefile index 7453283981ca..575329777d9e 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -114,46 +114,75 @@ $(obj)/config_data.h: $(obj)/config_data.gz FORCE ############################################################################### # -# Roll all the X.509 certificates that we can find together and pull them into -# the kernel so that they get loaded into the system trusted keyring during -# boot. +# When a Kconfig string contains a filename, it is suitable for +# passing to shell commands. It is surrounded by double-quotes, and +# any double-quotes or backslashes within it are escaped by +# backslashes. # -# We look in the source root and the build root for all files whose name ends -# in ".x509". Unfortunately, this will generate duplicate filenames, so we -# have make canonicalise the pathnames and then sort them to discard the -# duplicates. +# This is no use for dependencies or $(wildcard). We need to strip the +# surrounding quotes and the escaping from quotes and backslashes, and +# we *do* need to escape any spaces in the string. So, for example: +# +# Usage: $(eval $(call config_filename,FOO)) +# +# Defines FOO_FILENAME based on the contents of the CONFIG_FOO option, +# transformed as described above to be suitable for use within the +# makefile. 
+# +# Also, if the filename is a relative filename and exists in the source +# tree but not the build tree, define FOO_SRCPREFIX as $(srctree)/ to +# be prefixed to *both* command invocation and dependencies. +# +# Note: We also print the filenames in the quiet_cmd_foo text, and +# perhaps ought to have a version specially escaped for that purpose. +# But it's only cosmetic, and $(patsubst "%",%,$(CONFIG_FOO)) is good +# enough. It'll strip the quotes in the common case where there's no +# space and it's a simple filename, and it'll retain the quotes when +# there's a space. There are some esoteric cases in which it'll print +# the wrong thing, but we don't really care. The actual dependencies +# and commands *do* get it right, with various combinations of single +# and double quotes, backslashes and spaces in the filenames. # ############################################################################### -ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) -X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509) -X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509 -X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \ - $(or $(realpath $(CERT)),$(CERT)))) -X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw)) - -ifeq ($(X509_CERTIFICATES),) -$(warning *** No X.509 certificates found ***) +# +quote := $(firstword " ") +space := +space += +space_escape := %%%SPACE%%% +# +define config_filename +ifneq ($$(CONFIG_$(1)),"") +$(1)_FILENAME := $$(subst \\,\,$$(subst \$$(quote),$$(quote),$$(subst $$(space_escape),\$$(space),$$(patsubst "%",%,$$(subst $$(space),$$(space_escape),$$(CONFIG_$(1))))))) +ifneq ($$(patsubst /%,%,$$(firstword $$($(1)_FILENAME))),$$(firstword $$($(1)_FILENAME))) +else +ifeq ($$(wildcard $$($(1)_FILENAME)),) +ifneq ($$(wildcard $$(srctree)/$$($(1)_FILENAME)),) +$(1)_SRCPREFIX := $(srctree)/ +endif endif - -ifneq ($(wildcard $(obj)/.x509.list),) -ifneq ($(shell cat $(obj)/.x509.list),$(X509_CERTIFICATES)) -$(warning X.509 certificate list changed to "$(X509_CERTIFICATES)" from "$(shell cat $(obj)/.x509.list)") -$(shell rm $(obj)/.x509.list) endif endif +endef +# +############################################################################### + + +ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) + +$(eval $(call config_filename,SYSTEM_TRUSTED_KEYS)) + +SIGNING_X509-$(CONFIG_MODULE_SIG) += signing_key.x509 kernel/system_certificates.o: $(obj)/x509_certificate_list -quiet_cmd_x509certs = CERTS $@ - cmd_x509certs = cat $(X509_CERTIFICATES) /dev/null >$@ $(foreach X509,$(X509_CERTIFICATES),; $(kecho) " - Including cert $(X509)") +quiet_cmd_x509certs = CERTS $(SIGNING_X509-y) $(patsubst "%",%,$(2)) + cmd_x509certs = ( cat $(SIGNING_X509-y) /dev/null; \ + awk '/-----BEGIN CERTIFICATE-----/{flag=1;next}/-----END CERTIFICATE-----/{flag=0}flag' $(2) /dev/null | base64 -d ) > $@ || ( rm $@; exit 1) targets += $(obj)/x509_certificate_list -$(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list - $(call if_changed,x509certs) +$(obj)/x509_certificate_list: $(SIGNING_X509-y) include/config/system/trusted/keys.h $(wildcard include/config/module/sig.h) $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) + $(call if_changed,x509certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS)) -targets += $(obj)/.x509.list -$(obj)/.x509.list: - @echo $(X509_CERTIFICATES) >$@ endif clean-files := x509_certificate_list .x509.list @@ -212,40 +241,16 @@ x509.genkey: @echo >>x509.genkey 
"authorityKeyIdentifier=keyid" endif -# We need to obtain the certificate from CONFIG_MODULE_SIG_KEY. -quiet_cmd_extract_der = CERT_DER $(2) - cmd_extract_der = scripts/extract-cert "$(2)" signing_key.x509 +$(eval $(call config_filename,MODULE_SIG_KEY)) -# CONFIG_MODULE_SIG_KEY is either a PKCS#11 URI or a filename. It is -# surrounded by quotes, and may contain spaces. To strip the quotes -# with $(patsubst) we need to turn the spaces into something else. -# And if it's a filename, those spaces need to be escaped as '\ ' in -# order to use it in dependencies or $(wildcard). -space := -space += -space_escape := %%%SPACE%%% -X509_SOURCE_temp := $(subst $(space),$(space_escape),$(CONFIG_MODULE_SIG_KEY)) -# We need this to check for absolute paths or PKCS#11 URIs. -X509_SOURCE_ONEWORD := $(patsubst "%",%,$(X509_SOURCE_temp)) -# This is the actual source filename/URI without the quotes -X509_SOURCE := $(subst $(space_escape),$(space),$(X509_SOURCE_ONEWORD)) -# This\ version\ with\ spaces\ escaped\ for\ $(wildcard)\ and\ dependencies -X509_SOURCE_ESCAPED := $(subst $(space_escape),\$(space),$(X509_SOURCE_ONEWORD)) - -ifeq ($(patsubst pkcs11:%,%,$(X509_SOURCE_ONEWORD)),$(X509_SOURCE_ONEWORD)) -# If it's a filename, depend on it. -X509_DEP := $(X509_SOURCE_ESCAPED) -ifeq ($(patsubst /%,%,$(X509_SOURCE_ONEWORD)),$(X509_SOURCE_ONEWORD)) -ifeq ($(wildcard $(X509_SOURCE_ESCAPED)),) -ifneq ($(wildcard $(srctree)/$(X509_SOURCE_ESCAPED)),) -# Non-absolute filename, found in source tree and not build tree -X509_SOURCE := $(srctree)/$(X509_SOURCE) -X509_DEP := $(srctree)/$(X509_SOURCE_ESCAPED) -endif -endif -endif +# If CONFIG_MODULE_SIG_KEY isn't a PKCS#11 URI, depend on it +ifeq ($(patsubst pkcs11:%,%,$(firstword $(MODULE_SIG_KEY_FILENAME))),$(firstword $(MODULE_SIG_KEY_FILENAME))) +X509_DEP := $(MODULE_SIG_KEY_SRCPREFIX)$(MODULE_SIG_KEY_FILENAME) endif +quiet_cmd_extract_der = SIGNING_CERT $(patsubst "%",%,$(2)) + cmd_extract_der = scripts/extract-cert $(2) signing_key.x509 + signing_key.x509: scripts/extract-cert include/config/module/sig/key.h $(X509_DEP) - $(call cmd,extract_der,$(X509_SOURCE)) + $(call cmd,extract_der,$(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY)) endif -- cgit v1.2.3 From 770f2b98760ef0500183d7206724aac762433e2d Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 20 Jul 2015 21:16:34 +0100 Subject: modsign: Use extract-cert to process CONFIG_SYSTEM_TRUSTED_KEYS Fix up the dependencies somewhat too, while we're at it. 
Signed-off-by: David Woodhouse Signed-off-by: David Howells --- kernel/Makefile | 25 ++++++++++++------------- kernel/system_certificates.S | 3 +++ scripts/Makefile | 3 ++- 3 files changed, 17 insertions(+), 14 deletions(-) (limited to 'kernel/Makefile') diff --git a/kernel/Makefile b/kernel/Makefile index 575329777d9e..65ef3846fbe8 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -166,23 +166,22 @@ endef # ############################################################################### - ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) $(eval $(call config_filename,SYSTEM_TRUSTED_KEYS)) -SIGNING_X509-$(CONFIG_MODULE_SIG) += signing_key.x509 - -kernel/system_certificates.o: $(obj)/x509_certificate_list +# GCC doesn't include .incbin files in -MD generated dependencies (PR#66871) +$(obj)/system_certificates.o: $(obj)/x509_certificate_list -quiet_cmd_x509certs = CERTS $(SIGNING_X509-y) $(patsubst "%",%,$(2)) - cmd_x509certs = ( cat $(SIGNING_X509-y) /dev/null; \ - awk '/-----BEGIN CERTIFICATE-----/{flag=1;next}/-----END CERTIFICATE-----/{flag=0}flag' $(2) /dev/null | base64 -d ) > $@ || ( rm $@; exit 1) +# Cope with signing_key.x509 existing in $(srctree) not $(objtree) +AFLAGS_system_certificates.o := -I$(srctree) -targets += $(obj)/x509_certificate_list -$(obj)/x509_certificate_list: $(SIGNING_X509-y) include/config/system/trusted/keys.h $(wildcard include/config/module/sig.h) $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) - $(call if_changed,x509certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS)) +quiet_cmd_extract_certs = EXTRACT_CERTS $(patsubst "%",%,$(2)) + cmd_extract_certs = scripts/extract-cert $(2) $@ || ( rm $@; exit 1) +targets += x509_certificate_list +$(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) FORCE + $(call if_changed,extract_certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS)) endif clean-files := x509_certificate_list .x509.list @@ -248,9 +247,9 @@ ifeq ($(patsubst pkcs11:%,%,$(firstword $(MODULE_SIG_KEY_FILENAME))),$(firstword X509_DEP := $(MODULE_SIG_KEY_SRCPREFIX)$(MODULE_SIG_KEY_FILENAME) endif -quiet_cmd_extract_der = SIGNING_CERT $(patsubst "%",%,$(2)) - cmd_extract_der = scripts/extract-cert $(2) signing_key.x509 +# GCC PR#66871 again. 
+$(obj)/system_certificates.o: signing_key.x509 signing_key.x509: scripts/extract-cert include/config/module/sig/key.h $(X509_DEP) - $(call cmd,extract_der,$(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY)) + $(call cmd,extract_certs,$(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY)) endif diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S index 3e9868d47535..6ba2f75e7ba5 100644 --- a/kernel/system_certificates.S +++ b/kernel/system_certificates.S @@ -7,6 +7,9 @@ .globl VMLINUX_SYMBOL(system_certificate_list) VMLINUX_SYMBOL(system_certificate_list): __cert_list_start: +#ifdef CONFIG_MODULE_SIG + .incbin "signing_key.x509" +#endif .incbin "kernel/x509_certificate_list" __cert_list_end: diff --git a/scripts/Makefile b/scripts/Makefile index 236f683510bd..1b2661712d44 100644 --- a/scripts/Makefile +++ b/scripts/Makefile @@ -16,7 +16,8 @@ hostprogs-$(CONFIG_VT) += conmakehash hostprogs-$(BUILD_C_RECORDMCOUNT) += recordmcount hostprogs-$(CONFIG_BUILDTIME_EXTABLE_SORT) += sortextable hostprogs-$(CONFIG_ASN1) += asn1_compiler -hostprogs-$(CONFIG_MODULE_SIG) += sign-file extract-cert +hostprogs-$(CONFIG_MODULE_SIG) += sign-file +hostprogs-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += extract-cert HOSTCFLAGS_sortextable.o = -I$(srctree)/tools/include HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include -- cgit v1.2.3 From cfc411e7fff3e15cd6354ff69773907e2c9d1c0c Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 14 Aug 2015 15:20:41 +0100 Subject: Move certificate handling to its own directory Move certificate handling out of the kernel/ directory and into a certs/ directory to get all the weird stuff in one place and move the generated signing keys into this directory. Signed-off-by: David Howells Reviewed-by: David Woodhouse --- Documentation/module-signing.txt | 18 ++--- MAINTAINERS | 9 +++ Makefile | 4 +- certs/Kconfig | 42 +++++++++++ certs/Makefile | 147 ++++++++++++++++++++++++++++++++++++ certs/system_certificates.S | 23 ++++++ certs/system_keyring.c | 157 +++++++++++++++++++++++++++++++++++++++ crypto/Kconfig | 1 + init/Kconfig | 39 ---------- kernel/Makefile | 143 ----------------------------------- kernel/system_certificates.S | 23 ------ kernel/system_keyring.c | 157 --------------------------------------- 12 files changed, 390 insertions(+), 373 deletions(-) create mode 100644 certs/Kconfig create mode 100644 certs/Makefile create mode 100644 certs/system_certificates.S create mode 100644 certs/system_keyring.c delete mode 100644 kernel/system_certificates.S delete mode 100644 kernel/system_keyring.c (limited to 'kernel/Makefile') diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt index 02a9baf1c72f..a78bf1ffa68c 100644 --- a/Documentation/module-signing.txt +++ b/Documentation/module-signing.txt @@ -92,13 +92,13 @@ This has a number of options available: (4) "File name or PKCS#11 URI of module signing key" (CONFIG_MODULE_SIG_KEY) Setting this option to something other than its default of - "signing_key.pem" will disable the autogeneration of signing keys and - allow the kernel modules to be signed with a key of your choosing. - The string provided should identify a file containing both a private - key and its corresponding X.509 certificate in PEM form, or — on - systems where the OpenSSL ENGINE_pkcs11 is functional — a PKCS#11 URI - as defined by RFC7512. In the latter case, the PKCS#11 URI should - reference both a certificate and a private key. 
+ "certs/signing_key.pem" will disable the autogeneration of signing keys + and allow the kernel modules to be signed with a key of your choosing. + The string provided should identify a file containing both a private key + and its corresponding X.509 certificate in PEM form, or — on systems where + the OpenSSL ENGINE_pkcs11 is functional — a PKCS#11 URI as defined by + RFC7512. In the latter case, the PKCS#11 URI should reference both a + certificate and a private key. If the PEM file containing the private key is encrypted, or if the PKCS#11 token requries a PIN, this can be provided at build time by @@ -130,12 +130,12 @@ Under normal conditions, when CONFIG_MODULE_SIG_KEY is unchanged from its default, the kernel build will automatically generate a new keypair using openssl if one does not exist in the file: - signing_key.pem + certs/signing_key.pem during the building of vmlinux (the public part of the key needs to be built into vmlinux) using parameters in the: - x509.genkey + certs/x509.genkey file (which is also generated if it does not already exist). diff --git a/MAINTAINERS b/MAINTAINERS index bde2e3f5a10b..294dc59ed5e1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2589,6 +2589,15 @@ S: Supported F: Documentation/filesystems/ceph.txt F: fs/ceph/ +CERTIFICATE HANDLING: +M: David Howells +M: David Woodhouse +L: keyrings@linux-nfs.org +S: Maintained +F: Documentation/module-signing.txt +F: certs/ +F: scripts/extract-cert.c + CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM: L: linux-usb@vger.kernel.org S: Orphan diff --git a/Makefile b/Makefile index 6ab99d8cc23c..2341942feb85 100644 --- a/Makefile +++ b/Makefile @@ -871,7 +871,7 @@ INITRD_COMPRESS-$(CONFIG_RD_LZ4) := lz4 ifdef CONFIG_MODULE_SIG_ALL MODSECKEY = $(CONFIG_MODULE_SIG_KEY) -MODPUBKEY = ./signing_key.x509 +MODPUBKEY = certs/signing_key.x509 export MODPUBKEY mod_sign_cmd = scripts/sign-file $(CONFIG_MODULE_SIG_HASH) $(MODSECKEY) $(MODPUBKEY) else @@ -881,7 +881,7 @@ export mod_sign_cmd ifeq ($(KBUILD_EXTMOD),) -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ +core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ diff --git a/certs/Kconfig b/certs/Kconfig new file mode 100644 index 000000000000..b030b9c7ed34 --- /dev/null +++ b/certs/Kconfig @@ -0,0 +1,42 @@ +menu "Certificates for signature checking" + +config MODULE_SIG_KEY + string "File name or PKCS#11 URI of module signing key" + default "certs/signing_key.pem" + depends on MODULE_SIG + help + Provide the file name of a private key/certificate in PEM format, + or a PKCS#11 URI according to RFC7512. The file should contain, or + the URI should identify, both the certificate and its corresponding + private key. + + If this option is unchanged from its default "certs/signing_key.pem", + then the kernel will automatically generate the private key and + certificate as described in Documentation/module-signing.txt + +config SYSTEM_TRUSTED_KEYRING + bool "Provide system-wide ring of trusted keys" + depends on KEYS + help + Provide a system keyring to which trusted keys can be added. Keys in + the keyring are considered to be trusted. Keys may be added at will + by the kernel from compiled-in data and from hardware key stores, but + userspace may only add extra keys if those keys can be verified by + keys already in the keyring. + + Keys in this keyring are used by module signature checking. 
+ +config SYSTEM_TRUSTED_KEYS + string "Additional X.509 keys for default system keyring" + depends on SYSTEM_TRUSTED_KEYRING + help + If set, this option should be the filename of a PEM-formatted file + containing trusted X.509 certificates to be included in the default + system keyring. Any certificate used for module signing is implicitly + also trusted. + + NOTE: If you previously provided keys for the system keyring in the + form of DER-encoded *.x509 files in the top-level build directory, + those are no longer used. You will need to set this option instead. + +endmenu diff --git a/certs/Makefile b/certs/Makefile new file mode 100644 index 000000000000..5d33486d3b20 --- /dev/null +++ b/certs/Makefile @@ -0,0 +1,147 @@ +# +# Makefile for the linux kernel signature checking certificates. +# + +obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o + +############################################################################### +# +# When a Kconfig string contains a filename, it is suitable for +# passing to shell commands. It is surrounded by double-quotes, and +# any double-quotes or backslashes within it are escaped by +# backslashes. +# +# This is no use for dependencies or $(wildcard). We need to strip the +# surrounding quotes and the escaping from quotes and backslashes, and +# we *do* need to escape any spaces in the string. So, for example: +# +# Usage: $(eval $(call config_filename,FOO)) +# +# Defines FOO_FILENAME based on the contents of the CONFIG_FOO option, +# transformed as described above to be suitable for use within the +# makefile. +# +# Also, if the filename is a relative filename and exists in the source +# tree but not the build tree, define FOO_SRCPREFIX as $(srctree)/ to +# be prefixed to *both* command invocation and dependencies. +# +# Note: We also print the filenames in the quiet_cmd_foo text, and +# perhaps ought to have a version specially escaped for that purpose. +# But it's only cosmetic, and $(patsubst "%",%,$(CONFIG_FOO)) is good +# enough. It'll strip the quotes in the common case where there's no +# space and it's a simple filename, and it'll retain the quotes when +# there's a space. There are some esoteric cases in which it'll print +# the wrong thing, but we don't really care. The actual dependencies +# and commands *do* get it right, with various combinations of single +# and double quotes, backslashes and spaces in the filenames. 
+# +############################################################################### +# +quote := $(firstword " ") +space := +space += +space_escape := %%%SPACE%%% +# +define config_filename +ifneq ($$(CONFIG_$(1)),"") +$(1)_FILENAME := $$(subst \\,\,$$(subst \$$(quote),$$(quote),$$(subst $$(space_escape),\$$(space),$$(patsubst "%",%,$$(subst $$(space),$$(space_escape),$$(CONFIG_$(1))))))) +ifneq ($$(patsubst /%,%,$$(firstword $$($(1)_FILENAME))),$$(firstword $$($(1)_FILENAME))) +else +ifeq ($$(wildcard $$($(1)_FILENAME)),) +ifneq ($$(wildcard $$(srctree)/$$($(1)_FILENAME)),) +$(1)_SRCPREFIX := $(srctree)/ +endif +endif +endif +endif +endef +# +############################################################################### + +ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) + +$(eval $(call config_filename,SYSTEM_TRUSTED_KEYS)) + +# GCC doesn't include .incbin files in -MD generated dependencies (PR#66871) +$(obj)/system_certificates.o: $(obj)/x509_certificate_list + +# Cope with signing_key.x509 existing in $(srctree) not $(objtree) +AFLAGS_system_certificates.o := -I$(srctree) + +quiet_cmd_extract_certs = EXTRACT_CERTS $(patsubst "%",%,$(2)) + cmd_extract_certs = scripts/extract-cert $(2) $@ || ( rm $@; exit 1) + +targets += x509_certificate_list +$(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) FORCE + $(call if_changed,extract_certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS)) +endif + +clean-files := x509_certificate_list .x509.list + +ifeq ($(CONFIG_MODULE_SIG),y) +############################################################################### +# +# If module signing is requested, say by allyesconfig, but a key has not been +# supplied, then one will need to be generated to make sure the build does not +# fail and that the kernel may be used afterwards. +# +############################################################################### +ifndef CONFIG_MODULE_SIG_HASH +$(error Could not determine digest type to use from kernel config) +endif + +# We do it this way rather than having a boolean option for enabling an +# external private key, because 'make randconfig' might enable such a +# boolean option and we unfortunately can't make it depend on !RANDCONFIG. +ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem") +$(obj)/signing_key.pem: $(obj)/x509.genkey + @echo "###" + @echo "### Now generating an X.509 key pair to be used for signing modules." + @echo "###" + @echo "### If this takes a long time, you might wish to run rngd in the" + @echo "### background to keep the supply of entropy topped up. It" + @echo "### needs to be run as root, and uses a hardware random" + @echo "### number generator if one is available." + @echo "###" + openssl req -new -nodes -utf8 -$(CONFIG_MODULE_SIG_HASH) -days 36500 \ + -batch -x509 -config $(obj)/x509.genkey \ + -outform PEM -out $(obj)/signing_key.pem \ + -keyout $(obj)/signing_key.pem 2>&1 + @echo "###" + @echo "### Key pair generated." 
+ @echo "###" + +$(obj)/x509.genkey: + @echo Generating X.509 key generation config + @echo >$@ "[ req ]" + @echo >>$@ "default_bits = 4096" + @echo >>$@ "distinguished_name = req_distinguished_name" + @echo >>$@ "prompt = no" + @echo >>$@ "string_mask = utf8only" + @echo >>$@ "x509_extensions = myexts" + @echo >>$@ + @echo >>$@ "[ req_distinguished_name ]" + @echo >>$@ "#O = Unspecified company" + @echo >>$@ "CN = Build time autogenerated kernel key" + @echo >>$@ "#emailAddress = unspecified.user@unspecified.company" + @echo >>$@ + @echo >>$@ "[ myexts ]" + @echo >>$@ "basicConstraints=critical,CA:FALSE" + @echo >>$@ "keyUsage=digitalSignature" + @echo >>$@ "subjectKeyIdentifier=hash" + @echo >>$@ "authorityKeyIdentifier=keyid" +endif + +$(eval $(call config_filename,MODULE_SIG_KEY)) + +# If CONFIG_MODULE_SIG_KEY isn't a PKCS#11 URI, depend on it +ifeq ($(patsubst pkcs11:%,%,$(firstword $(MODULE_SIG_KEY_FILENAME))),$(firstword $(MODULE_SIG_KEY_FILENAME))) +X509_DEP := $(MODULE_SIG_KEY_SRCPREFIX)$(MODULE_SIG_KEY_FILENAME) +endif + +# GCC PR#66871 again. +$(obj)/system_certificates.o: $(obj)/signing_key.x509 + +$(obj)/signing_key.x509: scripts/extract-cert include/config/module/sig/key.h $(X509_DEP) + $(call cmd,extract_certs,$(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY)) +endif diff --git a/certs/system_certificates.S b/certs/system_certificates.S new file mode 100644 index 000000000000..9216e8c81764 --- /dev/null +++ b/certs/system_certificates.S @@ -0,0 +1,23 @@ +#include +#include + + __INITRODATA + + .align 8 + .globl VMLINUX_SYMBOL(system_certificate_list) +VMLINUX_SYMBOL(system_certificate_list): +__cert_list_start: +#ifdef CONFIG_MODULE_SIG + .incbin "certs/signing_key.x509" +#endif + .incbin "certs/x509_certificate_list" +__cert_list_end: + + .align 8 + .globl VMLINUX_SYMBOL(system_certificate_list_size) +VMLINUX_SYMBOL(system_certificate_list_size): +#ifdef CONFIG_64BIT + .quad __cert_list_end - __cert_list_start +#else + .long __cert_list_end - __cert_list_start +#endif diff --git a/certs/system_keyring.c b/certs/system_keyring.c new file mode 100644 index 000000000000..2570598b784d --- /dev/null +++ b/certs/system_keyring.c @@ -0,0 +1,157 @@ +/* System trusted keyring for trusted public keys + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct key *system_trusted_keyring; +EXPORT_SYMBOL_GPL(system_trusted_keyring); + +extern __initconst const u8 system_certificate_list[]; +extern __initconst const unsigned long system_certificate_list_size; + +/* + * Load the compiled-in keys + */ +static __init int system_trusted_keyring_init(void) +{ + pr_notice("Initialise system trusted keyring\n"); + + system_trusted_keyring = + keyring_alloc(".system_keyring", + KUIDT_INIT(0), KGIDT_INIT(0), current_cred(), + ((KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH), + KEY_ALLOC_NOT_IN_QUOTA, NULL); + if (IS_ERR(system_trusted_keyring)) + panic("Can't allocate system trusted keyring\n"); + + set_bit(KEY_FLAG_TRUSTED_ONLY, &system_trusted_keyring->flags); + return 0; +} + +/* + * Must be initialised before we try and load the keys into the keyring. 
+ */ +device_initcall(system_trusted_keyring_init); + +/* + * Load the compiled-in list of X.509 certificates. + */ +static __init int load_system_certificate_list(void) +{ + key_ref_t key; + const u8 *p, *end; + size_t plen; + + pr_notice("Loading compiled-in X.509 certificates\n"); + + p = system_certificate_list; + end = p + system_certificate_list_size; + while (p < end) { + /* Each cert begins with an ASN.1 SEQUENCE tag and must be more + * than 256 bytes in size. + */ + if (end - p < 4) + goto dodgy_cert; + if (p[0] != 0x30 && + p[1] != 0x82) + goto dodgy_cert; + plen = (p[2] << 8) | p[3]; + plen += 4; + if (plen > end - p) + goto dodgy_cert; + + key = key_create_or_update(make_key_ref(system_trusted_keyring, 1), + "asymmetric", + NULL, + p, + plen, + ((KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ), + KEY_ALLOC_NOT_IN_QUOTA | + KEY_ALLOC_TRUSTED); + if (IS_ERR(key)) { + pr_err("Problem loading in-kernel X.509 certificate (%ld)\n", + PTR_ERR(key)); + } else { + set_bit(KEY_FLAG_BUILTIN, &key_ref_to_ptr(key)->flags); + pr_notice("Loaded X.509 cert '%s'\n", + key_ref_to_ptr(key)->description); + key_ref_put(key); + } + p += plen; + } + + return 0; + +dodgy_cert: + pr_err("Problem parsing in-kernel X.509 certificate list\n"); + return 0; +} +late_initcall(load_system_certificate_list); + +#ifdef CONFIG_SYSTEM_DATA_VERIFICATION + +/** + * Verify a PKCS#7-based signature on system data. + * @data: The data to be verified. + * @len: Size of @data. + * @raw_pkcs7: The PKCS#7 message that is the signature. + * @pkcs7_len: The size of @raw_pkcs7. + * @usage: The use to which the key is being put. + */ +int system_verify_data(const void *data, unsigned long len, + const void *raw_pkcs7, size_t pkcs7_len, + enum key_being_used_for usage) +{ + struct pkcs7_message *pkcs7; + bool trusted; + int ret; + + pkcs7 = pkcs7_parse_message(raw_pkcs7, pkcs7_len); + if (IS_ERR(pkcs7)) + return PTR_ERR(pkcs7); + + /* The data should be detached - so we need to supply it. */ + if (pkcs7_supply_detached_data(pkcs7, data, len) < 0) { + pr_err("PKCS#7 signature with non-detached data\n"); + ret = -EBADMSG; + goto error; + } + + ret = pkcs7_verify(pkcs7, usage); + if (ret < 0) + goto error; + + ret = pkcs7_validate_trust(pkcs7, system_trusted_keyring, &trusted); + if (ret < 0) + goto error; + + if (!trusted) { + pr_err("PKCS#7 signature not signed with a trusted key\n"); + ret = -ENOKEY; + } + +error: + pkcs7_free_message(pkcs7); + pr_devel("<==%s() = %d\n", __func__, ret); + return ret; +} +EXPORT_SYMBOL_GPL(system_verify_data); + +#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ diff --git a/crypto/Kconfig b/crypto/Kconfig index b4cfc5754033..51b01de7c0ae 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -1601,5 +1601,6 @@ config CRYPTO_HASH_INFO source "drivers/crypto/Kconfig" source crypto/asymmetric_keys/Kconfig +source certs/Kconfig endif # if CRYPTO diff --git a/init/Kconfig b/init/Kconfig index 5d1a703663ad..5526dfaac628 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1740,31 +1740,6 @@ config MMAP_ALLOW_UNINITIALIZED See Documentation/nommu-mmap.txt for more information. -config SYSTEM_TRUSTED_KEYRING - bool "Provide system-wide ring of trusted keys" - depends on KEYS - help - Provide a system keyring to which trusted keys can be added. Keys in - the keyring are considered to be trusted. Keys may be added at will - by the kernel from compiled-in data and from hardware key stores, but - userspace may only add extra keys if those keys can be verified by - keys already in the keyring. 
- - Keys in this keyring are used by module signature checking. - -config SYSTEM_TRUSTED_KEYS - string "Additional X.509 keys for default system keyring" - depends on SYSTEM_TRUSTED_KEYRING - help - If set, this option should be the filename of a PEM-formatted file - containing trusted X.509 certificates to be included in the default - system keyring. Any certificate used for module signing is implicitly - also trusted. - - NOTE: If you previously provided keys for the system keyring in the - form of DER-encoded *.x509 files in the top-level build directory, - those are no longer used. You will need to set this option instead. - config SYSTEM_DATA_VERIFICATION def_bool n select SYSTEM_TRUSTED_KEYRING @@ -1965,20 +1940,6 @@ config MODULE_SIG_HASH default "sha384" if MODULE_SIG_SHA384 default "sha512" if MODULE_SIG_SHA512 -config MODULE_SIG_KEY - string "File name or PKCS#11 URI of module signing key" - default "signing_key.pem" - depends on MODULE_SIG - help - Provide the file name of a private key/certificate in PEM format, - or a PKCS#11 URI according to RFC7512. The file should contain, or - the URI should identify, both the certificate and its corresponding - private key. - - If this option is unchanged from its default "signing_key.pem", - then the kernel will automatically generate the private key and - certificate as described in Documentation/module-signing.txt - config MODULE_COMPRESS bool "Compress modules on installation" depends on MODULES diff --git a/kernel/Makefile b/kernel/Makefile index 65ef3846fbe8..1aa153a1be21 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -45,7 +45,6 @@ ifneq ($(CONFIG_SMP),y) obj-y += up.o endif obj-$(CONFIG_UID16) += uid16.o -obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_MODULE_SIG) += module_signing.o obj-$(CONFIG_KALLSYMS) += kallsyms.o @@ -111,145 +110,3 @@ $(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE targets += config_data.h $(obj)/config_data.h: $(obj)/config_data.gz FORCE $(call filechk,ikconfiggz) - -############################################################################### -# -# When a Kconfig string contains a filename, it is suitable for -# passing to shell commands. It is surrounded by double-quotes, and -# any double-quotes or backslashes within it are escaped by -# backslashes. -# -# This is no use for dependencies or $(wildcard). We need to strip the -# surrounding quotes and the escaping from quotes and backslashes, and -# we *do* need to escape any spaces in the string. So, for example: -# -# Usage: $(eval $(call config_filename,FOO)) -# -# Defines FOO_FILENAME based on the contents of the CONFIG_FOO option, -# transformed as described above to be suitable for use within the -# makefile. -# -# Also, if the filename is a relative filename and exists in the source -# tree but not the build tree, define FOO_SRCPREFIX as $(srctree)/ to -# be prefixed to *both* command invocation and dependencies. -# -# Note: We also print the filenames in the quiet_cmd_foo text, and -# perhaps ought to have a version specially escaped for that purpose. -# But it's only cosmetic, and $(patsubst "%",%,$(CONFIG_FOO)) is good -# enough. It'll strip the quotes in the common case where there's no -# space and it's a simple filename, and it'll retain the quotes when -# there's a space. There are some esoteric cases in which it'll print -# the wrong thing, but we don't really care. 
The actual dependencies -# and commands *do* get it right, with various combinations of single -# and double quotes, backslashes and spaces in the filenames. -# -############################################################################### -# -quote := $(firstword " ") -space := -space += -space_escape := %%%SPACE%%% -# -define config_filename -ifneq ($$(CONFIG_$(1)),"") -$(1)_FILENAME := $$(subst \\,\,$$(subst \$$(quote),$$(quote),$$(subst $$(space_escape),\$$(space),$$(patsubst "%",%,$$(subst $$(space),$$(space_escape),$$(CONFIG_$(1))))))) -ifneq ($$(patsubst /%,%,$$(firstword $$($(1)_FILENAME))),$$(firstword $$($(1)_FILENAME))) -else -ifeq ($$(wildcard $$($(1)_FILENAME)),) -ifneq ($$(wildcard $$(srctree)/$$($(1)_FILENAME)),) -$(1)_SRCPREFIX := $(srctree)/ -endif -endif -endif -endif -endef -# -############################################################################### - -ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y) - -$(eval $(call config_filename,SYSTEM_TRUSTED_KEYS)) - -# GCC doesn't include .incbin files in -MD generated dependencies (PR#66871) -$(obj)/system_certificates.o: $(obj)/x509_certificate_list - -# Cope with signing_key.x509 existing in $(srctree) not $(objtree) -AFLAGS_system_certificates.o := -I$(srctree) - -quiet_cmd_extract_certs = EXTRACT_CERTS $(patsubst "%",%,$(2)) - cmd_extract_certs = scripts/extract-cert $(2) $@ || ( rm $@; exit 1) - -targets += x509_certificate_list -$(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) FORCE - $(call if_changed,extract_certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS)) -endif - -clean-files := x509_certificate_list .x509.list - -ifeq ($(CONFIG_MODULE_SIG),y) -############################################################################### -# -# If module signing is requested, say by allyesconfig, but a key has not been -# supplied, then one will need to be generated to make sure the build does not -# fail and that the kernel may be used afterwards. -# -############################################################################### -ifndef CONFIG_MODULE_SIG_HASH -$(error Could not determine digest type to use from kernel config) -endif - -# We do it this way rather than having a boolean option for enabling an -# external private key, because 'make randconfig' might enable such a -# boolean option and we unfortunately can't make it depend on !RANDCONFIG. -ifeq ($(CONFIG_MODULE_SIG_KEY),"signing_key.pem") -signing_key.pem: x509.genkey - @echo "###" - @echo "### Now generating an X.509 key pair to be used for signing modules." - @echo "###" - @echo "### If this takes a long time, you might wish to run rngd in the" - @echo "### background to keep the supply of entropy topped up. It" - @echo "### needs to be run as root, and uses a hardware random" - @echo "### number generator if one is available." - @echo "###" - openssl req -new -nodes -utf8 -$(CONFIG_MODULE_SIG_HASH) -days 36500 \ - -batch -x509 -config x509.genkey \ - -outform PEM -out signing_key.pem \ - -keyout signing_key.pem 2>&1 - @echo "###" - @echo "### Key pair generated." 
- @echo "###" - -x509.genkey: - @echo Generating X.509 key generation config - @echo >x509.genkey "[ req ]" - @echo >>x509.genkey "default_bits = 4096" - @echo >>x509.genkey "distinguished_name = req_distinguished_name" - @echo >>x509.genkey "prompt = no" - @echo >>x509.genkey "string_mask = utf8only" - @echo >>x509.genkey "x509_extensions = myexts" - @echo >>x509.genkey - @echo >>x509.genkey "[ req_distinguished_name ]" - @echo >>x509.genkey "#O = Unspecified company" - @echo >>x509.genkey "CN = Build time autogenerated kernel key" - @echo >>x509.genkey "#emailAddress = unspecified.user@unspecified.company" - @echo >>x509.genkey - @echo >>x509.genkey "[ myexts ]" - @echo >>x509.genkey "basicConstraints=critical,CA:FALSE" - @echo >>x509.genkey "keyUsage=digitalSignature" - @echo >>x509.genkey "subjectKeyIdentifier=hash" - @echo >>x509.genkey "authorityKeyIdentifier=keyid" -endif - -$(eval $(call config_filename,MODULE_SIG_KEY)) - -# If CONFIG_MODULE_SIG_KEY isn't a PKCS#11 URI, depend on it -ifeq ($(patsubst pkcs11:%,%,$(firstword $(MODULE_SIG_KEY_FILENAME))),$(firstword $(MODULE_SIG_KEY_FILENAME))) -X509_DEP := $(MODULE_SIG_KEY_SRCPREFIX)$(MODULE_SIG_KEY_FILENAME) -endif - -# GCC PR#66871 again. -$(obj)/system_certificates.o: signing_key.x509 - -signing_key.x509: scripts/extract-cert include/config/module/sig/key.h $(X509_DEP) - $(call cmd,extract_certs,$(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY)) -endif diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S deleted file mode 100644 index 6ba2f75e7ba5..000000000000 --- a/kernel/system_certificates.S +++ /dev/null @@ -1,23 +0,0 @@ -#include -#include - - __INITRODATA - - .align 8 - .globl VMLINUX_SYMBOL(system_certificate_list) -VMLINUX_SYMBOL(system_certificate_list): -__cert_list_start: -#ifdef CONFIG_MODULE_SIG - .incbin "signing_key.x509" -#endif - .incbin "kernel/x509_certificate_list" -__cert_list_end: - - .align 8 - .globl VMLINUX_SYMBOL(system_certificate_list_size) -VMLINUX_SYMBOL(system_certificate_list_size): -#ifdef CONFIG_64BIT - .quad __cert_list_end - __cert_list_start -#else - .long __cert_list_end - __cert_list_start -#endif diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c deleted file mode 100644 index 2570598b784d..000000000000 --- a/kernel/system_keyring.c +++ /dev/null @@ -1,157 +0,0 @@ -/* System trusted keyring for trusted public keys - * - * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -struct key *system_trusted_keyring; -EXPORT_SYMBOL_GPL(system_trusted_keyring); - -extern __initconst const u8 system_certificate_list[]; -extern __initconst const unsigned long system_certificate_list_size; - -/* - * Load the compiled-in keys - */ -static __init int system_trusted_keyring_init(void) -{ - pr_notice("Initialise system trusted keyring\n"); - - system_trusted_keyring = - keyring_alloc(".system_keyring", - KUIDT_INIT(0), KGIDT_INIT(0), current_cred(), - ((KEY_POS_ALL & ~KEY_POS_SETATTR) | - KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH), - KEY_ALLOC_NOT_IN_QUOTA, NULL); - if (IS_ERR(system_trusted_keyring)) - panic("Can't allocate system trusted keyring\n"); - - set_bit(KEY_FLAG_TRUSTED_ONLY, &system_trusted_keyring->flags); - return 0; -} - -/* - * Must be initialised before we try and load the keys into the keyring. - */ -device_initcall(system_trusted_keyring_init); - -/* - * Load the compiled-in list of X.509 certificates. - */ -static __init int load_system_certificate_list(void) -{ - key_ref_t key; - const u8 *p, *end; - size_t plen; - - pr_notice("Loading compiled-in X.509 certificates\n"); - - p = system_certificate_list; - end = p + system_certificate_list_size; - while (p < end) { - /* Each cert begins with an ASN.1 SEQUENCE tag and must be more - * than 256 bytes in size. - */ - if (end - p < 4) - goto dodgy_cert; - if (p[0] != 0x30 && - p[1] != 0x82) - goto dodgy_cert; - plen = (p[2] << 8) | p[3]; - plen += 4; - if (plen > end - p) - goto dodgy_cert; - - key = key_create_or_update(make_key_ref(system_trusted_keyring, 1), - "asymmetric", - NULL, - p, - plen, - ((KEY_POS_ALL & ~KEY_POS_SETATTR) | - KEY_USR_VIEW | KEY_USR_READ), - KEY_ALLOC_NOT_IN_QUOTA | - KEY_ALLOC_TRUSTED); - if (IS_ERR(key)) { - pr_err("Problem loading in-kernel X.509 certificate (%ld)\n", - PTR_ERR(key)); - } else { - set_bit(KEY_FLAG_BUILTIN, &key_ref_to_ptr(key)->flags); - pr_notice("Loaded X.509 cert '%s'\n", - key_ref_to_ptr(key)->description); - key_ref_put(key); - } - p += plen; - } - - return 0; - -dodgy_cert: - pr_err("Problem parsing in-kernel X.509 certificate list\n"); - return 0; -} -late_initcall(load_system_certificate_list); - -#ifdef CONFIG_SYSTEM_DATA_VERIFICATION - -/** - * Verify a PKCS#7-based signature on system data. - * @data: The data to be verified. - * @len: Size of @data. - * @raw_pkcs7: The PKCS#7 message that is the signature. - * @pkcs7_len: The size of @raw_pkcs7. - * @usage: The use to which the key is being put. - */ -int system_verify_data(const void *data, unsigned long len, - const void *raw_pkcs7, size_t pkcs7_len, - enum key_being_used_for usage) -{ - struct pkcs7_message *pkcs7; - bool trusted; - int ret; - - pkcs7 = pkcs7_parse_message(raw_pkcs7, pkcs7_len); - if (IS_ERR(pkcs7)) - return PTR_ERR(pkcs7); - - /* The data should be detached - so we need to supply it. 
*/ - if (pkcs7_supply_detached_data(pkcs7, data, len) < 0) { - pr_err("PKCS#7 signature with non-detached data\n"); - ret = -EBADMSG; - goto error; - } - - ret = pkcs7_verify(pkcs7, usage); - if (ret < 0) - goto error; - - ret = pkcs7_validate_trust(pkcs7, system_trusted_keyring, &trusted); - if (ret < 0) - goto error; - - if (!trusted) { - pr_err("PKCS#7 signature not signed with a trusted key\n"); - ret = -ENOKEY; - } - -error: - pkcs7_free_message(pkcs7); - pr_devel("<==%s() = %d\n", __func__, ret); - return ret; -} -EXPORT_SYMBOL_GPL(system_verify_data); - -#endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ -- cgit v1.2.3 From 92281dee825f6d2eb07c441437e4196a44b0861c Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 10 Aug 2015 23:07:06 -0400 Subject: arch: introduce memremap() Existing users of ioremap_cache() are mapping memory that is known in advance to not have i/o side effects. These users are forced to cast away the __iomem annotation, or otherwise neglect to fix the sparse errors thrown when dereferencing pointers to this memory. Provide memremap() as a non __iomem annotated ioremap_*() in the case when ioremap is otherwise a pointer to cacheable memory. Empirically, ioremap_() call sites are seeking memory-like semantics (e.g. speculative reads, and prefetching permitted). memremap() is a break from the ioremap implementation pattern of adding a new memremap_() for each mapping type and having silent compatibility fall backs. Instead, the implementation defines flags that are passed to the central memremap() and if a mapping type is not supported by an arch memremap returns NULL. We introduce a memremap prototype as a trivial wrapper of ioremap_cache() and ioremap_wt(). Later, once all ioremap_cache() and ioremap_wt() usage has been removed from drivers we teach archs to implement arch_memremap() with the ability to strictly enforce the mapping type. 
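
As a rough sketch of the conversion this enables (the function name, start
address and size below are hypothetical, not taken from any in-tree driver):

	#include <linux/io.h>

	/* Map a firmware-described RAM region without the __iomem annotation. */
	static int example_map_table(resource_size_t start, size_t size)
	{
		void *tbl = memremap(start, size, MEMREMAP_WB);

		if (!tbl)
			return -ENOMEM;	/* mapping type not supported here */

		/* plain loads/stores are fine; no readl()/writel() accessors */
		memunmap(tbl);
		return 0;
	}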
Cc: Arnd Bergmann Reviewed-by: Christoph Hellwig Signed-off-by: Dan Williams --- arch/ia64/include/asm/io.h | 1 + arch/sh/include/asm/io.h | 1 + arch/xtensa/include/asm/io.h | 1 + include/linux/io.h | 9 ++++ kernel/Makefile | 2 + kernel/memremap.c | 98 ++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 112 insertions(+) create mode 100644 kernel/memremap.c (limited to 'kernel/Makefile') diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index 80a7e34be009..9041bbe2b7b4 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h @@ -435,6 +435,7 @@ static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned lo { return ioremap(phys_addr, size); } +#define ioremap_cache ioremap_cache /* diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h index 728c4c571f40..6194e20fccca 100644 --- a/arch/sh/include/asm/io.h +++ b/arch/sh/include/asm/io.h @@ -342,6 +342,7 @@ ioremap_cache(phys_addr_t offset, unsigned long size) { return __ioremap_mode(offset, size, PAGE_KERNEL); } +#define ioremap_cache ioremap_cache #ifdef CONFIG_HAVE_IOREMAP_PROT static inline void __iomem * diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h index c39bb6e61911..867840f5400f 100644 --- a/arch/xtensa/include/asm/io.h +++ b/arch/xtensa/include/asm/io.h @@ -57,6 +57,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset, else BUG(); } +#define ioremap_cache ioremap_cache #define ioremap_wc ioremap_nocache #define ioremap_wt ioremap_nocache diff --git a/include/linux/io.h b/include/linux/io.h index fb5a99800e77..3fcf6256c088 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -121,4 +121,13 @@ static inline int arch_phys_wc_index(int handle) #endif #endif +enum { + /* See memremap() kernel-doc for usage description... */ + MEMREMAP_WB = 1 << 0, + MEMREMAP_WT = 1 << 1, +}; + +void *memremap(resource_size_t offset, size_t size, unsigned long flags); +void memunmap(void *addr); + #endif /* _LINUX_IO_H */ diff --git a/kernel/Makefile b/kernel/Makefile index 43c4c920f30a..92866d36e376 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -99,6 +99,8 @@ obj-$(CONFIG_JUMP_LABEL) += jump_label.o obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o obj-$(CONFIG_TORTURE_TEST) += torture.o +obj-$(CONFIG_HAS_IOMEM) += memremap.o + $(obj)/configs.o: $(obj)/config_data.h # config_data.h contains the same information as ikconfig.h but gzipped. diff --git a/kernel/memremap.c b/kernel/memremap.c new file mode 100644 index 000000000000..a293de52e837 --- /dev/null +++ b/kernel/memremap.c @@ -0,0 +1,98 @@ +/* + * Copyright(c) 2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ */
+#include
+#include
+#include
+
+#ifndef ioremap_cache
+/* temporary while we convert existing ioremap_cache users to memremap */
+__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
+{
+	return ioremap(offset, size);
+}
+#endif
+
+/**
+ * memremap() - remap an iomem_resource as cacheable memory
+ * @offset: iomem resource start address
+ * @size: size of remap
+ * @flags: either MEMREMAP_WB or MEMREMAP_WT
+ *
+ * memremap() is "ioremap" for cases where it is known that the resource
+ * being mapped does not have i/o side effects and the __iomem
+ * annotation is not applicable.
+ *
+ * MEMREMAP_WB - matches the default mapping for "System RAM" on
+ * the architecture.  This is usually a read-allocate write-back cache.
+ * Moreover, if MEMREMAP_WB is specified and the requested remap region is
+ * RAM, memremap() will bypass establishing a new mapping and instead return
+ * a pointer into the direct map.
+ *
+ * MEMREMAP_WT - establish a mapping whereby writes either bypass the
+ * cache or are written through to memory and never exist in a
+ * cache-dirty state with respect to program visibility.  Attempts to
+ * map "System RAM" with this mapping type will fail.
+ */
+void *memremap(resource_size_t offset, size_t size, unsigned long flags)
+{
+	int is_ram = region_intersects(offset, size, "System RAM");
+	void *addr = NULL;
+
+	if (is_ram == REGION_MIXED) {
+		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
+				&offset, (unsigned long) size);
+		return NULL;
+	}
+
+	/* Try all mapping types requested until one returns non-NULL */
+	if (flags & MEMREMAP_WB) {
+		flags &= ~MEMREMAP_WB;
+		/*
+		 * MEMREMAP_WB is special in that it can be satisfied
+		 * from the direct map.  Some archs depend on the
+		 * capability of memremap() to autodetect cases where
+		 * the requested range is potentially in "System RAM"
+		 */
+		if (is_ram == REGION_INTERSECTS)
+			addr = __va(offset);
+		else
+			addr = ioremap_cache(offset, size);
+	}
+
+	/*
+	 * If we don't have a mapping yet and more request flags are
+	 * pending then we will be attempting to establish a new virtual
+	 * address mapping.  Enforce that this mapping is not aliasing
+	 * "System RAM"
+	 */
+	if (!addr && is_ram == REGION_INTERSECTS && flags) {
+		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
+				&offset, (unsigned long) size);
+		return NULL;
+	}
+
+	if (!addr && (flags & MEMREMAP_WT)) {
+		flags &= ~MEMREMAP_WT;
+		addr = ioremap_wt(offset, size);
+	}
+
+	return addr;
+}
+EXPORT_SYMBOL(memremap);
+
+void memunmap(void *addr)
+{
+	if (is_vmalloc_addr(addr))
+		iounmap((void __iomem *) addr);
+}
+EXPORT_SYMBOL(memunmap);
-- cgit v1.2.3


From a43cac0d9dc2073ff2245a171429ddbe1accece7 Mon Sep 17 00:00:00 2001
From: Dave Young
Date: Wed, 9 Sep 2015 15:38:51 -0700
Subject: kexec: split kexec_file syscall code to kexec_file.c

Split the kexec_file syscall code out into a separate file,
kernel/kexec_file.c, so that the #ifdef CONFIG_KEXEC_FILE in kexec.c can
be dropped.

Shared variables and functions are moved to kernel/kexec_internal.h per
suggestion from Vivek and Petr.

[akpm@linux-foundation.org: fix bisectability]
[akpm@linux-foundation.org: declare the various arch_kexec functions]
[akpm@linux-foundation.org: fix build]
Signed-off-by: Dave Young
Cc: Eric W.
Biederman Cc: Vivek Goyal Cc: Petr Tesarik Cc: Theodore Ts'o Cc: Josh Boyer Cc: David Howells Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kexec.h | 11 + kernel/Makefile | 1 + kernel/kexec.c | 1056 +---------------------------------------------- kernel/kexec_file.c | 1045 ++++++++++++++++++++++++++++++++++++++++++++++ kernel/kexec_internal.h | 22 + 5 files changed, 1090 insertions(+), 1045 deletions(-) create mode 100644 kernel/kexec_file.c create mode 100644 kernel/kexec_internal.h (limited to 'kernel/Makefile') diff --git a/include/linux/kexec.h b/include/linux/kexec.h index b63218f68c4b..ab150ade0d18 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -318,6 +318,17 @@ int crash_shrink_memory(unsigned long new_size); size_t crash_get_memory_size(void); void crash_free_reserved_phys_range(unsigned long begin, unsigned long end); +int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, + unsigned long buf_len); +void * __weak arch_kexec_kernel_image_load(struct kimage *image); +int __weak arch_kimage_file_post_load_cleanup(struct kimage *image); +int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, + unsigned long buf_len); +int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, + Elf_Shdr *sechdrs, unsigned int relsec); +int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, + unsigned int relsec); + #else /* !CONFIG_KEXEC */ struct pt_regs; struct task_struct; diff --git a/kernel/Makefile b/kernel/Makefile index e0d7587e7684..1b4890af5a65 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -50,6 +50,7 @@ obj-$(CONFIG_MODULE_SIG) += module_signing.o obj-$(CONFIG_KALLSYMS) += kallsyms.o obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o obj-$(CONFIG_KEXEC) += kexec.o +obj-$(CONFIG_KEXEC_FILE) += kexec_file.o obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o obj-$(CONFIG_COMPAT) += compat.o obj-$(CONFIG_CGROUPS) += cgroup.o diff --git a/kernel/kexec.c b/kernel/kexec.c index a785c1015e25..2d73ecfa5505 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -28,10 +28,10 @@ #include #include #include +#include #include #include #include -#include #include #include #include @@ -44,6 +44,9 @@ #include #include +#include "kexec_internal.h" + +DEFINE_MUTEX(kexec_mutex); /* Per cpu memory for storing cpu states in case of system crash. */ note_buf_t __percpu *crash_notes; @@ -57,16 +60,6 @@ size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); /* Flag to indicate we are going to kexec a new kernel */ bool kexec_in_progress = false; -/* - * Declare these symbols weak so that if architecture provides a purgatory, - * these will be overridden. 
- */ -char __weak kexec_purgatory[0]; -size_t __weak kexec_purgatory_size = 0; - -#ifdef CONFIG_KEXEC_FILE -static int kexec_calculate_store_digests(struct kimage *image); -#endif /* Location of the reserved area for the crash kernel */ struct resource crashk_res = { @@ -146,8 +139,6 @@ int kexec_should_crash(struct task_struct *p) */ #define KIMAGE_NO_DEST (-1UL) -static int kimage_is_destination_range(struct kimage *image, - unsigned long start, unsigned long end); static struct page *kimage_alloc_page(struct kimage *image, gfp_t gfp_mask, unsigned long dest); @@ -169,7 +160,7 @@ static int copy_user_segment_list(struct kimage *image, return ret; } -static int sanity_check_segment_list(struct kimage *image) +int sanity_check_segment_list(struct kimage *image) { int result, i; unsigned long nr_segments = image->nr_segments; @@ -259,7 +250,7 @@ static int sanity_check_segment_list(struct kimage *image) return 0; } -static struct kimage *do_kimage_alloc_init(void) +struct kimage *do_kimage_alloc_init(void) { struct kimage *image; @@ -286,8 +277,6 @@ static struct kimage *do_kimage_alloc_init(void) return image; } -static void kimage_free_page_list(struct list_head *list); - static int kimage_alloc_init(struct kimage **rimage, unsigned long entry, unsigned long nr_segments, struct kexec_segment __user *segments, @@ -354,283 +343,7 @@ out_free_image: return ret; } -#ifdef CONFIG_KEXEC_FILE -static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len) -{ - struct fd f = fdget(fd); - int ret; - struct kstat stat; - loff_t pos; - ssize_t bytes = 0; - - if (!f.file) - return -EBADF; - - ret = vfs_getattr(&f.file->f_path, &stat); - if (ret) - goto out; - - if (stat.size > INT_MAX) { - ret = -EFBIG; - goto out; - } - - /* Don't hand 0 to vmalloc, it whines. */ - if (stat.size == 0) { - ret = -EINVAL; - goto out; - } - - *buf = vmalloc(stat.size); - if (!*buf) { - ret = -ENOMEM; - goto out; - } - - pos = 0; - while (pos < stat.size) { - bytes = kernel_read(f.file, pos, (char *)(*buf) + pos, - stat.size - pos); - if (bytes < 0) { - vfree(*buf); - ret = bytes; - goto out; - } - - if (bytes == 0) - break; - pos += bytes; - } - - if (pos != stat.size) { - ret = -EBADF; - vfree(*buf); - goto out; - } - - *buf_len = pos; -out: - fdput(f); - return ret; -} - -/* Architectures can provide this probe function */ -int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, - unsigned long buf_len) -{ - return -ENOEXEC; -} - -void * __weak arch_kexec_kernel_image_load(struct kimage *image) -{ - return ERR_PTR(-ENOEXEC); -} - -void __weak arch_kimage_file_post_load_cleanup(struct kimage *image) -{ -} - -int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, - unsigned long buf_len) -{ - return -EKEYREJECTED; -} - -/* Apply relocations of type RELA */ -int __weak -arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, - unsigned int relsec) -{ - pr_err("RELA relocation unsupported.\n"); - return -ENOEXEC; -} - -/* Apply relocations of type REL */ -int __weak -arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, - unsigned int relsec) -{ - pr_err("REL relocation unsupported.\n"); - return -ENOEXEC; -} - -/* - * Free up memory used by kernel, initrd, and command line. This is temporary - * memory allocation which is not needed any more after these buffers have - * been loaded into separate segments and have been copied elsewhere. 
- */ -static void kimage_file_post_load_cleanup(struct kimage *image) -{ - struct purgatory_info *pi = &image->purgatory_info; - - vfree(image->kernel_buf); - image->kernel_buf = NULL; - - vfree(image->initrd_buf); - image->initrd_buf = NULL; - - kfree(image->cmdline_buf); - image->cmdline_buf = NULL; - - vfree(pi->purgatory_buf); - pi->purgatory_buf = NULL; - - vfree(pi->sechdrs); - pi->sechdrs = NULL; - - /* See if architecture has anything to cleanup post load */ - arch_kimage_file_post_load_cleanup(image); - - /* - * Above call should have called into bootloader to free up - * any data stored in kimage->image_loader_data. It should - * be ok now to free it up. - */ - kfree(image->image_loader_data); - image->image_loader_data = NULL; -} - -/* - * In file mode list of segments is prepared by kernel. Copy relevant - * data from user space, do error checking, prepare segment list - */ -static int -kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd, - const char __user *cmdline_ptr, - unsigned long cmdline_len, unsigned flags) -{ - int ret = 0; - void *ldata; - - ret = copy_file_from_fd(kernel_fd, &image->kernel_buf, - &image->kernel_buf_len); - if (ret) - return ret; - - /* Call arch image probe handlers */ - ret = arch_kexec_kernel_image_probe(image, image->kernel_buf, - image->kernel_buf_len); - - if (ret) - goto out; - -#ifdef CONFIG_KEXEC_VERIFY_SIG - ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf, - image->kernel_buf_len); - if (ret) { - pr_debug("kernel signature verification failed.\n"); - goto out; - } - pr_debug("kernel signature verification successful.\n"); -#endif - /* It is possible that there no initramfs is being loaded */ - if (!(flags & KEXEC_FILE_NO_INITRAMFS)) { - ret = copy_file_from_fd(initrd_fd, &image->initrd_buf, - &image->initrd_buf_len); - if (ret) - goto out; - } - - if (cmdline_len) { - image->cmdline_buf = kzalloc(cmdline_len, GFP_KERNEL); - if (!image->cmdline_buf) { - ret = -ENOMEM; - goto out; - } - - ret = copy_from_user(image->cmdline_buf, cmdline_ptr, - cmdline_len); - if (ret) { - ret = -EFAULT; - goto out; - } - - image->cmdline_buf_len = cmdline_len; - - /* command line should be a string with last byte null */ - if (image->cmdline_buf[cmdline_len - 1] != '\0') { - ret = -EINVAL; - goto out; - } - } - - /* Call arch image load handlers */ - ldata = arch_kexec_kernel_image_load(image); - - if (IS_ERR(ldata)) { - ret = PTR_ERR(ldata); - goto out; - } - - image->image_loader_data = ldata; -out: - /* In case of error, free up all allocated memory in this function */ - if (ret) - kimage_file_post_load_cleanup(image); - return ret; -} - -static int -kimage_file_alloc_init(struct kimage **rimage, int kernel_fd, - int initrd_fd, const char __user *cmdline_ptr, - unsigned long cmdline_len, unsigned long flags) -{ - int ret; - struct kimage *image; - bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH; - - image = do_kimage_alloc_init(); - if (!image) - return -ENOMEM; - - image->file_mode = 1; - - if (kexec_on_panic) { - /* Enable special crash kernel control page alloc policy. 
*/ - image->control_page = crashk_res.start; - image->type = KEXEC_TYPE_CRASH; - } - - ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd, - cmdline_ptr, cmdline_len, flags); - if (ret) - goto out_free_image; - - ret = sanity_check_segment_list(image); - if (ret) - goto out_free_post_load_bufs; - - ret = -ENOMEM; - image->control_code_page = kimage_alloc_control_pages(image, - get_order(KEXEC_CONTROL_PAGE_SIZE)); - if (!image->control_code_page) { - pr_err("Could not allocate control_code_buffer\n"); - goto out_free_post_load_bufs; - } - - if (!kexec_on_panic) { - image->swap_page = kimage_alloc_control_pages(image, 0); - if (!image->swap_page) { - pr_err("Could not allocate swap buffer\n"); - goto out_free_control_pages; - } - } - - *rimage = image; - return 0; -out_free_control_pages: - kimage_free_page_list(&image->control_pages); -out_free_post_load_bufs: - kimage_file_post_load_cleanup(image); -out_free_image: - kfree(image); - return ret; -} -#else /* CONFIG_KEXEC_FILE */ -static inline void kimage_file_post_load_cleanup(struct kimage *image) { } -#endif /* CONFIG_KEXEC_FILE */ - -static int kimage_is_destination_range(struct kimage *image, +int kimage_is_destination_range(struct kimage *image, unsigned long start, unsigned long end) { @@ -676,7 +389,7 @@ static void kimage_free_pages(struct page *page) __free_pages(page, order); } -static void kimage_free_page_list(struct list_head *list) +void kimage_free_page_list(struct list_head *list) { struct list_head *pos, *next; @@ -892,7 +605,7 @@ static void kimage_free_extra_pages(struct kimage *image) kimage_free_page_list(&image->unusable_pages); } -static void kimage_terminate(struct kimage *image) +void kimage_terminate(struct kimage *image) { if (*image->entry != 0) image->entry++; @@ -913,7 +626,7 @@ static void kimage_free_entry(kimage_entry_t entry) kimage_free_pages(page); } -static void kimage_free(struct kimage *image) +void kimage_free(struct kimage *image) { kimage_entry_t *ptr, entry; kimage_entry_t ind = 0; @@ -1204,7 +917,7 @@ out: return result; } -static int kimage_load_segment(struct kimage *image, +int kimage_load_segment(struct kimage *image, struct kexec_segment *segment) { int result = -ENOMEM; @@ -1245,8 +958,6 @@ struct kimage *kexec_image; struct kimage *kexec_crash_image; int kexec_load_disabled; -static DEFINE_MUTEX(kexec_mutex); - SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, struct kexec_segment __user *, segments, unsigned long, flags) { @@ -1391,85 +1102,6 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry, } #endif -#ifdef CONFIG_KEXEC_FILE -SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, - unsigned long, cmdline_len, const char __user *, cmdline_ptr, - unsigned long, flags) -{ - int ret = 0, i; - struct kimage **dest_image, *image; - - /* We only trust the superuser with rebooting the system. */ - if (!capable(CAP_SYS_BOOT) || kexec_load_disabled) - return -EPERM; - - /* Make sure we have a legal set of flags */ - if (flags != (flags & KEXEC_FILE_FLAGS)) - return -EINVAL; - - image = NULL; - - if (!mutex_trylock(&kexec_mutex)) - return -EBUSY; - - dest_image = &kexec_image; - if (flags & KEXEC_FILE_ON_CRASH) - dest_image = &kexec_crash_image; - - if (flags & KEXEC_FILE_UNLOAD) - goto exchange; - - /* - * In case of crash, new kernel gets loaded in reserved region. It is - * same memory where old crash kernel might be loaded. Free any - * current crash dump kernel before we corrupt it. 
- */ - if (flags & KEXEC_FILE_ON_CRASH) - kimage_free(xchg(&kexec_crash_image, NULL)); - - ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr, - cmdline_len, flags); - if (ret) - goto out; - - ret = machine_kexec_prepare(image); - if (ret) - goto out; - - ret = kexec_calculate_store_digests(image); - if (ret) - goto out; - - for (i = 0; i < image->nr_segments; i++) { - struct kexec_segment *ksegment; - - ksegment = &image->segment[i]; - pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n", - i, ksegment->buf, ksegment->bufsz, ksegment->mem, - ksegment->memsz); - - ret = kimage_load_segment(image, &image->segment[i]); - if (ret) - goto out; - } - - kimage_terminate(image); - - /* - * Free up any temporary buffers allocated which are not needed - * after image has been loaded - */ - kimage_file_post_load_cleanup(image); -exchange: - image = xchg(dest_image, image); -out: - mutex_unlock(&kexec_mutex); - kimage_free(image); - return ret; -} - -#endif /* CONFIG_KEXEC_FILE */ - void crash_kexec(struct pt_regs *regs) { /* Take the kexec_mutex here to prevent sys_kexec_load @@ -2024,672 +1656,6 @@ static int __init crash_save_vmcoreinfo_init(void) subsys_initcall(crash_save_vmcoreinfo_init); -#ifdef CONFIG_KEXEC_FILE -static int locate_mem_hole_top_down(unsigned long start, unsigned long end, - struct kexec_buf *kbuf) -{ - struct kimage *image = kbuf->image; - unsigned long temp_start, temp_end; - - temp_end = min(end, kbuf->buf_max); - temp_start = temp_end - kbuf->memsz; - - do { - /* align down start */ - temp_start = temp_start & (~(kbuf->buf_align - 1)); - - if (temp_start < start || temp_start < kbuf->buf_min) - return 0; - - temp_end = temp_start + kbuf->memsz - 1; - - /* - * Make sure this does not conflict with any of existing - * segments - */ - if (kimage_is_destination_range(image, temp_start, temp_end)) { - temp_start = temp_start - PAGE_SIZE; - continue; - } - - /* We found a suitable memory range */ - break; - } while (1); - - /* If we are here, we found a suitable memory range */ - kbuf->mem = temp_start; - - /* Success, stop navigating through remaining System RAM ranges */ - return 1; -} - -static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end, - struct kexec_buf *kbuf) -{ - struct kimage *image = kbuf->image; - unsigned long temp_start, temp_end; - - temp_start = max(start, kbuf->buf_min); - - do { - temp_start = ALIGN(temp_start, kbuf->buf_align); - temp_end = temp_start + kbuf->memsz - 1; - - if (temp_end > end || temp_end > kbuf->buf_max) - return 0; - /* - * Make sure this does not conflict with any of existing - * segments - */ - if (kimage_is_destination_range(image, temp_start, temp_end)) { - temp_start = temp_start + PAGE_SIZE; - continue; - } - - /* We found a suitable memory range */ - break; - } while (1); - - /* If we are here, we found a suitable memory range */ - kbuf->mem = temp_start; - - /* Success, stop navigating through remaining System RAM ranges */ - return 1; -} - -static int locate_mem_hole_callback(u64 start, u64 end, void *arg) -{ - struct kexec_buf *kbuf = (struct kexec_buf *)arg; - unsigned long sz = end - start + 1; - - /* Returning 0 will take to next memory range */ - if (sz < kbuf->memsz) - return 0; - - if (end < kbuf->buf_min || start > kbuf->buf_max) - return 0; - - /* - * Allocate memory top down with-in ram range. Otherwise bottom up - * allocation. 
- */ - if (kbuf->top_down) - return locate_mem_hole_top_down(start, end, kbuf); - return locate_mem_hole_bottom_up(start, end, kbuf); -} - -/* - * Helper function for placing a buffer in a kexec segment. This assumes - * that kexec_mutex is held. - */ -int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz, - unsigned long memsz, unsigned long buf_align, - unsigned long buf_min, unsigned long buf_max, - bool top_down, unsigned long *load_addr) -{ - - struct kexec_segment *ksegment; - struct kexec_buf buf, *kbuf; - int ret; - - /* Currently adding segment this way is allowed only in file mode */ - if (!image->file_mode) - return -EINVAL; - - if (image->nr_segments >= KEXEC_SEGMENT_MAX) - return -EINVAL; - - /* - * Make sure we are not trying to add buffer after allocating - * control pages. All segments need to be placed first before - * any control pages are allocated. As control page allocation - * logic goes through list of segments to make sure there are - * no destination overlaps. - */ - if (!list_empty(&image->control_pages)) { - WARN_ON(1); - return -EINVAL; - } - - memset(&buf, 0, sizeof(struct kexec_buf)); - kbuf = &buf; - kbuf->image = image; - kbuf->buffer = buffer; - kbuf->bufsz = bufsz; - - kbuf->memsz = ALIGN(memsz, PAGE_SIZE); - kbuf->buf_align = max(buf_align, PAGE_SIZE); - kbuf->buf_min = buf_min; - kbuf->buf_max = buf_max; - kbuf->top_down = top_down; - - /* Walk the RAM ranges and allocate a suitable range for the buffer */ - if (image->type == KEXEC_TYPE_CRASH) - ret = walk_iomem_res("Crash kernel", - IORESOURCE_MEM | IORESOURCE_BUSY, - crashk_res.start, crashk_res.end, kbuf, - locate_mem_hole_callback); - else - ret = walk_system_ram_res(0, -1, kbuf, - locate_mem_hole_callback); - if (ret != 1) { - /* A suitable memory range could not be found for buffer */ - return -EADDRNOTAVAIL; - } - - /* Found a suitable memory range */ - ksegment = &image->segment[image->nr_segments]; - ksegment->kbuf = kbuf->buffer; - ksegment->bufsz = kbuf->bufsz; - ksegment->mem = kbuf->mem; - ksegment->memsz = kbuf->memsz; - image->nr_segments++; - *load_addr = ksegment->mem; - return 0; -} - -/* Calculate and store the digest of segments */ -static int kexec_calculate_store_digests(struct kimage *image) -{ - struct crypto_shash *tfm; - struct shash_desc *desc; - int ret = 0, i, j, zero_buf_sz, sha_region_sz; - size_t desc_size, nullsz; - char *digest; - void *zero_buf; - struct kexec_sha_region *sha_regions; - struct purgatory_info *pi = &image->purgatory_info; - - zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT); - zero_buf_sz = PAGE_SIZE; - - tfm = crypto_alloc_shash("sha256", 0, 0); - if (IS_ERR(tfm)) { - ret = PTR_ERR(tfm); - goto out; - } - - desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); - desc = kzalloc(desc_size, GFP_KERNEL); - if (!desc) { - ret = -ENOMEM; - goto out_free_tfm; - } - - sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region); - sha_regions = vzalloc(sha_region_sz); - if (!sha_regions) - goto out_free_desc; - - desc->tfm = tfm; - desc->flags = 0; - - ret = crypto_shash_init(desc); - if (ret < 0) - goto out_free_sha_regions; - - digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL); - if (!digest) { - ret = -ENOMEM; - goto out_free_sha_regions; - } - - for (j = i = 0; i < image->nr_segments; i++) { - struct kexec_segment *ksegment; - - ksegment = &image->segment[i]; - /* - * Skip purgatory as it will be modified once we put digest - * info in purgatory. 
- */ - if (ksegment->kbuf == pi->purgatory_buf) - continue; - - ret = crypto_shash_update(desc, ksegment->kbuf, - ksegment->bufsz); - if (ret) - break; - - /* - * Assume rest of the buffer is filled with zero and - * update digest accordingly. - */ - nullsz = ksegment->memsz - ksegment->bufsz; - while (nullsz) { - unsigned long bytes = nullsz; - - if (bytes > zero_buf_sz) - bytes = zero_buf_sz; - ret = crypto_shash_update(desc, zero_buf, bytes); - if (ret) - break; - nullsz -= bytes; - } - - if (ret) - break; - - sha_regions[j].start = ksegment->mem; - sha_regions[j].len = ksegment->memsz; - j++; - } - - if (!ret) { - ret = crypto_shash_final(desc, digest); - if (ret) - goto out_free_digest; - ret = kexec_purgatory_get_set_symbol(image, "sha_regions", - sha_regions, sha_region_sz, 0); - if (ret) - goto out_free_digest; - - ret = kexec_purgatory_get_set_symbol(image, "sha256_digest", - digest, SHA256_DIGEST_SIZE, 0); - if (ret) - goto out_free_digest; - } - -out_free_digest: - kfree(digest); -out_free_sha_regions: - vfree(sha_regions); -out_free_desc: - kfree(desc); -out_free_tfm: - kfree(tfm); -out: - return ret; -} - -/* Actually load purgatory. Lot of code taken from kexec-tools */ -static int __kexec_load_purgatory(struct kimage *image, unsigned long min, - unsigned long max, int top_down) -{ - struct purgatory_info *pi = &image->purgatory_info; - unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad; - unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset; - unsigned char *buf_addr, *src; - int i, ret = 0, entry_sidx = -1; - const Elf_Shdr *sechdrs_c; - Elf_Shdr *sechdrs = NULL; - void *purgatory_buf = NULL; - - /* - * sechdrs_c points to section headers in purgatory and are read - * only. No modifications allowed. - */ - sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff; - - /* - * We can not modify sechdrs_c[] and its fields. It is read only. - * Copy it over to a local copy where one can store some temporary - * data and free it at the end. We need to modify ->sh_addr and - * ->sh_offset fields to keep track of permanent and temporary - * locations of sections. - */ - sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr)); - if (!sechdrs) - return -ENOMEM; - - memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr)); - - /* - * We seem to have multiple copies of sections. First copy is which - * is embedded in kernel in read only section. Some of these sections - * will be copied to a temporary buffer and relocated. And these - * sections will finally be copied to their final destination at - * segment load time. - * - * Use ->sh_offset to reflect section address in memory. It will - * point to original read only copy if section is not allocatable. - * Otherwise it will point to temporary copy which will be relocated. - * - * Use ->sh_addr to contain final address of the section where it - * will go during execution time. - */ - for (i = 0; i < pi->ehdr->e_shnum; i++) { - if (sechdrs[i].sh_type == SHT_NOBITS) - continue; - - sechdrs[i].sh_offset = (unsigned long)pi->ehdr + - sechdrs[i].sh_offset; - } - - /* - * Identify entry point section and make entry relative to section - * start. 
- */ - entry = pi->ehdr->e_entry; - for (i = 0; i < pi->ehdr->e_shnum; i++) { - if (!(sechdrs[i].sh_flags & SHF_ALLOC)) - continue; - - if (!(sechdrs[i].sh_flags & SHF_EXECINSTR)) - continue; - - /* Make entry section relative */ - if (sechdrs[i].sh_addr <= pi->ehdr->e_entry && - ((sechdrs[i].sh_addr + sechdrs[i].sh_size) > - pi->ehdr->e_entry)) { - entry_sidx = i; - entry -= sechdrs[i].sh_addr; - break; - } - } - - /* Determine how much memory is needed to load relocatable object. */ - buf_align = 1; - bss_align = 1; - buf_sz = 0; - bss_sz = 0; - - for (i = 0; i < pi->ehdr->e_shnum; i++) { - if (!(sechdrs[i].sh_flags & SHF_ALLOC)) - continue; - - align = sechdrs[i].sh_addralign; - if (sechdrs[i].sh_type != SHT_NOBITS) { - if (buf_align < align) - buf_align = align; - buf_sz = ALIGN(buf_sz, align); - buf_sz += sechdrs[i].sh_size; - } else { - /* bss section */ - if (bss_align < align) - bss_align = align; - bss_sz = ALIGN(bss_sz, align); - bss_sz += sechdrs[i].sh_size; - } - } - - /* Determine the bss padding required to align bss properly */ - bss_pad = 0; - if (buf_sz & (bss_align - 1)) - bss_pad = bss_align - (buf_sz & (bss_align - 1)); - - memsz = buf_sz + bss_pad + bss_sz; - - /* Allocate buffer for purgatory */ - purgatory_buf = vzalloc(buf_sz); - if (!purgatory_buf) { - ret = -ENOMEM; - goto out; - } - - if (buf_align < bss_align) - buf_align = bss_align; - - /* Add buffer to segment list */ - ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz, - buf_align, min, max, top_down, - &pi->purgatory_load_addr); - if (ret) - goto out; - - /* Load SHF_ALLOC sections */ - buf_addr = purgatory_buf; - load_addr = curr_load_addr = pi->purgatory_load_addr; - bss_addr = load_addr + buf_sz + bss_pad; - - for (i = 0; i < pi->ehdr->e_shnum; i++) { - if (!(sechdrs[i].sh_flags & SHF_ALLOC)) - continue; - - align = sechdrs[i].sh_addralign; - if (sechdrs[i].sh_type != SHT_NOBITS) { - curr_load_addr = ALIGN(curr_load_addr, align); - offset = curr_load_addr - load_addr; - /* We already modifed ->sh_offset to keep src addr */ - src = (char *) sechdrs[i].sh_offset; - memcpy(buf_addr + offset, src, sechdrs[i].sh_size); - - /* Store load address and source address of section */ - sechdrs[i].sh_addr = curr_load_addr; - - /* - * This section got copied to temporary buffer. Update - * ->sh_offset accordingly. - */ - sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset); - - /* Advance to the next address */ - curr_load_addr += sechdrs[i].sh_size; - } else { - bss_addr = ALIGN(bss_addr, align); - sechdrs[i].sh_addr = bss_addr; - bss_addr += sechdrs[i].sh_size; - } - } - - /* Update entry point based on load address of text section */ - if (entry_sidx >= 0) - entry += sechdrs[entry_sidx].sh_addr; - - /* Make kernel jump to purgatory after shutdown */ - image->start = entry; - - /* Used later to get/set symbol values */ - pi->sechdrs = sechdrs; - - /* - * Used later to identify which section is purgatory and skip it - * from checksumming. 
- */ - pi->purgatory_buf = purgatory_buf; - return ret; -out: - vfree(sechdrs); - vfree(purgatory_buf); - return ret; -} - -static int kexec_apply_relocations(struct kimage *image) -{ - int i, ret; - struct purgatory_info *pi = &image->purgatory_info; - Elf_Shdr *sechdrs = pi->sechdrs; - - /* Apply relocations */ - for (i = 0; i < pi->ehdr->e_shnum; i++) { - Elf_Shdr *section, *symtab; - - if (sechdrs[i].sh_type != SHT_RELA && - sechdrs[i].sh_type != SHT_REL) - continue; - - /* - * For section of type SHT_RELA/SHT_REL, - * ->sh_link contains section header index of associated - * symbol table. And ->sh_info contains section header - * index of section to which relocations apply. - */ - if (sechdrs[i].sh_info >= pi->ehdr->e_shnum || - sechdrs[i].sh_link >= pi->ehdr->e_shnum) - return -ENOEXEC; - - section = &sechdrs[sechdrs[i].sh_info]; - symtab = &sechdrs[sechdrs[i].sh_link]; - - if (!(section->sh_flags & SHF_ALLOC)) - continue; - - /* - * symtab->sh_link contain section header index of associated - * string table. - */ - if (symtab->sh_link >= pi->ehdr->e_shnum) - /* Invalid section number? */ - continue; - - /* - * Respective architecture needs to provide support for applying - * relocations of type SHT_RELA/SHT_REL. - */ - if (sechdrs[i].sh_type == SHT_RELA) - ret = arch_kexec_apply_relocations_add(pi->ehdr, - sechdrs, i); - else if (sechdrs[i].sh_type == SHT_REL) - ret = arch_kexec_apply_relocations(pi->ehdr, - sechdrs, i); - if (ret) - return ret; - } - - return 0; -} - -/* Load relocatable purgatory object and relocate it appropriately */ -int kexec_load_purgatory(struct kimage *image, unsigned long min, - unsigned long max, int top_down, - unsigned long *load_addr) -{ - struct purgatory_info *pi = &image->purgatory_info; - int ret; - - if (kexec_purgatory_size <= 0) - return -EINVAL; - - if (kexec_purgatory_size < sizeof(Elf_Ehdr)) - return -ENOEXEC; - - pi->ehdr = (Elf_Ehdr *)kexec_purgatory; - - if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0 - || pi->ehdr->e_type != ET_REL - || !elf_check_arch(pi->ehdr) - || pi->ehdr->e_shentsize != sizeof(Elf_Shdr)) - return -ENOEXEC; - - if (pi->ehdr->e_shoff >= kexec_purgatory_size - || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) > - kexec_purgatory_size - pi->ehdr->e_shoff)) - return -ENOEXEC; - - ret = __kexec_load_purgatory(image, min, max, top_down); - if (ret) - return ret; - - ret = kexec_apply_relocations(image); - if (ret) - goto out; - - *load_addr = pi->purgatory_load_addr; - return 0; -out: - vfree(pi->sechdrs); - vfree(pi->purgatory_buf); - return ret; -} - -static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi, - const char *name) -{ - Elf_Sym *syms; - Elf_Shdr *sechdrs; - Elf_Ehdr *ehdr; - int i, k; - const char *strtab; - - if (!pi->sechdrs || !pi->ehdr) - return NULL; - - sechdrs = pi->sechdrs; - ehdr = pi->ehdr; - - for (i = 0; i < ehdr->e_shnum; i++) { - if (sechdrs[i].sh_type != SHT_SYMTAB) - continue; - - if (sechdrs[i].sh_link >= ehdr->e_shnum) - /* Invalid strtab section number */ - continue; - strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset; - syms = (Elf_Sym *)sechdrs[i].sh_offset; - - /* Go through symbols for a match */ - for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) { - if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL) - continue; - - if (strcmp(strtab + syms[k].st_name, name) != 0) - continue; - - if (syms[k].st_shndx == SHN_UNDEF || - syms[k].st_shndx >= ehdr->e_shnum) { - pr_debug("Symbol: %s has bad section index %d.\n", - name, syms[k].st_shndx); - return NULL; - } - - /* Found the 
symbol we are looking for */ - return &syms[k]; - } - } - - return NULL; -} - -void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name) -{ - struct purgatory_info *pi = &image->purgatory_info; - Elf_Sym *sym; - Elf_Shdr *sechdr; - - sym = kexec_purgatory_find_symbol(pi, name); - if (!sym) - return ERR_PTR(-EINVAL); - - sechdr = &pi->sechdrs[sym->st_shndx]; - - /* - * Returns the address where symbol will finally be loaded after - * kexec_load_segment() - */ - return (void *)(sechdr->sh_addr + sym->st_value); -} - -/* - * Get or set value of a symbol. If "get_value" is true, symbol value is - * returned in buf otherwise symbol value is set based on value in buf. - */ -int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, - void *buf, unsigned int size, bool get_value) -{ - Elf_Sym *sym; - Elf_Shdr *sechdrs; - struct purgatory_info *pi = &image->purgatory_info; - char *sym_buf; - - sym = kexec_purgatory_find_symbol(pi, name); - if (!sym) - return -EINVAL; - - if (sym->st_size != size) { - pr_err("symbol %s size mismatch: expected %lu actual %u\n", - name, (unsigned long)sym->st_size, size); - return -EINVAL; - } - - sechdrs = pi->sechdrs; - - if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) { - pr_err("symbol %s is in a bss section. Cannot %s\n", name, - get_value ? "get" : "set"); - return -EINVAL; - } - - sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset + - sym->st_value; - - if (get_value) - memcpy((void *)buf, sym_buf, size); - else - memcpy((void *)sym_buf, buf, size); - - return 0; -} -#endif /* CONFIG_KEXEC_FILE */ - /* * Move into place and start executing a preloaded standalone * executable. If nothing was preloaded return an error. diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c new file mode 100644 index 000000000000..6a9a3f2a0e8e --- /dev/null +++ b/kernel/kexec_file.c @@ -0,0 +1,1045 @@ +/* + * kexec: kexec_file_load system call + * + * Copyright (C) 2014 Red Hat Inc. + * Authors: + * Vivek Goyal + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "kexec_internal.h" + +/* + * Declare these symbols weak so that if architecture provides a purgatory, + * these will be overridden. + */ +char __weak kexec_purgatory[0]; +size_t __weak kexec_purgatory_size = 0; + +static int kexec_calculate_store_digests(struct kimage *image); + +static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len) +{ + struct fd f = fdget(fd); + int ret; + struct kstat stat; + loff_t pos; + ssize_t bytes = 0; + + if (!f.file) + return -EBADF; + + ret = vfs_getattr(&f.file->f_path, &stat); + if (ret) + goto out; + + if (stat.size > INT_MAX) { + ret = -EFBIG; + goto out; + } + + /* Don't hand 0 to vmalloc, it whines. 
*/ + if (stat.size == 0) { + ret = -EINVAL; + goto out; + } + + *buf = vmalloc(stat.size); + if (!*buf) { + ret = -ENOMEM; + goto out; + } + + pos = 0; + while (pos < stat.size) { + bytes = kernel_read(f.file, pos, (char *)(*buf) + pos, + stat.size - pos); + if (bytes < 0) { + vfree(*buf); + ret = bytes; + goto out; + } + + if (bytes == 0) + break; + pos += bytes; + } + + if (pos != stat.size) { + ret = -EBADF; + vfree(*buf); + goto out; + } + + *buf_len = pos; +out: + fdput(f); + return ret; +} + +/* Architectures can provide this probe function */ +int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, + unsigned long buf_len) +{ + return -ENOEXEC; +} + +void * __weak arch_kexec_kernel_image_load(struct kimage *image) +{ + return ERR_PTR(-ENOEXEC); +} + +int __weak arch_kimage_file_post_load_cleanup(struct kimage *image) +{ + return -EINVAL; +} + +int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, + unsigned long buf_len) +{ + return -EKEYREJECTED; +} + +/* Apply relocations of type RELA */ +int __weak +arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, + unsigned int relsec) +{ + pr_err("RELA relocation unsupported.\n"); + return -ENOEXEC; +} + +/* Apply relocations of type REL */ +int __weak +arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, + unsigned int relsec) +{ + pr_err("REL relocation unsupported.\n"); + return -ENOEXEC; +} + +/* + * Free up memory used by kernel, initrd, and command line. This is temporary + * memory allocation which is not needed any more after these buffers have + * been loaded into separate segments and have been copied elsewhere. + */ +void kimage_file_post_load_cleanup(struct kimage *image) +{ + struct purgatory_info *pi = &image->purgatory_info; + + vfree(image->kernel_buf); + image->kernel_buf = NULL; + + vfree(image->initrd_buf); + image->initrd_buf = NULL; + + kfree(image->cmdline_buf); + image->cmdline_buf = NULL; + + vfree(pi->purgatory_buf); + pi->purgatory_buf = NULL; + + vfree(pi->sechdrs); + pi->sechdrs = NULL; + + /* See if architecture has anything to cleanup post load */ + arch_kimage_file_post_load_cleanup(image); + + /* + * Above call should have called into bootloader to free up + * any data stored in kimage->image_loader_data. It should + * be ok now to free it up. + */ + kfree(image->image_loader_data); + image->image_loader_data = NULL; +} + +/* + * In file mode list of segments is prepared by kernel. 
Copy relevant + * data from user space, do error checking, prepare segment list + */ +static int +kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd, + const char __user *cmdline_ptr, + unsigned long cmdline_len, unsigned flags) +{ + int ret = 0; + void *ldata; + + ret = copy_file_from_fd(kernel_fd, &image->kernel_buf, + &image->kernel_buf_len); + if (ret) + return ret; + + /* Call arch image probe handlers */ + ret = arch_kexec_kernel_image_probe(image, image->kernel_buf, + image->kernel_buf_len); + + if (ret) + goto out; + +#ifdef CONFIG_KEXEC_VERIFY_SIG + ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf, + image->kernel_buf_len); + if (ret) { + pr_debug("kernel signature verification failed.\n"); + goto out; + } + pr_debug("kernel signature verification successful.\n"); +#endif + /* It is possible that there no initramfs is being loaded */ + if (!(flags & KEXEC_FILE_NO_INITRAMFS)) { + ret = copy_file_from_fd(initrd_fd, &image->initrd_buf, + &image->initrd_buf_len); + if (ret) + goto out; + } + + if (cmdline_len) { + image->cmdline_buf = kzalloc(cmdline_len, GFP_KERNEL); + if (!image->cmdline_buf) { + ret = -ENOMEM; + goto out; + } + + ret = copy_from_user(image->cmdline_buf, cmdline_ptr, + cmdline_len); + if (ret) { + ret = -EFAULT; + goto out; + } + + image->cmdline_buf_len = cmdline_len; + + /* command line should be a string with last byte null */ + if (image->cmdline_buf[cmdline_len - 1] != '\0') { + ret = -EINVAL; + goto out; + } + } + + /* Call arch image load handlers */ + ldata = arch_kexec_kernel_image_load(image); + + if (IS_ERR(ldata)) { + ret = PTR_ERR(ldata); + goto out; + } + + image->image_loader_data = ldata; +out: + /* In case of error, free up all allocated memory in this function */ + if (ret) + kimage_file_post_load_cleanup(image); + return ret; +} + +static int +kimage_file_alloc_init(struct kimage **rimage, int kernel_fd, + int initrd_fd, const char __user *cmdline_ptr, + unsigned long cmdline_len, unsigned long flags) +{ + int ret; + struct kimage *image; + bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH; + + image = do_kimage_alloc_init(); + if (!image) + return -ENOMEM; + + image->file_mode = 1; + + if (kexec_on_panic) { + /* Enable special crash kernel control page alloc policy. */ + image->control_page = crashk_res.start; + image->type = KEXEC_TYPE_CRASH; + } + + ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd, + cmdline_ptr, cmdline_len, flags); + if (ret) + goto out_free_image; + + ret = sanity_check_segment_list(image); + if (ret) + goto out_free_post_load_bufs; + + ret = -ENOMEM; + image->control_code_page = kimage_alloc_control_pages(image, + get_order(KEXEC_CONTROL_PAGE_SIZE)); + if (!image->control_code_page) { + pr_err("Could not allocate control_code_buffer\n"); + goto out_free_post_load_bufs; + } + + if (!kexec_on_panic) { + image->swap_page = kimage_alloc_control_pages(image, 0); + if (!image->swap_page) { + pr_err("Could not allocate swap buffer\n"); + goto out_free_control_pages; + } + } + + *rimage = image; + return 0; +out_free_control_pages: + kimage_free_page_list(&image->control_pages); +out_free_post_load_bufs: + kimage_file_post_load_cleanup(image); +out_free_image: + kfree(image); + return ret; +} + +SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, + unsigned long, cmdline_len, const char __user *, cmdline_ptr, + unsigned long, flags) +{ + int ret = 0, i; + struct kimage **dest_image, *image; + + /* We only trust the superuser with rebooting the system. 
*/ + if (!capable(CAP_SYS_BOOT) || kexec_load_disabled) + return -EPERM; + + /* Make sure we have a legal set of flags */ + if (flags != (flags & KEXEC_FILE_FLAGS)) + return -EINVAL; + + image = NULL; + + if (!mutex_trylock(&kexec_mutex)) + return -EBUSY; + + dest_image = &kexec_image; + if (flags & KEXEC_FILE_ON_CRASH) + dest_image = &kexec_crash_image; + + if (flags & KEXEC_FILE_UNLOAD) + goto exchange; + + /* + * In case of crash, new kernel gets loaded in reserved region. It is + * same memory where old crash kernel might be loaded. Free any + * current crash dump kernel before we corrupt it. + */ + if (flags & KEXEC_FILE_ON_CRASH) + kimage_free(xchg(&kexec_crash_image, NULL)); + + ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr, + cmdline_len, flags); + if (ret) + goto out; + + ret = machine_kexec_prepare(image); + if (ret) + goto out; + + ret = kexec_calculate_store_digests(image); + if (ret) + goto out; + + for (i = 0; i < image->nr_segments; i++) { + struct kexec_segment *ksegment; + + ksegment = &image->segment[i]; + pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n", + i, ksegment->buf, ksegment->bufsz, ksegment->mem, + ksegment->memsz); + + ret = kimage_load_segment(image, &image->segment[i]); + if (ret) + goto out; + } + + kimage_terminate(image); + + /* + * Free up any temporary buffers allocated which are not needed + * after image has been loaded + */ + kimage_file_post_load_cleanup(image); +exchange: + image = xchg(dest_image, image); +out: + mutex_unlock(&kexec_mutex); + kimage_free(image); + return ret; +} + +static int locate_mem_hole_top_down(unsigned long start, unsigned long end, + struct kexec_buf *kbuf) +{ + struct kimage *image = kbuf->image; + unsigned long temp_start, temp_end; + + temp_end = min(end, kbuf->buf_max); + temp_start = temp_end - kbuf->memsz; + + do { + /* align down start */ + temp_start = temp_start & (~(kbuf->buf_align - 1)); + + if (temp_start < start || temp_start < kbuf->buf_min) + return 0; + + temp_end = temp_start + kbuf->memsz - 1; + + /* + * Make sure this does not conflict with any of existing + * segments + */ + if (kimage_is_destination_range(image, temp_start, temp_end)) { + temp_start = temp_start - PAGE_SIZE; + continue; + } + + /* We found a suitable memory range */ + break; + } while (1); + + /* If we are here, we found a suitable memory range */ + kbuf->mem = temp_start; + + /* Success, stop navigating through remaining System RAM ranges */ + return 1; +} + +static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end, + struct kexec_buf *kbuf) +{ + struct kimage *image = kbuf->image; + unsigned long temp_start, temp_end; + + temp_start = max(start, kbuf->buf_min); + + do { + temp_start = ALIGN(temp_start, kbuf->buf_align); + temp_end = temp_start + kbuf->memsz - 1; + + if (temp_end > end || temp_end > kbuf->buf_max) + return 0; + /* + * Make sure this does not conflict with any of existing + * segments + */ + if (kimage_is_destination_range(image, temp_start, temp_end)) { + temp_start = temp_start + PAGE_SIZE; + continue; + } + + /* We found a suitable memory range */ + break; + } while (1); + + /* If we are here, we found a suitable memory range */ + kbuf->mem = temp_start; + + /* Success, stop navigating through remaining System RAM ranges */ + return 1; +} + +static int locate_mem_hole_callback(u64 start, u64 end, void *arg) +{ + struct kexec_buf *kbuf = (struct kexec_buf *)arg; + unsigned long sz = end - start + 1; + + /* Returning 0 will take to next memory range 
*/ + if (sz < kbuf->memsz) + return 0; + + if (end < kbuf->buf_min || start > kbuf->buf_max) + return 0; + + /* + * Allocate memory top down with-in ram range. Otherwise bottom up + * allocation. + */ + if (kbuf->top_down) + return locate_mem_hole_top_down(start, end, kbuf); + return locate_mem_hole_bottom_up(start, end, kbuf); +} + +/* + * Helper function for placing a buffer in a kexec segment. This assumes + * that kexec_mutex is held. + */ +int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz, + unsigned long memsz, unsigned long buf_align, + unsigned long buf_min, unsigned long buf_max, + bool top_down, unsigned long *load_addr) +{ + + struct kexec_segment *ksegment; + struct kexec_buf buf, *kbuf; + int ret; + + /* Currently adding segment this way is allowed only in file mode */ + if (!image->file_mode) + return -EINVAL; + + if (image->nr_segments >= KEXEC_SEGMENT_MAX) + return -EINVAL; + + /* + * Make sure we are not trying to add buffer after allocating + * control pages. All segments need to be placed first before + * any control pages are allocated. As control page allocation + * logic goes through list of segments to make sure there are + * no destination overlaps. + */ + if (!list_empty(&image->control_pages)) { + WARN_ON(1); + return -EINVAL; + } + + memset(&buf, 0, sizeof(struct kexec_buf)); + kbuf = &buf; + kbuf->image = image; + kbuf->buffer = buffer; + kbuf->bufsz = bufsz; + + kbuf->memsz = ALIGN(memsz, PAGE_SIZE); + kbuf->buf_align = max(buf_align, PAGE_SIZE); + kbuf->buf_min = buf_min; + kbuf->buf_max = buf_max; + kbuf->top_down = top_down; + + /* Walk the RAM ranges and allocate a suitable range for the buffer */ + if (image->type == KEXEC_TYPE_CRASH) + ret = walk_iomem_res("Crash kernel", + IORESOURCE_MEM | IORESOURCE_BUSY, + crashk_res.start, crashk_res.end, kbuf, + locate_mem_hole_callback); + else + ret = walk_system_ram_res(0, -1, kbuf, + locate_mem_hole_callback); + if (ret != 1) { + /* A suitable memory range could not be found for buffer */ + return -EADDRNOTAVAIL; + } + + /* Found a suitable memory range */ + ksegment = &image->segment[image->nr_segments]; + ksegment->kbuf = kbuf->buffer; + ksegment->bufsz = kbuf->bufsz; + ksegment->mem = kbuf->mem; + ksegment->memsz = kbuf->memsz; + image->nr_segments++; + *load_addr = ksegment->mem; + return 0; +} + +/* Calculate and store the digest of segments */ +static int kexec_calculate_store_digests(struct kimage *image) +{ + struct crypto_shash *tfm; + struct shash_desc *desc; + int ret = 0, i, j, zero_buf_sz, sha_region_sz; + size_t desc_size, nullsz; + char *digest; + void *zero_buf; + struct kexec_sha_region *sha_regions; + struct purgatory_info *pi = &image->purgatory_info; + + zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT); + zero_buf_sz = PAGE_SIZE; + + tfm = crypto_alloc_shash("sha256", 0, 0); + if (IS_ERR(tfm)) { + ret = PTR_ERR(tfm); + goto out; + } + + desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); + desc = kzalloc(desc_size, GFP_KERNEL); + if (!desc) { + ret = -ENOMEM; + goto out_free_tfm; + } + + sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region); + sha_regions = vzalloc(sha_region_sz); + if (!sha_regions) + goto out_free_desc; + + desc->tfm = tfm; + desc->flags = 0; + + ret = crypto_shash_init(desc); + if (ret < 0) + goto out_free_sha_regions; + + digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL); + if (!digest) { + ret = -ENOMEM; + goto out_free_sha_regions; + } + + for (j = i = 0; i < image->nr_segments; i++) { + struct kexec_segment *ksegment; 
+ + ksegment = &image->segment[i]; + /* + * Skip purgatory as it will be modified once we put digest + * info in purgatory. + */ + if (ksegment->kbuf == pi->purgatory_buf) + continue; + + ret = crypto_shash_update(desc, ksegment->kbuf, + ksegment->bufsz); + if (ret) + break; + + /* + * Assume rest of the buffer is filled with zero and + * update digest accordingly. + */ + nullsz = ksegment->memsz - ksegment->bufsz; + while (nullsz) { + unsigned long bytes = nullsz; + + if (bytes > zero_buf_sz) + bytes = zero_buf_sz; + ret = crypto_shash_update(desc, zero_buf, bytes); + if (ret) + break; + nullsz -= bytes; + } + + if (ret) + break; + + sha_regions[j].start = ksegment->mem; + sha_regions[j].len = ksegment->memsz; + j++; + } + + if (!ret) { + ret = crypto_shash_final(desc, digest); + if (ret) + goto out_free_digest; + ret = kexec_purgatory_get_set_symbol(image, "sha_regions", + sha_regions, sha_region_sz, 0); + if (ret) + goto out_free_digest; + + ret = kexec_purgatory_get_set_symbol(image, "sha256_digest", + digest, SHA256_DIGEST_SIZE, 0); + if (ret) + goto out_free_digest; + } + +out_free_digest: + kfree(digest); +out_free_sha_regions: + vfree(sha_regions); +out_free_desc: + kfree(desc); +out_free_tfm: + kfree(tfm); +out: + return ret; +} + +/* Actually load purgatory. Lot of code taken from kexec-tools */ +static int __kexec_load_purgatory(struct kimage *image, unsigned long min, + unsigned long max, int top_down) +{ + struct purgatory_info *pi = &image->purgatory_info; + unsigned long align, buf_align, bss_align, buf_sz, bss_sz, bss_pad; + unsigned long memsz, entry, load_addr, curr_load_addr, bss_addr, offset; + unsigned char *buf_addr, *src; + int i, ret = 0, entry_sidx = -1; + const Elf_Shdr *sechdrs_c; + Elf_Shdr *sechdrs = NULL; + void *purgatory_buf = NULL; + + /* + * sechdrs_c points to section headers in purgatory and are read + * only. No modifications allowed. + */ + sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff; + + /* + * We can not modify sechdrs_c[] and its fields. It is read only. + * Copy it over to a local copy where one can store some temporary + * data and free it at the end. We need to modify ->sh_addr and + * ->sh_offset fields to keep track of permanent and temporary + * locations of sections. + */ + sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr)); + if (!sechdrs) + return -ENOMEM; + + memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr)); + + /* + * We seem to have multiple copies of sections. First copy is which + * is embedded in kernel in read only section. Some of these sections + * will be copied to a temporary buffer and relocated. And these + * sections will finally be copied to their final destination at + * segment load time. + * + * Use ->sh_offset to reflect section address in memory. It will + * point to original read only copy if section is not allocatable. + * Otherwise it will point to temporary copy which will be relocated. + * + * Use ->sh_addr to contain final address of the section where it + * will go during execution time. + */ + for (i = 0; i < pi->ehdr->e_shnum; i++) { + if (sechdrs[i].sh_type == SHT_NOBITS) + continue; + + sechdrs[i].sh_offset = (unsigned long)pi->ehdr + + sechdrs[i].sh_offset; + } + + /* + * Identify entry point section and make entry relative to section + * start. 
+ */ + entry = pi->ehdr->e_entry; + for (i = 0; i < pi->ehdr->e_shnum; i++) { + if (!(sechdrs[i].sh_flags & SHF_ALLOC)) + continue; + + if (!(sechdrs[i].sh_flags & SHF_EXECINSTR)) + continue; + + /* Make entry section relative */ + if (sechdrs[i].sh_addr <= pi->ehdr->e_entry && + ((sechdrs[i].sh_addr + sechdrs[i].sh_size) > + pi->ehdr->e_entry)) { + entry_sidx = i; + entry -= sechdrs[i].sh_addr; + break; + } + } + + /* Determine how much memory is needed to load relocatable object. */ + buf_align = 1; + bss_align = 1; + buf_sz = 0; + bss_sz = 0; + + for (i = 0; i < pi->ehdr->e_shnum; i++) { + if (!(sechdrs[i].sh_flags & SHF_ALLOC)) + continue; + + align = sechdrs[i].sh_addralign; + if (sechdrs[i].sh_type != SHT_NOBITS) { + if (buf_align < align) + buf_align = align; + buf_sz = ALIGN(buf_sz, align); + buf_sz += sechdrs[i].sh_size; + } else { + /* bss section */ + if (bss_align < align) + bss_align = align; + bss_sz = ALIGN(bss_sz, align); + bss_sz += sechdrs[i].sh_size; + } + } + + /* Determine the bss padding required to align bss properly */ + bss_pad = 0; + if (buf_sz & (bss_align - 1)) + bss_pad = bss_align - (buf_sz & (bss_align - 1)); + + memsz = buf_sz + bss_pad + bss_sz; + + /* Allocate buffer for purgatory */ + purgatory_buf = vzalloc(buf_sz); + if (!purgatory_buf) { + ret = -ENOMEM; + goto out; + } + + if (buf_align < bss_align) + buf_align = bss_align; + + /* Add buffer to segment list */ + ret = kexec_add_buffer(image, purgatory_buf, buf_sz, memsz, + buf_align, min, max, top_down, + &pi->purgatory_load_addr); + if (ret) + goto out; + + /* Load SHF_ALLOC sections */ + buf_addr = purgatory_buf; + load_addr = curr_load_addr = pi->purgatory_load_addr; + bss_addr = load_addr + buf_sz + bss_pad; + + for (i = 0; i < pi->ehdr->e_shnum; i++) { + if (!(sechdrs[i].sh_flags & SHF_ALLOC)) + continue; + + align = sechdrs[i].sh_addralign; + if (sechdrs[i].sh_type != SHT_NOBITS) { + curr_load_addr = ALIGN(curr_load_addr, align); + offset = curr_load_addr - load_addr; + /* We already modifed ->sh_offset to keep src addr */ + src = (char *) sechdrs[i].sh_offset; + memcpy(buf_addr + offset, src, sechdrs[i].sh_size); + + /* Store load address and source address of section */ + sechdrs[i].sh_addr = curr_load_addr; + + /* + * This section got copied to temporary buffer. Update + * ->sh_offset accordingly. + */ + sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset); + + /* Advance to the next address */ + curr_load_addr += sechdrs[i].sh_size; + } else { + bss_addr = ALIGN(bss_addr, align); + sechdrs[i].sh_addr = bss_addr; + bss_addr += sechdrs[i].sh_size; + } + } + + /* Update entry point based on load address of text section */ + if (entry_sidx >= 0) + entry += sechdrs[entry_sidx].sh_addr; + + /* Make kernel jump to purgatory after shutdown */ + image->start = entry; + + /* Used later to get/set symbol values */ + pi->sechdrs = sechdrs; + + /* + * Used later to identify which section is purgatory and skip it + * from checksumming. 
+ */ + pi->purgatory_buf = purgatory_buf; + return ret; +out: + vfree(sechdrs); + vfree(purgatory_buf); + return ret; +} + +static int kexec_apply_relocations(struct kimage *image) +{ + int i, ret; + struct purgatory_info *pi = &image->purgatory_info; + Elf_Shdr *sechdrs = pi->sechdrs; + + /* Apply relocations */ + for (i = 0; i < pi->ehdr->e_shnum; i++) { + Elf_Shdr *section, *symtab; + + if (sechdrs[i].sh_type != SHT_RELA && + sechdrs[i].sh_type != SHT_REL) + continue; + + /* + * For section of type SHT_RELA/SHT_REL, + * ->sh_link contains section header index of associated + * symbol table. And ->sh_info contains section header + * index of section to which relocations apply. + */ + if (sechdrs[i].sh_info >= pi->ehdr->e_shnum || + sechdrs[i].sh_link >= pi->ehdr->e_shnum) + return -ENOEXEC; + + section = &sechdrs[sechdrs[i].sh_info]; + symtab = &sechdrs[sechdrs[i].sh_link]; + + if (!(section->sh_flags & SHF_ALLOC)) + continue; + + /* + * symtab->sh_link contain section header index of associated + * string table. + */ + if (symtab->sh_link >= pi->ehdr->e_shnum) + /* Invalid section number? */ + continue; + + /* + * Respective architecture needs to provide support for applying + * relocations of type SHT_RELA/SHT_REL. + */ + if (sechdrs[i].sh_type == SHT_RELA) + ret = arch_kexec_apply_relocations_add(pi->ehdr, + sechdrs, i); + else if (sechdrs[i].sh_type == SHT_REL) + ret = arch_kexec_apply_relocations(pi->ehdr, + sechdrs, i); + if (ret) + return ret; + } + + return 0; +} + +/* Load relocatable purgatory object and relocate it appropriately */ +int kexec_load_purgatory(struct kimage *image, unsigned long min, + unsigned long max, int top_down, + unsigned long *load_addr) +{ + struct purgatory_info *pi = &image->purgatory_info; + int ret; + + if (kexec_purgatory_size <= 0) + return -EINVAL; + + if (kexec_purgatory_size < sizeof(Elf_Ehdr)) + return -ENOEXEC; + + pi->ehdr = (Elf_Ehdr *)kexec_purgatory; + + if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0 + || pi->ehdr->e_type != ET_REL + || !elf_check_arch(pi->ehdr) + || pi->ehdr->e_shentsize != sizeof(Elf_Shdr)) + return -ENOEXEC; + + if (pi->ehdr->e_shoff >= kexec_purgatory_size + || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) > + kexec_purgatory_size - pi->ehdr->e_shoff)) + return -ENOEXEC; + + ret = __kexec_load_purgatory(image, min, max, top_down); + if (ret) + return ret; + + ret = kexec_apply_relocations(image); + if (ret) + goto out; + + *load_addr = pi->purgatory_load_addr; + return 0; +out: + vfree(pi->sechdrs); + vfree(pi->purgatory_buf); + return ret; +} + +static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi, + const char *name) +{ + Elf_Sym *syms; + Elf_Shdr *sechdrs; + Elf_Ehdr *ehdr; + int i, k; + const char *strtab; + + if (!pi->sechdrs || !pi->ehdr) + return NULL; + + sechdrs = pi->sechdrs; + ehdr = pi->ehdr; + + for (i = 0; i < ehdr->e_shnum; i++) { + if (sechdrs[i].sh_type != SHT_SYMTAB) + continue; + + if (sechdrs[i].sh_link >= ehdr->e_shnum) + /* Invalid strtab section number */ + continue; + strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset; + syms = (Elf_Sym *)sechdrs[i].sh_offset; + + /* Go through symbols for a match */ + for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) { + if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL) + continue; + + if (strcmp(strtab + syms[k].st_name, name) != 0) + continue; + + if (syms[k].st_shndx == SHN_UNDEF || + syms[k].st_shndx >= ehdr->e_shnum) { + pr_debug("Symbol: %s has bad section index %d.\n", + name, syms[k].st_shndx); + return NULL; + } + + /* Found the 
symbol we are looking for */ + return &syms[k]; + } + } + + return NULL; +} + +void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name) +{ + struct purgatory_info *pi = &image->purgatory_info; + Elf_Sym *sym; + Elf_Shdr *sechdr; + + sym = kexec_purgatory_find_symbol(pi, name); + if (!sym) + return ERR_PTR(-EINVAL); + + sechdr = &pi->sechdrs[sym->st_shndx]; + + /* + * Returns the address where symbol will finally be loaded after + * kexec_load_segment() + */ + return (void *)(sechdr->sh_addr + sym->st_value); +} + +/* + * Get or set value of a symbol. If "get_value" is true, symbol value is + * returned in buf otherwise symbol value is set based on value in buf. + */ +int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, + void *buf, unsigned int size, bool get_value) +{ + Elf_Sym *sym; + Elf_Shdr *sechdrs; + struct purgatory_info *pi = &image->purgatory_info; + char *sym_buf; + + sym = kexec_purgatory_find_symbol(pi, name); + if (!sym) + return -EINVAL; + + if (sym->st_size != size) { + pr_err("symbol %s size mismatch: expected %lu actual %u\n", + name, (unsigned long)sym->st_size, size); + return -EINVAL; + } + + sechdrs = pi->sechdrs; + + if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) { + pr_err("symbol %s is in a bss section. Cannot %s\n", name, + get_value ? "get" : "set"); + return -EINVAL; + } + + sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset + + sym->st_value; + + if (get_value) + memcpy((void *)buf, sym_buf, size); + else + memcpy((void *)sym_buf, buf, size); + + return 0; +} diff --git a/kernel/kexec_internal.h b/kernel/kexec_internal.h new file mode 100644 index 000000000000..e4392a698ad4 --- /dev/null +++ b/kernel/kexec_internal.h @@ -0,0 +1,22 @@ +#ifndef LINUX_KEXEC_INTERNAL_H +#define LINUX_KEXEC_INTERNAL_H + +#include + +struct kimage *do_kimage_alloc_init(void); +int sanity_check_segment_list(struct kimage *image); +void kimage_free_page_list(struct list_head *list); +void kimage_free(struct kimage *image); +int kimage_load_segment(struct kimage *image, struct kexec_segment *segment); +void kimage_terminate(struct kimage *image); +int kimage_is_destination_range(struct kimage *image, + unsigned long start, unsigned long end); + +extern struct mutex kexec_mutex; + +#ifdef CONFIG_KEXEC_FILE +void kimage_file_post_load_cleanup(struct kimage *image); +#else /* CONFIG_KEXEC_FILE */ +static inline void kimage_file_post_load_cleanup(struct kimage *image) { } +#endif /* CONFIG_KEXEC_FILE */ +#endif /* LINUX_KEXEC_INTERNAL_H */ -- cgit v1.2.3 From 2965faa5e03d1e71e9ff9aa143fff39e0a77543a Mon Sep 17 00:00:00 2001 From: Dave Young Date: Wed, 9 Sep 2015 15:38:55 -0700 Subject: kexec: split kexec_load syscall from kexec core code There are two kexec load syscalls: kexec_load and kexec_file_load. kexec_file_load has been split out into kernel/kexec_file.c. In this patch I split the kexec_load syscall code into kernel/kexec.c and add a new Kconfig option, KEXEC_CORE, so we can disable kexec_load and use only kexec_file_load, or vice versa. The original requirement came from Ted Ts'o: he wants the kexec kernel signature to be checked when CONFIG_KEXEC_VERIFY_SIG is enabled, but kexec-tools can bypass that check by using the kexec_load syscall. Vivek Goyal proposed creating a common Kconfig option so that users can compile in only one of the syscalls for loading a kexec kernel. KEXEC/KEXEC_FILE selects KEXEC_CORE so that old config files still work.
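For illustration, here is a minimal userspace sketch of the file-based load path that remains available when kexec_load is compiled out (not part of this patch). It assumes a kernel built with CONFIG_KEXEC_FILE, a libc that exposes SYS_kexec_file_load, and placeholder kernel/initrd paths:

/* Hedged sketch: stage a new kernel via kexec_file_load(2); reboot into
 * it afterwards (e.g. with kexec -e). Paths are placeholders, flags 0.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int kernel_fd = open("/boot/vmlinuz", O_RDONLY);
	int initrd_fd = open("/boot/initrd.img", O_RDONLY);
	const char cmdline[] = "root=/dev/sda1 ro";

	if (kernel_fd < 0 || initrd_fd < 0) {
		perror("open");
		return 1;
	}
	/* cmdline_len counts the trailing NUL: the kernel rejects a
	 * command line whose last byte is not '\0'. Needs CAP_SYS_BOOT.
	 */
	if (syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
		    sizeof(cmdline), cmdline, 0UL) < 0) {
		perror("kexec_file_load");
		return 1;
	}
	return 0;
}

Because the whole image arrives as file descriptors, the kernel can verify the signature of kernel_fd before accepting it, which the blob-based kexec_load syscall cannot enforce.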
Because there is generic code that needs CONFIG_KEXEC_CORE, I updated all the architecture Kconfig files with the new KEXEC_CORE option and made KEXEC select KEXEC_CORE in each arch Kconfig. I also updated the generic kernel code that refers to the kexec_load syscall. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Dave Young Cc: Eric W. Biederman Cc: Vivek Goyal Cc: Petr Tesarik Cc: Theodore Ts'o Cc: Josh Boyer Cc: David Howells Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/Kconfig | 3 + arch/arm/Kconfig | 1 + arch/ia64/Kconfig | 1 + arch/m68k/Kconfig | 1 + arch/mips/Kconfig | 1 + arch/powerpc/Kconfig | 1 + arch/s390/Kconfig | 1 + arch/sh/Kconfig | 1 + arch/tile/Kconfig | 1 + arch/x86/Kconfig | 3 +- arch/x86/boot/header.S | 2 +- arch/x86/include/asm/kdebug.h | 2 +- arch/x86/kernel/Makefile | 4 +- arch/x86/kernel/kvmclock.c | 4 +- arch/x86/kernel/reboot.c | 4 +- arch/x86/kernel/setup.c | 2 +- arch/x86/kernel/vmlinux.lds.S | 2 +- arch/x86/kvm/vmx.c | 8 +- arch/x86/platform/efi/efi.c | 4 +- arch/x86/platform/uv/uv_nmi.c | 6 +- drivers/firmware/efi/Kconfig | 2 +- drivers/pci/pci-driver.c | 2 +- include/linux/kexec.h | 6 +- init/initramfs.c | 4 +- kernel/Makefile | 1 + kernel/events/core.c | 2 +- kernel/kexec.c | 1495 +--------------------------------------- kernel/kexec_core.c | 1511 +++++++++++++++++++++++++++++++++++++++++ kernel/ksysfs.c | 6 +- kernel/printk/printk.c | 2 +- kernel/reboot.c | 2 +- kernel/sysctl.c | 2 +- 32 files changed, 1560 insertions(+), 1527 deletions(-) create mode 100644 kernel/kexec_core.c (limited to 'kernel/Makefile') diff --git a/arch/Kconfig b/arch/Kconfig index 8f3564930580..4e949e58b192 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -2,6 +2,9 @@ # General architecture dependent options # +config KEXEC_CORE + bool + config OPROFILE tristate "OProfile system profiling" depends on PROFILING diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 0d1b717e1eca..72ad724c67ae 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -2020,6 +2020,7 @@ config KEXEC bool "Kexec system call (EXPERIMENTAL)" depends on (!SMP || PM_SLEEP_SMP) depends on !CPU_V7M + select KEXEC_CORE help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 42a91a7aa2b0..eb0249e37981 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -518,6 +518,7 @@ source "drivers/sn/Kconfig" config KEXEC bool "kexec system call" depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU) + select KEXEC_CORE help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 2dd8f63bfbbb..498b567f007b 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -95,6 +95,7 @@ config MMU_SUN3 config KEXEC bool "kexec system call" depends on M68KCLASSIC + select KEXEC_CORE help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 752acca8de1f..e3aa5b0b4ef1 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2597,6 +2597,7 @@ source "kernel/Kconfig.preempt" config KEXEC bool "Kexec system call" + select KEXEC_CORE help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel.
It is like a reboot diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index b447918b9e2c..9a7057ec2154 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -420,6 +420,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE config KEXEC bool "kexec system call" depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) + select KEXEC_CORE help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 4827870f7a6d..1d57000b1b24 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -48,6 +48,7 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC config KEXEC def_bool y + select KEXEC_CORE config AUDIT_ARCH def_bool y diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 50057fed819d..d514df7e04dd 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -602,6 +602,7 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call (EXPERIMENTAL)" depends on SUPERH32 && MMU + select KEXEC_CORE help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 2ba12d761723..106c21bd7f44 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -205,6 +205,7 @@ source "kernel/Kconfig.hz" config KEXEC bool "kexec system call" + select KEXEC_CORE ---help--- kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cc0d73eac047..7aef2d52daa0 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1754,6 +1754,7 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call" + select KEXEC_CORE ---help--- kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. 
It is like a reboot @@ -1770,8 +1771,8 @@ config KEXEC config KEXEC_FILE bool "kexec file based system call" + select KEXEC_CORE select BUILD_BIN2C - depends on KEXEC depends on X86_64 depends on CRYPTO=y depends on CRYPTO_SHA256=y diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 16ef02596db2..2d6b309c8e9a 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S @@ -414,7 +414,7 @@ xloadflags: # define XLF23 0 #endif -#if defined(CONFIG_X86_64) && defined(CONFIG_EFI) && defined(CONFIG_KEXEC) +#if defined(CONFIG_X86_64) && defined(CONFIG_EFI) && defined(CONFIG_KEXEC_CORE) # define XLF4 XLF_EFI_KEXEC #else # define XLF4 0 diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h index 32ce71375b21..b130d59406fb 100644 --- a/arch/x86/include/asm/kdebug.h +++ b/arch/x86/include/asm/kdebug.h @@ -29,7 +29,7 @@ extern void show_trace(struct task_struct *t, struct pt_regs *regs, extern void __show_regs(struct pt_regs *regs, int all); extern unsigned long oops_begin(void); extern void oops_end(unsigned long, struct pt_regs *, int signr); -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE extern int in_crash_kexec; #else /* no crash dump is ever in progress if no crash kernel can be kexec'd */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 9ffdf25e5b86..b1b78ffe01d0 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -71,8 +71,8 @@ obj-$(CONFIG_LIVEPATCH) += livepatch.o obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_X86_TSC) += trace_clock.o -obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o -obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o +obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o +obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o obj-$(CONFIG_KEXEC_FILE) += kexec-bzimage64.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o obj-y += kprobes/ diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 49487b488061..2c7aafa70702 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c @@ -200,7 +200,7 @@ static void kvm_setup_secondary_clock(void) * kind of shutdown from our side, we unregister the clock by writting anything * that does not have the 'enable' bit set in the msr */ -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE static void kvm_crash_shutdown(struct pt_regs *regs) { native_write_msr(msr_kvm_system_time, 0, 0); @@ -259,7 +259,7 @@ void __init kvmclock_init(void) x86_platform.save_sched_clock_state = kvm_save_sched_clock_state; x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state; machine_ops.shutdown = kvm_shutdown; -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE machine_ops.crash_shutdown = kvm_crash_shutdown; #endif kvm_get_preset_lpj(); diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 86db4bcd7ce5..02693dd9a079 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -673,7 +673,7 @@ struct machine_ops machine_ops = { .emergency_restart = native_machine_emergency_restart, .restart = native_machine_restart, .halt = native_machine_halt, -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE .crash_shutdown = native_machine_crash_shutdown, #endif }; @@ -703,7 +703,7 @@ void machine_halt(void) machine_ops.halt(); } -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE void machine_crash_shutdown(struct pt_regs *regs) { machine_ops.crash_shutdown(regs); diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index baadbf90a7c5..fdb7f2a2d328 100644 
--- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -478,7 +478,7 @@ static void __init memblock_x86_reserve_range_setup_data(void) * --------- Crashkernel reservation ------------------------------ */ -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE /* * Keep the crash kernel below this limit. On 32 bits earlier kernels diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 00bf300fd846..74e4bf11f562 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -364,7 +364,7 @@ INIT_PER_CPU(irq_stack_union); #endif /* CONFIG_X86_32 */ -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE #include . = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 148ea2016022..d01986832afc 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1264,7 +1264,7 @@ static void vmcs_load(struct vmcs *vmcs) vmcs, phys_addr); } -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE /* * This bitmap is used to indicate whether the vmclear * operation is enabled on all cpus. All disabled by @@ -1302,7 +1302,7 @@ static void crash_vmclear_local_loaded_vmcss(void) #else static inline void crash_enable_local_vmclear(int cpu) { } static inline void crash_disable_local_vmclear(int cpu) { } -#endif /* CONFIG_KEXEC */ +#endif /* CONFIG_KEXEC_CORE */ static void __loaded_vmcs_clear(void *arg) { @@ -10411,7 +10411,7 @@ static int __init vmx_init(void) if (r) return r; -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE rcu_assign_pointer(crash_vmclear_loaded_vmcss, crash_vmclear_local_loaded_vmcss); #endif @@ -10421,7 +10421,7 @@ static int __init vmx_init(void) static void __exit vmx_exit(void) { -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL); synchronize_rcu(); #endif diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index e4308fe6afe8..1db84c0758b7 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -650,7 +650,7 @@ static void __init get_systab_virt_addr(efi_memory_desc_t *md) static void __init save_runtime_map(void) { -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE efi_memory_desc_t *md; void *tmp, *p, *q = NULL; int count = 0; @@ -748,7 +748,7 @@ static void * __init efi_map_regions(int *count, int *pg_shift) static void __init kexec_enter_virtual_mode(void) { -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE efi_memory_desc_t *md; void *p; diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c index 020c101c255f..5c9f63fa6abf 100644 --- a/arch/x86/platform/uv/uv_nmi.c +++ b/arch/x86/platform/uv/uv_nmi.c @@ -492,7 +492,7 @@ static void uv_nmi_touch_watchdogs(void) touch_nmi_watchdog(); } -#if defined(CONFIG_KEXEC) +#if defined(CONFIG_KEXEC_CORE) static atomic_t uv_nmi_kexec_failed; static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs) { @@ -519,13 +519,13 @@ static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs) uv_nmi_sync_exit(0); } -#else /* !CONFIG_KEXEC */ +#else /* !CONFIG_KEXEC_CORE */ static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs) { if (master) pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n"); } -#endif /* !CONFIG_KEXEC */ +#endif /* !CONFIG_KEXEC_CORE */ #ifdef CONFIG_KGDB #ifdef CONFIG_KGDB_KDB diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 54071c148340..84533e02fbf8 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -43,7 +43,7 @@ config 
EFI_VARS_PSTORE_DEFAULT_DISABLE config EFI_RUNTIME_MAP bool "Export efi runtime maps to sysfs" - depends on X86 && EFI && KEXEC + depends on X86 && EFI && KEXEC_CORE default y help Export efi runtime memory maps to /sys/firmware/efi/runtime-map. diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 52a880ca1768..dd652f2ae03d 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -467,7 +467,7 @@ static void pci_device_shutdown(struct device *dev) pci_msi_shutdown(pci_dev); pci_msix_shutdown(pci_dev); -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE /* * If this is a kexec reboot, turn off Bus Master bit on the * device to tell it to not continue to do DMA. Don't touch diff --git a/include/linux/kexec.h b/include/linux/kexec.h index ab150ade0d18..d140b1e9faa7 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -16,7 +16,7 @@ #include -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE #include #include #include @@ -329,13 +329,13 @@ int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, unsigned int relsec); -#else /* !CONFIG_KEXEC */ +#else /* !CONFIG_KEXEC_CORE */ struct pt_regs; struct task_struct; static inline void crash_kexec(struct pt_regs *regs) { } static inline int kexec_should_crash(struct task_struct *p) { return 0; } #define kexec_in_progress false -#endif /* CONFIG_KEXEC */ +#endif /* CONFIG_KEXEC_CORE */ #endif /* !defined(__ASSEBMLY__) */ diff --git a/init/initramfs.c b/init/initramfs.c index ad1bd7787bbb..b32ad7d97ac9 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -526,14 +526,14 @@ extern unsigned long __initramfs_size; static void __init free_initrd(void) { -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE unsigned long crashk_start = (unsigned long)__va(crashk_res.start); unsigned long crashk_end = (unsigned long)__va(crashk_res.end); #endif if (do_retain_initrd) goto skip; -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE /* * If the initrd region is overlapped with crashkernel reserved region, * free only memory that is not part of crashkernel region. diff --git a/kernel/Makefile b/kernel/Makefile index 1b4890af5a65..d4988410b410 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_MODULE_SIG) += module_signing.o obj-$(CONFIG_KALLSYMS) += kallsyms.o obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o +obj-$(CONFIG_KEXEC_CORE) += kexec_core.o obj-$(CONFIG_KEXEC) += kexec.o obj-$(CONFIG_KEXEC_FILE) += kexec_file.o obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o diff --git a/kernel/events/core.c b/kernel/events/core.c index e8183895691c..f548f69c4299 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -9094,7 +9094,7 @@ static void perf_event_init_cpu(int cpu) mutex_unlock(&swhash->hlist_mutex); } -#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC +#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE static void __perf_event_exit_context(void *__info) { struct remove_event re = { .detach_group = true }; diff --git a/kernel/kexec.c b/kernel/kexec.c index 2d73ecfa5505..4c5edc357923 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -1,148 +1,23 @@ /* - * kexec.c - kexec system call + * kexec.c - kexec_load system call * Copyright (C) 2002-2004 Eric Biederman * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. 
*/ -#define pr_fmt(fmt) "kexec: " fmt - #include #include #include -#include -#include #include #include #include -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include +#include -#include -#include #include "kexec_internal.h" -DEFINE_MUTEX(kexec_mutex); - -/* Per cpu memory for storing cpu states in case of system crash. */ -note_buf_t __percpu *crash_notes; - -/* vmcoreinfo stuff */ -static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES]; -u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; -size_t vmcoreinfo_size; -size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); - -/* Flag to indicate we are going to kexec a new kernel */ -bool kexec_in_progress = false; - - -/* Location of the reserved area for the crash kernel */ -struct resource crashk_res = { - .name = "Crash kernel", - .start = 0, - .end = 0, - .flags = IORESOURCE_BUSY | IORESOURCE_MEM -}; -struct resource crashk_low_res = { - .name = "Crash kernel", - .start = 0, - .end = 0, - .flags = IORESOURCE_BUSY | IORESOURCE_MEM -}; - -int kexec_should_crash(struct task_struct *p) -{ - /* - * If crash_kexec_post_notifiers is enabled, don't run - * crash_kexec() here yet, which must be run after panic - * notifiers in panic(). - */ - if (crash_kexec_post_notifiers) - return 0; - /* - * There are 4 panic() calls in do_exit() path, each of which - * corresponds to each of these 4 conditions. - */ - if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops) - return 1; - return 0; -} - -/* - * When kexec transitions to the new kernel there is a one-to-one - * mapping between physical and virtual addresses. On processors - * where you can disable the MMU this is trivial, and easy. For - * others it is still a simple predictable page table to setup. - * - * In that environment kexec copies the new kernel to its final - * resting place. This means I can only support memory whose - * physical address can fit in an unsigned long. In particular - * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled. - * If the assembly stub has more restrictive requirements - * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be - * defined more restrictively in . - * - * The code for the transition from the current kernel to the - * the new kernel is placed in the control_code_buffer, whose size - * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single - * page of memory is necessary, but some architectures require more. - * Because this memory must be identity mapped in the transition from - * virtual to physical addresses it must live in the range - * 0 - TASK_SIZE, as only the user space mappings are arbitrarily - * modifiable. - * - * The assembly stub in the control code buffer is passed a linked list - * of descriptor pages detailing the source pages of the new kernel, - * and the destination addresses of those source pages. As this data - * structure is not used in the context of the current OS, it must - * be self-contained. - * - * The code has been made to work with highmem pages and will use a - * destination page in its final resting place (if it happens - * to allocate it). The end product of this is that most of the - * physical address space, and most of RAM can be used. 
- * - * Future directions include: - * - allocating a page table with the control code buffer identity - * mapped, to simplify machine_kexec and make kexec_on_panic more - * reliable. - */ - -/* - * KIMAGE_NO_DEST is an impossible destination address..., for - * allocating pages whose destination address we do not care about. - */ -#define KIMAGE_NO_DEST (-1UL) - -static struct page *kimage_alloc_page(struct kimage *image, - gfp_t gfp_mask, - unsigned long dest); - static int copy_user_segment_list(struct kimage *image, unsigned long nr_segments, struct kexec_segment __user *segments) @@ -160,123 +35,6 @@ static int copy_user_segment_list(struct kimage *image, return ret; } -int sanity_check_segment_list(struct kimage *image) -{ - int result, i; - unsigned long nr_segments = image->nr_segments; - - /* - * Verify we have good destination addresses. The caller is - * responsible for making certain we don't attempt to load - * the new image into invalid or reserved areas of RAM. This - * just verifies it is an address we can use. - * - * Since the kernel does everything in page size chunks ensure - * the destination addresses are page aligned. Too many - * special cases crop of when we don't do this. The most - * insidious is getting overlapping destination addresses - * simply because addresses are changed to page size - * granularity. - */ - result = -EADDRNOTAVAIL; - for (i = 0; i < nr_segments; i++) { - unsigned long mstart, mend; - - mstart = image->segment[i].mem; - mend = mstart + image->segment[i].memsz; - if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK)) - return result; - if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT) - return result; - } - - /* Verify our destination addresses do not overlap. - * If we alloed overlapping destination addresses - * through very weird things can happen with no - * easy explanation as one segment stops on another. - */ - result = -EINVAL; - for (i = 0; i < nr_segments; i++) { - unsigned long mstart, mend; - unsigned long j; - - mstart = image->segment[i].mem; - mend = mstart + image->segment[i].memsz; - for (j = 0; j < i; j++) { - unsigned long pstart, pend; - pstart = image->segment[j].mem; - pend = pstart + image->segment[j].memsz; - /* Do the segments overlap ? */ - if ((mend > pstart) && (mstart < pend)) - return result; - } - } - - /* Ensure our buffer sizes are strictly less than - * our memory sizes. This should always be the case, - * and it is easier to check up front than to be surprised - * later on. - */ - result = -EINVAL; - for (i = 0; i < nr_segments; i++) { - if (image->segment[i].bufsz > image->segment[i].memsz) - return result; - } - - /* - * Verify we have good destination addresses. Normally - * the caller is responsible for making certain we don't - * attempt to load the new image into invalid or reserved - * areas of RAM. But crash kernels are preloaded into a - * reserved area of ram. We must ensure the addresses - * are in the reserved area otherwise preloading the - * kernel could corrupt things. 
- */ - - if (image->type == KEXEC_TYPE_CRASH) { - result = -EADDRNOTAVAIL; - for (i = 0; i < nr_segments; i++) { - unsigned long mstart, mend; - - mstart = image->segment[i].mem; - mend = mstart + image->segment[i].memsz - 1; - /* Ensure we are within the crash kernel limits */ - if ((mstart < crashk_res.start) || - (mend > crashk_res.end)) - return result; - } - } - - return 0; -} - -struct kimage *do_kimage_alloc_init(void) -{ - struct kimage *image; - - /* Allocate a controlling structure */ - image = kzalloc(sizeof(*image), GFP_KERNEL); - if (!image) - return NULL; - - image->head = 0; - image->entry = &image->head; - image->last_entry = &image->head; - image->control_page = ~0; /* By default this does not apply */ - image->type = KEXEC_TYPE_DEFAULT; - - /* Initialize the list of control pages */ - INIT_LIST_HEAD(&image->control_pages); - - /* Initialize the list of destination pages */ - INIT_LIST_HEAD(&image->dest_pages); - - /* Initialize the list of unusable pages */ - INIT_LIST_HEAD(&image->unusable_pages); - - return image; -} - static int kimage_alloc_init(struct kimage **rimage, unsigned long entry, unsigned long nr_segments, struct kexec_segment __user *segments, @@ -343,597 +101,6 @@ out_free_image: return ret; } -int kimage_is_destination_range(struct kimage *image, - unsigned long start, - unsigned long end) -{ - unsigned long i; - - for (i = 0; i < image->nr_segments; i++) { - unsigned long mstart, mend; - - mstart = image->segment[i].mem; - mend = mstart + image->segment[i].memsz; - if ((end > mstart) && (start < mend)) - return 1; - } - - return 0; -} - -static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) -{ - struct page *pages; - - pages = alloc_pages(gfp_mask, order); - if (pages) { - unsigned int count, i; - pages->mapping = NULL; - set_page_private(pages, order); - count = 1 << order; - for (i = 0; i < count; i++) - SetPageReserved(pages + i); - } - - return pages; -} - -static void kimage_free_pages(struct page *page) -{ - unsigned int order, count, i; - - order = page_private(page); - count = 1 << order; - for (i = 0; i < count; i++) - ClearPageReserved(page + i); - __free_pages(page, order); -} - -void kimage_free_page_list(struct list_head *list) -{ - struct list_head *pos, *next; - - list_for_each_safe(pos, next, list) { - struct page *page; - - page = list_entry(pos, struct page, lru); - list_del(&page->lru); - kimage_free_pages(page); - } -} - -static struct page *kimage_alloc_normal_control_pages(struct kimage *image, - unsigned int order) -{ - /* Control pages are special, they are the intermediaries - * that are needed while we copy the rest of the pages - * to their final resting place. As such they must - * not conflict with either the destination addresses - * or memory the kernel is already using. - * - * The only case where we really need more than one of - * these are for architectures where we cannot disable - * the MMU and must instead generate an identity mapped - * page table for all of the memory. - * - * At worst this runs in O(N) of the image size. - */ - struct list_head extra_pages; - struct page *pages; - unsigned int count; - - count = 1 << order; - INIT_LIST_HEAD(&extra_pages); - - /* Loop while I can allocate a page and the page allocated - * is a destination page. 
- */ - do { - unsigned long pfn, epfn, addr, eaddr; - - pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order); - if (!pages) - break; - pfn = page_to_pfn(pages); - epfn = pfn + count; - addr = pfn << PAGE_SHIFT; - eaddr = epfn << PAGE_SHIFT; - if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) || - kimage_is_destination_range(image, addr, eaddr)) { - list_add(&pages->lru, &extra_pages); - pages = NULL; - } - } while (!pages); - - if (pages) { - /* Remember the allocated page... */ - list_add(&pages->lru, &image->control_pages); - - /* Because the page is already in it's destination - * location we will never allocate another page at - * that address. Therefore kimage_alloc_pages - * will not return it (again) and we don't need - * to give it an entry in image->segment[]. - */ - } - /* Deal with the destination pages I have inadvertently allocated. - * - * Ideally I would convert multi-page allocations into single - * page allocations, and add everything to image->dest_pages. - * - * For now it is simpler to just free the pages. - */ - kimage_free_page_list(&extra_pages); - - return pages; -} - -static struct page *kimage_alloc_crash_control_pages(struct kimage *image, - unsigned int order) -{ - /* Control pages are special, they are the intermediaries - * that are needed while we copy the rest of the pages - * to their final resting place. As such they must - * not conflict with either the destination addresses - * or memory the kernel is already using. - * - * Control pages are also the only pags we must allocate - * when loading a crash kernel. All of the other pages - * are specified by the segments and we just memcpy - * into them directly. - * - * The only case where we really need more than one of - * these are for architectures where we cannot disable - * the MMU and must instead generate an identity mapped - * page table for all of the memory. - * - * Given the low demand this implements a very simple - * allocator that finds the first hole of the appropriate - * size in the reserved memory region, and allocates all - * of the memory up to and including the hole. - */ - unsigned long hole_start, hole_end, size; - struct page *pages; - - pages = NULL; - size = (1 << order) << PAGE_SHIFT; - hole_start = (image->control_page + (size - 1)) & ~(size - 1); - hole_end = hole_start + size - 1; - while (hole_end <= crashk_res.end) { - unsigned long i; - - if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT) - break; - /* See if I overlap any of the segments */ - for (i = 0; i < image->nr_segments; i++) { - unsigned long mstart, mend; - - mstart = image->segment[i].mem; - mend = mstart + image->segment[i].memsz - 1; - if ((hole_end >= mstart) && (hole_start <= mend)) { - /* Advance the hole to the end of the segment */ - hole_start = (mend + (size - 1)) & ~(size - 1); - hole_end = hole_start + size - 1; - break; - } - } - /* If I don't overlap any segments I have found my hole! 
*/ - if (i == image->nr_segments) { - pages = pfn_to_page(hole_start >> PAGE_SHIFT); - break; - } - } - if (pages) - image->control_page = hole_end; - - return pages; -} - - -struct page *kimage_alloc_control_pages(struct kimage *image, - unsigned int order) -{ - struct page *pages = NULL; - - switch (image->type) { - case KEXEC_TYPE_DEFAULT: - pages = kimage_alloc_normal_control_pages(image, order); - break; - case KEXEC_TYPE_CRASH: - pages = kimage_alloc_crash_control_pages(image, order); - break; - } - - return pages; -} - -static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) -{ - if (*image->entry != 0) - image->entry++; - - if (image->entry == image->last_entry) { - kimage_entry_t *ind_page; - struct page *page; - - page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); - if (!page) - return -ENOMEM; - - ind_page = page_address(page); - *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; - image->entry = ind_page; - image->last_entry = ind_page + - ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); - } - *image->entry = entry; - image->entry++; - *image->entry = 0; - - return 0; -} - -static int kimage_set_destination(struct kimage *image, - unsigned long destination) -{ - int result; - - destination &= PAGE_MASK; - result = kimage_add_entry(image, destination | IND_DESTINATION); - - return result; -} - - -static int kimage_add_page(struct kimage *image, unsigned long page) -{ - int result; - - page &= PAGE_MASK; - result = kimage_add_entry(image, page | IND_SOURCE); - - return result; -} - - -static void kimage_free_extra_pages(struct kimage *image) -{ - /* Walk through and free any extra destination pages I may have */ - kimage_free_page_list(&image->dest_pages); - - /* Walk through and free any unusable pages I have cached */ - kimage_free_page_list(&image->unusable_pages); - -} -void kimage_terminate(struct kimage *image) -{ - if (*image->entry != 0) - image->entry++; - - *image->entry = IND_DONE; -} - -#define for_each_kimage_entry(image, ptr, entry) \ - for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ - ptr = (entry & IND_INDIRECTION) ? \ - phys_to_virt((entry & PAGE_MASK)) : ptr + 1) - -static void kimage_free_entry(kimage_entry_t entry) -{ - struct page *page; - - page = pfn_to_page(entry >> PAGE_SHIFT); - kimage_free_pages(page); -} - -void kimage_free(struct kimage *image) -{ - kimage_entry_t *ptr, entry; - kimage_entry_t ind = 0; - - if (!image) - return; - - kimage_free_extra_pages(image); - for_each_kimage_entry(image, ptr, entry) { - if (entry & IND_INDIRECTION) { - /* Free the previous indirection page */ - if (ind & IND_INDIRECTION) - kimage_free_entry(ind); - /* Save this indirection page until we are - * done with it. - */ - ind = entry; - } else if (entry & IND_SOURCE) - kimage_free_entry(entry); - } - /* Free the final indirection page */ - if (ind & IND_INDIRECTION) - kimage_free_entry(ind); - - /* Handle any machine specific cleanup */ - machine_kexec_cleanup(image); - - /* Free the kexec control pages... */ - kimage_free_page_list(&image->control_pages); - - /* - * Free up any temporary buffers allocated. This might hit if - * error occurred much later after buffer allocation. 
- */ - if (image->file_mode) - kimage_file_post_load_cleanup(image); - - kfree(image); -} - -static kimage_entry_t *kimage_dst_used(struct kimage *image, - unsigned long page) -{ - kimage_entry_t *ptr, entry; - unsigned long destination = 0; - - for_each_kimage_entry(image, ptr, entry) { - if (entry & IND_DESTINATION) - destination = entry & PAGE_MASK; - else if (entry & IND_SOURCE) { - if (page == destination) - return ptr; - destination += PAGE_SIZE; - } - } - - return NULL; -} - -static struct page *kimage_alloc_page(struct kimage *image, - gfp_t gfp_mask, - unsigned long destination) -{ - /* - * Here we implement safeguards to ensure that a source page - * is not copied to its destination page before the data on - * the destination page is no longer useful. - * - * To do this we maintain the invariant that a source page is - * either its own destination page, or it is not a - * destination page at all. - * - * That is slightly stronger than required, but the proof - * that no problems will not occur is trivial, and the - * implementation is simply to verify. - * - * When allocating all pages normally this algorithm will run - * in O(N) time, but in the worst case it will run in O(N^2) - * time. If the runtime is a problem the data structures can - * be fixed. - */ - struct page *page; - unsigned long addr; - - /* - * Walk through the list of destination pages, and see if I - * have a match. - */ - list_for_each_entry(page, &image->dest_pages, lru) { - addr = page_to_pfn(page) << PAGE_SHIFT; - if (addr == destination) { - list_del(&page->lru); - return page; - } - } - page = NULL; - while (1) { - kimage_entry_t *old; - - /* Allocate a page, if we run out of memory give up */ - page = kimage_alloc_pages(gfp_mask, 0); - if (!page) - return NULL; - /* If the page cannot be used file it away */ - if (page_to_pfn(page) > - (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) { - list_add(&page->lru, &image->unusable_pages); - continue; - } - addr = page_to_pfn(page) << PAGE_SHIFT; - - /* If it is the destination page we want use it */ - if (addr == destination) - break; - - /* If the page is not a destination page use it */ - if (!kimage_is_destination_range(image, addr, - addr + PAGE_SIZE)) - break; - - /* - * I know that the page is someones destination page. - * See if there is already a source page for this - * destination page. And if so swap the source pages. - */ - old = kimage_dst_used(image, addr); - if (old) { - /* If so move it */ - unsigned long old_addr; - struct page *old_page; - - old_addr = *old & PAGE_MASK; - old_page = pfn_to_page(old_addr >> PAGE_SHIFT); - copy_highpage(page, old_page); - *old = addr | (*old & ~PAGE_MASK); - - /* The old page I have found cannot be a - * destination page, so return it if it's - * gfp_flags honor the ones passed in. - */ - if (!(gfp_mask & __GFP_HIGHMEM) && - PageHighMem(old_page)) { - kimage_free_pages(old_page); - continue; - } - addr = old_addr; - page = old_page; - break; - } else { - /* Place the page on the destination list I - * will use it later. 
- */ - list_add(&page->lru, &image->dest_pages); - } - } - - return page; -} - -static int kimage_load_normal_segment(struct kimage *image, - struct kexec_segment *segment) -{ - unsigned long maddr; - size_t ubytes, mbytes; - int result; - unsigned char __user *buf = NULL; - unsigned char *kbuf = NULL; - - result = 0; - if (image->file_mode) - kbuf = segment->kbuf; - else - buf = segment->buf; - ubytes = segment->bufsz; - mbytes = segment->memsz; - maddr = segment->mem; - - result = kimage_set_destination(image, maddr); - if (result < 0) - goto out; - - while (mbytes) { - struct page *page; - char *ptr; - size_t uchunk, mchunk; - - page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); - if (!page) { - result = -ENOMEM; - goto out; - } - result = kimage_add_page(image, page_to_pfn(page) - << PAGE_SHIFT); - if (result < 0) - goto out; - - ptr = kmap(page); - /* Start with a clear page */ - clear_page(ptr); - ptr += maddr & ~PAGE_MASK; - mchunk = min_t(size_t, mbytes, - PAGE_SIZE - (maddr & ~PAGE_MASK)); - uchunk = min(ubytes, mchunk); - - /* For file based kexec, source pages are in kernel memory */ - if (image->file_mode) - memcpy(ptr, kbuf, uchunk); - else - result = copy_from_user(ptr, buf, uchunk); - kunmap(page); - if (result) { - result = -EFAULT; - goto out; - } - ubytes -= uchunk; - maddr += mchunk; - if (image->file_mode) - kbuf += mchunk; - else - buf += mchunk; - mbytes -= mchunk; - } -out: - return result; -} - -static int kimage_load_crash_segment(struct kimage *image, - struct kexec_segment *segment) -{ - /* For crash dumps kernels we simply copy the data from - * user space to it's destination. - * We do things a page at a time for the sake of kmap. - */ - unsigned long maddr; - size_t ubytes, mbytes; - int result; - unsigned char __user *buf = NULL; - unsigned char *kbuf = NULL; - - result = 0; - if (image->file_mode) - kbuf = segment->kbuf; - else - buf = segment->buf; - ubytes = segment->bufsz; - mbytes = segment->memsz; - maddr = segment->mem; - while (mbytes) { - struct page *page; - char *ptr; - size_t uchunk, mchunk; - - page = pfn_to_page(maddr >> PAGE_SHIFT); - if (!page) { - result = -ENOMEM; - goto out; - } - ptr = kmap(page); - ptr += maddr & ~PAGE_MASK; - mchunk = min_t(size_t, mbytes, - PAGE_SIZE - (maddr & ~PAGE_MASK)); - uchunk = min(ubytes, mchunk); - if (mchunk > uchunk) { - /* Zero the trailing part of the page */ - memset(ptr + uchunk, 0, mchunk - uchunk); - } - - /* For file based kexec, source pages are in kernel memory */ - if (image->file_mode) - memcpy(ptr, kbuf, uchunk); - else - result = copy_from_user(ptr, buf, uchunk); - kexec_flush_icache_page(page); - kunmap(page); - if (result) { - result = -EFAULT; - goto out; - } - ubytes -= uchunk; - maddr += mchunk; - if (image->file_mode) - kbuf += mchunk; - else - buf += mchunk; - mbytes -= mchunk; - } -out: - return result; -} - -int kimage_load_segment(struct kimage *image, - struct kexec_segment *segment) -{ - int result = -ENOMEM; - - switch (image->type) { - case KEXEC_TYPE_DEFAULT: - result = kimage_load_normal_segment(image, segment); - break; - case KEXEC_TYPE_CRASH: - result = kimage_load_crash_segment(image, segment); - break; - } - - return result; -} - /* * Exec Kernel system call: for obvious reasons only root may call it. * @@ -954,9 +121,6 @@ int kimage_load_segment(struct kimage *image, * kexec does not sync, or unmount filesystems so if you need * that to happen you need to do that yourself. 
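As an aside on the entry point documented in the surrounding comment: a minimal user-space sketch of how this syscall is reached, not taken from the patch. A real loader (e.g. kexec-tools) passes actual segments and an entry point; passing zero segments simply unloads any previously loaded image, and CAP_SYS_BOOT is required.

	#include <stddef.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/kexec.h>

	/* Minimal call shape: zero segments unloads any previously
	 * loaded kernel image. */
	static long unload_kexec_image(void)
	{
		return syscall(SYS_kexec_load, 0UL, 0UL, NULL,
			       (unsigned long)KEXEC_ARCH_DEFAULT);
	}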
*/ -struct kimage *kexec_image; -struct kimage *kexec_crash_image; -int kexec_load_disabled; SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments, struct kexec_segment __user *, segments, unsigned long, flags) @@ -1051,18 +215,6 @@ out: return result; } -/* - * Add and remove page tables for crashkernel memory - * - * Provide an empty default implementation here -- architecture - * code may override this - */ -void __weak crash_map_reserved_pages(void) -{} - -void __weak crash_unmap_reserved_pages(void) -{} - #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry, compat_ulong_t, nr_segments, @@ -1101,646 +253,3 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry, return sys_kexec_load(entry, nr_segments, ksegments, flags); } #endif - -void crash_kexec(struct pt_regs *regs) -{ - /* Take the kexec_mutex here to prevent sys_kexec_load - * running on one cpu from replacing the crash kernel - * we are using after a panic on a different cpu. - * - * If the crash kernel was not located in a fixed area - * of memory the xchg(&kexec_crash_image) would be - * sufficient. But since I reuse the memory... - */ - if (mutex_trylock(&kexec_mutex)) { - if (kexec_crash_image) { - struct pt_regs fixed_regs; - - crash_setup_regs(&fixed_regs, regs); - crash_save_vmcoreinfo(); - machine_crash_shutdown(&fixed_regs); - machine_kexec(kexec_crash_image); - } - mutex_unlock(&kexec_mutex); - } -} - -size_t crash_get_memory_size(void) -{ - size_t size = 0; - mutex_lock(&kexec_mutex); - if (crashk_res.end != crashk_res.start) - size = resource_size(&crashk_res); - mutex_unlock(&kexec_mutex); - return size; -} - -void __weak crash_free_reserved_phys_range(unsigned long begin, - unsigned long end) -{ - unsigned long addr; - - for (addr = begin; addr < end; addr += PAGE_SIZE) - free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT)); -} - -int crash_shrink_memory(unsigned long new_size) -{ - int ret = 0; - unsigned long start, end; - unsigned long old_size; - struct resource *ram_res; - - mutex_lock(&kexec_mutex); - - if (kexec_crash_image) { - ret = -ENOENT; - goto unlock; - } - start = crashk_res.start; - end = crashk_res.end; - old_size = (end == 0) ? 0 : end - start + 1; - if (new_size >= old_size) { - ret = (new_size == old_size) ? 
0 : -EINVAL; - goto unlock; - } - - ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL); - if (!ram_res) { - ret = -ENOMEM; - goto unlock; - } - - start = roundup(start, KEXEC_CRASH_MEM_ALIGN); - end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN); - - crash_map_reserved_pages(); - crash_free_reserved_phys_range(end, crashk_res.end); - - if ((start == end) && (crashk_res.parent != NULL)) - release_resource(&crashk_res); - - ram_res->start = end; - ram_res->end = crashk_res.end; - ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; - ram_res->name = "System RAM"; - - crashk_res.end = end - 1; - - insert_resource(&iomem_resource, ram_res); - crash_unmap_reserved_pages(); - -unlock: - mutex_unlock(&kexec_mutex); - return ret; -} - -static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, - size_t data_len) -{ - struct elf_note note; - - note.n_namesz = strlen(name) + 1; - note.n_descsz = data_len; - note.n_type = type; - memcpy(buf, ¬e, sizeof(note)); - buf += (sizeof(note) + 3)/4; - memcpy(buf, name, note.n_namesz); - buf += (note.n_namesz + 3)/4; - memcpy(buf, data, note.n_descsz); - buf += (note.n_descsz + 3)/4; - - return buf; -} - -static void final_note(u32 *buf) -{ - struct elf_note note; - - note.n_namesz = 0; - note.n_descsz = 0; - note.n_type = 0; - memcpy(buf, ¬e, sizeof(note)); -} - -void crash_save_cpu(struct pt_regs *regs, int cpu) -{ - struct elf_prstatus prstatus; - u32 *buf; - - if ((cpu < 0) || (cpu >= nr_cpu_ids)) - return; - - /* Using ELF notes here is opportunistic. - * I need a well defined structure format - * for the data I pass, and I need tags - * on the data to indicate what information I have - * squirrelled away. ELF notes happen to provide - * all of that, so there is no need to invent something new. - */ - buf = (u32 *)per_cpu_ptr(crash_notes, cpu); - if (!buf) - return; - memset(&prstatus, 0, sizeof(prstatus)); - prstatus.pr_pid = current->pid; - elf_core_copy_kernel_regs(&prstatus.pr_reg, regs); - buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, - &prstatus, sizeof(prstatus)); - final_note(buf); -} - -static int __init crash_notes_memory_init(void) -{ - /* Allocate memory for saving cpu registers. */ - crash_notes = alloc_percpu(note_buf_t); - if (!crash_notes) { - pr_warn("Kexec: Memory allocation for saving cpu register states failed\n"); - return -ENOMEM; - } - return 0; -} -subsys_initcall(crash_notes_memory_init); - - -/* - * parsing the "crashkernel" commandline - * - * this code is intended to be called from architecture specific code - */ - - -/* - * This function parses command lines in the format - * - * crashkernel=ramsize-range:size[,...][@offset] - * - * The function returns 0 on success and -EINVAL on failure. 
- */ -static int __init parse_crashkernel_mem(char *cmdline, - unsigned long long system_ram, - unsigned long long *crash_size, - unsigned long long *crash_base) -{ - char *cur = cmdline, *tmp; - - /* for each entry of the comma-separated list */ - do { - unsigned long long start, end = ULLONG_MAX, size; - - /* get the start of the range */ - start = memparse(cur, &tmp); - if (cur == tmp) { - pr_warn("crashkernel: Memory value expected\n"); - return -EINVAL; - } - cur = tmp; - if (*cur != '-') { - pr_warn("crashkernel: '-' expected\n"); - return -EINVAL; - } - cur++; - - /* if no ':' is here, than we read the end */ - if (*cur != ':') { - end = memparse(cur, &tmp); - if (cur == tmp) { - pr_warn("crashkernel: Memory value expected\n"); - return -EINVAL; - } - cur = tmp; - if (end <= start) { - pr_warn("crashkernel: end <= start\n"); - return -EINVAL; - } - } - - if (*cur != ':') { - pr_warn("crashkernel: ':' expected\n"); - return -EINVAL; - } - cur++; - - size = memparse(cur, &tmp); - if (cur == tmp) { - pr_warn("Memory value expected\n"); - return -EINVAL; - } - cur = tmp; - if (size >= system_ram) { - pr_warn("crashkernel: invalid size\n"); - return -EINVAL; - } - - /* match ? */ - if (system_ram >= start && system_ram < end) { - *crash_size = size; - break; - } - } while (*cur++ == ','); - - if (*crash_size > 0) { - while (*cur && *cur != ' ' && *cur != '@') - cur++; - if (*cur == '@') { - cur++; - *crash_base = memparse(cur, &tmp); - if (cur == tmp) { - pr_warn("Memory value expected after '@'\n"); - return -EINVAL; - } - } - } - - return 0; -} - -/* - * That function parses "simple" (old) crashkernel command lines like - * - * crashkernel=size[@offset] - * - * It returns 0 on success and -EINVAL on failure. - */ -static int __init parse_crashkernel_simple(char *cmdline, - unsigned long long *crash_size, - unsigned long long *crash_base) -{ - char *cur = cmdline; - - *crash_size = memparse(cmdline, &cur); - if (cmdline == cur) { - pr_warn("crashkernel: memory value expected\n"); - return -EINVAL; - } - - if (*cur == '@') - *crash_base = memparse(cur+1, &cur); - else if (*cur != ' ' && *cur != '\0') { - pr_warn("crashkernel: unrecognized char\n"); - return -EINVAL; - } - - return 0; -} - -#define SUFFIX_HIGH 0 -#define SUFFIX_LOW 1 -#define SUFFIX_NULL 2 -static __initdata char *suffix_tbl[] = { - [SUFFIX_HIGH] = ",high", - [SUFFIX_LOW] = ",low", - [SUFFIX_NULL] = NULL, -}; - -/* - * That function parses "suffix" crashkernel command lines like - * - * crashkernel=size,[high|low] - * - * It returns 0 on success and -EINVAL on failure. 
- */ -static int __init parse_crashkernel_suffix(char *cmdline, - unsigned long long *crash_size, - const char *suffix) -{ - char *cur = cmdline; - - *crash_size = memparse(cmdline, &cur); - if (cmdline == cur) { - pr_warn("crashkernel: memory value expected\n"); - return -EINVAL; - } - - /* check with suffix */ - if (strncmp(cur, suffix, strlen(suffix))) { - pr_warn("crashkernel: unrecognized char\n"); - return -EINVAL; - } - cur += strlen(suffix); - if (*cur != ' ' && *cur != '\0') { - pr_warn("crashkernel: unrecognized char\n"); - return -EINVAL; - } - - return 0; -} - -static __init char *get_last_crashkernel(char *cmdline, - const char *name, - const char *suffix) -{ - char *p = cmdline, *ck_cmdline = NULL; - - /* find crashkernel and use the last one if there are more */ - p = strstr(p, name); - while (p) { - char *end_p = strchr(p, ' '); - char *q; - - if (!end_p) - end_p = p + strlen(p); - - if (!suffix) { - int i; - - /* skip the one with any known suffix */ - for (i = 0; suffix_tbl[i]; i++) { - q = end_p - strlen(suffix_tbl[i]); - if (!strncmp(q, suffix_tbl[i], - strlen(suffix_tbl[i]))) - goto next; - } - ck_cmdline = p; - } else { - q = end_p - strlen(suffix); - if (!strncmp(q, suffix, strlen(suffix))) - ck_cmdline = p; - } -next: - p = strstr(p+1, name); - } - - if (!ck_cmdline) - return NULL; - - return ck_cmdline; -} - -static int __init __parse_crashkernel(char *cmdline, - unsigned long long system_ram, - unsigned long long *crash_size, - unsigned long long *crash_base, - const char *name, - const char *suffix) -{ - char *first_colon, *first_space; - char *ck_cmdline; - - BUG_ON(!crash_size || !crash_base); - *crash_size = 0; - *crash_base = 0; - - ck_cmdline = get_last_crashkernel(cmdline, name, suffix); - - if (!ck_cmdline) - return -EINVAL; - - ck_cmdline += strlen(name); - - if (suffix) - return parse_crashkernel_suffix(ck_cmdline, crash_size, - suffix); - /* - * if the commandline contains a ':', then that's the extended - * syntax -- if not, it must be the classic syntax - */ - first_colon = strchr(ck_cmdline, ':'); - first_space = strchr(ck_cmdline, ' '); - if (first_colon && (!first_space || first_colon < first_space)) - return parse_crashkernel_mem(ck_cmdline, system_ram, - crash_size, crash_base); - - return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base); -} - -/* - * That function is the entry point for command line parsing and should be - * called from the arch-specific code. 
- */ -int __init parse_crashkernel(char *cmdline, - unsigned long long system_ram, - unsigned long long *crash_size, - unsigned long long *crash_base) -{ - return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, - "crashkernel=", NULL); -} - -int __init parse_crashkernel_high(char *cmdline, - unsigned long long system_ram, - unsigned long long *crash_size, - unsigned long long *crash_base) -{ - return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, - "crashkernel=", suffix_tbl[SUFFIX_HIGH]); -} - -int __init parse_crashkernel_low(char *cmdline, - unsigned long long system_ram, - unsigned long long *crash_size, - unsigned long long *crash_base) -{ - return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, - "crashkernel=", suffix_tbl[SUFFIX_LOW]); -} - -static void update_vmcoreinfo_note(void) -{ - u32 *buf = vmcoreinfo_note; - - if (!vmcoreinfo_size) - return; - buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data, - vmcoreinfo_size); - final_note(buf); -} - -void crash_save_vmcoreinfo(void) -{ - vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds()); - update_vmcoreinfo_note(); -} - -void vmcoreinfo_append_str(const char *fmt, ...) -{ - va_list args; - char buf[0x50]; - size_t r; - - va_start(args, fmt); - r = vscnprintf(buf, sizeof(buf), fmt, args); - va_end(args); - - r = min(r, vmcoreinfo_max_size - vmcoreinfo_size); - - memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r); - - vmcoreinfo_size += r; -} - -/* - * provide an empty default implementation here -- architecture - * code may override this - */ -void __weak arch_crash_save_vmcoreinfo(void) -{} - -unsigned long __weak paddr_vmcoreinfo_note(void) -{ - return __pa((unsigned long)(char *)&vmcoreinfo_note); -} - -static int __init crash_save_vmcoreinfo_init(void) -{ - VMCOREINFO_OSRELEASE(init_uts_ns.name.release); - VMCOREINFO_PAGESIZE(PAGE_SIZE); - - VMCOREINFO_SYMBOL(init_uts_ns); - VMCOREINFO_SYMBOL(node_online_map); -#ifdef CONFIG_MMU - VMCOREINFO_SYMBOL(swapper_pg_dir); -#endif - VMCOREINFO_SYMBOL(_stext); - VMCOREINFO_SYMBOL(vmap_area_list); - -#ifndef CONFIG_NEED_MULTIPLE_NODES - VMCOREINFO_SYMBOL(mem_map); - VMCOREINFO_SYMBOL(contig_page_data); -#endif -#ifdef CONFIG_SPARSEMEM - VMCOREINFO_SYMBOL(mem_section); - VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); - VMCOREINFO_STRUCT_SIZE(mem_section); - VMCOREINFO_OFFSET(mem_section, section_mem_map); -#endif - VMCOREINFO_STRUCT_SIZE(page); - VMCOREINFO_STRUCT_SIZE(pglist_data); - VMCOREINFO_STRUCT_SIZE(zone); - VMCOREINFO_STRUCT_SIZE(free_area); - VMCOREINFO_STRUCT_SIZE(list_head); - VMCOREINFO_SIZE(nodemask_t); - VMCOREINFO_OFFSET(page, flags); - VMCOREINFO_OFFSET(page, _count); - VMCOREINFO_OFFSET(page, mapping); - VMCOREINFO_OFFSET(page, lru); - VMCOREINFO_OFFSET(page, _mapcount); - VMCOREINFO_OFFSET(page, private); - VMCOREINFO_OFFSET(pglist_data, node_zones); - VMCOREINFO_OFFSET(pglist_data, nr_zones); -#ifdef CONFIG_FLAT_NODE_MEM_MAP - VMCOREINFO_OFFSET(pglist_data, node_mem_map); -#endif - VMCOREINFO_OFFSET(pglist_data, node_start_pfn); - VMCOREINFO_OFFSET(pglist_data, node_spanned_pages); - VMCOREINFO_OFFSET(pglist_data, node_id); - VMCOREINFO_OFFSET(zone, free_area); - VMCOREINFO_OFFSET(zone, vm_stat); - VMCOREINFO_OFFSET(zone, spanned_pages); - VMCOREINFO_OFFSET(free_area, free_list); - VMCOREINFO_OFFSET(list_head, next); - VMCOREINFO_OFFSET(list_head, prev); - VMCOREINFO_OFFSET(vmap_area, va_start); - VMCOREINFO_OFFSET(vmap_area, list); - VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER); - 
log_buf_kexec_setup(); - VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES); - VMCOREINFO_NUMBER(NR_FREE_PAGES); - VMCOREINFO_NUMBER(PG_lru); - VMCOREINFO_NUMBER(PG_private); - VMCOREINFO_NUMBER(PG_swapcache); - VMCOREINFO_NUMBER(PG_slab); -#ifdef CONFIG_MEMORY_FAILURE - VMCOREINFO_NUMBER(PG_hwpoison); -#endif - VMCOREINFO_NUMBER(PG_head_mask); - VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE); -#ifdef CONFIG_HUGETLBFS - VMCOREINFO_SYMBOL(free_huge_page); -#endif - - arch_crash_save_vmcoreinfo(); - update_vmcoreinfo_note(); - - return 0; -} - -subsys_initcall(crash_save_vmcoreinfo_init); - -/* - * Move into place and start executing a preloaded standalone - * executable. If nothing was preloaded return an error. - */ -int kernel_kexec(void) -{ - int error = 0; - - if (!mutex_trylock(&kexec_mutex)) - return -EBUSY; - if (!kexec_image) { - error = -EINVAL; - goto Unlock; - } - -#ifdef CONFIG_KEXEC_JUMP - if (kexec_image->preserve_context) { - lock_system_sleep(); - pm_prepare_console(); - error = freeze_processes(); - if (error) { - error = -EBUSY; - goto Restore_console; - } - suspend_console(); - error = dpm_suspend_start(PMSG_FREEZE); - if (error) - goto Resume_console; - /* At this point, dpm_suspend_start() has been called, - * but *not* dpm_suspend_end(). We *must* call - * dpm_suspend_end() now. Otherwise, drivers for - * some devices (e.g. interrupt controllers) become - * desynchronized with the actual state of the - * hardware at resume time, and evil weirdness ensues. - */ - error = dpm_suspend_end(PMSG_FREEZE); - if (error) - goto Resume_devices; - error = disable_nonboot_cpus(); - if (error) - goto Enable_cpus; - local_irq_disable(); - error = syscore_suspend(); - if (error) - goto Enable_irqs; - } else -#endif - { - kexec_in_progress = true; - kernel_restart_prepare(NULL); - migrate_to_reboot_cpu(); - - /* - * migrate_to_reboot_cpu() disables CPU hotplug assuming that - * no further code needs to use CPU hotplug (which is true in - * the reboot case). However, the kexec path depends on using - * CPU hotplug again; so re-enable it here. - */ - cpu_hotplug_enable(); - pr_emerg("Starting new kernel\n"); - machine_shutdown(); - } - - machine_kexec(kexec_image); - -#ifdef CONFIG_KEXEC_JUMP - if (kexec_image->preserve_context) { - syscore_resume(); - Enable_irqs: - local_irq_enable(); - Enable_cpus: - enable_nonboot_cpus(); - dpm_resume_start(PMSG_RESTORE); - Resume_devices: - dpm_resume_end(PMSG_RESTORE); - Resume_console: - resume_console(); - thaw_processes(); - Restore_console: - pm_restore_console(); - unlock_system_sleep(); - } -#endif - - Unlock: - mutex_unlock(&kexec_mutex); - return error; -} diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c new file mode 100644 index 000000000000..9aa25c034b2e --- /dev/null +++ b/kernel/kexec_core.c @@ -0,0 +1,1511 @@ +/* + * kexec.c - kexec system call core code. + * Copyright (C) 2002-2004 Eric Biederman + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. 
+ */
+
+#define pr_fmt(fmt) "kexec: " fmt
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+
+#include 
+#include 
+#include "kexec_internal.h"
+
+DEFINE_MUTEX(kexec_mutex);
+
+/* Per cpu memory for storing cpu states in case of system crash. */
+note_buf_t __percpu *crash_notes;
+
+/* vmcoreinfo stuff */
+static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
+u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
+size_t vmcoreinfo_size;
+size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
+
+/* Flag to indicate we are going to kexec a new kernel */
+bool kexec_in_progress = false;
+
+
+/* Location of the reserved area for the crash kernel */
+struct resource crashk_res = {
+	.name  = "Crash kernel",
+	.start = 0,
+	.end   = 0,
+	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+struct resource crashk_low_res = {
+	.name  = "Crash kernel",
+	.start = 0,
+	.end   = 0,
+	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
+};
+
+int kexec_should_crash(struct task_struct *p)
+{
+	/*
+	 * If crash_kexec_post_notifiers is enabled, don't run
+	 * crash_kexec() here yet, which must be run after panic
+	 * notifiers in panic().
+	 */
+	if (crash_kexec_post_notifiers)
+		return 0;
+	/*
+	 * There are 4 panic() calls in the do_exit() path, each of which
+	 * corresponds to one of these 4 conditions.
+	 */
+	if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
+		return 1;
+	return 0;
+}
+
+/*
+ * When kexec transitions to the new kernel there is a one-to-one
+ * mapping between physical and virtual addresses.  On processors
+ * where you can disable the MMU this is trivial, and easy.  For
+ * others it is still a simple predictable page table to setup.
+ *
+ * In that environment kexec copies the new kernel to its final
+ * resting place.  This means I can only support memory whose
+ * physical address can fit in an unsigned long.  In particular
+ * addresses where (pfn << PAGE_SHIFT) > ULONG_MAX cannot be handled.
+ * If the assembly stub has more restrictive requirements
+ * KEXEC_SOURCE_MEMORY_LIMIT and KEXEC_DEST_MEMORY_LIMIT can be
+ * defined more restrictively in <asm/kexec.h>.
+ *
+ * The code for the transition from the current kernel to the
+ * new kernel is placed in the control_code_buffer, whose size
+ * is given by KEXEC_CONTROL_PAGE_SIZE.  In the best case only a single
+ * page of memory is necessary, but some architectures require more.
+ * Because this memory must be identity mapped in the transition from
+ * virtual to physical addresses it must live in the range
+ * 0 - TASK_SIZE, as only the user space mappings are arbitrarily
+ * modifiable.
+ *
+ * The assembly stub in the control code buffer is passed a linked list
+ * of descriptor pages detailing the source pages of the new kernel,
+ * and the destination addresses of those source pages.  As this data
+ * structure is not used in the context of the current OS, it must
+ * be self-contained.
+ *
+ * The code has been made to work with highmem pages and will use a
+ * destination page in its final resting place (if it happens
+ * to allocate it).  The end product of this is that most of the
+ * physical address space, and most of RAM can be used.
+ *
+ * Future directions include:
+ *  - allocating a page table with the control code buffer identity
+ *    mapped, to simplify machine_kexec and make kexec_on_panic more
+ *    reliable.
+ */
+
+/*
+ * KIMAGE_NO_DEST is an impossible destination address..., for
+ * allocating pages whose destination address we do not care about.
+ */
+#define KIMAGE_NO_DEST (-1UL)
+
+static struct page *kimage_alloc_page(struct kimage *image,
+				       gfp_t gfp_mask,
+				       unsigned long dest);
+
+int sanity_check_segment_list(struct kimage *image)
+{
+	int result, i;
+	unsigned long nr_segments = image->nr_segments;
+
+	/*
+	 * Verify we have good destination addresses.  The caller is
+	 * responsible for making certain we don't attempt to load
+	 * the new image into invalid or reserved areas of RAM.  This
+	 * just verifies it is an address we can use.
+	 *
+	 * Since the kernel does everything in page size chunks, ensure
+	 * the destination addresses are page aligned.  Too many
+	 * special cases crop up when we don't do this.  The most
+	 * insidious is getting overlapping destination addresses
+	 * simply because addresses are changed to page size
+	 * granularity.
+	 */
+	result = -EADDRNOTAVAIL;
+	for (i = 0; i < nr_segments; i++) {
+		unsigned long mstart, mend;
+
+		mstart = image->segment[i].mem;
+		mend = mstart + image->segment[i].memsz;
+		if ((mstart & ~PAGE_MASK) || (mend & ~PAGE_MASK))
+			return result;
+		if (mend >= KEXEC_DESTINATION_MEMORY_LIMIT)
+			return result;
+	}
+
+	/* Verify our destination addresses do not overlap.
+	 * If we allowed overlapping destination addresses
+	 * through, very weird things can happen with no
+	 * easy explanation as one segment stops on another.
+	 */
+	result = -EINVAL;
+	for (i = 0; i < nr_segments; i++) {
+		unsigned long mstart, mend;
+		unsigned long j;
+
+		mstart = image->segment[i].mem;
+		mend = mstart + image->segment[i].memsz;
+		for (j = 0; j < i; j++) {
+			unsigned long pstart, pend;
+
+			pstart = image->segment[j].mem;
+			pend = pstart + image->segment[j].memsz;
+			/* Do the segments overlap? */
+			if ((mend > pstart) && (mstart < pend))
+				return result;
+		}
+	}
+
+	/* Ensure our buffer sizes are strictly less than
+	 * our memory sizes.  This should always be the case,
+	 * and it is easier to check up front than to be surprised
+	 * later on.
+	 */
+	result = -EINVAL;
+	for (i = 0; i < nr_segments; i++) {
+		if (image->segment[i].bufsz > image->segment[i].memsz)
+			return result;
+	}
+
+	/*
+	 * Verify we have good destination addresses.  Normally
+	 * the caller is responsible for making certain we don't
+	 * attempt to load the new image into invalid or reserved
+	 * areas of RAM.  But crash kernels are preloaded into a
+	 * reserved area of ram.  We must ensure the addresses
+	 * are in the reserved area otherwise preloading the
+	 * kernel could corrupt things.
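The overlap test in sanity_check_segment_list() treats each segment as a half-open range; pulled out as a stand-alone user-space sketch (addresses are made up), the predicate is:

	#include <assert.h>

	/* Half-open ranges [as, ae) and [bs, be) overlap iff each one
	 * begins before the other ends -- the same shape as the
	 * (mend > pstart) && (mstart < pend) check above. */
	static int overlaps(unsigned long as, unsigned long ae,
			    unsigned long bs, unsigned long be)
	{
		return (ae > bs) && (as < be);
	}

	int main(void)
	{
		assert(overlaps(0x1000, 0x3000, 0x2000, 0x4000));
		assert(!overlaps(0x1000, 0x2000, 0x2000, 0x3000));
		return 0;
	}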
+ */ + + if (image->type == KEXEC_TYPE_CRASH) { + result = -EADDRNOTAVAIL; + for (i = 0; i < nr_segments; i++) { + unsigned long mstart, mend; + + mstart = image->segment[i].mem; + mend = mstart + image->segment[i].memsz - 1; + /* Ensure we are within the crash kernel limits */ + if ((mstart < crashk_res.start) || + (mend > crashk_res.end)) + return result; + } + } + + return 0; +} + +struct kimage *do_kimage_alloc_init(void) +{ + struct kimage *image; + + /* Allocate a controlling structure */ + image = kzalloc(sizeof(*image), GFP_KERNEL); + if (!image) + return NULL; + + image->head = 0; + image->entry = &image->head; + image->last_entry = &image->head; + image->control_page = ~0; /* By default this does not apply */ + image->type = KEXEC_TYPE_DEFAULT; + + /* Initialize the list of control pages */ + INIT_LIST_HEAD(&image->control_pages); + + /* Initialize the list of destination pages */ + INIT_LIST_HEAD(&image->dest_pages); + + /* Initialize the list of unusable pages */ + INIT_LIST_HEAD(&image->unusable_pages); + + return image; +} + +int kimage_is_destination_range(struct kimage *image, + unsigned long start, + unsigned long end) +{ + unsigned long i; + + for (i = 0; i < image->nr_segments; i++) { + unsigned long mstart, mend; + + mstart = image->segment[i].mem; + mend = mstart + image->segment[i].memsz; + if ((end > mstart) && (start < mend)) + return 1; + } + + return 0; +} + +static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) +{ + struct page *pages; + + pages = alloc_pages(gfp_mask, order); + if (pages) { + unsigned int count, i; + + pages->mapping = NULL; + set_page_private(pages, order); + count = 1 << order; + for (i = 0; i < count; i++) + SetPageReserved(pages + i); + } + + return pages; +} + +static void kimage_free_pages(struct page *page) +{ + unsigned int order, count, i; + + order = page_private(page); + count = 1 << order; + for (i = 0; i < count; i++) + ClearPageReserved(page + i); + __free_pages(page, order); +} + +void kimage_free_page_list(struct list_head *list) +{ + struct list_head *pos, *next; + + list_for_each_safe(pos, next, list) { + struct page *page; + + page = list_entry(pos, struct page, lru); + list_del(&page->lru); + kimage_free_pages(page); + } +} + +static struct page *kimage_alloc_normal_control_pages(struct kimage *image, + unsigned int order) +{ + /* Control pages are special, they are the intermediaries + * that are needed while we copy the rest of the pages + * to their final resting place. As such they must + * not conflict with either the destination addresses + * or memory the kernel is already using. + * + * The only case where we really need more than one of + * these are for architectures where we cannot disable + * the MMU and must instead generate an identity mapped + * page table for all of the memory. + * + * At worst this runs in O(N) of the image size. + */ + struct list_head extra_pages; + struct page *pages; + unsigned int count; + + count = 1 << order; + INIT_LIST_HEAD(&extra_pages); + + /* Loop while I can allocate a page and the page allocated + * is a destination page. 
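kimage_alloc_pages() above stashes the allocation order in page_private so that kimage_free_pages() can later recover how many pages to release; the bookkeeping is just powers of two. A stand-alone sketch, assuming 4 KiB pages:

	#include <stdio.h>

	int main(void)
	{
		unsigned int order;

		/* An order-n allocation covers 1 << n contiguous pages,
		 * which is the count both helpers walk when setting and
		 * clearing PageReserved. */
		for (order = 0; order <= 3; order++)
			printf("order %u -> %u pages (%u KiB)\n",
			       order, 1u << order, (1u << order) * 4u);
		return 0;
	}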
+	 */
+	do {
+		unsigned long pfn, epfn, addr, eaddr;
+
+		pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
+		if (!pages)
+			break;
+		pfn = page_to_pfn(pages);
+		epfn = pfn + count;
+		addr = pfn << PAGE_SHIFT;
+		eaddr = epfn << PAGE_SHIFT;
+		if ((epfn >= (KEXEC_CONTROL_MEMORY_LIMIT >> PAGE_SHIFT)) ||
+			      kimage_is_destination_range(image, addr, eaddr)) {
+			list_add(&pages->lru, &extra_pages);
+			pages = NULL;
+		}
+	} while (!pages);
+
+	if (pages) {
+		/* Remember the allocated page... */
+		list_add(&pages->lru, &image->control_pages);
+
+		/* Because the page is already in its destination
+		 * location we will never allocate another page at
+		 * that address.  Therefore kimage_alloc_pages
+		 * will not return it (again) and we don't need
+		 * to give it an entry in image->segment[].
+		 */
+	}
+	/* Deal with the destination pages I have inadvertently allocated.
+	 *
+	 * Ideally I would convert multi-page allocations into single
+	 * page allocations, and add everything to image->dest_pages.
+	 *
+	 * For now it is simpler to just free the pages.
+	 */
+	kimage_free_page_list(&extra_pages);
+
+	return pages;
+}
+
+static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
+						      unsigned int order)
+{
+	/* Control pages are special, they are the intermediaries
+	 * that are needed while we copy the rest of the pages
+	 * to their final resting place.  As such they must
+	 * not conflict with either the destination addresses
+	 * or memory the kernel is already using.
+	 *
+	 * Control pages are also the only pages we must allocate
+	 * when loading a crash kernel.  All of the other pages
+	 * are specified by the segments and we just memcpy
+	 * into them directly.
+	 *
+	 * The only case where we really need more than one of
+	 * these are for architectures where we cannot disable
+	 * the MMU and must instead generate an identity mapped
+	 * page table for all of the memory.
+	 *
+	 * Given the low demand this implements a very simple
+	 * allocator that finds the first hole of the appropriate
+	 * size in the reserved memory region, and allocates all
+	 * of the memory up to and including the hole.
+	 */
+	unsigned long hole_start, hole_end, size;
+	struct page *pages;
+
+	pages = NULL;
+	size = (1 << order) << PAGE_SHIFT;
+	hole_start = (image->control_page + (size - 1)) & ~(size - 1);
+	hole_end = hole_start + size - 1;
+	while (hole_end <= crashk_res.end) {
+		unsigned long i;
+
+		if (hole_end > KEXEC_CRASH_CONTROL_MEMORY_LIMIT)
+			break;
+		/* See if I overlap any of the segments */
+		for (i = 0; i < image->nr_segments; i++) {
+			unsigned long mstart, mend;
+
+			mstart = image->segment[i].mem;
+			mend = mstart + image->segment[i].memsz - 1;
+			if ((hole_end >= mstart) && (hole_start <= mend)) {
+				/* Advance the hole to the end of the segment */
+				hole_start = (mend + (size - 1)) & ~(size - 1);
+				hole_end = hole_start + size - 1;
+				break;
+			}
+		}
+		/* If I don't overlap any segments I have found my hole!
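The hole search above keeps hole_start aligned with the usual power-of-two round-up idiom, (x + (size - 1)) & ~(size - 1). In isolation, with made-up numbers:

	#include <assert.h>

	/* Round x up to the next multiple of size; size must be a
	 * power of two, as (1 << order) << PAGE_SHIFT always is. */
	static unsigned long round_up_pow2(unsigned long x, unsigned long size)
	{
		return (x + (size - 1)) & ~(size - 1);
	}

	int main(void)
	{
		assert(round_up_pow2(0x12345, 0x1000) == 0x13000);
		assert(round_up_pow2(0x13000, 0x1000) == 0x13000);
		return 0;
	}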
*/ + if (i == image->nr_segments) { + pages = pfn_to_page(hole_start >> PAGE_SHIFT); + break; + } + } + if (pages) + image->control_page = hole_end; + + return pages; +} + + +struct page *kimage_alloc_control_pages(struct kimage *image, + unsigned int order) +{ + struct page *pages = NULL; + + switch (image->type) { + case KEXEC_TYPE_DEFAULT: + pages = kimage_alloc_normal_control_pages(image, order); + break; + case KEXEC_TYPE_CRASH: + pages = kimage_alloc_crash_control_pages(image, order); + break; + } + + return pages; +} + +static int kimage_add_entry(struct kimage *image, kimage_entry_t entry) +{ + if (*image->entry != 0) + image->entry++; + + if (image->entry == image->last_entry) { + kimage_entry_t *ind_page; + struct page *page; + + page = kimage_alloc_page(image, GFP_KERNEL, KIMAGE_NO_DEST); + if (!page) + return -ENOMEM; + + ind_page = page_address(page); + *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION; + image->entry = ind_page; + image->last_entry = ind_page + + ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1); + } + *image->entry = entry; + image->entry++; + *image->entry = 0; + + return 0; +} + +static int kimage_set_destination(struct kimage *image, + unsigned long destination) +{ + int result; + + destination &= PAGE_MASK; + result = kimage_add_entry(image, destination | IND_DESTINATION); + + return result; +} + + +static int kimage_add_page(struct kimage *image, unsigned long page) +{ + int result; + + page &= PAGE_MASK; + result = kimage_add_entry(image, page | IND_SOURCE); + + return result; +} + + +static void kimage_free_extra_pages(struct kimage *image) +{ + /* Walk through and free any extra destination pages I may have */ + kimage_free_page_list(&image->dest_pages); + + /* Walk through and free any unusable pages I have cached */ + kimage_free_page_list(&image->unusable_pages); + +} +void kimage_terminate(struct kimage *image) +{ + if (*image->entry != 0) + image->entry++; + + *image->entry = IND_DONE; +} + +#define for_each_kimage_entry(image, ptr, entry) \ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \ + ptr = (entry & IND_INDIRECTION) ? \ + phys_to_virt((entry & PAGE_MASK)) : ptr + 1) + +static void kimage_free_entry(kimage_entry_t entry) +{ + struct page *page; + + page = pfn_to_page(entry >> PAGE_SHIFT); + kimage_free_pages(page); +} + +void kimage_free(struct kimage *image) +{ + kimage_entry_t *ptr, entry; + kimage_entry_t ind = 0; + + if (!image) + return; + + kimage_free_extra_pages(image); + for_each_kimage_entry(image, ptr, entry) { + if (entry & IND_INDIRECTION) { + /* Free the previous indirection page */ + if (ind & IND_INDIRECTION) + kimage_free_entry(ind); + /* Save this indirection page until we are + * done with it. + */ + ind = entry; + } else if (entry & IND_SOURCE) + kimage_free_entry(entry); + } + /* Free the final indirection page */ + if (ind & IND_INDIRECTION) + kimage_free_entry(ind); + + /* Handle any machine specific cleanup */ + machine_kexec_cleanup(image); + + /* Free the kexec control pages... */ + kimage_free_page_list(&image->control_pages); + + /* + * Free up any temporary buffers allocated. This might hit if + * error occurred much later after buffer allocation. 
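For readers following kimage_add_entry() and the for_each_kimage_entry() walker above: each entry packs a page-aligned physical address together with a type flag in the low bits. A stand-alone sketch of the encoding, with the flag values as defined for kimage entries in <linux/kexec.h> and 4 KiB pages assumed:

	#include <assert.h>

	#define IND_DESTINATION 0x1
	#define IND_INDIRECTION 0x2
	#define IND_DONE        0x4
	#define IND_SOURCE      0x8
	#define DEMO_PAGE_MASK  (~0xfffUL)	/* assuming 4 KiB pages */

	typedef unsigned long kimage_entry_t;

	static kimage_entry_t make_entry(unsigned long phys, unsigned long flag)
	{
		return (phys & DEMO_PAGE_MASK) | flag;
	}

	int main(void)
	{
		kimage_entry_t e = make_entry(0x1234000, IND_SOURCE);

		assert(e & IND_SOURCE);
		assert((e & DEMO_PAGE_MASK) == 0x1234000);
		return 0;
	}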
+	 */
+	if (image->file_mode)
+		kimage_file_post_load_cleanup(image);
+
+	kfree(image);
+}
+
+static kimage_entry_t *kimage_dst_used(struct kimage *image,
+					unsigned long page)
+{
+	kimage_entry_t *ptr, entry;
+	unsigned long destination = 0;
+
+	for_each_kimage_entry(image, ptr, entry) {
+		if (entry & IND_DESTINATION)
+			destination = entry & PAGE_MASK;
+		else if (entry & IND_SOURCE) {
+			if (page == destination)
+				return ptr;
+			destination += PAGE_SIZE;
+		}
+	}
+
+	return NULL;
+}
+
+static struct page *kimage_alloc_page(struct kimage *image,
+					gfp_t gfp_mask,
+					unsigned long destination)
+{
+	/*
+	 * Here we implement safeguards to ensure that a source page
+	 * is not copied to its destination page before the data on
+	 * the destination page is no longer useful.
+	 *
+	 * To do this we maintain the invariant that a source page is
+	 * either its own destination page, or it is not a
+	 * destination page at all.
+	 *
+	 * That is slightly stronger than required, but the proof
+	 * that no problems will occur is trivial, and the
+	 * implementation is simple to verify.
+	 *
+	 * When allocating all pages normally this algorithm will run
+	 * in O(N) time, but in the worst case it will run in O(N^2)
+	 * time.  If the runtime is a problem the data structures can
+	 * be fixed.
+	 */
+	struct page *page;
+	unsigned long addr;
+
+	/*
+	 * Walk through the list of destination pages, and see if I
+	 * have a match.
+	 */
+	list_for_each_entry(page, &image->dest_pages, lru) {
+		addr = page_to_pfn(page) << PAGE_SHIFT;
+		if (addr == destination) {
+			list_del(&page->lru);
+			return page;
+		}
+	}
+	page = NULL;
+	while (1) {
+		kimage_entry_t *old;
+
+		/* Allocate a page; if we run out of memory, give up */
+		page = kimage_alloc_pages(gfp_mask, 0);
+		if (!page)
+			return NULL;
+		/* If the page cannot be used, file it away */
+		if (page_to_pfn(page) >
+				(KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
+			list_add(&page->lru, &image->unusable_pages);
+			continue;
+		}
+		addr = page_to_pfn(page) << PAGE_SHIFT;
+
+		/* If it is the destination page we want, use it */
+		if (addr == destination)
+			break;
+
+		/* If the page is not a destination page use it */
+		if (!kimage_is_destination_range(image, addr,
+						  addr + PAGE_SIZE))
+			break;
+
+		/*
+		 * I know that the page is someone's destination page.
+		 * See if there is already a source page for this
+		 * destination page.  And if so swap the source pages.
+		 */
+		old = kimage_dst_used(image, addr);
+		if (old) {
+			/* If so move it */
+			unsigned long old_addr;
+			struct page *old_page;
+
+			old_addr = *old & PAGE_MASK;
+			old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
+			copy_highpage(page, old_page);
+			*old = addr | (*old & ~PAGE_MASK);
+
+			/* The old page I have found cannot be a
+			 * destination page, so return it if its
+			 * gfp_flags honor the ones passed in.
+			 */
+			if (!(gfp_mask & __GFP_HIGHMEM) &&
+			    PageHighMem(old_page)) {
+				kimage_free_pages(old_page);
+				continue;
+			}
+			addr = old_addr;
+			page = old_page;
+			break;
+		}
+		/* Place the page on the destination list, to be used later */
+		list_add(&page->lru, &image->dest_pages);
+	}
+
+	return page;
+}
+
+static int kimage_load_normal_segment(struct kimage *image,
+					 struct kexec_segment *segment)
+{
+	unsigned long maddr;
+	size_t ubytes, mbytes;
+	int result;
+	unsigned char __user *buf = NULL;
+	unsigned char *kbuf = NULL;
+
+	result = 0;
+	if (image->file_mode)
+		kbuf = segment->kbuf;
+	else
+		buf = segment->buf;
+	ubytes = segment->bufsz;
+	mbytes = segment->memsz;
+	maddr = segment->mem;
+
+	result = kimage_set_destination(image, maddr);
+	if (result < 0)
+		goto out;
+
+	while (mbytes) {
+		struct page *page;
+		char *ptr;
+		size_t uchunk, mchunk;
+
+		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
+		if (!page) {
+			result = -ENOMEM;
+			goto out;
+		}
+		result = kimage_add_page(image, page_to_pfn(page)
+								<< PAGE_SHIFT);
+		if (result < 0)
+			goto out;
+
+		ptr = kmap(page);
+		/* Start with a clear page */
+		clear_page(ptr);
+		ptr += maddr & ~PAGE_MASK;
+		mchunk = min_t(size_t, mbytes,
+				PAGE_SIZE - (maddr & ~PAGE_MASK));
+		uchunk = min(ubytes, mchunk);
+
+		/* For file based kexec, source pages are in kernel memory */
+		if (image->file_mode)
+			memcpy(ptr, kbuf, uchunk);
+		else
+			result = copy_from_user(ptr, buf, uchunk);
+		kunmap(page);
+		if (result) {
+			result = -EFAULT;
+			goto out;
+		}
+		ubytes -= uchunk;
+		maddr += mchunk;
+		if (image->file_mode)
+			kbuf += mchunk;
+		else
+			buf += mchunk;
+		mbytes -= mchunk;
+	}
+out:
+	return result;
+}
+
+static int kimage_load_crash_segment(struct kimage *image,
+					struct kexec_segment *segment)
+{
+	/* For crash dump kernels we simply copy the data from
+	 * user space to its destination.
+	 * We do things a page at a time for the sake of kmap.
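Both segment loaders split the copy at page boundaries: mchunk is how much of the current destination page can be filled, and uchunk how much of that comes from the source buffer (the remainder is zeroed). The chunking arithmetic in isolation, as a sketch assuming 4 KiB pages:

	#include <assert.h>
	#include <stddef.h>

	#define DEMO_PAGE_SIZE 4096UL	/* assumption for the sketch */

	static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

	/* Bytes that land in the page containing maddr this iteration. */
	static size_t mem_chunk(unsigned long maddr, size_t mbytes)
	{
		return min_sz(mbytes,
			      DEMO_PAGE_SIZE - (maddr & (DEMO_PAGE_SIZE - 1)));
	}

	int main(void)
	{
		/* A destination starting 0x100 bytes into a page only
		 * fills the rest of that page on the first pass. */
		assert(mem_chunk(0x2100, 0x5000) == 0xf00);
		assert(mem_chunk(0x3000, 0x100) == 0x100);
		return 0;
	}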
+ */ + unsigned long maddr; + size_t ubytes, mbytes; + int result; + unsigned char __user *buf = NULL; + unsigned char *kbuf = NULL; + + result = 0; + if (image->file_mode) + kbuf = segment->kbuf; + else + buf = segment->buf; + ubytes = segment->bufsz; + mbytes = segment->memsz; + maddr = segment->mem; + while (mbytes) { + struct page *page; + char *ptr; + size_t uchunk, mchunk; + + page = pfn_to_page(maddr >> PAGE_SHIFT); + if (!page) { + result = -ENOMEM; + goto out; + } + ptr = kmap(page); + ptr += maddr & ~PAGE_MASK; + mchunk = min_t(size_t, mbytes, + PAGE_SIZE - (maddr & ~PAGE_MASK)); + uchunk = min(ubytes, mchunk); + if (mchunk > uchunk) { + /* Zero the trailing part of the page */ + memset(ptr + uchunk, 0, mchunk - uchunk); + } + + /* For file based kexec, source pages are in kernel memory */ + if (image->file_mode) + memcpy(ptr, kbuf, uchunk); + else + result = copy_from_user(ptr, buf, uchunk); + kexec_flush_icache_page(page); + kunmap(page); + if (result) { + result = -EFAULT; + goto out; + } + ubytes -= uchunk; + maddr += mchunk; + if (image->file_mode) + kbuf += mchunk; + else + buf += mchunk; + mbytes -= mchunk; + } +out: + return result; +} + +int kimage_load_segment(struct kimage *image, + struct kexec_segment *segment) +{ + int result = -ENOMEM; + + switch (image->type) { + case KEXEC_TYPE_DEFAULT: + result = kimage_load_normal_segment(image, segment); + break; + case KEXEC_TYPE_CRASH: + result = kimage_load_crash_segment(image, segment); + break; + } + + return result; +} + +struct kimage *kexec_image; +struct kimage *kexec_crash_image; +int kexec_load_disabled; + +void crash_kexec(struct pt_regs *regs) +{ + /* Take the kexec_mutex here to prevent sys_kexec_load + * running on one cpu from replacing the crash kernel + * we are using after a panic on a different cpu. + * + * If the crash kernel was not located in a fixed area + * of memory the xchg(&kexec_crash_image) would be + * sufficient. But since I reuse the memory... + */ + if (mutex_trylock(&kexec_mutex)) { + if (kexec_crash_image) { + struct pt_regs fixed_regs; + + crash_setup_regs(&fixed_regs, regs); + crash_save_vmcoreinfo(); + machine_crash_shutdown(&fixed_regs); + machine_kexec(kexec_crash_image); + } + mutex_unlock(&kexec_mutex); + } +} + +size_t crash_get_memory_size(void) +{ + size_t size = 0; + + mutex_lock(&kexec_mutex); + if (crashk_res.end != crashk_res.start) + size = resource_size(&crashk_res); + mutex_unlock(&kexec_mutex); + return size; +} + +void __weak crash_free_reserved_phys_range(unsigned long begin, + unsigned long end) +{ + unsigned long addr; + + for (addr = begin; addr < end; addr += PAGE_SIZE) + free_reserved_page(pfn_to_page(addr >> PAGE_SHIFT)); +} + +int crash_shrink_memory(unsigned long new_size) +{ + int ret = 0; + unsigned long start, end; + unsigned long old_size; + struct resource *ram_res; + + mutex_lock(&kexec_mutex); + + if (kexec_crash_image) { + ret = -ENOENT; + goto unlock; + } + start = crashk_res.start; + end = crashk_res.end; + old_size = (end == 0) ? 0 : end - start + 1; + if (new_size >= old_size) { + ret = (new_size == old_size) ? 
0 : -EINVAL; + goto unlock; + } + + ram_res = kzalloc(sizeof(*ram_res), GFP_KERNEL); + if (!ram_res) { + ret = -ENOMEM; + goto unlock; + } + + start = roundup(start, KEXEC_CRASH_MEM_ALIGN); + end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN); + + crash_map_reserved_pages(); + crash_free_reserved_phys_range(end, crashk_res.end); + + if ((start == end) && (crashk_res.parent != NULL)) + release_resource(&crashk_res); + + ram_res->start = end; + ram_res->end = crashk_res.end; + ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; + ram_res->name = "System RAM"; + + crashk_res.end = end - 1; + + insert_resource(&iomem_resource, ram_res); + crash_unmap_reserved_pages(); + +unlock: + mutex_unlock(&kexec_mutex); + return ret; +} + +static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, + size_t data_len) +{ + struct elf_note note; + + note.n_namesz = strlen(name) + 1; + note.n_descsz = data_len; + note.n_type = type; + memcpy(buf, ¬e, sizeof(note)); + buf += (sizeof(note) + 3)/4; + memcpy(buf, name, note.n_namesz); + buf += (note.n_namesz + 3)/4; + memcpy(buf, data, note.n_descsz); + buf += (note.n_descsz + 3)/4; + + return buf; +} + +static void final_note(u32 *buf) +{ + struct elf_note note; + + note.n_namesz = 0; + note.n_descsz = 0; + note.n_type = 0; + memcpy(buf, ¬e, sizeof(note)); +} + +void crash_save_cpu(struct pt_regs *regs, int cpu) +{ + struct elf_prstatus prstatus; + u32 *buf; + + if ((cpu < 0) || (cpu >= nr_cpu_ids)) + return; + + /* Using ELF notes here is opportunistic. + * I need a well defined structure format + * for the data I pass, and I need tags + * on the data to indicate what information I have + * squirrelled away. ELF notes happen to provide + * all of that, so there is no need to invent something new. + */ + buf = (u32 *)per_cpu_ptr(crash_notes, cpu); + if (!buf) + return; + memset(&prstatus, 0, sizeof(prstatus)); + prstatus.pr_pid = current->pid; + elf_core_copy_kernel_regs(&prstatus.pr_reg, regs); + buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, + &prstatus, sizeof(prstatus)); + final_note(buf); +} + +static int __init crash_notes_memory_init(void) +{ + /* Allocate memory for saving cpu registers. */ + crash_notes = alloc_percpu(note_buf_t); + if (!crash_notes) { + pr_warn("Kexec: Memory allocation for saving cpu register states failed\n"); + return -ENOMEM; + } + return 0; +} +subsys_initcall(crash_notes_memory_init); + + +/* + * parsing the "crashkernel" commandline + * + * this code is intended to be called from architecture specific code + */ + + +/* + * This function parses command lines in the format + * + * crashkernel=ramsize-range:size[,...][@offset] + * + * The function returns 0 on success and -EINVAL on failure. 
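For illustration, a command line the extended parser below would accept (sizes are made up): reserve 64M when system RAM falls in [512M, 2G), 128M at 2G or above, at physical offset 16M in either case:

	crashkernel=512M-2G:64M,2G-:128M@16M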
+ */
+static int __init parse_crashkernel_mem(char *cmdline,
+					unsigned long long system_ram,
+					unsigned long long *crash_size,
+					unsigned long long *crash_base)
+{
+	char *cur = cmdline, *tmp;
+
+	/* for each entry of the comma-separated list */
+	do {
+		unsigned long long start, end = ULLONG_MAX, size;
+
+		/* get the start of the range */
+		start = memparse(cur, &tmp);
+		if (cur == tmp) {
+			pr_warn("crashkernel: Memory value expected\n");
+			return -EINVAL;
+		}
+		cur = tmp;
+		if (*cur != '-') {
+			pr_warn("crashkernel: '-' expected\n");
+			return -EINVAL;
+		}
+		cur++;
+
+		/* if no ':' is here, then we read the end */
+		if (*cur != ':') {
+			end = memparse(cur, &tmp);
+			if (cur == tmp) {
+				pr_warn("crashkernel: Memory value expected\n");
+				return -EINVAL;
+			}
+			cur = tmp;
+			if (end <= start) {
+				pr_warn("crashkernel: end <= start\n");
+				return -EINVAL;
+			}
+		}
+
+		if (*cur != ':') {
+			pr_warn("crashkernel: ':' expected\n");
+			return -EINVAL;
+		}
+		cur++;
+
+		size = memparse(cur, &tmp);
+		if (cur == tmp) {
+			pr_warn("Memory value expected\n");
+			return -EINVAL;
+		}
+		cur = tmp;
+		if (size >= system_ram) {
+			pr_warn("crashkernel: invalid size\n");
+			return -EINVAL;
+		}
+
+		/* match? */
+		if (system_ram >= start && system_ram < end) {
+			*crash_size = size;
+			break;
+		}
+	} while (*cur++ == ',');
+
+	if (*crash_size > 0) {
+		while (*cur && *cur != ' ' && *cur != '@')
+			cur++;
+		if (*cur == '@') {
+			cur++;
+			*crash_base = memparse(cur, &tmp);
+			if (cur == tmp) {
+				pr_warn("Memory value expected after '@'\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * That function parses "simple" (old) crashkernel command lines like
+ *
+ *	crashkernel=size[@offset]
+ *
+ * It returns 0 on success and -EINVAL on failure.
+ */
+static int __init parse_crashkernel_simple(char *cmdline,
+					   unsigned long long *crash_size,
+					   unsigned long long *crash_base)
+{
+	char *cur = cmdline;
+
+	*crash_size = memparse(cmdline, &cur);
+	if (cmdline == cur) {
+		pr_warn("crashkernel: memory value expected\n");
+		return -EINVAL;
+	}
+
+	if (*cur == '@')
+		*crash_base = memparse(cur+1, &cur);
+	else if (*cur != ' ' && *cur != '\0') {
+		pr_warn("crashkernel: unrecognized char\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#define SUFFIX_HIGH 0
+#define SUFFIX_LOW  1
+#define SUFFIX_NULL 2
+static __initdata char *suffix_tbl[] = {
+	[SUFFIX_HIGH] = ",high",
+	[SUFFIX_LOW]  = ",low",
+	[SUFFIX_NULL] = NULL,
+};
+
+/*
+ * That function parses "suffix" crashkernel command lines like
+ *
+ *	crashkernel=size,[high|low]
+ *
+ * It returns 0 on success and -EINVAL on failure.
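Corresponding examples for the simple parser above and the suffix parser whose body follows (again with illustrative sizes): a fixed 128M reservation at offset 16M, and a 256M reservation placed in high memory:

	crashkernel=128M@16M
	crashkernel=256M,high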
+ */ +static int __init parse_crashkernel_suffix(char *cmdline, + unsigned long long *crash_size, + const char *suffix) +{ + char *cur = cmdline; + + *crash_size = memparse(cmdline, &cur); + if (cmdline == cur) { + pr_warn("crashkernel: memory value expected\n"); + return -EINVAL; + } + + /* check with suffix */ + if (strncmp(cur, suffix, strlen(suffix))) { + pr_warn("crashkernel: unrecognized char\n"); + return -EINVAL; + } + cur += strlen(suffix); + if (*cur != ' ' && *cur != '\0') { + pr_warn("crashkernel: unrecognized char\n"); + return -EINVAL; + } + + return 0; +} + +static __init char *get_last_crashkernel(char *cmdline, + const char *name, + const char *suffix) +{ + char *p = cmdline, *ck_cmdline = NULL; + + /* find crashkernel and use the last one if there are more */ + p = strstr(p, name); + while (p) { + char *end_p = strchr(p, ' '); + char *q; + + if (!end_p) + end_p = p + strlen(p); + + if (!suffix) { + int i; + + /* skip the one with any known suffix */ + for (i = 0; suffix_tbl[i]; i++) { + q = end_p - strlen(suffix_tbl[i]); + if (!strncmp(q, suffix_tbl[i], + strlen(suffix_tbl[i]))) + goto next; + } + ck_cmdline = p; + } else { + q = end_p - strlen(suffix); + if (!strncmp(q, suffix, strlen(suffix))) + ck_cmdline = p; + } +next: + p = strstr(p+1, name); + } + + if (!ck_cmdline) + return NULL; + + return ck_cmdline; +} + +static int __init __parse_crashkernel(char *cmdline, + unsigned long long system_ram, + unsigned long long *crash_size, + unsigned long long *crash_base, + const char *name, + const char *suffix) +{ + char *first_colon, *first_space; + char *ck_cmdline; + + BUG_ON(!crash_size || !crash_base); + *crash_size = 0; + *crash_base = 0; + + ck_cmdline = get_last_crashkernel(cmdline, name, suffix); + + if (!ck_cmdline) + return -EINVAL; + + ck_cmdline += strlen(name); + + if (suffix) + return parse_crashkernel_suffix(ck_cmdline, crash_size, + suffix); + /* + * if the commandline contains a ':', then that's the extended + * syntax -- if not, it must be the classic syntax + */ + first_colon = strchr(ck_cmdline, ':'); + first_space = strchr(ck_cmdline, ' '); + if (first_colon && (!first_space || first_colon < first_space)) + return parse_crashkernel_mem(ck_cmdline, system_ram, + crash_size, crash_base); + + return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base); +} + +/* + * That function is the entry point for command line parsing and should be + * called from the arch-specific code. 
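get_last_crashkernel() above matches a suffix against the end of each candidate option rather than the start. The core comparison, pulled out as a user-space sketch; like the kernel code, it assumes the token is at least as long as the suffix:

	#include <assert.h>
	#include <string.h>

	/* Does the token ending at end_p end with suffix? */
	static int ends_with(const char *end_p, const char *suffix)
	{
		size_t n = strlen(suffix);

		return strncmp(end_p - n, suffix, n) == 0;
	}

	int main(void)
	{
		const char *opt = "crashkernel=256M,high";

		assert(ends_with(opt + strlen(opt), ",high"));
		assert(!ends_with(opt + strlen(opt), ",low"));
		return 0;
	}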
+ */ +int __init parse_crashkernel(char *cmdline, + unsigned long long system_ram, + unsigned long long *crash_size, + unsigned long long *crash_base) +{ + return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, + "crashkernel=", NULL); +} + +int __init parse_crashkernel_high(char *cmdline, + unsigned long long system_ram, + unsigned long long *crash_size, + unsigned long long *crash_base) +{ + return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, + "crashkernel=", suffix_tbl[SUFFIX_HIGH]); +} + +int __init parse_crashkernel_low(char *cmdline, + unsigned long long system_ram, + unsigned long long *crash_size, + unsigned long long *crash_base) +{ + return __parse_crashkernel(cmdline, system_ram, crash_size, crash_base, + "crashkernel=", suffix_tbl[SUFFIX_LOW]); +} + +static void update_vmcoreinfo_note(void) +{ + u32 *buf = vmcoreinfo_note; + + if (!vmcoreinfo_size) + return; + buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data, + vmcoreinfo_size); + final_note(buf); +} + +void crash_save_vmcoreinfo(void) +{ + vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds()); + update_vmcoreinfo_note(); +} + +void vmcoreinfo_append_str(const char *fmt, ...) +{ + va_list args; + char buf[0x50]; + size_t r; + + va_start(args, fmt); + r = vscnprintf(buf, sizeof(buf), fmt, args); + va_end(args); + + r = min(r, vmcoreinfo_max_size - vmcoreinfo_size); + + memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r); + + vmcoreinfo_size += r; +} + +/* + * provide an empty default implementation here -- architecture + * code may override this + */ +void __weak arch_crash_save_vmcoreinfo(void) +{} + +unsigned long __weak paddr_vmcoreinfo_note(void) +{ + return __pa((unsigned long)(char *)&vmcoreinfo_note); +} + +static int __init crash_save_vmcoreinfo_init(void) +{ + VMCOREINFO_OSRELEASE(init_uts_ns.name.release); + VMCOREINFO_PAGESIZE(PAGE_SIZE); + + VMCOREINFO_SYMBOL(init_uts_ns); + VMCOREINFO_SYMBOL(node_online_map); +#ifdef CONFIG_MMU + VMCOREINFO_SYMBOL(swapper_pg_dir); +#endif + VMCOREINFO_SYMBOL(_stext); + VMCOREINFO_SYMBOL(vmap_area_list); + +#ifndef CONFIG_NEED_MULTIPLE_NODES + VMCOREINFO_SYMBOL(mem_map); + VMCOREINFO_SYMBOL(contig_page_data); +#endif +#ifdef CONFIG_SPARSEMEM + VMCOREINFO_SYMBOL(mem_section); + VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS); + VMCOREINFO_STRUCT_SIZE(mem_section); + VMCOREINFO_OFFSET(mem_section, section_mem_map); +#endif + VMCOREINFO_STRUCT_SIZE(page); + VMCOREINFO_STRUCT_SIZE(pglist_data); + VMCOREINFO_STRUCT_SIZE(zone); + VMCOREINFO_STRUCT_SIZE(free_area); + VMCOREINFO_STRUCT_SIZE(list_head); + VMCOREINFO_SIZE(nodemask_t); + VMCOREINFO_OFFSET(page, flags); + VMCOREINFO_OFFSET(page, _count); + VMCOREINFO_OFFSET(page, mapping); + VMCOREINFO_OFFSET(page, lru); + VMCOREINFO_OFFSET(page, _mapcount); + VMCOREINFO_OFFSET(page, private); + VMCOREINFO_OFFSET(pglist_data, node_zones); + VMCOREINFO_OFFSET(pglist_data, nr_zones); +#ifdef CONFIG_FLAT_NODE_MEM_MAP + VMCOREINFO_OFFSET(pglist_data, node_mem_map); +#endif + VMCOREINFO_OFFSET(pglist_data, node_start_pfn); + VMCOREINFO_OFFSET(pglist_data, node_spanned_pages); + VMCOREINFO_OFFSET(pglist_data, node_id); + VMCOREINFO_OFFSET(zone, free_area); + VMCOREINFO_OFFSET(zone, vm_stat); + VMCOREINFO_OFFSET(zone, spanned_pages); + VMCOREINFO_OFFSET(free_area, free_list); + VMCOREINFO_OFFSET(list_head, next); + VMCOREINFO_OFFSET(list_head, prev); + VMCOREINFO_OFFSET(vmap_area, va_start); + VMCOREINFO_OFFSET(vmap_area, list); + VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER); + 
log_buf_kexec_setup(); + VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES); + VMCOREINFO_NUMBER(NR_FREE_PAGES); + VMCOREINFO_NUMBER(PG_lru); + VMCOREINFO_NUMBER(PG_private); + VMCOREINFO_NUMBER(PG_swapcache); + VMCOREINFO_NUMBER(PG_slab); +#ifdef CONFIG_MEMORY_FAILURE + VMCOREINFO_NUMBER(PG_hwpoison); +#endif + VMCOREINFO_NUMBER(PG_head_mask); + VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE); +#ifdef CONFIG_HUGETLBFS + VMCOREINFO_SYMBOL(free_huge_page); +#endif + + arch_crash_save_vmcoreinfo(); + update_vmcoreinfo_note(); + + return 0; +} + +subsys_initcall(crash_save_vmcoreinfo_init); + +/* + * Move into place and start executing a preloaded standalone + * executable. If nothing was preloaded return an error. + */ +int kernel_kexec(void) +{ + int error = 0; + + if (!mutex_trylock(&kexec_mutex)) + return -EBUSY; + if (!kexec_image) { + error = -EINVAL; + goto Unlock; + } + +#ifdef CONFIG_KEXEC_JUMP + if (kexec_image->preserve_context) { + lock_system_sleep(); + pm_prepare_console(); + error = freeze_processes(); + if (error) { + error = -EBUSY; + goto Restore_console; + } + suspend_console(); + error = dpm_suspend_start(PMSG_FREEZE); + if (error) + goto Resume_console; + /* At this point, dpm_suspend_start() has been called, + * but *not* dpm_suspend_end(). We *must* call + * dpm_suspend_end() now. Otherwise, drivers for + * some devices (e.g. interrupt controllers) become + * desynchronized with the actual state of the + * hardware at resume time, and evil weirdness ensues. + */ + error = dpm_suspend_end(PMSG_FREEZE); + if (error) + goto Resume_devices; + error = disable_nonboot_cpus(); + if (error) + goto Enable_cpus; + local_irq_disable(); + error = syscore_suspend(); + if (error) + goto Enable_irqs; + } else +#endif + { + kexec_in_progress = true; + kernel_restart_prepare(NULL); + migrate_to_reboot_cpu(); + + /* + * migrate_to_reboot_cpu() disables CPU hotplug assuming that + * no further code needs to use CPU hotplug (which is true in + * the reboot case). However, the kexec path depends on using + * CPU hotplug again; so re-enable it here. 
+ */ + cpu_hotplug_enable(); + pr_emerg("Starting new kernel\n"); + machine_shutdown(); + } + + machine_kexec(kexec_image); + +#ifdef CONFIG_KEXEC_JUMP + if (kexec_image->preserve_context) { + syscore_resume(); + Enable_irqs: + local_irq_enable(); + Enable_cpus: + enable_nonboot_cpus(); + dpm_resume_start(PMSG_RESTORE); + Resume_devices: + dpm_resume_end(PMSG_RESTORE); + Resume_console: + resume_console(); + thaw_processes(); + Restore_console: + pm_restore_console(); + unlock_system_sleep(); + } +#endif + + Unlock: + mutex_unlock(&kexec_mutex); + return error; +} + +/* + * Add and remove page tables for crashkernel memory + * + * Provide an empty default implementation here -- architecture + * code may override this + */ +void __weak crash_map_reserved_pages(void) +{} + +void __weak crash_unmap_reserved_pages(void) +{} diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index 6683ccef9fff..e83b26464061 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c @@ -90,7 +90,7 @@ static ssize_t profiling_store(struct kobject *kobj, KERNEL_ATTR_RW(profiling); #endif -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE static ssize_t kexec_loaded_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -134,7 +134,7 @@ static ssize_t vmcoreinfo_show(struct kobject *kobj, } KERNEL_ATTR_RO(vmcoreinfo); -#endif /* CONFIG_KEXEC */ +#endif /* CONFIG_KEXEC_CORE */ /* whether file capabilities are enabled */ static ssize_t fscaps_show(struct kobject *kobj, @@ -196,7 +196,7 @@ static struct attribute * kernel_attrs[] = { #ifdef CONFIG_PROFILING &profiling_attr.attr, #endif -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE &kexec_loaded_attr.attr, &kexec_crash_loaded_attr.attr, &kexec_crash_size_attr.attr, diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index cf8c24203368..8f0324ef72ab 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -835,7 +835,7 @@ const struct file_operations kmsg_fops = { .release = devkmsg_release, }; -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE /* * This appends the listed symbols to /proc/vmcore * diff --git a/kernel/reboot.c b/kernel/reboot.c index d20c85d9f8c0..bd30a973fe94 100644 --- a/kernel/reboot.c +++ b/kernel/reboot.c @@ -346,7 +346,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd, kernel_restart(buffer); break; -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE case LINUX_REBOOT_CMD_KEXEC: ret = kernel_kexec(); break; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 19b62b522158..715cc57cc66a 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -621,7 +621,7 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif -#ifdef CONFIG_KEXEC +#ifdef CONFIG_KEXEC_CORE { .procname = "kexec_load_disabled", .data = &kexec_load_disabled, -- cgit v1.2.3 From 5b25b13ab08f616efd566347d809b4ece54570d1 Mon Sep 17 00:00:00 2001 From: Mathieu Desnoyers Date: Fri, 11 Sep 2015 13:07:39 -0700 Subject: sys_membarrier(): system-wide memory barrier (generic, x86) Here is an implementation of a new system call, sys_membarrier(), which executes a memory barrier on all threads running on the system. It is implemented by calling synchronize_sched(). It can be used to distribute the cost of user-space memory barriers asymmetrically by transforming pairs of memory barriers into pairs consisting of sys_membarrier() and a compiler barrier. For synchronization primitives that distinguish between read-side and write-side (e.g. 
userspace RCU [1], rwlocks), the read-side can be accelerated significantly by moving the bulk of the memory barrier overhead to the write-side.

The existing applications of which I am aware that would be improved by this system call are as follows:

* Through Userspace RCU library (http://urcu.so)
  - DNS server (Knot DNS) https://www.knot-dns.cz/
  - Network sniffer (http://netsniff-ng.org/)
  - Distributed object storage (https://sheepdog.github.io/sheepdog/)
  - User-space tracing (http://lttng.org)
  - Network storage system (https://www.gluster.org/)
  - Virtual routers (https://events.linuxfoundation.org/sites/events/files/slides/DPDK_RCU_0MQ.pdf)
  - Financial software (https://lkml.org/lkml/2015/3/23/189)

Those projects use RCU in userspace to increase read-side speed and scalability compared to locking. Especially in the case of RCU used by libraries, sys_membarrier can speed up the read-side by moving the bulk of the memory barrier cost to synchronize_rcu().

* Direct users of sys_membarrier
  - core dotnet garbage collector (https://github.com/dotnet/coreclr/issues/198)

Microsoft core dotnet GC developers are planning to use the mprotect() side-effect of issuing memory barriers through IPIs as a way to implement Windows FlushProcessWriteBuffers() on Linux. They are referring to sys_membarrier in their github thread, specifically stating that sys_membarrier() is what they are looking for.

To explain the benefit of this scheme, let's introduce two example threads:

Thread A (non-frequent, e.g. executing liburcu synchronize_rcu())
Thread B (frequent, e.g. executing liburcu rcu_read_lock()/rcu_read_unlock())

In a scheme where all smp_mb() in thread A are ordering memory accesses with respect to smp_mb() present in Thread B, we can change each smp_mb() within Thread A into calls to sys_membarrier() and each smp_mb() within Thread B into compiler barriers "barrier()".

Before the change, we had, for each smp_mb() pair:

Thread A                      Thread B
previous mem accesses         previous mem accesses
smp_mb()                      smp_mb()
following mem accesses        following mem accesses

After the change, these pairs become:

Thread A                      Thread B
prev mem accesses             prev mem accesses
sys_membarrier()              barrier()
follow mem accesses           follow mem accesses

As we can see, there are two possible scenarios: either Thread B memory accesses do not happen concurrently with Thread A accesses (1), or they do (2).

1) Non-concurrent Thread A vs Thread B accesses:

Thread A                      Thread B
prev mem accesses
sys_membarrier()
follow mem accesses
                              prev mem accesses
                              barrier()
                              follow mem accesses

In this case, thread B accesses will be weakly ordered. This is OK, because at that point, thread A is not particularly interested in ordering them with respect to its own accesses.

2) Concurrent Thread A vs Thread B accesses:

Thread A                      Thread B
prev mem accesses             prev mem accesses
sys_membarrier()              barrier()
follow mem accesses           follow mem accesses

In this case, thread B accesses, which are ensured to be in program order thanks to the compiler barrier, will be "upgraded" to full smp_mb() by synchronize_sched().

* Benchmarks

On Intel Xeon E5405 (8 cores)
(one thread is calling sys_membarrier, the other 7 threads are busy looping)

1000 non-expedited sys_membarrier calls in 33s = 33 milliseconds/call.

* User-space user of this system call: Userspace RCU library

Both the signal-based and the sys_membarrier userspace RCU schemes permit us to remove the memory barrier from the userspace RCU rcu_read_lock() and rcu_read_unlock() primitives, thus significantly accelerating them.
These memory barriers are replaced by compiler barriers on the read-side, and all matching memory barriers on the write-side are turned into an invocation of a memory barrier on all active threads in the process. By letting the kernel perform this synchronization rather than dumbly sending a signal to all of the process's threads (as we currently do), we diminish the number of unnecessary wake-ups and only issue the memory barriers on active threads. Non-running threads do not need to execute such a barrier anyway, because these are implied by the scheduler context switches.

Results in liburcu:

Operations in 10s, 6 readers, 2 writers:

memory barriers in reader:    1701557485 reads, 2202847 writes
signal-based scheme:          9830061167 reads,    6700 writes
sys_membarrier:               9952759104 reads,     425 writes
sys_membarrier (dyn. check):  7970328887 reads,     425 writes

The dynamic sys_membarrier availability check adds some overhead to the read-side compared to the signal-based scheme, but besides that, sys_membarrier slightly outperforms the signal-based scheme. However, this non-expedited sys_membarrier implementation has a much slower grace period than the signal and memory-barrier schemes.

Besides diminishing the number of wake-ups, one major advantage of the membarrier system call over the signal-based scheme is that it does not need to reserve a signal. This plays much more nicely with libraries, and with processes that are injected into for tracing purposes, for which we cannot expect that signals will be unused by the application.

An expedited version of this system call can be added later on to speed up the grace period. Its implementation will likely depend on reading the cpu_curr()->mm without holding each CPU's rq lock.

This patch adds the system call to x86 and to asm-generic.

[1] http://urcu.so

membarrier(2) man page:

MEMBARRIER(2)              Linux Programmer's Manual             MEMBARRIER(2)

NAME
       membarrier - issue memory barriers on a set of threads

SYNOPSIS
       #include <linux/membarrier.h>

       int membarrier(int cmd, int flags);

DESCRIPTION
       The cmd argument is one of the following:

       MEMBARRIER_CMD_QUERY
              Query the set of supported commands. It returns a bitmask of
              supported commands.

       MEMBARRIER_CMD_SHARED
              Execute a memory barrier on all threads running on the system.
              Upon return from system call, the caller thread is ensured that
              all running threads have passed through a state where all
              memory accesses to user-space addresses match program order
              between entry to and return from the system call (non-running
              threads are de facto in such a state). This covers threads from
              all processes running on the system. This command returns 0.

       The flags argument needs to be 0; it is reserved for future
       extensions.

       All memory accesses performed in program order from each targeted
       thread are guaranteed to be ordered with respect to sys_membarrier().
       If we use the semantic "barrier()" to represent a compiler barrier
       forcing memory accesses to be performed in program order across the
       barrier, and smp_mb() to represent explicit memory barriers forcing
       full memory ordering across the barrier, we have the following
       ordering table for each pair of barrier(), sys_membarrier() and
       smp_mb():

       The pair ordering is detailed as (O: ordered, X: not ordered):

                              barrier()   smp_mb()   sys_membarrier()
       barrier()                  X          X              O
       smp_mb()                   X          O              O
       sys_membarrier()           O          O              O

RETURN VALUE
       On success, these system calls return zero. On error, -1 is returned,
       and errno is set appropriately. For a given command, with flags
       argument set to 0, this system call is guaranteed to always return the
       same value until reboot.

ERRORS
       ENOSYS System call is not implemented.

       EINVAL Invalid arguments.

Linux                             2015-04-15                     MEMBARRIER(2)
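For illustration (this example is not part of the patch itself): on a kernel carrying this change, a user-space caller can probe for the command set and then issue the system-wide barrier. The sketch below hard-codes the x86-64 syscall number (324, taken from the syscall_64.tbl hunk further down) and mirrors the command values of the new include/uapi/linux/membarrier.h, for systems whose libc headers do not yet provide them:

    /* Query for MEMBARRIER_CMD_SHARED support, then issue one barrier. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_membarrier
    #define __NR_membarrier 324          /* x86-64, per syscall_64.tbl below */
    #endif

    #define MEMBARRIER_CMD_QUERY   0        /* mirrors the new UAPI header */
    #define MEMBARRIER_CMD_SHARED  (1 << 0)

    static int membarrier(int cmd, int flags)
    {
        return syscall(__NR_membarrier, cmd, flags);
    }

    int main(void)
    {
        int mask = membarrier(MEMBARRIER_CMD_QUERY, 0);

        if (mask < 0 || !(mask & MEMBARRIER_CMD_SHARED)) {
            fprintf(stderr, "membarrier: MEMBARRIER_CMD_SHARED not supported\n");
            return 1;
        }
        /* Pairs with plain barrier() on the fast paths, as described above. */
        membarrier(MEMBARRIER_CMD_SHARED, 0);
        return 0;
    }

On an older kernel the query fails with ENOSYS, so a caller would fall back (for example, to the signal-based liburcu scheme mentioned above).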
Signed-off-by: Mathieu Desnoyers
Reviewed-by: Paul E. McKenney
Reviewed-by: Josh Triplett
Cc: KOSAKI Motohiro
Cc: Steven Rostedt
Cc: Nicholas Miell
Cc: Ingo Molnar
Cc: Alan Cox
Cc: Lai Jiangshan
Cc: Stephen Hemminger
Cc: Thomas Gleixner
Cc: Peter Zijlstra
Cc: David Howells
Cc: Pranith Kumar
Cc: Michael Kerrisk
Cc: Shuah Khan
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 MAINTAINERS                            |  8 +++++
 arch/x86/entry/syscalls/syscall_32.tbl |  1 +
 arch/x86/entry/syscalls/syscall_64.tbl |  1 +
 include/linux/syscalls.h               |  2 ++
 include/uapi/asm-generic/unistd.h      |  4 ++-
 include/uapi/linux/Kbuild              |  1 +
 include/uapi/linux/membarrier.h        | 53 +++++++++++++++++++++++++++
 init/Kconfig                           | 12 +++++++
 kernel/Makefile                        |  1 +
 kernel/membarrier.c                    | 66 ++++++++++++++++++++++++++++++++++
 kernel/sys_ni.c                        |  3 ++
 11 files changed, 151 insertions(+), 1 deletion(-)
 create mode 100644 include/uapi/linux/membarrier.h
 create mode 100644 kernel/membarrier.c
(limited to 'kernel/Makefile')

diff --git a/MAINTAINERS b/MAINTAINERS
index 310da4295c70..e77bc84dc580 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6789,6 +6789,14 @@ W:	http://www.mellanox.com
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 F:	drivers/net/ethernet/mellanox/mlxsw/
 
+MEMBARRIER SUPPORT
+M:	Mathieu Desnoyers
+M:	"Paul E. McKenney"
+L:	linux-kernel@vger.kernel.org
+S:	Supported
+F:	kernel/membarrier.c
+F:	include/uapi/linux/membarrier.h
+
 MEMORY MANAGEMENT
 L:	linux-mm@kvack.org
 W:	http://www.linux-mm.org

diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 477bfa6db370..7663c455b9f6 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -381,3 +381,4 @@
 372	i386	recvmsg		sys_recvmsg	compat_sys_recvmsg
 373	i386	shutdown	sys_shutdown
 374	i386	userfaultfd	sys_userfaultfd
+375	i386	membarrier	sys_membarrier

diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 81c490634db9..278842fdf1f6 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -330,6 +330,7 @@
 321	common	bpf			sys_bpf
 322	64	execveat		stub_execveat
 323	common	userfaultfd		sys_userfaultfd
+324	common	membarrier		sys_membarrier
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact

diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 08001317aee7..a460e2ef2843 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -885,4 +885,6 @@ asmlinkage long sys_execveat(int dfd, const char __user *filename,
 			const char __user *const __user *argv,
 			const char __user *const __user *envp, int flags);
 
+asmlinkage long sys_membarrier(int cmd, int flags);
+
 #endif

diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index e016bd9b1a04..8da542a2874d 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -709,9 +709,11 @@ __SYSCALL(__NR_memfd_create, sys_memfd_create)
 __SYSCALL(__NR_bpf, sys_bpf)
 #define __NR_execveat 281
 __SC_COMP(__NR_execveat, sys_execveat, compat_sys_execveat)
+#define __NR_membarrier 282
+__SYSCALL(__NR_membarrier, sys_membarrier)
 
 #undef __NR_syscalls
-#define __NR_syscalls 282
+#define __NR_syscalls 283
 
 /*
  * All syscalls below here should go away really,

diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 70ff1d9abf0d..f7b2db44eb4b 100644
--- a/include/uapi/linux/Kbuild
+++ 
b/include/uapi/linux/Kbuild @@ -252,6 +252,7 @@ header-y += mdio.h header-y += media.h header-y += media-bus-format.h header-y += mei.h +header-y += membarrier.h header-y += memfd.h header-y += mempolicy.h header-y += meye.h diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h new file mode 100644 index 000000000000..e0b108bd2624 --- /dev/null +++ b/include/uapi/linux/membarrier.h @@ -0,0 +1,53 @@ +#ifndef _UAPI_LINUX_MEMBARRIER_H +#define _UAPI_LINUX_MEMBARRIER_H + +/* + * linux/membarrier.h + * + * membarrier system call API + * + * Copyright (c) 2010, 2015 Mathieu Desnoyers + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +/** + * enum membarrier_cmd - membarrier system call command + * @MEMBARRIER_CMD_QUERY: Query the set of supported commands. It returns + * a bitmask of valid commands. + * @MEMBARRIER_CMD_SHARED: Execute a memory barrier on all running threads. + * Upon return from system call, the caller thread + * is ensured that all running threads have passed + * through a state where all memory accesses to + * user-space addresses match program order between + * entry to and return from the system call + * (non-running threads are de facto in such a + * state). This covers threads from all processes + * running on the system. This command returns 0. + * + * Command to be passed to the membarrier system call. The commands need to + * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to + * the value 0. + */ +enum membarrier_cmd { + MEMBARRIER_CMD_QUERY = 0, + MEMBARRIER_CMD_SHARED = (1 << 0), +}; + +#endif /* _UAPI_LINUX_MEMBARRIER_H */ diff --git a/init/Kconfig b/init/Kconfig index 02da9f1fd9df..c24b6f767bf0 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1602,6 +1602,18 @@ config PCI_QUIRKS bugs/quirks. Disable this only if your target machine is unaffected by PCI quirks. +config MEMBARRIER + bool "Enable membarrier() system call" if EXPERT + default y + help + Enable the membarrier() system call that allows issuing memory + barriers across all running threads, which can be used to distribute + the cost of user-space memory barriers asymmetrically by transforming + pairs of memory barriers into pairs consisting of membarrier() and a + compiler barrier. + + If unsure, say Y. 
+
 config EMBEDDED
 	bool "Embedded system"
 	option allnoconfig_y

diff --git a/kernel/Makefile b/kernel/Makefile
index d4988410b410..53abf008ecb3 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -100,6 +100,7 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
 obj-$(CONFIG_JUMP_LABEL) += jump_label.o
 obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
 obj-$(CONFIG_TORTURE_TEST) += torture.o
+obj-$(CONFIG_MEMBARRIER) += membarrier.o
 
 obj-$(CONFIG_HAS_IOMEM) += memremap.o

diff --git a/kernel/membarrier.c b/kernel/membarrier.c
new file mode 100644
index 000000000000..536c727a56e9
--- /dev/null
+++ b/kernel/membarrier.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010, 2015 Mathieu Desnoyers
+ *
+ * membarrier system call
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/syscalls.h>
+#include <linux/membarrier.h>
+
+/*
+ * Bitmask made from a "or" of all commands within enum membarrier_cmd,
+ * except MEMBARRIER_CMD_QUERY.
+ */
+#define MEMBARRIER_CMD_BITMASK	(MEMBARRIER_CMD_SHARED)
+
+/**
+ * sys_membarrier - issue memory barriers on a set of threads
+ * @cmd:   Takes command values defined in enum membarrier_cmd.
+ * @flags: Currently needs to be 0. For future extensions.
+ *
+ * If this system call is not implemented, -ENOSYS is returned. If the
+ * command specified does not exist, or if the command argument is invalid,
+ * this system call returns -EINVAL. For a given command, with flags argument
+ * set to 0, this system call is guaranteed to always return the same value
+ * until reboot.
+ *
+ * All memory accesses performed in program order from each targeted thread
+ * are guaranteed to be ordered with respect to sys_membarrier(). If we use
+ * the semantic "barrier()" to represent a compiler barrier forcing memory
+ * accesses to be performed in program order across the barrier, and
+ * smp_mb() to represent explicit memory barriers forcing full memory
+ * ordering across the barrier, we have the following ordering table for
+ * each pair of barrier(), sys_membarrier() and smp_mb():
+ *
+ * The pair ordering is detailed as (O: ordered, X: not ordered):
+ *
+ *                        barrier()   smp_mb()   sys_membarrier()
+ *        barrier()           X          X              O
+ *        smp_mb()            X          O              O
+ *        sys_membarrier()    O          O              O
+ */
+SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
+{
+	if (unlikely(flags))
+		return -EINVAL;
+	switch (cmd) {
+	case MEMBARRIER_CMD_QUERY:
+		return MEMBARRIER_CMD_BITMASK;
+	case MEMBARRIER_CMD_SHARED:
+		if (num_online_cpus() > 1)
+			synchronize_sched();
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}

diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 03c3875d9958..a02decf15583 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -245,3 +245,6 @@ cond_syscall(sys_bpf);
 
 /* execveat */
 cond_syscall(sys_execveat);
+
+/* membarrier */
+cond_syscall(sys_membarrier);
-- 
cgit v1.2.3
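For illustration (these example lines are not from any patch above): the crashkernel= parser quoted earlier in this section accepts a classic form, an extended range-list form, and a suffixed form. Boot parameters consistent with that code would look like:

    crashkernel=128M
            classic form: always reserve 128M for the crash kernel
    crashkernel=128M@16M
            classic form with offset: reserve 128M at physical address 16M
    crashkernel=512M-2G:64M,2G-:128M
            extended form: reserve 64M when 512M <= system RAM < 2G, and
            128M when system RAM >= 2G (an @offset may follow the list)
    crashkernel=256M,high
            suffix form, handled via parse_crashkernel_high(); similarly,
            crashkernel=256M,low is handled via parse_crashkernel_low()

When several crashkernel= entries are present, get_last_crashkernel() uses the last one that matches the requested form.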