From 4f62d0a22fcabaa4477c6b8d7bf2860d819959cc Mon Sep 17 00:00:00 2001 From: Álvaro Fernández Rojas Date: Wed, 24 Feb 2021 08:33:36 +0100 Subject: mips: smp-bmips: fix CPU mappings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When booting bmips with SMP enabled on a BCM6358 running on CPU #1 instead of CPU #0, the current CPU mapping code produces the following: - smp_processor_id(): 0 - cpu_logical_map(0): 1 - cpu_number_map(0): 1 This is because SMP isn't supported on BCM6358 since it has a shared TLB, so it is disabled and max_cpus is decreased from 2 to 1. Signed-off-by: Álvaro Fernández Rojas Reviewed-by: Florian Fainelli Signed-off-by: Thomas Bogendoerfer --- arch/mips/kernel/smp-bmips.c | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 359b176b665f..b6ef5f7312cf 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -134,17 +134,24 @@ static void __init bmips_smp_setup(void) if (!board_ebase_setup) board_ebase_setup = &bmips_ebase_setup; - __cpu_number_map[boot_cpu] = 0; - __cpu_logical_map[0] = boot_cpu; - - for (i = 0; i < max_cpus; i++) { - if (i != boot_cpu) { - __cpu_number_map[i] = cpu; - __cpu_logical_map[cpu] = i; - cpu++; + if (max_cpus > 1) { + __cpu_number_map[boot_cpu] = 0; + __cpu_logical_map[0] = boot_cpu; + + for (i = 0; i < max_cpus; i++) { + if (i != boot_cpu) { + __cpu_number_map[i] = cpu; + __cpu_logical_map[cpu] = i; + cpu++; + } + set_cpu_possible(i, 1); + set_cpu_present(i, 1); } - set_cpu_possible(i, 1); - set_cpu_present(i, 1); + } else { + __cpu_number_map[0] = boot_cpu; + __cpu_logical_map[0] = 0; + set_cpu_possible(0, 1); + set_cpu_present(0, 1); } } -- cgit v1.2.3 From a1515ec7204edca770c07929df8538fcdb03ad46 Mon Sep 17 00:00:00 2001 From: Thomas Bogendoerfer Date: Mon, 1 Mar 2021 16:29:56 +0100 Subject: MIPS: Remove KVM_GUEST support KVM_GUEST is broken and unmaintained, so let's remove it. Reviewed-by: Huacai Chen Reviewed-by: Jiaxun Yang Signed-off-by: Thomas Bogendoerfer --- arch/mips/Kconfig | 17 -- arch/mips/configs/malta_kvm_guest_defconfig | 436 ---------------------------- arch/mips/include/asm/mach-generic/spaces.h | 12 - arch/mips/include/asm/processor.h | 5 - arch/mips/include/asm/uaccess.h | 9 - arch/mips/kernel/cevt-r4k.c | 4 - arch/mips/mti-malta/Platform | 6 +- arch/mips/mti-malta/malta-time.c | 5 - 8 files changed, 1 insertion(+), 493 deletions(-) delete mode 100644 arch/mips/configs/malta_kvm_guest_defconfig (limited to 'arch/mips/kernel') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 3a38d27cc1e1..b9ae8e7d95e0 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2222,23 +2222,6 @@ config 64BIT endchoice -config KVM_GUEST - bool "KVM Guest Kernel" - depends on CPU_MIPS32_R2 - depends on !64BIT && BROKEN_ON_SMP - help - Select this option if building a guest kernel for KVM (Trap & Emulate) - mode. - -config KVM_GUEST_TIMER_FREQ - int "Count/Compare Timer Frequency (MHz)" - depends on KVM_GUEST - default 100 - help - Set this to non-zero if building a guest kernel for KVM to skip RTC - emulation when determining guest CPU Frequency. Instead, the guest's - timer frequency is specified directly. 
- config MIPS_VA_BITS_48 bool "48 bits virtual memory" depends on 64BIT diff --git a/arch/mips/configs/malta_kvm_guest_defconfig b/arch/mips/configs/malta_kvm_guest_defconfig deleted file mode 100644 index 9185e0a0aa45..000000000000 --- a/arch/mips/configs/malta_kvm_guest_defconfig +++ /dev/null @@ -1,436 +0,0 @@ -CONFIG_SYSVIPC=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_LOG_BUF_SHIFT=15 -CONFIG_NAMESPACES=y -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -# CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y -CONFIG_MIPS_MALTA=y -CONFIG_CPU_LITTLE_ENDIAN=y -CONFIG_CPU_MIPS32_R2=y -CONFIG_KVM_GUEST=y -CONFIG_PAGE_SIZE_16KB=y -# CONFIG_MIPS_MT_SMP is not set -CONFIG_HZ_100=y -CONFIG_PCI=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_NET_KEY=y -CONFIG_NET_KEY_MIGRATE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_NET_IPIP=m -CONFIG_IP_MROUTE=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_INET_XFRM_MODE_TRANSPORT=m -CONFIG_INET_XFRM_MODE_TUNNEL=m -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=m -CONFIG_INET6_ESP=m -CONFIG_INET6_IPCOMP=m -CONFIG_IPV6_TUNNEL=m -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_PIMSM_V2=y -CONFIG_NETWORK_SECMARK=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CONNTRACK_AMANDA=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_H323=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_PPTP=m -CONFIG_NF_CONNTRACK_SANE=m -CONFIG_NF_CONNTRACK_SIP=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_TPROXY=m -CONFIG_NETFILTER_XT_TARGET_TRACE=m -CONFIG_NETFILTER_XT_TARGET_SECMARK=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SOCKET=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_IP_VS=m -CONFIG_IP_VS_IPV6=y -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_RR=m 
-CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_LC=m -CONFIG_IP_VS_WLC=m -CONFIG_IP_VS_LBLC=m -CONFIG_IP_VS_LBLCR=m -CONFIG_IP_VS_DH=m -CONFIG_IP_VS_SH=m -CONFIG_IP_VS_SED=m -CONFIG_IP_VS_NQ=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m -CONFIG_IP_NF_TARGET_ECN=m -CONFIG_IP_NF_TARGET_TTL=m -CONFIG_IP_NF_RAW=m -CONFIG_IP_NF_ARPTABLES=m -CONFIG_IP_NF_ARPFILTER=m -CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_IP6_NF_MATCH_AH=m -CONFIG_IP6_NF_MATCH_EUI64=m -CONFIG_IP6_NF_MATCH_FRAG=m -CONFIG_IP6_NF_MATCH_OPTS=m -CONFIG_IP6_NF_MATCH_HL=m -CONFIG_IP6_NF_MATCH_IPV6HEADER=m -CONFIG_IP6_NF_MATCH_MH=m -CONFIG_IP6_NF_MATCH_RT=m -CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_FILTER=m -CONFIG_IP6_NF_TARGET_REJECT=m -CONFIG_IP6_NF_MANGLE=m -CONFIG_IP6_NF_RAW=m -CONFIG_BRIDGE_NF_EBTABLES=m -CONFIG_BRIDGE_EBT_BROUTE=m -CONFIG_BRIDGE_EBT_T_FILTER=m -CONFIG_BRIDGE_EBT_T_NAT=m -CONFIG_BRIDGE_EBT_802_3=m -CONFIG_BRIDGE_EBT_AMONG=m -CONFIG_BRIDGE_EBT_ARP=m -CONFIG_BRIDGE_EBT_IP=m -CONFIG_BRIDGE_EBT_IP6=m -CONFIG_BRIDGE_EBT_LIMIT=m -CONFIG_BRIDGE_EBT_MARK=m -CONFIG_BRIDGE_EBT_PKTTYPE=m -CONFIG_BRIDGE_EBT_STP=m -CONFIG_BRIDGE_EBT_VLAN=m -CONFIG_BRIDGE_EBT_ARPREPLY=m -CONFIG_BRIDGE_EBT_DNAT=m -CONFIG_BRIDGE_EBT_MARK_T=m -CONFIG_BRIDGE_EBT_REDIRECT=m -CONFIG_BRIDGE_EBT_SNAT=m -CONFIG_BRIDGE_EBT_LOG=m -CONFIG_BRIDGE_EBT_NFLOG=m -CONFIG_IP_SCTP=m -CONFIG_BRIDGE=m -CONFIG_VLAN_8021Q=m -CONFIG_VLAN_8021Q_GVRP=y -CONFIG_ATALK=m -CONFIG_DEV_APPLETALK=m -CONFIG_IPDDP=m -CONFIG_IPDDP_ENCAP=y -CONFIG_PHONET=m -CONFIG_NET_SCHED=y -CONFIG_NET_SCH_CBQ=m -CONFIG_NET_SCH_HTB=m -CONFIG_NET_SCH_HFSC=m -CONFIG_NET_SCH_PRIO=m -CONFIG_NET_SCH_RED=m -CONFIG_NET_SCH_SFQ=m -CONFIG_NET_SCH_TEQL=m -CONFIG_NET_SCH_TBF=m -CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m -CONFIG_NET_SCH_NETEM=m -CONFIG_NET_SCH_INGRESS=m -CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m -CONFIG_NET_CLS_ROUTE4=m -CONFIG_NET_CLS_FW=m -CONFIG_NET_CLS_U32=m -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m -CONFIG_NET_CLS_FLOW=m -CONFIG_NET_CLS_ACT=y -CONFIG_NET_ACT_POLICE=y -CONFIG_NET_ACT_GACT=m -CONFIG_GACT_PROB=y -CONFIG_NET_ACT_MIRRED=m -CONFIG_NET_ACT_IPT=m -CONFIG_NET_ACT_NAT=m -CONFIG_NET_ACT_PEDIT=m -CONFIG_NET_ACT_SIMP=m -CONFIG_NET_ACT_SKBEDIT=m -CONFIG_CFG80211=m -CONFIG_MAC80211=m -CONFIG_MAC80211_MESH=y -CONFIG_RFKILL=m -CONFIG_DEVTMPFS=y -CONFIG_CONNECTOR=m -CONFIG_MTD=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_OOPS=m -CONFIG_MTD_CFI=y -CONFIG_MTD_CFI_INTELEXT=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_CFI_STAA=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_UBI=m -CONFIG_MTD_UBI_GLUEBI=m -CONFIG_BLK_DEV_FD=m -CONFIG_BLK_DEV_UMEM=m -CONFIG_BLK_DEV_LOOP=m -CONFIG_BLK_DEV_CRYPTOLOOP=m -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y -CONFIG_CDROM_PKTCDVD=m -CONFIG_ATA_OVER_ETH=m -CONFIG_VIRTIO_BLK=y -CONFIG_IDE=y -CONFIG_BLK_DEV_IDECD=y -CONFIG_BLK_DEV_TC86C001=m -CONFIG_RAID_ATTRS=m -CONFIG_BLK_DEV_SD=y -CONFIG_CHR_DEV_ST=m -CONFIG_CHR_DEV_OSST=m -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=m -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SCAN_ASYNC=y -CONFIG_SCSI_FC_ATTRS=m -CONFIG_ISCSI_TCP=m -CONFIG_BLK_DEV_3W_XXXX_RAID=m -CONFIG_SCSI_3W_9XXX=m -CONFIG_SCSI_ACARD=m -CONFIG_SCSI_AACRAID=m -CONFIG_SCSI_AIC7XXX=m -CONFIG_AIC7XXX_RESET_DELAY_MS=15000 -# CONFIG_AIC7XXX_DEBUG_ENABLE is not set -CONFIG_ATA=y -CONFIG_ATA_PIIX=y -CONFIG_PATA_IT8213=m -CONFIG_PATA_OLDPIIX=y -CONFIG_PATA_MPIIX=y -CONFIG_ATA_GENERIC=y -CONFIG_PATA_LEGACY=y 
-CONFIG_MD=y -CONFIG_BLK_DEV_MD=m -CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m -CONFIG_MD_MULTIPATH=m -CONFIG_MD_FAULTY=m -CONFIG_BLK_DEV_DM=m -CONFIG_DM_CRYPT=m -CONFIG_DM_SNAPSHOT=m -CONFIG_DM_MIRROR=m -CONFIG_DM_ZERO=m -CONFIG_DM_MULTIPATH=m -CONFIG_NETDEVICES=y -CONFIG_BONDING=m -CONFIG_DUMMY=m -CONFIG_EQUALIZER=m -CONFIG_IFB=m -CONFIG_MACVLAN=m -CONFIG_TUN=m -CONFIG_VETH=m -CONFIG_VIRTIO_NET=y -CONFIG_PCNET32=y -CONFIG_CHELSIO_T3=m -CONFIG_AX88796=m -CONFIG_NETXEN_NIC=m -CONFIG_TC35815=m -CONFIG_BROADCOM_PHY=m -CONFIG_CICADA_PHY=m -CONFIG_DAVICOM_PHY=m -CONFIG_ICPLUS_PHY=m -CONFIG_LXT_PHY=m -CONFIG_MARVELL_PHY=m -CONFIG_QSEMI_PHY=m -CONFIG_REALTEK_PHY=m -CONFIG_SMSC_PHY=m -CONFIG_VITESSE_PHY=m -CONFIG_ATMEL=m -CONFIG_PCI_ATMEL=m -CONFIG_IPW2100=m -CONFIG_IPW2100_MONITOR=y -CONFIG_HOSTAP=m -CONFIG_HOSTAP_FIRMWARE=y -CONFIG_HOSTAP_FIRMWARE_NVRAM=y -CONFIG_HOSTAP_PLX=m -CONFIG_HOSTAP_PCI=m -CONFIG_PRISM54=m -CONFIG_LIBERTAS=m -CONFIG_INPUT_MOUSEDEV=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_PIIX4_POWEROFF=y -CONFIG_POWER_RESET_SYSCON=y -# CONFIG_HWMON is not set -CONFIG_FB=y -CONFIG_FB_CIRRUS=y -# CONFIG_VGA_CONSOLE is not set -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_HID=m -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_CMOS=y -CONFIG_UIO=m -CONFIG_UIO_CIF=m -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_BALLOON=y -CONFIG_VIRTIO_MMIO=y -CONFIG_EXT2_FS=y -CONFIG_EXT3_FS=y -CONFIG_REISERFS_FS=m -CONFIG_REISERFS_PROC_INFO=y -CONFIG_REISERFS_FS_XATTR=y -CONFIG_REISERFS_FS_POSIX_ACL=y -CONFIG_REISERFS_FS_SECURITY=y -CONFIG_JFS_FS=m -CONFIG_JFS_POSIX_ACL=y -CONFIG_JFS_SECURITY=y -CONFIG_XFS_FS=m -CONFIG_XFS_QUOTA=y -CONFIG_XFS_POSIX_ACL=y -CONFIG_QUOTA=y -CONFIG_QFMT_V2=y -CONFIG_FUSE_FS=m -CONFIG_ISO9660_FS=m -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=m -CONFIG_VFAT_FS=m -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_AFFS_FS=m -CONFIG_HFS_FS=m -CONFIG_HFSPLUS_FS=m -CONFIG_BEFS_FS=m -CONFIG_BFS_FS=m -CONFIG_EFS_FS=m -CONFIG_JFFS2_FS=m -CONFIG_JFFS2_FS_XATTR=y -CONFIG_JFFS2_COMPRESSION_OPTIONS=y -CONFIG_JFFS2_RUBIN=y -CONFIG_CRAMFS=m -CONFIG_VXFS_FS=m -CONFIG_MINIX_FS=m -CONFIG_ROMFS_FS=m -CONFIG_SYSV_FS=m -CONFIG_UFS_FS=m -CONFIG_NFS_FS=y -CONFIG_ROOT_NFS=y -CONFIG_NFSD=y -CONFIG_NFSD_V3=y -CONFIG_NLS_CODEPAGE_437=m -CONFIG_NLS_CODEPAGE_737=m -CONFIG_NLS_CODEPAGE_775=m -CONFIG_NLS_CODEPAGE_850=m -CONFIG_NLS_CODEPAGE_852=m -CONFIG_NLS_CODEPAGE_855=m -CONFIG_NLS_CODEPAGE_857=m -CONFIG_NLS_CODEPAGE_860=m -CONFIG_NLS_CODEPAGE_861=m -CONFIG_NLS_CODEPAGE_862=m -CONFIG_NLS_CODEPAGE_863=m -CONFIG_NLS_CODEPAGE_864=m -CONFIG_NLS_CODEPAGE_865=m -CONFIG_NLS_CODEPAGE_866=m -CONFIG_NLS_CODEPAGE_869=m -CONFIG_NLS_CODEPAGE_936=m -CONFIG_NLS_CODEPAGE_950=m -CONFIG_NLS_CODEPAGE_932=m -CONFIG_NLS_CODEPAGE_949=m -CONFIG_NLS_CODEPAGE_874=m -CONFIG_NLS_ISO8859_8=m -CONFIG_NLS_CODEPAGE_1250=m -CONFIG_NLS_CODEPAGE_1251=m -CONFIG_NLS_ASCII=m -CONFIG_NLS_ISO8859_1=m -CONFIG_NLS_ISO8859_2=m -CONFIG_NLS_ISO8859_3=m -CONFIG_NLS_ISO8859_4=m -CONFIG_NLS_ISO8859_5=m -CONFIG_NLS_ISO8859_6=m -CONFIG_NLS_ISO8859_7=m -CONFIG_NLS_ISO8859_9=m -CONFIG_NLS_ISO8859_13=m -CONFIG_NLS_ISO8859_14=m -CONFIG_NLS_ISO8859_15=m -CONFIG_NLS_KOI8_R=m -CONFIG_NLS_KOI8_U=m -CONFIG_CRYPTO_CRYPTD=m -CONFIG_CRYPTO_LRW=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=m -CONFIG_CRYPTO_MD4=m -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_BLOWFISH=m 
-CONFIG_CRYPTO_CAMELLIA=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_FCRYPT=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h index c3ac06a6acd2..b247575c5e69 100644 --- a/arch/mips/include/asm/mach-generic/spaces.h +++ b/arch/mips/include/asm/mach-generic/spaces.h @@ -30,11 +30,7 @@ #endif /* __ASSEMBLY__ */ #ifdef CONFIG_32BIT -#ifdef CONFIG_KVM_GUEST -#define CAC_BASE _AC(0x40000000, UL) -#else #define CAC_BASE _AC(0x80000000, UL) -#endif #ifndef IO_BASE #define IO_BASE _AC(0xa0000000, UL) #endif @@ -43,12 +39,8 @@ #endif #ifndef MAP_BASE -#ifdef CONFIG_KVM_GUEST -#define MAP_BASE _AC(0x60000000, UL) -#else #define MAP_BASE _AC(0xc0000000, UL) #endif -#endif /* * Memory above this physical address will be considered highmem. @@ -100,11 +92,7 @@ #endif #ifndef FIXADDR_TOP -#ifdef CONFIG_KVM_GUEST -#define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000) -#else #define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) #endif -#endif #endif /* __ASM_MACH_GENERIC_SPACES_H */ diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 7834e7c0c78a..8e69e0a35ee9 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -32,16 +32,11 @@ extern unsigned int vced_count, vcei_count; extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #ifdef CONFIG_32BIT -#ifdef CONFIG_KVM_GUEST -/* User space process size is limited to 1GB in KVM Guest Mode */ -#define TASK_SIZE 0x3fff8000UL -#else /* * User space process size: 2GB. This is hardcoded into a few places, * so don't change it unless you know what you are doing. */ #define TASK_SIZE 0x80000000UL -#endif #define STACK_TOP_MAX TASK_SIZE diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 61fc01f177a6..d273a3857809 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -25,11 +25,7 @@ */ #ifdef CONFIG_32BIT -#ifdef CONFIG_KVM_GUEST -#define __UA_LIMIT 0x40000000UL -#else #define __UA_LIMIT 0x80000000UL -#endif #define __UA_ADDR ".word" #define __UA_LA "la" @@ -61,13 +57,8 @@ extern u64 __ua_limit; * address in this range it's the process's problem, not ours :-) */ -#ifdef CONFIG_KVM_GUEST -#define KERNEL_DS ((mm_segment_t) { 0x80000000UL }) -#define USER_DS ((mm_segment_t) { 0xC0000000UL }) -#else #define KERNEL_DS ((mm_segment_t) { 0UL }) #define USER_DS ((mm_segment_t) { __UA_LIMIT }) -#endif #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 995ad9e69ded..32ec67c9ab67 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -195,10 +195,6 @@ int c0_compare_int_usable(void) unsigned int delta; unsigned int cnt; -#ifdef CONFIG_KVM_GUEST - return 1; -#endif - /* * IP7 already pending? Try to clear it by acking the timer. 
*/ diff --git a/arch/mips/mti-malta/Platform b/arch/mips/mti-malta/Platform index 41e0d2a2d325..f4616934d950 100644 --- a/arch/mips/mti-malta/Platform +++ b/arch/mips/mti-malta/Platform @@ -2,9 +2,5 @@ # MIPS Malta board # cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta -ifdef CONFIG_KVM_GUEST - load-$(CONFIG_MIPS_MALTA) += 0x0000000040100000 -else - load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 -endif +load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c index 567720374d57..bbf1e38e1431 100644 --- a/arch/mips/mti-malta/malta-time.c +++ b/arch/mips/mti-malta/malta-time.c @@ -66,11 +66,6 @@ static void __init estimate_frequencies(void) int secs; u64 giccount = 0, gicstart = 0; -#if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ - mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000; - return; -#endif - local_irq_save(flags); if (mips_gic_present()) -- cgit v1.2.3 From ecbba30fbf45dceaaf0e8010638283e7aa94a4df Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 1 Mar 2021 23:48:24 +0900 Subject: mips: syscalls: switch to generic syscalltbl.sh Many architectures duplicate similar shell scripts. This commit converts mips to use scripts/syscalltbl.sh. This also unifies syscall_table_32_o32.h and syscall_table_64_o32.h into syscall_table_o32.h. The offset parameters are unneeded here; __SYSCALL(nr, entry) is defined as 'PTR entry', so the parameter 'nr' is not used in the first place. With this commit, syscall tables and generated files are straight mapped, which makes things easier to understand. syscall_n32.tbl --> syscall_table_n32.h syscall_n64.tbl --> syscall_table_n64.h syscall_o32.tbl --> syscall_table_o32.h Then, the abi parameters are also unneeded. 
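As an illustrative sketch (not part of the patch), a generated table header and its assembler consumer now pair up like this; the two entries are placeholders corresponding to the start of syscall_n64.tbl:

    /* generated asm/syscall_table_n64.h */
    __SYSCALL(0, sys_read)
    __SYSCALL(1, sys_write)
    /* ... */

    /* consumer, e.g. arch/mips/kernel/scall64-n64.S */
    #define __SYSCALL(nr, entry)    PTR entry
    EXPORT(sys_call_table)
    #include <asm/syscall_table_n64.h>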
Signed-off-by: Masahiro Yamada Signed-off-by: Thomas Bogendoerfer --- arch/mips/include/asm/Kbuild | 7 +++---- arch/mips/kernel/scall32-o32.S | 4 ++-- arch/mips/kernel/scall64-n32.S | 3 +-- arch/mips/kernel/scall64-n64.S | 3 +-- arch/mips/kernel/scall64-o32.S | 4 ++-- arch/mips/kernel/syscalls/Makefile | 31 ++++++++-------------------- arch/mips/kernel/syscalls/syscalltbl.sh | 36 --------------------------------- 7 files changed, 17 insertions(+), 71 deletions(-) delete mode 100644 arch/mips/kernel/syscalls/syscalltbl.sh (limited to 'arch/mips/kernel') diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 8f6fe69674b7..dee172716581 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -1,9 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 # MIPS headers -generated-y += syscall_table_32_o32.h -generated-y += syscall_table_64_n32.h -generated-y += syscall_table_64_n64.h -generated-y += syscall_table_64_o32.h +generated-y += syscall_table_n32.h +generated-y += syscall_table_n64.h +generated-y += syscall_table_o32.h generated-y += unistd_nr_n32.h generated-y += unistd_nr_n64.h generated-y += unistd_nr_o32.h diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index b449b68662a9..84e8624e83a2 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -217,9 +217,9 @@ einval: li v0, -ENOSYS #define sys_sched_getaffinity mipsmt_sys_sched_getaffinity #endif /* CONFIG_MIPS_MT_FPAFF */ +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native) #define __SYSCALL(nr, entry) PTR entry .align 2 .type sys_call_table, @object EXPORT(sys_call_table) -#include -#undef __SYSCALL +#include diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 35d8c86b160e..f650c55a17dc 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -104,5 +104,4 @@ not_n32_scall: #define __SYSCALL(nr, entry) PTR entry .type sysn32_call_table, @object EXPORT(sysn32_call_table) -#include -#undef __SYSCALL +#include diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S index 5e9c497ce099..5d7bfc65e4d0 100644 --- a/arch/mips/kernel/scall64-n64.S +++ b/arch/mips/kernel/scall64-n64.S @@ -113,5 +113,4 @@ illegal_syscall: .align 3 .type sys_call_table, @object EXPORT(sys_call_table) -#include -#undef __SYSCALL +#include diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 50c9a57e0d3a..cedc8bd88804 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -213,9 +213,9 @@ einval: li v0, -ENOSYS jr ra END(sys32_syscall) +#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat) #define __SYSCALL(nr, entry) PTR entry .align 3 .type sys32_call_table,@object EXPORT(sys32_call_table) -#include -#undef __SYSCALL +#include diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile index 51f8b805f2ed..2bbea47caf7e 100644 --- a/arch/mips/kernel/syscalls/Makefile +++ b/arch/mips/kernel/syscalls/Makefile @@ -10,7 +10,7 @@ syscalln64 := $(src)/syscall_n64.tbl syscallo32 := $(src)/syscall_o32.tbl syshdr := $(srctree)/$(src)/syscallhdr.sh sysnr := $(srctree)/$(src)/syscallnr.sh -systbl := $(srctree)/$(src)/syscalltbl.sh +systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ @@ -25,10 +25,7 @@ quiet_cmd_sysnr = SYSNR $@ '$(sysnr_offset_$(basetarget))' quiet_cmd_systbl = SYSTBL $@ - cmd_systbl = 
$(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ - '$(systbl_abis_$(basetarget))' \ - '$(systbl_abi_$(basetarget))' \ - '$(systbl_offset_$(basetarget))' + cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ syshdr_offset_unistd_n32 := __NR_Linux $(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) FORCE @@ -57,33 +54,21 @@ sysnr_offset_unistd_nr_o32 := 4000 $(kapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) FORCE $(call if_changed,sysnr) -systbl_abi_syscall_table_32_o32 := 32_o32 -systbl_offset_syscall_table_32_o32 := 4000 -$(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl) FORCE +$(kapi)/syscall_table_n32.h: $(syscalln32) $(systbl) FORCE $(call if_changed,systbl) -systbl_abi_syscall_table_64_n32 := 64_n32 -systbl_offset_syscall_table_64_n32 := 6000 -$(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl) FORCE +$(kapi)/syscall_table_n64.h: $(syscalln64) $(systbl) FORCE $(call if_changed,systbl) -systbl_abi_syscall_table_64_n64 := 64_n64 -systbl_offset_syscall_table_64_n64 := 5000 -$(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl) FORCE - $(call if_changed,systbl) - -systbl_abi_syscall_table_64_o32 := 64_o32 -systbl_offset_syscall_table_64_o32 := 4000 -$(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl) FORCE +$(kapi)/syscall_table_o32.h: $(syscallo32) $(systbl) FORCE $(call if_changed,systbl) uapisyshdr-y += unistd_n32.h \ unistd_n64.h \ unistd_o32.h -kapisyshdr-y += syscall_table_32_o32.h \ - syscall_table_64_n32.h \ - syscall_table_64_n64.h \ - syscall_table_64_o32.h \ +kapisyshdr-y += syscall_table_n32.h \ + syscall_table_n64.h \ + syscall_table_o32.h \ unistd_nr_n32.h \ unistd_nr_n64.h \ unistd_nr_o32.h diff --git a/arch/mips/kernel/syscalls/syscalltbl.sh b/arch/mips/kernel/syscalls/syscalltbl.sh deleted file mode 100644 index 1e2570740c20..000000000000 --- a/arch/mips/kernel/syscalls/syscalltbl.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -my_abi="$4" -offset="$5" - -emit() { - t_nxt="$1" - t_nr="$2" - t_entry="$3" - - while [ $t_nxt -lt $t_nr ]; do - printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" - t_nxt=$((t_nxt+1)) - done - printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" -} - -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - nxt=0 - if [ -z "$offset" ]; then - offset=0 - fi - - while read nr abi name entry compat ; do - if [ "$my_abi" = "64_o32" ] && [ ! -z "$compat" ]; then - emit $((nxt+offset)) $((nr+offset)) $compat - else - emit $((nxt+offset)) $((nr+offset)) $entry - fi - nxt=$((nr+1)) - done -) > "$out" -- cgit v1.2.3 From 6228bd65288af02cd8cc2417c9c4bf05e1caf935 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 1 Mar 2021 23:48:25 +0900 Subject: mips: syscalls: switch to generic syscallhdr.sh Many architectures duplicate similar shell scripts. This commit converts mips to use scripts/syscallhdr.sh. 
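As a rough sketch of the effect (entries illustrative), invoking the generic script with --offset __NR_Linux keeps the output format of the deleted per-arch script, e.g. for unistd_n64.h:

    #define __NR_read    (__NR_Linux + 0)
    #define __NR_write   (__NR_Linux + 1)
    /* ... */

where __NR_Linux is the per-ABI base (4000 for o32, 5000 for n64, 6000 for n32) defined elsewhere in the MIPS uapi headers.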
Signed-off-by: Masahiro Yamada Signed-off-by: Thomas Bogendoerfer --- arch/mips/kernel/syscalls/Makefile | 10 ++------- arch/mips/kernel/syscalls/syscallhdr.sh | 36 --------------------------------- 2 files changed, 2 insertions(+), 44 deletions(-) delete mode 100644 arch/mips/kernel/syscalls/syscallhdr.sh (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile index 2bbea47caf7e..904452992992 100644 --- a/arch/mips/kernel/syscalls/Makefile +++ b/arch/mips/kernel/syscalls/Makefile @@ -8,15 +8,12 @@ _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ syscalln32 := $(src)/syscall_n32.tbl syscalln64 := $(src)/syscall_n64.tbl syscallo32 := $(src)/syscall_o32.tbl -syshdr := $(srctree)/$(src)/syscallhdr.sh +syshdr := $(srctree)/scripts/syscallhdr.sh sysnr := $(srctree)/$(src)/syscallnr.sh systbl := $(srctree)/scripts/syscalltbl.sh quiet_cmd_syshdr = SYSHDR $@ - cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ - '$(syshdr_abis_$(basetarget))' \ - '$(syshdr_pfx_$(basetarget))' \ - '$(syshdr_offset_$(basetarget))' + cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --offset __NR_Linux $< $@ quiet_cmd_sysnr = SYSNR $@ cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ @@ -27,15 +24,12 @@ quiet_cmd_sysnr = SYSNR $@ quiet_cmd_systbl = SYSTBL $@ cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@ -syshdr_offset_unistd_n32 := __NR_Linux $(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) FORCE $(call if_changed,syshdr) -syshdr_offset_unistd_n64 := __NR_Linux $(uapi)/unistd_n64.h: $(syscalln64) $(syshdr) FORCE $(call if_changed,syshdr) -syshdr_offset_unistd_o32 := __NR_Linux $(uapi)/unistd_o32.h: $(syscallo32) $(syshdr) FORCE $(call if_changed,syshdr) diff --git a/arch/mips/kernel/syscalls/syscallhdr.sh b/arch/mips/kernel/syscalls/syscallhdr.sh deleted file mode 100644 index 2e241e713a7d..000000000000 --- a/arch/mips/kernel/syscalls/syscallhdr.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/sh -# SPDX-License-Identifier: GPL-2.0 - -in="$1" -out="$2" -my_abis=`echo "($3)" | tr ',' '|'` -prefix="$4" -offset="$5" - -fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \ - -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ - -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` -grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( - printf "#ifndef %s\n" "${fileguard}" - printf "#define %s\n" "${fileguard}" - printf "\n" - - nxt=0 - while read nr abi name entry compat ; do - if [ -z "$offset" ]; then - printf "#define __NR_%s%s\t%s\n" \ - "${prefix}" "${name}" "${nr}" - else - printf "#define __NR_%s%s\t(%s + %s)\n" \ - "${prefix}" "${name}" "${offset}" "${nr}" - fi - nxt=$((nr+1)) - done - - printf "\n" - printf "#ifdef __KERNEL__\n" - printf "#define __NR_syscalls\t%s\n" "${nxt}" - printf "#endif\n" - printf "\n" - printf "#endif /* %s */\n" "${fileguard}" -) > "$out" -- cgit v1.2.3 From 63d6c98168916f0c18f7bb7a28e27efd95524409 Mon Sep 17 00:00:00 2001 From: Wang Qing Date: Sat, 13 Mar 2021 11:33:48 +0800 Subject: mips: kernel: use DEFINE_DEBUGFS_ATTRIBUTE with debugfs_create_file_unsafe() debugfs_create_file_unsafe does not protect the fops handed to it against file removal. DEFINE_DEBUGFS_ATTRIBUTE makes the fops aware of the file lifetime and thus protects it against removal. 
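The pairing this converts to looks roughly like the following sketch (hypothetical counter and file names, modelled on spinlock_test.c):

    static u64 hit_count;

    static int hits_get(void *data, u64 *val)
    {
            *val = *(u64 *)data;    /* report the counter behind the file */
            return 0;
    }
    /* generates fops whose accessors take debugfs_file_get()/put() */
    DEFINE_DEBUGFS_ATTRIBUTE(fops_hits, hits_get, NULL, "%llu\n");

    static int __init hits_debugfs_init(void)
    {
            debugfs_create_file_unsafe("hits", 0444, mips_debugfs_dir,
                                       &hit_count, &fops_hits);
            return 0;
    }

Because the attribute's file operations now pin the file around each access, registering with the _unsafe variant avoids the extra proxy fops without losing protection against concurrent removal.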
Signed-off-by: Wang Qing Signed-off-by: Thomas Bogendoerfer --- arch/mips/kernel/spinlock_test.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/spinlock_test.c b/arch/mips/kernel/spinlock_test.c index ab4e3e1b138d..90f53e041a38 100644 --- a/arch/mips/kernel/spinlock_test.c +++ b/arch/mips/kernel/spinlock_test.c @@ -35,7 +35,7 @@ static int ss_get(void *data, u64 *val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n"); @@ -114,13 +114,13 @@ static int multi_get(void *data, u64 *val) return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n"); static int __init spinlock_test(void) { - debugfs_create_file("spin_single", S_IRUGO, mips_debugfs_dir, NULL, + debugfs_create_file_unsafe("spin_single", S_IRUGO, mips_debugfs_dir, NULL, &fops_ss); - debugfs_create_file("spin_multi", S_IRUGO, mips_debugfs_dir, NULL, + debugfs_create_file_unsafe("spin_multi", S_IRUGO, mips_debugfs_dir, NULL, &fops_multi); return 0; } -- cgit v1.2.3 From dfad83cb7193effb6c853a5c7337ac2274a2e2fc Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 30 Mar 2021 20:22:07 -0700 Subject: MIPS: Add support for CONFIG_DEBUG_VIRTUAL Provide hooks to intercept bad usages of virt_to_phys() and __pa_symbol() throughout the kernel. To make this possible, we need to rename the current implement of virt_to_phys() into __virt_to_phys_nodebug() and wrap it around depending on CONFIG_DEBUG_VIRTUAL. A similar thing is needed for __pa_symbol() which is now aliased to __phys_addr_symbol() whose implementation is either the direct return of RELOC_HIDE or goes through the debug version. 
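To illustrate the kind of misuse this option catches (hypothetical snippet, not from the patch):

    void *buf = vmalloc(PAGE_SIZE);

    /* buf is outside the linear mapping, so with CONFIG_DEBUG_VIRTUAL=y
     * this triggers the WARN in __virt_to_phys() instead of silently
     * returning a meaningless physical address.
     */
    phys_addr_t pa = virt_to_phys(buf);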
Signed-off-by: Florian Fainelli Signed-off-by: Thomas Bogendoerfer --- arch/mips/Kconfig | 1 + arch/mips/include/asm/io.h | 14 ++++++++++- arch/mips/include/asm/page.h | 9 ++++++- arch/mips/kernel/vdso.c | 5 ++-- arch/mips/mm/Makefile | 2 ++ arch/mips/mm/physaddr.c | 56 ++++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 83 insertions(+), 4 deletions(-) create mode 100644 arch/mips/mm/physaddr.c (limited to 'arch/mips/kernel') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index b72458215d20..7d509191168b 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -4,6 +4,7 @@ config MIPS default y select ARCH_32BIT_OFF_T if !64BIT select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT + select ARCH_HAS_DEBUG_VIRTUAL if !64BIT select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_KCOV select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h index 78537aa23500..2c138450ad3b 100644 --- a/arch/mips/include/asm/io.h +++ b/arch/mips/include/asm/io.h @@ -100,11 +100,23 @@ static inline void set_io_port_base(unsigned long base) * almost all conceivable cases a device driver should not be using * this function */ -static inline unsigned long virt_to_phys(volatile const void *address) +static inline unsigned long __virt_to_phys_nodebug(volatile const void *address) { return __pa(address); } +#ifdef CONFIG_DEBUG_VIRTUAL +extern phys_addr_t __virt_to_phys(volatile const void *x); +#else +#define __virt_to_phys(x) __virt_to_phys_nodebug(x) +#endif + +#define virt_to_phys virt_to_phys +static inline phys_addr_t virt_to_phys(const volatile void *x) +{ + return __virt_to_phys(x); +} + /* * phys_to_virt - map physical address to virtual * @address: address to remap diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 65acab9c41f9..195ff4e9771f 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -210,9 +210,16 @@ static inline unsigned long ___pa(unsigned long x) * also affect MIPS so we keep this one until GCC 3.x has been retired * before we can apply https://patchwork.linux-mips.org/patch/1541/ */ +#define __pa_symbol_nodebug(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) + +#ifdef CONFIG_DEBUG_VIRTUAL +extern phys_addr_t __phys_addr_symbol(unsigned long x); +#else +#define __phys_addr_symbol(x) __pa_symbol_nodebug(x) +#endif #ifndef __pa_symbol -#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) +#define __pa_symbol(x) __phys_addr_symbol((unsigned long)(x)) #endif #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 7d0b91ad2581..3d0cf471f2fe 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -90,7 +90,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) { struct mips_vdso_image *image = current->thread.abi->vdso; struct mm_struct *mm = current->mm; - unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn; + unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base; struct vm_area_struct *vma; int ret; @@ -158,7 +158,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) /* Map GIC user page. 
*/ if (gic_size) { - gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT; + gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS; + gic_pfn = virt_to_phys((void *)gic_base) >> PAGE_SHIFT; ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size, pgprot_noncached(vma->vm_page_prot)); diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile index 865926a37775..fa1f729e0700 100644 --- a/arch/mips/mm/Makefile +++ b/arch/mips/mm/Makefile @@ -40,3 +40,5 @@ obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o obj-$(CONFIG_SCACHE_DEBUGFS) += sc-debugfs.o + +obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o diff --git a/arch/mips/mm/physaddr.c b/arch/mips/mm/physaddr.c new file mode 100644 index 000000000000..a1ced5e44951 --- /dev/null +++ b/arch/mips/mm/physaddr.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static inline bool __debug_virt_addr_valid(unsigned long x) +{ + /* high_memory does not get immediately defined, and there + * are early callers of __pa() against PAGE_OFFSET + */ + if (!high_memory && x >= PAGE_OFFSET) + return true; + + if (high_memory && x >= PAGE_OFFSET && x < (unsigned long)high_memory) + return true; + + /* + * MAX_DMA_ADDRESS is a virtual address that may not correspond to an + * actual physical address. Enough code relies on + * virt_to_phys(MAX_DMA_ADDRESS) that we just need to work around it + * and always return true. + */ + if (x == MAX_DMA_ADDRESS) + return true; + + return false; +} + +phys_addr_t __virt_to_phys(volatile const void *x) +{ + WARN(!__debug_virt_addr_valid((unsigned long)x), + "virt_to_phys used for non-linear address: %pK (%pS)\n", + x, x); + + return __virt_to_phys_nodebug(x); +} +EXPORT_SYMBOL(__virt_to_phys); + +phys_addr_t __phys_addr_symbol(unsigned long x) +{ + /* This is bounds checking against the kernel image only. + * __pa_symbol should only be used on kernel symbol addresses. + */ + VIRTUAL_BUG_ON(x < (unsigned long)_text || + x > (unsigned long)_end); + + return __pa_symbol_nodebug(x); +} +EXPORT_SYMBOL(__phys_addr_symbol); -- cgit v1.2.3 From 9a91dd501c2b98b6a1677affa514e30452b9c908 Mon Sep 17 00:00:00 2001 From: Thomas Bogendoerfer Date: Thu, 1 Apr 2021 14:56:34 +0200 Subject: MIPS: kernel: Remove not needed set_fs calls flush_icache_range always does flush kernel address ranges, so no need to do the set_fs dance. 
Signed-off-by: Thomas Bogendoerfer Reviewed-by: Christoph Hellwig --- arch/mips/kernel/ftrace.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 666b9969c1bd..8c401e42301c 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -90,7 +90,6 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, unsigned int new_code2) { int faulted; - mm_segment_t old_fs; safe_store_code(new_code1, ip, faulted); if (unlikely(faulted)) @@ -102,10 +101,7 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, return -EFAULT; ip -= 4; - old_fs = get_fs(); - set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); - set_fs(old_fs); return 0; } @@ -114,7 +110,6 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1, unsigned int new_code2) { int faulted; - mm_segment_t old_fs; ip += 4; safe_store_code(new_code2, ip, faulted); @@ -126,10 +121,7 @@ static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1, if (unlikely(faulted)) return -EFAULT; - old_fs = get_fs(); - set_fs(KERNEL_DS); flush_icache_range(ip, ip + 8); - set_fs(old_fs); return 0; } -- cgit v1.2.3 From 45deb5faeb9e02951361ceba5ffee721745661c3 Mon Sep 17 00:00:00 2001 From: Thomas Bogendoerfer Date: Thu, 1 Apr 2021 14:56:36 +0200 Subject: MIPS: uaccess: Remove get_fs/set_fs call sites Use new helpers to access user/kernel for functions, which are used with user/kernel pointers. Instead of dealing with get_fs/set_fs select user/kernel access via parameter. Signed-off-by: Thomas Bogendoerfer Reviewed-by: Christoph Hellwig --- arch/mips/kernel/access-helper.h | 18 ++++ arch/mips/kernel/traps.c | 105 +++++++++------------ arch/mips/kernel/unaligned.c | 199 ++++++++++++++------------------------- 3 files changed, 136 insertions(+), 186 deletions(-) create mode 100644 arch/mips/kernel/access-helper.h (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/access-helper.h b/arch/mips/kernel/access-helper.h new file mode 100644 index 000000000000..dd5b502813b8 --- /dev/null +++ b/arch/mips/kernel/access-helper.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +static inline int __get_addr(unsigned long *a, unsigned long *p, bool user) +{ + return user ? get_user(*a, p) : get_kernel_nofault(*a, p); +} + +static inline int __get_inst16(u16 *i, u16 *p, bool user) +{ + return user ? get_user(*i, p) : get_kernel_nofault(*i, p); +} + +static inline int __get_inst32(u32 *i, u32 *p, bool user) +{ + return user ? 
get_user(*i, p) : get_kernel_nofault(*i, p); +} diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 808b8b61ded1..0b4e06303c55 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -72,6 +72,8 @@ #include +#include "access-helper.h" + extern void check_wait(void); extern asmlinkage void rollback_handle_int(void); extern asmlinkage void handle_int(void); @@ -108,7 +110,8 @@ void (*board_bind_eic_interrupt)(int irq, int regset); void (*board_ebase_setup)(void); void(*board_cache_error_setup)(void); -static void show_raw_backtrace(unsigned long reg29, const char *loglvl) +static void show_raw_backtrace(unsigned long reg29, const char *loglvl, + bool user) { unsigned long *sp = (unsigned long *)(reg29 & ~3); unsigned long addr; @@ -118,9 +121,7 @@ static void show_raw_backtrace(unsigned long reg29, const char *loglvl) printk("%s\n", loglvl); #endif while (!kstack_end(sp)) { - unsigned long __user *p = - (unsigned long __user *)(unsigned long)sp++; - if (__get_user(addr, p)) { + if (__get_addr(&addr, sp++, user)) { printk("%s (Bad stack address)", loglvl); break; } @@ -141,7 +142,7 @@ __setup("raw_show_trace", set_raw_show_trace); #endif static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, - const char *loglvl) + const char *loglvl, bool user) { unsigned long sp = regs->regs[29]; unsigned long ra = regs->regs[31]; @@ -151,7 +152,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, task = current; if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) { - show_raw_backtrace(sp, loglvl); + show_raw_backtrace(sp, loglvl, user); return; } printk("%sCall Trace:\n", loglvl); @@ -167,12 +168,12 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, * with at least a bit of error checking ... */ static void show_stacktrace(struct task_struct *task, - const struct pt_regs *regs, const char *loglvl) + const struct pt_regs *regs, const char *loglvl, bool user) { const int field = 2 * sizeof(unsigned long); - long stackdata; + unsigned long stackdata; int i; - unsigned long __user *sp = (unsigned long __user *)regs->regs[29]; + unsigned long *sp = (unsigned long *)regs->regs[29]; printk("%sStack :", loglvl); i = 0; @@ -186,7 +187,7 @@ static void show_stacktrace(struct task_struct *task, break; } - if (__get_user(stackdata, sp++)) { + if (__get_addr(&stackdata, sp++, user)) { pr_cont(" (Bad stack address)"); break; } @@ -195,13 +196,12 @@ static void show_stacktrace(struct task_struct *task, i++; } pr_cont("\n"); - show_backtrace(task, regs, loglvl); + show_backtrace(task, regs, loglvl, user); } void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) { struct pt_regs regs; - mm_segment_t old_fs = get_fs(); regs.cp0_status = KSU_KERNEL; if (sp) { @@ -217,33 +217,41 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) prepare_frametrace(®s); } } - /* - * show_stack() deals exclusively with kernel mode, so be sure to access - * the stack in the kernel (not user) address space. 
- */ - set_fs(KERNEL_DS); - show_stacktrace(task, ®s, loglvl); - set_fs(old_fs); + show_stacktrace(task, ®s, loglvl, false); } -static void show_code(unsigned int __user *pc) +static void show_code(void *pc, bool user) { long i; - unsigned short __user *pc16 = NULL; + unsigned short *pc16 = NULL; printk("Code:"); if ((unsigned long)pc & 1) - pc16 = (unsigned short __user *)((unsigned long)pc & ~1); + pc16 = (u16 *)((unsigned long)pc & ~1); + for(i = -3 ; i < 6 ; i++) { - unsigned int insn; - if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { - pr_cont(" (Bad address in epc)\n"); - break; + if (pc16) { + u16 insn16; + + if (__get_inst16(&insn16, pc16 + i, user)) + goto bad_address; + + pr_cont("%c%04x%c", (i?' ':'<'), insn16, (i?' ':'>')); + } else { + u32 insn32; + + if (__get_inst32(&insn32, (u32 *)pc + i, user)) + goto bad_address; + + pr_cont("%c%08x%c", (i?' ':'<'), insn32, (i?' ':'>')); } - pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>')); } pr_cont("\n"); + return; + +bad_address: + pr_cont(" (Bad address in epc)\n\n"); } static void __show_regs(const struct pt_regs *regs) @@ -356,7 +364,6 @@ void show_regs(struct pt_regs *regs) void show_registers(struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); - mm_segment_t old_fs = get_fs(); __show_regs(regs); print_modules(); @@ -371,13 +378,9 @@ void show_registers(struct pt_regs *regs) printk("*HwTLS: %0*lx\n", field, tls); } - if (!user_mode(regs)) - /* Necessary for getting the correct stack content */ - set_fs(KERNEL_DS); - show_stacktrace(current, regs, KERN_DEFAULT); - show_code((unsigned int __user *) regs->cp0_epc); + show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs)); + show_code((void *)regs->cp0_epc, user_mode(regs)); printk("\n"); - set_fs(old_fs); } static DEFINE_RAW_SPINLOCK(die_lock); @@ -1022,18 +1025,14 @@ asmlinkage void do_bp(struct pt_regs *regs) unsigned long epc = msk_isa16_mode(exception_epc(regs)); unsigned int opcode, bcode; enum ctx_state prev_state; - mm_segment_t seg; - - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); + bool user = user_mode(regs); prev_state = exception_enter(); current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; if (get_isa16_mode(regs->cp0_epc)) { u16 instr[2]; - if (__get_user(instr[0], (u16 __user *)epc)) + if (__get_inst16(&instr[0], (u16 *)epc, user)) goto out_sigsegv; if (!cpu_has_mmips) { @@ -1044,13 +1043,13 @@ asmlinkage void do_bp(struct pt_regs *regs) bcode = instr[0] & 0xf; } else { /* 32-bit microMIPS BREAK */ - if (__get_user(instr[1], (u16 __user *)(epc + 2))) + if (__get_inst16(&instr[1], (u16 *)(epc + 2), user)) goto out_sigsegv; opcode = (instr[0] << 16) | instr[1]; bcode = (opcode >> 6) & ((1 << 20) - 1); } } else { - if (__get_user(opcode, (unsigned int __user *)epc)) + if (__get_inst32(&opcode, (u32 *)epc, user)) goto out_sigsegv; bcode = (opcode >> 6) & ((1 << 20) - 1); } @@ -1100,7 +1099,6 @@ asmlinkage void do_bp(struct pt_regs *regs) do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break"); out: - set_fs(seg); exception_exit(prev_state); return; @@ -1114,25 +1112,21 @@ asmlinkage void do_tr(struct pt_regs *regs) u32 opcode, tcode = 0; enum ctx_state prev_state; u16 instr[2]; - mm_segment_t seg; + bool user = user_mode(regs); unsigned long epc = msk_isa16_mode(exception_epc(regs)); - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); - prev_state = exception_enter(); current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; if (get_isa16_mode(regs->cp0_epc)) { - if (__get_user(instr[0], (u16 
__user *)(epc + 0)) || - __get_user(instr[1], (u16 __user *)(epc + 2))) + if (__get_inst16(&instr[0], (u16 *)(epc + 0), user) || + __get_inst16(&instr[1], (u16 *)(epc + 2), user)) goto out_sigsegv; opcode = (instr[0] << 16) | instr[1]; /* Immediate versions don't provide a code. */ if (!(opcode & OPCODE)) tcode = (opcode >> 12) & ((1 << 4) - 1); } else { - if (__get_user(opcode, (u32 __user *)epc)) + if (__get_inst32(&opcode, (u32 *)epc, user)) goto out_sigsegv; /* Immediate versions don't provide a code. */ if (!(opcode & OPCODE)) @@ -1142,7 +1136,6 @@ asmlinkage void do_tr(struct pt_regs *regs) do_trap_or_bp(regs, tcode, 0, "Trap"); out: - set_fs(seg); exception_exit(prev_state); return; @@ -1591,7 +1584,6 @@ asmlinkage void do_mcheck(struct pt_regs *regs) { int multi_match = regs->cp0_status & ST0_TS; enum ctx_state prev_state; - mm_segment_t old_fs = get_fs(); prev_state = exception_enter(); show_regs(regs); @@ -1602,12 +1594,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs) dump_tlb_all(); } - if (!user_mode(regs)) - set_fs(KERNEL_DS); - - show_code((unsigned int __user *) regs->cp0_epc); - - set_fs(old_fs); + show_code((void *)regs->cp0_epc, user_mode(regs)); /* * Some chips may have other causes of machine check (e.g. SB1 diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 126a5f3f4e4c..1f3b20a8c377 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -93,6 +93,8 @@ #include #include +#include "access-helper.h" + enum { UNALIGNED_ACTION_QUIET, UNALIGNED_ACTION_SIGNAL, @@ -112,9 +114,8 @@ static void emulate_load_store_insn(struct pt_regs *regs, unsigned long origpc, orig31, value; union mips_instruction insn; unsigned int res; -#ifdef CONFIG_EVA - mm_segment_t seg; -#endif + bool user = user_mode(regs); + origpc = (unsigned long)pc; orig31 = regs->regs[31]; @@ -123,7 +124,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, /* * This load never faults. */ - __get_user(insn.word, pc); + __get_inst32(&insn.word, pc, user); switch (insn.i_format.opcode) { /* @@ -163,7 +164,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (insn.dsp_format.func == lx_op) { switch (insn.dsp_format.op) { case lwx_op: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadW(addr, value, res); if (res) @@ -172,7 +173,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, regs->regs[insn.dsp_format.rd] = value; break; case lhx_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHW(addr, value, res); if (res) @@ -191,93 +192,66 @@ static void emulate_load_store_insn(struct pt_regs *regs, * memory, so we need to "switch" the address limit to * user space, so that address check can work properly. 
*/ - seg = force_uaccess_begin(); switch (insn.spec3_format.func) { case lhe_op: - if (!access_ok(addr, 2)) { - force_uaccess_end(seg); + if (!access_ok(addr, 2)) goto sigbus; - } LoadHWE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } compute_return_epc(regs); regs->regs[insn.spec3_format.rt] = value; break; case lwe_op: - if (!access_ok(addr, 4)) { - force_uaccess_end(seg); + if (!access_ok(addr, 4)) goto sigbus; - } LoadWE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } compute_return_epc(regs); regs->regs[insn.spec3_format.rt] = value; break; case lhue_op: - if (!access_ok(addr, 2)) { - force_uaccess_end(seg); + if (!access_ok(addr, 2)) goto sigbus; - } LoadHWUE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } compute_return_epc(regs); regs->regs[insn.spec3_format.rt] = value; break; case she_op: - if (!access_ok(addr, 2)) { - force_uaccess_end(seg); + if (!access_ok(addr, 2)) goto sigbus; - } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; StoreHWE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } break; case swe_op: - if (!access_ok(addr, 4)) { - force_uaccess_end(seg); + if (!access_ok(addr, 4)) goto sigbus; - } compute_return_epc(regs); value = regs->regs[insn.spec3_format.rt]; StoreWE(addr, value, res); - if (res) { - force_uaccess_end(seg); + if (res) goto fault; - } break; default: - force_uaccess_end(seg); goto sigill; } - force_uaccess_end(seg); } #endif break; case lh_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - LoadHW(addr, value, res); - else - LoadHWE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + LoadHWE(addr, value, res); + else LoadHW(addr, value, res); - } if (res) goto fault; @@ -286,17 +260,13 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lw_op: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - LoadW(addr, value, res); - else - LoadWE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + LoadWE(addr, value, res); + else LoadW(addr, value, res); - } if (res) goto fault; @@ -305,17 +275,13 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; case lhu_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - LoadHWU(addr, value, res); - else - LoadHWUE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + LoadHWUE(addr, value, res); + else LoadHWU(addr, value, res); - } if (res) goto fault; @@ -332,7 +298,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadWU(addr, value, res); @@ -355,7 +321,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. 
*/ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; LoadDW(addr, value, res); @@ -370,40 +336,32 @@ static void emulate_load_store_insn(struct pt_regs *regs, goto sigill; case sh_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - StoreHW(addr, value, res); - else - StoreHWE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + StoreHWE(addr, value, res); + else StoreHW(addr, value, res); - } if (res) goto fault; break; case sw_op: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - if (IS_ENABLED(CONFIG_EVA)) { - if (uaccess_kernel()) - StoreW(addr, value, res); - else - StoreWE(addr, value, res); - } else { + if (IS_ENABLED(CONFIG_EVA) && user) + StoreWE(addr, value, res); + else StoreW(addr, value, res); - } if (res) goto fault; @@ -418,7 +376,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; compute_return_epc(regs); @@ -626,6 +584,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, unsigned long origpc, contpc; union mips_instruction insn; struct mm_decoded_insn mminsn; + bool user = user_mode(regs); origpc = regs->cp0_epc; orig31 = regs->regs[31]; @@ -689,7 +648,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; LoadW(addr, value, res); @@ -708,7 +667,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; value = regs->regs[reg]; @@ -728,7 +687,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(addr, 16)) + if (user && !access_ok(addr, 16)) goto sigbus; LoadDW(addr, value, res); @@ -751,7 +710,7 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if (reg == 31) goto sigbus; - if (!access_ok(addr, 16)) + if (user && !access_ok(addr, 16)) goto sigbus; value = regs->regs[reg]; @@ -774,10 +733,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok(addr, 4 * (rvar + 1))) + if (user && !access_ok(addr, 4 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(addr, 4 * rvar)) + if (user && !access_ok(addr, 4 * rvar)) goto sigbus; } if (rvar == 9) @@ -810,10 +769,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok(addr, 4 * (rvar + 1))) + if (user && !access_ok(addr, 4 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(addr, 4 * rvar)) + if (user && !access_ok(addr, 4 * rvar)) goto sigbus; } if (rvar == 9) @@ -847,10 +806,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok(addr, 8 * (rvar + 1))) + if (user && !access_ok(addr, 8 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(addr, 8 * rvar)) + if (user && !access_ok(addr, 8 * rvar)) goto sigbus; } if (rvar == 9) @@ -888,10 +847,10 @@ static void emulate_load_store_microMIPS(struct pt_regs *regs, 
if ((rvar > 9) || !reg) goto sigill; if (reg & 0x10) { - if (!access_ok(addr, 8 * (rvar + 1))) + if (user && !access_ok(addr, 8 * (rvar + 1))) goto sigbus; } else { - if (!access_ok(addr, 8 * rvar)) + if (user && !access_ok(addr, 8 * rvar)) goto sigbus; } if (rvar == 9) @@ -1010,7 +969,7 @@ fpu_emul: case mm_lwm16_op: reg = insn.mm16_m_format.rlist; rvar = reg + 1; - if (!access_ok(addr, 4 * rvar)) + if (user && !access_ok(addr, 4 * rvar)) goto sigbus; for (i = 16; rvar; rvar--, i++) { @@ -1030,7 +989,7 @@ fpu_emul: case mm_swm16_op: reg = insn.mm16_m_format.rlist; rvar = reg + 1; - if (!access_ok(addr, 4 * rvar)) + if (user && !access_ok(addr, 4 * rvar)) goto sigbus; for (i = 16; rvar; rvar--, i++) { @@ -1084,7 +1043,7 @@ fpu_emul: } loadHW: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHW(addr, value, res); @@ -1094,7 +1053,7 @@ loadHW: goto success; loadHWU: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHWU(addr, value, res); @@ -1104,7 +1063,7 @@ loadHWU: goto success; loadW: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadW(addr, value, res); @@ -1122,7 +1081,7 @@ loadWU: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadWU(addr, value, res); @@ -1144,7 +1103,7 @@ loadDW: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; LoadDW(addr, value, res); @@ -1158,7 +1117,7 @@ loadDW: goto sigill; storeHW: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; value = regs->regs[reg]; @@ -1168,7 +1127,7 @@ storeHW: goto success; storeW: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; value = regs->regs[reg]; @@ -1186,7 +1145,7 @@ storeDW: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; value = regs->regs[reg]; @@ -1243,6 +1202,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) union mips16e_instruction mips16inst, oldinst; unsigned int opcode; int extended = 0; + bool user = user_mode(regs); origpc = regs->cp0_epc; orig31 = regs->regs[31]; @@ -1344,7 +1304,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) goto sigbus; case MIPS16e_lh_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHW(addr, value, res); @@ -1355,7 +1315,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) break; case MIPS16e_lhu_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; LoadHWU(addr, value, res); @@ -1368,7 +1328,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) case MIPS16e_lw_op: case MIPS16e_lwpc_op: case MIPS16e_lwsp_op: - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadW(addr, value, res); @@ -1387,7 +1347,7 @@ static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. 
*/ - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; LoadWU(addr, value, res); @@ -1411,7 +1371,7 @@ loadDW: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; LoadDW(addr, value, res); @@ -1426,7 +1386,7 @@ loadDW: goto sigill; case MIPS16e_sh_op: - if (!access_ok(addr, 2)) + if (user && !access_ok(addr, 2)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); @@ -1439,7 +1399,7 @@ loadDW: case MIPS16e_sw_op: case MIPS16e_swsp_op: case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ - if (!access_ok(addr, 4)) + if (user && !access_ok(addr, 4)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); @@ -1459,7 +1419,7 @@ writeDW: * would blow up, so for now we don't handle unaligned 64-bit * instructions on 32-bit kernels. */ - if (!access_ok(addr, 8)) + if (user && !access_ok(addr, 8)) goto sigbus; MIPS16e_compute_return_epc(regs, &oldinst); @@ -1516,7 +1476,6 @@ asmlinkage void do_ade(struct pt_regs *regs) { enum ctx_state prev_state; unsigned int __user *pc; - mm_segment_t seg; prev_state = exception_enter(); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, @@ -1551,24 +1510,14 @@ asmlinkage void do_ade(struct pt_regs *regs) show_registers(regs); if (cpu_has_mmips) { - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); emulate_load_store_microMIPS(regs, (void __user *)regs->cp0_badvaddr); - set_fs(seg); - return; } if (cpu_has_mips16) { - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); emulate_load_store_MIPS16e(regs, (void __user *)regs->cp0_badvaddr); - set_fs(seg); - return; } @@ -1579,11 +1528,7 @@ asmlinkage void do_ade(struct pt_regs *regs) show_registers(regs); pc = (unsigned int __user *)exception_epc(regs); - seg = get_fs(); - if (!user_mode(regs)) - set_fs(KERNEL_DS); emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); - set_fs(seg); return; -- cgit v1.2.3 From 04324f44cb69a03fdc8f2ee52386a4fdf6a0043b Mon Sep 17 00:00:00 2001 From: Thomas Bogendoerfer Date: Thu, 1 Apr 2021 14:56:37 +0200 Subject: MIPS: Remove get_fs/set_fs All get_fs/set_fs calls in MIPS code are gone, so remove implementation of it. With the clear separation of user/kernel space access we no longer need the EVA special handling, so get rid of that, too. 
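For readers who have not followed the set_fs() removal work, here is a minimal, illustrative sketch (not code from this patch) of the access pattern the series converges on: explicit user accesses keep copy_from_user()/get_user(), while accesses to kernel addresses use the generic *_nofault() helpers, so nothing needs to flip a per-thread address limit any more. The `user` flag mirrors the `bool user = user_mode(regs)` checks introduced in the unaligned handler above.

	/*
	 * Illustrative only.  copy_from_user() and copy_from_kernel_nofault()
	 * are generic kernel APIs; choosing between them by the kind of
	 * address replaces the old set_fs(KERNEL_DS) dance.
	 */
	#include <linux/errno.h>
	#include <linux/uaccess.h>

	static int read_word(unsigned long *dst, const void *src, bool user)
	{
		if (user)
			return copy_from_user(dst, (const void __user *)src,
					      sizeof(*dst)) ? -EFAULT : 0;

		return copy_from_kernel_nofault(dst, src, sizeof(*dst));
	}
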
Signed-off-by: Thomas Bogendoerfer Reviewed-by: Christoph Hellwig --- arch/mips/Kconfig | 1 - arch/mips/include/asm/processor.h | 4 - arch/mips/include/asm/thread_info.h | 6 - arch/mips/include/asm/uaccess.h | 449 ++++++++++++------------------------ arch/mips/kernel/asm-offsets.c | 1 - arch/mips/kernel/process.c | 2 - arch/mips/kernel/scall32-o32.S | 4 +- arch/mips/lib/memcpy.S | 28 ++- arch/mips/lib/memset.S | 3 - arch/mips/lib/strncpy_user.S | 48 ++-- arch/mips/lib/strnlen_user.S | 44 +--- 11 files changed, 190 insertions(+), 400 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 7d509191168b..e9893cd34992 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -94,7 +94,6 @@ config MIPS select PERF_USE_VMALLOC select PCI_MSI_ARCH_FALLBACKS if PCI_MSI select RTC_LIB - select SET_FS select SYSCTL_EXCEPTION_TRACE select VIRT_TO_BUS select ARCH_HAS_ELFCORE_COMPAT diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 8e69e0a35ee9..0c3550c82b72 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -221,10 +221,6 @@ struct nlm_cop2_state { #define COP2_INIT #endif -typedef struct { - unsigned long seg; -} mm_segment_t; - #ifdef CONFIG_CPU_HAS_MSA # define ARCH_MIN_TASKALIGN 16 # define FPU_ALIGN __aligned(16) diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index e2c352da3877..0b17aaa9e012 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -28,11 +28,6 @@ struct thread_info { unsigned long tp_value; /* thread pointer */ __u32 cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ - mm_segment_t addr_limit; /* - * thread address space limit: - * 0x7fffffff for user-thead - * 0xffffffff for kernel-thread - */ struct pt_regs *regs; long syscall; /* syscall number */ }; @@ -46,7 +41,6 @@ struct thread_info { .flags = _TIF_FIXADE, \ .cpu = 0, \ .preempt_count = INIT_PREEMPT_COUNT, \ - .addr_limit = KERNEL_DS, \ } /* diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index c5cab0b8f902..91bc7fb7dca1 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -16,13 +16,6 @@ #include #include -/* - * The fs value determines whether argument validity checking should be - * performed or not. If get_fs() == USER_DS, checking is performed, with - * get_fs() == KERNEL_DS, checking is bypassed. - * - * For historical reasons, these macros are grossly misnamed. - */ #ifdef CONFIG_32BIT #define __UA_LIMIT 0x80000000UL @@ -49,38 +42,6 @@ extern u64 __ua_limit; #endif /* CONFIG_64BIT */ -/* - * USER_DS is a bitmask that has the bits set that may not be set in a valid - * userspace address. Note that we limit 32-bit userspace to 0x7fff8000 but - * the arithmetic we're doing only works if the limit is a power of two, so - * we use 0x80000000 here on 32-bit kernels. 
If a process passes an invalid - * address in this range it's the process's problem, not ours :-) - */ - -#define KERNEL_DS ((mm_segment_t) { 0UL }) -#define USER_DS ((mm_segment_t) { __UA_LIMIT }) - -#define get_fs() (current_thread_info()->addr_limit) -#define set_fs(x) (current_thread_info()->addr_limit = (x)) - -#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg) - -/* - * eva_kernel_access() - determine whether kernel memory access on an EVA system - * - * Determines whether memory accesses should be performed to kernel memory - * on a system using Extended Virtual Addressing (EVA). - * - * Return: true if a kernel memory access on an EVA system, else false. - */ -static inline bool eva_kernel_access(void) -{ - if (!IS_ENABLED(CONFIG_EVA)) - return false; - - return uaccess_kernel(); -} - /* * Is a address valid? This does a straightforward calculation rather * than tests. @@ -118,7 +79,7 @@ static inline bool eva_kernel_access(void) static inline int __access_ok(const void __user *p, unsigned long size) { unsigned long addr = (unsigned long)p; - return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0; + return (__UA_LIMIT & (addr | (addr + size) | __ua_size(size))) == 0; } #define access_ok(addr, size) \ @@ -215,43 +176,6 @@ static inline int __access_ok(const void __user *p, unsigned long size) struct __large_struct { unsigned long buf[100]; }; #define __m(x) (*(struct __large_struct __user *)(x)) -/* - * Yuck. We need two variants, one for 64bit operation and one - * for 32 bit mode and old iron. - */ -#ifndef CONFIG_EVA -#define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr) -#else -/* - * Kernel specific functions for EVA. We need to use normal load instructions - * to read data from kernel when operating in EVA mode. We use these macros to - * avoid redefining __get_user_asm for EVA. 
- */ -#undef _loadd -#undef _loadw -#undef _loadh -#undef _loadb -#ifdef CONFIG_32BIT -#define _loadd _loadw -#else -#define _loadd(reg, addr) "ld " reg ", " addr -#endif -#define _loadw(reg, addr) "lw " reg ", " addr -#define _loadh(reg, addr) "lh " reg ", " addr -#define _loadb(reg, addr) "lb " reg ", " addr - -#define __get_kernel_common(val, size, ptr) \ -do { \ - switch (size) { \ - case 1: __get_data_asm(val, _loadb, ptr); break; \ - case 2: __get_data_asm(val, _loadh, ptr); break; \ - case 4: __get_data_asm(val, _loadw, ptr); break; \ - case 8: __GET_DW(val, _loadd, ptr); break; \ - default: __get_user_unknown(); break; \ - } \ -} while (0) -#endif - #ifdef CONFIG_32BIT #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr) #endif @@ -276,12 +200,9 @@ do { \ ({ \ int __gu_err; \ \ - if (eva_kernel_access()) { \ - __get_kernel_common((x), size, ptr); \ - } else { \ - __chk_user_ptr(ptr); \ - __get_user_common((x), size, ptr); \ - } \ + __chk_user_ptr(ptr); \ + __get_user_common((x), size, ptr); \ + \ __gu_err; \ }) @@ -291,11 +212,8 @@ do { \ const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ \ might_fault(); \ - if (likely(access_ok( __gu_ptr, size))) { \ - if (eva_kernel_access()) \ - __get_kernel_common((x), size, __gu_ptr); \ - else \ - __get_user_common((x), size, __gu_ptr); \ + if (likely(access_ok(__gu_ptr, size))) { \ + __get_user_common((x), size, __gu_ptr); \ } else \ (x) = 0; \ \ @@ -361,46 +279,31 @@ do { \ do { \ int __gu_err; \ \ - __get_kernel_common(*((type *)(dst)), sizeof(type), \ - (__force type *)(src)); \ + switch (sizeof(type)) { \ + case 1: \ + __get_data_asm(*(type *)(dst), kernel_lb, \ + (__force type *)(src)); \ + break; \ + case 2: \ + __get_data_asm(*(type *)(dst), kernel_lh, \ + (__force type *)(src)); \ + break; \ + case 4: \ + __get_data_asm(*(type *)(dst), kernel_lw, \ + (__force type *)(src)); \ + break; \ + case 8: \ + __GET_DW(*(type *)(dst), kernel_ld, \ + (__force type *)(src)); \ + break; \ + default: \ + __get_user_unknown(); \ + break; \ + } \ if (unlikely(__gu_err)) \ goto err_label; \ } while (0) -#ifndef CONFIG_EVA -#define __put_kernel_common(ptr, size) __put_user_common(ptr, size) -#else -/* - * Kernel specific functions for EVA. We need to use normal load instructions - * to read data from kernel when operating in EVA mode. We use these macros to - * avoid redefining __get_data_asm for EVA. - */ -#undef _stored -#undef _storew -#undef _storeh -#undef _storeb -#ifdef CONFIG_32BIT -#define _stored _storew -#else -#define _stored(reg, addr) "ld " reg ", " addr -#endif - -#define _storew(reg, addr) "sw " reg ", " addr -#define _storeh(reg, addr) "sh " reg ", " addr -#define _storeb(reg, addr) "sb " reg ", " addr - -#define __put_kernel_common(ptr, size) \ -do { \ - switch (size) { \ - case 1: __put_data_asm(_storeb, ptr); break; \ - case 2: __put_data_asm(_storeh, ptr); break; \ - case 4: __put_data_asm(_storew, ptr); break; \ - case 8: __PUT_DW(_stored, ptr); break; \ - default: __put_user_unknown(); break; \ - } \ -} while(0) -#endif - /* * Yuck. We need two variants, one for 64bit operation and one * for 32 bit mode and old iron. 
@@ -429,12 +332,9 @@ do { \ int __pu_err = 0; \ \ __pu_val = (x); \ - if (eva_kernel_access()) { \ - __put_kernel_common(ptr, size); \ - } else { \ - __chk_user_ptr(ptr); \ - __put_user_common(ptr, size); \ - } \ + __chk_user_ptr(ptr); \ + __put_user_common(ptr, size); \ + \ __pu_err; \ }) @@ -445,11 +345,8 @@ do { \ int __pu_err = -EFAULT; \ \ might_fault(); \ - if (likely(access_ok( __pu_addr, size))) { \ - if (eva_kernel_access()) \ - __put_kernel_common(__pu_addr, size); \ - else \ - __put_user_common(__pu_addr, size); \ + if (likely(access_ok(__pu_addr, size))) { \ + __put_user_common(__pu_addr, size); \ } \ \ __pu_err; \ @@ -501,7 +398,23 @@ do { \ int __pu_err = 0; \ \ __pu_val = *(__force type *)(src); \ - __put_kernel_common(((type *)(dst)), sizeof(type)); \ + switch (sizeof(type)) { \ + case 1: \ + __put_data_asm(kernel_sb, (type *)(dst)); \ + break; \ + case 2: \ + __put_data_asm(kernel_sh, (type *)(dst)); \ + break; \ + case 4: \ + __put_data_asm(kernel_sw, (type *)(dst)) \ + break; \ + case 8: \ + __PUT_DW(kernel_sd, (type *)(dst)); \ + break; \ + default: \ + __put_user_unknown(); \ + break; \ + } \ if (unlikely(__pu_err)) \ goto err_label; \ } while (0) @@ -529,124 +442,85 @@ do { \ #define DADDI_SCRATCH "$0" #endif -extern size_t __copy_user(void *__to, const void *__from, size_t __n); - -#define __invoke_copy_from(func, to, from, n) \ -({ \ - register void *__cu_to_r __asm__("$4"); \ - register const void __user *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - ".set\tnoreorder\n\t" \ - __MODULE_JAL(func) \ - ".set\tnoat\n\t" \ - __UA_ADDU "\t$1, %1, %2\n\t" \ - ".set\tat\n\t" \ - ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) - -#define __invoke_copy_to(func, to, from, n) \ -({ \ - register void __user *__cu_to_r __asm__("$4"); \ - register const void *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - __MODULE_JAL(func) \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) - -#define __invoke_copy_from_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_to_kernel(to, from, n) \ - __invoke_copy_to(__copy_user, to, from, n) - -#define ___invoke_copy_in_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#ifndef CONFIG_EVA -#define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_to_user(to, from, n) \ - __invoke_copy_to(__copy_user, to, from, n) - -#define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#else - -/* EVA specific functions */ - -extern size_t __copy_from_user_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_to_user_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); - -/* - * Source or destination address is in userland. 
We need to go through - * the TLB - */ -#define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from(__copy_from_user_eva, to, from, n) - -#define __invoke_copy_to_user(to, from, n) \ - __invoke_copy_to(__copy_to_user_eva, to, from, n) - -#define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from(__copy_in_user_eva, to, from, n) - -#endif /* CONFIG_EVA */ +extern size_t __raw_copy_from_user(void *__to, const void *__from, size_t __n); +extern size_t __raw_copy_to_user(void *__to, const void *__from, size_t __n); +extern size_t __raw_copy_in_user(void *__to, const void *__from, size_t __n); static inline unsigned long -raw_copy_to_user(void __user *to, const void *from, unsigned long n) +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - if (eva_kernel_access()) - return __invoke_copy_to_kernel(to, from, n); - else - return __invoke_copy_to_user(to, from, n); + register void *__cu_to_r __asm__("$4"); + register const void __user *__cu_from_r __asm__("$5"); + register long __cu_len_r __asm__("$6"); + + __cu_to_r = to; + __cu_from_r = from; + __cu_len_r = n; + + __asm__ __volatile__( + ".set\tnoreorder\n\t" + __MODULE_JAL(__raw_copy_from_user) + ".set\tnoat\n\t" + __UA_ADDU "\t$1, %1, %2\n\t" + ".set\tat\n\t" + ".set\treorder" + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) + : + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", + DADDI_SCRATCH, "memory"); + + return __cu_len_r; } static inline unsigned long -raw_copy_from_user(void *to, const void __user *from, unsigned long n) +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - if (eva_kernel_access()) - return __invoke_copy_from_kernel(to, from, n); - else - return __invoke_copy_from_user(to, from, n); + register void __user *__cu_to_r __asm__("$4"); + register const void *__cu_from_r __asm__("$5"); + register long __cu_len_r __asm__("$6"); + + __cu_to_r = (to); + __cu_from_r = (from); + __cu_len_r = (n); + + __asm__ __volatile__( + __MODULE_JAL(__raw_copy_to_user) + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) + : + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", + DADDI_SCRATCH, "memory"); + + return __cu_len_r; } #define INLINE_COPY_FROM_USER #define INLINE_COPY_TO_USER static inline unsigned long -raw_copy_in_user(void __user*to, const void __user *from, unsigned long n) +raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) { - if (eva_kernel_access()) - return ___invoke_copy_in_kernel(to, from, n); - else - return ___invoke_copy_in_user(to, from, n); + register void __user *__cu_to_r __asm__("$4"); + register const void __user *__cu_from_r __asm__("$5"); + register long __cu_len_r __asm__("$6"); + + __cu_to_r = to; + __cu_from_r = from; + __cu_len_r = n; + + __asm__ __volatile__( + ".set\tnoreorder\n\t" + __MODULE_JAL(__raw_copy_in_user) + ".set\tnoat\n\t" + __UA_ADDU "\t$1, %1, %2\n\t" + ".set\tat\n\t" + ".set\treorder" + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) + : + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", + DADDI_SCRATCH, "memory"); + return __cu_len_r; } -extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size); extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size); /* @@ -672,28 +546,16 @@ __clear_user(void __user *addr, __kernel_size_t size) #define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31" #endif /* CONFIG_CPU_MICROMIPS */ - if (eva_kernel_access()) { - __asm__ __volatile__( - "move\t$4, %1\n\t" - 
"move\t$5, $0\n\t" - "move\t$6, %2\n\t" - __MODULE_JAL(__bzero_kernel) - "move\t%0, $6" - : "=r" (res) - : "r" (addr), "r" (size) - : bzero_clobbers); - } else { - might_fault(); - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, $0\n\t" - "move\t$6, %2\n\t" - __MODULE_JAL(__bzero) - "move\t%0, $6" - : "=r" (res) - : "r" (addr), "r" (size) - : bzero_clobbers); - } + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, $0\n\t" + "move\t$6, %2\n\t" + __MODULE_JAL(__bzero) + "move\t%0, $6" + : "=r" (res) + : "r" (addr), "r" (size) + : bzero_clobbers); return res; } @@ -707,7 +569,6 @@ __clear_user(void __user *addr, __kernel_size_t size) __cl_size; \ }) -extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len); extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len); /* @@ -733,33 +594,23 @@ strncpy_from_user(char *__to, const char __user *__from, long __len) { long res; - if (eva_kernel_access()) { - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - "move\t$6, %3\n\t" - __MODULE_JAL(__strncpy_from_kernel_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (__to), "r" (__from), "r" (__len) - : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); - } else { - might_fault(); - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - "move\t$6, %3\n\t" - __MODULE_JAL(__strncpy_from_user_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (__to), "r" (__from), "r" (__len) - : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); - } + if (!access_ok(__from, __len)) + return -EFAULT; + + might_fault(); + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" + "move\t$6, %3\n\t" + __MODULE_JAL(__strncpy_from_user_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (__to), "r" (__from), "r" (__len) + : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory"); return res; } -extern long __strnlen_kernel_asm(const char __user *s, long n); extern long __strnlen_user_asm(const char __user *s, long n); /* @@ -779,26 +630,18 @@ static inline long strnlen_user(const char __user *s, long n) { long res; + if (!access_ok(s, n)) + return -0; + might_fault(); - if (eva_kernel_access()) { - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - __MODULE_JAL(__strnlen_kernel_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (s), "r" (n) - : "$2", "$4", "$5", __UA_t0, "$31"); - } else { - __asm__ __volatile__( - "move\t$4, %1\n\t" - "move\t$5, %2\n\t" - __MODULE_JAL(__strnlen_user_asm) - "move\t%0, $2" - : "=r" (res) - : "r" (s), "r" (n) - : "$2", "$4", "$5", __UA_t0, "$31"); - } + __asm__ __volatile__( + "move\t$4, %1\n\t" + "move\t$5, %2\n\t" + __MODULE_JAL(__strnlen_user_asm) + "move\t%0, $2" + : "=r" (res) + : "r" (s), "r" (n) + : "$2", "$4", "$5", __UA_t0, "$31"); return res; } diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index aebfda81120a..5735b2cd6f2a 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -98,7 +98,6 @@ void output_thread_info_defines(void) OFFSET(TI_TP_VALUE, thread_info, tp_value); OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); - OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); OFFSET(TI_REGS, thread_info, regs); DEFINE(_THREAD_SIZE, THREAD_SIZE); DEFINE(_THREAD_MASK, THREAD_MASK); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 7efa0d1a4c2b..bff080db0294 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -124,7 +124,6 @@ int 
copy_thread(unsigned long clone_flags, unsigned long usp, /* kernel thread */ unsigned long status = p->thread.cp0_status; memset(childregs, 0, sizeof(struct pt_regs)); - ti->addr_limit = KERNEL_DS; p->thread.reg16 = usp; /* fn */ p->thread.reg17 = kthread_arg; p->thread.reg29 = childksp; @@ -145,7 +144,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, childregs->regs[2] = 0; /* Child gets zero as return value */ if (usp) childregs->regs[29] = usp; - ti->addr_limit = USER_DS; p->thread.reg29 = (unsigned long) childregs; p->thread.reg31 = (unsigned long) ret_from_fork; diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index 84e8624e83a2..b1b2e106f711 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -48,10 +48,8 @@ NESTED(handle_sys, PT_SIZE, sp) * We intentionally keep the kernel stack a little below the top of * userspace so we don't have to do a slower byte accurate check here. */ - lw t5, TI_ADDR_LIMIT($28) addu t4, t0, 32 - and t5, t4 - bltz t5, bad_stack # -> sp is bad + bltz t4, bad_stack # -> sp is bad /* * Ok, copy the args from the luser stack to the kernel stack. diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index 88065ee433cd..e19fb98b5d38 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -661,8 +661,14 @@ LEAF(memcpy) /* a0=dst a1=src a2=len */ EXPORT_SYMBOL(memcpy) move v0, dst /* return value */ .L__memcpy: -FEXPORT(__copy_user) -EXPORT_SYMBOL(__copy_user) +#ifndef CONFIG_EVA +FEXPORT(__raw_copy_from_user) +EXPORT_SYMBOL(__raw_copy_from_user) +FEXPORT(__raw_copy_to_user) +EXPORT_SYMBOL(__raw_copy_to_user) +FEXPORT(__raw_copy_in_user) +EXPORT_SYMBOL(__raw_copy_in_user) +#endif /* Legacy Mode, user <-> user */ __BUILD_COPY_USER LEGACY_MODE USEROP USEROP @@ -681,10 +687,10 @@ EXPORT_SYMBOL(__copy_user) * __copy_from_user (EVA) */ -LEAF(__copy_from_user_eva) -EXPORT_SYMBOL(__copy_from_user_eva) +LEAF(__raw_copy_from_user) +EXPORT_SYMBOL(__raw_copy_from_user) __BUILD_COPY_USER EVA_MODE USEROP KERNELOP -END(__copy_from_user_eva) +END(__raw_copy_from_user) @@ -692,18 +698,18 @@ END(__copy_from_user_eva) * __copy_to_user (EVA) */ -LEAF(__copy_to_user_eva) -EXPORT_SYMBOL(__copy_to_user_eva) +LEAF(__raw_copy_to_user) +EXPORT_SYMBOL(__raw_copy_to_user) __BUILD_COPY_USER EVA_MODE KERNELOP USEROP -END(__copy_to_user_eva) +END(__raw_copy_to_user) /* * __copy_in_user (EVA) */ -LEAF(__copy_in_user_eva) -EXPORT_SYMBOL(__copy_in_user_eva) +LEAF(__raw_copy_in_user) +EXPORT_SYMBOL(__raw_copy_in_user) __BUILD_COPY_USER EVA_MODE USEROP USEROP -END(__copy_in_user_eva) +END(__raw_copy_in_user) #endif diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index d5449e8a3dfc..b0baa3c79fad 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -314,9 +314,6 @@ EXPORT_SYMBOL(memset) #ifndef CONFIG_EVA FEXPORT(__bzero) EXPORT_SYMBOL(__bzero) -#else -FEXPORT(__bzero_kernel) -EXPORT_SYMBOL(__bzero_kernel) #endif __BUILD_BZERO LEGACY_MODE diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S index acdff66bd5d2..556acf684d7b 100644 --- a/arch/mips/lib/strncpy_user.S +++ b/arch/mips/lib/strncpy_user.S @@ -29,19 +29,17 @@ * it happens at most some bytes of the exceptions handlers will be copied. */ - .macro __BUILD_STRNCPY_ASM func -LEAF(__strncpy_from_\func\()_asm) - LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? 
- and v0, a1 - bnez v0, .Lfault\@ - +LEAF(__strncpy_from_user_asm) move t0, zero move v1, a1 -.ifeqs "\func","kernel" -1: EX(lbu, v0, (v1), .Lfault\@) -.else -1: EX(lbue, v0, (v1), .Lfault\@) -.endif +#ifdef CONFIG_EVA + .set push + .set eva +1: EX(lbue, v0, (v1), .Lfault) + .set pop +#else +1: EX(lbu, v0, (v1), .Lfault) +#endif PTR_ADDIU v1, 1 R10KCBARRIER(0(ra)) sb v0, (a0) @@ -51,35 +49,17 @@ LEAF(__strncpy_from_\func\()_asm) bne t0, a2, 1b 2: PTR_ADDU v0, a1, t0 xor v0, a1 - bltz v0, .Lfault\@ + bltz v0, .Lfault move v0, t0 jr ra # return n - END(__strncpy_from_\func\()_asm) + END(__strncpy_from_user_asm) -.Lfault\@: +.Lfault: li v0, -EFAULT jr ra .section __ex_table,"a" - PTR 1b, .Lfault\@ + PTR 1b, .Lfault .previous - .endm - -#ifndef CONFIG_EVA - /* Set aliases */ - .global __strncpy_from_user_asm - .set __strncpy_from_user_asm, __strncpy_from_kernel_asm -EXPORT_SYMBOL(__strncpy_from_user_asm) -#endif - -__BUILD_STRNCPY_ASM kernel -EXPORT_SYMBOL(__strncpy_from_kernel_asm) - -#ifdef CONFIG_EVA - .set push - .set eva -__BUILD_STRNCPY_ASM user - .set pop -EXPORT_SYMBOL(__strncpy_from_user_asm) -#endif + EXPORT_SYMBOL(__strncpy_from_user_asm) diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S index e1bacf5a3abe..92b63f20ec05 100644 --- a/arch/mips/lib/strnlen_user.S +++ b/arch/mips/lib/strnlen_user.S @@ -26,12 +26,7 @@ * bytes. There's nothing secret there. On 64-bit accessing beyond * the maximum is a tad hairier ... */ - .macro __BUILD_STRNLEN_ASM func -LEAF(__strnlen_\func\()_asm) - LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok? - and v0, a0 - bnez v0, .Lfault\@ - +LEAF(__strnlen_user_asm) move v0, a0 PTR_ADDU a1, a0 # stop pointer 1: @@ -40,11 +35,14 @@ LEAF(__strnlen_\func\()_asm) li AT, 1 #endif beq v0, a1, 1f # limit reached? -.ifeqs "\func", "kernel" - EX(lb, t0, (v0), .Lfault\@) -.else - EX(lbe, t0, (v0), .Lfault\@) -.endif +#ifdef CONFIG_EVA + .set push + .set eva + EX(lbe, t0, (v0), .Lfault) + .set pop +#else + EX(lb, t0, (v0), .Lfault) +#endif .set noreorder bnez t0, 1b 1: @@ -57,28 +55,10 @@ LEAF(__strnlen_\func\()_asm) .set reorder PTR_SUBU v0, a0 jr ra - END(__strnlen_\func\()_asm) + END(__strnlen_user_asm) -.Lfault\@: +.Lfault: move v0, zero jr ra - .endm - -#ifndef CONFIG_EVA - /* Set aliases */ - .global __strnlen_user_asm - .set __strnlen_user_asm, __strnlen_kernel_asm -EXPORT_SYMBOL(__strnlen_user_asm) -#endif - -__BUILD_STRNLEN_ASM kernel -EXPORT_SYMBOL(__strnlen_kernel_asm) - -#ifdef CONFIG_EVA - .set push - .set eva -__BUILD_STRNLEN_ASM user - .set pop -EXPORT_SYMBOL(__strnlen_user_asm) -#endif + EXPORT_SYMBOL(__strnlen_user_asm) -- cgit v1.2.3 From 5e65c52ec716af6e8f51dacdaeb4a4d872249af1 Mon Sep 17 00:00:00 2001 From: Tiezhu Yang Date: Tue, 6 Apr 2021 17:25:12 +0800 Subject: MIPS: Loongson64: Use _CACHE_UNCACHED instead of _CACHE_UNCACHED_ACCELERATED Loongson64 processors have a writecombine issue that maybe failed to write back framebuffer used with ATI Radeon or AMD GPU at times, after commit 8a08e50cee66 ("drm: Permit video-buffers writecombine mapping for MIPS"), there exists some errors such as blurred screen and lockup, and so on. 
[ 60.958721] radeon 0000:03:00.0: ring 0 stalled for more than 10079msec [ 60.965315] radeon 0000:03:00.0: GPU lockup (current fence id 0x0000000000000112 last fence id 0x000000000000011d on ring 0) [ 60.976525] radeon 0000:03:00.0: ring 3 stalled for more than 10086msec [ 60.983156] radeon 0000:03:00.0: GPU lockup (current fence id 0x0000000000000374 last fence id 0x00000000000003a8 on ring 3) As discussed earlier [1], it might be better to disable writecombine on the CPU detection side because the root cause is unknown now. Actually, this patch is a temporary solution to just make it work well, it is not a proper and final solution, I hope someone will have a better solution to fix this issue in the future. [1] https://lore.kernel.org/patchwork/patch/1285542/ Signed-off-by: Tiezhu Yang Signed-off-by: Thomas Bogendoerfer --- arch/mips/kernel/cpu-probe.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index b71892064f27..0ef240adefb5 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -1752,7 +1752,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) set_isa(c, MIPS_CPU_ISA_M64R2); break; } - c->writecombine = _CACHE_UNCACHED_ACCELERATED; c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); break; @@ -1782,7 +1781,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) * register, we correct it here. */ c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; - c->writecombine = _CACHE_UNCACHED_ACCELERATED; c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */ @@ -1793,7 +1791,6 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) set_elf_platform(cpu, "loongson3a"); set_isa(c, MIPS_CPU_ISA_M64R2); decode_cpucfg(c); - c->writecombine = _CACHE_UNCACHED_ACCELERATED; break; default: panic("Unknown Loongson Processor ID!"); -- cgit v1.2.3 From 7cba4128c2c6e9c67a819c5946ed8066c7306418 Mon Sep 17 00:00:00 2001 From: Thomas Bogendoerfer Date: Wed, 7 Apr 2021 01:03:48 +0200 Subject: MIPS: Fix new sparse warnings Commit 45deb5faeb9e ("MIPS: uaccess: Remove get_fs/set_fs call sites") caused a few new sparse warnings, fix them. Signed-off-by: Thomas Bogendoerfer --- arch/mips/kernel/access-helper.h | 7 ++++--- arch/mips/kernel/unaligned.c | 6 +++--- 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/access-helper.h b/arch/mips/kernel/access-helper.h index dd5b502813b8..590388031503 100644 --- a/arch/mips/kernel/access-helper.h +++ b/arch/mips/kernel/access-helper.h @@ -4,15 +4,16 @@ static inline int __get_addr(unsigned long *a, unsigned long *p, bool user) { - return user ? get_user(*a, p) : get_kernel_nofault(*a, p); + return user ? get_user(*a, (unsigned long __user *)p) : + get_kernel_nofault(*a, p); } static inline int __get_inst16(u16 *i, u16 *p, bool user) { - return user ? get_user(*i, p) : get_kernel_nofault(*i, p); + return user ? get_user(*i, (u16 __user *)p) : get_kernel_nofault(*i, p); } static inline int __get_inst32(u32 *i, u32 *p, bool user) { - return user ? get_user(*i, p) : get_kernel_nofault(*i, p); + return user ? 
get_user(*i, (u32 __user *)p) : get_kernel_nofault(*i, p); } diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 1f3b20a8c377..df4b708c04a9 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -109,7 +109,7 @@ static u32 unaligned_action; extern void show_registers(struct pt_regs *regs); static void emulate_load_store_insn(struct pt_regs *regs, - void __user *addr, unsigned int __user *pc) + void __user *addr, unsigned int *pc) { unsigned long origpc, orig31, value; union mips_instruction insn; @@ -1475,7 +1475,7 @@ sigill: asmlinkage void do_ade(struct pt_regs *regs) { enum ctx_state prev_state; - unsigned int __user *pc; + unsigned int *pc; prev_state = exception_enter(); perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, @@ -1526,7 +1526,7 @@ asmlinkage void do_ade(struct pt_regs *regs) if (unaligned_action == UNALIGNED_ACTION_SHOW) show_registers(regs); - pc = (unsigned int __user *)exception_epc(regs); + pc = (unsigned int *)exception_epc(regs); emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); -- cgit v1.2.3 From 6ce48897ce476bed86fde28752c27596e8753277 Mon Sep 17 00:00:00 2001 From: Huacai Chen Date: Tue, 13 Apr 2021 16:57:23 +0800 Subject: MIPS: Loongson64: Add kexec/kdump support Add kexec/kdump support for Loongson64 by: 1, Provide Loongson-specific kexec functions: loongson_kexec_prepare(), loongson_kexec_shutdown() and loongson_crash_shutdown(); 2, Provide Loongson-specific assembly code in kexec_smp_wait(); To start Loongson64, The boot CPU needs 3 parameters: fw_arg0: the number of arguments in cmdline (i.e., argc). fw_arg1: structure holds cmdline such as "root=/dev/sda1 console=tty" (i.e., argv). fw_arg2: environment (i.e., envp, additional boot parameters from LEFI). Non-boot CPUs do not need one parameter as the IPI mailbox base address. They query their own IPI mailbox to get PC, SP and GP in a loopi, until the boot CPU brings them up. loongson_kexec_prepare(): Setup cmdline for kexec/kdump. The kexec/kdump cmdline comes from kexec's "append" option string. This structure will be parsed in fw_init_cmdline() of arch/mips/fw/lib/cmdline.c. Both image ->control_code_page and the cmdline need to be in a safe memory region (memory allocated by the old kernel may be corrupted by the new kernel). In order to maintain compatibility for the old firmware, the low 2MB is reserverd and safe for Loongson. So let KEXEC_CTRL_CODE and KEXEC_ARGV_ ADDR be here. LEFI parameters may be corrupted at runtime, so backup it at mips_reboot_setup(), and then restore it at loongson_kexec_shutdown() /loongson_crash_shutdown(). loongson_kexec_shutdown(): Wake up all present CPUs and let them go to reboot_code_buffer. Pass the kexec parameters to kexec_args. loongson_crash_shutdown(): Pass the kdump parameters to kexec_args. The assembly part in kexec_smp_wait provide a routine as BIOS does, in order to keep secondary CPUs in a querying loop. 
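As a rough guide to that querying loop, the following is a C rendering of what the kexec_smp_wait_final assembly added below does on Loongson64. It is a sketch under assumed mailbox semantics, not code from this patch; jump_to_new_kernel() is a hypothetical stand-in for the final jump, and the polling is simplified compared to the rate-limited loop in the assembly.

	/*
	 * Hypothetical C equivalent of the Loongson64 kexec_smp_wait_final
	 * macro: poll the per-core mailbox until the boot CPU publishes an
	 * entry PC, then pick up SP, GP and an argument and jump there.
	 * Offsets mirror the mailbox registers used by the assembly
	 * (0x20 = PC, 0x28 = SP, 0x30 = GP, 0x38 = argument).
	 */
	#include <linux/io.h>
	#include <linux/types.h>

	static void secondary_kexec_wait(void __iomem *mailbox)
	{
		u64 pc, sp, gp, arg;

		do {
			pc = readq(mailbox + 0x20);	/* entry PC, 0 until the boot CPU writes it */
		} while (!pc);

		sp  = readq(mailbox + 0x28);
		gp  = readq(mailbox + 0x30);
		arg = readq(mailbox + 0x38);

		jump_to_new_kernel(pc, sp, gp, arg);	/* hypothetical helper: "jr" to the new PC */
	}
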
The layout of low 2MB memory in our design: 0x80000000, the first MB, the first 64K, Exception vectors 0x80010000, the first MB, the second 64K, STR (suspend) data 0x80020000, the first MB, the third and fourth 64K, UEFI HOB 0x80040000, the first MB, the fifth 64K, RT-Thread for SMC 0x80100000, the second MB, the first 64K, KEXEC code 0x80108000, the second MB, the second 64K, KEXEC data Cc: Eric Biederman Tested-by: Jinyang He Signed-off-by: Huacai Chen Signed-off-by: Jinyang He Signed-off-by: Youling Tang Signed-off-by: Thomas Bogendoerfer --- .../asm/mach-cavium-octeon/kernel-entry-init.h | 8 ++ .../asm/mach-loongson64/kernel-entry-init.h | 27 +++++ arch/mips/kernel/relocate_kernel.S | 9 +- arch/mips/loongson64/reset.c | 113 +++++++++++++++++++++ 4 files changed, 152 insertions(+), 5 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index c38b38ce5a3d..b071a7353ee1 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h @@ -157,4 +157,12 @@ octeon_main_processor: .macro smp_slave_setup .endm +#define USE_KEXEC_SMP_WAIT_FINAL + .macro kexec_smp_wait_final + .set push + .set noreorder + synci 0($0) + .set pop + .endm + #endif /* __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H */ diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h index e4d77f4f0fe3..13373c5144f8 100644 --- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h @@ -75,4 +75,31 @@ .set pop .endm +#define USE_KEXEC_SMP_WAIT_FINAL + .macro kexec_smp_wait_final + /* s0:prid s1:initfn */ + /* a0:base t1:cpuid t2:node t9:count */ + mfc0 t1, CP0_EBASE + andi t1, MIPS_EBASE_CPUNUM + dins a0, t1, 8, 2 /* insert core id*/ + dext t2, t1, 2, 2 + dins a0, t2, 44, 2 /* insert node id */ + mfc0 s0, CP0_PRID + andi s0, s0, (PRID_IMP_MASK | PRID_REV_MASK) + beq s0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R1), 1f + beq s0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3B_R2), 1f + b 2f /* Loongson-3A1000/3A2000/3A3000/3A4000 */ +1: dins a0, t2, 14, 2 /* Loongson-3B1000/3B1500 need bit 15~14 */ +2: li t9, 0x100 /* wait for init loop */ +3: addiu t9, -1 /* limit mailbox access */ + bnez t9, 3b + lw s1, 0x20(a0) /* check PC as an indicator */ + beqz s1, 2b + ld s1, 0x20(a0) /* get PC via mailbox reg0 */ + ld sp, 0x28(a0) /* get SP via mailbox reg1 */ + ld gp, 0x30(a0) /* get GP via mailbox reg2 */ + ld a1, 0x38(a0) + jr s1 /* jump to initial PC */ + .endm + #endif /* __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H */ diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S index ac870893ba2d..f3c908abdbb8 100644 --- a/arch/mips/kernel/relocate_kernel.S +++ b/arch/mips/kernel/relocate_kernel.S @@ -11,6 +11,8 @@ #include #include +#include + LEAF(relocate_new_kernel) PTR_L a0, arg0 PTR_L a1, arg1 @@ -125,11 +127,8 @@ LEAF(kexec_smp_wait) 1: LONG_L s0, (t0) bne s0, zero,1b -#ifdef CONFIG_CPU_CAVIUM_OCTEON - .set push - .set noreorder - synci 0($0) - .set pop +#ifdef USE_KEXEC_SMP_WAIT_FINAL + kexec_smp_wait_final #else sync #endif diff --git a/arch/mips/loongson64/reset.c b/arch/mips/loongson64/reset.c index 3bb8a1ed9348..c97bfdc8c922 100644 --- a/arch/mips/loongson64/reset.c +++ b/arch/mips/loongson64/reset.c @@ -6,9 +6,14 @@ * Copyright (C) 2009 Lemote, Inc. 
* Author: Zhangjin Wu, wuzhangjin@gmail.com */ +#include +#include #include +#include #include +#include +#include #include #include @@ -47,12 +52,120 @@ static void loongson_halt(void) } } +#ifdef CONFIG_KEXEC + +/* 0X80000000~0X80200000 is safe */ +#define MAX_ARGS 64 +#define KEXEC_CTRL_CODE 0xFFFFFFFF80100000UL +#define KEXEC_ARGV_ADDR 0xFFFFFFFF80108000UL +#define KEXEC_ARGV_SIZE COMMAND_LINE_SIZE +#define KEXEC_ENVP_SIZE 4800 + +static int kexec_argc; +static int kdump_argc; +static void *kexec_argv; +static void *kdump_argv; +static void *kexec_envp; + +static int loongson_kexec_prepare(struct kimage *image) +{ + int i, argc = 0; + unsigned int *argv; + char *str, *ptr, *bootloader = "kexec"; + + /* argv at offset 0, argv[] at offset KEXEC_ARGV_SIZE/2 */ + if (image->type == KEXEC_TYPE_DEFAULT) + argv = (unsigned int *)kexec_argv; + else + argv = (unsigned int *)kdump_argv; + + argv[argc++] = (unsigned int)(KEXEC_ARGV_ADDR + KEXEC_ARGV_SIZE/2); + + for (i = 0; i < image->nr_segments; i++) { + if (!strncmp(bootloader, (char *)image->segment[i].buf, + strlen(bootloader))) { + /* + * convert command line string to array + * of parameters (as bootloader does). + */ + int offt; + str = (char *)argv + KEXEC_ARGV_SIZE/2; + memcpy(str, image->segment[i].buf, KEXEC_ARGV_SIZE/2); + ptr = strchr(str, ' '); + + while (ptr && (argc < MAX_ARGS)) { + *ptr = '\0'; + if (ptr[1] != ' ') { + offt = (int)(ptr - str + 1); + argv[argc] = KEXEC_ARGV_ADDR + KEXEC_ARGV_SIZE/2 + offt; + argc++; + } + ptr = strchr(ptr + 1, ' '); + } + break; + } + } + + if (image->type == KEXEC_TYPE_DEFAULT) + kexec_argc = argc; + else + kdump_argc = argc; + + /* kexec/kdump need a safe page to save reboot_code_buffer */ + image->control_code_page = virt_to_page((void *)KEXEC_CTRL_CODE); + + return 0; +} + +static void loongson_kexec_shutdown(void) +{ +#ifdef CONFIG_SMP + int cpu; + + /* All CPUs go to reboot_code_buffer */ + for_each_possible_cpu(cpu) + if (!cpu_online(cpu)) + cpu_device_up(get_cpu_device(cpu)); +#endif + kexec_args[0] = kexec_argc; + kexec_args[1] = fw_arg1; + kexec_args[2] = fw_arg2; + secondary_kexec_args[0] = TO_UNCAC(0x3ff01000); + memcpy((void *)fw_arg1, kexec_argv, KEXEC_ARGV_SIZE); + memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE); +} + +static void loongson_crash_shutdown(struct pt_regs *regs) +{ + default_machine_crash_shutdown(regs); + kexec_args[0] = kdump_argc; + kexec_args[1] = fw_arg1; + kexec_args[2] = fw_arg2; + secondary_kexec_args[0] = TO_UNCAC(0x3ff01000); + memcpy((void *)fw_arg1, kdump_argv, KEXEC_ARGV_SIZE); + memcpy((void *)fw_arg2, kexec_envp, KEXEC_ENVP_SIZE); +} + +#endif + static int __init mips_reboot_setup(void) { _machine_restart = loongson_restart; _machine_halt = loongson_halt; pm_power_off = loongson_poweroff; +#ifdef CONFIG_KEXEC + kexec_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL); + kdump_argv = kmalloc(KEXEC_ARGV_SIZE, GFP_KERNEL); + kexec_envp = kmalloc(KEXEC_ENVP_SIZE, GFP_KERNEL); + fw_arg1 = KEXEC_ARGV_ADDR; + memcpy(kexec_envp, (void *)fw_arg2, KEXEC_ENVP_SIZE); + + _machine_kexec_prepare = loongson_kexec_prepare; + _machine_kexec_shutdown = loongson_kexec_shutdown; + _machine_crash_shutdown = loongson_crash_shutdown; +#endif + return 0; } -- cgit v1.2.3 From 1b6bc35a01bd6b874165379255929b7badfdecb5 Mon Sep 17 00:00:00 2001 From: zhaoxiao Date: Tue, 20 Apr 2021 15:00:52 +0800 Subject: MIPS: Makefile: Replace -pg with CC_FLAGS_FTRACE This patch replaces the "open-coded" -pg compile flag with a CC_FLAGS_FTRACE makefile variable which architectures can override if 
a different option should be used for code generation. Signed-off-by: zhaoxiao Signed-off-by: Thomas Bogendoerfer --- arch/mips/boot/compressed/Makefile | 2 +- arch/mips/kernel/Makefile | 8 ++++---- arch/mips/vdso/Makefile | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index f93f72bcba97..e4b7839293e1 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -18,7 +18,7 @@ include $(srctree)/arch/mips/Kbuild.platforms BOOT_HEAP_SIZE := 0x400000 # Disable Function Tracer -KBUILD_CFLAGS := $(filter-out -pg, $(KBUILD_CFLAGS)) +KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE), $(KBUILD_CFLAGS)) KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS)) diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index b4a57f1de772..814b3da30501 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -17,10 +17,10 @@ obj-y += cpu-probe.o endif ifdef CONFIG_FUNCTION_TRACER -CFLAGS_REMOVE_ftrace.o = -pg -CFLAGS_REMOVE_early_printk.o = -pg -CFLAGS_REMOVE_perf_event.o = -pg -CFLAGS_REMOVE_perf_event_mipsxx.o = -pg +CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_early_printk.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_perf_event.o = $(CC_FLAGS_FTRACE) +CFLAGS_REMOVE_perf_event_mipsxx.o = $(CC_FLAGS_FTRACE) endif obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index 2131d3fd7333..1b2ea34c3d3b 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile @@ -46,7 +46,7 @@ CFLAGS_vgettimeofday-o32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -in CFLAGS_vgettimeofday-n32.o = -include $(srctree)/$(src)/config-n32-o32-env.c -include $(c-gettimeofday-y) endif -CFLAGS_REMOVE_vgettimeofday.o = -pg +CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) ifdef CONFIG_MIPS_DISABLE_VDSO ifndef CONFIG_MIPS_LD_CAN_LINK_VDSO @@ -60,7 +60,7 @@ ldflags-y := -Bsymbolic --no-undefined -soname=linux-vdso.so.1 \ $(filter -E%,$(KBUILD_CFLAGS)) -nostdlib -shared \ -G 0 --eh-frame-hdr --hash-style=sysv --build-id=sha1 -T -CFLAGS_REMOVE_vdso.o = -pg +CFLAGS_REMOVE_vdso.o = $(CC_FLAGS_FTRACE) GCOV_PROFILE := n UBSAN_SANITIZE := n -- cgit v1.2.3