Commit 27c9844a authored by Sebastian Andrzej Siewior

Merge tag 'v5.14' into linux-5.14.y-rt

Linux 5.14
parents 2f5188cd 7d2a07b7
@@ -6955,7 +6955,7 @@ F: include/uapi/linux/mdio.h
F: include/uapi/linux/mii.h
 
EXFAT FILE SYSTEM
M: Namjae Jeon <namjae.jeon@samsung.com>
M: Namjae Jeon <linkinjeon@kernel.org>
M: Sungjong Seo <sj1557.seo@samsung.com>
L: linux-fsdevel@vger.kernel.org
S: Maintained
......
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION =
NAME = Opossums on Parade
# *DOCUMENTATION*
......
@@ -160,10 +160,11 @@ extern unsigned long vectors_base;
/*
* Physical start and end address of the kernel sections. These addresses are
* 2MB-aligned to match the section mappings placed over the kernel.
* 2MB-aligned to match the section mappings placed over the kernel. We use
u64 so that LPAE mappings beyond the 32-bit limit will work out as well.
*/
extern u32 kernel_sec_start;
extern u32 kernel_sec_end;
extern u64 kernel_sec_start;
extern u64 kernel_sec_end;
/*
* Physical vs virtual RAM address space conversion. These are
......
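Why the patch widens these to u64: with LPAE the kernel can sit at a physical address above the 32-bit limit, and a u32 silently truncates it. A minimal standalone C sketch (hypothetical addresses, not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phys = 0x800008000ULL;	/* hypothetical kernel start above 4 GiB */
	uint32_t truncated = (uint32_t)phys;

	/* The u32 keeps only the low 32 bits: 0x8000 instead of 0x800008000. */
	printf("u64: %#llx\n", (unsigned long long)phys);
	printf("u32: %#x\n", truncated);
	return 0;
}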
@@ -49,7 +49,8 @@
/*
* This needs to be assigned at runtime when the linker symbols are
* resolved.
resolved. These are really unsigned 64-bit, but in this assembly code
we store them as two 32-bit halves.
*/
.pushsection .data
.align 2
@@ -57,7 +58,9 @@
.globl kernel_sec_end
kernel_sec_start:
.long 0
.long 0
kernel_sec_end:
.long 0
.long 0
.popsection
@@ -250,7 +253,11 @@ __create_page_tables:
add r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
ldr r6, =(_end - 1)
adr_l r5, kernel_sec_start @ _pa(kernel_sec_start)
str r8, [r5] @ Save physical start of kernel
#ifdef CONFIG_CPU_ENDIAN_BE8
str r8, [r5, #4] @ Save physical start of kernel (BE)
#else
str r8, [r5] @ Save physical start of kernel (LE)
#endif
orr r3, r8, r7 @ Add the MMU flags
add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1: str r3, [r0], #1 << PMD_ORDER
@@ -259,7 +266,11 @@ __create_page_tables:
bls 1b
eor r3, r3, r7 @ Remove the MMU flags
adr_l r5, kernel_sec_end @ _pa(kernel_sec_end)
str r3, [r5] @ Save physical end of kernel
#ifdef CONFIG_CPU_ENDIAN_BE8
str r3, [r5, #4] @ Save physical end of kernel (BE)
#else
str r3, [r5] @ Save physical end of kernel (LE)
#endif
#ifdef CONFIG_XIP_KERNEL
/*
......
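The #ifdef CONFIG_CPU_ENDIAN_BE8 offset of #4 follows from the layout above: kernel_sec_start is now two .long words, and on big-endian the least significant word is the second one, so a 32-bit physical address must be stored at byte offset 4 to land in the low half. A small host-side sketch of the same layout rule (plain C, not part of the patch):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	uint64_t slot = 0;		/* stands in for kernel_sec_start's two .long words */
	uint32_t phys = 0x80004000;	/* hypothetical 32-bit physical address */

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	memcpy((uint8_t *)&slot + 4, &phys, sizeof(phys));	/* low word is second */
#else
	memcpy(&slot, &phys, sizeof(phys));			/* low word is first */
#endif
	/* Either way the 64-bit value reads back as 0x80004000. */
	printf("slot = %#llx\n", (unsigned long long)slot);
	return 0;
}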
@@ -1608,6 +1608,13 @@ static void __init early_paging_init(const struct machine_desc *mdesc)
if (offset == 0)
return;
/*
* Offset the kernel section physical offsets so that the kernel
* mapping will work out later on.
*/
kernel_sec_start += offset;
kernel_sec_end += offset;
/*
* Get the address of the remap function in the 1:1 identity
* mapping setup by the early page table assembly code. We
@@ -1716,7 +1723,7 @@ void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
pr_debug("physical kernel sections: 0x%08x-0x%08x\n",
pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
kernel_sec_start, kernel_sec_end);
prepare_page_table();
......
@@ -29,7 +29,7 @@ ENTRY(lpae_pgtables_remap_asm)
ldr r6, =(_end - 1)
add r7, r2, #0x1000
add r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
add r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER)
add r7, r7, #KERNEL_OFFSET >> (SECTION_SHIFT - L2_ORDER)
1: ldrd r4, r5, [r7]
adds r4, r4, r0
adc r5, r5, r1
......
@@ -157,6 +157,7 @@ config ARM64
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
select HAVE_ARCH_PFN_VALID
select HAVE_ARCH_PREL32_RELOCATIONS
select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
......
@@ -41,6 +41,7 @@ void tag_clear_highpage(struct page *to);
typedef struct page *pgtable_t;
int pfn_valid(unsigned long pfn);
int pfn_is_map_memory(unsigned long pfn);
#include <asm/memory.h>
......
@@ -219,6 +219,43 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
free_area_init(max_zone_pfns);
}
int pfn_valid(unsigned long pfn)
{
phys_addr_t addr = PFN_PHYS(pfn);
struct mem_section *ms;
/*
* Ensure the upper PAGE_SHIFT bits are clear in the
* pfn. Else it might lead to false positives when
* some of the upper bits are set, but the lower bits
* match a valid pfn.
*/
if (PHYS_PFN(addr) != pfn)
return 0;
if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
return 0;
ms = __pfn_to_section(pfn);
if (!valid_section(ms))
return 0;
/*
ZONE_DEVICE memory does not have memblock entries, so a
memblock_is_map_memory() check for ZONE_DEVICE based
addresses will always fail. Even normal hotplugged memory
will never have the MEMBLOCK_NOMAP flag set in its
memblock entries. Skip the memblock search for all non-early
memory sections, covering all hotplug memory, both normal
and ZONE_DEVICE based.
if (!early_section(ms))
return pfn_section_valid(ms, pfn);
return memblock_is_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
int pfn_is_map_memory(unsigned long pfn)
{
phys_addr_t addr = PFN_PHYS(pfn);
......
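The PHYS_PFN(addr) != pfn round trip is doing real work here: shifting a pfn with stray upper bits left by PAGE_SHIFT pushes those bits out of phys_addr_t, so the pfn does not survive the round trip and is rejected. A standalone sketch on a 64-bit host (PAGE_SHIFT and the phys_addr_t width are assumptions mirroring arm64):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
typedef uint64_t phys_addr_t;
#define PFN_PHYS(x)	((phys_addr_t)(x) << PAGE_SHIFT)
#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

int main(void)
{
	unsigned long good = 0x80000UL;
	unsigned long bad = good | (1UL << 55);	/* stray upper bit */

	/* Prints 1 then 0: bit 55 is shifted past bit 63 and lost. */
	printf("good: %d\n", PHYS_PFN(PFN_PHYS(good)) == good);
	printf("bad:  %d\n", PHYS_PFN(PFN_PHYS(bad)) == bad);
	return 0;
}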
@@ -8,19 +8,4 @@ extern void * memset(void *, int, size_t);
#define __HAVE_ARCH_MEMCPY
void * memcpy(void * dest,const void *src,size_t count);
#define __HAVE_ARCH_STRLEN
extern size_t strlen(const char *s);
#define __HAVE_ARCH_STRCPY
extern char *strcpy(char *dest, const char *src);
#define __HAVE_ARCH_STRNCPY
extern char *strncpy(char *dest, const char *src, size_t count);
#define __HAVE_ARCH_STRCAT
extern char *strcat(char *dest, const char *src);
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, size_t);
#endif
@@ -17,10 +17,6 @@
#include <linux/string.h>
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
#include <linux/atomic.h>
EXPORT_SYMBOL(__xchg8);
......
@@ -3,7 +3,7 @@
# Makefile for parisc-specific library files
#
lib-y := lusercopy.o bitops.o checksum.o io.o memcpy.o \
ucmpdi2.o delay.o string.o
lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
ucmpdi2.o delay.o
obj-y := iomap.o
......
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <linux/types.h>
#include <asm/string.h>
#define OPSIZ (BITS_PER_LONG/8)
typedef unsigned long op_t;
void *
memset (void *dstpp, int sc, size_t len)
{
unsigned int c = sc;
long int dstp = (long int) dstpp;
if (len >= 8)
{
size_t xlen;
op_t cccc;
cccc = (unsigned char) c;
cccc |= cccc << 8;
cccc |= cccc << 16;
if (OPSIZ > 4)
/* Do the shift in two steps to avoid warning if long has 32 bits. */
cccc |= (cccc << 16) << 16;
/* There are at least some bytes to set.
No need to test for LEN == 0 in this alignment loop. */
while (dstp % OPSIZ != 0)
{
((unsigned char *) dstp)[0] = c;
dstp += 1;
len -= 1;
}
/* Write 8 `op_t' per iteration until less than 8 `op_t' remain. */
xlen = len / (OPSIZ * 8);
while (xlen > 0)
{
((op_t *) dstp)[0] = cccc;
((op_t *) dstp)[1] = cccc;
((op_t *) dstp)[2] = cccc;
((op_t *) dstp)[3] = cccc;
((op_t *) dstp)[4] = cccc;
((op_t *) dstp)[5] = cccc;
((op_t *) dstp)[6] = cccc;
((op_t *) dstp)[7] = cccc;
dstp += 8 * OPSIZ;
xlen -= 1;
}
len %= OPSIZ * 8;
/* Write 1 `op_t' per iteration until less than OPSIZ bytes remain. */
xlen = len / OPSIZ;
while (xlen > 0)
{
((op_t *) dstp)[0] = cccc;
dstp += OPSIZ;
xlen -= 1;
}
len %= OPSIZ;
}
/* Write the last few bytes. */
while (len > 0)
{
((unsigned char *) dstp)[0] = c;
dstp += 1;
len -= 1;
}
return dstpp;
}
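The cccc fill word above is built by byte replication; the same trick in standalone form (the doubled 16-bit shift avoids an undefined shift when long is 32 bits wide, as the file's own comment notes):

#include <stdio.h>

int main(void)
{
	unsigned long c = 0xab;

	c |= c << 8;		/* 0xabab */
	c |= c << 16;		/* 0xabababab */
	c |= (c << 16) << 16;	/* 0xabababababababab on 64-bit longs */
	printf("%#lx\n", c);
	return 0;
}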
......
// SPDX-License-Identifier: GPL-2.0
/*
* PA-RISC assembly string functions
*
* Copyright (C) 2019 Helge Deller <deller@gmx.de>
*/
#include <asm/assembly.h>
#include <linux/linkage.h>
.section .text.hot
.level PA_ASM_LEVEL
t0 = r20
t1 = r21
t2 = r22
ENTRY_CFI(strlen, frame=0,no_calls)
or,COND(<>) arg0,r0,ret0
b,l,n .Lstrlen_null_ptr,r0
depwi 0,31,2,ret0
cmpb,COND(<>) arg0,ret0,.Lstrlen_not_aligned
ldw,ma 4(ret0),t0
cmpib,tr 0,r0,.Lstrlen_loop
uxor,nbz r0,t0,r0
.Lstrlen_not_aligned:
uaddcm arg0,ret0,t1
shladd t1,3,r0,t1
mtsar t1
depwi -1,%sar,32,t0
uxor,nbz r0,t0,r0
.Lstrlen_loop:
b,l,n .Lstrlen_end_loop,r0
ldw,ma 4(ret0),t0
cmpib,tr 0,r0,.Lstrlen_loop
uxor,nbz r0,t0,r0
.Lstrlen_end_loop:
extrw,u,<> t0,7,8,r0
addib,tr,n -3,ret0,.Lstrlen_out
extrw,u,<> t0,15,8,r0
addib,tr,n -2,ret0,.Lstrlen_out
extrw,u,<> t0,23,8,r0
addi -1,ret0,ret0
.Lstrlen_out:
bv r0(rp)
uaddcm ret0,arg0,ret0
.Lstrlen_null_ptr:
bv,n r0(rp)
ENDPROC_CFI(strlen)
ENTRY_CFI(strcpy, frame=0,no_calls)
ldb 0(arg1),t0
stb t0,0(arg0)
ldo 0(arg0),ret0
ldo 1(arg1),t1
cmpb,= r0,t0,2f
ldo 1(arg0),t2
1: ldb 0(t1),arg1
stb arg1,0(t2)
ldo 1(t1),t1
cmpb,<> r0,arg1,1b
ldo 1(t2),t2
2: bv,n r0(rp)
ENDPROC_CFI(strcpy)
ENTRY_CFI(strncpy, frame=0,no_calls)
ldb 0(arg1),t0
stb t0,0(arg0)
ldo 1(arg1),t1
ldo 0(arg0),ret0
cmpb,= r0,t0,2f
ldo 1(arg0),arg1
1: ldo -1(arg2),arg2
cmpb,COND(=),n r0,arg2,2f
ldb 0(t1),arg0
stb arg0,0(arg1)
ldo 1(t1),t1
cmpb,<> r0,arg0,1b
ldo 1(arg1),arg1
2: bv,n r0(rp)
ENDPROC_CFI(strncpy)
ENTRY_CFI(strcat, frame=0,no_calls)
ldb 0(arg0),t0
cmpb,= t0,r0,2f
ldo 0(arg0),ret0
ldo 1(arg0),arg0
1: ldb 0(arg0),t1
cmpb,<>,n r0,t1,1b
ldo 1(arg0),arg0
2: ldb 0(arg1),t2
stb t2,0(arg0)
ldo 1(arg0),arg0
ldb 0(arg1),t0
cmpb,<> r0,t0,2b
ldo 1(arg1),arg1
bv,n r0(rp)
ENDPROC_CFI(strcat)
ENTRY_CFI(memset, frame=0,no_calls)
copy arg0,ret0
cmpb,COND(=) r0,arg0,4f
copy arg0,t2
cmpb,COND(=) r0,arg2,4f
ldo -1(arg2),arg3
subi -1,arg3,t0
subi 0,t0,t1
cmpiclr,COND(>=) 0,t1,arg2
ldo -1(t1),arg2
extru arg2,31,2,arg0
2: stb arg1,0(t2)
ldo 1(t2),t2
addib,>= -1,arg0,2b
ldo -1(arg3),arg3
cmpiclr,COND(<=) 4,arg2,r0
b,l,n 4f,r0
#ifdef CONFIG_64BIT
depd,* r0,63,2,arg2
#else
depw r0,31,2,arg2
#endif
ldo 1(t2),t2
3: stb arg1,-1(t2)
stb arg1,0(t2)
stb arg1,1(t2)
stb arg1,2(t2)
addib,COND(>) -4,arg2,3b
ldo 4(t2),t2
4: bv,n r0(rp)
ENDPROC_CFI(memset)
.end
@@ -812,7 +812,6 @@ __start_interrupts:
* syscall register convention is in Documentation/powerpc/syscall64-abi.rst
*/
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
1:
/* SCV 0 */
mr r9,r13
GET_PACA(r13)
@@ -842,10 +841,12 @@ EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
b system_call_vectored_sigill
#endif
.endr
2:
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)
SOFT_MASK_TABLE(1b, 2b) // Treat scv vectors as soft-masked, see comment above.
// Treat scv vectors as soft-masked, see comment above.
// Use absolute values rather than labels here, so they don't get relocated,
// because this code runs unrelocated.
SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)
#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
......
@@ -98,7 +98,7 @@ config PPC_BOOK3S_64
select PPC_HAVE_PMU_SUPPORT
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
select ARCH_ENABLE_PMD_SPLIT_PTLOCK
select ARCH_ENABLE_SPLIT_PMD_PTLOCK
select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_HUGETLBFS
select ARCH_SUPPORTS_NUMA_BALANCING
......
@@ -14,6 +14,10 @@ / {
model = "Microchip PolarFire-SoC Icicle Kit";
compatible = "microchip,mpfs-icicle-kit";
aliases {
ethernet0 = &emac1;
};
chosen {
stdout-path = &serial0;
};
......
@@ -317,7 +317,7 @@ emac1: ethernet@20112000 {
reg = <0x0 0x20112000 0x0 0x2000>;
interrupt-parent = <&plic>;
interrupts = <70 71 72 73>;
mac-address = [00 00 00 00 00 00];
local-mac-address = [00 00 00 00 00 00];
clocks = <&clkcfg 5>, <&clkcfg 2>;
status = "disabled";
clock-names = "pclk", "hclk";
......
@@ -10,6 +10,7 @@
#include <asm/ptrace.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
#include <asm/switch_to.h>
#include <linux/audit.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
@@ -56,6 +57,9 @@ static int riscv_fpr_get(struct task_struct *target,
{
struct __riscv_d_ext_state *fstate = &target->thread.fstate;
if (target == current)
fstate_save(current, task_pt_regs(current));
membuf_write(&to, fstate, offsetof(struct __riscv_d_ext_state, fcsr));
membuf_store(&to, fstate->fcsr);
return membuf_zero(&to, 4); // explicitly pad
......
@@ -5,9 +5,8 @@
* Early support for invoking 32-bit EFI services from a 64-bit kernel.
*
* Because this thunking occurs before ExitBootServices() we have to
* restore the firmware's 32-bit GDT before we make EFI service calls,
* since the firmware's 32-bit IDT is still currently installed and it
* needs to be able to service interrupts.
* restore the firmware's 32-bit GDT and IDT before we make EFI service
* calls.
*
* On the plus side, we don't have to worry about mangling 64-bit
* addresses into 32-bits because we're executing with an identity
@@ -39,7 +38,7 @@ SYM_FUNC_START(__efi64_thunk)
/*
* Convert x86-64 ABI params to i386 ABI
*/
subq $32, %rsp
subq $64, %rsp
movl %esi, 0x0(%rsp)
movl %edx, 0x4(%rsp)
movl %ecx, 0x8(%rsp)
@@ -49,14 +48,19 @@ SYM_FUNC_START(__efi64_thunk)
leaq 0x14(%rsp), %rbx
sgdt (%rbx)
addq $16, %rbx
sidt (%rbx)
/*
* Switch to gdt with 32-bit segments. This is the firmware GDT
* that was installed when the kernel started executing. This
* pointer was saved at the EFI stub entry point in head_64.S.
* Switch to IDT and GDT with 32-bit segments. This is the firmware GDT
and IDT that were installed when the kernel started executing. The
* pointers were saved at the EFI stub entry point in head_64.S.
*
* Pass the saved DS selector to the 32-bit code, and use far return to
* restore the saved CS selector.
*/
leaq efi32_boot_idt(%rip), %rax
lidt (%rax)
leaq efi32_boot_gdt(%rip), %rax
lgdt (%rax)
@@ -67,7 +71,7 @@ SYM_FUNC_START(__efi64_thunk)
pushq %rax
lretq
1: addq $32, %rsp
1: addq $64, %rsp
movq %rdi, %rax
pop %rbx
@@ -128,10 +132,13 @@ SYM_FUNC_START_LOCAL(efi_enter32)
/*
* Some firmware will return with interrupts enabled. Be sure to
* disable them before we switch GDTs.
* disable them before we switch GDTs and IDTs.
*/
cli
lidtl (%ebx)
subl $16, %ebx
lgdtl (%ebx)
movl %cr4, %eax
@@ -166,6 +173,11 @@ SYM_DATA_START(efi32_boot_gdt)
.quad 0
SYM_DATA_END(efi32_boot_gdt)
SYM_DATA_START(efi32_boot_idt)
.word 0
.quad 0
SYM_DATA_END(efi32_boot_idt)
SYM_DATA_START(efi32_boot_cs)
.word 0
SYM_DATA_END(efi32_boot_cs)
......
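The new efi32_boot_idt slot mirrors efi32_boot_gdt: in 64-bit mode SGDT/SIDT store a 10-byte pseudo-descriptor, a 16-bit limit followed by a 64-bit base, which is exactly what the .word 0 / .quad 0 pair reserves. A C view of that layout (a sketch; field names are mine, though the kernel's struct desc_ptr has this same shape):

#include <stdint.h>

struct desc_ptr {
	uint16_t limit;	/* size of the descriptor table minus one */
	uint64_t base;	/* linear address of the table */
} __attribute__((packed));	/* packed: 10 bytes, no padding */

_Static_assert(sizeof(struct desc_ptr) == 10, "matches .word + .quad");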