
Commit 7006fe2

Merge tag 'x86-urgent-2024-08-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:

 - Fix 32-bit PTI for real. pti_clone_entry_text() is called twice:
   once before initcalls, so that initcalls can use the user-mode
   helper, and then again after text is set read-only. Setting text
   read-only on 32-bit might break up the PMD mapping, which makes the
   second invocation of pti_clone_entry_text() find the mappings out of
   sync and fail. Allow the second call to split the existing PMDs in
   the user mapping and synchronize with the kernel mapping.

 - Don't make acpi_mp_wake_mailbox read-only after init, as the mailbox
   must remain writable in case CPU hotplug operations happen after
   boot. Otherwise the attempt to start a CPU crashes with a write to
   read-only memory.

 - Add a missing sanity check in mtrr_save_state() to ensure that the
   fixed MTRR MSRs are supported. Otherwise mtrr_save_state() ends up
   in a #GP, which is fixed up, but the WARN_ON() can bring systems
   down when panic_on_warn is set.

* tag 'x86-urgent-2024-08-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mtrr: Check if fixed MTRRs exist before saving them
  x86/paravirt: Fix incorrect virt spinlock setting on bare metal
  x86/acpi: Remove __ro_after_init from acpi_mp_wake_mailbox
  x86/mm: Fix PTI for i386 some more
2 parents 7270e93 + 919f18f commit 7006fe2

File tree

5 files changed: +41 -27 lines changed


arch/x86/include/asm/qspinlock.h

Lines changed: 7 additions & 5 deletions
@@ -66,13 +66,15 @@ static inline bool vcpu_is_preempted(long cpu)
 
 #ifdef CONFIG_PARAVIRT
 /*
- * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ * virt_spin_lock_key - disables by default the virt_spin_lock() hijack.
  *
- * Native (and PV wanting native due to vCPU pinning) should disable this key.
- * It is done in this backwards fashion to only have a single direction change,
- * which removes ordering between native_pv_spin_init() and HV setup.
+ * Native (and PV wanting native due to vCPU pinning) should keep this key
+ * disabled. Native does not touch the key.
+ *
+ * When in a guest then native_pv_lock_init() enables the key first and
+ * KVM/XEN might conditionally disable it later in the boot process again.
  */
-DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
+DECLARE_STATIC_KEY_FALSE(virt_spin_lock_key);
 
 /*
  * Shortcut for the queued_spin_lock_slowpath() function that allows
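For context, this key gates the test-and-set fallback in virt_spin_lock() later in the same header. A simplified paraphrase of that consumer under the new default-off polarity (a sketch, not part of this diff):

static inline bool virt_spin_lock(struct qspinlock *lock)
{
	int val;

	/* Default-off key: bare metal falls through to the queued lock. */
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * Guests spin on a test-and-set lock rather than the fair queued
	 * lock, since a preempted lock holder would otherwise stall every
	 * waiter queued behind it.
	 */
__retry:
	val = atomic_read(&lock->val);

	if (val || !atomic_try_cmpxchg(&lock->val, &val, _Q_LOCKED_VAL)) {
		cpu_relax();
		goto __retry;
	}

	return true;
}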

arch/x86/kernel/acpi/madt_wakeup.c

Lines changed: 1 addition & 1 deletion
@@ -19,7 +19,7 @@
 static u64 acpi_mp_wake_mailbox_paddr __ro_after_init;
 
 /* Virtual address of the Multiprocessor Wakeup Structure mailbox */
-static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox __ro_after_init;
+static struct acpi_madt_multiproc_wakeup_mailbox *acpi_mp_wake_mailbox;
 
 static u64 acpi_mp_pgd __ro_after_init;
 static u64 acpi_mp_reset_vector_paddr __ro_after_init;
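The annotation was wrong because the mailbox pointer is populated lazily on the first CPU wakeup, which can happen well after init when a CPU is hotplugged. A sketch of that lazy-init pattern (paraphrased, signature simplified, not part of this diff):

static int acpi_wakeup_cpu(u32 apicid, unsigned long start_ip)
{
	/*
	 * Map the mailbox on first use. With __ro_after_init on the
	 * pointer, this assignment faults when the first wakeup is a
	 * post-boot hotplug rather than an initcall-time bring-up.
	 */
	if (!acpi_mp_wake_mailbox && acpi_mp_wake_mailbox_paddr) {
		acpi_mp_wake_mailbox = memremap(acpi_mp_wake_mailbox_paddr,
						sizeof(*acpi_mp_wake_mailbox),
						MEMREMAP_WB);
		if (!acpi_mp_wake_mailbox)
			return -ENOMEM;
	}

	/* ... fill in apic_id and wakeup_vector, then poll for the ack ... */
	return 0;
}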

arch/x86/kernel/cpu/mtrr/mtrr.c

Lines changed: 1 addition & 1 deletion
@@ -609,7 +609,7 @@ void mtrr_save_state(void)
 {
 	int first_cpu;
 
-	if (!mtrr_enabled())
+	if (!mtrr_enabled() || !mtrr_state.have_fixed)
 		return;
 
 	first_cpu = cpumask_first(cpu_online_mask);
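Without the extra condition, the save path reads fixed-range MSRs that are only architecturally present when IA32_MTRRCAP reports fixed-range support. A hypothetical illustration of the failure mode (not from this diff):

/*
 * MSR_MTRRfix64K_00000 only exists when IA32_MTRRCAP.FIX is set, so an
 * unguarded read takes a #GP on CPUs without fixed MTRRs. The new
 * mtrr_state.have_fixed check keeps mtrr_save_state() off this path.
 */
static void save_fixed_unguarded(u64 *fixed)
{
	rdmsrl(MSR_MTRRfix64K_00000, fixed[0]);	/* #GP if fixed MTRRs absent */
}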

arch/x86/kernel/paravirt.c

Lines changed: 3 additions & 4 deletions
@@ -51,13 +51,12 @@ DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
 DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
 #endif
 
-DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
+DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);
 
 void __init native_pv_lock_init(void)
 {
-	if (IS_ENABLED(CONFIG_PARAVIRT_SPINLOCKS) &&
-	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
-		static_branch_disable(&virt_spin_lock_key);
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		static_branch_enable(&virt_spin_lock_key);
 }
 
 static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
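Per the updated comment in qspinlock.h, a guest's own lock init may later switch the key back off once proper paravirt spinlocks are available. A hypothetical sketch of that second direction change (the real KVM/Xen callers are not part of this diff, and pv_spinlocks_usable() is an invented placeholder):

static void __init guest_spinlock_init(void)
{
	/*
	 * native_pv_lock_init() already enabled the key because
	 * X86_FEATURE_HYPERVISOR is set. If PV spinlocks turn out to be
	 * usable, prefer them and turn the test-and-set hijack back off.
	 */
	if (pv_spinlocks_usable())	/* hypothetical predicate */
		static_branch_disable(&virt_spin_lock_key);
}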

arch/x86/mm/pti.c

Lines changed: 29 additions & 16 deletions
@@ -241,7 +241,7 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
  *
  * Returns a pointer to a PTE on success, or NULL on failure.
  */
-static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+static pte_t *pti_user_pagetable_walk_pte(unsigned long address, bool late_text)
 {
 	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
 	pmd_t *pmd;
@@ -251,10 +251,15 @@ static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
 	if (!pmd)
 		return NULL;
 
-	/* We can't do anything sensible if we hit a large mapping. */
+	/* Large PMD mapping found */
 	if (pmd_leaf(*pmd)) {
-		WARN_ON(1);
-		return NULL;
+		/* Clear the PMD if we hit a large mapping from the first round */
+		if (late_text) {
+			set_pmd(pmd, __pmd(0));
+		} else {
+			WARN_ON_ONCE(1);
+			return NULL;
+		}
 	}
 
 	if (pmd_none(*pmd)) {
@@ -283,7 +288,7 @@ static void __init pti_setup_vsyscall(void)
 	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
 		return;
 
-	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
+	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR, false);
 	if (WARN_ON(!target_pte))
 		return;
 
@@ -301,7 +306,7 @@ enum pti_clone_level {
 
 static void
 pti_clone_pgtable(unsigned long start, unsigned long end,
-		  enum pti_clone_level level)
+		  enum pti_clone_level level, bool late_text)
 {
 	unsigned long addr;
 
@@ -390,7 +395,7 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
 				return;
 
 			/* Allocate PTE in the user page-table */
-			target_pte = pti_user_pagetable_walk_pte(addr);
+			target_pte = pti_user_pagetable_walk_pte(addr, late_text);
 			if (WARN_ON(!target_pte))
 				return;
 
@@ -452,7 +457,7 @@ static void __init pti_clone_user_shared(void)
 		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
 		pte_t *target_pte;
 
-		target_pte = pti_user_pagetable_walk_pte(va);
+		target_pte = pti_user_pagetable_walk_pte(va, false);
 		if (WARN_ON(!target_pte))
 			return;
 
@@ -475,7 +480,7 @@ static void __init pti_clone_user_shared(void)
 	start = CPU_ENTRY_AREA_BASE;
 	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);
 
-	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
+	pti_clone_pgtable(start, end, PTI_CLONE_PMD, false);
 }
 #endif /* CONFIG_X86_64 */
 
@@ -492,11 +497,11 @@ static void __init pti_setup_espfix64(void)
 /*
  * Clone the populated PMDs of the entry text and force it RO.
  */
-static void pti_clone_entry_text(void)
+static void pti_clone_entry_text(bool late)
 {
 	pti_clone_pgtable((unsigned long) __entry_text_start,
 			  (unsigned long) __entry_text_end,
-			  PTI_LEVEL_KERNEL_IMAGE);
+			  PTI_LEVEL_KERNEL_IMAGE, late);
 }
 
 /*
@@ -571,7 +576,7 @@ static void pti_clone_kernel_text(void)
 	 * pti_set_kernel_image_nonglobal() did to clear the
 	 * global bit.
	 */
-	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);
+	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE, false);
 
 	/*
 	 * pti_clone_pgtable() will set the global bit in any PMDs
@@ -638,8 +643,15 @@ void __init pti_init(void)
 
 	/* Undo all global bits from the init pagetables in head_64.S: */
 	pti_set_kernel_image_nonglobal();
+
 	/* Replace some of the global bits just for shared entry text: */
-	pti_clone_entry_text();
+	/*
+	 * This is very early in boot. Device and Late initcalls can do
+	 * modprobe before free_initmem() and mark_readonly(). This
+	 * pti_clone_entry_text() allows those user-mode-helpers to function,
+	 * but notably the text is still RW.
+	 */
+	pti_clone_entry_text(false);
 	pti_setup_espfix64();
 	pti_setup_vsyscall();
 }
@@ -656,10 +668,11 @@ void pti_finalize(void)
 	if (!boot_cpu_has(X86_FEATURE_PTI))
 		return;
 	/*
-	 * We need to clone everything (again) that maps parts of the
-	 * kernel image.
+	 * This is after free_initmem() (all initcalls are done) and we've done
+	 * mark_readonly(). Text is now NX which might've split some PMDs
+	 * relative to the early clone.
 	 */
-	pti_clone_entry_text();
+	pti_clone_entry_text(true);
 	pti_clone_kernel_text();
 
 	debug_checkwx_user();
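Putting the two call sites together, the boot ordering the fix relies on, summarized in comment form from the hunks above:

/*
 * pti_init()                        // early boot
 *   pti_clone_entry_text(false);    //   entry text still RW; a leaf PMD
 *                                   //   here is unexpected -> WARN_ON_ONCE
 * ... initcalls run, may spawn user-mode helpers ...
 * free_initmem() + mark_readonly()  // may split kernel PMDs on 32-bit
 * pti_finalize()                    // late
 *   pti_clone_entry_text(true);     //   late_text: clear the stale leaf
 *                                   //   PMD and re-clone in sync with the
 *                                   //   kernel mapping
 *   pti_clone_kernel_text();
 */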
