2 * From Coreboot file of same name
4 * Copyright (C) 2014 Google, Inc
6 * SPDX-License-Identifier: GPL-2.0
9 #ifndef _ARCH_ASM_LAPIC_H
10 #define _ARCH_ASM_LAPIC_H
13 #include <asm/lapic_def.h>
15 #include <asm/processor.h>
17 /* See if we need to initialize the local APIC */
18 #if CONFIG_SMP || CONFIG_IOAPIC
/*
 * lapic_read() - read a 32-bit local APIC register via MMIO
 *
 * @reg: byte offset of the register within the memory-mapped LAPIC
 * Return: value read from LAPIC_DEFAULT_BASE + @reg
 */
24 static inline __attribute__((always_inline))
25 unsigned long lapic_read(unsigned long reg)
27 return readl(LAPIC_DEFAULT_BASE + reg);
/*
 * lapic_write() - write a 32-bit local APIC register via MMIO
 *
 * @reg: byte offset of the register within the memory-mapped LAPIC
 * @val: value to store at LAPIC_DEFAULT_BASE + @reg
 */
30 static inline __attribute__((always_inline))
31 void lapic_write(unsigned long reg, unsigned long val)
33 writel(val, LAPIC_DEFAULT_BASE + reg);
/*
 * lapic_wait_icr_idle() - busy-wait until the LAPIC_ICR_BUSY bit of the
 * Interrupt Command Register clears, i.e. the previous command completed.
 * No timeout: spins forever if the bit never clears.
 */
36 static inline __attribute__((always_inline)) void lapic_wait_icr_idle(void)
38 do { } while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY);
/*
 * enable_lapic() - enable the local APIC through the APIC base MSR
 *
 * Read-modify-writes LAPIC_BASE_MSR: sets the enable bit, clears the
 * base-address field and re-programs it to LAPIC_DEFAULT_BASE.
 */
41 static inline void enable_lapic(void)
45 msr = msr_read(LAPIC_BASE_MSR);
/* Set the global-enable bit */
47 msr.lo |= LAPIC_BASE_MSR_ENABLE;
/* Replace the base-address field with the default LAPIC base */
48 msr.lo &= ~LAPIC_BASE_MSR_ADDR_MASK;
49 msr.lo |= LAPIC_DEFAULT_BASE;
50 msr_write(LAPIC_BASE_MSR, msr);
/*
 * disable_lapic() - disable the local APIC via the APIC base MSR
 *
 * NOTE(review): the read-modify-write step between the msr_read() and
 * msr_write() calls is not visible in this chunk - presumably it clears
 * the enable bit; confirm against the full file.
 */
53 static inline void disable_lapic(void)
57 msr = msr_read(LAPIC_BASE_MSR);
59 msr_write(LAPIC_BASE_MSR, msr);
/*
 * lapicid() - return this CPU's APIC ID (the top byte, bits 31:24, of
 * the LAPIC_ID register)
 */
62 static inline __attribute__((always_inline)) unsigned long lapicid(void)
64 return lapic_read(LAPIC_ID) >> 24;
67 #if !CONFIG_AP_IN_SIPI_WAIT
68 /* If we need to go back to sipi wait, we use the long non-inlined version of
69 * this function in lapic_cpu_init.c
/*
 * stop_this_cpu() - inline variant: permanently halt this AP.
 * NOTE(review): the halt loop of the body is not visible in this chunk.
 */
71 static inline __attribute__((always_inline)) void stop_this_cpu(void)
73 /* Called by an AP when it is ready to halt and wait for a new task */
/* Out-of-line variant, used when APs must return to the SIPI wait state */
78 void stop_this_cpu(void);
/*
 * xchg() - atomically exchange *ptr with v, returning the previous value.
 * Dispatches on operand size through __xchg(); the macro's continuation
 * line (the sizeof argument) is not visible in this chunk, so no comment
 * may be inserted after the backslash line below.
 */
81 #define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
84 struct __xchg_dummy { unsigned long a[100]; }; /* oversized fake type: makes the asm "m" constraint cover the whole object */
85 #define __xg(x) ((struct __xchg_dummy *)(x))
88 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
89 * Note 2: xchg has side effect, so that attribute volatile is necessary,
90 * but generally the primitive is invalid, *ptr is output argument. --ANK
/*
 * __xchg() - size-dispatched helper behind the xchg() macro
 *
 * Uses the x86 xchg instruction, which implies a lock prefix (see the
 * note above), so the exchange is atomic even on SMP. Returns the
 * previous value of *ptr. The switch/case scaffolding and the output
 * operand lines are not visible in this chunk.
 */
92 static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
/* 8-bit exchange */
97 __asm__ __volatile__("xchgb %b0,%1"
99 : "m" (*__xg(ptr)), "0" (x)
/* 16-bit exchange */
103 __asm__ __volatile__("xchgw %w0,%1"
105 : "m" (*__xg(ptr)), "0" (x)
/* 32-bit exchange */
109 __asm__ __volatile__("xchgl %0,%1"
111 : "m" (*__xg(ptr)), "0" (x)
/*
 * lapic_write_atomic() - write a LAPIC register with a locked xchg
 * instead of a plain MMIO store; the exchanged-out old value is
 * deliberately discarded.
 */
119 static inline void lapic_write_atomic(unsigned long reg, unsigned long v)
121 (void)xchg((volatile unsigned long *)(LAPIC_DEFAULT_BASE + reg), v);
/*
 * Two alternative definitions of the read/write-around helpers; the
 * #if/#else selecting between them is not visible in this chunk.
 * First set: plain writes are safe, no work-around needed.
 */
126 # define FORCE_READ_AROUND_WRITE 0
127 # define lapic_read_around(x) lapic_read(x)
128 # define lapic_write_around(x, y) lapic_write((x), (y))
/* Second set: route writes through the atomic xchg-based helper */
130 # define FORCE_READ_AROUND_WRITE 1
131 # define lapic_read_around(x) lapic_read(x)
132 # define lapic_write_around(x, y) lapic_write_atomic((x), (y))
/*
 * lapic_remote_read() - read a LAPIC register of another CPU
 *
 * Issues a remote-read command (LAPIC_DM_REMRD) to the CPU identified by
 * @apicid, then polls the ICR remote-read status field for up to 1000
 * iterations.
 *
 * @apicid: APIC ID of the target CPU
 * @reg: register offset to read (encoded as reg >> 4 in the command)
 * @pvalue: filled with the remote register value on success
 * Return: presumably 0 on success and nonzero on failure - the result
 * variable's initialisation and return are not visible in this chunk.
 */
135 static inline int lapic_remote_read(int apicid, int reg, unsigned long *pvalue)
138 unsigned long status;
140 lapic_wait_icr_idle();
/* Target the destination CPU, then send the remote-read command */
141 lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
142 lapic_write_around(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
/* Poll until the remote read completes or the retry budget runs out */
145 status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
146 } while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
149 if (status == LAPIC_ICR_RR_VALID) {
/* Remote value has landed in the Remote Read Register */
150 *pvalue = lapic_read(LAPIC_RRR);
/* Set up the local APIC for this CPU (implemented out of line) */
157 void lapic_setup(void);
/*
 * Start the given CPU; NOTE(review): the return convention is not
 * visible here - presumably 0 on success, verify in the implementation.
 */
161 int start_cpu(struct device *cpu);
162 #endif /* CONFIG_SMP */
167 * struct x86_cpu_priv - Information about a single CPU
169 * @apic_id: Advanced Programmable Interrupt Controller Identifier, which is
170 * just a number representing the CPU core
172 * TODO: Move this to driver model once lifecycle is understood
174 struct x86_cpu_priv {