#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
* no more per-task TSS's. The TSS size is kept cacheline-aligned
* so they are allowed to end up in the .data..cacheline_aligned
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
.x86_tss = {
.sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
.ss0 = __KERNEL_DS,
.ss1 = __KERNEL_CS,
.io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
#endif
},
#ifdef CONFIG_X86_32
/*
* Note that the .io_bitmap member must be extra-big. This is because
* the CPU will access an additional byte beyond the end of the IO
* permission bitmap. The extra byte must be all 1 bits, and must
* be within the limit.
*/
.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
#ifdef CONFIG_X86_32
.SYSENTER_stack_canary = STACK_END_MAGIC,
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
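/*
 * Sizing note (illustrative arithmetic): the designated initializer
 * "[0 ... IO_BITMAP_LONGS] = ~0" above fills IO_BITMAP_LONGS + 1 longs
 * with all ones.  With the usual 65536 I/O ports the bitmap proper is
 * 65536 / 8 = 8192 bytes, and the extra long supplies the trailing 0xff
 * byte the CPU reads just past the bitmap end, keeping it within the
 * TSS limit as required by the comment above.
 */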
DEFINE_PER_CPU(bool, need_tr_refresh);
EXPORT_PER_CPU_SYMBOL_GPL(need_tr_refresh);
/*
* this gets called so that we can store lazy state into memory and copy the
* current task into the new thread.
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
dst->thread.vm86 = NULL;
#endif
	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}
/*
* Free current thread data structures etc..
*/
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
clear_thread_flag(TIF_IO_BITMAP);
/*
* Careful, clear this in the TSS too:
*/
memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

fpu__drop(fpu);
}
void flush_thread(void)
{
struct task_struct *tsk = current;
flush_ptrace_hw_breakpoint(tsk);
memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
fpu__clear(&tsk->thread.fpu);
}
static void hard_disable_TSC(void)
{
	cr4_set_bits(X86_CR4_TSD);
}
void disable_TSC(void)
{
preempt_disable();
if (!test_and_set_thread_flag(TIF_NOTSC))
/*
* Must flip the CPU state synchronously with
* TIF_NOTSC in the current running context.
*/
hard_disable_TSC();
preempt_enable();
}
static void hard_enable_TSC(void)
{
	cr4_clear_bits(X86_CR4_TSD);
}
static void enable_TSC(void)
{
preempt_disable();
if (test_and_clear_thread_flag(TIF_NOTSC))
/*
* Must flip the CPU state synchronously with
* TIF_NOTSC in the current running context.
*/
hard_enable_TSC();
preempt_enable();
}
int get_tsc_mode(unsigned long adr)
{
unsigned int val;
if (test_thread_flag(TIF_NOTSC))
val = PR_TSC_SIGSEGV;
else
val = PR_TSC_ENABLE;
return put_user(val, (unsigned int __user *)adr);
}
int set_tsc_mode(unsigned int val)
{
if (val == PR_TSC_SIGSEGV)
disable_TSC();
else if (val == PR_TSC_ENABLE)
enable_TSC();
else
return -EINVAL;
return 0;
}
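/*
 * Illustrative userspace sketch (not part of this file, error handling
 * omitted): how a process would exercise get_tsc_mode()/set_tsc_mode()
 * above through prctl(2).  After PR_TSC_SIGSEGV, a RDTSC instruction in
 * this task raises SIGSEGV until the mode is set back to PR_TSC_ENABLE.
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	int main(void)
 *	{
 *		int mode;
 *
 *		prctl(PR_SET_TSC, PR_TSC_SIGSEGV);   // rdtsc now faults
 *		prctl(PR_GET_TSC, &mode);            // read the mode back
 *		printf("tsc mode: %d\n", mode);
 *		prctl(PR_SET_TSC, PR_TSC_ENABLE);    // and re-enable it
 *		return 0;
 *	}
 */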
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
struct tss_struct *tss)
{
struct thread_struct *prev, *next;
prev = &prev_p->thread;
next = &next_p->thread;
if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
unsigned long debugctl = get_debugctlmsr();
debugctl &= ~DEBUGCTLMSR_BTF;
if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
debugctl |= DEBUGCTLMSR_BTF;
update_debugctlmsr(debugctl);
}
if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
test_tsk_thread_flag(next_p, TIF_NOTSC)) {
/* prev and next are different */
if (test_tsk_thread_flag(next_p, TIF_NOTSC))
hard_disable_TSC();
else
hard_enable_TSC();
}
if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
/*
* Copy the relevant range of the IO bitmap.
* Normally this is 128 bytes or less:
*/
memcpy(tss->io_bitmap, next->io_bitmap_ptr,
max(prev->io_bitmap_max, next->io_bitmap_max));
/*
* Make sure that the TSS limit is correct for the CPU
* to notice the IO bitmap.
*/
refresh_TR();
} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
/*
* Clear any possible leftover bits:
*/
memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
}
	propagate_user_return_notify(prev_p, next_p);
}
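/*
 * Illustrative userspace sketch (not part of this file): ioperm(2) is
 * what gives a task a private I/O permission bitmap; that sets
 * TIF_IO_BITMAP and makes the bitmap copy in __switch_to_xtra() above
 * necessary on every switch to such a task.  Needs CAP_SYS_RAWIO; port
 * 0x80 is only an example.
 *
 *	#include <sys/io.h>
 *
 *	int main(void)
 *	{
 *		if (ioperm(0x80, 1, 1))      // allow access to port 0x80
 *			return 1;
 *		outb(0, 0x80);               // I/O now permitted without iopl(3)
 *		ioperm(0x80, 1, 0);          // drop the permission again
 *		return 0;
 *	}
 */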
/*
* Idle related variables and functions
*/
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
static void (*x86_idle)(void);
#ifndef CONFIG_SMP
static inline void play_dead(void)
{
BUG();
}
#endif
void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}
void arch_cpu_idle_dead(void)
{
play_dead();
}
/*
* Called from the generic idle code.
*/
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine..
 */
void __cpuidle default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif
void stop_this_cpu(void *dummy)
{
local_irq_disable();
/*
* Remove this CPU:
*/
set_cpu_online(smp_processor_id(), false);
disable_local_APIC();
mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
for (;;)
		halt();
}
/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
/*
* We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
* gets set after static_cpu_has() places have been converted via
* alternatives.
*/
if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

/*
* The switch back from broadcast mode needs to be called with
* interrupts disabled.
*/
local_irq_disable();
tick_broadcast_exit();
	local_irq_enable();
}
/*
* Intel Core2 and older machines prefer MWAIT over HALT for C1.
* We can't rely on cpuidle installing MWAIT, because it will not load
* on systems that support only C1 -- so the boot default must be MWAIT.
*
* Some AMD machines are the opposite, they depend on using HALT.
*
* So for default C1, which is used during boot until cpuidle loads,
* use MWAIT-C1 on Intel HW that has it, else use HALT.
*/
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
if (c->x86_vendor != X86_VENDOR_INTEL)
return 0;
if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
return 0;
return 1;
}
/*
* MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
* with interrupts enabled and no flags, which is backwards compatible with the
* original MWAIT implementation.
*/
static __cpuidle void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
if (!need_resched())
__sti_mwait(0, 0);
else
local_irq_enable();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
} else {
local_irq_enable();
}
__current_clr_polling();
}
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
pr_info("using AMD E400 aware idle routine\n");
x86_idle = amd_e400_idle;
} else if (prefer_mwait_c1_over_halt(c)) {
pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}
void amd_e400_c1e_apic_setup(void)
{
if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
local_irq_disable();
tick_broadcast_force();
local_irq_enable();
	}
}
void __init arch_post_acpi_subsys_init(void)
{
u32 lo, hi;
if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
return;
/*
* AMD E400 detection needs to happen after ACPI has been enabled. If
* the machine is affected K8_INTP_C1E_ACTIVE_MASK bits are set in
* MSR_K8_INT_PENDING_MSG.
*/
rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
return;
boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halt in AMD C1E");
pr_info("System has AMD C1E enabled\n");
}
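/*
 * Note on the MSR access above: rdmsr() returns the 64-bit
 * MSR_K8_INT_PENDING_MSG split into two 32-bit halves, and the
 * K8_INTP_C1E_ACTIVE_MASK bits sit in the low half, which is why only
 * "lo" is tested.
 */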
static int __init idle_setup(char *str)
{
if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
} else if (!strcmp(str, "halt")) {
/*
* When the boot option of idle=halt is added, halt is
* forced to be used for CPU idle. In such case CPU C2/C3
* won't be used again.
* To continue to load the CPU idle driver, don't touch
* the boot_option_idle_override.
*/
boot_option_idle_override = IDLE_HALT;
} else if (!strcmp(str, "nomwait")) {
/*
* If the boot option of "idle=nomwait" is added,
* it means that mwait will be disabled for CPU C2/C3
* states. In such case it won't touch the variable
* of boot_option_idle_override.
*/
boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

return 0;
}
early_param("idle", idle_setup);
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_int() % 8192;
return sp & ~0xf;
}
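/*
 * Worked example for the arithmetic above (illustrative numbers): with
 * sp = 0x7ffd1000 and a random value of 5000 (0x1388), sp becomes
 * 0x7ffcfc78, and "& ~0xf" rounds that down to 0x7ffcfc70, so the
 * randomized stack stays 16-byte aligned.
 */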
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}
/*
* Return saved PC of a blocked thread.
 * What is this good for? It will always be the scheduler or ret_from_fork.
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
struct inactive_task_frame *frame =
(struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
return READ_ONCE_NOCHECK(frame->ret_addr);
}
/*
* Called from fs/proc with a reference on @p to find the function
* which called into schedule(). This needs to be done carefully
* because the task might wake up and we might look at a stack
* changing under us.
*/
unsigned long get_wchan(struct task_struct *p)
{
unsigned long start, bottom, top, sp, fp, ip, ret = 0;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
if (!try_get_task_stack(p))
return 0;
start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;

/*
* Layout of the stack page:
*
* ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
* PADDING
* ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
* stack
*
* The tasks stack pointer points at the location where the
* framepointer is stored. The data on the stack is:
* ... IP FP ... IP FP
*
* We need to read FP and IP, so we need to adjust the upper
* bound by another unsigned long.
*/
top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
do {
		if (fp < bottom || fp > top)
			goto out;

ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
if (!in_sched_functions(ip)) {
ret = ip;
goto out;
}
fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
} while (count++ < 16 && p->state != TASK_RUNNING);
out:
put_task_stack(p);
	return ret;
}