if (get_isa16_mode(regs->cp0_epc)) {
unsigned short mmop[2] = { 0 };
if (unlikely(get_user(mmop[0], epc) < 0))
status = SIGSEGV;
if (unlikely(get_user(mmop[1], epc) < 0))
status = SIGSEGV;
opcode = (mmop[0] << 16) | mmop[1];
if (status < 0)
status = simulate_rdhwr_mm(regs, opcode);
} else {
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
if (!cpu_has_llsc && status < 0)
status = simulate_llsc(regs, opcode);
if (status < 0)
status = simulate_rdhwr_normal(regs, opcode);
if (status < 0)
status = simulate_sync(regs, opcode);
}
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
regs->regs[31] = old31;
force_sig(status, current);
}
out:
exception_exit(prev_state);
/*
* MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
* emulated more than some threshold number of instructions, force migration to
* a "CPU" that has FP support.
*/
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
if (mt_fpemul_threshold > 0 &&
((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
/*
* If there's no FPU present, or if the application has already
* restricted the allowed set to exclude any CPUs with FPUs,
* we'll skip the procedure.
*/
if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
cpumask_t tmask;
current->thread.user_cpus_allowed
= current->cpus_allowed;
cpus_and(tmask, current->cpus_allowed,
mt_fpu_cpumask);
set_cpus_allowed_ptr(current, &tmask);
set_thread_flag(TIF_FPUBOUND);
}
}
#endif /* CONFIG_MIPS_MT_FPAFF */
}
/*
* No lock; only written during early bootup by CPU 0.
*/
static RAW_NOTIFIER_HEAD(cu2_chain);
int __ref register_cu2_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&cu2_chain, nb);
}
int cu2_notifier_call_chain(unsigned long val, void *v)
{
return raw_notifier_call_chain(&cu2_chain, val, v);
}
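/*
 * Illustrative sketch only (not part of this file): how a platform driver
 * could hook the Coprocessor 2 exception path through the chain above.
 * The handler and notifier_block names here are hypothetical;
 * register_cu2_notifier() and CU2_EXCEPTION are the interfaces actually
 * used in this file.
 */
static int example_cu2_handler(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct pt_regs *regs = data;

	if (action != CU2_EXCEPTION)
		return NOTIFY_DONE;

	/* Platform-specific CU2 handling would go here, e.g. enabling
	 * CU2 in c0_status for the current task. */
	(void)regs;
	return NOTIFY_OK;
}

static struct notifier_block example_cu2_nb = {
	.notifier_call = example_cu2_handler,
};

/* A board setup routine would then call:
 *	register_cu2_notifier(&example_cu2_nb);
 */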
static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
static int enable_restore_fp_context(int msa)
{
int err, was_fpu_owner;
if (!used_math()) {
/* First time FP context user. */
err = init_fpu();
if (msa && !err)
enable_msa();
if (!err)
set_used_math();
return err;
}
/*
* This task has formerly used the FP context.
*
* If this thread has no live MSA vector context then we can simply
* restore the scalar FP context. If it has live MSA vector context
* (that is, it has or may have used MSA since last performing a
* function call) then we'll need to restore the vector context. This
* applies even if we're currently only executing a scalar FP
* instruction. This is because if we were to later execute an MSA
* instruction then we'd either have to:
*
* - Restore the vector context & clobber any registers modified by
* scalar FP instructions between now & then.
*
* or
*
* - Not restore the vector context & lose the most significant bits
* of all vector registers.
*
* Neither of those options is acceptable. We cannot restore the least
* significant bits of the registers now & only restore the most
* significant bits later because the most significant bits of any
* vector registers whose aliased FP register is modified now will have
* been zeroed. We'd have no way to know that when restoring the vector
* context & thus may load an outdated value for the most significant
* bits of a vector register.
*/
if (!msa && !thread_msa_context_live())
return own_fpu(1);
/*
* This task is using or has previously used MSA. Thus we require
* that Status.FR == 1.
*/
was_fpu_owner = is_fpu_owner();
err = own_fpu(0);
if (err)
return err;
enable_msa();
write_msa_csr(current->thread.fpu.msacsr);
set_thread_flag(TIF_USEDMSA);
/*
* If this is the first time that the task is using MSA and it has
* previously used scalar FP in this time slice then we already have
* FP context which we shouldn't clobber.
*/
if (!test_and_set_thread_flag(TIF_MSA_CTX_LIVE) && was_fpu_owner)
return 0;
/* We need to restore the vector context. */
restore_msa(current);
return 0;
}
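/*
 * Illustrative sketch only (not part of the kernel source): the
 * scalar-vs-vector restore decision made above reduces to this predicate.
 * The helper name and parameters are hypothetical.
 */
static inline int example_needs_vector_restore(int msa_insn, int msa_ctx_live)
{
	/*
	 * Vector context must be restored whenever the trapping instruction
	 * is an MSA instruction or the thread may already hold live MSA
	 * state; only when neither holds can we restore scalar FP context
	 * alone (the own_fpu(1) early return above).
	 */
	return msa_insn || msa_ctx_live;
}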
unsigned int __user *epc;
unsigned long old_epc, old31;
unsigned long __maybe_unused flags;
if (cpid != 2)
die_if_kernel("do_cpu invoked from kernel context!", regs);
epc = (unsigned int __user *)exception_epc(regs);
old_epc = regs->cp0_epc;
old31 = regs->regs[31];
opcode = 0;
status = -1;
if (unlikely(compute_return_epc(regs) < 0))
goto out;
if (get_isa16_mode(regs->cp0_epc)) {
unsigned short mmop[2] = { 0 };
if (unlikely(get_user(mmop[0], epc) < 0))
status = SIGSEGV;
if (unlikely(get_user(mmop[1], epc) < 0))
status = SIGSEGV;
opcode = (mmop[0] << 16) | mmop[1];
if (status < 0)
status = simulate_rdhwr_mm(regs, opcode);
} else {
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
if (!cpu_has_llsc && status < 0)
status = simulate_llsc(regs, opcode);
if (status < 0)
status = simulate_rdhwr_normal(regs, opcode);
}
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
regs->regs[31] = old31;
force_sig(status, current);
}
case 3:
/*
* Old (MIPS I and MIPS II) processors will set this code
* for COP1X opcode instructions that replaced the original
* COP3 space. We don't limit COP1 space instructions in
* the emulator according to the CPU ISA, so we want to
* treat COP1X instructions consistently regardless of which
* code the CPU chose. Therefore we redirect this trap to
* the FP emulator too.
*
* Then some newer FPU-less processors use this code
* erroneously too, so they are covered by this choice
* as well.
*/
if (raw_cpu_has_fpu)
break;
/* Fall through. */
err = enable_restore_fp_context(0);
if (!raw_cpu_has_fpu || err) {
void __user *fault_addr = NULL;
sig = fpu_emulator_cop1Handler(regs,
&current->thread.fpu,
0, &fault_addr);
if (!process_fpemu_return(sig, fault_addr) && !err)
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
out:
exception_exit(prev_state);
asmlinkage void do_msa_fpe(struct pt_regs *regs)
{
enum ctx_state prev_state;
prev_state = exception_enter();
die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
force_sig(SIGFPE, current);
exception_exit(prev_state);
}
asmlinkage void do_msa(struct pt_regs *regs)
{
enum ctx_state prev_state;
int err;
prev_state = exception_enter();
if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
force_sig(SIGILL, current);
goto out;
}
die_if_kernel("do_msa invoked from kernel context!", regs);
err = enable_restore_fp_context(1);
if (err)
force_sig(SIGILL, current);
out:
exception_exit(prev_state);
}
enum ctx_state prev_state;
prev_state = exception_enter();
/*
* Called with interrupts disabled.
*/
asmlinkage void do_watch(struct pt_regs *regs)
{
u32 cause;
/*
* Clear WP (bit 22) bit of cause register so we don't loop
* forever.
*/
cause = read_c0_cause();
cause &= ~(1 << 22);
write_c0_cause(cause);
/*
* If the current thread has the watch registers loaded, save
* their values and send SIGTRAP. Otherwise another thread
* left the registers set, clear them and continue.
*/
if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
mips_read_watch_registers();
local_irq_enable();
force_sig(SIGTRAP, current);
} else {
mips_clear_watch_registers();
local_irq_enable();
}
}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
if (multi_match) {
printk("Pagemask: %0x\n", read_c0_pagemask());
printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
printk("\n");
dump_tlb_all();
}
show_code((unsigned int __user *) regs->cp0_epc);
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)
*/
panic("Caught Machine Check exception - %scaused by multiple "
"matching entries in the TLB.",
(multi_match) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
{
int subcode;
subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
>> VPECONTROL_EXCPT_SHIFT;
switch (subcode) {
case 0:
printk(KERN_DEBUG "Thread Underflow\n");
break;
case 1:
printk(KERN_DEBUG "Thread Overflow\n");
break;
case 2:
printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
break;
case 3:
printk(KERN_DEBUG "Gating Storage Exception\n");
break;
case 4:
printk(KERN_DEBUG "YIELD Scheduler Exception\n");
break;
case 5:
printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
break;
default:
printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
subcode);
break;
}
die_if_kernel("MIPS MT Thread exception in kernel", regs);
force_sig(SIGILL, current);
}
asmlinkage void do_dsp(struct pt_regs *regs)
{
if (cpu_has_dsp)
panic("Unexpected DSP exception");
force_sig(SIGILL, current);
}
asmlinkage void do_reserved(struct pt_regs *regs)
{
/*
* Game over - no way to handle this if it ever occurs. Most probably
* caused by a new unknown cpu type or after another deadly
* hard/software error.
*/
show_regs(regs);
panic("Caught reserved exception %ld - should not happen.",
(regs->cp0_cause & 0x7f) >> 2);
}
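/*
 * Illustrative sketch only: extracting the ExcCode field the same way
 * do_reserved() does above. Cause bits [6:2] hold the exception code, so
 * a cp0_cause value of 0x50, for example, decodes to ExcCode 20. The
 * helper name is hypothetical.
 */
static inline unsigned long example_exccode(unsigned long cause)
{
	return (cause & 0x7f) >> 2;	/* Cause[6:2] = ExcCode */
}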
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
l1parity = 0;
return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
l2parity = 0;
return 1;
}
__setup("nol2par", nol2parity);
/*
* Some MIPS CPUs can enable/disable cache parity detection, but they
* do it in different ways.
*/
static inline void parity_protection_init(void)
{
switch (current_cpu_type()) {
case CPU_74K:
case CPU_1004K:
{
#define ERRCTL_PE 0x80000000
#define ERRCTL_L2P 0x00800000
unsigned long errctl;
unsigned int l1parity_present, l2parity_present;
errctl = read_c0_ecc();
errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
/* probe L1 parity support */
write_c0_ecc(errctl | ERRCTL_PE);
back_to_back_c0_hazard();
l1parity_present = (read_c0_ecc() & ERRCTL_PE);
/* probe L2 parity support */
write_c0_ecc(errctl|ERRCTL_L2P);
back_to_back_c0_hazard();
l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
if (l1parity_present && l2parity_present) {
if (l1parity)
errctl |= ERRCTL_PE;
if (l1parity ^ l2parity)
errctl |= ERRCTL_L2P;
} else if (l1parity_present) {
if (l1parity)
errctl |= ERRCTL_PE;
} else if (l2parity_present) {
if (l2parity)
errctl |= ERRCTL_L2P;
} else {
/* No parity available */
}
printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
write_c0_ecc(errctl);
back_to_back_c0_hazard();
errctl = read_c0_ecc();
printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
if (l1parity_present)
printk(KERN_INFO "Cache parity protection %sabled\n",
(errctl & ERRCTL_PE) ? "en" : "dis");
if (l2parity_present) {
if (l1parity_present && l1parity)
errctl ^= ERRCTL_L2P;
printk(KERN_INFO "L2 cache parity protection %sabled\n",
(errctl & ERRCTL_L2P) ? "en" : "dis");
}
}
break;
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
/* Set the PE bit (bit 31) in the c0_errctl register. */
printk(KERN_INFO "Cache parity protection %sabled\n",
(read_c0_ecc() & 0x80000000) ? "en" : "dis");
break;
case CPU_20KC:
case CPU_25KF:
/* Clear the DE bit (bit 16) in the c0_status register. */
printk(KERN_INFO "Enable cache parity protection for "
"MIPS 20KC/25KF CPUs.\n");
clear_c0_status(ST0_DE);
break;
default:
break;
}
}
asmlinkage void cache_parity_error(void)
{
const int field = 2 * sizeof(unsigned long);
unsigned int reg_val;
/* For the moment, report the problem and hang. */
printk("Cache error exception:\n");
printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
reg_val = read_c0_cacheerr();
printk("c0_cacheerr == %08x\n", reg_val);
printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
reg_val & (1<<30) ? "secondary" : "primary",
reg_val & (1<<31) ? "data" : "insn");
if (cpu_has_mips_r2 &&
((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
reg_val & (1<<29) ? "ED " : "",
reg_val & (1<<28) ? "ET " : "",
reg_val & (1<<27) ? "ES " : "",
reg_val & (1<<26) ? "EE " : "",
reg_val & (1<<25) ? "EB " : "",
reg_val & (1<<24) ? "EI " : "",
reg_val & (1<<23) ? "E1 " : "",
reg_val & (1<<22) ? "E0 " : "");
} else {
pr_err("Error bits: %s%s%s%s%s%s%s\n",
reg_val & (1<<29) ? "ED " : "",
reg_val & (1<<28) ? "ET " : "",
reg_val & (1<<26) ? "EE " : "",
reg_val & (1<<25) ? "EB " : "",
reg_val & (1<<24) ? "EI " : "",
reg_val & (1<<23) ? "E1 " : "",
reg_val & (1<<22) ? "E0 " : "");
}
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
if (reg_val & (1<<22))
printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
if (reg_val & (1<<23))
printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif
panic("Can't handle the cache error!");
}
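/*
 * Illustrative sketch only: decoding a sample c0_cacheerr value with the
 * same bit positions cache_parity_error() prints above (bit 31 = data vs.
 * instruction reference, bit 30 = secondary vs. primary cache, bits 22-29
 * = individual error flags). The function name and the sample value are
 * hypothetical.
 */
static void example_decode_cacheerr(unsigned int reg_val)
{
	/* e.g. reg_val = 0x84000000: data reference, primary cache, EE set */
	pr_info("%s cache, %s reference, EE=%d\n",
		reg_val & (1 << 30) ? "secondary" : "primary",
		reg_val & (1 << 31) ? "data" : "insn",
		!!(reg_val & (1 << 26)));
}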
asmlinkage void do_ftlb(void)
{
const int field = 2 * sizeof(unsigned long);
unsigned int reg_val;
/* For the moment, report the problem and hang. */
if (cpu_has_mips_r2 &&
((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
read_c0_ecc());
pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
reg_val = read_c0_cacheerr();
pr_err("c0_cacheerr == %08x\n", reg_val);
if ((reg_val & 0xc0000000) == 0xc0000000) {
pr_err("Decoded c0_cacheerr: FTLB parity error\n");
} else {
pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
reg_val & (1<<30) ? "secondary" : "primary",
reg_val & (1<<31) ? "data" : "insn");
}
} else {
pr_err("FTLB error exception\n");
}
/* Just print the cacheerr bits for now */
cache_parity_error();
}
/*
* SDBBP EJTAG debug exception handler.
* We skip the instruction and return to the next instruction.
*/
void ejtag_exception_handler(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
unsigned long depc, old_epc, old_ra;
unsigned int debug;
printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
depc = read_c0_depc();
debug = read_c0_debug();
printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
if (debug & 0x80000000) {
/*
* In branch delay slot.
* We cheat a little bit here and use EPC to calculate the
* debug return address (DEPC). EPC is restored after the
* calculation.
*/
old_epc = regs->cp0_epc;
old_ra = regs->regs[31];
regs->cp0_epc = depc;
compute_return_epc(regs);
depc = regs->cp0_epc;
regs->cp0_epc = old_epc;
regs->regs[31] = old_ra;
} else
depc += 4;
write_c0_depc(depc);
#if 0
printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
write_c0_debug(debug | 0x100);
#endif
}
/*
* NMI exception handler.
* No lock; only written during early bootup by CPU 0.
*/
static RAW_NOTIFIER_HEAD(nmi_chain);
int register_nmi_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&nmi_chain, nb);
}
void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
char str[100];
snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
smp_processor_id(), regs->cp0_epc);
regs->cp0_epc = read_c0_errorepc();
die(str, regs);
}
#define VECTORSPACING 0x100 /* for EI/VI mode */
unsigned long ebase;
unsigned long vi_handlers[64];
void __init *set_except_vector(int n, void *addr)
#ifdef CONFIG_CPU_MICROMIPS
/*
* Only the TLB handlers are cache aligned with an even
* address. All other handlers are on an odd address and
* require no modification. Otherwise, MIPS32 mode will
* be entered when handling any TLB exceptions. That
* would be bad...since we must stay in microMIPS mode.
*/
if (!(handler & 0x1))
handler |= 1;
#endif
old_handler = xchg(&exception_handlers[n], handler);
#ifdef CONFIG_CPU_MICROMIPS
unsigned long jump_mask = ~((1 << 27) - 1);
#else
unsigned long jump_mask = ~((1 << 28) - 1);
#endif
u32 *buf = (u32 *)(ebase + 0x200);
unsigned int k0 = 26;
if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
uasm_i_j(&buf, handler & ~jump_mask);
uasm_i_nop(&buf);
} else {
UASM_i_LA(&buf, k0, handler);
uasm_i_jr(&buf, k0);
uasm_i_nop(&buf);
}
local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
}
return (void *)old_handler;
}
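/*
 * Illustrative sketch only: installing an exception handler through
 * set_except_vector() as trap_init()-style code does. ExcCode 13 is the
 * Trap exception and handle_tr is the usual assembler stub for it, but
 * treat the exact vector number and symbol here as an example.
 */
static void __init example_install_trap_vector(void)
{
	extern asmlinkage void handle_tr(void);

	/* On microMIPS kernels bit 0 of the handler address is forced to 1
	 * above so the exception is taken in microMIPS mode. */
	set_except_vector(13, handle_tr);
}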
static void do_default_vi(void)
{
show_regs(get_irq_regs());
panic("Caught unexpected vectored interrupt.");
}
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
int srssets = current_cpu_data.srsets;
u16 *h;
unsigned char *b;
BUG_ON(!cpu_has_veic && !cpu_has_vint);
if (addr == NULL) {
handler = (unsigned long) do_default_vi;
srs = 0;
} else
handler = (unsigned long) addr;
vi_handlers[n] = handler;
b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
panic("Shadow register set %d not supported", srs);
if (cpu_has_veic) {
if (board_bind_eic_interrupt)
board_bind_eic_interrupt(n, srs);
/* SRSMap is only defined if shadow sets are implemented */
change_c0_srsmap(0xf << n*4, srs << n*4);
}
if (srs == 0) {
/*
* If no shadow set is selected then use the default handler
* that does normal register saving and standard interrupt exit
*/
extern char except_vec_vi, except_vec_vi_lui;
extern char except_vec_vi_ori, except_vec_vi_end;
extern char rollback_except_vec_vi;
char *vec_start = using_rollback_handler() ?
&rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* We need to provide the SMTC vectored interrupt handler
* not only with the address of the handler, but with the
* Status.IM bit to be masked before going there.
*/
extern char except_vec_vi_mori;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
const int mori_offset = &except_vec_vi_mori - vec_start + 2;
#else
const int mori_offset = &except_vec_vi_mori - vec_start;
#endif
#endif /* CONFIG_MIPS_MT_SMTC */
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
const int lui_offset = &except_vec_vi_lui - vec_start + 2;
const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
const int lui_offset = &except_vec_vi_lui - vec_start;
const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
const int handler_len = &except_vec_vi_end - vec_start;
if (handler_len > VECTORSPACING) {
/*
* Sigh... panicking won't help as the console
* is probably not configured :(
*/
panic("VECTORSPACING too small");
}
set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
(handler_len - 1));
#else
handler_len);
#endif
#ifdef CONFIG_MIPS_MT_SMTC
BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
h = (u16 *)(b + mori_offset);
*h = (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
h = (u16 *)(b + lui_offset);
*h = (handler >> 16) & 0xffff;
h = (u16 *)(b + ori_offset);
*h = (handler & 0xffff);
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+handler_len));
}
else {
/*
* In other cases jump directly to the interrupt handler. It
* is the handler's responsibility to save registers if required
* (eg hi/lo) and return from the exception using "eret".
*/
u32 insn;
h = (u16 *)b;
/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
h[0] = (insn >> 16) & 0xffff;
h[1] = insn & 0xffff;
h[2] = 0;
h[3] = 0;
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+8));
void *set_vi_handler(int n, vi_handler_t addr)
/*
* Timer interrupt
*/
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;
/*
* Performance counter IRQ or -1 if shared with timer
*/
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
static int noulri;
static int __init ulri_disable(char *s)
{
pr_info("Disabling ulri\n");
noulri = 1;
return 1;
}
__setup("noulri", ulri_disable);
void per_cpu_trap_init(bool is_boot_cpu)
{
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
unsigned int hwrena = cpu_hwrena_impl_bits;
#ifdef CONFIG_MIPS_MT_SMTC
int secondaryTC = 0;
int bootTC = (cpu == 0);
/*
* Only do per_cpu_trap_init() for first TC of Each VPE.
* Note that this hack assumes that the SMTC init code
* assigns TCs consecutively and in ascending order.
*/
if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */
/*
* Disable coprocessors and select 32-bit or 64-bit addressing
* and the 16/32 or 32/32 FPR register model. Reset the BEV
* flag that some firmware may have left set and the TS bit (for
* IP27). Set XX for ISA IV code to work.
*/
#ifdef CONFIG_64BIT
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
status_set |= ST0_XX;
if (cpu_has_dsp)
status_set |= ST0_MX;
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
if (cpu_has_mips_r2)
hwrena |= 0x0000000f;
if (!noulri && cpu_has_userlocal)
hwrena |= (1 << 29);
if (hwrena)
write_c0_hwrena(hwrena);
#ifdef CONFIG_MIPS_MT_SMTC
if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
if (cpu_has_veic || cpu_has_vint) {
unsigned long sr = set_c0_status(ST0_BEV);
write_c0_ebase(ebase);
write_c0_status(sr);
/* Setting vector spacing enables EI/VI mode */
change_c0_intctl(0x3e0, VECTORSPACING);
}
if (cpu_has_divec) {
if (cpu_has_mipsmt) {
unsigned int vpflags = dvpe();
set_c0_cause(CAUSEF_IV);
evpe(vpflags);
} else
set_c0_cause(CAUSEF_IV);
}
/*
* Before R2 both interrupt numbers were fixed to 7, so on R2 only:
*
* o read IntCtl.IPTI to determine the timer interrupt
* o read IntCtl.IPPCI to determine the performance counter interrupt
*/
if (cpu_has_mips_r2) {
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
if (cp0_perfcount_irq == cp0_compare_irq)
cp0_perfcount_irq = -1;
} else {
cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
cp0_perfcount_irq = -1;
}
#ifdef CONFIG_MIPS_MT_SMTC
}
#endif /* CONFIG_MIPS_MT_SMTC */
if (!cpu_data[cpu].asid_cache)
cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
#ifdef CONFIG_MIPS_MT_SMTC
if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
/* Boot CPU's cache setup in setup_arch(). */
if (!is_boot_cpu)
cpu_cache_init();
tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
} else if (!secondaryTC) {
/*
* First TC in non-boot VPE must do subset of tlb_init()
* for MMU control registers.
*/
write_c0_pagemask(PM_DEFAULT_MASK);
write_c0_wired(0);
}
#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP();
}
/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
memcpy((void *)(ebase + offset), addr, size);
#endif
local_flush_icache_range(ebase + offset, ebase + offset + size);
}
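/*
 * Illustrative sketch only: a typical set_handler() call, copying the
 * generic exception dispatch stub to the 0x180 general exception vector
 * the way trap_init()-style code does. Treat the symbol and sizes as an
 * example rather than a requirement.
 */
static void __init example_install_general_vector(void)
{
	extern char except_vec3_generic;

	set_handler(0x180, &except_vec3_generic, 0x80);
}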
static char panic_null_cerr[] =
"Trying to set NULL cache error exception handler";
/*
* Install uncached CPU exception handler.
* This is suitable only for the cache error exception which is the only