			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * There is an ancient bug in MIPS assemblers where the break
	 * code is placed at bit 16 instead of bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristic.  --macro
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
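	/*
	 * Worked example of the swap (for illustration): a break code of 7
	 * mis-assembled into bits 25:16 arrives here as bcode == 7 << 10
	 * (0x1c00), which trips the test above and is swapped back to 7.
	 */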
	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}
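	/*
	 * Break codes not claimed by the kprobe/uprobe notifiers above fall
	 * through to do_trap_or_bp(), which treats them as an ordinary
	 * breakpoint (TRAP_BRKPT).
	 */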

	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
out:
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

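/*
 * Trap exception handler.  The trapping instruction is re-read so the
 * trap code can be pulled out of register-form trap instructions;
 * immediate forms carry no code, so tcode stays 0.  If the instruction
 * cannot be read, SIGSEGV is raised instead.
 */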
asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	u16 instr[2] = { 0 };
	mm_segment_t seg;

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, 0, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

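/*
 * Reserved Instruction exception handler.  For user mode on an R6 core
 * the MIPS R2 emulator gets the first look; otherwise a few instructions
 * (ll/sc, rdhwr, sync, fp) are simulated directly and anything left over
 * earns a SIGILL.
 */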
asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	/*
	 * Avoid any kernel code. Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (!get_isa16_mode(regs->cp0_epc)) {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;
		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);
	} else if (cpu_has_mmips) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
			status = SIGSEGV;
		opcode = mmop[0];
		opcode = (opcode << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status, current);
	}

out:
	exception_exit(prev_state);
}

/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

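/*
 * Default CU2 handler, assumed to be the lowest-priority entry on
 * cu2_chain: if no platform notifier claims the Coprocessor 2 exception
 * it is reported as an illegal instruction.
 */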
static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL, current);

	return NOTIFY_OK;
}

#ifdef CONFIG_MIPS_FP_SUPPORT

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;
	bool first_fp;
	/* Initialize context if it hasn't been used already */
	first_fp = init_fp_ctx(current);
	if (first_fp) {
		preempt_disable();
		err = own_fpu_inatomic(1);
		if (msa && !err) {
			enable_msa();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		return err;
	}

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context. This
	 * applies even if we're currently only executing a scalar FP
	 * instruction. This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 * or
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable. We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will have
	 * been zeroed. We'd have no way to know that when restoring the vector
	 * context & thus may load an outdated value for the most significant
	 * bits of a vector register.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);
		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);
		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}

#else /* !CONFIG_MIPS_FP_SUPPORT */

static int enable_restore_fp_context(int msa)
{
	return SIGILL;
}

#endif /* CONFIG_MIPS_FP_SUPPORT */

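/*
 * Coprocessor Unusable exception handler.  CP0.Cause.CE selects the
 * coprocessor: CU0 accesses are simulated where possible (e.g. ll/sc on
 * cores without llsc), CU1/CU3 go to the FP path (or straight to SIGILL
 * when FP support is compiled out), and CU2 is handed to the cu2
 * notifier chain.
 */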
asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	unsigned int opcode;
	unsigned int cpid;
	int status;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (!get_isa16_mode(regs->cp0_epc)) {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status, current);
		}

		break;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case 3:
		/*
		 * The COP3 opcode space and consequently the CP0.Status.CU3
		 * bit and the CP0.Cause.CE=3 encoding have been removed as
		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
		 * up the space has been reused for COP1X instructions, that
		 * are enabled by the CP0.Status.CU1 bit and consequently
		 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
		 * exceptions.  Some FPU-less processors that implement one
		 * of these ISAs however use this code erroneously for COP1X
		 * instructions.  Therefore we redirect this trap to the FP
		 * emulator too.
		 */
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL, current);
			break;
		}
		/* Fall through.  */

	case 1: {
		void __user *fault_addr;
		unsigned long fcr31;
		int err, sig;

		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)
			break;

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave
		 * any enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Send a signal if required.  */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		break;
	}
#else /* CONFIG_MIPS_FP_SUPPORT */
	case 1:
	case 3:
		force_sig(SIGILL, current);
		break;
#endif /* CONFIG_MIPS_FP_SUPPORT */

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}

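/*
 * MSA FP exception handler.  The MSACSR value captured at exception time
 * is passed in; its Cause bits are cleared before interrupts are
 * re-enabled, and SIGFPE is delivered unless the DIE_MSAFP notifier
 * chain stops the event first.
 */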
asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
}

/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	prev_state = exception_enter();
	/*
	 * Clear the WP bit (bit 22) of the cause register so we don't
	 * loop forever.
	 */
	clear_c0_cause(CAUSEF_WP);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL, current);
	} else {
		mips_clear_watch_registers();
	}
	exception_exit(prev_state);
}

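/*
 * Machine Check exception handler.  CP0.Status.TS set means the TLB shut
 * itself down after detecting multiple matching entries; either way the
 * register and code state is dumped and the kernel panics.
 */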
asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;
	mm_segment_t old_fs = get_fs();
	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
	}

	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	show_code((unsigned int __user *) regs->cp0_epc);

	set_fs(old_fs);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}


asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000

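	/*
	 * ERRCTL_PE (bit 31) enables parity/ECC checking for the primary
	 * caches; ERRCTL_L2P (bit 23) does the same for the L2 on cores
	 * that expose the control through CP0 ErrCtl.
	 */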
	if (mips_cm_revision() >= CM_REV_CM3) {
		ulong gcr_ectl, cp0_ectl;

		/*
		 * With CM3 systems we need to ensure that the L1 & L2
		 * parity enables are set to the same value, since this
		 * is presumed by the hardware engineers.
		 *
		 * If the user disabled either of L1 or L2 ECC checking,
		 * disable both.
		 */
		l1parity &= l2parity;
		l2parity &= l1parity;

		/* Probe L1 ECC support */
		cp0_ectl = read_c0_ecc();
		write_c0_ecc(cp0_ectl | ERRCTL_PE);
		back_to_back_c0_hazard();
		cp0_ectl = read_c0_ecc();

		/* Probe L2 ECC support */
		gcr_ectl = read_gcr_err_control();

		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
		    !(cp0_ectl & ERRCTL_PE)) {
			/*
			 * One of L1 or L2 ECC checking isn't supported,
			 * so we cannot enable either.
			 */
			l1parity = l2parity = 0;
		}

		/* Configure L1 ECC checking */
		if (l1parity)
			cp0_ectl |= ERRCTL_PE;
		else
			cp0_ectl &= ~ERRCTL_PE;
		write_c0_ecc(cp0_ectl);
		back_to_back_c0_hazard();
		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);

		/* Configure L2 ECC checking */
		if (l2parity)
			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		else
			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
		write_gcr_err_control(gcr_ectl);
		gcr_ectl = read_gcr_err_control();
		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		WARN_ON(!!gcr_ectl != l2parity);

		pr_info("Cache parity protection %sabled\n",
			l1parity ? "en" : "dis");
		return;
	}

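	/*
	 * Pre-CM3 cores: parity control lives in CP0 ErrCtl (and, for the
	 * 20KC/25KF, in Status.DE), so probe and program it per CPU type.
	 */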
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
		{
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON1:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

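/*
 * Unrecoverable cache error: decode the CacheErr register (bit 31 =
 * data vs. instruction reference, bit 30 = secondary vs. primary cache,
 * low bits = index) and panic.
 */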
asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

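/*
 * NMI handler proper: run anything registered on nmi_chain, then report
 * the NMI and die, using ErrorEPC as the faulting PC for the register
 * dump.
 */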
void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
}

#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}

	return (void *)old_handler;
}

static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

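/*
 * Install the handler for vectored interrupt 'n' using shadow register
 * set 'srs'.  The handler address is recorded in vi_handlers[] and the
 * per-vector code lives at ebase + 0x200 + n * VECTORSPACING; a NULL
 * handler installs do_default_vi.
 */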
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {