				 */
				if (cpu_guest_has_rw_llb &&
				    !(val & MIPS_LLADDR_LLB))
					write_gc0_lladdr(0);
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 1 &&		/* MAAR */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				val = mips_process_maar(inst.c0r_format.rs,
							val);

				/* MAARI must be in range */
				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
						ARRAY_SIZE(vcpu->arch.maar));
				vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
									val;
			} else if (rd == MIPS_CP0_LLADDR &&
				   (sel == 2) &&	/* MAARI */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				kvm_write_maari(vcpu, val);
			} else if (rd == MIPS_CP0_CONFIG &&
				   (sel == 6)) {
				cop0->reg[rd][sel] = (int)val;
			} else if (rd == MIPS_CP0_ERRCTL &&
				   (sel == 0)) {	/* ErrCtl */
				/* ignore the written value */
#ifdef CONFIG_CPU_LOONGSON64
			} else if (rd == MIPS_CP0_DIAG &&
				   (sel == 0)) {	/* Diag */
				unsigned long flags;

				local_irq_save(flags);
				if (val & LOONGSON_DIAG_BTB) {
					/* Flush BTB */
					set_c0_diag(LOONGSON_DIAG_BTB);
				}
				if (val & LOONGSON_DIAG_ITLB) {
					/* Flush ITLB */
					set_c0_diag(LOONGSON_DIAG_ITLB);
				}
				if (val & LOONGSON_DIAG_DTLB) {
					/* Flush DTLB */
					set_c0_diag(LOONGSON_DIAG_DTLB);
				}
				if (val & LOONGSON_DIAG_VTLB) {
					/* Flush VTLB */
					kvm_loongson_clear_guest_vtlb();
				}
				if (val & LOONGSON_DIAG_FTLB) {
					/* Flush FTLB */
					kvm_loongson_clear_guest_ftlb();
				}
				local_irq_restore(flags);
#endif
			} else {
				er = EMULATE_FAIL;
			}
			break;

		default:
			er = EMULATE_FAIL;
			break;
		}
	}
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}

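/**
 * kvm_vz_gpsi_cache() - Emulate a guest CACHE instruction.
 * @inst:	Instruction word of the trapping CACHE.
 * @opc:	Guest PC of the instruction.
 * @cause:	Cause register value from the exit.
 * @run:	kvm_run structure for the VCPU.
 * @vcpu:	Virtual CPU context.
 *
 * Index invalidate/writeback ops on the primary I and D caches are performed
 * on the corresponding host cache line; secondary and tertiary cache ops are
 * ignored. Hit ops are handled only on Octeon III (by flushing the whole
 * icache); on other platforms they fail the emulation.
 */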
static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
					       u32 *opc, u32 cause,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va, curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/* Secondary or tertiary cache ops ignored */
	if (cache != Cache_I && cache != Cache_D)
		return EMULATE_DONE;

	switch (op_inst) {
	case Index_Invalidate_I:
		flush_icache_line_indexed(va);
		return EMULATE_DONE;
	case Index_Writeback_Inv_D:
		flush_dcache_line_indexed(va);
		return EMULATE_DONE;
	case Hit_Invalidate_I:
	case Hit_Invalidate_D:
	case Hit_Writeback_Inv_D:
		if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
			/* We can just flush the entire icache */
			local_flush_icache_range(0, 0);
			return EMULATE_DONE;
		}

		/*
		 * All other platforms so far handle guest hit cache ops in
		 * hardware, so trapping here is unexpected: fall through and
		 * fail the emulation.
		 */
		break;
	default:
		break;
	}

	kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
		offset);
	/* Rollback PC */
	vcpu->arch.pc = curr_pc;

	return EMULATE_FAIL;
}

#ifdef CONFIG_CPU_LOONGSON64
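/*
 * Emulate the Loongson-3 LWC2-encoded CPUCFG read instruction, exposing
 * only a fixed subset of the host's CPUCFG feature bits to the guest.
 */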
static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	unsigned int rs, rd;
	unsigned int hostcfg;
	unsigned long curr_pc;
	enum emulation_result er = EMULATE_DONE;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rs = inst.loongson3_lscsr_format.rs;
	rd = inst.loongson3_lscsr_format.rd;
	switch (inst.loongson3_lscsr_format.fr) {
	case 0x8:  /* Read CPUCFG */
		++vcpu->stat.vz_cpucfg_exits;
		hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);

		switch (vcpu->arch.gprs[rs]) {
		case LOONGSON_CFG0:
			vcpu->arch.gprs[rd] = 0x14c000;
			break;
		case LOONGSON_CFG1:
			hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
				    LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
				    LOONGSON_CFG1_SFBP);
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		case LOONGSON_CFG2:
			hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
				    LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		case LOONGSON_CFG3:
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		default:
			/* Don't export any other advanced features to guest */
			vcpu->arch.gprs[rd] = 0;
			break;
		}
		break;

	default:
		kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
			inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
			curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}
#endif

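/**
 * kvm_trap_vz_handle_gpsi() - Handle a Guest Privileged Sensitive Instruction
 * exception.
 * @cause:	Cause register value from the exit.
 * @opc:	Guest PC of the trapping instruction.
 * @vcpu:	Virtual CPU context.
 *
 * Fetch the trapping instruction and dispatch to the COP0, CACHE, LWC2 or
 * RDHWR emulation paths as appropriate.
 */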
static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	struct kvm_run *run = vcpu->run;
	union mips_instruction inst;
	int rd, rt, sel;
	int err;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
		break;
#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
		break;
#endif
#ifdef CONFIG_CPU_LOONGSON64
	case lwc2_op:
		er = kvm_vz_gpsi_lwc2(inst, opc, cause, run, vcpu);
		break;
#endif
	case spec3_op:
		switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
		case cache6_op:
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
			break;
#endif
		case rdhwr_op:
			if (inst.r_format.rs || (inst.r_format.re >> 3))
				goto unknown;

			rd = inst.r_format.rd;
			rt = inst.r_format.rt;
			sel = inst.r_format.re & 0x7;

			switch (rd) {
			case MIPS_HWR_CC:	/* Read count register */
				arch->gprs[rt] =
					(long)(int)kvm_mips_read_count(vcpu);
				break;
			default:
				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
					      KVM_TRACE_HWR(rd, sel), 0);
				goto unknown;
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);

			er = update_pc(vcpu, cause);
			break;
		default:
			goto unknown;
		}
		break;
unknown:

	default:
		kvm_err("GPSI exception not supported (%p/%#x)\n",
				opc, inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

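/**
 * kvm_trap_vz_handle_gsfc() - Handle a Guest Software Field Change exception.
 * @cause:	Cause register value from the exit.
 * @opc:	Guest PC of the trapping MTC0.
 * @vcpu:	Virtual CPU context.
 *
 * Complete an MTC0 to a sensitive field of Status, Cause, IntCtl or Config5
 * on behalf of the guest and advance the guest PC.
 */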
static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	union mips_instruction inst;
	int err;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/* complete MTC0 on behalf of guest and advance EPC */
	if (inst.c0r_format.opcode == cop0_op &&
	    inst.c0r_format.rs == mtc_op &&
	    inst.c0r_format.z == 0) {
		int rt = inst.c0r_format.rt;
		int rd = inst.c0r_format.rd;
		int sel = inst.c0r_format.sel;
		unsigned int val = arch->gprs[rt];
		unsigned int old_val, change;

		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
			      val);

		if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
			/* FR bit should read as zero if no FPU */
			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
				val &= ~(ST0_CU1 | ST0_FR);

			/*
			 * Also don't allow FR to be set if host doesn't support
			 * it.
			 */
			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				val &= ~ST0_FR;

			old_val = read_gc0_status();
			change = val ^ old_val;

			if (change & ST0_FR) {
				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				kvm_drop_fpu(vcpu);
			}

			/*
			 * If MSA state is already live, it is undefined how it
			 * interacts with FR=0 FPU state, and we don't want to
			 * hit reserved instruction exceptions trying to save
			 * the MSA state later when CU=1 && FR=1, so play it
			 * safe and save it first.
			 */
			if (change & ST0_CU1 && !(val & ST0_FR) &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
				kvm_lose_fpu(vcpu);

			write_gc0_status(val);
		} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
			u32 old_cause = read_gc0_cause();
			u32 change = old_cause ^ val;

			/* DC bit enabling/disabling timer? */
			if (change & CAUSEF_DC) {
				if (val & CAUSEF_DC) {
					kvm_vz_lose_htimer(vcpu);
					kvm_mips_count_disable_cause(vcpu);
				} else {
					kvm_mips_count_enable_cause(vcpu);
				}
			}

			/* Only certain bits are RW to the guest */
			change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
				   CAUSEF_IP0 | CAUSEF_IP1);

			/* WP can only be cleared */
			change &= ~CAUSEF_WP | old_cause;

			write_gc0_cause(old_cause ^ change);
		} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
			write_gc0_intctl(val);
		} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
			old_val = read_gc0_config5();
			change = val ^ old_val;
			/* Handle changes in FPU/MSA modes */
			preempt_disable();

			/*
			 * Propagate FRE changes immediately if the FPU
			 * context is already loaded.
			 */
			if (change & MIPS_CONF5_FRE &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
				change_c0_config5(MIPS_CONF5_FRE, val);

			preempt_enable();

			val = old_val ^
				(change & kvm_vz_config5_guest_wrmask(vcpu));
			write_gc0_config5(val);
		} else {
			kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
			    opc, inst.word);
			er = EMULATE_FAIL;
		}

		if (er != EMULATE_FAIL)
			er = update_pc(vcpu, cause);
	} else {
		kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
			opc, inst.word);
		er = EMULATE_FAIL;
	}

	return er;
}

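/**
 * kvm_trap_vz_handle_ghfc() - Handle a Guest Hardware Field Change exception.
 * @cause:	Cause register value from the exit.
 * @opc:	Guest PC at the time of the exit.
 * @vcpu:	Virtual CPU context.
 *
 * No emulation is required; the mode change is only traced.
 */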
static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	/*
	 * Presumably this is due to MC (guest mode change), so let's trace some
	 * relevant info.
	 */
	trace_kvm_guest_mode_change(vcpu);

	return EMULATE_DONE;
}

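/**
 * kvm_trap_vz_handle_hc() - Handle a guest hypercall exception.
 * @cause:	Cause register value from the exit.
 * @opc:	Guest PC of the HYPCALL instruction.
 * @vcpu:	Virtual CPU context.
 *
 * Advance the guest PC past the HYPCALL and emulate the hypercall, rolling
 * the PC back if emulation fails.
 */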
static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
						   struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	unsigned long curr_pc;
	int err;

	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	er = kvm_mips_emul_hypcall(vcpu, inst);
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}

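/*
 * Fallback for guest exit codes with no handler (GRR, GVA, GPA and reserved
 * codes): log the trapping instruction and fail the emulation.
 */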
static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
							u32 cause,
							u32 *opc,
							struct kvm_vcpu *vcpu)
{
	u32 inst;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x  Status: %#x\n",
		gexccode, opc, inst, read_gc0_status());

	return EMULATE_FAIL;
}

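/*
 * Top-level dispatcher for guest exits, keyed on GuestCtl0.GExcCode. The
 * emulation result is converted into RESUME_GUEST/RESUME_HOST for the run
 * loop, with hypercalls completed via kvm_mips_handle_hypcall().
 */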
static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
			MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
	int ret = RESUME_GUEST;

	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
	switch (gexccode) {
	case MIPS_GCTL0_GEXC_GPSI:
		++vcpu->stat.vz_gpsi_exits;
		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GSFC:
		++vcpu->stat.vz_gsfc_exits;
		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_HC:
		++vcpu->stat.vz_hc_exits;
		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GRR:
		++vcpu->stat.vz_grr_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GVA:
		++vcpu->stat.vz_gva_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GHFC:
		++vcpu->stat.vz_ghfc_exits;
		er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GPA:
		++vcpu->stat.vz_gpa_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	default:
		++vcpu->stat.vz_resvd_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_HYPERCALL) {
		ret = kvm_mips_handle_hypcall(vcpu);
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use a coprocessor which hasn't been allowed
 * by the root context.
 */
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_FAIL;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/*
		 * If guest FPU not present, the FPU operation should have been
		 * treated as a reserved instruction!
		 * If FPU already in use, we shouldn't get this at all.
		 */
		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
			preempt_enable();
			return EMULATE_FAIL;
		}

		kvm_own_fpu(vcpu);
		er = EMULATE_DONE;
	}
	/* other coprocessors not handled */

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled in the root
 * context.
 */
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
	 * should have been treated as a reserved instruction!
	 * Same if CU1=1, FR=0.
	 * If MSA already in use, we shouldn't get this at all.
	 */
	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_msa(vcpu);

	return RESUME_GUEST;
}

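/*
 * Handle a root TLB load miss on a guest physical address. If no mapping
 * can be created the access is treated as MMIO and the load instruction is
 * emulated, exiting to userspace with KVM_EXIT_MMIO where necessary.
 */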
static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err, ret = RESUME_GUEST;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
		/* A code fetch fault doesn't count as an MMIO */
		if (kvm_is_ifetch_fault(&vcpu->arch)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

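/*
 * Handle a root TLB store miss. The bad virtual address is first translated
 * to a guest physical address (retrying the access if translation fails);
 * unmapped addresses are treated as MMIO and the store is emulated.
 */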
static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err;
	int ret = RESUME_GUEST;

	/* Just try the access again if we couldn't do the translation */
	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
		return RESUME_GUEST;
	vcpu->arch.host_cp0_badvaddr = badvaddr;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

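/*
 * Register index lists reported via KVM_GET_REG_LIST. The base list is
 * always exposed; the lists below are appended only when the corresponding
 * guest CP0 feature is present.
 */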
static u64 kvm_vz_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_PAGEGRAIN,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG6,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXT,
#endif
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static u64 kvm_vz_get_one_regs_contextconfig[] = {
	KVM_REG_MIPS_CP0_CONTEXTCONFIG,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
#endif
};

static u64 kvm_vz_get_one_regs_segments[] = {
	KVM_REG_MIPS_CP0_SEGCTL0,
	KVM_REG_MIPS_CP0_SEGCTL1,
	KVM_REG_MIPS_CP0_SEGCTL2,
};

static u64 kvm_vz_get_one_regs_htw[] = {
	KVM_REG_MIPS_CP0_PWBASE,
	KVM_REG_MIPS_CP0_PWFIELD,
	KVM_REG_MIPS_CP0_PWSIZE,
	KVM_REG_MIPS_CP0_PWCTL,
};

static u64 kvm_vz_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
};

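/*
 * Count the register indices exposed to userspace, including the optional
 * feature-gated ones. Must match what kvm_vz_copy_reg_indices() writes.
 */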
static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_vz_get_one_regs);
	if (cpu_guest_has_userlocal)
		++ret;
	if (cpu_guest_has_badinstr)
		++ret;
	if (cpu_guest_has_badinstrp)
		++ret;
	if (cpu_guest_has_contextconfig)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	if (cpu_guest_has_segments)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	if (cpu_guest_has_htw || cpu_guest_has_ldpte)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
		ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
	ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);

	return ret;
}

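/*
 * Copy the register index list to userspace. The set of optional indices
 * written here must mirror the counting in kvm_vz_num_regs().
 */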
static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_vz_get_one_regs,
			 sizeof(kvm_vz_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_vz_get_one_regs);

	if (cpu_guest_has_userlocal) {
		index = KVM_REG_MIPS_CP0_USERLOCAL;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstr) {
		index = KVM_REG_MIPS_CP0_BADINSTR;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstrp) {
		index = KVM_REG_MIPS_CP0_BADINSTRP;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_contextconfig) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
				 sizeof(kvm_vz_get_one_regs_contextconfig)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	}
	if (cpu_guest_has_segments) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
				 sizeof(kvm_vz_get_one_regs_segments)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	}
	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
				 sizeof(kvm_vz_get_one_regs_htw)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	}
	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
			index = KVM_REG_MIPS_CP0_MAAR(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}

		index = KVM_REG_MIPS_CP0_MAARI;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	for (i = 0; i < 6; ++i) {
		if (!cpu_guest_has_kscr(i + 2))
			continue;

		if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
				 sizeof(kvm_vz_get_one_regs_kscratch[i])))
			return -EFAULT;
		++indices;
	}

	return 0;
}

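/*
 * The KVM register API always presents EntryLo0/1 as 64-bit values. On
 * 32-bit hosts the RI/XI bits occupy the top of the 32-bit register, so
 * they are moved up to bits 63:62 of the user-visible value.
 */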
static inline s64 entrylo_kvm_to_user(unsigned long v)
{
	s64 mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits up into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= ((s64)v & mask) << 32;
	}
	return ret;
}

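/* Inverse of entrylo_kvm_to_user(): move RI/XI back down on 32-bit hosts. */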
static inline unsigned long entrylo_user_to_kvm(s64 v)
{
	unsigned long mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits down into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= (v >> 32) & mask;
	}
	return ret;
}

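/*
 * Read one guest register for KVM_GET_ONE_REG. Most state is read directly
 * from the guest CP0 context; feature-gated registers return -EINVAL when
 * the corresponding guest feature is absent.
 */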
static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)read_gc0_index();
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = entrylo_kvm_to_user(read_gc0_entrylo0());
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = entrylo_kvm_to_user(read_gc0_entrylo1());
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)read_gc0_context();
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_contextconfig();
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		*v = read_gc0_userlocal();
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_xcontextconfig();
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)read_gc0_pagemask();
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		*v = (long)read_gc0_pagegrain();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl0();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl1();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl2();
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwbase();
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwfield();
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwsize();
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)read_gc0_wired();
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwctl();
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)read_gc0_hwrena();
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)read_gc0_badvaddr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTR: