*/
if (cpu_guest_has_rw_llb &&
!(val & MIPS_LLADDR_LLB))
write_gc0_lladdr(0);
} else if (rd == MIPS_CP0_LLADDR &&
sel == 1 && /* MAAR */
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) {
val = mips_process_maar(inst.c0r_format.rs,
val);
/* MAARI must be in range */
BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
ARRAY_SIZE(vcpu->arch.maar));
vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
val;
} else if (rd == MIPS_CP0_LLADDR &&
(sel == 2) && /* MAARI */
cpu_guest_has_maar &&
!cpu_guest_has_dyn_maar) {
kvm_write_maari(vcpu, val);
} else if (rd == MIPS_CP0_CONFIG &&
(sel == 6)) {
cop0->reg[rd][sel] = (int)val;
} else if (rd == MIPS_CP0_ERRCTL &&
(sel == 0)) { /* ErrCtl */
/* ignore the written value */
#ifdef CONFIG_CPU_LOONGSON64
} else if (rd == MIPS_CP0_DIAG &&
(sel == 0)) { /* Diag */
unsigned long flags;
local_irq_save(flags);
if (val & LOONGSON_DIAG_BTB) {
/* Flush BTB */
set_c0_diag(LOONGSON_DIAG_BTB);
}
if (val & LOONGSON_DIAG_ITLB) {
/* Flush ITLB */
set_c0_diag(LOONGSON_DIAG_ITLB);
}
if (val & LOONGSON_DIAG_DTLB) {
/* Flush DTLB */
set_c0_diag(LOONGSON_DIAG_DTLB);
}
if (val & LOONGSON_DIAG_VTLB) {
/* Flush VTLB */
kvm_loongson_clear_guest_vtlb();
}
if (val & LOONGSON_DIAG_FTLB) {
/* Flush FTLB */
kvm_loongson_clear_guest_ftlb();
}
local_irq_restore(flags);
#endif
} else {
er = EMULATE_FAIL;
}
break;
default:
er = EMULATE_FAIL;
break;
}
}
/* Roll back PC only if emulation was unsuccessful */
if (er == EMULATE_FAIL) {
kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
curr_pc, __func__, inst.word);
vcpu->arch.pc = curr_pc;
}
return er;
}
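/*
* Emulate a guest CACHE instruction that caused a GPSI exit. Only primary
* I-cache and D-cache index/hit operations are handled; secondary and
* tertiary cache ops are ignored, since their effect is not observable by
* the guest.
*/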
static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
u32 *opc, u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
u32 cache, op_inst, op, base;
s16 offset;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long va, curr_pc;
/*
* Update PC and hold onto the current PC in case there is
* an error and we want to roll back the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
base = inst.i_format.rs;
op_inst = inst.i_format.rt;
if (cpu_has_mips_r6)
offset = inst.spec3_format.simmediate;
else
offset = inst.i_format.simmediate;
cache = op_inst & CacheOp_Cache;
op = op_inst & CacheOp_Op;
va = arch->gprs[base] + offset;
kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
cache, op, base, arch->gprs[base], offset);
/* Secondary or tertiary cache ops ignored */
if (cache != Cache_I && cache != Cache_D)
return EMULATE_DONE;
switch (op_inst) {
case Index_Invalidate_I:
flush_icache_line_indexed(va);
return EMULATE_DONE;
case Index_Writeback_Inv_D:
flush_dcache_line_indexed(va);
return EMULATE_DONE;
case Hit_Invalidate_I:
case Hit_Invalidate_D:
case Hit_Writeback_Inv_D:
if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
/* We can just flush entire icache */
local_flush_icache_range(0, 0);
return EMULATE_DONE;
}
/* So far, other platforms support guest hit cache ops */
break;
kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
offset);
/* Roll back PC */
vcpu->arch.pc = curr_pc;
return EMULATE_FAIL;
}
#ifdef CONFIG_CPU_LOONGSON64
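/*
* Emulate the Loongson-3 CPUCFG read reached via a trapped LWC2-format
* instruction, exposing only a conservative subset of the host CPUCFG
* bits to the guest.
*/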
static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
u32 *opc, u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
unsigned int rs, rd;
unsigned int hostcfg;
unsigned long curr_pc;
enum emulation_result er = EMULATE_DONE;
/*
* Update PC and hold onto the current PC in case there is
* an error and we want to roll back the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
rs = inst.loongson3_lscsr_format.rs;
rd = inst.loongson3_lscsr_format.rd;
switch (inst.loongson3_lscsr_format.fr) {
case 0x8: /* Read CPUCFG */
++vcpu->stat.vz_cpucfg_exits;
hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);
switch (vcpu->arch.gprs[rs]) {
case LOONGSON_CFG0:
vcpu->arch.gprs[rd] = 0x14c000;
break;
case LOONGSON_CFG1:
hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
LOONGSON_CFG1_SFBP);
vcpu->arch.gprs[rd] = hostcfg;
break;
case LOONGSON_CFG2:
hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
vcpu->arch.gprs[rd] = hostcfg;
break;
case LOONGSON_CFG3:
vcpu->arch.gprs[rd] = hostcfg;
break;
default:
/* Don't export any other advanced features to guest */
vcpu->arch.gprs[rd] = 0;
break;
}
break;
default:
kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc);
er = EMULATE_FAIL;
break;
}
/* Roll back PC only if emulation was unsuccessful */
if (er == EMULATE_FAIL) {
kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr);
vcpu->arch.pc = curr_pc;
}
return er;
}
#endif
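/*
* Handle a Guest Privileged Sensitive Instruction (GPSI) exit: fetch the
* trapping instruction (accounting for branch delay slots via CAUSEF_BD)
* and dispatch to the COP0, CACHE, LWC2 or RDHWR emulation as appropriate.
*/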
static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
struct kvm_vcpu_arch *arch = &vcpu->arch;
struct kvm_run *run = vcpu->run;
union mips_instruction inst;
int rd, rt, sel;
int err;
/*
* Fetch the instruction.
*/
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err)
return EMULATE_FAIL;
switch (inst.r_format.opcode) {
case cop0_op:
er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
break;
#ifndef CONFIG_CPU_MIPSR6
case cache_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
break;
#endif
#ifdef CONFIG_CPU_LOONGSON64
case lwc2_op:
er = kvm_vz_gpsi_lwc2(inst, opc, cause, run, vcpu);
break;
#endif
case spec3_op:
switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
case cache6_op:
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
break;
#endif
case rdhwr_op:
if (inst.r_format.rs || (inst.r_format.re >> 3))
goto unknown;
rd = inst.r_format.rd;
rt = inst.r_format.rt;
sel = inst.r_format.re & 0x7;
switch (rd) {
case MIPS_HWR_CC: /* Read count register */
arch->gprs[rt] =
(long)(int)kvm_mips_read_count(vcpu);
break;
default:
trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
KVM_TRACE_HWR(rd, sel), 0);
goto unknown;
}
trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
er = update_pc(vcpu, cause);
break;
default:
goto unknown;
}
break;
unknown:
default:
kvm_err("GPSI exception not supported (%p/%#x)\n",
opc, inst.word);
kvm_arch_vcpu_dump_regs(vcpu);
er = EMULATE_FAIL;
break;
}
return er;
}
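/*
* Handle a Guest Software Field Change (GSFC) exit: complete a guest MTC0
* to Status, Cause, IntCtl or Config5 on its behalf, filtering the written
* value down to the bits the guest is permitted to change, then advance
* the guest PC past the instruction.
*/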
static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
struct kvm_vcpu_arch *arch = &vcpu->arch;
union mips_instruction inst;
int err;
/*
* Fetch the instruction.
*/
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err)
return EMULATE_FAIL;
/* complete MTC0 on behalf of guest and advance EPC */
if (inst.c0r_format.opcode == cop0_op &&
inst.c0r_format.rs == mtc_op &&
inst.c0r_format.z == 0) {
int rt = inst.c0r_format.rt;
int rd = inst.c0r_format.rd;
int sel = inst.c0r_format.sel;
unsigned int val = arch->gprs[rt];
unsigned int old_val, change;
trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
val);
if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
/* FR bit should read as zero if no FPU */
if (!kvm_mips_guest_has_fpu(&vcpu->arch))
val &= ~(ST0_CU1 | ST0_FR);
/*
* Also don't allow FR to be set if host doesn't support
* it.
*/
if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
val &= ~ST0_FR;
old_val = read_gc0_status();
change = val ^ old_val;
if (change & ST0_FR) {
/*
* FPU and Vector register state is made
* UNPREDICTABLE by a change of FR, so don't
* even bother saving it.
*/
kvm_drop_fpu(vcpu);
}
/*
* If MSA state is already live, it is undefined how it
* interacts with FR=0 FPU state, and we don't want to
* hit reserved instruction exceptions trying to save
* the MSA state later when CU=1 && FR=1, so play it
* safe and save it first.
*/
if (change & ST0_CU1 && !(val & ST0_FR) &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
kvm_lose_fpu(vcpu);
write_gc0_status(val);
} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
u32 old_cause = read_gc0_cause();
u32 change = old_cause ^ val;
/* DC bit enabling/disabling timer? */
if (change & CAUSEF_DC) {
if (val & CAUSEF_DC) {
kvm_vz_lose_htimer(vcpu);
kvm_vz_save_timer(vcpu);
} else {
kvm_vz_restore_timer(vcpu);
}
}
/* Only certain bits are RW to the guest */
change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
CAUSEF_IP0 | CAUSEF_IP1);
/* WP can only be cleared */
change &= ~CAUSEF_WP | old_cause;
write_gc0_cause(old_cause ^ change);
} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
write_gc0_intctl(val);
} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
old_val = read_gc0_config5();
change = val ^ old_val;
/* Handle changes in FPU/MSA modes */
preempt_disable();
/*
* Propagate FRE changes immediately if the FPU
* context is already loaded.
*/
if (change & MIPS_CONF5_FRE &&
vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
change_c0_config5(MIPS_CONF5_FRE, val);
preempt_enable();
val = old_val ^
(change & kvm_vz_config5_guest_wrmask(vcpu));
write_gc0_config5(val);
} else {
kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
opc, inst.word);
er = EMULATE_FAIL;
}
if (er != EMULATE_FAIL)
er = update_pc(vcpu, cause);
} else {
kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
opc, inst.word);
er = EMULATE_FAIL;
}
return er;
}
static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
/*
* Presumably this is due to MC (guest mode change), so let's trace some
* relevant info.
*/
trace_kvm_guest_mode_change(vcpu);
return EMULATE_DONE;
}
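/*
* Handle a hypercall (HC) guest exit: fetch the trapping instruction,
* tentatively advance the PC, and hand off to the common hypercall
* emulation, rolling the PC back if emulation fails.
*/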
static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
struct kvm_vcpu *vcpu)
{
enum emulation_result er;
union mips_instruction inst;
unsigned long curr_pc;
int err;
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err)
return EMULATE_FAIL;
/*
* Update PC and hold onto the current PC in case there is
* an error and we want to roll back the PC
*/
curr_pc = vcpu->arch.pc;
er = update_pc(vcpu, cause);
if (er == EMULATE_FAIL)
return er;
er = kvm_mips_emul_hypcall(vcpu, inst);
if (er == EMULATE_FAIL)
vcpu->arch.pc = curr_pc;
return er;
}
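/*
* Fallback for guest exit causes with no handler: log the offending
* instruction and guest Status, and fail the emulation so the exit is
* reported to userspace as an internal error.
*/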
static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
u32 cause,
u32 *opc,
struct kvm_vcpu *vcpu)
{
u32 inst;
/*
* Fetch the instruction.
*/
if (cause & CAUSEF_BD)
opc += 1;
kvm_get_badinstr(opc, vcpu, &inst);
kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
gexccode, opc, inst, read_gc0_status());
return EMULATE_FAIL;
}
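/*
* Top-level dispatcher for hypervisor guest exits: decode GExcCode from
* GuestCtl0, bump the matching statistic, and route to the GPSI, GSFC,
* HC or GHFC handlers; all other codes are currently unhandled.
*/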
static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
int ret = RESUME_GUEST;
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
switch (gexccode) {
case MIPS_GCTL0_GEXC_GPSI:
++vcpu->stat.vz_gpsi_exits;
er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_GSFC:
++vcpu->stat.vz_gsfc_exits;
er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_HC:
++vcpu->stat.vz_hc_exits;
er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_GRR:
++vcpu->stat.vz_grr_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu);
break;
case MIPS_GCTL0_GEXC_GVA:
++vcpu->stat.vz_gva_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu);
break;
case MIPS_GCTL0_GEXC_GHFC:
++vcpu->stat.vz_ghfc_exits;
er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
break;
case MIPS_GCTL0_GEXC_GPA:
++vcpu->stat.vz_gpa_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu);
break;
default:
++vcpu->stat.vz_resvd_exits;
er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
vcpu);
break;
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else if (er == EMULATE_HYPERCALL) {
ret = kvm_mips_handle_hypcall(vcpu);
} else {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
/**
* kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use a coprocessor which hasn't been allowed
* by the root context.
*/
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_FAIL;
int ret = RESUME_GUEST;
if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
/*
* If guest FPU not present, the FPU operation should have been
* treated as a reserved instruction!
* If FPU already in use, we shouldn't get this at all.
*/
if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
preempt_enable();
return EMULATE_FAIL;
}
kvm_own_fpu(vcpu);
er = EMULATE_DONE;
}
/* other coprocessors not handled */
switch (er) {
case EMULATE_DONE:
ret = RESUME_GUEST;
break;
case EMULATE_FAIL:
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
break;
default:
BUG();
}
return ret;
}
/**
* kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use MSA when it is disabled in the root
* context.
*/
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
/*
* If MSA not present or not exposed to guest or FR=0, the MSA operation
* should have been treated as a reserved instruction!
* Same if CU1=1, FR=0.
* If MSA already in use, we shouldn't get this at all.
*/
if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
(read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
!(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
kvm_own_msa(vcpu);
return RESUME_GUEST;
}
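/*
* Handle a root TLB load miss on a guest physical address. If the fault
* cannot be satisfied from the GPA mappings, it is treated as an MMIO
* load and emulated, with the result delivered via the kvm_run MMIO
* interface.
*/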
static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
union mips_instruction inst;
enum emulation_result er = EMULATE_DONE;
int err, ret = RESUME_GUEST;
if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
/* A code fetch fault doesn't count as an MMIO */
if (kvm_is_ifetch_fault(&vcpu->arch)) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Fetch the instruction */
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Treat as MMIO */
er = kvm_mips_emulate_load(inst, cause, run, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
}
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else if (er == EMULATE_DO_MMIO) {
run->exit_reason = KVM_EXIT_MMIO;
ret = RESUME_HOST;
} else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
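/*
* Handle a root TLB store miss on a guest physical address, analogously
* to the load path: retry the access if the address cannot be translated
* yet, otherwise emulate it as an MMIO store.
*/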
static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 *opc = (u32 *) vcpu->arch.pc;
u32 cause = vcpu->arch.host_cp0_cause;
ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
union mips_instruction inst;
enum emulation_result er = EMULATE_DONE;
int err;
int ret = RESUME_GUEST;
/* Just try the access again if we couldn't do the translation */
if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
return RESUME_GUEST;
vcpu->arch.host_cp0_badvaddr = badvaddr;
if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
/* Fetch the instruction */
if (cause & CAUSEF_BD)
opc += 1;
err = kvm_get_badinstr(opc, vcpu, &inst.word);
if (err) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return RESUME_HOST;
}
/* Treat as MMIO */
er = kvm_mips_emulate_store(inst, cause, run, vcpu);
if (er == EMULATE_FAIL) {
kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
}
}
if (er == EMULATE_DONE) {
ret = RESUME_GUEST;
} else if (er == EMULATE_DO_MMIO) {
run->exit_reason = KVM_EXIT_MMIO;
ret = RESUME_HOST;
} else {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
return ret;
}
static u64 kvm_vz_get_one_regs[] = {
KVM_REG_MIPS_CP0_INDEX,
KVM_REG_MIPS_CP0_ENTRYLO0,
KVM_REG_MIPS_CP0_ENTRYLO1,
KVM_REG_MIPS_CP0_CONTEXT,
KVM_REG_MIPS_CP0_PAGEMASK,
KVM_REG_MIPS_CP0_PAGEGRAIN,
KVM_REG_MIPS_CP0_WIRED,
KVM_REG_MIPS_CP0_HWRENA,
KVM_REG_MIPS_CP0_BADVADDR,
KVM_REG_MIPS_CP0_COUNT,
KVM_REG_MIPS_CP0_ENTRYHI,
KVM_REG_MIPS_CP0_COMPARE,
KVM_REG_MIPS_CP0_STATUS,
KVM_REG_MIPS_CP0_INTCTL,
KVM_REG_MIPS_CP0_CAUSE,
KVM_REG_MIPS_CP0_EPC,
KVM_REG_MIPS_CP0_PRID,
KVM_REG_MIPS_CP0_EBASE,
KVM_REG_MIPS_CP0_CONFIG,
KVM_REG_MIPS_CP0_CONFIG1,
KVM_REG_MIPS_CP0_CONFIG2,
KVM_REG_MIPS_CP0_CONFIG3,
KVM_REG_MIPS_CP0_CONFIG4,
KVM_REG_MIPS_CP0_CONFIG5,
KVM_REG_MIPS_CP0_CONFIG6,
#ifdef CONFIG_64BIT
KVM_REG_MIPS_CP0_XCONTEXT,
#endif
KVM_REG_MIPS_CP0_ERROREPC,
KVM_REG_MIPS_COUNT_CTL,
KVM_REG_MIPS_COUNT_RESUME,
KVM_REG_MIPS_COUNT_HZ,
};
static u64 kvm_vz_get_one_regs_contextconfig[] = {
KVM_REG_MIPS_CP0_CONTEXTCONFIG,
#ifdef CONFIG_64BIT
KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
#endif
};
static u64 kvm_vz_get_one_regs_segments[] = {
KVM_REG_MIPS_CP0_SEGCTL0,
KVM_REG_MIPS_CP0_SEGCTL1,
KVM_REG_MIPS_CP0_SEGCTL2,
};
static u64 kvm_vz_get_one_regs_htw[] = {
KVM_REG_MIPS_CP0_PWBASE,
KVM_REG_MIPS_CP0_PWFIELD,
KVM_REG_MIPS_CP0_PWSIZE,
KVM_REG_MIPS_CP0_PWCTL,
};
static u64 kvm_vz_get_one_regs_kscratch[] = {
KVM_REG_MIPS_CP0_KSCRATCH1,
KVM_REG_MIPS_CP0_KSCRATCH2,
KVM_REG_MIPS_CP0_KSCRATCH3,
KVM_REG_MIPS_CP0_KSCRATCH4,
KVM_REG_MIPS_CP0_KSCRATCH5,
KVM_REG_MIPS_CP0_KSCRATCH6,
};
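/*
* Count the one_reg indices exposed for this vCPU: the base list plus
* each optional register block whose feature the guest CPU actually has.
* This must stay in sync with kvm_vz_copy_reg_indices() below.
*/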
static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
unsigned long ret;
ret = ARRAY_SIZE(kvm_vz_get_one_regs);
if (cpu_guest_has_userlocal)
++ret;
if (cpu_guest_has_badinstr)
++ret;
if (cpu_guest_has_badinstrp)
++ret;
if (cpu_guest_has_contextconfig)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
if (cpu_guest_has_segments)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
if (cpu_guest_has_htw || cpu_guest_has_ldpte)
ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
return ret;
}
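/*
* Emit the register index list counted by kvm_vz_num_regs() to userspace,
* in the same order and under the same feature conditions.
*/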
static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
u64 index;
unsigned int i;
if (copy_to_user(indices, kvm_vz_get_one_regs,
sizeof(kvm_vz_get_one_regs)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs);
if (cpu_guest_has_userlocal) {
index = KVM_REG_MIPS_CP0_USERLOCAL;
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
if (cpu_guest_has_badinstr) {
index = KVM_REG_MIPS_CP0_BADINSTR;
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
if (cpu_guest_has_badinstrp) {
index = KVM_REG_MIPS_CP0_BADINSTRP;
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
if (cpu_guest_has_contextconfig) {
if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
sizeof(kvm_vz_get_one_regs_contextconfig)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
}
if (cpu_guest_has_segments) {
if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
sizeof(kvm_vz_get_one_regs_segments)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
}
if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
sizeof(kvm_vz_get_one_regs_htw)))
return -EFAULT;
indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
}
if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
index = KVM_REG_MIPS_CP0_MAAR(i);
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
index = KVM_REG_MIPS_CP0_MAARI;
if (copy_to_user(indices, &index, sizeof(index)))
return -EFAULT;
++indices;
}
for (i = 0; i < 6; ++i) {
if (!cpu_guest_has_kscr(i + 2))
continue;
if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
sizeof(kvm_vz_get_one_regs_kscratch[i])))
return -EFAULT;
++indices;
}
return 0;
}
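/*
* On 32-bit hosts the architectural EntryLo RI/XI bits sit at the top of
* the 32-bit register, while the KVM one_reg API exposes a 64-bit view
* with RI/XI at the top of 64 bits; these two helpers shift the bits
* between the layouts. On 64-bit hosts the layouts already match, so the
* value passes through unchanged.
*/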
static inline s64 entrylo_kvm_to_user(unsigned long v)
{
s64 mask, ret = v;
if (BITS_PER_LONG == 32) {
/*
* KVM API exposes 64-bit version of the register, so move the
* RI/XI bits up into place.
*/
mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
ret &= ~mask;
ret |= ((s64)v & mask) << 32;
}
return ret;
}
static inline unsigned long entrylo_user_to_kvm(s64 v)
{
unsigned long mask, ret = v;
if (BITS_PER_LONG == 32) {
/*
* KVM API exposes 64-bit version of the register, so move the
* RI/XI bits down into place.
*/
mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
ret &= ~mask;
ret |= (v >> 32) & mask;
}
return ret;
}
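/*
* Read a single guest register for the KVM_GET_ONE_REG ioctl, mostly by
* reading the hardware guest CP0 context directly; optional registers
* return -EINVAL when the corresponding guest feature is absent.
*/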
static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 *v)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned int idx;
switch (reg->id) {
case KVM_REG_MIPS_CP0_INDEX:
*v = (long)read_gc0_index();
break;
case KVM_REG_MIPS_CP0_ENTRYLO0:
*v = entrylo_kvm_to_user(read_gc0_entrylo0());
break;
case KVM_REG_MIPS_CP0_ENTRYLO1:
*v = entrylo_kvm_to_user(read_gc0_entrylo1());
break;
case KVM_REG_MIPS_CP0_CONTEXT:
*v = (long)read_gc0_context();
break;
case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
if (!cpu_guest_has_contextconfig)
return -EINVAL;
*v = read_gc0_contextconfig();
break;
case KVM_REG_MIPS_CP0_USERLOCAL:
if (!cpu_guest_has_userlocal)
return -EINVAL;
*v = read_gc0_userlocal();
break;
#ifdef CONFIG_64BIT
case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
if (!cpu_guest_has_contextconfig)
return -EINVAL;
*v = read_gc0_xcontextconfig();
break;
#endif
case KVM_REG_MIPS_CP0_PAGEMASK:
*v = (long)read_gc0_pagemask();
break;
case KVM_REG_MIPS_CP0_PAGEGRAIN:
*v = (long)read_gc0_pagegrain();
break;
case KVM_REG_MIPS_CP0_SEGCTL0:
if (!cpu_guest_has_segments)
return -EINVAL;
*v = read_gc0_segctl0();
break;
case KVM_REG_MIPS_CP0_SEGCTL1:
if (!cpu_guest_has_segments)
return -EINVAL;
*v = read_gc0_segctl1();
break;
case KVM_REG_MIPS_CP0_SEGCTL2:
if (!cpu_guest_has_segments)
return -EINVAL;
*v = read_gc0_segctl2();
break;
case KVM_REG_MIPS_CP0_PWBASE:
if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwbase();
break;
case KVM_REG_MIPS_CP0_PWFIELD:
if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwfield();
break;
case KVM_REG_MIPS_CP0_PWSIZE:
if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwsize();
break;
case KVM_REG_MIPS_CP0_WIRED:
*v = (long)read_gc0_wired();
break;
case KVM_REG_MIPS_CP0_PWCTL:
if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
return -EINVAL;
*v = read_gc0_pwctl();
break;
case KVM_REG_MIPS_CP0_HWRENA:
*v = (long)read_gc0_hwrena();
break;
case KVM_REG_MIPS_CP0_BADVADDR:
*v = (long)read_gc0_badvaddr();
break;
case KVM_REG_MIPS_CP0_BADINSTR: