/* SPDX-License-Identifier: MIT */

#include "hv.h"
#include "assert.h"
#include "cpu_regs.h"
#include "display.h"
#include "gxf.h"
#include "memory.h"
#include "pcie.h"
#include "smp.h"
#include "string.h"
#include "usb.h"
#include "utils.h"

#define HV_TICK_RATE 1000
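
/* Lock protecting the shared CPU start/exit bookkeeping below. */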
DECLARE_SPINLOCK(bhl);

void hv_enter_guest(u64 x0, u64 x1, u64 x2, u64 x3, void *entry);
void hv_exit_guest(void) __attribute__((noreturn));

extern char _hv_vectors_start[0];

u64 hv_tick_interval;
int hv_pinned_cpu;
int hv_want_cpu;

static bool hv_should_exit;
bool hv_started_cpus[MAX_CPUS];
u32 hv_cpus_in_guest;
u64 hv_saved_sp[MAX_CPUS];
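
/* EL2/GL2 register state captured on the boot CPU and replayed on each secondary. */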
struct hv_secondary_info_t {
    uint64_t hcr;
    uint64_t hacr;
    uint64_t vtcr, vttbr;
    uint64_t mdcr;
    uint64_t mdscr;
    uint64_t amx_ctl;
    uint64_t apvmkeylo, apvmkeyhi, apsts;
    uint64_t actlr_el2;
    uint64_t actlr_el1;
    uint64_t cnthctl;
    uint64_t sprr_config;
    uint64_t gxf_config;
};

static struct hv_secondary_info_t hv_secondary_info;
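
/*
 * One-time hypervisor setup on the boot CPU: hand devices over for the guest,
 * bring up the secondary CPUs, enable the EL1 physical timer, and set up the
 * stage 2 page tables and the default HCR_EL2 configuration.
 */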
void hv_init(void)
{
    pcie_shutdown();
    // Make sure we wake up DCP if we put it to sleep; quiesce it to match the ADT
    if (display_is_external && display_start_dcp() >= 0)
        display_shutdown(DCP_QUIESCED);
    // Re-enable HPM interrupts for the guest for unused iodevs
    usb_hpm_restore_irqs(0);
    smp_start_secondaries();
    smp_set_wfe_mode(true);
    hv_wdt_init();

    // Enable physical timer for EL1
    msr(CNTHCTL_EL2, CNTHCTL_EL1PTEN | CNTHCTL_EL1PCTEN);

    hv_pt_init();

    // Configure hypervisor defaults
    hv_write_hcr(HCR_API | // Allow PAuth instructions
                 HCR_APK | // Allow PAuth key registers
                 HCR_TEA | // Trap external aborts
                 HCR_E2H | // VHE mode (forced)
                 HCR_RW |  // AArch64 guest
                 HCR_AMO | // Route SError exceptions to EL2
                 HCR_VM);  // Enable stage 2 translation

    // No guest vectors initially
    msr(VBAR_EL12, 0);

    // Compute tick interval
    hv_tick_interval = mrs(CNTFRQ_EL0) / HV_TICK_RATE;

    sysop("dsb ishst");
    sysop("tlbi alle1is");
    sysop("dsb ish");
    sysop("isb");
}

static void hv_set_gxf_vbar(void)
{
    msr(SYS_IMP_APL_VBAR_GL1, _hv_vectors_start);
}
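
/*
 * Enter the guest on the boot CPU. A snapshot of the relevant EL2/GL2 state is
 * saved first so secondaries can replicate it in hv_init_secondary(). Once the
 * guest exits, wait for all started CPUs to leave before returning.
 */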
void hv_start(void *entry, u64 regs[4])
{
    hv_should_exit = false;
    memset(hv_started_cpus, 0, sizeof(hv_started_cpus));
    hv_started_cpus[0] = true;

    msr(VBAR_EL1, _hv_vectors_start);

    if (gxf_enabled())
        gl2_call(hv_set_gxf_vbar, 0, 0, 0, 0);

    hv_secondary_info.hcr = mrs(HCR_EL2);
    hv_secondary_info.hacr = mrs(HACR_EL2);
    hv_secondary_info.vtcr = mrs(VTCR_EL2);
    hv_secondary_info.vttbr = mrs(VTTBR_EL2);
    hv_secondary_info.mdcr = mrs(MDCR_EL2);
    hv_secondary_info.mdscr = mrs(MDSCR_EL1);
    hv_secondary_info.amx_ctl = mrs(SYS_IMP_APL_AMX_CTL_EL2);
    hv_secondary_info.apvmkeylo = mrs(SYS_IMP_APL_APVMKEYLO_EL2);
    hv_secondary_info.apvmkeyhi = mrs(SYS_IMP_APL_APVMKEYHI_EL2);
    hv_secondary_info.apsts = mrs(SYS_IMP_APL_APSTS_EL12);
    hv_secondary_info.actlr_el2 = mrs(ACTLR_EL2);
    hv_secondary_info.actlr_el1 = mrs(SYS_IMP_APL_ACTLR_EL12);
    hv_secondary_info.cnthctl = mrs(CNTHCTL_EL2);
    hv_secondary_info.sprr_config = mrs(SYS_IMP_APL_SPRR_CONFIG_EL1);
    hv_secondary_info.gxf_config = mrs(SYS_IMP_APL_GXF_CONFIG_EL1);

    hv_arm_tick();

    hv_pinned_cpu = -1;
    hv_want_cpu = -1;
    hv_cpus_in_guest = 1;

    hv_enter_guest(regs[0], regs[1], regs[2], regs[3], entry);

    __atomic_sub_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE);

    spin_lock(&bhl);
    hv_wdt_stop();
    hv_should_exit = true;
    printf("HV: Exiting hypervisor (main CPU)\n");

    for (int i = 0; i < MAX_CPUS; i++) {
        if (hv_started_cpus[i]) {
            printf("HV: Waiting for CPU %d to exit\n", i);
            spin_unlock(&bhl);
            smp_wait(i);
            spin_lock(&bhl);
            hv_started_cpus[i] = false;
        }
    }

    printf("HV: All CPUs exited\n");
    spin_unlock(&bhl);
}
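
/* Runs on each secondary CPU: replay the register snapshot taken in hv_start(). */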
static void hv_init_secondary(struct hv_secondary_info_t *info)
{
    gxf_init();

    msr(VBAR_EL1, _hv_vectors_start);

    msr(HCR_EL2, info->hcr);
    msr(HACR_EL2, info->hacr);
    msr(VTCR_EL2, info->vtcr);
    msr(VTTBR_EL2, info->vttbr);
    msr(MDCR_EL2, info->mdcr);
    msr(MDSCR_EL1, info->mdscr);
    msr(SYS_IMP_APL_AMX_CTL_EL2, info->amx_ctl);
    msr(SYS_IMP_APL_APVMKEYLO_EL2, info->apvmkeylo);
    msr(SYS_IMP_APL_APVMKEYHI_EL2, info->apvmkeyhi);
    msr(SYS_IMP_APL_APSTS_EL12, info->apsts);
    msr(ACTLR_EL2, info->actlr_el2);
    msr(SYS_IMP_APL_ACTLR_EL12, info->actlr_el1);
    msr(CNTHCTL_EL2, info->cnthctl);
    msr(SYS_IMP_APL_SPRR_CONFIG_EL1, info->sprr_config);
    msr(SYS_IMP_APL_GXF_CONFIG_EL1, info->gxf_config);

    if (gxf_enabled())
        gl2_call(hv_set_gxf_vbar, 0, 0, 0, 0);

    hv_arm_tick();
}
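
/* Secondary CPU guest entry; when the guest exits here, flag a global hypervisor exit. */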
static void hv_enter_secondary(void *entry, u64 regs[4])
{
    hv_enter_guest(regs[0], regs[1], regs[2], regs[3], entry);

    spin_lock(&bhl);
    hv_should_exit = true;
    printf("HV: Exiting from CPU %d\n", smp_id());
    __atomic_sub_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE);
    spin_unlock(&bhl);
}
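
/* Bring a secondary CPU into the guest at the given entry point. */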
void hv_start_secondary(int cpu, void *entry, u64 regs[4])
{
    printf("HV: Initializing secondary %d\n", cpu);

    iodev_console_flush();
    mmu_init_secondary(cpu);
    iodev_console_flush();
    smp_call4(cpu, hv_init_secondary, (u64)&hv_secondary_info, 0, 0, 0);
    smp_wait(cpu);
    iodev_console_flush();

    printf("HV: Entering guest secondary %d at %p\n", cpu, entry);

    hv_started_cpus[cpu] = true;
    __atomic_add_fetch(&hv_cpus_in_guest, 1, __ATOMIC_ACQUIRE);

    iodev_console_flush();
    smp_call4(cpu, hv_enter_secondary, (u64)entry, (u64)regs, 0, 0);
}
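
/* Kick all other started CPUs out of the guest and wait until none remain in it. */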
void hv_rendezvous(void)
{
    if (!__atomic_load_n(&hv_cpus_in_guest, __ATOMIC_ACQUIRE))
        return;

    /* IPI all CPUs. This might result in spurious IPIs to the guest... */
    for (int i = 0; i < MAX_CPUS; i++) {
        if (i != smp_id() && hv_started_cpus[i]) {
            smp_send_ipi(i);
        }
    }
    while (__atomic_load_n(&hv_cpus_in_guest, __ATOMIC_ACQUIRE))
        ;
}
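
/* Request that guest handling move to another started CPU, then rendezvous. */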
bool hv_switch_cpu(int cpu)
{
    if (cpu >= MAX_CPUS || cpu < 0 || !hv_started_cpus[cpu]) {
        printf("HV: CPU #%d is inactive or invalid\n", cpu);
        return false;
    }
    printf("HV: switching to CPU #%d\n", cpu);
    hv_want_cpu = cpu;
    hv_rendezvous();
    return true;
}
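
/* Pin guest exception/proxy handling to a single CPU (-1 means no pinning). */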
void hv_pin_cpu(int cpu)
{
    hv_pinned_cpu = cpu;
}
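
/* When GXF is enabled and we are not already in GL1/2, perform the HCR_EL2 write from GL2. */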
void hv_write_hcr(u64 val)
{
    if (gxf_enabled() && !in_gl12())
        gl2_call(hv_write_hcr, val, 0, 0, 0);
    else
        msr(HCR_EL2, val);
}
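
/*
 * Guest exception state accessors: when running in GL1/2 (GXF), the trap state
 * is held in the IMP_APL *_GL1 registers rather than the usual EL2 ones.
 */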
u64 hv_get_spsr(void)
{
    if (in_gl12())
        return mrs(SYS_IMP_APL_SPSR_GL1);
    else
        return mrs(SPSR_EL2);
}

void hv_set_spsr(u64 val)
{
    if (in_gl12())
        return msr(SYS_IMP_APL_SPSR_GL1, val);
    else
        return msr(SPSR_EL2, val);
}

u64 hv_get_esr(void)
{
    if (in_gl12())
        return mrs(SYS_IMP_APL_ESR_GL1);
    else
        return mrs(ESR_EL2);
}

u64 hv_get_far(void)
{
    if (in_gl12())
        return mrs(SYS_IMP_APL_FAR_GL1);
    else
        return mrs(FAR_EL2);
}

u64 hv_get_afsr1(void)
{
    if (in_gl12())
        return mrs(SYS_IMP_APL_AFSR1_GL1);
    else
        return mrs(AFSR1_EL2);
}

u64 hv_get_elr(void)
{
    if (in_gl12())
        return mrs(SYS_IMP_APL_ELR_GL1);
    else
        return mrs(ELR_EL2);
}

void hv_set_elr(u64 val)
{
    if (in_gl12())
        return msr(SYS_IMP_APL_ELR_GL1, val);
    else
        return msr(ELR_EL2, val);
}
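
/* Arm the hypervisor tick timer to fire after hv_tick_interval counter ticks. */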
void hv_arm_tick(void)
{
    msr(CNTP_TVAL_EL0, hv_tick_interval);
    msr(CNTP_CTL_EL0, CNTx_CTL_ENABLE);
}

void hv_maybe_exit(void)
{
    if (hv_should_exit) {
        hv_exit_guest();
    }
}
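
/* Periodic tick: pet the watchdog and poll the UART proxy and virtual UART. */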
void hv_tick(struct exc_info *ctx)
{
    hv_wdt_pet();
    iodev_handle_events(uartproxy_iodev);
    if (iodev_can_read(uartproxy_iodev)) {
        if (hv_pinned_cpu == -1 || hv_pinned_cpu == smp_id())
            hv_exc_proxy(ctx, START_HV, HV_USER_INTERRUPT, NULL);
    }
    hv_vuart_poll();
}