/* The kernel call implemented in this file:
* m_type: SYS_VMCTL
*
* The parameters for this kernel call are:
* SVMCTL_WHO which process
* SVMCTL_PARAM set this setting (VMCTL_*)
* SVMCTL_VALUE to this value
*/
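/*
 * Illustration: a caller such as VM fills in the three fields above and
 * traps to the kernel. Assuming the usual libsys conventions, such a
 * wrapper would look roughly like this (see the libsys sources for the
 * exact interface):
 *
 *	message m;
 *	m.SVMCTL_WHO = who;
 *	m.SVMCTL_PARAM = param;
 *	m.SVMCTL_VALUE = value;
 *	return _kernel_call(SYS_VMCTL, &m);
 */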
#include "kernel/system.h"
#include "kernel/vm.h"
#include <assert.h>
/*===========================================================================*
* do_vmctl *
*===========================================================================*/
int do_vmctl(struct proc * caller, message * m_ptr)
{
int proc_nr;
endpoint_t ep = m_ptr->SVMCTL_WHO;
struct proc *p, *rp, **rpp, *target;
if(ep == SELF) { ep = caller->p_endpoint; }
if(!isokendpt(ep, &proc_nr)) {
printf("do_vmctl: unexpected endpoint %d from VM\n", ep);
return EINVAL;
}
p = proc_addr(proc_nr);
switch(m_ptr->SVMCTL_PARAM) {
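/* VM has resolved the page fault; clear the flag so the process can
 * run again.
 */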
case VMCTL_CLEAR_PAGEFAULT:
assert(RTS_ISSET(p,RTS_PAGEFAULT));
RTS_UNSET(p, RTS_PAGEFAULT);
return OK;
case VMCTL_MEMREQ_GET:
/* Send VM the information about the memory request. We cannot
 * simply send the first request on the list, because IPC filters
 * may forbid VM from getting requests from particular sources.
 * However, IPC filters are used only in rare cases.
 */
for (rpp = &vmrequest; *rpp != NULL;
rpp = &(*rpp)->p_vmrequest.nextrequestor) {
rp = *rpp;
assert(RTS_ISSET(rp, RTS_VMREQUEST));
okendpt(rp->p_vmrequest.target, &proc_nr);
target = proc_addr(proc_nr);
/* Check against IPC filters. */
if (!allow_ipc_filtered_memreq(rp, target))
continue;
/* Reply with request fields. */
if (rp->p_vmrequest.req_type != VMPTYPE_CHECK)
panic("VMREQUEST wrong type");
m_ptr->SVMCTL_MRG_TARGET =
rp->p_vmrequest.target;
m_ptr->SVMCTL_MRG_ADDR =
rp->p_vmrequest.params.check.start;
m_ptr->SVMCTL_MRG_LENGTH =
rp->p_vmrequest.params.check.length;
m_ptr->SVMCTL_MRG_FLAG =
rp->p_vmrequest.params.check.writeflag;
m_ptr->SVMCTL_MRG_REQUESTOR =
(void *) rp->p_endpoint;
rp->p_vmrequest.vmresult = VMSUSPEND;
/* Remove from request chain. */
*rpp = rp->p_vmrequest.nextrequestor;
return rp->p_vmrequest.req_type;
}
return ENOENT;
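/* VM reports the result of a memory request it was asked to handle
 * earlier; store the result and resume the requestor accordingly.
 */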
case VMCTL_MEMREQ_REPLY:
assert(RTS_ISSET(p, RTS_VMREQUEST));
assert(p->p_vmrequest.vmresult == VMSUSPEND);
okendpt(p->p_vmrequest.target, &proc_nr);
target = proc_addr(proc_nr);
p->p_vmrequest.vmresult = m_ptr->SVMCTL_VALUE;
assert(p->p_vmrequest.vmresult != VMSUSPEND);
switch(p->p_vmrequest.type) {
case VMSTYPE_KERNELCALL:
/*
 * We will have to resume execution of the kernel call as soon as
 * the scheduler picks up this process again.
 */
p->p_misc_flags |= MF_KCALL_RESUME;
break;
case VMSTYPE_DELIVERMSG:
assert(p->p_misc_flags & MF_DELIVERMSG);
assert(p == target);
assert(RTS_ISSET(p, RTS_VMREQUEST));
break;
case VMSTYPE_MAP:
assert(RTS_ISSET(p, RTS_VMREQUEST));
break;
default:
panic("strange request type: %d",p->p_vmrequest.type);
}
RTS_UNSET(p, RTS_VMREQUEST);
return OK;
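/* Hand VM the kernel physical mapping with index SVMCTL_VALUE, so
 * that VM can set up a virtual mapping for it.
 */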
case VMCTL_KERN_PHYSMAP:
{
int i = m_ptr->SVMCTL_VALUE;
return arch_phys_map(i,
(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_ADDR,
(phys_bytes *) &m_ptr->SVMCTL_MAP_PHYS_LEN,
&m_ptr->SVMCTL_MAP_FLAGS);
}
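/* VM reports the virtual address at which it mapped the physical
 * range with index SVMCTL_VALUE.
 */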
case VMCTL_KERN_MAP_REPLY:
{
return arch_phys_map_reply(m_ptr->SVMCTL_VALUE,
(vir_bytes) m_ptr->SVMCTL_MAP_VIR_ADDR);
}
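/* Keep the process from running while VM is working on its address
 * space.
 */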
case VMCTL_VMINHIBIT_SET:
/* check if we must stop a process on a different CPU */
#if CONFIG_SMP
if (p->p_cpu != cpuid) {
smp_schedule_vminhibit(p);
} else
#endif
RTS_SET(p, RTS_VMINHIBIT);
#if CONFIG_SMP
p->p_misc_flags |= MF_FLUSH_TLB;
#endif
return OK;
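/* VM is done with the process; allow it to run again. */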
case VMCTL_VMINHIBIT_CLEAR:
assert(RTS_ISSET(p, RTS_VMINHIBIT));
/*
 * The process is certainly not runnable, so there is no need to
 * tell its CPU.
 */
RTS_UNSET(p, RTS_VMINHIBIT);
#ifdef CONFIG_SMP
if (p->p_misc_flags & MF_SENDA_VM_MISS) {
struct priv *privp;
p->p_misc_flags &= ~MF_SENDA_VM_MISS;
privp = priv(p);
try_deliver_senda(p, (asynmsg_t *) privp->s_asyntab,
privp->s_asynsize);
}
/*
 * We don't know whether the kernel has the changed mapping
 * installed for accessing userspace memory, and if so, on which
 * CPU. Moreover, we don't know which mapping has changed and how,
 * so we must invalidate all mappings we have anywhere. Next time
 * we map memory, we map it fresh.
 */
bits_fill(p->p_stale_tlb, CONFIG_MAX_CPUS);
#endif
return OK;
case VMCTL_CLEARMAPCACHE:
/* VM says: forget about old mappings we have cached. */
mem_clear_mapcache();
return OK;
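/* The process may now leave its boot-time inhibited state. */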
case VMCTL_BOOTINHIBIT_CLEAR:
RTS_UNSET(p, RTS_BOOTINHIBIT);
return OK;
}
/* Try architecture-specific vmctls. */
return arch_do_vmctl(m_ptr, p);
}