Skip to content

Commit d8cc07f

Browse files
committed
Implement wasmtime_memory_image APIs
Signed-off-by: Lucy Menon <168595099+syntactically@users.noreply.github.com>
1 parent 3fa7745 commit d8cc07f

1 file changed

Lines changed: 108 additions & 12 deletions

File tree

src/hyperlight_wasm_runtime/src/platform.rs

Lines changed: 108 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -172,32 +172,128 @@ pub extern "C" fn wasmtime_init_traps(handler: wasmtime_trap_handler_t) -> i32 {
172172
0
173173
}
174174

175-
// The wasmtime_memory_image APIs are not yet supported.
175+
// Copy a VA range to a new VA. Old and new VA, and len, must be
// page-aligned.
//
// If `remap_original` is true, any writable page in the source range is
// additionally remapped in place as copy-on-write, so that later writes
// through the original VA cannot alter the physical pages now shared
// with the copy.
fn copy_va_mapping(base: *const u8, len: usize, to_va: *mut u8, remap_original: bool) {
    debug_assert!((base as usize).is_multiple_of(vmem::PAGE_SIZE));
    debug_assert!(len.is_multiple_of(vmem::PAGE_SIZE));
    // TODO: all this barrier code is amd64 specific. It should be
    // refactored to use some better architecture-independent APIs.
    //
    // On amd64, "upgrades" including the first time that a valid
    // translation exists for a VA, only need a light (serialising
    // instruction) barrier. Since invlpg is also a barrier, we don't
    // even need that, if we did do a downgrade remap just before.
    let mut needs_first_valid_exposure_barrier = false;

    // TODO: make this more efficient by directly exposing the ability
    // to traverse an entire VA range in
    // hyperlight_guest_bin::paging::virt_to_phys, and coalescing
    // continuous ranges there.
    let base_u = base as u64;
    // Walk the source range one page at a time and look up each page's
    // current physical mapping.
    let va_page_bases = (base_u..(base_u + len as u64)).step_by(vmem::PAGE_SIZE);
    let mappings = va_page_bases.flat_map(paging::virt_to_phys);
    for mapping in mappings {
        // TODO: Deduplicate with identical logic in hyperlight_host snapshot.
        // Decide what kind of mapping both copies should end up with:
        // writable pages become CoW; read-only and already-CoW pages
        // keep equivalent permissions. `was_writable` records whether a
        // downgrade (writable -> CoW) would be needed on the original.
        let (new_kind, was_writable) = match mapping.kind {
            // Skip unmapped pages, since they will be unmapped in
            // both the original and the new copy
            vmem::MappingKind::Unmapped => continue,
            vmem::MappingKind::Basic(bm) if bm.writable => (
                vmem::MappingKind::Cow(vmem::CowMapping {
                    readable: bm.readable,
                    executable: bm.executable,
                }),
                true,
            ),
            vmem::MappingKind::Basic(bm) => (
                vmem::MappingKind::Basic(vmem::BasicMapping {
                    readable: bm.readable,
                    writable: false,
                    executable: bm.executable,
                }),
                false,
            ),
            vmem::MappingKind::Cow(cm) => (vmem::MappingKind::Cow(cm), false),
        };
        let do_downgrade = remap_original && was_writable;
        if do_downgrade {
            // If necessary, remap the original page as Cow, instead
            // of whatever it is now, to ensure that any more writes to
            // that region do not change the image base.
            //
            // TODO: could the table traversal needed for this be fused
            // with the table traversal that got the original mapping,
            // above?
            // SAFETY: remaps a single, currently-mapped page at its
            // existing VA/phys pair, changing only its mapping kind.
            unsafe {
                paging::map_region(
                    mapping.phys_base,
                    mapping.virt_base as *mut u8,
                    vmem::PAGE_SIZE as u64,
                    new_kind,
                );
            }
        }
        // map the same pages to the new VA, preserving each page's
        // offset within the range
        // SAFETY: relies on the caller providing a `to_va` destination
        // region of at least `len` bytes (see function comment).
        unsafe {
            paging::map_region(
                mapping.phys_base,
                to_va.wrapping_add((mapping.virt_base - base_u) as usize),
                vmem::PAGE_SIZE as u64,
                new_kind,
            );
        }
        if do_downgrade {
            // Since we have downgraded a page from writable to CoW we
            // need to do an invlpg on it. Because invlpg is a
            // serialising instruction, we don't need the other
            // barrier for the new mapping.
            unsafe {
                core::arch::asm!("invlpg [{}]", in(reg) mapping.virt_base, options(readonly, nostack, preserves_flags));
            }
            needs_first_valid_exposure_barrier = false;
        } else {
            needs_first_valid_exposure_barrier = true;
        }
    }
    // If the most recent exposure of a new valid translation was not
    // followed by a serialising invlpg, issue the light barrier now.
    if needs_first_valid_exposure_barrier {
        paging::barrier::first_valid_same_ctx();
    }
}
263+
264+
// Create a copy-on-write memory image from some existing VA range.
265+
// `ptr` and `len` must be page-aligned (which is guaranteed by the
266+
// wasmtime-platform.h interface).
176267
#[no_mangle]
177268
pub extern "C" fn wasmtime_memory_image_new(
178-
_ptr: *const u8,
179-
_len: usize,
269+
ptr: *const u8,
270+
len: usize,
180271
ret: &mut *mut c_void,
181272
) -> i32 {
182-
*ret = core::ptr::null_mut();
273+
// Choose an arbitrary VA, which we will use as the memory image
274+
// identifier. We will construct the image by mapping a copy of
275+
// the original VA range here, making the original copy CoW as we
276+
// go.
277+
let new_virt = FIRST_VADDR.fetch_add(0x100_0000_0000, Ordering::Relaxed) as *mut u8;
278+
copy_va_mapping(ptr, len, new_virt, true);
279+
*ret = new_virt as *mut c_void;
183280
0
184281
}
185282

186283
#[no_mangle]
187284
pub extern "C" fn wasmtime_memory_image_map_at(
188-
_image: *mut c_void,
189-
_addr: *mut u8,
190-
_len: usize,
285+
image: *mut c_void,
286+
addr: *mut u8,
287+
len: usize,
191288
) -> i32 {
192-
/* This should never be called because wasmtime_memory_image_new
193-
* returns NULL */
194-
panic!("wasmtime_memory_image_map_at");
289+
copy_va_mapping(image as *mut u8, len, addr, false);
290+
0
195291
}
196292

197293
#[no_mangle]
pub extern "C" fn wasmtime_memory_image_free(_image: *mut c_void) {
    // Never expected to run in practice: instances are torn down by
    // restoring the snapshot, not by unloading/destroying them and
    // freeing their images individually.
    panic!("wasmtime_memory_image_free");
}
203299

0 commit comments

Comments (0)