Skip to content

Commit 02e8a70

Browse files
committed
feat(virtq): add MemOps for host and guest
Signed-off-by: Tomasz Andrzejak <andreiltd@gmail.com>
1 parent aa4c417 commit 02e8a70

5 files changed

Lines changed: 198 additions & 6 deletions

File tree

src/hyperlight_guest/src/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ pub mod error;
2525
pub mod exit;
2626
pub mod layout;
2727
pub mod prim_alloc;
28+
pub mod virtq_mem;
2829

2930
pub mod guest_handle {
3031
pub mod handle;
Lines changed: 67 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,67 @@
1+
/*
2+
Copyright 2026 The Hyperlight Authors.
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
*/
16+
17+
//! Guest-side [`MemOps`] implementation for virtqueue access.
18+
19+
use core::convert::Infallible;
20+
use core::sync::atomic::{AtomicU16, Ordering};
21+
use core::{ptr, slice};
22+
23+
use hyperlight_common::virtq::MemOps;
24+
25+
/// Guest-side memory accessor for virtqueue operations. Treats virtq
26+
/// addresses as guest virtual addresses that map directly to memory.
27+
#[derive(Clone, Copy, Debug)]
28+
pub struct GuestMemOps;
29+
30+
impl MemOps for GuestMemOps {
31+
type Error = Infallible;
32+
33+
fn read(&self, addr: u64, dst: &mut [u8]) -> Result<usize, Self::Error> {
34+
let src = addr as *const u8;
35+
unsafe {
36+
ptr::copy_nonoverlapping(src, dst.as_mut_ptr(), dst.len());
37+
}
38+
Ok(dst.len())
39+
}
40+
41+
fn write(&self, addr: u64, src: &[u8]) -> Result<usize, Self::Error> {
42+
let dst = addr as *mut u8;
43+
unsafe {
44+
ptr::copy_nonoverlapping(src.as_ptr(), dst, src.len());
45+
}
46+
Ok(src.len())
47+
}
48+
49+
fn load_acquire(&self, addr: u64) -> Result<u16, Self::Error> {
50+
let ptr = addr as *const AtomicU16;
51+
Ok(unsafe { (*ptr).load(Ordering::Acquire) })
52+
}
53+
54+
fn store_release(&self, addr: u64, val: u16) -> Result<(), Self::Error> {
55+
let ptr = addr as *const AtomicU16;
56+
unsafe { (*ptr).store(val, Ordering::Release) };
57+
Ok(())
58+
}
59+
60+
unsafe fn as_slice(&self, addr: u64, len: usize) -> Result<&[u8], Self::Error> {
61+
Ok(unsafe { slice::from_raw_parts(addr as *const u8, len) })
62+
}
63+
64+
unsafe fn as_mut_slice(&self, addr: u64, len: usize) -> Result<&mut [u8], Self::Error> {
65+
Ok(unsafe { slice::from_raw_parts_mut(addr as *mut u8, len) })
66+
}
67+
}

src/hyperlight_host/src/mem/mgr.rs

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -818,10 +818,9 @@ impl SandboxMemoryManager<HostSharedMemory> {
818818
/// Compute the G2H virtqueue Layout from scratch region addresses.
819819
pub(crate) fn g2h_virtq_layout(&self) -> Result<hyperlight_common::virtq::Layout> {
820820
let base = self.layout.get_g2h_ring_gva();
821-
let depth = self.layout.sandbox_memory_config.get_g2h_queue_depth();
821+
let depth = self.layout.sandbox_memory_config.get_g2h_queue_depth() as u16;
822822

823-
let nz = NonZeroU16::new(depth as u16)
824-
.ok_or_else(|| new_error!("G2H queue depth is zero"))?;
823+
let nz = NonZeroU16::new(depth).ok_or_else(|| new_error!("G2H queue depth is zero"))?;
825824

826825
unsafe { VirtqLayout::from_base(base, nz) }
827826
.map_err(|e| new_error!("Invalid G2H virtq layout: {:?}", e))
@@ -830,10 +829,9 @@ impl SandboxMemoryManager<HostSharedMemory> {
830829
/// Compute the H2G virtqueue Layout from scratch region addresses.
831830
pub(crate) fn h2g_virtq_layout(&self) -> Result<hyperlight_common::virtq::Layout> {
832831
let base = self.layout.get_h2g_ring_gva();
833-
let depth = self.layout.sandbox_memory_config.get_h2g_queue_depth();
832+
let depth = self.layout.sandbox_memory_config.get_h2g_queue_depth() as u16;
834833

835-
let nz = NonZeroU16::new(depth as u16)
836-
.ok_or_else(|| new_error!("H2G queue depth is zero"))?;
834+
let nz = NonZeroU16::new(depth).ok_or_else(|| new_error!("H2G queue depth is zero"))?;
837835

838836
unsafe { VirtqLayout::from_base(base, nz) }
839837
.map_err(|e| new_error!("Invalid H2G virtq layout: {:?}", e))

src/hyperlight_host/src/mem/mod.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -38,3 +38,5 @@ pub mod shared_mem;
3838
/// Utilities for writing shared memory tests
3939
#[cfg(all(test, not(miri)))] // uses proptest which isn't miri-compatible
4040
pub(crate) mod shared_mem_tests;
41+
/// Host-side [`hyperlight_common::virtq::MemOps`] for virtqueue access.
42+
pub(crate) mod virtq_mem;
Lines changed: 124 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,124 @@
1+
/*
2+
Copyright 2026 The Hyperlight Authors.
3+
4+
Licensed under the Apache License, Version 2.0 (the "License");
5+
you may not use this file except in compliance with the License.
6+
You may obtain a copy of the License at
7+
8+
http://www.apache.org/licenses/LICENSE-2.0
9+
10+
Unless required by applicable law or agreed to in writing, software
11+
distributed under the License is distributed on an "AS IS" BASIS,
12+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
See the License for the specific language governing permissions and
14+
limitations under the License.
15+
*/
16+
17+
//! Host-side [`MemOps`] implementation for virtqueue access.
18+
//!
19+
//! Translates guest virtual addresses used in virtqueue descriptors
20+
//! to offsets into the scratch [`HostSharedMemory`], reusing its
21+
//! volatile access and locking patterns.
22+
23+
use core::sync::atomic::{AtomicU16, Ordering};
24+
25+
use hyperlight_common::virtq::MemOps;
26+
27+
use super::shared_mem::{HostSharedMemory, SharedMemory};
28+
29+
/// Error type for host memory operations.
#[derive(Debug, thiserror::Error)]
pub enum HostMemError {
    /// The guest address (or `addr + len`) is not inside the scratch region.
    #[error("address {addr:#x} out of bounds scratch_size={scratch_size}")]
    OutOfBounds { addr: u64, scratch_size: usize },
    /// An underlying [`HostSharedMemory`] operation failed; the original
    /// error is carried as its stringified message.
    #[error("shared memory error: {0}")]
    SharedMem(String),
    /// `as_slice`/`as_mut_slice` are rejected on the host side —
    /// NOTE(review): presumably because handing out direct borrows of
    /// guest-shared memory would be unsound; confirm rationale.
    #[error("as_slice/as_mut_slice not supported on host")]
    DirectSliceNotSupported,
}
39+
40+
/// Host-side memory accessor for virtqueue operations.
///
/// Owns a clone of the scratch [`HostSharedMemory`] and translates
/// guest virtual addresses (in the scratch region) to offsets for the
/// existing volatile read/write methods.
#[derive(Clone)]
pub(crate) struct HostMemOps {
    /// Cloned handle to the scratch shared memory
    scratch: HostSharedMemory,
    /// The guest virtual address that corresponds to scratch offset 0.
    scratch_base_gva: u64,
}

impl HostMemOps {
    /// Create a new `HostMemOps` backed by shared memory.
    ///
    /// `scratch_base_gva` is the guest virtual address mapped at scratch
    /// offset 0; all virtq addresses are translated relative to it.
    pub(crate) fn new(scratch: &HostSharedMemory, scratch_base_gva: u64) -> Self {
        Self {
            scratch: scratch.clone(),
            scratch_base_gva,
        }
    }

    /// Translate a guest virtual address to a scratch offset.
    ///
    /// Only underflow (an `addr` below the scratch base) is rejected here;
    /// the upper bound is enforced in `raw_ptr`, and — for `read`/`write` —
    /// presumably by the `HostSharedMemory` copy helpers (TODO confirm).
    fn to_offset(&self, addr: u64) -> Result<usize, HostMemError> {
        addr.checked_sub(self.scratch_base_gva)
            .map(|o| o as usize)
            .ok_or(HostMemError::OutOfBounds {
                addr,
                scratch_size: self.scratch.mem_size(),
            })
    }

    /// Get a raw pointer into scratch memory at the given guest address.
    ///
    /// Rejects any range `addr..addr+len` that is not fully contained in
    /// the scratch region.
    fn raw_ptr(&self, addr: u64, len: usize) -> Result<*mut u8, HostMemError> {
        let offset = self.to_offset(addr)?;
        let scratch_size = self.scratch.mem_size();

        // `checked_add` catches `offset + len` overflow; `is_none_or`
        // folds the overflow and past-the-end cases into one rejection.
        if offset.checked_add(len).is_none_or(|end| end > scratch_size) {
            return Err(HostMemError::OutOfBounds { addr, scratch_size });
        }

        Ok(self.scratch.base_ptr().wrapping_add(offset))
    }
}
84+
85+
impl MemOps for HostMemOps {
86+
type Error = HostMemError;
87+
88+
fn read(&self, addr: u64, dst: &mut [u8]) -> Result<usize, Self::Error> {
89+
let offset = self.to_offset(addr)?;
90+
self.scratch
91+
.copy_to_slice(dst, offset)
92+
.map_err(|e| HostMemError::SharedMem(e.to_string()))?;
93+
Ok(dst.len())
94+
}
95+
96+
fn write(&self, addr: u64, src: &[u8]) -> Result<usize, Self::Error> {
97+
let offset = self.to_offset(addr)?;
98+
self.scratch
99+
.copy_from_slice(src, offset)
100+
.map_err(|e| HostMemError::SharedMem(e.to_string()))?;
101+
Ok(src.len())
102+
}
103+
104+
fn load_acquire(&self, addr: u64) -> Result<u16, Self::Error> {
105+
let ptr = self.raw_ptr(addr, core::mem::size_of::<u16>())?;
106+
let atomic = unsafe { &*(ptr as *const AtomicU16) };
107+
Ok(atomic.load(Ordering::Acquire))
108+
}
109+
110+
fn store_release(&self, addr: u64, val: u16) -> Result<(), Self::Error> {
111+
let ptr = self.raw_ptr(addr, core::mem::size_of::<u16>())?;
112+
let atomic = unsafe { &*(ptr as *const AtomicU16) };
113+
atomic.store(val, Ordering::Release);
114+
Ok(())
115+
}
116+
117+
unsafe fn as_slice(&self, _addr: u64, _len: usize) -> Result<&[u8], Self::Error> {
118+
Err(HostMemError::DirectSliceNotSupported)
119+
}
120+
121+
unsafe fn as_mut_slice(&self, _addr: u64, _len: usize) -> Result<&mut [u8], Self::Error> {
122+
Err(HostMemError::DirectSliceNotSupported)
123+
}
124+
}

0 commit comments

Comments
 (0)