Skip to content

Commit 335f1b8

Browse files
NathanFlurry and claude
committed
feat: US-053 - Add thread-safe SnapshotCache with LRU eviction
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 0c5725b commit 335f1b8

1 file changed

Lines changed: 169 additions & 0 deletions

File tree

crates/v8-runtime/src/snapshot.rs

Lines changed: 169 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,7 @@
11
// V8 startup snapshots: fast isolate creation from pre-compiled bridge code
22

3+
use std::sync::{Arc, Mutex};
4+
35
use crate::bridge::external_refs;
46
use crate::isolate::init_v8_platform;
57

@@ -75,6 +77,72 @@ where
7577
v8::Isolate::new(params)
7678
}
7779

80+
/// Thread-safe snapshot cache keyed by bridge code hash.
///
/// Lazily creates snapshots on first encounter of each bridge code variant.
/// Concurrent callers for the same variant block on the mutex; only one
/// creates the snapshot.
pub struct SnapshotCache {
    // Entries kept in LRU order: front = least recently used (eviction
    // victim), back = most recently used.
    inner: Mutex<Vec<CacheEntry>>,
    // Maximum number of snapshots retained before the LRU entry is evicted.
    max_entries: usize,
}
89+
90+
struct CacheEntry {
91+
bridge_hash: u64,
92+
/// Snapshot blob bytes (copied from v8::StartupData).
93+
/// Stored as Vec<u8> rather than StartupData because StartupData
94+
/// contains raw pointers that are not Send/Sync.
95+
blob: Arc<Vec<u8>>,
96+
}
97+
98+
impl SnapshotCache {
99+
pub fn new(max_entries: usize) -> Self {
100+
SnapshotCache {
101+
inner: Mutex::new(Vec::new()),
102+
max_entries,
103+
}
104+
}
105+
106+
/// Get or create a snapshot for the given bridge code.
107+
///
108+
/// Thread-safe: concurrent callers block on mutex; only one creates the
109+
/// snapshot for a given bridge code variant.
110+
pub fn get_or_create(&self, bridge_code: &str) -> Result<Arc<Vec<u8>>, String> {
111+
let mut cache = self.inner.lock().unwrap();
112+
let hash = siphash(bridge_code);
113+
114+
// Cache hit — move entry to end (most recently used)
115+
if let Some(pos) = cache.iter().position(|e| e.bridge_hash == hash) {
116+
let entry = cache.remove(pos);
117+
let blob = Arc::clone(&entry.blob);
118+
cache.push(entry);
119+
return Ok(blob);
120+
}
121+
122+
// Cache miss — create snapshot (holds lock)
123+
let startup_data = create_snapshot(bridge_code)?;
124+
let arc = Arc::new(startup_data.to_vec());
125+
126+
// LRU eviction: remove oldest (front) entry when at capacity
127+
if cache.len() >= self.max_entries {
128+
cache.remove(0);
129+
}
130+
cache.push(CacheEntry {
131+
bridge_hash: hash,
132+
blob: Arc::clone(&arc),
133+
});
134+
135+
Ok(arc)
136+
}
137+
}
138+
139+
/// Hash bridge source text into a `u64` cache key.
///
/// Uses the standard library's `DefaultHasher` (currently SipHash-1-3; the
/// exact algorithm is unspecified and may change between Rust releases).
/// Values are stable within a process, which is all the in-memory cache
/// requires.
fn siphash(s: &str) -> u64 {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut state = DefaultHasher::new();
    s.hash(&mut state);
    state.finish()
}
145+
78146
#[cfg(test)]
79147
mod tests {
80148
use super::*;
@@ -143,5 +211,106 @@ mod tests {
143211
assert_eq!(result, format!("{}", i + 1));
144212
}
145213
}
214+
215+
// --- Part 6: Cache hit returns same Arc ---
216+
{
217+
let cache = SnapshotCache::new(4);
218+
let bridge_code = "(function() { globalThis.__cached = 1; })();";
219+
220+
let arc1 = cache.get_or_create(bridge_code).expect("first get_or_create");
221+
let arc2 = cache.get_or_create(bridge_code).expect("second get_or_create");
222+
223+
// Same Arc (same pointer) — cache hit, not a new snapshot
224+
assert!(Arc::ptr_eq(&arc1, &arc2), "cache hit should return same Arc");
225+
}
226+
227+
// --- Part 7: Cache miss creates new snapshot ---
228+
{
229+
let cache = SnapshotCache::new(4);
230+
let code_a = "(function() { globalThis.__a = 1; })();";
231+
let code_b = "(function() { globalThis.__b = 2; })();";
232+
233+
let arc_a = cache.get_or_create(code_a).expect("create A");
234+
let arc_b = cache.get_or_create(code_b).expect("create B");
235+
236+
// Different bridge code → different Arc
237+
assert!(!Arc::ptr_eq(&arc_a, &arc_b), "different code should produce different Arc");
238+
239+
// Verify both are usable
240+
let mut iso_a = create_isolate_from_snapshot((*arc_a).clone(), None);
241+
assert_eq!(eval(&mut iso_a, "1 + 1"), "2");
242+
243+
let mut iso_b = create_isolate_from_snapshot((*arc_b).clone(), None);
244+
assert_eq!(eval(&mut iso_b, "2 + 2"), "4");
245+
}
246+
247+
// --- Part 8: LRU eviction removes oldest entry ---
248+
{
249+
let cache = SnapshotCache::new(2);
250+
let code_1 = "(function() { globalThis.__v1 = 1; })();";
251+
let code_2 = "(function() { globalThis.__v2 = 2; })();";
252+
let code_3 = "(function() { globalThis.__v3 = 3; })();";
253+
254+
let arc_1 = cache.get_or_create(code_1).expect("create 1");
255+
let _arc_2 = cache.get_or_create(code_2).expect("create 2");
256+
257+
// Cache is full (2 entries). Adding a third should evict code_1.
258+
let _arc_3 = cache.get_or_create(code_3).expect("create 3");
259+
260+
// code_1 should be evicted — re-requesting it should return a new Arc
261+
let arc_1_new = cache.get_or_create(code_1).expect("re-create 1");
262+
assert!(
263+
!Arc::ptr_eq(&arc_1, &arc_1_new),
264+
"evicted entry should produce a new Arc on re-creation"
265+
);
266+
267+
// After inserting code_3, the cache held [code_2, code_3] (code_1 was the
// LRU entry and was evicted). Re-requesting code_1 above then evicted
// code_2, leaving the cache as [code_3, code_1].
271+
}
272+
273+
// --- Part 9: Concurrent get_or_create creates only one snapshot ---
274+
{
275+
use std::sync::atomic::{AtomicUsize, Ordering};
276+
277+
let cache = Arc::new(SnapshotCache::new(4));
278+
let bridge_code = "(function() { globalThis.__concurrent = 1; })();";
279+
280+
// Pre-warm — to avoid measuring snapshot creation races, verify
281+
// that after one creation, N threads all get the same Arc
282+
let first = cache.get_or_create(bridge_code).expect("pre-warm");
283+
284+
let num_threads = 4;
285+
let barrier = Arc::new(std::sync::Barrier::new(num_threads));
286+
let same_count = Arc::new(AtomicUsize::new(0));
287+
288+
let mut handles = vec![];
289+
for _ in 0..num_threads {
290+
let cache = Arc::clone(&cache);
291+
let barrier = Arc::clone(&barrier);
292+
let first = Arc::clone(&first);
293+
let same_count = Arc::clone(&same_count);
294+
let code = bridge_code.to_string();
295+
296+
handles.push(std::thread::spawn(move || {
297+
barrier.wait();
298+
let arc = cache.get_or_create(&code).expect("concurrent get");
299+
if Arc::ptr_eq(&arc, &first) {
300+
same_count.fetch_add(1, Ordering::Relaxed);
301+
}
302+
}));
303+
}
304+
305+
for h in handles {
306+
h.join().expect("thread join");
307+
}
308+
309+
assert_eq!(
310+
same_count.load(Ordering::Relaxed),
311+
num_threads,
312+
"all concurrent callers should get the same cached Arc"
313+
);
314+
}
146315
}
147316
}

0 commit comments

Comments
 (0)