feat(zkvm): implement freeing allocator and reserved memory section #1858

Closed
91 changes: 87 additions & 4 deletions crates/zkvm/entrypoint/src/heap.rs
@@ -1,16 +1,99 @@
use core::alloc::{GlobalAlloc, Layout};
use core::ptr::NonNull;
use core::sync::atomic::{AtomicUsize, Ordering};

use crate::syscalls::sys_alloc_aligned;

/// A simple heap allocator.
/// A block in our free list
#[repr(C, align(8))]
struct FreeBlock {
    size: usize,
    next: Option<NonNull<FreeBlock>>,
}

// Global free list head stored as raw pointer value
static FREE_LIST_HEAD: AtomicUsize = AtomicUsize::new(0);

/// A simple heap allocator that supports freeing memory.
///
/// Allocates memory from left to right, without any deallocation.
/// Uses a first-fit strategy for allocation and maintains a free list
/// for memory reuse. Designed for single-threaded embedded systems
/// with a memory limit of 0x78000000.
#[derive(Copy, Clone)]
pub struct SimpleAlloc;

// Implementation detail functions
impl SimpleAlloc {
    unsafe fn add_free_block(ptr: *mut u8, size: usize) {
        let block = ptr as *mut FreeBlock;
        (*block).size = size;

        loop {
            let current_head = FREE_LIST_HEAD.load(Ordering::Relaxed);
            (*block).next = NonNull::new(current_head as *mut FreeBlock);

            if FREE_LIST_HEAD
                .compare_exchange(
                    current_head,
                    block as usize,
                    Ordering::Release,
                    Ordering::Relaxed,
                )
                .is_ok()
            {
                break;
            }
        }
    }

    unsafe fn find_block(size: usize, align: usize) -> Option<(*mut u8, usize)> {
        let mut prev: Option<*mut FreeBlock> = None;
        let mut current_ptr = FREE_LIST_HEAD.load(Ordering::Acquire) as *mut FreeBlock;

        while !current_ptr.is_null() {
            let addr = current_ptr as *mut u8;
            let aligned_addr = ((addr as usize + align - 1) & !(align - 1)) as *mut u8;
            let align_adj = aligned_addr as usize - addr as usize;

            if (*current_ptr).size >= size + align_adj {
                let next = (*current_ptr).next;
                let next_raw = next.map_or(0, |n| n.as_ptr() as usize);

                match prev {
                    Some(p) => (*p).next = next,
                    None => {
                        FREE_LIST_HEAD.store(next_raw, Ordering::Release);
                    }
                }
                return Some((aligned_addr, (*current_ptr).size));
            }
            prev = Some(current_ptr);
            current_ptr = (*current_ptr).next.map_or(core::ptr::null_mut(), |n| n.as_ptr());
        }
        None
    }
}

unsafe impl GlobalAlloc for SimpleAlloc {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        sys_alloc_aligned(layout.size(), layout.align())
        let size = layout.size().max(core::mem::size_of::<FreeBlock>());
        let align = layout.align();

        // Try to find a block in free list
        if let Some((ptr, _)) = Self::find_block(size, align) {
            return ptr;
        }

        // If no suitable block found, allocate new memory
        sys_alloc_aligned(size, align)
    }

    unsafe fn dealloc(&self, _: *mut u8, _: Layout) {}
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let size = layout.size().max(core::mem::size_of::<FreeBlock>());
        Self::add_free_block(ptr, size);
    }
}

#[used]
#[no_mangle]
pub static HEAP_ALLOCATOR: SimpleAlloc = SimpleAlloc;
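
Note: to make the first-fit behaviour above easier to follow, here is a minimal, host-runnable sketch of the same idea. It is not the zkVM allocator: it tracks (offset, size) pairs over an imaginary arena instead of raw pointers, and the `FreeList` type and its methods are illustrative names only.

```rust
/// Illustrative stand-in for the free list: freed blocks are pushed onto a list,
/// allocation scans it front-to-back for the first block that is large enough,
/// and otherwise falls back to a bump pointer (the analogue of sys_alloc_aligned).
struct FreeList {
    free: Vec<(usize, usize)>, // (offset, size) of blocks available for reuse
    bump: usize,               // next offset of "fresh" memory
}

impl FreeList {
    fn new() -> Self {
        FreeList { free: Vec::new(), bump: 0 }
    }

    /// First fit: reuse the first free block that can hold `size`, else bump-allocate.
    fn alloc(&mut self, size: usize) -> usize {
        if let Some(i) = self.free.iter().position(|&(_, s)| s >= size) {
            let (offset, _) = self.free.remove(i);
            return offset;
        }
        let offset = self.bump;
        self.bump += size;
        offset
    }

    /// Freeing just records the block, mirroring add_free_block.
    fn free(&mut self, offset: usize, size: usize) {
        self.free.push((offset, size));
    }
}

fn main() {
    let mut heap = FreeList::new();
    let a = heap.alloc(64); // fresh memory at offset 0
    heap.free(a, 64);       // block becomes reusable
    let b = heap.alloc(32); // first fit: the freed 64-byte block is reused
    assert_eq!(a, b);
    let c = heap.alloc(128); // nothing large enough is free, so bump-allocate
    assert_eq!(c, 64);
    println!("reused offset {b}, fresh offset {c}");
}
```

The real allocator does the same thing with intrusive `FreeBlock` headers and an atomic list head, which is why `alloc` and `dealloc` above clamp the request to at least `size_of::<FreeBlock>()`.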
40 changes: 40 additions & 0 deletions crates/zkvm/entrypoint/src/syscalls/memory.rs
@@ -15,9 +15,21 @@
// Memory addresses must be lower than BabyBear prime.
const MAX_MEMORY: usize = 0x78000000;

const RESERVED_SIZE: usize = MAX_MEMORY / 256;
static mut RESERVED_POS: usize = 0;
static mut RESERVED_START: usize = 0;

#[allow(clippy::missing_safety_doc)]
#[no_mangle]
pub unsafe extern "C" fn sys_alloc_aligned(bytes: usize, align: usize) -> *mut u8 {
    if !align.is_power_of_two() {
        panic!("Alignment must be power of 2");
    }

    if bytes.checked_add(align - 1).is_none() {
        panic!("Memory allocation would overflow");
    }

    extern "C" {
        // https://lld.llvm.org/ELF/linker_script.html#sections-command
        static _end: u8;
@@ -32,6 +44,15 @@ pub unsafe extern "C" fn sys_alloc_aligned(bytes: usize, align: usize) -> *mut u

    if heap_pos == 0 {
        heap_pos = unsafe { (&_end) as *const u8 as usize };
        unsafe {
            // Check if reserved section would exceed memory limit
            if heap_pos + RESERVED_SIZE > MAX_MEMORY {
                panic!("Reserved section would exceed memory limit");
            }
            RESERVED_START = heap_pos;
            RESERVED_POS = heap_pos;
            heap_pos += RESERVED_SIZE;
        }
    }

    let offset = heap_pos & (align - 1);
@@ -49,3 +70,22 @@ pub unsafe extern "C" fn sys_alloc_aligned(bytes: usize, align: usize) -> *mut u
    unsafe { HEAP_POS = heap_pos };
    ptr
}
#[allow(clippy::missing_safety_doc)]
#[no_mangle]
pub unsafe extern "C" fn sys_alloc_reserved(bytes: usize, align: usize) -> *mut u8 {
    let mut pos = RESERVED_POS;
    debug_assert!(align.is_power_of_two());

    let offset = pos & (align - 1);
    if offset != 0 {
        pos += align - offset;
    }

    let new_pos = pos + bytes;
    if new_pos < pos || new_pos > RESERVED_START + RESERVED_SIZE {
        panic!("Reserved memory section full");
    }

    RESERVED_POS = new_pos;
    pos as *mut u8
}
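
The reserved section is a bump region carved out of the heap the first time `sys_alloc_aligned` runs: `RESERVED_SIZE = MAX_MEMORY / 256` bytes starting at `_end`, with `RESERVED_POS` advancing through it. The sketch below reproduces only the align-up and bounds arithmetic of `sys_alloc_reserved` as safe, host-runnable code; the example `RESERVED_START` value and the `reserved_alloc` helper are made up for illustration.

```rust
const RESERVED_START: usize = 0x0010_0000;      // example base address, not a real zkVM value
const RESERVED_SIZE: usize = 0x7800_0000 / 256; // mirrors MAX_MEMORY / 256 from the diff

/// Returns (new cursor, allocated address), or None once the region is exhausted.
fn reserved_alloc(mut pos: usize, bytes: usize, align: usize) -> Option<(usize, usize)> {
    assert!(align.is_power_of_two());

    // Round the cursor up to the requested alignment, exactly as the syscall does.
    let offset = pos & (align - 1);
    if offset != 0 {
        pos += align - offset;
    }

    // Reject allocations that would run past the end of the reserved section.
    let new_pos = pos.checked_add(bytes)?;
    if new_pos > RESERVED_START + RESERVED_SIZE {
        return None; // the syscall panics with "Reserved memory section full" here
    }
    Some((new_pos, pos))
}

fn main() {
    // First allocation starts at the (already aligned) base of the region.
    let (cursor, a) = reserved_alloc(RESERVED_START, 10, 4).unwrap();
    assert_eq!(a, RESERVED_START);

    // The cursor now sits at base + 10 and is rounded up to base + 12 for 4-byte alignment.
    let (_, b) = reserved_alloc(cursor, 8, 4).unwrap();
    assert_eq!(b, RESERVED_START + 12);

    println!("first at {a:#x}, second at {b:#x}");
}
```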
21 changes: 3 additions & 18 deletions crates/zkvm/lib/src/io.rs
@@ -1,10 +1,7 @@
#![allow(unused_unsafe)]
use crate::{syscall_hint_len, syscall_hint_read, syscall_write};
use crate::{sys_alloc_reserved, syscall_hint_len, syscall_hint_read, syscall_write};
use serde::{de::DeserializeOwned, Serialize};
use std::{
    alloc::Layout,
    io::{Result, Write},
};
use std::io::{Result, Write};

/// The file descriptor for public values.
pub const FD_PUBLIC_VALUES: u32 = 3;
@@ -43,24 +40,12 @@ impl Write for SyscallWriter
/// let data: Vec<u8> = sp1_zkvm::io::read_vec();
/// ```
pub fn read_vec() -> Vec<u8> {
    // Round up to the nearest multiple of 4 so that the memory allocated is in whole words
    let len = unsafe { syscall_hint_len() };
    let capacity = (len + 3) / 4 * 4;
    let ptr = unsafe { sys_alloc_reserved(capacity, 4) };

    // Allocate a buffer of the required length that is 4 byte aligned
    let layout = Layout::from_size_align(capacity, 4).expect("vec is too large");
    let ptr = unsafe { std::alloc::alloc(layout) };

    // SAFETY:
    // 1. `ptr` was allocated using alloc
    // 2. We assuume that the VM global allocator doesn't dealloc
    // 3/6. Size is correct from above
    // 4/5. Length is 0
    // 7. Layout::from_size_align already checks this
    let mut vec = unsafe { Vec::from_raw_parts(ptr, 0, capacity) };

    // Read the vec into uninitialized memory. The syscall assumes the memory is uninitialized,
    // which should be true because the allocator does not dealloc, so a new alloc should be fresh.
    unsafe {
        syscall_hint_read(ptr, len);
        vec.set_len(len);
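Pieced together from the hunks above (the tail of the function is hidden behind the collapsed lines), the post-change `read_vec` looks roughly like the following; treat it as a sketch of the new flow, not the exact merged source.

```rust
pub fn read_vec() -> Vec<u8> {
    // Round up to the nearest multiple of 4 so that the memory allocated is in whole words.
    let len = unsafe { syscall_hint_len() };
    let capacity = (len + 3) / 4 * 4;

    // Allocate from the reserved section rather than the global allocator, so the
    // hint buffer can never be recycled by the new freeing allocator.
    let ptr = unsafe { sys_alloc_reserved(capacity, 4) };

    // SAFETY: `ptr` points to `capacity` bytes of fresh, 4-byte-aligned reserved memory.
    let mut vec = unsafe { Vec::from_raw_parts(ptr, 0, capacity) };

    // The syscall writes the hint directly into the uninitialized buffer.
    unsafe {
        syscall_hint_read(ptr, len);
        vec.set_len(len);
    }
    vec
}
```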
3 changes: 3 additions & 0 deletions crates/zkvm/lib/src/lib.rs
@@ -97,6 +97,9 @@ extern "C" {
    /// Allocates a buffer aligned to the given alignment.
    pub fn sys_alloc_aligned(bytes: usize, align: usize) -> *mut u8;

    /// Allocates from the reserved memory section
    pub fn sys_alloc_reserved(bytes: usize, align: usize) -> *mut u8;

    /// Decompresses a BLS12-381 point.
    pub fn syscall_bls12381_decompress(point: &mut [u8; 96], is_odd: bool);
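
As a usage sketch of the new extern (the `scratch_buffer` helper, its size, and its alignment are hypothetical, not part of this PR): because reserved memory is never handed back to the freeing allocator, guest code can carve long-lived buffers out of it.

```rust
fn scratch_buffer() -> &'static mut [u8] {
    const LEN: usize = 256;
    unsafe {
        // Carve LEN bytes out of the reserved section, 8-byte aligned.
        let ptr = sys_alloc_reserved(LEN, 8);
        // Zero it before handing out a slice, since the memory starts uninitialized.
        core::ptr::write_bytes(ptr, 0, LEN);
        core::slice::from_raw_parts_mut(ptr, LEN)
    }
}
```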
