Merge branch 'main' into documentation

Nashtare committed Oct 30, 2023
2 parents 2607293 + af4935c commit d8ff620
Showing 46 changed files with 1,042 additions and 777 deletions.
8 changes: 8 additions & 0 deletions evm/src/cpu/bootstrap_kernel.rs
@@ -1,6 +1,7 @@
 //! The initial phase of execution, where the kernel code is hashed while being written to memory.
 //! The hash is then checked against a precomputed kernel hash.
 
+use ethereum_types::U256;
 use itertools::Itertools;
 use plonky2::field::extension::Extendable;
 use plonky2::field::packed::PackedField;
@@ -53,6 +54,13 @@ pub(crate) fn generate_bootstrap_kernel<F: Field>(state: &mut GenerationState<F>
         MemoryAddress::new(0, Segment::Code, 0),
         KERNEL.code.clone(),
     );
+    state.registers.stack_top = KERNEL
+        .code_hash
+        .iter()
+        .enumerate()
+        .fold(0.into(), |acc, (i, &elt)| {
+            acc + (U256::from(elt) << (224 - 32 * i))
+        });
     state.traces.push_cpu(final_cpu_row);
     log::info!("Bootstrapping took {} cycles", state.traces.clock());
 }
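The added stack_top assignment packs the kernel's code hash, stored as eight 32-bit limbs, into a single 256-bit word: limb i is shifted left by 224 - 32*i bits, so limb 0 lands in the most significant position (big-endian packing). A standalone sketch of the same fold, assuming only the ethereum_types crate; pack_hash and the sample limbs are illustrative, not taken from the repository:

use ethereum_types::U256;

/// Packs eight big-endian 32-bit limbs into one 256-bit word,
/// mirroring the fold added to generate_bootstrap_kernel above.
fn pack_hash(code_hash: [u32; 8]) -> U256 {
    code_hash
        .iter()
        .enumerate()
        .fold(U256::zero(), |acc, (i, &elt)| {
            // Limb 0 occupies bits 224..256, limb 1 bits 192..224, ..., limb 7 bits 0..32.
            acc + (U256::from(elt) << (224 - 32 * i))
        })
}

fn main() {
    // Hypothetical limbs, not a real kernel hash.
    let limbs = [0xdeadbeef, 0, 0, 0, 0, 0, 0, 1];
    let packed = pack_hash(limbs);
    assert_eq!(packed >> 224, U256::from(0xdeadbeef_u32)); // most significant limb
    assert_eq!(packed & U256::from(u32::MAX), U256::one()); // least significant limb
    println!("{packed:x}");
}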
12 changes: 4 additions & 8 deletions evm/src/cpu/columns/ops.rs
@@ -18,16 +18,14 @@ pub struct OpsColumnsView<T: Copy> {
     pub eq_iszero: T,
     /// Combines AND, OR and XOR flags.
     pub logic_op: T,
-    /// Flag for NOT.
-    pub not: T,
+    /// Combines NOT and POP flags.
+    pub not_pop: T,
     /// Combines SHL and SHR flags.
     pub shift: T,
     /// Flag for KECCAK_GENERAL.
     pub keccak_general: T,
     /// Flag for PROVER_INPUT.
     pub prover_input: T,
-    /// Flag for POP.
-    pub pop: T,
     /// Combines JUMP and JUMPI flags.
     pub jumps: T,
     /// Flag for PC.
@@ -40,10 +38,8 @@ pub struct OpsColumnsView<T: Copy> {
     pub push: T,
     /// Combines DUP and SWAP flags.
     pub dup_swap: T,
-    /// Flag for GET_CONTEXT
-    pub get_context: T,
-    /// Flag for SET_CONTEXT
-    pub set_context: T,
+    /// Combines GET_CONTEXT and SET_CONTEXT flags.
+    pub context_op: T,
     /// Flag for MSTORE_32BYTES.
     pub mstore_32bytes: T,
     /// Flag for MLOAD_32BYTES.
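Each field of OpsColumnsView is a 0/1 flag column in the CPU trace, so merging NOT with POP and GET_CONTEXT with SET_CONTEXT removes two columns. The merge is possible because a per-opcode filter can be re-derived from the shared flag and the low bit of the opcode, exactly as contextops.rs does below for context_op. A toy sketch of that decomposition, with plain integers standing in for field elements (Row and the filter functions are illustrative names, not from the codebase):

/// One row's worth of the two columns involved; values are 0 or 1.
struct Row {
    /// Shared flag: 1 iff the row executes GET_CONTEXT or SET_CONTEXT.
    context_op: u64,
    /// Low bit of the opcode: 0 for GET_CONTEXT, 1 for SET_CONTEXT.
    opcode_bit0: u64,
}

/// 1 exactly on GET_CONTEXT rows: context_op * (1 - bit0).
fn get_context_filter(r: &Row) -> u64 {
    r.context_op * (1 - r.opcode_bit0)
}

/// 1 exactly on SET_CONTEXT rows: context_op * bit0.
fn set_context_filter(r: &Row) -> u64 {
    r.context_op * r.opcode_bit0
}

fn main() {
    let get = Row { context_op: 1, opcode_bit0: 0 };
    let set = Row { context_op: 1, opcode_bit0: 1 };
    assert_eq!((get_context_filter(&get), set_context_filter(&get)), (1, 0));
    assert_eq!((get_context_filter(&set), set_context_filter(&set)), (0, 1));
}

Each derived filter is a product of two trace columns, so constraints built on it multiply by at most one more column, keeping the overall constraint degree low.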
242 changes: 162 additions & 80 deletions evm/src/cpu/contextops.rs
@@ -5,6 +5,7 @@ use plonky2::hash::hash_types::RichField;
 use plonky2::iop::ext_target::ExtensionTarget;
 use plonky2::plonk::circuit_builder::CircuitBuilder;
 
+use super::membus::NUM_GP_CHANNELS;
 use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
 use crate::cpu::columns::CpuColumnsView;
 use crate::cpu::kernel::constants::context_metadata::ContextMetadata;
@@ -16,14 +17,25 @@ fn eval_packed_get<P: PackedField>(
     nv: &CpuColumnsView<P>,
     yield_constr: &mut ConstraintConsumer<P>,
 ) {
-    let filter = lv.op.get_context;
-
     // Ensure that we are pushing the current context.
+    // If the opcode is GET_CONTEXT, then lv.opcode_bits[0] = 0.
+    let filter = lv.op.context_op * (P::ONES - lv.opcode_bits[0]);
     let new_stack_top = nv.mem_channels[0].value;
     yield_constr.constraint(filter * (new_stack_top[0] - lv.context));
     for &limb in &new_stack_top[1..] {
         yield_constr.constraint(filter * limb);
     }
+
+    // Constrain new stack length.
+    yield_constr.constraint(filter * (nv.stack_len - (lv.stack_len + P::ONES)));
+
+    // Unused channels.
+    for i in 1..NUM_GP_CHANNELS {
+        if i != 3 {
+            let channel = lv.mem_channels[i];
+            yield_constr.constraint(filter * channel.used);
+        }
+    }
+    yield_constr.constraint(filter * nv.mem_channels[0].used);
 }
 
 /// Circuit version of `eval_packed_get`.
@@ -34,9 +46,9 @@ fn eval_ext_circuit_get<F: RichField + Extendable<D>, const D: usize>(
     nv: &CpuColumnsView<ExtensionTarget<D>>,
     yield_constr: &mut RecursiveConstraintConsumer<F, D>,
 ) {
-    let filter = lv.op.get_context;
-
-    // Ensure that we are pushing the current context.
+    // If the opcode is GET_CONTEXT, then lv.opcode_bits[0] = 0.
+    let prod = builder.mul_extension(lv.op.context_op, lv.opcode_bits[0]);
+    let filter = builder.sub_extension(lv.op.context_op, prod);
     let new_stack_top = nv.mem_channels[0].value;
     {
         let diff = builder.sub_extension(new_stack_top[0], lv.context);
@@ -47,6 +59,27 @@ fn eval_ext_circuit_get<F: RichField + Extendable<D>, const D: usize>(
         let constr = builder.mul_extension(filter, limb);
         yield_constr.constraint(builder, constr);
     }
+
+    // Constrain new stack length.
+    {
+        let new_len = builder.add_const_extension(lv.stack_len, F::ONE);
+        let diff = builder.sub_extension(nv.stack_len, new_len);
+        let constr = builder.mul_extension(filter, diff);
+        yield_constr.constraint(builder, constr);
+    }
+
+    // Unused channels.
+    for i in 1..NUM_GP_CHANNELS {
+        if i != 3 {
+            let channel = lv.mem_channels[i];
+            let constr = builder.mul_extension(filter, channel.used);
+            yield_constr.constraint(builder, constr);
+        }
+    }
+    {
+        let constr = builder.mul_extension(filter, nv.mem_channels[0].used);
+        yield_constr.constraint(builder, constr);
+    }
 }
 
 /// Evaluates constraints for `SET_CONTEXT`.
@@ -55,7 +88,7 @@ fn eval_packed_set<P: PackedField>(
     nv: &CpuColumnsView<P>,
     yield_constr: &mut ConstraintConsumer<P>,
 ) {
-    let filter = lv.op.set_context;
+    let filter = lv.op.context_op * lv.opcode_bits[0];
     let stack_top = lv.mem_channels[0].value;
     let write_old_sp_channel = lv.mem_channels[1];
     let read_new_sp_channel = lv.mem_channels[2];
@@ -85,34 +118,29 @@ fn eval_packed_set<P: PackedField>(
     yield_constr.constraint(filter * (read_new_sp_channel.addr_segment - ctx_metadata_segment));
     yield_constr.constraint(filter * (read_new_sp_channel.addr_virtual - stack_size_field));
 
-    // The next row's stack top is loaded from memory (if the stack isn't empty).
-    yield_constr.constraint(filter * nv.mem_channels[0].used);
-
-    let read_new_stack_top_channel = lv.mem_channels[3];
-    let stack_segment = P::Scalar::from_canonical_u64(Segment::Stack as u64);
-    let new_filter = filter * nv.stack_len;
-
-    for (limb_channel, limb_top) in read_new_stack_top_channel
-        .value
-        .iter()
-        .zip(nv.mem_channels[0].value)
-    {
-        yield_constr.constraint(new_filter * (*limb_channel - limb_top));
-    }
-    yield_constr.constraint(new_filter * (read_new_stack_top_channel.used - P::ONES));
-    yield_constr.constraint(new_filter * (read_new_stack_top_channel.is_read - P::ONES));
-    yield_constr.constraint(new_filter * (read_new_stack_top_channel.addr_context - nv.context));
-    yield_constr.constraint(new_filter * (read_new_stack_top_channel.addr_segment - stack_segment));
+    // Constrain stack_inv_aux_2.
+    let new_top_channel = nv.mem_channels[0];
     yield_constr.constraint(
-        new_filter * (read_new_stack_top_channel.addr_virtual - (nv.stack_len - P::ONES)),
+        lv.op.context_op
+            * (lv.general.stack().stack_inv_aux * lv.opcode_bits[0]
+                - lv.general.stack().stack_inv_aux_2),
     );
-
-    // If the new stack is empty, disable the channel read.
+    // The new top is loaded in memory channel 3, if the stack isn't empty (see eval_packed).
     yield_constr.constraint(
-        filter * (nv.stack_len * lv.general.stack().stack_inv - lv.general.stack().stack_inv_aux),
+        lv.op.context_op
+            * lv.general.stack().stack_inv_aux_2
+            * (lv.mem_channels[3].value[0] - new_top_channel.value[0]),
     );
-    let empty_stack_filter = filter * (lv.general.stack().stack_inv_aux - P::ONES);
-    yield_constr.constraint(empty_stack_filter * read_new_stack_top_channel.used);
+    for &limb in &new_top_channel.value[1..] {
+        yield_constr.constraint(lv.op.context_op * lv.general.stack().stack_inv_aux_2 * limb);
+    }
+
+    // Unused channels.
+    for i in 4..NUM_GP_CHANNELS {
+        let channel = lv.mem_channels[i];
+        yield_constr.constraint(filter * channel.used);
+    }
+    yield_constr.constraint(filter * new_top_channel.used);
 }
 
 /// Circuit version of `eval_packed_set`.
@@ -123,7 +151,7 @@ fn eval_ext_circuit_set<F: RichField + Extendable<D>, const D: usize>(
     nv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
 ) {
-    let filter = lv.op.set_context;
+    let filter = builder.mul_extension(lv.op.context_op, lv.opcode_bits[0]);
     let stack_top = lv.mem_channels[0].value;
     let write_old_sp_channel = lv.mem_channels[1];
     let read_new_sp_channel = lv.mem_channels[2];
@@ -207,66 +235,38 @@ fn eval_ext_circuit_set<F: RichField + Extendable<D>, const D: usize>(
         yield_constr.constraint(builder, constr);
     }
 
-    // The next row's stack top is loaded from memory (if the stack isn't empty).
-    {
-        let constr = builder.mul_extension(filter, nv.mem_channels[0].used);
-        yield_constr.constraint(builder, constr);
-    }
-
-    let read_new_stack_top_channel = lv.mem_channels[3];
-    let stack_segment =
-        builder.constant_extension(F::Extension::from_canonical_u32(Segment::Stack as u32));
-
-    let new_filter = builder.mul_extension(filter, nv.stack_len);
-
-    for (limb_channel, limb_top) in read_new_stack_top_channel
-        .value
-        .iter()
-        .zip(nv.mem_channels[0].value)
-    {
-        let diff = builder.sub_extension(*limb_channel, limb_top);
-        let constr = builder.mul_extension(new_filter, diff);
-        yield_constr.constraint(builder, constr);
-    }
-    {
-        let constr =
-            builder.mul_sub_extension(new_filter, read_new_stack_top_channel.used, new_filter);
-        yield_constr.constraint(builder, constr);
-    }
+    // Constrain stack_inv_aux_2.
+    let new_top_channel = nv.mem_channels[0];
     {
-        let constr =
-            builder.mul_sub_extension(new_filter, read_new_stack_top_channel.is_read, new_filter);
+        let diff = builder.mul_sub_extension(
+            lv.general.stack().stack_inv_aux,
+            lv.opcode_bits[0],
+            lv.general.stack().stack_inv_aux_2,
+        );
+        let constr = builder.mul_extension(lv.op.context_op, diff);
         yield_constr.constraint(builder, constr);
     }
+    // The new top is loaded in memory channel 3, if the stack isn't empty (see eval_packed).
     {
-        let diff = builder.sub_extension(read_new_stack_top_channel.addr_context, nv.context);
-        let constr = builder.mul_extension(new_filter, diff);
-        yield_constr.constraint(builder, constr);
-    }
-    {
-        let diff = builder.sub_extension(read_new_stack_top_channel.addr_segment, stack_segment);
-        let constr = builder.mul_extension(new_filter, diff);
+        let diff = builder.sub_extension(lv.mem_channels[3].value[0], new_top_channel.value[0]);
+        let prod = builder.mul_extension(lv.general.stack().stack_inv_aux_2, diff);
+        let constr = builder.mul_extension(lv.op.context_op, prod);
         yield_constr.constraint(builder, constr);
     }
-    {
-        let diff = builder.sub_extension(nv.stack_len, one);
-        let diff = builder.sub_extension(read_new_stack_top_channel.addr_virtual, diff);
-        let constr = builder.mul_extension(new_filter, diff);
+    for &limb in &new_top_channel.value[1..] {
+        let prod = builder.mul_extension(lv.general.stack().stack_inv_aux_2, limb);
+        let constr = builder.mul_extension(lv.op.context_op, prod);
         yield_constr.constraint(builder, constr);
     }
 
-    // If the new stack is empty, disable the channel read.
-    {
-        let diff = builder.mul_extension(nv.stack_len, lv.general.stack().stack_inv);
-        let diff = builder.sub_extension(diff, lv.general.stack().stack_inv_aux);
-        let constr = builder.mul_extension(filter, diff);
+    // Unused channels.
+    for i in 4..NUM_GP_CHANNELS {
+        let channel = lv.mem_channels[i];
+        let constr = builder.mul_extension(filter, channel.used);
         yield_constr.constraint(builder, constr);
     }
 
     {
-        let empty_stack_filter =
-            builder.mul_sub_extension(filter, lv.general.stack().stack_inv_aux, filter);
-        let constr = builder.mul_extension(empty_stack_filter, read_new_stack_top_channel.used);
+        let constr = builder.mul_extension(filter, new_top_channel.used);
         yield_constr.constraint(builder, constr);
     }
 }
@@ -279,6 +279,33 @@ pub fn eval_packed<P: PackedField>(
 ) {
     eval_packed_get(lv, nv, yield_constr);
     eval_packed_set(lv, nv, yield_constr);
+
+    // Stack constraints.
+    // Both operations use memory channel 3. The operations are similar enough that
+    // we can constrain both at the same time.
+    let filter = lv.op.context_op;
+    let channel = lv.mem_channels[3];
+    // For get_context, we check if lv.stack_len is 0. For set_context, we check if nv.stack_len is 0.
+    // However, for get_context, we can deduce lv.stack_len from nv.stack_len since the operation only pushes.
+    let stack_len = nv.stack_len - (P::ONES - lv.opcode_bits[0]);
+    // Constrain stack_inv_aux. It's 0 if the relevant stack is empty, 1 otherwise.
+    yield_constr.constraint(
+        filter * (stack_len * lv.general.stack().stack_inv - lv.general.stack().stack_inv_aux),
+    );
+    // Enable or disable the channel.
+    yield_constr.constraint(filter * (lv.general.stack().stack_inv_aux - channel.used));
+    let new_filter = filter * lv.general.stack().stack_inv_aux;
+    // It's a write for get_context, a read for set_context.
+    yield_constr.constraint(new_filter * (channel.is_read - lv.opcode_bits[0]));
+    // In both cases, next row's context works.
+    yield_constr.constraint(new_filter * (channel.addr_context - nv.context));
+    // Same segment for both.
+    yield_constr.constraint(
+        new_filter * (channel.addr_segment - P::Scalar::from_canonical_u64(Segment::Stack as u64)),
+    );
+    // The address is one less than stack_len.
+    let addr_virtual = stack_len - P::ONES;
+    yield_constr.constraint(new_filter * (channel.addr_virtual - addr_virtual));
 }
 
 /// Circuit version of `eval_packed`.
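The shared block above rests on one unification: for GET_CONTEXT the emptiness check concerns lv.stack_len, which equals nv.stack_len - 1 because the operation only pushes, while for SET_CONTEXT it concerns nv.stack_len directly. Both cases collapse to nv.stack_len - (1 - opcode_bits[0]). A sketch of that bookkeeping with plain integers (relevant_stack_len is an illustrative name, not from the codebase):

/// Length of the stack whose emptiness matters for memory channel 3.
/// bit0 = 0 (GET_CONTEXT): lv.stack_len, recovered as nv_stack_len - 1.
/// bit0 = 1 (SET_CONTEXT): nv_stack_len itself.
fn relevant_stack_len(nv_stack_len: i64, opcode_bit0: i64) -> i64 {
    nv_stack_len - (1 - opcode_bit0)
}

fn main() {
    // GET_CONTEXT on an empty stack: the next row holds the pushed context, length 1.
    assert_eq!(relevant_stack_len(1, 0), 0);
    // SET_CONTEXT switching to a context whose stack is empty.
    assert_eq!(relevant_stack_len(0, 1), 0);
    // Nonzero lengths enable the channel-3 access.
    assert_eq!(relevant_stack_len(5, 0), 4);
    assert_eq!(relevant_stack_len(5, 1), 5);
}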
@@ -291,4 +318,59 @@ pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
 ) {
     eval_ext_circuit_get(builder, lv, nv, yield_constr);
     eval_ext_circuit_set(builder, lv, nv, yield_constr);
+
+    // Stack constraints.
+    // Both operations use memory channel 3. The operations are similar enough that
+    // we can constrain both at the same time.
+    let filter = lv.op.context_op;
+    let channel = lv.mem_channels[3];
+    // For get_context, we check if lv.stack_len is 0. For set_context, we check if nv.stack_len is 0.
+    // However, for get_context, we can deduce lv.stack_len from nv.stack_len since the operation only pushes.
+    let diff = builder.add_const_extension(lv.opcode_bits[0], -F::ONE);
+    let stack_len = builder.add_extension(nv.stack_len, diff);
+    // Constrain stack_inv_aux. It's 0 if the relevant stack is empty, 1 otherwise.
+    {
+        let diff = builder.mul_sub_extension(
+            stack_len,
+            lv.general.stack().stack_inv,
+            lv.general.stack().stack_inv_aux,
+        );
+        let constr = builder.mul_extension(filter, diff);
+        yield_constr.constraint(builder, constr);
+    }
+    // Enable or disable the channel.
+    {
+        let diff = builder.sub_extension(lv.general.stack().stack_inv_aux, channel.used);
+        let constr = builder.mul_extension(filter, diff);
+        yield_constr.constraint(builder, constr);
+    }
+    let new_filter = builder.mul_extension(filter, lv.general.stack().stack_inv_aux);
+    // It's a write for get_context, a read for set_context.
+    {
+        let diff = builder.sub_extension(channel.is_read, lv.opcode_bits[0]);
+        let constr = builder.mul_extension(new_filter, diff);
+        yield_constr.constraint(builder, constr);
+    }
+    // In both cases, next row's context works.
+    {
+        let diff = builder.sub_extension(channel.addr_context, nv.context);
+        let constr = builder.mul_extension(new_filter, diff);
+        yield_constr.constraint(builder, constr);
+    }
+    // Same segment for both.
+    {
+        let diff = builder.add_const_extension(
+            channel.addr_segment,
+            -F::from_canonical_u64(Segment::Stack as u64),
+        );
+        let constr = builder.mul_extension(new_filter, diff);
+        yield_constr.constraint(builder, constr);
+    }
+    // The address is one less than stack_len.
+    {
+        let addr_virtual = builder.add_const_extension(stack_len, -F::ONE);
+        let diff = builder.sub_extension(channel.addr_virtual, addr_virtual);
+        let constr = builder.mul_extension(new_filter, diff);
+        yield_constr.constraint(builder, constr);
+    }
 }
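The stack_inv / stack_inv_aux pair used above is the usual field nonzero-test gadget: the prover commits an inverse candidate stack_inv, and the constraint filter * (stack_len * stack_inv - stack_inv_aux) = 0 ties stack_inv_aux to the product. If stack_len is 0 the product vanishes and the channel is forced off; if stack_len is nonzero, the honest prover supplies the inverse of stack_len so that stack_inv_aux = 1 and the channel constraints take effect. A sketch of the witness generation over plonky2's Goldilocks modulus (the mul/inv helpers and stack_inv_columns are illustrative, not the crate's API):

/// The Goldilocks prime 2^64 - 2^32 + 1 used by plonky2.
const P: u64 = 0xffff_ffff_0000_0001;

fn mul(a: u64, b: u64) -> u64 {
    ((a as u128 * b as u128) % P as u128) as u64
}

/// Modular inverse via Fermat's little theorem: a^(P-2) mod P.
fn inv(a: u64) -> u64 {
    let (mut base, mut exp, mut acc) = (a, P - 2, 1u64);
    while exp > 0 {
        if exp & 1 == 1 {
            acc = mul(acc, base);
        }
        base = mul(base, base);
        exp >>= 1;
    }
    acc
}

/// What the prover writes into the two columns for a given stack length.
fn stack_inv_columns(stack_len: u64) -> (u64, u64) {
    let stack_inv = if stack_len == 0 { 0 } else { inv(stack_len) };
    let stack_inv_aux = mul(stack_len, stack_inv); // 0 if empty, 1 otherwise
    (stack_inv, stack_inv_aux)
}

fn main() {
    for len in [0u64, 1, 7, 1000] {
        let (stack_inv, aux) = stack_inv_columns(len);
        // The committed values satisfy the constraint len * inv - aux = 0 ...
        assert_eq!(mul(len, stack_inv), aux);
        // ... and aux acts as the nonzero indicator that gates channel 3.
        assert_eq!(aux, u64::from(len != 0));
    }
}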