From 6dcbd541ac26649e42fc69188dcb64e8d227d7b0 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Tue, 5 Nov 2024 11:12:03 -0600
Subject: [PATCH] GH-984 Update code_cache_sync to honor whitelist and add tests

---
 .../webassembly/eos-vm-oc/code_cache.hpp      | 14 +++---
 .../chain/webassembly/runtimes/eos-vm-oc.cpp  |  5 +-
 .../runtimes/eos-vm-oc/code_cache.cpp         |  9 ++--
 unittests/eosvmoc_limits_tests.cpp            | 48 ++++++++++++-------
 4 files changed, 47 insertions(+), 29 deletions(-)

diff --git a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp
index 8637383008..1097229574 100644
--- a/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp
+++ b/libraries/chain/include/eosio/chain/webassembly/eos-vm-oc/code_cache.hpp
@@ -57,6 +57,13 @@ class code_cache_base {
 
       void free_code(const digest_type& code_id, const uint8_t& vm_version);
 
+      // mode for get_descriptor_for_code calls
+      struct mode {
+         bool whitelisted = false;
+         bool high_priority = false;
+         bool write_window = true;
+      };
+
       // get_descriptor_for_code failure reasons
       enum class get_cd_failure {
          temporary, // oc compile not done yet, users like read-only trxs can retry
@@ -117,11 +124,6 @@ class code_cache_async : public code_cache_base {
       code_cache_async(const std::filesystem::path& data_dir, const eosvmoc::config& eosvmoc_config, const chainbase::database& db);
       ~code_cache_async();
 
-      struct mode {
-         bool whitelisted = false;
-         bool high_priority = false;
-         bool write_window = true;
-      };
       //If code is in cache: returns pointer & bumps to front of MRU list
       //If code is not in cache, and not blacklisted, and not currently compiling: return nullptr and kick off compile
       //otherwise: return nullptr
@@ -142,7 +144,7 @@ class code_cache_sync : public code_cache_base {
       ~code_cache_sync();
 
       //Can still fail and return nullptr if, for example, there is an expected instantiation failure
-      const code_descriptor* const get_descriptor_for_code_sync(const digest_type& code_id, const uint8_t& vm_version, bool is_write_window);
+      const code_descriptor* const get_descriptor_for_code_sync(mode m, const digest_type& code_id, const uint8_t& vm_version);
 };
 
 }}}
diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc.cpp
index 3a20d83e7c..2e3edee4ff 100644
--- a/libraries/chain/webassembly/runtimes/eos-vm-oc.cpp
+++ b/libraries/chain/webassembly/runtimes/eos-vm-oc.cpp
@@ -28,7 +28,10 @@ class eosvmoc_instantiated_module : public wasm_instantiated_module_interface {
       bool is_main_thread() { return _main_thread_id == std::this_thread::get_id(); };
 
       void apply(apply_context& context) override {
-         const code_descriptor* const cd = _eosvmoc_runtime.cc.get_descriptor_for_code_sync(_code_hash, _vm_version, context.control.is_write_window());
+         eosio::chain::eosvmoc::code_cache_sync::mode m;
+         m.whitelisted = context.is_eos_vm_oc_whitelisted();
+         m.write_window = context.control.is_write_window();
+         const code_descriptor* const cd = _eosvmoc_runtime.cc.get_descriptor_for_code_sync(m, _code_hash, _vm_version);
          EOS_ASSERT(cd, wasm_execution_error, "EOS VM OC instantiation failed");
 
          if ( is_main_thread() )
diff --git a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp
index ff053479f3..4ce5d0a2e8 100644
--- a/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp
+++ b/libraries/chain/webassembly/runtimes/eos-vm-oc/code_cache.cpp
@@ -198,15 +198,15 @@ code_cache_sync::~code_cache_sync() {
       elog("unexpected response from EOS VM OC compile monitor during shutdown");
 }
 
-const code_descriptor* const code_cache_sync::get_descriptor_for_code_sync(const digest_type& code_id, const uint8_t& vm_version, bool is_write_window) {
+const code_descriptor* const code_cache_sync::get_descriptor_for_code_sync(mode m, const digest_type& code_id, const uint8_t& vm_version) {
    //check for entry in cache
    code_cache_index::index<by_hash>::type::iterator it = _cache_index.get<by_hash>().find(boost::make_tuple(code_id, vm_version));
    if(it != _cache_index.get<by_hash>().end()) {
-      if (is_write_window)
+      if (m.write_window)
          _cache_index.relocate(_cache_index.begin(), _cache_index.project<0>(it));
       return &*it;
    }
-   if(!is_write_window)
+   if(!m.write_window)
       return nullptr;
 
    const code_object* const codeobject = _db.find<code_object,by_code_hash>(boost::make_tuple(code_id, 0, vm_version));
@@ -216,7 +216,8 @@ const code_descriptor* const code_cache_sync::get_descriptor_for_code_sync(const
    std::vector<wrapped_fd> fds_to_pass;
    fds_to_pass.emplace_back(memfd_for_bytearray(codeobject->code));
-   write_message_with_fds(_compile_monitor_write_socket, compile_wasm_message{ {code_id, vm_version}, std::optional<subjective_compile_limits>{} }, fds_to_pass);
+   auto msg = compile_wasm_message{ {code_id, vm_version}, !m.whitelisted ? _eosvmoc_config.non_whitelisted_limits : std::optional<subjective_compile_limits>{} };
+   write_message_with_fds(_compile_monitor_write_socket, msg, fds_to_pass);
 
    auto [success, message, fds] = read_message_with_fds(_compile_monitor_read_socket);
    EOS_ASSERT(success, wasm_execution_error, "failed to read response from monitor process");
    EOS_ASSERT(std::holds_alternative<wasm_compilation_result_message>(message), wasm_execution_error, "unexpected response from monitor process");
diff --git a/unittests/eosvmoc_limits_tests.cpp b/unittests/eosvmoc_limits_tests.cpp
index 3c3a32506c..21cbd43892 100644
--- a/unittests/eosvmoc_limits_tests.cpp
+++ b/unittests/eosvmoc_limits_tests.cpp
@@ -11,7 +11,8 @@ BOOST_AUTO_TEST_SUITE(eosvmoc_limits_tests)
 
 // common routine to verify wasm_execution_error is raised when a resource
 // limit specified in eosvmoc_config is reached
-void limit_violated_test(const eosvmoc::config& eosvmoc_config) {
+// eosio.* is whitelisted, use a different account to avoid whitelist
+void limit_violated_test(const eosvmoc::config& eosvmoc_config, const std::string& account, bool expect_exception) {
    fc::temp_directory tempdir;
 
    constexpr bool use_genesis = true;
@@ -23,26 +24,34 @@ void limit_violated_test(const eosvmoc::config& eosvmoc_config) {
       use_genesis
    );
 
-   chain.create_accounts({"eosio.token"_n});
-   chain.set_code("eosio.token"_n, test_contracts::eosio_token_wasm());
-   chain.set_abi("eosio.token"_n, test_contracts::eosio_token_abi());
+   name acc = name{account};
+
+   chain.create_accounts({acc});
+   chain.set_code(acc, test_contracts::eosio_token_wasm());
+   chain.set_abi(acc, test_contracts::eosio_token_abi());
 
 #ifdef EOSIO_EOS_VM_OC_RUNTIME_ENABLED
    if (chain.control->is_eos_vm_oc_enabled()) {
-      BOOST_CHECK_EXCEPTION(
-         chain.push_action( "eosio.token"_n, "create"_n, "eosio.token"_n, mvo()
-            ( "issuer", "eosio.token" )
-            ( "maximum_supply", "1000000.00 TOK" )),
-         eosio::chain::wasm_execution_error,
-         [](const eosio::chain::wasm_execution_error& e) {
-            return expect_assert_message(e, "failed to compile wasm");
-         }
-      );
+      if (expect_exception) {
+         BOOST_CHECK_EXCEPTION(
+            chain.push_action( acc, "create"_n, acc, mvo()
+               ( "issuer", account )
+               ( "maximum_supply", "1000000.00 TOK" )),
+            eosio::chain::wasm_execution_error,
+            [](const eosio::chain::wasm_execution_error& e) {
+               return expect_assert_message(e, "failed to compile wasm");
+            }
+         );
+      } else {
+         chain.push_action( acc, "create"_n, acc, mvo()
+            ( "issuer", account )
+            ( "maximum_supply", "1000000.00 TOK" ));
+      }
    } else
 #endif
    {
-      chain.push_action( "eosio.token"_n, "create"_n, "eosio.token"_n, mvo()
-         ( "issuer", "eosio.token" )
+      chain.push_action( acc, "create"_n, acc, mvo()
+         ( "issuer", account )
          ( "maximum_supply", "1000000.00 TOK" )
       );
    }
@@ -107,7 +116,8 @@ BOOST_AUTO_TEST_CASE( vm_limit ) { try {
 
    // set vm_limit to a small value such that it is exceeded
    eosvmoc_config.non_whitelisted_limits.vm_limit = 64u*1024u*1024u;
-   limit_violated_test(eosvmoc_config);
+   limit_violated_test(eosvmoc_config, "test", true);
+   limit_violated_test(eosvmoc_config, "eosio.token", false); // whitelisted account, no exception
 
    // set vm_limit to a large value such that it is not exceeded
    eosvmoc_config.non_whitelisted_limits.vm_limit = 128u*1024u*1024u;
@@ -129,7 +139,8 @@ BOOST_AUTO_TEST_CASE( stack_limit ) { try {
    // The stack size of the compiled WASM in the test is 104.
    // Set stack_size_limit one less than the actual needed stack size
    eosvmoc_config.non_whitelisted_limits.stack_size_limit = 103;
-   limit_violated_test(eosvmoc_config);
+   limit_violated_test(eosvmoc_config, "test", true);
+   limit_violated_test(eosvmoc_config, "eosio.token", false); // whitelisted account, no exception
 
    // set stack_size_limit to the actual needed stack size
    eosvmoc_config.non_whitelisted_limits.stack_size_limit = 104;
@@ -145,7 +156,8 @@ BOOST_AUTO_TEST_CASE( generated_code_size_limit ) { try {
   // berth to work on. As a single data point, LLVM11 used in reproducible builds during
   // Spring 1.0 timeframe was 36856
   eosvmoc_config.non_whitelisted_limits.generated_code_size_limit = 20*1024;
-  limit_violated_test(eosvmoc_config);
+  limit_violated_test(eosvmoc_config, "test", true);
+  limit_violated_test(eosvmoc_config, "eosio.token", false); // whitelisted account, no exception
 
   eosvmoc_config.non_whitelisted_limits.generated_code_size_limit = 40*1024;
   limit_not_violated_test(eosvmoc_config);
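
Illustrative note (not part of the patch): the behavioral core of the change is that a whitelisted account's compile request carries no subjective limits, while every other account gets the configured non_whitelisted_limits. The self-contained C++ sketch below shows that selection logic in isolation. Only `mode` mirrors the struct added to code_cache.hpp; `subjective_limits` and `limits_for` are hypothetical stand-in names for this example, not names taken from the patch.

// Minimal sketch of the limit-selection behavior introduced above.
// "subjective_limits" and "limits_for" are hypothetical names for illustration;
// "mode" mirrors code_cache_base::mode from the patch.
#include <cstdint>
#include <iostream>
#include <optional>

struct subjective_limits {            // stand-in for the configured non-whitelisted limits
   std::uint64_t vm_limit = 64u * 1024u * 1024u;
};

struct mode {                         // same defaults as the struct added in code_cache.hpp
   bool whitelisted = false;
   bool high_priority = false;
   bool write_window = true;
};

// Pick the limits that accompany a compile request: none when the account is
// whitelisted, the configured limits otherwise (same ternary shape as the patch).
std::optional<subjective_limits> limits_for(const mode& m, const subjective_limits& configured) {
   return !m.whitelisted ? std::optional<subjective_limits>{configured}
                         : std::optional<subjective_limits>{};
}

int main() {
   subjective_limits configured;      // e.g. the small vm_limit set by the unit tests
   mode regular;                      // default-constructed: not whitelisted
   mode whitelisted;
   whitelisted.whitelisted = true;

   std::cout << "regular account limited:     " << limits_for(regular, configured).has_value() << '\n';      // prints 1
   std::cout << "whitelisted account limited: " << limits_for(whitelisted, configured).has_value() << '\n';  // prints 0
}

Usage-wise this matches the updated tests: a non-whitelisted account ("test") trips the small limits and raises wasm_execution_error, while the whitelisted "eosio.token" compiles without them and the action succeeds.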