From 30559d8aec14f7660157bb2df00b44c47f94ab56 Mon Sep 17 00:00:00 2001 From: Alexander Lednev <57529355+iceseer@users.noreply.github.com> Date: Fri, 24 Dec 2021 15:08:04 +0300 Subject: [PATCH 01/14] Feature/healthcheck (#1735) * civetweb as http server Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> * status update Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> * last round + healthy Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> * memory consumption Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> * grpc healthcheck Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> * metrics healthcheck Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> * config Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> * proposal_delay check Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> --- example/config.postgres.sample | 3 +- example/config.sample | 3 +- irohad/CMakeLists.txt | 1 + irohad/http/CMakeLists.txt | 9 + irohad/http/http_server.cpp | 156 ++++++++++++++++++ irohad/http/http_server.hpp | 81 +++++++++ irohad/main/CMakeLists.txt | 2 + irohad/main/application.cpp | 133 ++++++++++++++- irohad/main/application.hpp | 19 +++ irohad/main/impl/on_demand_ordering_init.cpp | 31 ++++ irohad/main/iroha_conf_literals.cpp | 1 + irohad/main/iroha_conf_literals.hpp | 1 + irohad/main/iroha_conf_loader.cpp | 1 + irohad/main/iroha_conf_loader.hpp | 1 + irohad/main/iroha_status.hpp | 32 ++++ irohad/main/subscription_fwd.hpp | 3 + irohad/maintenance/metrics.cpp | 22 +++ irohad/maintenance/metrics.hpp | 9 +- .../ordering/impl/on_demand_ordering_gate.hpp | 4 + .../synchronizer/impl/synchronizer_impl.cpp | 
15 ++ irohad/torii/impl/query_service.cpp | 31 +++- irohad/torii/query_service.hpp | 23 ++- libs/common/mem_operations.hpp | 32 ++++ shared_model/schema/endpoint.proto | 1 + shared_model/schema/qry_responses.proto | 18 ++ .../integration_test_framework.cpp | 8 +- test/fuzzing/find_fuzz.cpp | 3 +- .../module/irohad/synchronizer/CMakeLists.txt | 1 + .../irohad/torii/query_service_test.cpp | 3 +- .../irohad/torii/torii_queries_test.cpp | 3 +- .../irohad/torii/torii_service_query_test.cpp | 2 +- vcpkg/VCPKG_DEPS_LIST | 1 + 32 files changed, 631 insertions(+), 22 deletions(-) create mode 100644 irohad/http/CMakeLists.txt create mode 100644 irohad/http/http_server.cpp create mode 100644 irohad/http/http_server.hpp create mode 100644 irohad/main/iroha_status.hpp diff --git a/example/config.postgres.sample b/example/config.postgres.sample index ea47dff255b..d356b6f50ee 100644 --- a/example/config.postgres.sample +++ b/example/config.postgres.sample @@ -18,5 +18,6 @@ "mst_expiration_time" : 1440, "max_rounds_delay": 3000, "stale_stream_max_rounds": 2, - "metrics": "127.0.0.1:8080" + "metrics": "127.0.0.1:8080", + "healthcheck_port": 50508 } diff --git a/example/config.sample b/example/config.sample index f99de708f27..9289a65e8fe 100644 --- a/example/config.sample +++ b/example/config.sample @@ -12,5 +12,6 @@ "mst_expiration_time": 1440, "max_rounds_delay": 3000, "stale_stream_max_rounds": 2, - "metrics": "0.0.0.0:7001" + "metrics": "0.0.0.0:7001", + "healthcheck_port": 50508 } diff --git a/irohad/CMakeLists.txt b/irohad/CMakeLists.txt index 2531e11259f..483fd893999 100644 --- a/irohad/CMakeLists.txt +++ b/irohad/CMakeLists.txt @@ -19,3 +19,4 @@ add_subdirectory(util) add_subdirectory(maintenance) add_subdirectory(iroha_migrate) add_subdirectory(iroha_wsv_diff) +add_subdirectory(http) diff --git a/irohad/http/CMakeLists.txt b/irohad/http/CMakeLists.txt new file mode 100644 index 00000000000..fe1720745d8 --- /dev/null +++ b/irohad/http/CMakeLists.txt @@ -0,0 +1,9 @@ +# +# 
Copyright Soramitsu Co., Ltd. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 +# + +add_library(iroha_http_server http_server.cpp) +target_link_libraries(iroha_http_server + civetweb::civetweb + ) diff --git a/irohad/http/http_server.cpp b/irohad/http/http_server.cpp new file mode 100644 index 00000000000..9dcff80aa36 --- /dev/null +++ b/irohad/http/http_server.cpp @@ -0,0 +1,156 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "http/http_server.hpp" + +#include +#include +#include "CivetServer.h" + +#include "common/mem_operations.hpp" +#include "logger/logger.hpp" + +namespace iroha::network { + std::string HttpServer::Options::toString() const { + return fmt::format("Options [ports:{}, request_timeout_ms: {}]", + ports, + request_timeout_ms); + } + + HttpRequestResponse::HttpRequestResponse(mg_connection *connection, + mg_request_info const *request_info) + : connection_(connection), request_info_(request_info) {} + + std::optional HttpRequestResponse::init() { + if (0 == strcmp(request_info_->request_method, "GET")) { + method_ = eMethodType::kGet; + } + + /** + * Uncomment for PUT, POST and DELETE processing. 
+ */ + /* else if (0 == strcmp(request_info_->request_method, "PUT")) { + method_ = eMethodType::kPut; + } else if (0 == strcmp(request_info_->request_method, "POST")) { + method_ = eMethodType::kPost; + } else if (0 == strcmp(request_info_->request_method, "DELETE")) { + method_ = eMethodType::kDelete; + } */ + else { + mg_send_http_error(connection_, 405, "Only GET method supported"); + return 405; + } + return std::nullopt; + } + + bool HttpRequestResponse::setJsonResponse(std::string_view data) { + if (!method_) + return false; + + mg_send_http_ok( + connection_, "application/json; charset=utf-8", (long long)data.size()); + mg_write(connection_, data.data(), data.size()); + return true; + } + + eMethodType HttpRequestResponse::getMethodType() const { + assert(method_); + return *method_; + } + + HttpServer::HttpServer(Options options, logger::LoggerPtr logger) + : context_(nullptr), + options_(std::move(options)), + logger_(std::move(logger)) {} + + HttpServer::~HttpServer() { + stop(); + } + + bool HttpServer::start() { + if (context_ != nullptr) { + logger_->error("Http server already started."); + return false; + } + + if (options_.ports.empty()) { + logger_->error("Http server ports are not defined."); + return false; + } + + logger_->info("Try to start Http server with options: {}", options_); + mg_init_library(0); + + mg_callbacks callbacks{}; + callbacks.log_message = [](const struct mg_connection *conn, + const char *message) { return 1; }; + + const char *options[] = {"listening_ports", + options_.ports.data(), + "request_timeout_ms", + options_.request_timeout_ms.empty() + ? "10000" + : options_.request_timeout_ms.data(), + nullptr}; + + context_ = mg_start(&callbacks, nullptr, options); + if (nullptr == context_) { + logger_->error("Cannot start Http server. 
Check options."); + return false; + } + + logger_->info("Http server started successfully"); + return true; + } + + void HttpServer::stop() { + if (context_) { + mg_stop(context_); + context_ = nullptr; + } + mg_exit_library(); + logger_->info("Http server stopped"); + } + + void HttpServer::registerHandler(std::string_view uri, + HandlerCallback &&handler) { + if (uri.empty()) { + logger_->error("URI cannot be empty."); + return; + } + + if (nullptr == context_) { + logger_->error("Server is not started."); + return; + } + + handlers_.emplace_back(std::move(handler), logger_); + mg_set_request_handler( + context_, + uri.data(), + [](struct mg_connection *conn, void *cbdata) { + assert(nullptr != cbdata); + HandlerData &handler = *(HandlerData *)cbdata; + + HttpRequestResponse req_res(conn, mg_get_request_info(conn)); + if (auto code = req_res.init(); code) { + handler.logger->error( + "Init HttpRequestResponse failed with code: {}", *code); + return *code; + } + + if (!handler.callback) { + handler.logger->error("No registered callback"); + mg_send_http_error(conn, 500, "Server error"); + return 500; + } + + handler.callback(req_res); + return 200; + }, + &handlers_.back()); + } + +} // namespace iroha::network diff --git a/irohad/http/http_server.hpp b/irohad/http/http_server.hpp new file mode 100644 index 00000000000..d1f16b18322 --- /dev/null +++ b/irohad/http/http_server.hpp @@ -0,0 +1,81 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_HTTP_SERVER_HPP +#define IROHA_HTTP_SERVER_HPP + +#include +#include +#include +#include +#include +#include + +#include "common/common.hpp" +#include "logger/logger_fwd.hpp" + +struct mg_context; +struct mg_request_info; +struct mg_connection; + +namespace iroha::network { + + enum eMethodType { kGet, kPut, kPost, kDelete }; + constexpr std::string_view kHealthcheckDefaultPort = "50508"; + + class HttpRequestResponse { + mg_connection *connection_; + mg_request_info const *request_info_; + std::optional method_; + + public: + HttpRequestResponse(mg_connection *connection, + mg_request_info const *request_info); + std::optional init(); + + bool setJsonResponse(std::string_view data); + + eMethodType getMethodType() const; + }; + + class HttpServer : utils::NoMove, utils::NoCopy { + public: + using Headers = std::vector>; + using ResponseData = std::string; + using HandlerCallback = std::function; + + struct HandlerData { + HandlerCallback callback; + logger::LoggerPtr logger; + + HandlerData(HandlerCallback c, logger::LoggerPtr l) + : callback(std::move(c)), logger(std::move(l)) {} + }; + + struct Options { + std::string ports; // ex. 
"50500,50501,50502" + std::string request_timeout_ms; // default: 10000 + + std::string toString() const; + }; + + private: + mg_context *context_; + Options options_; + logger::LoggerPtr logger_; + std::list handlers_; + + public: + HttpServer(Options options, logger::LoggerPtr logger); + ~HttpServer(); + + bool start(); + void stop(); + void registerHandler(std::string_view uri, HandlerCallback &&handler); + }; + +} // namespace iroha::network + +#endif // IROHA_HTTP_SERVER_HPP diff --git a/irohad/main/CMakeLists.txt b/irohad/main/CMakeLists.txt index 25eaaf9ab0e..e9a503b0745 100644 --- a/irohad/main/CMakeLists.txt +++ b/irohad/main/CMakeLists.txt @@ -96,6 +96,7 @@ target_link_libraries(application rdb_connection_init generator async_subscription + iroha_http_server ) add_executable(irohad irohad.cpp) @@ -116,6 +117,7 @@ target_link_libraries(irohad pg_connection_init rdb_connection_init maintenance + iroha_http_server ) add_library(iroha_conf_loader iroha_conf_loader.cpp) diff --git a/irohad/main/application.cpp b/irohad/main/application.cpp index c6d1b8a73e5..fbbc6f612d3 100644 --- a/irohad/main/application.cpp +++ b/irohad/main/application.cpp @@ -5,6 +5,10 @@ #include "main/application.hpp" +#include +#include +#include +#include #include #include @@ -40,6 +44,7 @@ #include "main/impl/pg_connection_init.hpp" #include "main/impl/rocksdb_connection_init.hpp" #include "main/impl/storage_init.hpp" +#include "main/iroha_status.hpp" #include "main/server_runner.hpp" #include "main/subscription.hpp" #include "multi_sig_transactions/gossip_propagation_strategy.hpp" @@ -156,11 +161,15 @@ Irohad::Irohad( } Irohad::~Irohad() { + iroha_status_subscription_->unsubscribe(); + if (db_context_ && log_) { RocksDbCommon common(db_context_); common.printStatus(*log_); } - + if (http_server_) { + http_server_->stop(); + } if (consensus_gate) { consensus_gate->stop(); } @@ -174,6 +183,7 @@ Irohad::~Irohad() { * Initializing iroha daemon */ Irohad::RunResult Irohad::init() { + 
IROHA_EXPECTED_ERROR_CHECK(initNodeStatus()); IROHA_EXPECTED_ERROR_CHECK(initSettings()); IROHA_EXPECTED_ERROR_CHECK(initValidatorsConfigs()); IROHA_EXPECTED_ERROR_CHECK(initBatchParser()); @@ -202,6 +212,8 @@ Irohad::RunResult Irohad::init() { // Torii IROHA_EXPECTED_ERROR_CHECK(initTransactionCommandService()); IROHA_EXPECTED_ERROR_CHECK(initQueryService()); + // HTTP + IROHA_EXPECTED_ERROR_CHECK(initHttpServer()); return {}; } @@ -223,6 +235,39 @@ Irohad::RunResult Irohad::resetWsv() { : StorageType::kPostgres); } +/** + * Initialize Iroha status. + */ +Irohad::RunResult Irohad::initNodeStatus() { + iroha_status_subscription_ = SubscriberCreator< + utils::ReadWriteObject, + iroha::IrohaStatus>:: + template create( + iroha::SubscriptionEngineHandlers::kMetrics, + [](utils::ReadWriteObject + &stored_status, + iroha::IrohaStatus new_status) { + stored_status.exclusiveAccess([&](IrohaStoredStatus &status) { + if (new_status.is_healthy) + status.status.is_healthy = new_status.is_healthy; + if (new_status.is_syncing) + status.status.is_syncing = new_status.is_syncing; + if (new_status.memory_consumption) + status.status.memory_consumption = + new_status.memory_consumption; + if (new_status.last_round) + status.status.last_round = new_status.last_round; + + status.serialized_status.Clear(); + }); + }); + + iroha_status_subscription_->get().exclusiveAccess( + [](IrohaStoredStatus &status) { status.status.is_syncing = false; }); + + return {}; +} + /** * Initializing setting query */ @@ -255,6 +300,83 @@ Irohad::RunResult Irohad::initValidatorsConfigs() { return {}; } +/** + * Initializing Http server. + */ +Irohad::RunResult Irohad::initHttpServer() { + iroha::network::HttpServer::Options options; + options.ports = config_.healthcheck_port + ? 
std::to_string(*config_.healthcheck_port) + : iroha::network::kHealthcheckDefaultPort; + + http_server_ = std::make_unique( + std::move(options), log_manager_->getChild("HTTP server")->getLogger()); + http_server_->start(); + + http_server_->registerHandler( + "/healthcheck", + [status_sub(iroha_status_subscription_)]( + iroha::network::HttpRequestResponse &req_res) { + status_sub->get().exclusiveAccess( + [&](iroha::IrohaStoredStatus &status) { + if (0ull == status.serialized_status.GetSize()) { + using namespace rapidjson; + using namespace std; + Writer writer( + status.serialized_status); + + auto setOptBool = [](auto &writer, bool pred, bool value) { + if (pred) + writer.Bool(value); + else + writer.Null(); + }; + + auto setOptUInt64 = + [](auto &writer, bool pred, uint64_t value) { + if (pred) + writer.Int64((int64_t)value); + else + writer.Null(); + }; + + writer.StartObject(); + + writer.Key("memory_consumption"); + setOptUInt64(writer, + status.status.memory_consumption.has_value(), + *status.status.memory_consumption); + + writer.Key("last_block_round"); + setOptUInt64(writer, + status.status.last_round.has_value(), + status.status.last_round->block_round); + + writer.Key("last_reject_round"); + setOptUInt64(writer, + status.status.last_round.has_value(), + status.status.last_round->reject_round); + + writer.Key("is_syncing"); + setOptBool(writer, + status.status.is_syncing.has_value(), + *status.status.is_syncing); + + writer.Key("status"); + setOptBool(writer, + status.status.is_healthy.has_value(), + *status.status.is_healthy); + + writer.EndObject(); + } + req_res.setJsonResponse( + std::string_view(status.serialized_status.GetString(), + status.serialized_status.GetLength())); + }); + }); + return {}; +} + /** * Initializing iroha daemon storage */ @@ -933,11 +1055,13 @@ Irohad::RunResult Irohad::initQueryService() { query_response_factory_, query_service_log_manager->getChild("Processor")->getLogger()); + assert(iroha_status_subscription_); 
query_service = std::make_shared<::torii::QueryService>( query_processor, query_factory, blocks_query_factory, - query_service_log_manager->getLogger()); + query_service_log_manager->getLogger(), + iroha_status_subscription_); log_->info("[Init] => query service"); return {}; @@ -997,6 +1121,11 @@ namespace { * Run iroha daemon */ Irohad::RunResult Irohad::run() { + if (config_.proposal_delay + <= config_.proposal_creation_timeout.value_or(kMaxRoundsDelayDefault)) { + return expected::makeError( + "proposal_delay must be more than proposal_creation_timeout"); + } ordering_init->subscribe([simulator(utils::make_weak(simulator)), consensus_gate(utils::make_weak(consensus_gate)), tx_processor(utils::make_weak(tx_processor)), diff --git a/irohad/main/application.hpp b/irohad/main/application.hpp index 180fa3038d9..84ab16c0f4b 100644 --- a/irohad/main/application.hpp +++ b/irohad/main/application.hpp @@ -12,12 +12,14 @@ #include "consensus/gate_object.hpp" #include "cryptography/crypto_provider/abstract_crypto_model_signer.hpp" #include "cryptography/keypair.hpp" +#include "http/http_server.hpp" #include "interfaces/queries/blocks_query.hpp" #include "interfaces/queries/query.hpp" #include "logger/logger_fwd.hpp" #include "logger/logger_manager_fwd.hpp" #include "main/impl/block_loader_init.hpp" #include "main/iroha_conf_loader.hpp" +#include "main/iroha_status.hpp" #include "main/server_runner.hpp" #include "main/startup_params.hpp" #include "main/subscription_fwd.hpp" @@ -28,6 +30,10 @@ namespace google::protobuf { class Empty; } +namespace evpp::evpphttp { + class Service; +} + namespace iroha { class PendingTransactionStorage; class MstProcessor; @@ -230,8 +236,12 @@ class Irohad { virtual RunResult initSettings(); + virtual RunResult initNodeStatus(); + virtual RunResult initValidatorsConfigs(); + virtual RunResult initHttpServer(); + /** * Initialize WSV restorer */ @@ -378,9 +388,18 @@ class Irohad { std::shared_ptr command_service_transport; + // subscriptions + 
std::shared_ptr, + iroha::IrohaStatus>> + iroha_status_subscription_; + // query service std::shared_ptr query_service; + // Http server + std::unique_ptr http_server_; + // consensus gate std::shared_ptr consensus_gate; diff --git a/irohad/main/impl/on_demand_ordering_init.cpp b/irohad/main/impl/on_demand_ordering_init.cpp index a6373a4c92a..4f7c897248e 100644 --- a/irohad/main/impl/on_demand_ordering_init.cpp +++ b/irohad/main/impl/on_demand_ordering_init.cpp @@ -5,10 +5,12 @@ #include "main/impl/on_demand_ordering_init.hpp" +#include "common/mem_operations.hpp" #include "common/permutation_generator.hpp" #include "interfaces/iroha_internal/block.hpp" #include "logger/logger.hpp" #include "logger/logger_manager.hpp" +#include "main/iroha_status.hpp" #include "main/subscription.hpp" #include "network/impl/client_factory_impl.hpp" #include "ordering/impl/on_demand_common.hpp" @@ -145,6 +147,35 @@ OnDemandOrderingInit::initOrderingGate( std::move(tx_cache), max_number_of_transactions, ordering_log_manager); + + getSubscription()->dispatcher()->repeat( + iroha::SubscriptionEngineHandlers::kMetrics, + std::max(delay * 4, std::chrono::milliseconds(1000ull)), + [round(consensus::Round(0ull, 0ull)), + wgate(utils::make_weak(ordering_gate_))]() mutable { + if (auto gate = wgate.lock()) { + auto const new_round = gate->getRound(); + iroha::IrohaStatus status; + status.is_healthy = (new_round != round); + status.last_round = new_round; + iroha::getSubscription()->notify(iroha::EventTypes::kOnIrohaStatus, + status); + round = new_round; + } + }, + [wgate(utils::make_weak(ordering_gate_))]() { return !wgate.expired(); }); + + getSubscription()->dispatcher()->repeat( + iroha::SubscriptionEngineHandlers::kMetrics, + std::chrono::minutes(1ull), + []() { + iroha::IrohaStatus status; + status.memory_consumption = getMemoryUsage(); + iroha::getSubscription()->notify(iroha::EventTypes::kOnIrohaStatus, + status); + }, + []() { return true; }); + return ordering_gate_; } diff --git 
a/irohad/main/iroha_conf_literals.cpp b/irohad/main/iroha_conf_literals.cpp index b3b2350dcd7..bf7752486ab 100644 --- a/irohad/main/iroha_conf_literals.cpp +++ b/irohad/main/iroha_conf_literals.cpp @@ -31,6 +31,7 @@ namespace config_members { const char *MaxProposalSize = "max_proposal_size"; const char *ProposalDelay = "proposal_delay"; const char *ProposalCreationTimeout = "proposal_creation_timeout"; + const char *HealthcheckPort = "healthcheck_port"; const char *VoteDelay = "vote_delay"; const char *MstSupport = "mst_enable"; const char *MstExpirationTime = "mst_expiration_time"; diff --git a/irohad/main/iroha_conf_literals.hpp b/irohad/main/iroha_conf_literals.hpp index 1f0f211ed25..308091ec2c4 100644 --- a/irohad/main/iroha_conf_literals.hpp +++ b/irohad/main/iroha_conf_literals.hpp @@ -37,6 +37,7 @@ namespace config_members { extern const char *MaxProposalSize; extern const char *ProposalDelay; extern const char *ProposalCreationTimeout; + extern const char *HealthcheckPort; extern const char *VoteDelay; extern const char *MstSupport; extern const char *MstExpirationTime; diff --git a/irohad/main/iroha_conf_loader.cpp b/irohad/main/iroha_conf_loader.cpp index 72db6fcb29c..03aab5f1c8a 100644 --- a/irohad/main/iroha_conf_loader.cpp +++ b/irohad/main/iroha_conf_loader.cpp @@ -692,6 +692,7 @@ inline bool JsonDeserializerImpl::loadInto(IrohadConfig &dest) { and getDictChild(ProposalDelay).loadInto(dest.proposal_delay) and getDictChild(ProposalCreationTimeout) .loadInto(dest.proposal_creation_timeout) + and getDictChild(HealthcheckPort).loadInto(dest.healthcheck_port) and getDictChild(VoteDelay).loadInto(dest.vote_delay) and getDictChild(MstSupport).loadInto(dest.mst_support) and getDictChild(MstExpirationTime).loadInto(dest.mst_expiration_time) diff --git a/irohad/main/iroha_conf_loader.hpp b/irohad/main/iroha_conf_loader.hpp index 655c151c586..17db4111108 100644 --- a/irohad/main/iroha_conf_loader.hpp +++ b/irohad/main/iroha_conf_loader.hpp @@ -67,6 +67,7 @@ 
struct IrohadConfig { boost::optional mst_expiration_time; boost::optional max_round_delay_ms; boost::optional proposal_creation_timeout; + boost::optional healthcheck_port; boost::optional stale_stream_max_rounds; boost::optional logger_manager; std::optional initial_peers; diff --git a/irohad/main/iroha_status.hpp b/irohad/main/iroha_status.hpp new file mode 100644 index 00000000000..bf9f9803485 --- /dev/null +++ b/irohad/main/iroha_status.hpp @@ -0,0 +1,32 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_STATUS_HPP +#define IROHA_STATUS_HPP + +#include +#include +#include +#include + +#include "consensus/round.hpp" + +namespace iroha { + + struct IrohaStatus { + std::optional memory_consumption; + std::optional last_round; + std::optional is_syncing; + std::optional is_healthy; + }; + + struct IrohaStoredStatus { + IrohaStatus status; + rapidjson::StringBuffer serialized_status; + }; + +} // namespace iroha + +#endif // IROHA_STATUS_HPP diff --git a/irohad/main/subscription_fwd.hpp b/irohad/main/subscription_fwd.hpp index 19fc17b4089..a17268c3387 100644 --- a/irohad/main/subscription_fwd.hpp +++ b/irohad/main/subscription_fwd.hpp @@ -43,6 +43,9 @@ namespace iroha { kOnConsensusGateEvent, kSendBatchComplete, + // Node status + kOnIrohaStatus, + // MST kOnMstStateUpdate, kOnMstPreparedBatches, diff --git a/irohad/maintenance/metrics.cpp b/irohad/maintenance/metrics.cpp index 86de213c430..03c09715c3d 100644 --- a/irohad/maintenance/metrics.cpp +++ b/irohad/maintenance/metrics.cpp @@ -139,6 +139,28 @@ Metrics::Metrics(std::string const &listen_addr, }); ///////////////////////////// + auto &is_syncing_state = BuildGauge() + .Name("is_syncing_state") + .Help("Iroha is syncing state") + .Register(*registry_) + .Add({}); + + auto &is_healthy = BuildGauge() + .Name("is_healthy") + .Help("Iroha is healthy status") + .Register(*registry_) + .Add({}); + + iroha_status_subscription_ = + 
SubscriberCreator::template create< + EventTypes::kOnIrohaStatus>( + iroha::SubscriptionEngineHandlers::kMetrics, + [&](bool, iroha::IrohaStatus new_status) { + is_syncing_state.Set( + new_status.is_syncing && *new_status.is_syncing ? 1 : 0); + is_healthy.Set(new_status.is_healthy && *new_status.is_healthy ? 1 + : 0); + }); auto &number_of_pending_mst_batches = BuildGauge() diff --git a/irohad/maintenance/metrics.hpp b/irohad/maintenance/metrics.hpp index f13c3d9cc2e..bc40df5e603 100644 --- a/irohad/maintenance/metrics.hpp +++ b/irohad/maintenance/metrics.hpp @@ -20,6 +20,7 @@ #include "interfaces/common_objects/types.hpp" #include "interfaces/iroha_internal/block.hpp" #include "logger/logger_fwd.hpp" +#include "main/iroha_status.hpp" #include "main/subscription.hpp" #include "network/ordering_gate_common.hpp" @@ -29,7 +30,7 @@ class Metrics : public std::enable_shared_from_this { iroha::network::OrderingEvent>; // FixMe subscribtion ≠ subscriber using BlockPtr = std::shared_ptr; using BlockSubscriber = iroha::BaseSubscriber; - using MstMetrics = std::tuple; + using MstMetrics = std::tuple; using MstSubscriber = iroha::BaseSubscriber; std::string listen_addr_port_; @@ -42,6 +43,8 @@ class Metrics : public std::enable_shared_from_this { std::chrono::steady_clock::time_point uptime_start_timepoint_; std::thread uptime_thread_; std::atomic_bool uptime_thread_cancelation_flag_{false}; + std::shared_ptr> + iroha_status_subscription_; Metrics(std::string const &listen_addr, std::shared_ptr storage, @@ -55,9 +58,9 @@ class Metrics : public std::enable_shared_from_this { } template - static std::shared_ptr create(Ts &&...args) { + static std::shared_ptr create(Ts &&... args) { struct Resolver : Metrics { - Resolver(Ts &&...args) : Metrics(std::forward(args)...) {} + Resolver(Ts &&... args) : Metrics(std::forward(args)...) 
{} }; return std::make_shared(std::forward(args)...); } diff --git a/irohad/ordering/impl/on_demand_ordering_gate.hpp b/irohad/ordering/impl/on_demand_ordering_gate.hpp index bdc27cc9d69..0a892ba31b6 100644 --- a/irohad/ordering/impl/on_demand_ordering_gate.hpp +++ b/irohad/ordering/impl/on_demand_ordering_gate.hpp @@ -57,6 +57,10 @@ namespace iroha { void stop() override; + consensus::Round getRound() const { + return current_round_; + } + private: void sendCachedTransactions(); diff --git a/irohad/synchronizer/impl/synchronizer_impl.cpp b/irohad/synchronizer/impl/synchronizer_impl.cpp index ba878a77c40..d9781e4857f 100644 --- a/irohad/synchronizer/impl/synchronizer_impl.cpp +++ b/irohad/synchronizer/impl/synchronizer_impl.cpp @@ -16,6 +16,8 @@ #include "interfaces/common_objects/string_view_types.hpp" #include "interfaces/iroha_internal/block.hpp" #include "logger/logger.hpp" +#include "main/iroha_status.hpp" +#include "main/subscription.hpp" using iroha::synchronizer::SynchronizerImpl; @@ -91,6 +93,19 @@ iroha::ametsuchi::CommitResult SynchronizerImpl::downloadAndCommitMissingBlocks( auto storage = std::move(storage_result).assumeValue(); shared_model::interface::types::HeightType my_height = start_height; + iroha::IrohaStatus status; + status.is_syncing = true; + iroha::getSubscription()->notify(iroha::EventTypes::kOnIrohaStatus, status); + + /// To reset iroha is_syncing status on break loop + std::unique_ptr iroha_status_reseter( + (bool *)0x1, [](bool *) { + iroha::IrohaStatus status; + status.is_syncing = false; + iroha::getSubscription()->notify(iroha::EventTypes::kOnIrohaStatus, + status); + }); + // TODO andrei 17.10.18 IR-1763 Add delay strategy for loading blocks using namespace iroha::expected; for (const auto &public_key : public_keys) { diff --git a/irohad/torii/impl/query_service.cpp b/irohad/torii/impl/query_service.cpp index c320a0e46bb..80560c7ade5 100644 --- a/irohad/torii/impl/query_service.cpp +++ b/irohad/torii/impl/query_service.cpp @@ 
-22,11 +22,15 @@ QueryService::QueryService( std::shared_ptr query_processor, std::shared_ptr query_factory, std::shared_ptr blocks_query_factory, - logger::LoggerPtr log) + logger::LoggerPtr log, + std::shared_ptr, + iroha::IrohaStatus>> iroha_status_subscription) : query_processor_{std::move(query_processor)}, query_factory_{std::move(query_factory)}, blocks_query_factory_{std::move(blocks_query_factory)}, - log_{std::move(log)} {} + log_{std::move(log)}, + iroha_status_subscription_(std::move(iroha_status_subscription)) {} void QueryService::Find(iroha::protocol::Query const &request, iroha::protocol::QueryResponse &response) { @@ -71,6 +75,29 @@ grpc::Status QueryService::Find(grpc::ServerContext *context, return grpc::Status::OK; } +grpc::Status QueryService::Healthcheck( + grpc::ServerContext *context, + const google::protobuf::Empty *request, + iroha::protocol::HealthcheckData *response) { + if (iroha_status_subscription_) + iroha_status_subscription_->get().exclusiveAccess( + [&](iroha::IrohaStoredStatus &status) { + if (status.status.is_syncing) + response->set_is_syncing(*status.status.is_syncing); + if (status.status.is_healthy) + response->set_is_healthy(*status.status.is_healthy); + if (status.status.memory_consumption) + response->set_memory_consumption(*status.status.memory_consumption); + if (status.status.last_round) { + response->set_last_block_height( + status.status.last_round->block_round); + response->set_last_block_reject( + status.status.last_round->reject_round); + } + }); + return grpc::Status::OK; +} + grpc::Status QueryService::FetchCommits( grpc::ServerContext *context, const iroha::protocol::BlocksQuery *request, diff --git a/irohad/torii/query_service.hpp b/irohad/torii/query_service.hpp index 12293baa5dc..d5008ac3f74 100644 --- a/irohad/torii/query_service.hpp +++ b/irohad/torii/query_service.hpp @@ -16,6 +16,8 @@ #include "builders/protobuf/transport_builder.hpp" #include "cache/cache.hpp" #include "logger/logger_fwd.hpp" +#include 
"main/iroha_status.hpp" +#include "main/subscription.hpp" #include "torii/processor/query_processor.hpp" namespace shared_model::interface { @@ -39,10 +41,14 @@ namespace iroha::torii { shared_model::interface::BlocksQuery, iroha::protocol::BlocksQuery>; - QueryService(std::shared_ptr query_processor, - std::shared_ptr query_factory, - std::shared_ptr blocks_query_factory, - logger::LoggerPtr log); + QueryService( + std::shared_ptr query_processor, + std::shared_ptr query_factory, + std::shared_ptr blocks_query_factory, + logger::LoggerPtr log, + std::shared_ptr, + iroha::IrohaStatus>> iroha_status_subscription); QueryService(const QueryService &) = delete; QueryService &operator=(const QueryService &) = delete; @@ -65,6 +71,11 @@ namespace iroha::torii { grpc::ServerWriter<::iroha::protocol::BlockQueryResponse> *writer) override; + grpc::Status Healthcheck( + grpc::ServerContext *context, + const google::protobuf::Empty *request, + iroha::protocol::HealthcheckData *response) override; + private: std::shared_ptr query_processor_; std::shared_ptr query_factory_; @@ -77,6 +88,10 @@ namespace iroha::torii { cache_; logger::LoggerPtr log_; + std::shared_ptr, + iroha::IrohaStatus>> + iroha_status_subscription_; }; } // namespace iroha::torii diff --git a/libs/common/mem_operations.hpp b/libs/common/mem_operations.hpp index 6c738316aa5..a99f04a9ebd 100644 --- a/libs/common/mem_operations.hpp +++ b/libs/common/mem_operations.hpp @@ -6,6 +6,8 @@ #ifndef IROHA_COMMON_MEM_OPERATIONS_HPP #define IROHA_COMMON_MEM_OPERATIONS_HPP +#include +#include #include namespace iroha { @@ -21,6 +23,36 @@ namespace iroha { static_assert(std::is_pod::value, "T must be POD."); std::memcpy(&dst, &src, sizeof(src)); } + +#ifdef __linux__ + inline uint64_t getMemoryUsage() { + auto parseLine = [](char const *line) { + while (!std::isdigit(*line)) ++line; + return (uint64_t)atoll(line); + }; + + uint64_t result {}; + char line[128]; + + std::unique_ptr file(fopen("/proc/self/status", "r"), + 
&fclose); + + constexpr char VM_SZ_FIELD[] = "VmSize:"; + auto const vm_size_len = strlen(VM_SZ_FIELD); + while (fgets(line, sizeof(line), file.get()) != NULL) + if (strncmp(line, VM_SZ_FIELD, vm_size_len) == 0) { + result = parseLine(line + vm_size_len); + break; + } + + return result * 1024ull; + } +#else //__linux__ + inline uint64_t getMemoryUsage() { + return 0ull; + } +#endif //__linux__ + } // namespace iroha #endif // IROHA_COMMON_MEM_OPERATIONS_HPP diff --git a/shared_model/schema/endpoint.proto b/shared_model/schema/endpoint.proto index 56275753d28..480b6c7e6c3 100644 --- a/shared_model/schema/endpoint.proto +++ b/shared_model/schema/endpoint.proto @@ -53,4 +53,5 @@ service CommandService_v1 { service QueryService_v1 { rpc Find (Query) returns (QueryResponse); rpc FetchCommits (BlocksQuery) returns (stream BlockQueryResponse); + rpc Healthcheck(google.protobuf.Empty) returns (HealthcheckData); } diff --git a/shared_model/schema/qry_responses.proto b/shared_model/schema/qry_responses.proto index 8a8f7fe1f4d..bf599f2082f 100644 --- a/shared_model/schema/qry_responses.proto +++ b/shared_model/schema/qry_responses.proto @@ -154,3 +154,21 @@ message BlockQueryResponse { BlockErrorResponse block_error_response = 2; } } + +message HealthcheckData { + oneof opt_memory_consumption { + uint64 memory_consumption = 1; + } + oneof opt_is_healthy { + bool is_healthy = 2; + } + oneof opt_is_syncing { + bool is_syncing = 3; + } + oneof opt_last_block_height { + uint64 last_block_height = 4; + } + oneof opt_last_block_reject { + uint64 last_block_reject = 5; + } +} diff --git a/test/framework/integration_framework/integration_test_framework.cpp b/test/framework/integration_framework/integration_test_framework.cpp index 25c38b7ca98..a5e675b7bb4 100644 --- a/test/framework/integration_framework/integration_test_framework.cpp +++ b/test/framework/integration_framework/integration_test_framework.cpp @@ -303,16 +303,14 @@ IntegrationTestFramework::IntegrationTestFramework( 
cleanup_on_exit_(cleanup_on_exit), db_wsv_path_(std::move(db_wsv_path)), db_store_path_(std::move(db_store_path)) { - // 1 h proposal_timeout results in non-deterministic behavior due to thread - // scheduling and network - config_.proposal_delay = 3600'000; + config_.proposal_delay = 1000; + config_.proposal_creation_timeout = 500; // 100 ms is small delay to avoid unnecessary messages due to eternal voting // and to allow scheduler to switch threads config_.vote_delay = 100; // amount of minutes in a day config_.mst_expiration_time = 24 * 60; config_.max_round_delay_ms = 0; - config_.proposal_creation_timeout = 1000; config_.stale_stream_max_rounds = 2; config_.max_proposal_size = 10; config_.mst_support = mst_support; @@ -400,7 +398,7 @@ void IntegrationTestFramework::printDbStatus() { shared_model::proto::Block IntegrationTestFramework::defaultBlock( const shared_model::crypto::Keypair &key) const { - shared_model::interface::RolePermissionSet all_perms {}; + shared_model::interface::RolePermissionSet all_perms{}; for (size_t i = 0; i < all_perms.size(); ++i) { auto perm = static_cast(i); all_perms.set(perm); diff --git a/test/fuzzing/find_fuzz.cpp b/test/fuzzing/find_fuzz.cpp index afd61a32468..378cafbb6f9 100644 --- a/test/fuzzing/find_fuzz.cpp +++ b/test/fuzzing/find_fuzz.cpp @@ -98,7 +98,8 @@ struct QueryFixture { qry_processor_, query_factory, blocks_query_factory, - logger::getDummyLoggerPtr()); + logger::getDummyLoggerPtr(), + nullptr); } }; diff --git a/test/module/irohad/synchronizer/CMakeLists.txt b/test/module/irohad/synchronizer/CMakeLists.txt index eb1a7604138..8fd52edd747 100644 --- a/test/module/irohad/synchronizer/CMakeLists.txt +++ b/test/module/irohad/synchronizer/CMakeLists.txt @@ -13,4 +13,5 @@ target_link_libraries(synchronizer_test shared_model_default_builders consensus_round test_logger + sync_subscription ) diff --git a/test/module/irohad/torii/query_service_test.cpp b/test/module/irohad/torii/query_service_test.cpp index 
678edde4cea..eef9c50c91c 100644 --- a/test/module/irohad/torii/query_service_test.cpp +++ b/test/module/irohad/torii/query_service_test.cpp @@ -75,7 +75,8 @@ class QueryServiceTest : public ::testing::Test { std::make_shared(query_processor, query_factory, blocks_query_factory, - getTestLogger("QueryService")); + getTestLogger("QueryService"), + nullptr); } std::unique_ptr getResponse() { diff --git a/test/module/irohad/torii/torii_queries_test.cpp b/test/module/irohad/torii/torii_queries_test.cpp index 3e931e3f4bd..b1a6a5a411c 100644 --- a/test/module/irohad/torii/torii_queries_test.cpp +++ b/test/module/irohad/torii/torii_queries_test.cpp @@ -91,7 +91,8 @@ class ToriiQueriesTest : public testing::Test { ->append(std::make_unique(qpi, query_factory, blocks_query_factory, - getTestLogger("QueryService"))) + getTestLogger("QueryService"), + nullptr)) .run() .match([this](auto port) { this->port = port.value; }, [](const auto &err) { FAIL() << err.error; }); diff --git a/test/module/irohad/torii/torii_service_query_test.cpp b/test/module/irohad/torii/torii_service_query_test.cpp index 8ee62799a5b..081a4afa338 100644 --- a/test/module/irohad/torii/torii_service_query_test.cpp +++ b/test/module/irohad/torii/torii_service_query_test.cpp @@ -122,7 +122,7 @@ class ToriiQueryServiceTest : public ::testing::Test { query_processor, query_factory, blocks_query_factory, - getTestLogger("QueryService"))) + getTestLogger("QueryService"), nullptr)) .run() .match([this](auto port) { this->port = port.value; }, [](const auto &err) { FAIL() << err.error; }); diff --git a/vcpkg/VCPKG_DEPS_LIST b/vcpkg/VCPKG_DEPS_LIST index c4826891eef..a62a04431dc 100644 --- a/vcpkg/VCPKG_DEPS_LIST +++ b/vcpkg/VCPKG_DEPS_LIST @@ -18,6 +18,7 @@ boost-format boost-circular-buffer boost-assign boost-uuid +boost-asio boost-accumulators boost-property-tree boost-process From 10b527ccf35b0f771c3931463eccddfedd5d5c21 Mon Sep 17 00:00:00 2001 From: Alexander Lednev <57529355+iceseer@users.noreply.github.com> 
Date: Mon, 27 Dec 2021 21:01:54 +0300 Subject: [PATCH 02/14] Feature/rdb metrics (#1692) * rocksdb metrics Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> --- irohad/ametsuchi/CMakeLists.txt | 1 + .../impl/rocksdb_command_executor.cpp | 19 +++++++ irohad/ametsuchi/impl/rocksdb_common.hpp | 41 +++++++++++++++ irohad/iroha_migrate/iroha_migrate.cpp | 7 +++ irohad/main/rdb_status.hpp | 23 ++++++++ irohad/main/subscription_fwd.hpp | 3 ++ irohad/maintenance/metrics.cpp | 52 +++++++++++++++++++ irohad/maintenance/metrics.hpp | 3 ++ irohad/subscription/sync_dispatcher_impl.hpp | 2 +- .../executor/executor_fixture_param.cpp | 7 ++- .../executor/executor_fixture_param.hpp | 2 + 11 files changed, 157 insertions(+), 3 deletions(-) create mode 100644 irohad/main/rdb_status.hpp diff --git a/irohad/ametsuchi/CMakeLists.txt b/irohad/ametsuchi/CMakeLists.txt index d624204deca..bee6e31d2c1 100644 --- a/irohad/ametsuchi/CMakeLists.txt +++ b/irohad/ametsuchi/CMakeLists.txt @@ -258,4 +258,5 @@ target_link_libraries(ametsuchi_rocksdb shared_model_interfaces_factories rocksdb_indexer shared_model_interfaces + async_subscription ) diff --git a/irohad/ametsuchi/impl/rocksdb_command_executor.cpp b/irohad/ametsuchi/impl/rocksdb_command_executor.cpp index ce2146fcb86..a4e1bbc3484 100644 --- a/irohad/ametsuchi/impl/rocksdb_command_executor.cpp +++ b/irohad/ametsuchi/impl/rocksdb_command_executor.cpp @@ -34,6 +34,8 @@ #include "interfaces/commands/set_setting_value.hpp" #include "interfaces/commands/subtract_asset_quantity.hpp" #include "interfaces/commands/transfer_asset.hpp" +#include "main/rdb_status.hpp" +#include "main/subscription.hpp" using namespace iroha; using namespace iroha::ametsuchi; @@ -53,6 +55,23 @@ RocksDbCommandExecutor::RocksDbCommandExecutor( vm_caller_{vm_caller}, db_transaction_(db_context_) { assert(db_context_); + + getSubscription()->dispatcher()->repeat( + SubscriptionEngineHandlers::kMetrics, + 
std::chrono::seconds(5ull), /// repeat task execution period + [wdb_context_(utils::make_weak(db_context_))]() { + if (auto db_context = wdb_context_.lock()) { + RocksDbCommon common(db_context); + getSubscription()->notify( + EventTypes::kOnRdbStats, + RocksDbStatus{common.propGetBlockCacheCapacity(), + common.propGetBlockCacheUsage(), + common.propGetCurSzAllMemTables(), + common.propGetNumSnapshots(), + common.propGetTotalSSTFilesSize()}); + } + }, + []() { return true; }); } RocksDbCommandExecutor::~RocksDbCommandExecutor() = default; diff --git a/irohad/ametsuchi/impl/rocksdb_common.hpp b/irohad/ametsuchi/impl/rocksdb_common.hpp index d81bfa1a1b7..550f8ff34d0 100644 --- a/irohad/ametsuchi/impl/rocksdb_common.hpp +++ b/irohad/ametsuchi/impl/rocksdb_common.hpp @@ -534,6 +534,24 @@ namespace iroha::ametsuchi { } } + std::optional getPropUInt64(const rocksdb::Slice &property) { + if (transaction_db_) { + uint64_t value; + transaction_db_->GetIntProperty(property, &value); + return value; + } + return std::nullopt; + } + + std::optional getPropStr(const rocksdb::Slice &property) { + if (transaction_db_) { + std::string value; + transaction_db_->GetProperty(property, &value); + return value; + } + return std::nullopt; + } + private: std::unique_ptr transaction_db_; std::optional db_name_; @@ -655,6 +673,29 @@ namespace iroha::ametsuchi { tx_context_->db_port->printStatus(log); } + auto propGetBlockCacheUsage() { + return tx_context_->db_port->getPropUInt64("rocksdb.block-cache-usage"); + } + + auto propGetCurSzAllMemTables() { + return tx_context_->db_port->getPropUInt64( + "rocksdb.cur-size-all-mem-tables"); + } + + auto propGetNumSnapshots() { + return tx_context_->db_port->getPropUInt64("rocksdb.num-snapshots"); + } + + auto propGetTotalSSTFilesSize() { + return tx_context_->db_port->getPropUInt64( + "rocksdb.total-sst-files-size"); + } + + auto propGetBlockCacheCapacity() { + return tx_context_->db_port->getPropUInt64( + "rocksdb.block-cache-capacity"); + } + 
/// Makes commit to DB auto commit() { rocksdb::Status status; diff --git a/irohad/iroha_migrate/iroha_migrate.cpp b/irohad/iroha_migrate/iroha_migrate.cpp index 10935e54201..16e53bd7656 100644 --- a/irohad/iroha_migrate/iroha_migrate.cpp +++ b/irohad/iroha_migrate/iroha_migrate.cpp @@ -45,6 +45,7 @@ #include "validators/default_validator.hpp" #include "validators/protobuf/proto_block_validator.hpp" #include "validators/protobuf/proto_query_validator.hpp" +#include "main/subscription.hpp" #define STR(y) STRH(y) #define STRH(x) #x @@ -280,7 +281,13 @@ expected::Result restoreWsv() { return {}; } +std::shared_ptr subscription_manager; int main(int argc, char *argv[]) try { + subscription_manager = iroha::getSubscription(); + std::unique_ptr keeper((int*)0x01, [](auto *){ + subscription_manager->dispose(); + }); + gflags::SetVersionString("1.2"); gflags::ParseCommandLineFlags(&argc, &argv, true); gflags::SetUsageMessage( diff --git a/irohad/main/rdb_status.hpp b/irohad/main/rdb_status.hpp new file mode 100644 index 00000000000..3be4f4d0175 --- /dev/null +++ b/irohad/main/rdb_status.hpp @@ -0,0 +1,23 @@ +/** + * Copyright Soramitsu Co., Ltd. All Rights Reserved. 
+ * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef IROHA_RDB_STATUS_HPP +#define IROHA_RDB_STATUS_HPP + +#include + +namespace iroha { + + struct RocksDbStatus { + std::optional block_cache_capacity; + std::optional block_cache_usage; + std::optional all_mem_tables_sz; + std::optional num_snapshots; + std::optional sst_files_size; + }; + +} // namespace iroha + +#endif // IROHA_RDB_STATUS_HPP diff --git a/irohad/main/subscription_fwd.hpp b/irohad/main/subscription_fwd.hpp index a17268c3387..b2bd7d11261 100644 --- a/irohad/main/subscription_fwd.hpp +++ b/irohad/main/subscription_fwd.hpp @@ -43,6 +43,9 @@ namespace iroha { kOnConsensusGateEvent, kSendBatchComplete, + // RDB + kOnRdbStats, + // Node status kOnIrohaStatus, diff --git a/irohad/maintenance/metrics.cpp b/irohad/maintenance/metrics.cpp index 03c09715c3d..33f5b346ab2 100644 --- a/irohad/maintenance/metrics.cpp +++ b/irohad/maintenance/metrics.cpp @@ -186,6 +186,58 @@ Metrics::Metrics(std::string const &listen_addr, number_of_pending_mst_transactions.Set(std::get<1>(mstmetr)); }); + //////////////////////////////////////////////////////////// + + auto ¶m_block_cache_cap = BuildGauge() + .Name("rdb_block_cache_capacity") + .Help("RocksDB block cache capacity") + .Register(*registry_) + .Add({}); + + auto ¶m_block_cache_usage = BuildGauge() + .Name("rdb_block_cache_usage") + .Help("RocksDB block cache usage") + .Register(*registry_) + .Add({}); + + auto ¶m_all_mem_tables_sz = BuildGauge() + .Name("rdb_all_mem_tables_sz") + .Help("RocksDB all mem tables size") + .Register(*registry_) + .Add({}); + + auto ¶m_num_snapshots = BuildGauge() + .Name("rdb_num_snapshots") + .Help("RocksDB number of snapshots") + .Register(*registry_) + .Add({}); + + auto ¶m_sst_files_size = BuildGauge() + .Name("rdb_sst_files_size") + .Help("RocksDB SST files size") + .Register(*registry_) + .Add({}); + + rdb_subscriber_ = + SubscriberCreator::template create< + EventTypes::kOnRdbStats>( + SubscriptionEngineHandlers::kMetrics, + 
[&](auto &, iroha::RocksDbStatus status) { + if (status.block_cache_capacity) + param_block_cache_cap.Set(*status.block_cache_capacity); + + if (status.block_cache_usage) + param_block_cache_usage.Set(*status.block_cache_usage); + + if (status.all_mem_tables_sz) + param_all_mem_tables_sz.Set(*status.all_mem_tables_sz); + + if (status.num_snapshots) + param_num_snapshots.Set(*status.num_snapshots); + + if (status.sst_files_size) + param_sst_files_size.Set(*status.sst_files_size); + }); /////////////////////////////// auto calc_uptime_ms = [uptime_start_timepoint_(uptime_start_timepoint_)] { diff --git a/irohad/maintenance/metrics.hpp b/irohad/maintenance/metrics.hpp index bc40df5e603..fd744529ecb 100644 --- a/irohad/maintenance/metrics.hpp +++ b/irohad/maintenance/metrics.hpp @@ -20,6 +20,7 @@ #include "interfaces/common_objects/types.hpp" #include "interfaces/iroha_internal/block.hpp" #include "logger/logger_fwd.hpp" +#include "main/rdb_status.hpp" #include "main/iroha_status.hpp" #include "main/subscription.hpp" #include "network/ordering_gate_common.hpp" @@ -32,6 +33,7 @@ class Metrics : public std::enable_shared_from_this { using BlockSubscriber = iroha::BaseSubscriber; using MstMetrics = std::tuple; using MstSubscriber = iroha::BaseSubscriber; + using RdbSubscriber = iroha::BaseSubscriber; std::string listen_addr_port_; std::shared_ptr exposer_; @@ -39,6 +41,7 @@ class Metrics : public std::enable_shared_from_this { std::shared_ptr storage_; std::shared_ptr block_subscriber_; std::shared_ptr mst_subscriber_; + std::shared_ptr rdb_subscriber_; logger::LoggerPtr logger_; std::chrono::steady_clock::time_point uptime_start_timepoint_; std::thread uptime_thread_; diff --git a/irohad/subscription/sync_dispatcher_impl.hpp b/irohad/subscription/sync_dispatcher_impl.hpp index 136d2b7479d..9755cf33c4d 100644 --- a/irohad/subscription/sync_dispatcher_impl.hpp +++ b/irohad/subscription/sync_dispatcher_impl.hpp @@ -40,7 +40,7 @@ namespace iroha::subscription { 
std::chrono::microseconds timeout, typename Parent::Task &&task, typename Parent::Predicate &&pred) override { - while (!pred || pred()) task(); + if (!pred || pred()) task(); } std::optional bind(std::shared_ptr scheduler) override { diff --git a/test/integration/executor/executor_fixture_param.cpp b/test/integration/executor/executor_fixture_param.cpp index d1d7eb6730a..ab1db516837 100644 --- a/test/integration/executor/executor_fixture_param.cpp +++ b/test/integration/executor/executor_fixture_param.cpp @@ -10,6 +10,9 @@ using namespace executor_testing; ExecutorTestParam::ExecutorTestParam() - : vm_caller_(std::make_unique()) {} + : vm_caller_(std::make_unique()), + subscription_manager_(iroha::getSubscription()) {} -ExecutorTestParam::~ExecutorTestParam() = default; +ExecutorTestParam::~ExecutorTestParam() { + subscription_manager_->dispose(); +} diff --git a/test/integration/executor/executor_fixture_param.hpp b/test/integration/executor/executor_fixture_param.hpp index f2d6227f51d..59ec6a21b38 100644 --- a/test/integration/executor/executor_fixture_param.hpp +++ b/test/integration/executor/executor_fixture_param.hpp @@ -10,6 +10,7 @@ #include #include "interfaces/common_objects/types.hpp" +#include "main/subscription.hpp" namespace iroha::ametsuchi { class BlockIndex; @@ -52,6 +53,7 @@ namespace executor_testing { virtual std::string toString() const = 0; std::unique_ptr vm_caller_; + std::shared_ptr subscription_manager_; }; } // namespace executor_testing From e30070f60af1bed4455b8373cb5f81670be311bf Mon Sep 17 00:00:00 2001 From: Alexander Lednev <57529355+iceseer@users.noreply.github.com> Date: Tue, 28 Dec 2021 11:36:57 +0300 Subject: [PATCH 03/14] Feature/syncing node (#1648) * Syncing node Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> * YAC removed Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> * RocksDB store sync peers Signed-off-by: iceseer 
Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> * Peers refactoring Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> * Ledger state Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> --- .../ametsuchi/impl/mutable_storage_impl.cpp | 23 ++++-- irohad/ametsuchi/impl/peer_query_wsv.cpp | 6 +- irohad/ametsuchi/impl/peer_query_wsv.hpp | 3 +- .../impl/postgres_command_executor.cpp | 72 ++++++++++++++++++- .../impl/postgres_command_executor.hpp | 2 + .../impl/postgres_specific_query_executor.cpp | 13 ++-- .../ametsuchi/impl/postgres_wsv_command.cpp | 14 ++-- irohad/ametsuchi/impl/postgres_wsv_query.cpp | 34 +++++---- irohad/ametsuchi/impl/postgres_wsv_query.hpp | 5 +- .../impl/rocksdb_command_executor.cpp | 49 ++++++++----- irohad/ametsuchi/impl/rocksdb_common.hpp | 70 ++++++++++++++---- .../impl/rocksdb_specific_query_executor.cpp | 31 +++++--- .../ametsuchi/impl/rocksdb_storage_impl.cpp | 7 +- irohad/ametsuchi/impl/rocksdb_wsv_command.cpp | 40 +++++++++-- irohad/ametsuchi/impl/rocksdb_wsv_command.hpp | 2 +- irohad/ametsuchi/impl/rocksdb_wsv_query.cpp | 67 ++++++++++------- irohad/ametsuchi/impl/rocksdb_wsv_query.hpp | 5 +- irohad/ametsuchi/impl/storage_base.cpp | 19 ++--- irohad/ametsuchi/impl/storage_impl.cpp | 7 +- irohad/ametsuchi/ledger_state.hpp | 3 + irohad/ametsuchi/peer_query.hpp | 3 +- irohad/ametsuchi/wsv_query.hpp | 7 +- irohad/consensus/yac/impl/yac.cpp | 10 +-- irohad/consensus/yac/impl/yac_gate_impl.cpp | 4 +- irohad/consensus/yac/yac.hpp | 4 +- irohad/consensus/yac/yac_gate.hpp | 3 +- irohad/main/application.cpp | 9 ++- irohad/main/application.hpp | 2 +- irohad/main/impl/consensus_init.cpp | 6 +- irohad/main/impl/consensus_init.hpp | 3 +- irohad/main/impl/on_demand_ordering_init.cpp | 41 +++++++---- irohad/main/impl/on_demand_ordering_init.hpp | 6 +- irohad/main/impl/pg_connection_init.cpp | 6 ++ irohad/main/iroha_conf_loader.hpp | 
1 + irohad/main/irohad.cpp | 9 +-- irohad/main/server_runner.cpp | 3 +- irohad/maintenance/metrics.cpp | 2 +- .../impl/gossip_propagation_strategy.cpp | 2 +- .../ordering/impl/on_demand_ordering_gate.cpp | 61 +++++++++------- .../ordering/impl/on_demand_ordering_gate.hpp | 16 ++++- shared_model/backend/plain/impl/peer.cpp | 8 ++- shared_model/backend/plain/peer.hpp | 6 +- .../backend/protobuf/common_objects/peer.hpp | 4 ++ .../interfaces/common_objects/impl/peer.cpp | 4 +- .../interfaces/common_objects/peer.hpp | 5 ++ shared_model/schema/primitive.proto | 1 + .../integration_test_framework.cpp | 1 + test/integration/acceptance/add_peer_test.cpp | 4 +- .../acceptance/remove_peer_test.cpp | 4 +- .../irohad/ametsuchi/ametsuchi_test.cpp | 6 +- .../irohad/ametsuchi/mock_peer_query.hpp | 2 +- .../irohad/ametsuchi/mock_wsv_query.hpp | 10 +-- .../irohad/ametsuchi/peer_query_wsv_test.cpp | 27 +++++-- .../ametsuchi/postgres_executor_test.cpp | 6 +- .../postgres_query_executor_test.cpp | 5 +- .../irohad/ametsuchi/rdb_wsv_query_test.cpp | 68 ++++++++++++++++-- .../ametsuchi/rocksdb_executor_test.cpp | 6 +- .../irohad/ametsuchi/wsv_query_test.cpp | 43 ++++++++--- .../consensus/yac/mock_yac_hash_gate.hpp | 1 + .../irohad/consensus/yac/yac_gate_test.cpp | 2 + .../consensus/yac/yac_hash_provider_test.cpp | 18 +++-- .../consensus/yac/yac_rainy_day_test.cpp | 4 +- .../yac/yac_synchronization_test.cpp | 8 ++- .../gossip_propagation_strategy_test.cpp | 6 +- .../irohad/network/block_loader_test.cpp | 2 +- .../ordering/on_demand_ordering_gate_test.cpp | 5 +- .../irohad/simulator/simulator_test.cpp | 4 ++ .../irohad/synchronizer/synchronizer_test.cpp | 32 ++++++--- .../processor/transaction_processor_test.cpp | 6 +- .../validation/chain_validation_test.cpp | 34 ++++++--- test/module/shared_model/interface_mocks.hpp | 1 + 71 files changed, 732 insertions(+), 271 deletions(-) diff --git a/irohad/ametsuchi/impl/mutable_storage_impl.cpp b/irohad/ametsuchi/impl/mutable_storage_impl.cpp index 
50b570b36f7..9435dba032d 100644 --- a/irohad/ametsuchi/impl/mutable_storage_impl.cpp +++ b/irohad/ametsuchi/impl/mutable_storage_impl.cpp @@ -78,14 +78,22 @@ namespace iroha::ametsuchi { block_storage_->insert(block); block_index_->index(*block); - auto opt_ledger_peers = peer_query_->getLedgerPeers(); - if (not opt_ledger_peers) { - log_->error("Failed to get ledger peers!"); - return false; - } + boost::optional< + std::vector>> + opt_ledger_peers[] = {peer_query_->getLedgerPeers(false), + peer_query_->getLedgerPeers(true)}; + + for (auto const &peer_list : opt_ledger_peers) + if (!peer_list) { + log_->error("Failed to get ledger peers!"); + return false; + } ledger_state_ = std::make_shared( - std::move(*opt_ledger_peers), block->height(), block->hash()); + std::move(*(opt_ledger_peers[0])), // peers + std::move(*(opt_ledger_peers[1])), // syncing peers + block->height(), + block->hash()); } return block_applied; @@ -161,7 +169,8 @@ namespace iroha::ametsuchi { try { db_tx_.rollback(); } catch (std::exception &e) { - log_->warn("~MutableStorageImpl(): rollback failed. Reason: {}", e.what()); + log_->warn("~MutableStorageImpl(): rollback failed. 
Reason: {}", + e.what()); } } } diff --git a/irohad/ametsuchi/impl/peer_query_wsv.cpp b/irohad/ametsuchi/impl/peer_query_wsv.cpp index 10765a4663e..bb5f75992b3 100644 --- a/irohad/ametsuchi/impl/peer_query_wsv.cpp +++ b/irohad/ametsuchi/impl/peer_query_wsv.cpp @@ -15,9 +15,9 @@ namespace iroha { PeerQueryWsv::PeerQueryWsv(std::shared_ptr wsv) : wsv_(std::move(wsv)) {} - boost::optional> - PeerQueryWsv::getLedgerPeers() { - return wsv_->getPeers(); + boost::optional> PeerQueryWsv::getLedgerPeers( + bool syncing_peers) { + return wsv_->getPeers(syncing_peers); } boost::optional PeerQueryWsv::getLedgerPeerByPublicKey( diff --git a/irohad/ametsuchi/impl/peer_query_wsv.hpp b/irohad/ametsuchi/impl/peer_query_wsv.hpp index 449d328b30e..d390bfcf7ac 100644 --- a/irohad/ametsuchi/impl/peer_query_wsv.hpp +++ b/irohad/ametsuchi/impl/peer_query_wsv.hpp @@ -29,7 +29,8 @@ namespace iroha { * Fetch peers stored in ledger * @return list of peers in insertion to ledger order */ - boost::optional> getLedgerPeers() override; + boost::optional> getLedgerPeers( + bool syncing_peers) override; /** * Fetch peer with given public key from ledger diff --git a/irohad/ametsuchi/impl/postgres_command_executor.cpp b/irohad/ametsuchi/impl/postgres_command_executor.cpp index 71dd4c4296c..ca757036e71 100644 --- a/irohad/ametsuchi/impl/postgres_command_executor.cpp +++ b/irohad/ametsuchi/impl/postgres_command_executor.cpp @@ -697,6 +697,26 @@ namespace iroha { AND NOT (SELECT * FROM has_root_perm) THEN 2 WHEN NOT (SELECT * FROM has_perm) THEN 2)"}); + add_sync_peer_statements_ = makeCommandStatements( + sql_, + R"( + WITH %s + inserted AS ( + INSERT INTO sync_peer(public_key, address, tls_certificate) + ( + SELECT lower(:pubkey), :address, :tls_certificate + %s + ) RETURNING (1) + ) + SELECT CASE WHEN EXISTS (SELECT * FROM inserted) THEN 0 + %s + ELSE 1 END AS result)", + {(boost::format(R"(has_perm AS (%s),)") + % checkAccountRolePermission(Role::kAddPeer, ":creator")) + .str(), + "WHERE (SELECT * 
FROM has_perm)", + "WHEN NOT (SELECT * from has_perm) THEN 2"}); + compare_and_set_account_detail_statements_ = makeCommandStatements( sql_, R"( @@ -1176,6 +1196,40 @@ namespace iroha { R"( AND (SELECT * FROM has_perm))", R"( WHEN NOT (SELECT * FROM has_perm) THEN 2 )"}); + remove_sync_peer_statements_ = makeCommandStatements( + sql_, + R"( + WITH %s + removed AS ( + DELETE FROM sync_peer WHERE public_key = lower(:pubkey) + %s + RETURNING (1) + ) + SELECT CASE + WHEN EXISTS (SELECT * FROM removed) THEN 0 + %s + ELSE 1 + END AS result)", + {(boost::format(R"( + has_perm AS (%s), + get_peer AS ( + SELECT * from sync_peer WHERE public_key = lower(:pubkey) LIMIT 1 + ), + check_peers AS ( + SELECT 1 WHERE (SELECT COUNT(*) FROM sync_peer) > 0 + ),)") + % checkAccountRolePermission( + Role::kAddPeer, Role::kRemovePeer, ":creator")) + .str(), + R"( + AND (SELECT * FROM has_perm) + AND EXISTS (SELECT * FROM get_peer) + AND EXISTS (SELECT * FROM check_peers))", + R"( + WHEN NOT EXISTS (SELECT * from get_peer) THEN 3 + WHEN NOT EXISTS (SELECT * from check_peers) THEN 4 + WHEN NOT (SELECT * from has_perm) THEN 2)"}); + set_quorum_statements_ = makeCommandStatements( sql_, R"( @@ -1499,8 +1553,12 @@ namespace iroha { bool do_validation) { auto &peer = command.peer(); - StatementExecutor executor( - add_peer_statements_, do_validation, "AddPeer", perm_converter_); + StatementExecutor executor(peer.isSyncingPeer() + ? 
add_sync_peer_statements_ + : add_peer_statements_, + do_validation, + "AddPeer", + perm_converter_); executor.use("creator", creator_account_id); executor.use("address", peer.address()); executor.use("pubkey", peer.pubkey()); @@ -1795,6 +1853,16 @@ namespace iroha { bool do_validation) { auto pubkey = command.pubkey(); + { + StatementExecutor executor(remove_sync_peer_statements_, + do_validation, + "RemovePeer", + perm_converter_); + executor.use("creator", creator_account_id); + executor.use("pubkey", pubkey); + executor.execute(); + } + StatementExecutor executor(remove_peer_statements_, do_validation, "RemovePeer", diff --git a/irohad/ametsuchi/impl/postgres_command_executor.hpp b/irohad/ametsuchi/impl/postgres_command_executor.hpp index fbec9a3c8f7..cd9865d7177 100644 --- a/irohad/ametsuchi/impl/postgres_command_executor.hpp +++ b/irohad/ametsuchi/impl/postgres_command_executor.hpp @@ -255,6 +255,7 @@ namespace iroha { std::unique_ptr add_asset_quantity_statements_; std::unique_ptr add_peer_statements_; + std::unique_ptr add_sync_peer_statements_; std::unique_ptr add_signatory_statements_; std::unique_ptr append_role_statements_; std::unique_ptr @@ -266,6 +267,7 @@ namespace iroha { std::unique_ptr detach_role_statements_; std::unique_ptr grant_permission_statements_; std::unique_ptr remove_peer_statements_; + std::unique_ptr remove_sync_peer_statements_; std::unique_ptr remove_signatory_statements_; std::unique_ptr revoke_permission_statements_; std::unique_ptr set_account_detail_statements_; diff --git a/irohad/ametsuchi/impl/postgres_specific_query_executor.cpp b/irohad/ametsuchi/impl/postgres_specific_query_executor.cpp index 41800853c43..87105ddc92c 100644 --- a/irohad/ametsuchi/impl/postgres_specific_query_executor.cpp +++ b/irohad/ametsuchi/impl/postgres_specific_query_executor.cpp @@ -1481,6 +1481,9 @@ namespace iroha { R"(WITH has_perms AS ({}) SELECT public_key, address, tls_certificate, perm FROM peer RIGHT OUTER JOIN has_perms ON TRUE + UNION + 
SELECT public_key, address, tls_certificate, perm FROM sync_peer + RIGHT OUTER JOIN has_perms ON TRUE )", getAccountRolePermissionCheckSql(Role::kGetPeers)); @@ -1500,12 +1503,10 @@ namespace iroha { if (peer_key and address) { peers.push_back( std::make_shared( - *address, *std::move(peer_key), tls_certificate)); - } else { - log_->error( - "Address or public key not set for some peer!"); - assert(peer_key); - assert(address); + *address, + *std::move(peer_key), + tls_certificate, + false)); } }); } diff --git a/irohad/ametsuchi/impl/postgres_wsv_command.cpp b/irohad/ametsuchi/impl/postgres_wsv_command.cpp index 7d02a0c6041..2f31cdf62f3 100644 --- a/irohad/ametsuchi/impl/postgres_wsv_command.cpp +++ b/irohad/ametsuchi/impl/postgres_wsv_command.cpp @@ -314,8 +314,10 @@ namespace iroha { WsvCommandResult PostgresWsvCommand::insertPeer( const shared_model::interface::Peer &peer) { soci::statement st = sql_.prepare - << "INSERT INTO peer(public_key, address, tls_certificate)" - " VALUES (lower(:pk), :address, :tls_certificate)"; + << fmt::format("INSERT INTO {}(public_key, address, " + "tls_certificate) VALUES (lower(:pk), :address, " + ":tls_certificate)", + peer.isSyncingPeer() ? "sync_peer" : "peer"); st.exchange(soci::use(peer.pubkey())); st.exchange(soci::use(peer.address())); st.exchange(soci::use(peer.tlsCertificate())); @@ -329,8 +331,12 @@ namespace iroha { WsvCommandResult PostgresWsvCommand::deletePeer( const shared_model::interface::Peer &peer) { soci::statement st = sql_.prepare - << "DELETE FROM peer WHERE public_key = lower(:pk) AND address = " - ":address"; + << fmt::format("DELETE FROM {} WHERE public_key = " + "lower(:pk) AND address = :address", + peer.isSyncingPeer() ? 
"sync_peer" : "peer" + + ); + st.exchange(soci::use(peer.pubkey())); st.exchange(soci::use(peer.address())); diff --git a/irohad/ametsuchi/impl/postgres_wsv_query.cpp b/irohad/ametsuchi/impl/postgres_wsv_query.cpp index b25df7b32fc..b3938e9c66d 100644 --- a/irohad/ametsuchi/impl/postgres_wsv_query.cpp +++ b/irohad/ametsuchi/impl/postgres_wsv_query.cpp @@ -17,14 +17,16 @@ namespace { template boost::optional>> - getPeersFromSociRowSet(T &&rowset) { + getPeersFromSociRowSet(T &&rowset, bool syncing_peer) { return iroha::ametsuchi::flatMapValues< std::vector>>( std::forward(rowset), [&](auto &public_key, auto &address, auto &tls_certificate) { return boost::make_optional( - std::make_shared( - address, std::move(public_key), tls_certificate)); + std::make_shared(address, + std::move(public_key), + tls_certificate, + syncing_peer)); }); } } // namespace @@ -69,15 +71,19 @@ namespace iroha { } boost::optional>> - PostgresWsvQuery::getPeers() { + PostgresWsvQuery::getPeers(bool syncing_peers) { using T = boost:: tuple>; auto result = execute([&] { - return (sql_.prepare - << "SELECT public_key, address, tls_certificate FROM peer"); + return ( + sql_.prepare + << (syncing_peers + ? "SELECT public_key, address, tls_certificate FROM " + "sync_peer" + : "SELECT public_key, address, tls_certificate FROM peer")); }); - return getPeersFromSociRowSet(result); + return getPeersFromSociRowSet(result, syncing_peers); } iroha::expected::Result PostgresWsvQuery::count( @@ -92,9 +98,9 @@ namespace iroha { return iroha::expected::makeError(msg); } - iroha::expected::Result - PostgresWsvQuery::countPeers() { - return count("peer"); + iroha::expected::Result PostgresWsvQuery::countPeers( + bool syncing_peers) { + return count(syncing_peers ? 
"sync_peer" : "peer"); } iroha::expected::Result @@ -117,13 +123,13 @@ namespace iroha { std::string target_public_key{public_key}; auto result = execute([&] { return (sql_.prepare << R"( - SELECT public_key, address, tls_certificate - FROM peer - WHERE public_key = :public_key)", + SELECT public_key, address, tls_certificate FROM peer WHERE public_key = :public_key + UNION + SELECT public_key, address, tls_certificate FROM sync_peer WHERE public_key = :public_key)", soci::use(target_public_key, "public_key")); }); - return getPeersFromSociRowSet(result) | [](auto &&peers) + return getPeersFromSociRowSet(result, false) | [](auto &&peers) -> boost::optional< std::shared_ptr> { if (!peers.empty()) { diff --git a/irohad/ametsuchi/impl/postgres_wsv_query.hpp b/irohad/ametsuchi/impl/postgres_wsv_query.hpp index 25ad27bf612..3355a5fee45 100644 --- a/irohad/ametsuchi/impl/postgres_wsv_query.hpp +++ b/irohad/ametsuchi/impl/postgres_wsv_query.hpp @@ -26,9 +26,10 @@ namespace iroha { boost::optional< std::vector>> - getPeers() override; + getPeers(bool syncing_peers) override; - iroha::expected::Result countPeers() override; + iroha::expected::Result countPeers( + bool syncing_peers) override; iroha::expected::Result countDomains() override; iroha::expected::Result countTransactions() override; diff --git a/irohad/ametsuchi/impl/rocksdb_command_executor.cpp b/irohad/ametsuchi/impl/rocksdb_command_executor.cpp index a4e1bbc3484..122df681754 100644 --- a/irohad/ametsuchi/impl/rocksdb_command_executor.cpp +++ b/irohad/ametsuchi/impl/rocksdb_command_executor.cpp @@ -58,7 +58,7 @@ RocksDbCommandExecutor::RocksDbCommandExecutor( getSubscription()->dispatcher()->repeat( SubscriptionEngineHandlers::kMetrics, - std::chrono::seconds(5ull), /// repeat task execution period + std::chrono::seconds(5ull), /// repeat task execution period [wdb_context_(utils::make_weak(db_context_))]() { if (auto db_context = wdb_context_.lock()) { RocksDbCommon common(db_context); @@ -221,23 +221,28 @@ 
RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( toLowerAppend(peer.pubkey(), pk); RDB_ERROR_CHECK(forPeerAddress( - common, pk)); + common, pk, false)); + RDB_ERROR_CHECK(forPeerAddress( + common, pk, true)); - RDB_TRY_GET_VALUE( - opt_peers_count, - forPeersCount(common)); + RDB_TRY_GET_VALUE(opt_peers_count, + forPeersCount( + common, peer.isSyncingPeer())); common.encode((opt_peers_count ? *opt_peers_count : 0ull) + 1ull); - RDB_ERROR_CHECK(forPeersCount(common)); + RDB_ERROR_CHECK( + forPeersCount(common, peer.isSyncingPeer())); /// Store address common.valueBuffer().assign(peer.address()); - RDB_ERROR_CHECK(forPeerAddress(common, pk)); + RDB_ERROR_CHECK( + forPeerAddress(common, pk, peer.isSyncingPeer())); /// Store TLS if present if (peer.tlsCertificate().has_value()) { common.valueBuffer().assign(peer.tlsCertificate().value()); - RDB_ERROR_CHECK(forPeerTLS(common, pk)); + RDB_ERROR_CHECK( + forPeerTLS(common, pk, peer.isSyncingPeer())); } return {}; @@ -668,23 +673,31 @@ RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( std::string pk; toLowerAppend(command.pubkey(), pk); - RDB_ERROR_CHECK( - forPeerAddress(common, pk)); + bool syncing_node = false; + auto res = forPeerAddress( + common, pk, syncing_node); + if (expected::hasError(res)) { + syncing_node = true; + if (res = forPeerAddress( + common, pk, syncing_node); + expected::hasError(res)) + return res.assumeError(); + } - RDB_TRY_GET_VALUE( - opt_peers_count, - forPeersCount(common)); + RDB_TRY_GET_VALUE(opt_peers_count, + forPeersCount( + common, syncing_node)); if (*opt_peers_count == 1ull) return makeError( ErrorCodes::kPeersCountIsNotEnough, "Can not remove last peer {}.", pk); common.encode(*opt_peers_count - 1ull); - RDB_ERROR_CHECK(forPeersCount(common)); + RDB_ERROR_CHECK(forPeersCount(common, syncing_node)); - RDB_ERROR_CHECK( - forPeerAddress(common, pk)); - RDB_ERROR_CHECK( - forPeerTLS(common, pk)); + RDB_ERROR_CHECK(forPeerAddress( + 
common, pk, syncing_node)); + RDB_ERROR_CHECK(forPeerTLS( + common, pk, syncing_node)); return {}; } diff --git a/irohad/ametsuchi/impl/rocksdb_common.hpp b/irohad/ametsuchi/impl/rocksdb_common.hpp index 550f8ff34d0..6160887fed0 100644 --- a/irohad/ametsuchi/impl/rocksdb_common.hpp +++ b/irohad/ametsuchi/impl/rocksdb_common.hpp @@ -37,13 +37,21 @@ * | +- * | +- * | - * +-|WSV|-+-|NETWORK|-+-|PEERS|-+-|ADDRESS|-+- - * | | | +- - * | | | - * | | +-|TLS|-+- - * | | | +- - * | | | - * | | +- + * +-|WSV|-+-|NETWORK|-+-|PEERS|---+-|ADDRESS|-+- + * | | | +- + * | | | + * | | +-|TLS|-+- + * | | | +- + * | | | + * | | +- + * | | + * | +-|S_PEERS|-+-|ADDRESS|-+- + * | | | +- + * | | | + * | | +-|TLS|-+- + * | | | +- + * | | | + * | | +- * | | * | +-|STORE|-+- * | @@ -125,6 +133,7 @@ * ### TRANSACTIONS ## t ### * ### ACCOUNTS ## a ### * ### PEERS ## p ### + * ### S_PEERS ## l ### * ### STATUSES ## u ### * ### DETAILS ## d ### * ### GRANTABLE_PER ## g ### @@ -170,6 +179,7 @@ #define RDB_TRANSACTIONS "t" #define RDB_ACCOUNTS "a" #define RDB_PEERS "p" +#define RDB_S_PEERS "l" #define RDB_STATUSES "u" #define RDB_DETAILS "d" #define RDB_GRANTABLE_PER "g" @@ -219,6 +229,11 @@ namespace iroha::ametsuchi::fmtstrings { static auto constexpr kPathPeers{FMT_STRING( RDB_ROOT /**/ RDB_WSV /**/ RDB_NETWORK /**/ RDB_PEERS /**/ RDB_ADDRESS)}; + // no params + static auto constexpr kPathSPeers{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_NETWORK /**/ RDB_S_PEERS /**/ + RDB_ADDRESS)}; + // domain_id/account_name static auto constexpr kPathSignatories{ FMT_STRING(RDB_PATH_ACCOUNT /**/ RDB_SIGNATORIES)}; @@ -310,11 +325,21 @@ namespace iroha::ametsuchi::fmtstrings { FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_NETWORK /**/ RDB_PEERS /**/ RDB_ADDRESS /**/ RDB_XXX)}; + // pubkey ➡️ address + static auto constexpr kSPeerAddress{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_NETWORK /**/ RDB_S_PEERS /**/ + RDB_ADDRESS /**/ RDB_XXX)}; + // pubkey ➡️ tls static auto constexpr kPeerTLS{ FMT_STRING(RDB_ROOT 
/**/ RDB_WSV /**/ RDB_NETWORK /**/ RDB_PEERS /**/ RDB_TLS /**/ RDB_XXX)}; + // pubkey ➡️ tls + static auto constexpr kSPeerTLS{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_NETWORK /**/ RDB_S_PEERS /**/ + RDB_TLS /**/ RDB_XXX)}; + // domain_id/account_name/grantee_domain_id/grantee_account_name // ➡️ permissions static auto constexpr kGranted{ @@ -349,6 +374,10 @@ namespace iroha::ametsuchi::fmtstrings { FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_NETWORK /**/ RDB_PEERS /**/ RDB_F_PEERS_COUNT)}; + static auto constexpr kSPeersCount{ + FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_NETWORK /**/ RDB_S_PEERS /**/ + RDB_F_PEERS_COUNT)}; + // account ➡️ txs total count static auto constexpr kTxsTotalCount{ FMT_STRING(RDB_ROOT /**/ RDB_WSV /**/ RDB_TRANSACTIONS /**/ @@ -1373,7 +1402,7 @@ namespace iroha::ametsuchi { } /** - * Access to peers count file + * Access to peers and syncing peers count file * @tparam kOp @see kDbOperation * @tparam kSc @see kDbEntry * @param common @see RocksDbCommon @@ -1382,7 +1411,10 @@ namespace iroha::ametsuchi { template inline expected::Result, DbError> forPeersCount( - RocksDbCommon &common) { + RocksDbCommon &common, bool is_syncing_peer) { + if (is_syncing_peer) + return dbCall(common, fmtstrings::kSPeersCount); + return dbCall(common, fmtstrings::kPeersCount); } @@ -1466,33 +1498,45 @@ namespace iroha::ametsuchi { } /** - * Access to peer address file + * Access to peer and syncing peer address file * @tparam kOp @see kDbOperation * @tparam kSc @see kDbEntry * @param common @see RocksDbCommon * @param pubkey public key of the peer + * @param is_sync_peer node mode * @return operation result */ template inline expected::Result, DbError> - forPeerAddress(RocksDbCommon &common, std::string_view pubkey) { + forPeerAddress(RocksDbCommon &common, + std::string_view pubkey, + bool is_sync_peer) { + if (is_sync_peer) + return dbCall( + common, fmtstrings::kSPeerAddress, pubkey); + return dbCall( common, fmtstrings::kPeerAddress, pubkey); } /** - * 
Access to peer TLS file + * Access to peer and syncing peer TLS file * @tparam kOp @see kDbOperation * @tparam kSc @see kDbEntry * @param common @see RocksDbCommon * @param pubkey is a public key of the peer + * @param is_sync_peer node mode * @return operation result */ template inline expected::Result, DbError> forPeerTLS( - RocksDbCommon &common, std::string_view pubkey) { + RocksDbCommon &common, std::string_view pubkey, bool is_sync_peer) { + if (is_sync_peer) + return dbCall( + common, fmtstrings::kSPeerTLS, pubkey); + return dbCall( common, fmtstrings::kPeerTLS, pubkey); } diff --git a/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp b/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp index df651508655..5232eabf555 100644 --- a/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp +++ b/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp @@ -939,23 +939,32 @@ operator()( RDB_ERROR_CHECK(checkPermissions(creator_permissions, {Role::kGetPeers})); std::vector> peers; - auto status = enumerateKeysAndValues( - common, - [&](auto pubkey, auto address) { - peers.emplace_back(std::make_shared( - address.ToStringView(), - std::string{pubkey.ToStringView()}, - std::nullopt)); - return true; - }, - fmtstrings::kPathPeers); + auto enum_peers = [&](auto const &path, bool syncing_peer) { + return enumerateKeysAndValues( + common, + [&](auto pubkey, auto address) { + peers.emplace_back(std::make_shared( + address.ToStringView(), + std::string{pubkey.ToStringView()}, + std::nullopt, + syncing_peer)); + return true; + }, + path); + }; + + auto status = enum_peers(fmtstrings::kPathPeers, false); + RDB_ERROR_CHECK( + canExist(status, [&]() { return fmt::format("Enumerate peers"); })); + + status = enum_peers(fmtstrings::kPathSPeers, true); RDB_ERROR_CHECK( canExist(status, [&]() { return fmt::format("Enumerate peers"); })); for (auto &peer : peers) { RDB_TRY_GET_VALUE(opt_tls, forPeerTLS( - common, peer->pubkey())); + common, peer->pubkey(), 
peer->isSyncingPeer())); if (opt_tls) utils::reinterpret_pointer_cast(peer) diff --git a/irohad/ametsuchi/impl/rocksdb_storage_impl.cpp b/irohad/ametsuchi/impl/rocksdb_storage_impl.cpp index 849ced82724..7ebba4dea19 100644 --- a/irohad/ametsuchi/impl/rocksdb_storage_impl.cpp +++ b/irohad/ametsuchi/impl/rocksdb_storage_impl.cpp @@ -166,11 +166,14 @@ namespace iroha::ametsuchi { log_manager->getChild("WsvQuery")->getLogger()); auto maybe_top_block_info = wsv_query.getTopBlockInfo(); - auto maybe_ledger_peers = wsv_query.getPeers(); + auto maybe_ledger_peers = wsv_query.getPeers(false); + auto maybe_ledger_sync_peers = wsv_query.getPeers(true); - if (expected::hasValue(maybe_top_block_info) and maybe_ledger_peers) + if (expected::hasValue(maybe_top_block_info) && maybe_ledger_peers + && maybe_ledger_sync_peers) ledger_state = std::make_shared( std::move(*maybe_ledger_peers), + std::move(*maybe_ledger_sync_peers), maybe_top_block_info.assumeValue().height, maybe_top_block_info.assumeValue().top_hash); } diff --git a/irohad/ametsuchi/impl/rocksdb_wsv_command.cpp b/irohad/ametsuchi/impl/rocksdb_wsv_command.cpp index 50169da15b7..f3fb4a573f2 100644 --- a/irohad/ametsuchi/impl/rocksdb_wsv_command.cpp +++ b/irohad/ametsuchi/impl/rocksdb_wsv_command.cpp @@ -337,13 +337,32 @@ namespace iroha::ametsuchi { std::back_inserter(result), [](auto c) { return std::tolower(c); }); + RDB_ERROR_CHECK( + forPeerAddress( + common, result, false)); + RDB_ERROR_CHECK( + forPeerAddress( + common, result, true)); + + RDB_TRY_GET_VALUE( + opt_peers_count, + forPeersCount( + common, peer.isSyncingPeer())); + + common.encode((opt_peers_count ? 
*opt_peers_count : 0ull) + 1ull); + RDB_ERROR_CHECK( + forPeersCount(common, peer.isSyncingPeer())); + common.valueBuffer().assign(peer.address()); - RDB_ERROR_CHECK(forPeerAddress(common, result)); + RDB_ERROR_CHECK(forPeerAddress( + common, result, peer.isSyncingPeer())); if (peer.tlsCertificate()) { common.valueBuffer().assign(peer.tlsCertificate().value()); - RDB_ERROR_CHECK(forPeerTLS(common, result)); + RDB_ERROR_CHECK(forPeerTLS( + common, result, peer.isSyncingPeer())); } + return {}; }, [&]() { @@ -363,11 +382,22 @@ namespace iroha::ametsuchi { std::back_inserter(result), [](auto c) { return std::tolower(c); }); + RDB_TRY_GET_VALUE( + opt_peers_count, + forPeersCount( + common, peer.isSyncingPeer())); + + common.encode((opt_peers_count && *opt_peers_count > 0ull) + ? (*opt_peers_count - 1ull) + : 0ull); + RDB_ERROR_CHECK( + forPeersCount(common, peer.isSyncingPeer())); + RDB_ERROR_CHECK( - forPeerAddress(common, - result)); + forPeerAddress( + common, result, peer.isSyncingPeer())); RDB_ERROR_CHECK(forPeerTLS( - common, result)); + common, result, peer.isSyncingPeer())); return {}; }, [&]() { diff --git a/irohad/ametsuchi/impl/rocksdb_wsv_command.hpp b/irohad/ametsuchi/impl/rocksdb_wsv_command.hpp index 3ebdb0409ec..b3e94fb4242 100644 --- a/irohad/ametsuchi/impl/rocksdb_wsv_command.hpp +++ b/irohad/ametsuchi/impl/rocksdb_wsv_command.hpp @@ -17,7 +17,7 @@ namespace iroha { class RocksDBWsvCommand : public WsvCommand { public: - enum ErrorCodes { kNotUsed = 1000 }; + enum ErrorCodes { kNotUsed = 1000, kCommandUnexeptable = 1001 }; explicit RocksDBWsvCommand(std::shared_ptr db_context); WsvCommandResult insertRole( diff --git a/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp b/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp index c278c9e13f7..66ebc44fe79 100644 --- a/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp +++ b/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp @@ -76,28 +76,35 @@ namespace iroha::ametsuchi { } boost::optional>> - RocksDBWsvQuery::getPeers() { + 
RocksDBWsvQuery::getPeers(bool syncing_peers) { using RetType = std::vector>; return execute( db_context_, log_, [&](auto &common) -> expected::Result { RetType peers; - auto status = enumerateKeysAndValues( - common, - [&](auto pubkey, auto address) { - if (!pubkey.empty()) - peers.emplace_back( - std::make_shared( - address.ToStringView(), - std::string{pubkey.ToStringView()}, - std::nullopt)); - else - assert(!"Pubkey can not be empty!"); - return true; - }, - fmtstrings::kPathPeers); + auto callback = [&](auto pubkey, auto address) { + if (!pubkey.empty()) + peers.emplace_back(std::make_shared( + address.ToStringView(), + std::string{pubkey.ToStringView()}, + std::nullopt, + syncing_peers)); + else + assert(!"Pubkey can not be empty!"); + + return true; + }; + + rocksdb::Status status; + if (syncing_peers) + status = enumerateKeysAndValues( + common, std::move(callback), fmtstrings::kPathSPeers); + else + status = enumerateKeysAndValues( + common, std::move(callback), fmtstrings::kPathPeers); + RDB_ERROR_CHECK(canExist( status, [&]() { return fmt::format("Enumerate peers"); })); @@ -105,7 +112,7 @@ namespace iroha::ametsuchi { RDB_TRY_GET_VALUE( opt_tls, forPeerTLS( - common, peer->pubkey())); + common, peer->pubkey(), syncing_peers)); if (opt_tls) utils::reinterpret_pointer_cast(peer) @@ -135,16 +142,26 @@ namespace iroha::ametsuchi { std::back_inserter(result), [](auto c) { return std::tolower(c); }); - RDB_TRY_GET_VALUE( - opt_addr, - forPeerAddress(common, - result)); + bool syncing_node = false; + auto res = forPeerAddress( + common, result, syncing_node); + if (expected::hasError(res)) { + syncing_node = true; + if (res = forPeerAddress( + common, result, syncing_node); + expected::hasError(res)) + return res.assumeError(); + } + auto peer = std::make_shared( - std::move(*opt_addr), std::string(pubkey), std::nullopt); + std::move(*res.assumeValue()), + std::string(pubkey), + std::nullopt, + syncing_node); RDB_TRY_GET_VALUE(opt_tls, forPeerTLS( - common, 
result)); + common, result, syncing_node)); if (opt_tls) peer->setTlsCertificate(*opt_tls); @@ -186,11 +203,13 @@ namespace iroha::ametsuchi { } } - iroha::expected::Result RocksDBWsvQuery::countPeers() { + iroha::expected::Result RocksDBWsvQuery::countPeers( + bool syncing_peers) { RocksDbCommon common(db_context_); RDB_TRY_GET_VALUE_OR_STR_ERR( opt_count, - forPeersCount(common)); + forPeersCount(common, + syncing_peers)); return *opt_count; } diff --git a/irohad/ametsuchi/impl/rocksdb_wsv_query.hpp b/irohad/ametsuchi/impl/rocksdb_wsv_query.hpp index 0400fc8ade0..340120eb6ce 100644 --- a/irohad/ametsuchi/impl/rocksdb_wsv_query.hpp +++ b/irohad/ametsuchi/impl/rocksdb_wsv_query.hpp @@ -24,7 +24,7 @@ namespace iroha { boost::optional< std::vector>> - getPeers() override; + getPeers(bool syncing_peers) override; boost::optional> getPeerByPublicKey(shared_model::interface::types::PublicKeyHexStringView @@ -33,7 +33,8 @@ namespace iroha { iroha::expected::Result getTopBlockInfo() const override; - iroha::expected::Result countPeers() override; + iroha::expected::Result countPeers( + bool syncing_peers) override; iroha::expected::Result countDomains() override; iroha::expected::Result countTransactions() override; diff --git a/irohad/ametsuchi/impl/storage_base.cpp b/irohad/ametsuchi/impl/storage_base.cpp index 2e0171d4a81..10502fafbc1 100644 --- a/irohad/ametsuchi/impl/storage_base.cpp +++ b/irohad/ametsuchi/impl/storage_base.cpp @@ -177,17 +177,20 @@ namespace iroha::ametsuchi { callback_(block); - decltype(std::declval().getPeers()) opt_ledger_peers; - { - if (not(opt_ledger_peers = wsv_query.getPeers())) { + boost::optional< + std::vector>> + opt_ledger_peers[] = {wsv_query.getPeers(false), // peers + wsv_query.getPeers(true)}; // syncing peers + for (auto &peer_list : opt_ledger_peers) + if (!peer_list) return expected::makeError( std::string{"Failed to get ledger peers! 
Will retry."}); - } - } - assert(opt_ledger_peers); - ledgerState(std::make_shared( - std::move(*opt_ledger_peers), block->height(), block->hash())); + ledgerState( + std::make_shared(std::move(*(opt_ledger_peers[0])), + std::move(*(opt_ledger_peers[1])), + block->height(), + block->hash())); return expected::makeValue(ledgerState().value()); } catch (const std::exception &e) { std::string msg(fmt::format("failed to apply prepared block {}: {}", diff --git a/irohad/ametsuchi/impl/storage_impl.cpp b/irohad/ametsuchi/impl/storage_impl.cpp index 18d7dd61809..1af75b23333 100644 --- a/irohad/ametsuchi/impl/storage_impl.cpp +++ b/irohad/ametsuchi/impl/storage_impl.cpp @@ -230,11 +230,14 @@ namespace iroha::ametsuchi { sql, log_manager->getChild("WsvQuery")->getLogger()); auto maybe_top_block_info = wsv_query.getTopBlockInfo(); - auto maybe_ledger_peers = wsv_query.getPeers(); + auto maybe_ledger_peers = wsv_query.getPeers(false); + auto maybe_ledger_syncing_peers = wsv_query.getPeers(true); - if (expected::hasValue(maybe_top_block_info) and maybe_ledger_peers) + if (expected::hasValue(maybe_top_block_info) && maybe_ledger_peers + && maybe_ledger_syncing_peers) ledger_state = std::make_shared( std::move(*maybe_ledger_peers), + std::move(*maybe_ledger_syncing_peers), maybe_top_block_info.assumeValue().height, maybe_top_block_info.assumeValue().top_hash); } diff --git a/irohad/ametsuchi/ledger_state.hpp b/irohad/ametsuchi/ledger_state.hpp index a4dddcc24ff..d9ad2519e8a 100644 --- a/irohad/ametsuchi/ledger_state.hpp +++ b/irohad/ametsuchi/ledger_state.hpp @@ -24,12 +24,15 @@ namespace iroha { struct LedgerState { shared_model::interface::types::PeerList ledger_peers; + shared_model::interface::types::PeerList ledger_syncing_peers; TopBlockInfo top_block_info; LedgerState(shared_model::interface::types::PeerList peers, + shared_model::interface::types::PeerList syncing_peers, shared_model::interface::types::HeightType height, shared_model::crypto::Hash top_hash) : 
ledger_peers(std::move(peers)), + ledger_syncing_peers(std::move(syncing_peers)), top_block_info(height, std::move(top_hash)) {} }; } // namespace iroha diff --git a/irohad/ametsuchi/peer_query.hpp b/irohad/ametsuchi/peer_query.hpp index f25ada855ca..4e936223609 100644 --- a/irohad/ametsuchi/peer_query.hpp +++ b/irohad/ametsuchi/peer_query.hpp @@ -36,7 +36,8 @@ namespace iroha { * Fetch peers stored in ledger * @return list of peers in insertion to ledger order */ - virtual boost::optional> getLedgerPeers() = 0; + virtual boost::optional> getLedgerPeers( + bool syncing_peers) = 0; /** * Fetch peer with given public key from ledger diff --git a/irohad/ametsuchi/wsv_query.hpp b/irohad/ametsuchi/wsv_query.hpp index 84985b55d45..8df9e5f115d 100644 --- a/irohad/ametsuchi/wsv_query.hpp +++ b/irohad/ametsuchi/wsv_query.hpp @@ -38,9 +38,9 @@ namespace iroha { */ virtual boost::optional< std::vector>> - getPeers() = 0; + getPeers(bool syncing_peers) = 0; - // ToDo?(kuvaldini,iceseer) #997 + // ToDo?(iceseer) #997 // /** // * @brief Fetch domains stored in ledger // * @return list of domains in insertion to ledger order @@ -54,7 +54,8 @@ namespace iroha { * @brief Fetch number of domains in ledger * @return number of domains in ledger */ - virtual iroha::expected::Result countPeers() = 0; + virtual iroha::expected::Result countPeers( + bool syncing_peers) = 0; /** * @brief Fetch number of domains in ledger diff --git a/irohad/consensus/yac/impl/yac.cpp b/irohad/consensus/yac/impl/yac.cpp index 48cfd7073b2..344e1655d7c 100644 --- a/irohad/consensus/yac/impl/yac.cpp +++ b/irohad/consensus/yac/impl/yac.cpp @@ -58,9 +58,11 @@ void Yac::stop() { std::optional Yac::processRoundSwitch( consensus::Round const &round, - shared_model::interface::types::PeerList const &peers) { + shared_model::interface::types::PeerList const &peers, + shared_model::interface::types::PeerList const &sync_peers) { round_ = round; cluster_order_ = peers; + syncing_peers_ = sync_peers; std::optional 
result; auto it = future_states_.lower_bound(round_); while (it != future_states_.end() @@ -297,9 +299,9 @@ void Yac::tryPropagateBack(const std::vector &state) { // ------|Propagation|------ void Yac::propagateState(const std::vector &msg) { - for (const auto &peer : cluster_order_) { - propagateStateDirectly(*peer, msg); - } + for (const auto &peer : cluster_order_) propagateStateDirectly(*peer, msg); + + for (const auto &peer : syncing_peers_) propagateStateDirectly(*peer, msg); } void Yac::propagateStateDirectly(const shared_model::interface::Peer &to, diff --git a/irohad/consensus/yac/impl/yac_gate_impl.cpp b/irohad/consensus/yac/impl/yac_gate_impl.cpp index 46049899238..77d97d4522e 100644 --- a/irohad/consensus/yac/impl/yac_gate_impl.cpp +++ b/irohad/consensus/yac/impl/yac_gate_impl.cpp @@ -110,7 +110,9 @@ std::optional YacGateImpl::processRoundSwitch( current_block_ = std::nullopt; consensus_result_cache_->release(); if (auto answer = hash_gate_->processRoundSwitch( - current_hash_.vote_round, current_ledger_state_->ledger_peers)) { + current_hash_.vote_round, + current_ledger_state_->ledger_peers, + current_ledger_state_->ledger_syncing_peers)) { return processOutcome(*answer); } return std::nullopt; diff --git a/irohad/consensus/yac/yac.hpp b/irohad/consensus/yac/yac.hpp index 4dba30716c3..eebceeb6662 100644 --- a/irohad/consensus/yac/yac.hpp +++ b/irohad/consensus/yac/yac.hpp @@ -54,7 +54,8 @@ namespace iroha::consensus::yac { std::optional processRoundSwitch( consensus::Round const &round, - shared_model::interface::types::PeerList const &peers) override; + shared_model::interface::types::PeerList const &peers, + shared_model::interface::types::PeerList const &sync_peers) override; // ------|Network notifications|------ @@ -103,6 +104,7 @@ namespace iroha::consensus::yac { // ------|One round|------ shared_model::interface::types::PeerList cluster_order_; + shared_model::interface::types::PeerList syncing_peers_; std::optional alternative_order_; Round 
round_; diff --git a/irohad/consensus/yac/yac_gate.hpp b/irohad/consensus/yac/yac_gate.hpp index 8bbc13754c3..22726720b2b 100644 --- a/irohad/consensus/yac/yac_gate.hpp +++ b/irohad/consensus/yac/yac_gate.hpp @@ -48,7 +48,8 @@ namespace iroha::consensus::yac { */ virtual std::optional processRoundSwitch( consensus::Round const &round, - shared_model::interface::types::PeerList const &peers) = 0; + shared_model::interface::types::PeerList const &peers, + shared_model::interface::types::PeerList const &sync_peers) = 0; /// Prevent any new outgoing network activity. Be passive. virtual void stop() = 0; diff --git a/irohad/main/application.cpp b/irohad/main/application.cpp index fbbc6f612d3..849ed8df61b 100644 --- a/irohad/main/application.cpp +++ b/irohad/main/application.cpp @@ -766,7 +766,8 @@ Irohad::RunResult Irohad::initOrderingGate() { log_manager_->getChild("Ordering"), inter_peer_client_factory_, std::chrono::milliseconds( - config_.proposal_creation_timeout.value_or(kMaxRoundsDelayDefault))); + config_.proposal_creation_timeout.value_or(kMaxRoundsDelayDefault)), + config_.syncing_mode); log_->info("[Init] => init ordering gate - [{}]", logger::boolRepr(bool(ordering_gate))); return {}; @@ -847,7 +848,8 @@ Irohad::RunResult Irohad::initConsensusGate() { std::chrono::milliseconds(config_.vote_delay), kConsensusConsistencyModel, log_manager_->getChild("Consensus"), - inter_peer_client_factory_); + inter_peer_client_factory_, + config_.syncing_mode); log_->info("[Init] => consensus gate"); return {}; } @@ -1145,6 +1147,7 @@ Irohad::RunResult Irohad::run() { verified_proposal); auto block = maybe_simulator->processVerifiedProposal( std::move(verified_proposal)); + maybe_consensus_gate->vote(std::move(block)); } }); @@ -1234,7 +1237,7 @@ Irohad::RunResult Irohad::run() { auto block_height = block->height(); auto peers = storage->createPeerQuery() | - [](auto &&peer_query) { return peer_query->getLedgerPeers(); }; + [](auto &&peer_query) { return 
peer_query->getLedgerPeers(false); }; if (not peers) { return expected::makeError("Failed to fetch ledger peers!"); } diff --git a/irohad/main/application.hpp b/irohad/main/application.hpp index 84ab16c0f4b..af275efae63 100644 --- a/irohad/main/application.hpp +++ b/irohad/main/application.hpp @@ -248,7 +248,7 @@ class Irohad { virtual RunResult initWsvRestorer(); // constructor dependencies - IrohadConfig config_; + IrohadConfig const config_; const std::string listen_ip_; boost::optional keypair_; iroha::StartupWsvSynchronizationPolicy startup_wsv_sync_policy_; diff --git a/irohad/main/impl/consensus_init.cpp b/irohad/main/impl/consensus_init.cpp index 2e9414de5d5..9cd72d13e1c 100644 --- a/irohad/main/impl/consensus_init.cpp +++ b/irohad/main/impl/consensus_init.cpp @@ -126,7 +126,8 @@ std::shared_ptr YacInit::initConsensusGate( std::chrono::milliseconds vote_delay_milliseconds, ConsistencyModel consistency_model, const logger::LoggerManagerTreePtr &consensus_log_manager, - std::shared_ptr client_factory) { + std::shared_ptr client_factory, + bool /*syncing_mode*/) { consensus_network_ = std::make_shared( consensus_log_manager->getChild("Service")->getLogger(), [](std::vector state) { @@ -143,9 +144,7 @@ std::shared_ptr YacInit::initConsensusGate( consistency_model, consensus_log_manager); auto hash_provider = createHashProvider(); - initialized_ = true; - yac_gate_ = std::make_shared( yac_, std::make_shared(), @@ -155,5 +154,6 @@ std::shared_ptr YacInit::initConsensusGate( hash_provider, std::move(consensus_result_cache), consensus_log_manager->getChild("Gate")->getLogger()); + return yac_gate_; } diff --git a/irohad/main/impl/consensus_init.hpp b/irohad/main/impl/consensus_init.hpp index b6b1f446af4..00394f98fea 100644 --- a/irohad/main/impl/consensus_init.hpp +++ b/irohad/main/impl/consensus_init.hpp @@ -44,7 +44,8 @@ namespace iroha::consensus::yac { std::chrono::milliseconds vote_delay_milliseconds, ConsistencyModel consistency_model, const 
logger::LoggerManagerTreePtr &consensus_log_manager, - std::shared_ptr client_factory); + std::shared_ptr client_factory, + bool syncing_mode); std::shared_ptr getConsensusNetwork() const; diff --git a/irohad/main/impl/on_demand_ordering_init.cpp b/irohad/main/impl/on_demand_ordering_init.cpp index 4f7c897248e..c5b88c17002 100644 --- a/irohad/main/impl/on_demand_ordering_init.cpp +++ b/irohad/main/impl/on_demand_ordering_init.cpp @@ -85,14 +85,16 @@ auto OnDemandOrderingInit::createGate( proposal_factory, std::shared_ptr tx_cache, size_t max_number_of_transactions, - const logger::LoggerManagerTreePtr &ordering_log_manager) { + const logger::LoggerManagerTreePtr &ordering_log_manager, + bool syncing_mode) { return std::make_shared( std::move(ordering_service), std::move(network_client), std::move(proposal_factory), std::move(tx_cache), max_number_of_transactions, - ordering_log_manager->getChild("Gate")->getLogger()); + ordering_log_manager->getChild("Gate")->getLogger(), + syncing_mode); } auto OnDemandOrderingInit::createService( @@ -125,18 +127,23 @@ OnDemandOrderingInit::initOrderingGate( std::shared_ptr tx_cache, logger::LoggerManagerTreePtr ordering_log_manager, std::shared_ptr client_factory, - std::chrono::milliseconds proposal_creation_timeout) { - auto ordering_service = createService(max_number_of_transactions, - proposal_factory, - tx_cache, - ordering_log_manager); - service = std::make_shared( - ordering_service, - std::move(transaction_factory), - std::move(batch_parser), - std::move(transaction_batch_factory), - ordering_log_manager->getChild("Server")->getLogger(), - proposal_creation_timeout); + std::chrono::milliseconds proposal_creation_timeout, + bool syncing_mode) { + std::shared_ptr ordering_service; + if (!syncing_mode) { + ordering_service = createService(max_number_of_transactions, + proposal_factory, + tx_cache, + ordering_log_manager); + service = std::make_shared( + ordering_service, + std::move(transaction_factory), + 
std::move(batch_parser), + std::move(transaction_batch_factory), + ordering_log_manager->getChild("Server")->getLogger(), + proposal_creation_timeout); + } + ordering_gate_ = createGate(ordering_service, createConnectionManager(std::move(proposal_transport_factory), @@ -146,7 +153,8 @@ OnDemandOrderingInit::initOrderingGate( std::move(proposal_factory), std::move(tx_cache), max_number_of_transactions, - ordering_log_manager); + ordering_log_manager, + syncing_mode); getSubscription()->dispatcher()->repeat( iroha::SubscriptionEngineHandlers::kMetrics, @@ -269,6 +277,9 @@ void OnDemandOrderingInit::processCommittedBlock( // take committed & rejected transaction hashes from committed block log_->debug("Committed block handle: height {}.", block->height()); + if (!ordering_service_) + return; + auto hashes = std::make_shared(); for (shared_model::interface::Transaction const &tx : block->transactions()) { hashes->insert(tx.hash()); diff --git a/irohad/main/impl/on_demand_ordering_init.hpp b/irohad/main/impl/on_demand_ordering_init.hpp index 13db63e14bc..43e3eec42d1 100644 --- a/irohad/main/impl/on_demand_ordering_init.hpp +++ b/irohad/main/impl/on_demand_ordering_init.hpp @@ -94,7 +94,8 @@ namespace iroha::ordering { proposal_factory, std::shared_ptr tx_cache, size_t max_number_of_transactions, - const logger::LoggerManagerTreePtr &ordering_log_manager); + const logger::LoggerManagerTreePtr &ordering_log_manager, + bool syncing_mode); /** * Creates on-demand ordering service. 
\see initOrderingGate for @@ -145,7 +146,8 @@ namespace iroha::ordering { std::shared_ptr tx_cache, logger::LoggerManagerTreePtr ordering_log_manager, std::shared_ptr client_factory, - std::chrono::milliseconds proposal_creation_timeout); + std::chrono::milliseconds proposal_creation_timeout, + bool syncing_mode); iroha::ordering::RoundSwitch processSynchronizationEvent( synchronizer::SynchronizationEvent event); diff --git a/irohad/main/impl/pg_connection_init.cpp b/irohad/main/impl/pg_connection_init.cpp index aa19d36cbf5..0c8f24ff44d 100644 --- a/irohad/main/impl/pg_connection_init.cpp +++ b/irohad/main/impl/pg_connection_init.cpp @@ -286,6 +286,12 @@ CREATE TABLE peer ( tls_certificate varchar, PRIMARY KEY (public_key) ); +CREATE TABLE sync_peer ( + public_key varchar NOT NULL, + address character varying(261) NOT NULL UNIQUE, + tls_certificate varchar, + PRIMARY KEY (public_key) +); CREATE TABLE asset ( asset_id character varying(288), domain_id character varying(255) NOT NULL REFERENCES domain, diff --git a/irohad/main/iroha_conf_loader.hpp b/irohad/main/iroha_conf_loader.hpp index 17db4111108..67b5e3827ef 100644 --- a/irohad/main/iroha_conf_loader.hpp +++ b/irohad/main/iroha_conf_loader.hpp @@ -64,6 +64,7 @@ struct IrohadConfig { uint32_t proposal_delay; uint32_t vote_delay; bool mst_support; + bool syncing_mode; boost::optional mst_expiration_time; boost::optional max_round_delay_ms; boost::optional proposal_creation_timeout; diff --git a/irohad/main/irohad.cpp b/irohad/main/irohad.cpp index 5995dedaf69..5d506f3bb36 100644 --- a/irohad/main/irohad.cpp +++ b/irohad/main/irohad.cpp @@ -117,9 +117,9 @@ DEFINE_string(metrics_port, "", "Prometeus HTTP server listens port, disabled by default"); -DEFINE_bool(exit_after_init, - false, - "Use this flag to reindex WSV and exit"); +DEFINE_bool(exit_after_init, false, "Use this flag to reindex WSV and exit"); + +DEFINE_bool(syncing_node, false, "Use this flag to run iroha as syncing node"); std::sig_atomic_t 
caught_signal = 0; std::promise exit_requested; @@ -246,6 +246,7 @@ int main(int argc, char *argv[]) { return EXIT_FAILURE; } auto config = std::move(config_result).assumeValue(); + config.syncing_mode = FLAGS_syncing_node; if (FLAGS_verbosity == kLogSettingsFromConfigFile) { log_manager = config.logger_manager.value_or(getDefaultLogManager()); @@ -463,7 +464,7 @@ int main(int argc, char *argv[]) { return EXIT_FAILURE; } - if(FLAGS_exit_after_init){ + if (FLAGS_exit_after_init) { return EXIT_SUCCESS; } diff --git a/irohad/main/server_runner.cpp b/irohad/main/server_runner.cpp index 53bdfe42f0c..da59ad41273 100644 --- a/irohad/main/server_runner.cpp +++ b/irohad/main/server_runner.cpp @@ -50,7 +50,8 @@ ServerRunner::~ServerRunner() { } ServerRunner &ServerRunner::append(std::shared_ptr service) { - services_.push_back(service); + if (service) + services_.push_back(service); return *this; } diff --git a/irohad/maintenance/metrics.cpp b/irohad/maintenance/metrics.cpp index 33f5b346ab2..d52eb8dc467 100644 --- a/irohad/maintenance/metrics.cpp +++ b/irohad/maintenance/metrics.cpp @@ -77,7 +77,7 @@ Metrics::Metrics(std::string const &listen_addr, .Help("Total number peers to send transactions and request proposals") .Register(*registry_); auto &number_of_peers = peers_number_gauge.Add({}); - number_of_peers.Set(storage_->getWsvQuery()->getPeers()->size()); + number_of_peers.Set(storage_->getWsvQuery()->getPeers(false)->size()); auto &domains_number_gauge = BuildGauge() .Name("number_of_domains") diff --git a/irohad/multi_sig_transactions/impl/gossip_propagation_strategy.cpp b/irohad/multi_sig_transactions/impl/gossip_propagation_strategy.cpp index 30df9b98cd8..1a6e942d645 100644 --- a/irohad/multi_sig_transactions/impl/gossip_propagation_strategy.cpp +++ b/irohad/multi_sig_transactions/impl/gossip_propagation_strategy.cpp @@ -55,7 +55,7 @@ namespace iroha { bool GossipPropagationStrategy::initQueue() { return peer_factory->createPeerQuery() | [](const auto &query) { - 
return query->getLedgerPeers(); + return query->getLedgerPeers(false); } | [](auto &&data) -> boost::optional { if (data.size() == 0) { return {}; diff --git a/irohad/ordering/impl/on_demand_ordering_gate.cpp b/irohad/ordering/impl/on_demand_ordering_gate.cpp index 052687d8641..bb393470190 100644 --- a/irohad/ordering/impl/on_demand_ordering_gate.cpp +++ b/irohad/ordering/impl/on_demand_ordering_gate.cpp @@ -28,13 +28,15 @@ OnDemandOrderingGate::OnDemandOrderingGate( std::shared_ptr factory, std::shared_ptr tx_cache, size_t transaction_limit, - logger::LoggerPtr log) + logger::LoggerPtr log, + bool syncing_mode) : log_(std::move(log)), transaction_limit_(transaction_limit), ordering_service_(std::move(ordering_service)), network_client_(std::move(network_client)), proposal_factory_(std::move(factory)), - tx_cache_(std::move(tx_cache)) {} + tx_cache_(std::move(tx_cache)), + syncing_mode_(syncing_mode) {} OnDemandOrderingGate::~OnDemandOrderingGate() { stop(); @@ -49,8 +51,8 @@ void OnDemandOrderingGate::propagateBatch( } // TODO iceseer 14.01.21 IR-959 Refactor to avoid copying. 
- ordering_service_->onBatches( - transport::OdOsNotification::CollectionType{batch}); + forLocalOS(&OnDemandOrderingService::onBatches, + transport::OdOsNotification::CollectionType{batch}); network_client_->onBatches( transport::OdOsNotification::CollectionType{batch}); } @@ -67,12 +69,14 @@ void OnDemandOrderingGate::processRoundSwitch(RoundSwitch const &event) { } // notify our ordering service about new round - ordering_service_->onCollaborationOutcome(event.next_round); + forLocalOS(&OnDemandOrderingService::onCollaborationOutcome, + event.next_round); this->sendCachedTransactions(); // request proposal for the current round - network_client_->onRequestProposal(event.next_round); + if (!syncing_mode_) + network_client_->onRequestProposal(event.next_round); } void OnDemandOrderingGate::stop() { @@ -112,7 +116,8 @@ OnDemandOrderingGate::processProposalRequest(ProposalEvent const &event) const { std::make_shared( std::move(txs))); } - ordering_service_->processReceivedProposal(std::move(batches)); + forLocalOS(&OnDemandOrderingService::processReceivedProposal, + std::move(batches)); return network::OrderingEvent{ std::move(result), event.round, current_ledger_state_}; } @@ -120,23 +125,26 @@ OnDemandOrderingGate::processProposalRequest(ProposalEvent const &event) const { void OnDemandOrderingGate::sendCachedTransactions() { assert(not stop_mutex_.try_lock()); // lock must be taken before // TODO iceseer 14.01.21 IR-958 Check that OS is remote - ordering_service_->forCachedBatches([this](auto const &batches) { - auto end_iterator = batches.begin(); - auto current_number_of_transactions = 0u; - for (; end_iterator != batches.end(); ++end_iterator) { - auto batch_size = (*end_iterator)->transactions().size(); - if (current_number_of_transactions + batch_size <= transaction_limit_) { - current_number_of_transactions += batch_size; - } else { - break; - } - } - - if (not batches.empty()) { - network_client_->onBatches(transport::OdOsNotification::CollectionType{ - 
batches.begin(), end_iterator}); - } - }); + forLocalOS(&OnDemandOrderingService::forCachedBatches, + [this](auto const &batches) { + auto end_iterator = batches.begin(); + auto current_number_of_transactions = 0u; + for (; end_iterator != batches.end(); ++end_iterator) { + auto batch_size = (*end_iterator)->transactions().size(); + if (current_number_of_transactions + batch_size + <= transaction_limit_) { + current_number_of_transactions += batch_size; + } else { + break; + } + } + + if (not batches.empty()) { + network_client_->onBatches( + transport::OdOsNotification::CollectionType{ + batches.begin(), end_iterator}); + } + }); } std::shared_ptr @@ -190,9 +198,8 @@ OnDemandOrderingGate::removeReplaysAndDuplicates( return proposal; } - if (!dup_hashes->empty()) { - ordering_service_->onDuplicates(*dup_hashes); - } + if (!dup_hashes->empty()) + forLocalOS(&OnDemandOrderingService::onDuplicates, *dup_hashes); auto unprocessed_txs = proposal->transactions() | boost::adaptors::indexed() diff --git a/irohad/ordering/impl/on_demand_ordering_gate.hpp b/irohad/ordering/impl/on_demand_ordering_gate.hpp index 0a892ba31b6..966f09a25dd 100644 --- a/irohad/ordering/impl/on_demand_ordering_gate.hpp +++ b/irohad/ordering/impl/on_demand_ordering_gate.hpp @@ -39,7 +39,8 @@ namespace iroha { factory, std::shared_ptr tx_cache, size_t transaction_limit, - logger::LoggerPtr log); + logger::LoggerPtr log, + bool syncing_mode); ~OnDemandOrderingGate() override; @@ -64,6 +65,18 @@ namespace iroha { private: void sendCachedTransactions(); + template + void forLocalOS(Func func, Args &&... args) { + if (ordering_service_) + (ordering_service_.get()->*func)(std::forward(args)...); + } + + template + void forLocalOS(Func func, Args &&... 
args) const { + if (ordering_service_) + (ordering_service_.get()->*func)(std::forward(args)...); + } + /** * remove already processed transactions from proposal */ @@ -86,6 +99,7 @@ namespace iroha { std::shared_timed_mutex stop_mutex_; bool stop_requested_{false}; + bool syncing_mode_; }; } // namespace ordering diff --git a/shared_model/backend/plain/impl/peer.cpp b/shared_model/backend/plain/impl/peer.cpp index b955d747b1b..fae9165d20f 100644 --- a/shared_model/backend/plain/impl/peer.cpp +++ b/shared_model/backend/plain/impl/peer.cpp @@ -11,15 +11,21 @@ using namespace shared_model::plain; Peer::Peer(const interface::types::AddressTypeView address, std::string public_key_hex, const std::optional - &tls_certificate) + &tls_certificate, + bool is_syncing_peer) : address_(address), public_key_hex_(std::move(public_key_hex)), + is_syncing_peer_(is_syncing_peer), tls_certificate_(tls_certificate) {} const shared_model::interface::types::AddressType &Peer::address() const { return address_; } +bool Peer::isSyncingPeer() const { + return is_syncing_peer_; +} + const std::string &Peer::pubkey() const { return public_key_hex_; } diff --git a/shared_model/backend/plain/peer.hpp b/shared_model/backend/plain/peer.hpp index 0b02101155e..666b2a6f8fc 100644 --- a/shared_model/backend/plain/peer.hpp +++ b/shared_model/backend/plain/peer.hpp @@ -18,7 +18,8 @@ namespace shared_model { Peer(const interface::types::AddressTypeView address, std::string public_key_hex, const std::optional - &tls_certificate); + &tls_certificate, + bool is_syncing_peer); const interface::types::AddressType &address() const override; @@ -29,9 +30,12 @@ namespace shared_model { void setTlsCertificate(interface::types::TLSCertificateTypeView cert); + bool isSyncingPeer() const override; + private: const interface::types::AddressType address_; const std::string public_key_hex_; + bool is_syncing_peer_; std::optional tls_certificate_; }; diff --git a/shared_model/backend/protobuf/common_objects/peer.hpp 
b/shared_model/backend/protobuf/common_objects/peer.hpp index 61d0b6cb816..865b620df0d 100644 --- a/shared_model/backend/protobuf/common_objects/peer.hpp +++ b/shared_model/backend/protobuf/common_objects/peer.hpp @@ -43,6 +43,10 @@ namespace shared_model { return proto_->peer_key(); } + bool isSyncingPeer() const override { + return proto_->syncing_peer(); + } + private: detail::ReferenceHolder proto_; std::optional tls_certificate_; diff --git a/shared_model/interfaces/common_objects/impl/peer.cpp b/shared_model/interfaces/common_objects/impl/peer.cpp index e69cb818518..a9af0f584f8 100644 --- a/shared_model/interfaces/common_objects/impl/peer.cpp +++ b/shared_model/interfaces/common_objects/impl/peer.cpp @@ -15,12 +15,14 @@ namespace shared_model { .appendNamed("address", address()) .appendNamed("pubkey", pubkey()) .appendNamed("tlsCertificate", bool(tlsCertificate())) + .appendNamed("IsSyncing", isSyncingPeer()) .finalize(); } bool Peer::operator==(const ModelType &rhs) const { return address() == rhs.address() and pubkey() == rhs.pubkey() - and tlsCertificate() == rhs.tlsCertificate(); + and tlsCertificate() == rhs.tlsCertificate() + && isSyncingPeer() == rhs.isSyncingPeer(); } } // namespace interface } // namespace shared_model diff --git a/shared_model/interfaces/common_objects/peer.hpp b/shared_model/interfaces/common_objects/peer.hpp index 45b1490af93..8fcf51b45ed 100644 --- a/shared_model/interfaces/common_objects/peer.hpp +++ b/shared_model/interfaces/common_objects/peer.hpp @@ -35,6 +35,11 @@ namespace shared_model { */ virtual const std::string &pubkey() const = 0; + /** + * @return flag determines if the peer is syncing or validating + */ + virtual bool isSyncingPeer() const = 0; + std::string toString() const override; bool operator==(const ModelType &rhs) const override; diff --git a/shared_model/schema/primitive.proto b/shared_model/schema/primitive.proto index 84f9d32dfe4..340a608f029 100644 --- a/shared_model/schema/primitive.proto +++ 
b/shared_model/schema/primitive.proto @@ -109,6 +109,7 @@ message Peer { oneof certificate { string tls_certificate = 3; // pem-encoded string } + bool syncing_peer = 4; } message AccountDetailRecordId { diff --git a/test/framework/integration_framework/integration_test_framework.cpp b/test/framework/integration_framework/integration_test_framework.cpp index a5e675b7bb4..b7d814dae45 100644 --- a/test/framework/integration_framework/integration_test_framework.cpp +++ b/test/framework/integration_framework/integration_test_framework.cpp @@ -314,6 +314,7 @@ IntegrationTestFramework::IntegrationTestFramework( config_.stale_stream_max_rounds = 2; config_.max_proposal_size = 10; config_.mst_support = mst_support; + config_.syncing_mode = false; switch (db_type) { case iroha::StorageType::kPostgres: { diff --git a/test/integration/acceptance/add_peer_test.cpp b/test/integration/acceptance/add_peer_test.cpp index 912f161e163..480174e2907 100644 --- a/test/integration/acceptance/add_peer_test.cpp +++ b/test/integration/acceptance/add_peer_test.cpp @@ -101,7 +101,7 @@ TEST_P(AddPeerTest, FakePeerIsAdded) { ->getStorage() ->createPeerQuery() .value() - ->getLedgerPeers(); + ->getLedgerPeers(false); // check the two peers are there ASSERT_TRUE(opt_peers); @@ -299,7 +299,7 @@ TEST_P(AddPeerTest, RealPeerIsAdded) { ->getStorage() ->createPeerQuery() .value() - ->getLedgerPeers(); + ->getLedgerPeers(false); ASSERT_TRUE(opt_peers); EXPECT_THAT(*opt_peers, ::testing::UnorderedElementsAre( diff --git a/test/integration/acceptance/remove_peer_test.cpp b/test/integration/acceptance/remove_peer_test.cpp index 86e6aca8919..0d3800922c2 100644 --- a/test/integration/acceptance/remove_peer_test.cpp +++ b/test/integration/acceptance/remove_peer_test.cpp @@ -83,7 +83,7 @@ TEST_P(RemovePeerTest, FakePeerIsRemoved) { ->getStorage() ->createPeerQuery() .value() - ->getLedgerPeers(); + ->getLedgerPeers(false); // check only one peer is there ASSERT_TRUE(opt_peers); @@ -144,7 +144,7 @@ 
TEST_P(RemovePeerTest, RealPeerIsRemoved) { ->getStorage() ->createPeerQuery() .value() - ->getLedgerPeers(); + ->getLedgerPeers(false); // check only one peer is there ASSERT_TRUE(opt_peers); diff --git a/test/module/irohad/ametsuchi/ametsuchi_test.cpp b/test/module/irohad/ametsuchi/ametsuchi_test.cpp index 3f5d6a53640..8e6db52a590 100644 --- a/test/module/irohad/ametsuchi/ametsuchi_test.cpp +++ b/test/module/irohad/ametsuchi/ametsuchi_test.cpp @@ -208,7 +208,7 @@ TEST_F(AmetsuchiTest, PeerTest) { apply(storage, block); - auto peers = wsv->getPeers(); + auto peers = wsv->getPeers(false); ASSERT_TRUE(peers); ASSERT_EQ(peers->size(), 1); ASSERT_EQ(peers->at(0)->address(), "192.168.9.1:50051"); @@ -399,7 +399,7 @@ TEST_F(AmetsuchiTest, TestingStorageWhenInsertBlock) { "=> assert that inserted"); ASSERT_TRUE(storage); auto wsv = storage->getWsvQuery(); - ASSERT_EQ(0, wsv->getPeers().value().size()); + ASSERT_EQ(0, wsv->getPeers(false).value().size()); log->info("Try insert block"); @@ -408,7 +408,7 @@ TEST_F(AmetsuchiTest, TestingStorageWhenInsertBlock) { log->info("Request ledger information"); - ASSERT_NE(0, wsv->getPeers().value().size()); + ASSERT_NE(0, wsv->getPeers(false).value().size()); ASSERT_EQ(1, committed_blocks_.size()); } diff --git a/test/module/irohad/ametsuchi/mock_peer_query.hpp b/test/module/irohad/ametsuchi/mock_peer_query.hpp index 0feef1d9ac0..f14c6fd56ca 100644 --- a/test/module/irohad/ametsuchi/mock_peer_query.hpp +++ b/test/module/irohad/ametsuchi/mock_peer_query.hpp @@ -17,7 +17,7 @@ namespace iroha { public: MockPeerQuery() = default; - MOCK_METHOD0(getLedgerPeers, boost::optional>()); + MOCK_METHOD1(getLedgerPeers, boost::optional>(bool)); MOCK_CONST_METHOD1( getLedgerPeerByPublicKey, diff --git a/test/module/irohad/ametsuchi/mock_wsv_query.hpp b/test/module/irohad/ametsuchi/mock_wsv_query.hpp index cf37ff27ba4..a7b9835434c 100644 --- a/test/module/irohad/ametsuchi/mock_wsv_query.hpp +++ b/test/module/irohad/ametsuchi/mock_wsv_query.hpp @@ 
-37,10 +37,9 @@ namespace iroha { MOCK_METHOD1(getSignatories, boost::optional>( const std::string &account_id)); - MOCK_METHOD0( - getPeers, - boost::optional< - std::vector>>()); + MOCK_METHOD1(getPeers, + boost::optional>>(bool)); MOCK_METHOD1( getPeerByPublicKey, @@ -51,7 +50,8 @@ namespace iroha { getTopBlockInfo, iroha::expected::Result()); - MOCK_METHOD0(countPeers, iroha::expected::Result()); + MOCK_METHOD1(countPeers, + iroha::expected::Result(bool)); MOCK_METHOD0(countDomains, iroha::expected::Result()); MOCK_METHOD0(countTransactions, diff --git a/test/module/irohad/ametsuchi/peer_query_wsv_test.cpp b/test/module/irohad/ametsuchi/peer_query_wsv_test.cpp index c2eee90eb23..60477ffb9f7 100644 --- a/test/module/irohad/ametsuchi/peer_query_wsv_test.cpp +++ b/test/module/irohad/ametsuchi/peer_query_wsv_test.cpp @@ -29,15 +29,34 @@ TEST_F(PeerQueryWSVTest, GetPeers) { std::vector> peers; std::shared_ptr peer1 = std::make_shared( - "some-address", "0A", std::nullopt); + "some-address", "0A", std::nullopt, false); std::shared_ptr peer2 = std::make_shared( - "another-address", "0B", std::nullopt); + "another-address", "0B", std::nullopt, false); peers.push_back(peer1); peers.push_back(peer2); - EXPECT_CALL(*wsv_query_, getPeers()).WillOnce(::testing::Return(peers)); + EXPECT_CALL(*wsv_query_, getPeers(false)).WillOnce(::testing::Return(peers)); - auto result = peer_query_->getLedgerPeers(); + auto result = peer_query_->getLedgerPeers(false); + ASSERT_TRUE(result); + ASSERT_THAT(result.get(), + testing::ElementsAreArray(peers.cbegin(), peers.cend())); +} + +/** + * @given storage with sync peer + * @when trying to get all peers in the ledger + * @then get a vector with all peers in the ledger + */ +TEST_F(PeerQueryWSVTest, GetSyncPeers) { + std::vector> peers = { + std::make_shared( + "some-address", "0A", std::nullopt, true), + std::make_shared( + "another-address", "0B", std::nullopt, true)}; + EXPECT_CALL(*wsv_query_, 
getPeers(true)).WillOnce(::testing::Return(peers)); + + auto const result = peer_query_->getLedgerPeers(true); ASSERT_TRUE(result); ASSERT_THAT(result.get(), testing::ElementsAreArray(peers.cbegin(), peers.cend())); diff --git a/test/module/irohad/ametsuchi/postgres_executor_test.cpp b/test/module/irohad/ametsuchi/postgres_executor_test.cpp index c4486ad94b8..9779e4e701a 100644 --- a/test/module/irohad/ametsuchi/postgres_executor_test.cpp +++ b/test/module/irohad/ametsuchi/postgres_executor_test.cpp @@ -328,7 +328,7 @@ namespace iroha { CHECK_SUCCESSFUL_RESULT( execute(*mock_command_factory->constructRemovePeer(kPublicKey))); - auto peers = wsv_query->getPeers(); + auto peers = wsv_query->getPeers(false); ASSERT_TRUE(peers); ASSERT_TRUE(std::find_if(peers->begin(), peers->end(), @@ -409,7 +409,7 @@ namespace iroha { CHECK_SUCCESSFUL_RESULT( execute(*mock_command_factory->constructRemovePeer(kPublicKey))); - auto peers = wsv_query->getPeers(); + auto peers = wsv_query->getPeers(false); ASSERT_TRUE(peers); ASSERT_TRUE(std::find_if(peers->begin(), peers->end(), @@ -428,7 +428,7 @@ namespace iroha { CHECK_SUCCESSFUL_RESULT( execute(*mock_command_factory->constructRemovePeer(kPublicKey))); - auto peers = wsv_query->getPeers(); + auto peers = wsv_query->getPeers(false); ASSERT_TRUE(peers); ASSERT_TRUE(std::find_if(peers->begin(), peers->end(), diff --git a/test/module/irohad/ametsuchi/postgres_query_executor_test.cpp b/test/module/irohad/ametsuchi/postgres_query_executor_test.cpp index 87cb84ade3d..0192d32c7f1 100644 --- a/test/module/irohad/ametsuchi/postgres_query_executor_test.cpp +++ b/test/module/irohad/ametsuchi/postgres_query_executor_test.cpp @@ -142,7 +142,8 @@ namespace iroha { : peer{"127.0.0.1", "fa6ce0e0c21ce1ceaf4ba38538c1868185e9feefeafff3e42d94f218000a5" "533", - std::nullopt} { + std::nullopt, + false} { role_permissions.set( shared_model::interface::permissions::Role::kAddMySignatory); grantable_permission = @@ -1181,7 +1182,7 @@ namespace iroha { 
::testing::Types; TYPED_TEST_SUITE(GetPagedTransactionsExecutorTest, - QueryTxPaginationTestingTypes, ); + QueryTxPaginationTestingTypes, ); /** * @given initialized storage, user has 3 transactions committed diff --git a/test/module/irohad/ametsuchi/rdb_wsv_query_test.cpp b/test/module/irohad/ametsuchi/rdb_wsv_query_test.cpp index cdf7b5f9004..0297780bfeb 100644 --- a/test/module/irohad/ametsuchi/rdb_wsv_query_test.cpp +++ b/test/module/irohad/ametsuchi/rdb_wsv_query_test.cpp @@ -58,12 +58,33 @@ namespace iroha { * @then peer list successfully received */ TEST_F(RdbWsvQueryTest, GetPeers) { - shared_model::plain::Peer peer1{"some-address", "0a", std::nullopt}; + shared_model::plain::Peer peer1{ + "some-address", "0a", std::nullopt, false}; command->insertPeer(peer1); - shared_model::plain::Peer peer2{"another-address", "0b", std::nullopt}; + shared_model::plain::Peer peer2{ + "another-address", "0b", std::nullopt, false}; command->insertPeer(peer2); - auto result = query->getPeers(); + auto result = query->getPeers(false); + ASSERT_TRUE(result); + ASSERT_THAT(*result, + testing::ElementsAre(testing::Pointee(testing::Eq(peer1)), + testing::Pointee(testing::Eq(peer2)))); + } + + /** + * @given storage with sync peers + * @when trying to get existing peers + * @then peer list successfully received + */ + TEST_F(RdbWsvQueryTest, GetSyncPeers) { + shared_model::plain::Peer peer1{"some-address", "0a", std::nullopt, true}; + command->insertPeer(peer1); + shared_model::plain::Peer peer2{ + "another-address", "0b", std::nullopt, true}; + command->insertPeer(peer2); + + auto result = query->getPeers(true); ASSERT_TRUE(result); ASSERT_THAT(*result, testing::ElementsAre(testing::Pointee(testing::Eq(peer1)), @@ -76,7 +97,26 @@ namespace iroha { * @then stored peer is successfully returned */ TEST_F(RdbWsvQueryTest, GetPeerWithoutTls) { - shared_model::plain::Peer peer1{"some-address", "0a", std::nullopt}; + shared_model::plain::Peer peer1{ + "some-address", "0a", std::nullopt, 
false}; + command->insertPeer(peer1); + + auto result = query->getPeerByPublicKey( + shared_model::interface::types::PublicKeyHexStringView{ + peer1.pubkey()}); + ASSERT_TRUE(result); + ASSERT_THAT(*result, testing::Pointee(testing::Eq(peer1))) + << "Inserted " << peer1.toString() << ", got " + << (*result)->toString(); + } + + /** + * @given storage with sync peer without TLS certificate + * @when stored peer is queried + * @then stored peer is successfully returned + */ + TEST_F(RdbWsvQueryTest, GetSyncPeerWithoutTls) { + shared_model::plain::Peer peer1{"some-address", "0c", std::nullopt, true}; command->insertPeer(peer1); auto result = query->getPeerByPublicKey( @@ -94,7 +134,25 @@ namespace iroha { * @then stored peer is successfully returned */ TEST_F(RdbWsvQueryTest, GetPeerWithTls) { - shared_model::plain::Peer peer1{"some-address", "0a", "tls"}; + shared_model::plain::Peer peer1{"some-address", "0d", "tls", false}; + command->insertPeer(peer1); + + auto result = query->getPeerByPublicKey( + shared_model::interface::types::PublicKeyHexStringView{ + peer1.pubkey()}); + ASSERT_TRUE(result); + ASSERT_THAT(*result, testing::Pointee(testing::Eq(peer1))) + << "Inserted " << peer1.toString() << ", got " + << (*result)->toString(); + } + + /** + * @given storage with sync peer with TLS certificate + * @when stored peer is queried + * @then stored peer is successfully returned + */ + TEST_F(RdbWsvQueryTest, GetSyncPeerWithTls) { + shared_model::plain::Peer peer1{"some-address", "0a", "tls", true}; command->insertPeer(peer1); auto result = query->getPeerByPublicKey( diff --git a/test/module/irohad/ametsuchi/rocksdb_executor_test.cpp b/test/module/irohad/ametsuchi/rocksdb_executor_test.cpp index ba3cd66a88e..61959ae70f7 100644 --- a/test/module/irohad/ametsuchi/rocksdb_executor_test.cpp +++ b/test/module/irohad/ametsuchi/rocksdb_executor_test.cpp @@ -487,7 +487,7 @@ namespace iroha::ametsuchi { CHECK_SUCCESSFUL_RESULT( 
execute(*mock_command_factory->constructRemovePeer(kPublicKey))); - auto peers = wsv_query->getPeers(); + auto peers = wsv_query->getPeers(false); ASSERT_TRUE(peers); ASSERT_TRUE(std::find_if(peers->begin(), peers->end(), @@ -568,7 +568,7 @@ namespace iroha::ametsuchi { CHECK_SUCCESSFUL_RESULT( execute(*mock_command_factory->constructRemovePeer(kPublicKey))); - auto peers = wsv_query->getPeers(); + auto peers = wsv_query->getPeers(false); ASSERT_TRUE(peers); ASSERT_TRUE(std::find_if(peers->begin(), peers->end(), @@ -587,7 +587,7 @@ namespace iroha::ametsuchi { CHECK_SUCCESSFUL_RESULT( execute(*mock_command_factory->constructRemovePeer(kPublicKey))); - auto peers = wsv_query->getPeers(); + auto peers = wsv_query->getPeers(false); ASSERT_TRUE(peers); ASSERT_TRUE(std::find_if(peers->begin(), peers->end(), diff --git a/test/module/irohad/ametsuchi/wsv_query_test.cpp b/test/module/irohad/ametsuchi/wsv_query_test.cpp index 02d8403f759..6ddee23c110 100644 --- a/test/module/irohad/ametsuchi/wsv_query_test.cpp +++ b/test/module/irohad/ametsuchi/wsv_query_test.cpp @@ -52,20 +52,45 @@ namespace iroha { * @then peer list successfully received */ TEST_F(WsvQueryTest, GetPeers) { - ASSERT_EQ(query->countPeers().assumeValue(), 0); + ASSERT_EQ(query->countPeers(false).assumeValue(), 0); - shared_model::plain::Peer peer1{"some-address", "0a", std::nullopt}; + shared_model::plain::Peer peer1{ + "some-address", "0a", std::nullopt, false}; command->insertPeer(peer1); - shared_model::plain::Peer peer2{"another-address", "0b", std::nullopt}; + shared_model::plain::Peer peer2{ + "another-address", "0b", std::nullopt, false}; command->insertPeer(peer2); - auto result = query->getPeers(); + auto result = query->getPeers(false); ASSERT_TRUE(result); ASSERT_THAT(*result, testing::ElementsAre(testing::Pointee(testing::Eq(peer1)), testing::Pointee(testing::Eq(peer2)))); - ASSERT_EQ(query->countPeers().assumeValue(), 2); + ASSERT_EQ(query->countPeers(false).assumeValue(), 2); + } + + /** + * 
@given storage with sync peers + * @when trying to get existing peers + * @then peer list successfully received + */ + TEST_F(WsvQueryTest, GetSyncPeers) { + ASSERT_EQ(query->countPeers(true).assumeValue(), 0); + + shared_model::plain::Peer peer1{"some-address", "0a", std::nullopt, true}; + command->insertPeer(peer1); + shared_model::plain::Peer peer2{ + "another-address", "0b", std::nullopt, true}; + command->insertPeer(peer2); + + auto result = query->getPeers(true); + ASSERT_TRUE(result); + ASSERT_THAT(*result, + testing::ElementsAre(testing::Pointee(testing::Eq(peer1)), + testing::Pointee(testing::Eq(peer2)))); + + ASSERT_EQ(query->countPeers(true).assumeValue(), 2); } TEST_F(WsvQueryTest, countDomains) { @@ -79,12 +104,12 @@ namespace iroha { } TEST_F(WsvQueryTest, countPeers) { - ASSERT_EQ(query->countPeers().assumeValue(), 0); + ASSERT_EQ(query->countPeers(false).assumeValue(), 0); command->insertPeer( - shared_model::plain::Peer{"127.0.0.1", "111", std::nullopt}); + shared_model::plain::Peer{"127.0.0.1", "111", std::nullopt, false}); command->insertPeer( - shared_model::plain::Peer{"127.0.0.2", "222", std::nullopt}); - ASSERT_EQ(query->countPeers().assumeValue(), 2); + shared_model::plain::Peer{"127.0.0.2", "222", std::nullopt, false}); + ASSERT_EQ(query->countPeers(false).assumeValue(), 2); } TEST_F(WsvQueryTest, countTransactions) { diff --git a/test/module/irohad/consensus/yac/mock_yac_hash_gate.hpp b/test/module/irohad/consensus/yac/mock_yac_hash_gate.hpp index 8a78d1e9310..786879ff53b 100644 --- a/test/module/irohad/consensus/yac/mock_yac_hash_gate.hpp +++ b/test/module/irohad/consensus/yac/mock_yac_hash_gate.hpp @@ -24,6 +24,7 @@ namespace iroha { MOCK_METHOD((std::optional), processRoundSwitch, (consensus::Round const &, + shared_model::interface::types::PeerList const &, shared_model::interface::types::PeerList const &), (override)); diff --git a/test/module/irohad/consensus/yac/yac_gate_test.cpp b/test/module/irohad/consensus/yac/yac_gate_test.cpp 
index c992f9ee90c..012bf2b8ef0 100644 --- a/test/module/irohad/consensus/yac/yac_gate_test.cpp +++ b/test/module/irohad/consensus/yac/yac_gate_test.cpp @@ -96,6 +96,8 @@ class YacGateTest : public ::testing::Test { auto peer = makePeer("127.0.0.1", "111"_hex_pubkey); ledger_state = std::make_shared( shared_model::interface::types::PeerList{std::move(peer)}, + shared_model::interface::types::PeerList{ + makePeer("127.0.0.1", "222"_hex_pubkey)}, block->height() - 1, block->prevHash()); diff --git a/test/module/irohad/consensus/yac/yac_hash_provider_test.cpp b/test/module/irohad/consensus/yac/yac_hash_provider_test.cpp index ce26e46932c..67e3b60fa42 100644 --- a/test/module/irohad/consensus/yac/yac_hash_provider_test.cpp +++ b/test/module/irohad/consensus/yac/yac_hash_provider_test.cpp @@ -43,10 +43,15 @@ auto signature() { TEST(YacHashProviderTest, MakeYacHashTest) { YacHashProviderImpl hash_provider; iroha::consensus::Round round{1, 0}; - auto peer = makePeer("127.0.0.1", "111"_hex_pubkey); shared_model::crypto::Hash block_hash(std::string{"hash"}); + auto ledger_state = std::make_shared( - shared_model::interface::types::PeerList{std::move(peer)}, 1, block_hash); + shared_model::interface::types::PeerList{ + makePeer("127.0.0.1", "111"_hex_pubkey)}, + shared_model::interface::types::PeerList{ + makePeer("127.0.0.2", "222"_hex_pubkey)}, + 1, + block_hash); auto proposal = std::make_shared(); EXPECT_CALL(*proposal, hash()) .WillRepeatedly( @@ -80,10 +85,15 @@ TEST(YacHashProviderTest, MakeYacHashTest) { TEST(YacHashProviderTest, ToModelHashTest) { YacHashProviderImpl hash_provider; iroha::consensus::Round round{1, 0}; - auto peer = makePeer("127.0.0.1", "111"_hex_pubkey); + shared_model::crypto::Hash block_hash(std::string{"hash"}); auto ledger_state = std::make_shared( - shared_model::interface::types::PeerList{std::move(peer)}, 1, block_hash); + shared_model::interface::types::PeerList{ + makePeer("127.0.0.1", "111"_hex_pubkey)}, + 
shared_model::interface::types::PeerList{ + makePeer("127.0.0.2", "222"_hex_pubkey)}, + 1, + block_hash); auto proposal = std::make_shared(); EXPECT_CALL(*proposal, hash()) .WillRepeatedly( diff --git a/test/module/irohad/consensus/yac/yac_rainy_day_test.cpp b/test/module/irohad/consensus/yac/yac_rainy_day_test.cpp index c490789a5ef..7b881c04ad7 100644 --- a/test/module/irohad/consensus/yac/yac_rainy_day_test.cpp +++ b/test/module/irohad/consensus/yac/yac_rainy_day_test.cpp @@ -155,7 +155,9 @@ TEST_F(YacTest, ValidCaseWhenReceiveOnVoteAfterReject) { setNetworkOrderCheckerSingleVote( my_order.value(), testing::AnyOf(next_reject_hash), kFixedRandomNumber); - yac->processRoundSwitch(next_reject_hash.vote_round, my_order->getPeers()); + yac->processRoundSwitch(next_reject_hash.vote_round, + my_order->getPeers(), + shared_model::interface::types::PeerList{}); yac->vote(next_reject_hash, my_order.value()); // -- now yac receives a vote from another peer when we already have a reject diff --git a/test/module/irohad/consensus/yac/yac_synchronization_test.cpp b/test/module/irohad/consensus/yac/yac_synchronization_test.cpp index 0b1671ec030..75a6eeaa6b9 100644 --- a/test/module/irohad/consensus/yac/yac_synchronization_test.cpp +++ b/test/module/irohad/consensus/yac/yac_synchronization_test.cpp @@ -81,14 +81,18 @@ class YacSynchronizationTest : public YacTest { i++) { top_hash_ = createHash(Round{i, 0}); setNetworkOrderCheckerSingleVote(order, top_hash_.value(), 2); - yac->processRoundSwitch(top_hash_->vote_round, order.getPeers()); + yac->processRoundSwitch(top_hash_->vote_round, + order.getPeers(), + shared_model::interface::types::PeerList{}); yac->vote(top_hash_.value(), order); yac->onState(network_util.createVotes(voters_, top_hash_.value())); } const YacHash next_hash = createHash( {initial_round.block_round + number_of_committed_rounds_, 0}); setNetworkOrderCheckerSingleVote(order, next_hash, 2); - yac->processRoundSwitch(next_hash.vote_round, order.getPeers()); + 
yac->processRoundSwitch(next_hash.vote_round, + order.getPeers(), + shared_model::interface::types::PeerList{}); yac->vote(next_hash, order); } diff --git a/test/module/irohad/multi_sig_transactions/gossip_propagation_strategy_test.cpp b/test/module/irohad/multi_sig_transactions/gossip_propagation_strategy_test.cpp index 3e30662b939..2835189063d 100644 --- a/test/module/irohad/multi_sig_transactions/gossip_propagation_strategy_test.cpp +++ b/test/module/irohad/multi_sig_transactions/gossip_propagation_strategy_test.cpp @@ -78,7 +78,8 @@ PropagationData subscribeAndEmit(boost::optional data, uint32_t amount, uint32_t take) { auto query = std::make_shared(); - EXPECT_CALL(*query, getLedgerPeers()).WillRepeatedly(testing::Return(data)); + EXPECT_CALL(*query, getLedgerPeers(false)) + .WillRepeatedly(testing::Return(data)); auto pbfactory = std::make_shared(); EXPECT_CALL(*pbfactory, createPeerQuery()) .WillRepeatedly(testing::Return(boost::make_optional( @@ -194,7 +195,8 @@ TEST(GossipPropagationStrategyTest, MultipleSubsEmission) { EXPECT_CALL(*pbfactory, createPeerQuery()) .WillRepeatedly(testing::Return(boost::make_optional( std::shared_ptr(query)))); - EXPECT_CALL(*query, getLedgerPeers()).WillRepeatedly(testing::Return(peers)); + EXPECT_CALL(*query, getLedgerPeers(false)) + .WillRepeatedly(testing::Return(peers)); iroha::GossipPropagationStrategyParams gossip_params; gossip_params.emission_period = 1ms; gossip_params.amount_per_once = amount; diff --git a/test/module/irohad/network/block_loader_test.cpp b/test/module/irohad/network/block_loader_test.cpp index 46e046a4a5a..ea97fc28b6e 100644 --- a/test/module/irohad/network/block_loader_test.cpp +++ b/test/module/irohad/network/block_loader_test.cpp @@ -86,7 +86,7 @@ class BlockLoaderTest : public testing::Test { } void setPeerQuery() { - EXPECT_CALL(*peer_query, getLedgerPeers()) + EXPECT_CALL(*peer_query, getLedgerPeers(false)) .WillRepeatedly(Return(std::vector{peer})); EXPECT_CALL( *peer_query, diff --git 
a/test/module/irohad/ordering/on_demand_ordering_gate_test.cpp b/test/module/irohad/ordering/on_demand_ordering_gate_test.cpp index a1189e86a6d..1838a12e566 100644 --- a/test/module/irohad/ordering/on_demand_ordering_gate_test.cpp +++ b/test/module/irohad/ordering/on_demand_ordering_gate_test.cpp @@ -57,11 +57,14 @@ class OnDemandOrderingGateTest : public ::testing::Test { std::move(ufactory), tx_cache, 1000, - getTestLogger("OrderingGate")); + getTestLogger("OrderingGate"), + false); auto peer = makePeer("127.0.0.1", "111"_hex_pubkey); ledger_state = std::make_shared( shared_model::interface::types::PeerList{std::move(peer)}, + shared_model::interface::types::PeerList{ + makePeer("127.0.0.1", "222"_hex_pubkey)}, round.block_round, shared_model::crypto::Hash{std::string{"hash"}}); } diff --git a/test/module/irohad/simulator/simulator_test.cpp b/test/module/irohad/simulator/simulator_test.cpp index 49d4de1a251..a7bd37907ea 100644 --- a/test/module/irohad/simulator/simulator_test.cpp +++ b/test/module/irohad/simulator/simulator_test.cpp @@ -74,6 +74,8 @@ class SimulatorTest : public ::testing::Test { std::shared_ptr simulator; shared_model::interface::types::PeerList ledger_peers{ makePeer("127.0.0.1", "111"_hex_pubkey)}; + shared_model::interface::types::PeerList ledger_sync_peers{ + makePeer("127.0.0.1", "222"_hex_pubkey)}; }; auto makeProposal(int height) { @@ -139,6 +141,7 @@ TEST_F(SimulatorTest, ValidWhenPreviousBlock) { auto ledger_state = std::make_shared( ledger_peers, + ledger_sync_peers, proposal->height() - 1, shared_model::crypto::Hash{std::string("hash")}); OrderingEvent ordering_event{proposal, consensus::Round{}, ledger_state}; @@ -215,6 +218,7 @@ TEST_F(SimulatorTest, SomeFailingTxs) { auto ledger_state = std::make_shared( ledger_peers, + ledger_sync_peers, proposal->height() - 1, shared_model::crypto::Hash{std::string("hash")}); OrderingEvent ordering_event{ diff --git a/test/module/irohad/synchronizer/synchronizer_test.cpp 
b/test/module/irohad/synchronizer/synchronizer_test.cpp index c710eb7d9b1..3b044485e47 100644 --- a/test/module/irohad/synchronizer/synchronizer_test.cpp +++ b/test/module/irohad/synchronizer/synchronizer_test.cpp @@ -92,10 +92,12 @@ class SynchronizerTest : public ::testing::Test { ON_CALL(*block_query, getTopBlockHeight()) .WillByDefault(Return(kInitTopBlockHeight)); ON_CALL(*mutable_factory, commit_(_)) - .WillByDefault(Return(ByMove(expected::makeValue( - std::make_shared(ledger_peers, - commit_message->height(), - commit_message->hash()))))); + .WillByDefault( + Return(ByMove(expected::makeValue(std::make_shared( + ledger_peers, + shared_model::interface::types::PeerList{}, + commit_message->height(), + commit_message->hash()))))); EXPECT_CALL(*mutable_factory, preparedCommitEnabled()) .WillRepeatedly(Return(false)); EXPECT_CALL(*mutable_factory, commitPrepared(_)).Times(0); @@ -109,7 +111,10 @@ class SynchronizerTest : public ::testing::Test { getTestLogger("Synchronizer")); ledger_state = std::make_shared( - ledger_peers, commit_message->height() - 1, commit_message->prevHash()); + ledger_peers, + shared_model::interface::types::PeerList{}, + commit_message->height() - 1, + commit_message->prevHash()); } std::shared_ptr makeCommit( @@ -269,7 +274,10 @@ TEST_F(SynchronizerTest, ValidWhenValidChainMultipleBlocks) { auto target_commit = makeCommit(target_height); EXPECT_CALL(*mutable_factory, commit_(_)) .WillOnce(Return(ByMove(expected::makeValue(std::make_shared( - ledger_peers, target_height, target_commit->hash()))))); + ledger_peers, + shared_model::interface::types::PeerList{}, + target_height, + target_commit->hash()))))); std::vector> commits{ commit_message, target_commit}; chainValidatorExpectChain(*chain_validator, commits); @@ -586,7 +594,10 @@ TEST_F(SynchronizerTest, VotedForBlockCommitPrepared) { EXPECT_CALL(*mutable_factory, commitPrepared(_)) .WillOnce(Return( ByMove(CommitResult{expected::makeValue(std::make_shared( - ledger_peers, kHeight, 
commit_message->hash()))}))); + ledger_peers, + shared_model::interface::types::PeerList{}, + kHeight, + commit_message->hash()))}))); EXPECT_CALL(*mutable_factory, commit_(_)).Times(0); @@ -638,8 +649,11 @@ TEST_F(SynchronizerTest, VotedForThisCommitPreparedFailure) { mutableStorageExpectChain(*mutable_factory, {commit_message}); EXPECT_CALL(*mutable_factory, commit_(_)) - .WillOnce(Return(ByMove(expected::makeValue( - std::make_shared(ledger_peers, kHeight, hash))))); + .WillOnce(Return(ByMove(expected::makeValue(std::make_shared( + ledger_peers, + shared_model::interface::types::PeerList{}, + kHeight, + hash))))); auto commit_event = synchronizer->processOutcome(consensus::PairValid( consensus::Round{kHeight, 1}, ledger_state, commit_message)); diff --git a/test/module/irohad/torii/processor/transaction_processor_test.cpp b/test/module/irohad/torii/processor/transaction_processor_test.cpp index c10dbf48714..74c3735d84a 100644 --- a/test/module/irohad/torii/processor/transaction_processor_test.cpp +++ b/test/module/irohad/torii/processor/transaction_processor_test.cpp @@ -50,9 +50,11 @@ class TransactionProcessorTest : public ::testing::Test { std::make_shared(), getTestLogger("TransactionProcessor")); - auto peer = makePeer("127.0.0.1", "111"_hex_pubkey); ledger_state = std::make_shared( - shared_model::interface::types::PeerList{std::move(peer)}, + shared_model::interface::types::PeerList{ + makePeer("127.0.0.1", "111"_hex_pubkey)}, + shared_model::interface::types::PeerList{ + makePeer("127.0.0.2", "222"_hex_pubkey)}, round.block_round - 1, shared_model::crypto::Hash{std::string("hash")}); } diff --git a/test/module/irohad/validation/chain_validation_test.cpp b/test/module/irohad/validation/chain_validation_test.cpp index 2924581fc50..33d94c90b45 100644 --- a/test/module/irohad/validation/chain_validation_test.cpp +++ b/test/module/irohad/validation/chain_validation_test.cpp @@ -32,12 +32,23 @@ class ChainValidationTest : public ::testing::Test { 
supermajority_checker, getTestLogger("ChainValidator")); storage = std::make_shared(); peers = std::vector>(); - - auto peer = std::make_shared(); - EXPECT_CALL(*peer, pubkey()) - .WillRepeatedly(ReturnRefOfCopy( - iroha::bytestringToHexstring(std::string(32, '0')))); - peers.push_back(peer); + sync_peers = std::vector>(); + + { + auto peer = std::make_shared(); + EXPECT_CALL(*peer, pubkey()) + .WillRepeatedly(ReturnRefOfCopy( + iroha::bytestringToHexstring(std::string(32, '0')))); + peers.push_back(peer); + } + + { + auto peer = std::make_shared(); + EXPECT_CALL(*peer, pubkey()) + .WillRepeatedly(ReturnRefOfCopy( + iroha::bytestringToHexstring(std::string(32, '1')))); + sync_peers.push_back(peer); + } auto signature = std::make_shared(); EXPECT_CALL(*signature, publicKey()) @@ -65,6 +76,7 @@ class ChainValidationTest : public ::testing::Test { std::vector> signatures; std::vector> peers; + std::vector> sync_peers; shared_model::crypto::Hash prev_hash = shared_model::crypto::Hash(std::string{"previous top hash"}); shared_model::interface::types::HeightType prev_height = 1; @@ -85,8 +97,8 @@ TEST_F(ChainValidationTest, ValidCase) { .WillOnce(DoAll(SaveArg<0>(&block_signatures_amount), Return(true))); EXPECT_CALL(*storage, applyIf(block, _)) - .WillOnce( - InvokeArgument<1>(block, LedgerState{peers, prev_height, prev_hash})); + .WillOnce(InvokeArgument<1>( + block, LedgerState{peers, sync_peers, prev_height, prev_hash})); ASSERT_TRUE(validator->validateAndApply(block, *storage)); ASSERT_EQ(boost::size(block->signatures()), block_signatures_amount); @@ -107,7 +119,7 @@ TEST_F(ChainValidationTest, FailWhenDifferentPrevHash) { EXPECT_CALL(*storage, applyIf(block, _)) .WillOnce(InvokeArgument<1>( - block, LedgerState{peers, prev_height, another_hash})); + block, LedgerState{peers, sync_peers, prev_height, another_hash})); ASSERT_FALSE(validator->validateAndApply(block, *storage)); } @@ -124,8 +136,8 @@ TEST_F(ChainValidationTest, FailWhenNoSupermajority) { 
.WillOnce(DoAll(SaveArg<0>(&block_signatures_amount), Return(false))); EXPECT_CALL(*storage, applyIf(block, _)) - .WillOnce( - InvokeArgument<1>(block, LedgerState{peers, prev_height, prev_hash})); + .WillOnce(InvokeArgument<1>( + block, LedgerState{peers, sync_peers, prev_height, prev_hash})); ASSERT_FALSE(validator->validateAndApply(block, *storage)); ASSERT_EQ(boost::size(block->signatures()), block_signatures_amount); diff --git a/test/module/shared_model/interface_mocks.hpp b/test/module/shared_model/interface_mocks.hpp index 300c6e0e8f3..16aa25c03ed 100644 --- a/test/module/shared_model/interface_mocks.hpp +++ b/test/module/shared_model/interface_mocks.hpp @@ -188,6 +188,7 @@ struct MockPeer : public shared_model::interface::Peer { const std::optional &()); MOCK_CONST_METHOD0(clone, MockPeer *()); + MOCK_CONST_METHOD0(isSyncingPeer, bool()); }; inline auto makePeer( From 40c0ce63f7a7a7fed0a35e6e0036defd66e136e1 Mon Sep 17 00:00:00 2001 From: "G. Bazior" Date: Thu, 30 Dec 2021 17:20:15 +0100 Subject: [PATCH 04/14] Replace deprecated param "max_rounds_delay" with "proposal_creation_timeout" (#1662) Update sample config file to have not deprecated DB connection params Signed-off-by: G. 
Bazior Signed-off-by: G.Bazior Signed-off-by: G.Bazior --- example/config.docker | 11 ++++++++++- example/config.postgres.sample | 2 +- example/config.sample | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/example/config.docker b/example/config.docker index b17cb2d7975..8be860ddc7e 100644 --- a/example/config.docker +++ b/example/config.docker @@ -2,12 +2,21 @@ "block_store_path" : "/tmp/block_store/", "torii_port" : 50051, "internal_port" : 10001, - "pg_opt" : "host=some-postgres port=5432 user=postgres password=mysecretpassword", + "database": { + "type": "postgres", + "host": "some-postgres", + "port": 5432, + "user": "postgres", + "password": "mysecretpassword", + "working database": "iroha_default", + "maintenance database": "postgres" + }, "max_proposal_size" : 10, "proposal_delay" : 5000, "vote_delay" : 5000, "mst_enable" : false, "mst_expiration_time" : 1440, "max_rounds_delay": 3000, + "proposal_creation_timeout": 3000, "stale_stream_max_rounds": 2 } diff --git a/example/config.postgres.sample b/example/config.postgres.sample index d356b6f50ee..0ffd91cdbb8 100644 --- a/example/config.postgres.sample +++ b/example/config.postgres.sample @@ -16,7 +16,7 @@ "vote_delay" : 5000, "mst_enable" : false, "mst_expiration_time" : 1440, - "max_rounds_delay": 3000, + "proposal_creation_timeout": 3000, "stale_stream_max_rounds": 2, "metrics": "127.0.0.1:8080", "healthcheck_port": 50508 diff --git a/example/config.sample b/example/config.sample index 9289a65e8fe..e9e4f50d327 100644 --- a/example/config.sample +++ b/example/config.sample @@ -10,7 +10,7 @@ "vote_delay": 5000, "mst_enable": false, "mst_expiration_time": 1440, - "max_rounds_delay": 3000, + "proposal_creation_timeout": 3000, "stale_stream_max_rounds": 2, "metrics": "0.0.0.0:7001", "healthcheck_port": 50508 From 4130e40acddde5a3d0dfe42e7e74da49b23942b0 Mon Sep 17 00:00:00 2001 From: Peter Somogyvari Date: Fri, 31 Dec 2021 03:56:10 -0800 Subject: [PATCH 05/14] docs(build): add zip and 
pkg-config to list of build deps (#1393) These were also missing from my WSL 2 Ubuntu 20.04 installation and had to install them manually before I could build the project successfully. Signed-off-by: Peter Somogyvari Signed-off-by: Peter Somogyvari Co-authored-by: G. Bazior --- docs/source/build/index.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/source/build/index.rst b/docs/source/build/index.rst index b2365f250c4..0ef4119fe25 100644 --- a/docs/source/build/index.rst +++ b/docs/source/build/index.rst @@ -133,7 +133,8 @@ Use this code to install environment dependencies on Debian-based Linux distro. apt-get update; \ apt-get -y --no-install-recommends install \ build-essential ninja-build \ - git ca-certificates tar curl unzip cmake + git ca-certificates tar curl unzip cmake \ + pkg-config zip .. Important:: If you would like to use `Burrow integration <../integrations/burrow.html>`_ you will also need GO. Install it following the instructions on `the official website `_ and then use the following command: From 1922aa48c36d533b60bbdb68e75ab440f7a3458c Mon Sep 17 00:00:00 2001 From: "G. Bazior" Date: Mon, 3 Jan 2022 16:26:50 +0100 Subject: [PATCH 06/14] Iroha 1 compile errors when compiling with g++11 (#1765) * Compilation error fix for g++11 Signed-off-by: G. Bazior Signed-off-by: G.Bazior Signed-off-by: G.Bazior * Corrected after review Signed-off-by: G. 
Bazior Signed-off-by: G.Bazior Signed-off-by: G.Bazior Co-authored-by: G.Bazior --- irohad/ordering/impl/batches_cache.cpp | 1 + irohad/pending_txs_storage/impl/pending_txs_storage_impl.cpp | 2 ++ libs/common/common.hpp | 1 + libs/common/radix_tree.hpp | 1 + 4 files changed, 5 insertions(+) diff --git a/irohad/ordering/impl/batches_cache.cpp b/irohad/ordering/impl/batches_cache.cpp index d66f3924c8e..cd3f049d1a4 100644 --- a/irohad/ordering/impl/batches_cache.cpp +++ b/irohad/ordering/impl/batches_cache.cpp @@ -7,6 +7,7 @@ #include #include +#include #include "interfaces/iroha_internal/transaction_batch.hpp" #include "interfaces/transaction.hpp" diff --git a/irohad/pending_txs_storage/impl/pending_txs_storage_impl.cpp b/irohad/pending_txs_storage/impl/pending_txs_storage_impl.cpp index baa619852ec..7f7f3936cdd 100644 --- a/irohad/pending_txs_storage/impl/pending_txs_storage_impl.cpp +++ b/irohad/pending_txs_storage/impl/pending_txs_storage_impl.cpp @@ -5,6 +5,8 @@ #include "pending_txs_storage/impl/pending_txs_storage_impl.hpp" +#include + #include "ametsuchi/tx_presence_cache_utils.hpp" #include "interfaces/transaction.hpp" #include "multi_sig_transactions/state/mst_state.hpp" diff --git a/libs/common/common.hpp b/libs/common/common.hpp index f29d5da4abe..de3a6cfe145 100644 --- a/libs/common/common.hpp +++ b/libs/common/common.hpp @@ -7,6 +7,7 @@ #define IROHA_LIBS_COMMON_HPP #include +#include #include #include diff --git a/libs/common/radix_tree.hpp b/libs/common/radix_tree.hpp index 6c5a4c23e48..407cd23cf8e 100644 --- a/libs/common/radix_tree.hpp +++ b/libs/common/radix_tree.hpp @@ -8,6 +8,7 @@ #include #include +#include #include #include From ac087ea2bc03cd770ad795d1781d0098e9efe6da Mon Sep 17 00:00:00 2001 From: Alexander Lednev <57529355+iceseer@users.noreply.github.com> Date: Mon, 17 Jan 2022 21:08:05 +0300 Subject: [PATCH 07/14] Fix/Iroha v1.4-rc.1 fixes (#1785) * added 10bit bloom filter Signed-off-by: iceseer Signed-off-by: Alexander Lednev 
<57529355+iceseer@users.noreply.github.com> * space optimization Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> * small-case signatory fixup Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> vcpkg-build to gitignore Signed-off-by: iceseer Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> * radix tree filter enumerate Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> Signed-off-by: iceseer * 2-layer cache for rdb Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> Signed-off-by: iceseer * max_opened_files = 100 Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> Signed-off-by: iceseer * patches from 1.2.1-patch.2 migrated to 1.4 Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> Signed-off-by: iceseer --- .gitignore | 1 + .../ametsuchi/impl/database_cache/cache.hpp | 37 ++++- irohad/ametsuchi/impl/rocksdb_block_query.cpp | 2 +- .../ametsuchi/impl/rocksdb_block_storage.cpp | 10 +- irohad/ametsuchi/impl/rocksdb_common.hpp | 96 ++++++++++-- irohad/ametsuchi/impl/rocksdb_indexer.cpp | 4 +- .../ametsuchi/impl/rocksdb_query_executor.cpp | 11 +- .../impl/rocksdb_temporary_wsv_impl.cpp | 10 +- irohad/ametsuchi/impl/wsv_restorer_impl.cpp | 7 +- irohad/main/application.cpp | 4 +- irohad/main/impl/rocksdb_connection_init.cpp | 5 +- irohad/ordering/impl/batches_cache.cpp | 34 +---- irohad/ordering/impl/batches_cache.hpp | 35 ++++- .../ordering/impl/on_demand_ordering_gate.cpp | 56 ++++--- .../impl/on_demand_ordering_service_impl.cpp | 10 +- .../impl/on_demand_ordering_service_impl.hpp | 4 +- .../ordering/on_demand_ordering_service.hpp | 4 +- libs/common/radix_tree.hpp | 69 ++++++++- .../network/on_demand_os_network_notifier.cpp | 7 +- .../network/on_demand_os_network_notifier.hpp | 7 +- .../irohad/ametsuchi/rocksdb_common_test.cpp | 138 +++++++++++++++++- 
.../on_demand_connection_manager_test.cpp | 2 + .../irohad/ordering/on_demand_os_test.cpp | 8 +- .../module/irohad/ordering/ordering_mocks.hpp | 9 +- test/module/shared_model/interface_mocks.hpp | 4 + 25 files changed, 453 insertions(+), 121 deletions(-) diff --git a/.gitignore b/.gitignore index 609b1e5ea82..98d92390181 100644 --- a/.gitignore +++ b/.gitignore @@ -31,6 +31,7 @@ include/generated/* iroha.conf peers.list cmake-build* +*vcpkg-build* .gtm /.gtm/ diff --git a/irohad/ametsuchi/impl/database_cache/cache.hpp b/irohad/ametsuchi/impl/database_cache/cache.hpp index afd8ebf22a0..d04282ca9ed 100644 --- a/irohad/ametsuchi/impl/database_cache/cache.hpp +++ b/irohad/ametsuchi/impl/database_cache/cache.hpp @@ -8,6 +8,7 @@ #include #include +#include #include #include "common/common.hpp" @@ -21,6 +22,7 @@ namespace iroha::ametsuchi { CachebleSetType cacheable_paths_; std::unique_ptr> cache_; + std::unique_ptr>> tmp_cache_; auto cachebleSearch(std::string_view key) const { auto it = std::lower_bound( @@ -59,25 +61,54 @@ namespace iroha::ametsuchi { template bool get(std::string_view key, Func &&func) { + if (auto *ptr = tmp_cache_->find(key.data(), key.size())) + return *ptr ? 
std::forward(func)(**ptr) : false; if (auto *ptr = cache_->find(key.data(), key.size())) return std::forward(func)(*ptr); return false; } void set(std::string_view key, std::string_view const &value) { + assert(isCacheable(key)); + tmp_cache_->template insert(key.data(), key.size(), value); + } + + void setCommit(std::string_view key, std::string_view const &value) { + assert(isCacheable(key)); + assert(tmp_cache_->find(key.data(), key.size()) == nullptr); cache_->template insert(key.data(), key.size(), value); } auto erase(std::string_view key) { - return cache_->erase(key.data(), key.size()); + return tmp_cache_->template insert(key.data(), key.size(), std::nullopt); + } + + void filterDelete(std::string_view filter) { + cache_->filterEnumerate( + filter.data(), filter.size(), [&](std::string_view key, Type *) { + tmp_cache_->template insert(key.data(), key.size(), std::nullopt); + }); + } + + void rollback() { + tmp_cache_ = std::make_unique>>(); } - auto filterDelete(std::string_view key) { - return cache_->filterDelete(key.data(), key.size()); + void commit() { + tmp_cache_->filterEnumerate( + nullptr, 0ul, [&](std::string_view key, std::optional *value) { + if (*value) + cache_->template insert( + key.data(), key.size(), std::move(**value)); + else + cache_->erase(key.data(), key.size()); + }); + tmp_cache_ = std::make_unique>>(); } void drop() { cache_ = std::make_unique>(); + tmp_cache_ = std::make_unique>>(); } }; diff --git a/irohad/ametsuchi/impl/rocksdb_block_query.cpp b/irohad/ametsuchi/impl/rocksdb_block_query.cpp index 64f29c95249..e83ce7887cd 100644 --- a/irohad/ametsuchi/impl/rocksdb_block_query.cpp +++ b/irohad/ametsuchi/impl/rocksdb_block_query.cpp @@ -33,7 +33,7 @@ namespace iroha::ametsuchi { return std::nullopt; } else if (status.assumeValue()) { auto const &[tx_status] = staticSplitId<1ull>(*status.assumeValue(), "#"); - res = tx_status == "TRUE" ? 1 : 0; + res = (!tx_status.empty() && tx_status[0] == 'T') ? 
1 : 0; } return res; diff --git a/irohad/ametsuchi/impl/rocksdb_block_storage.cpp b/irohad/ametsuchi/impl/rocksdb_block_storage.cpp index 0e4f6e9ce0d..9a1023ca114 100644 --- a/irohad/ametsuchi/impl/rocksdb_block_storage.cpp +++ b/irohad/ametsuchi/impl/rocksdb_block_storage.cpp @@ -110,11 +110,13 @@ void RocksDbBlockStorage::reload() {} void RocksDbBlockStorage::clear() { RocksDbCommon common(db_context_); - if (auto status = common.filterDelete(fmtstrings::kPathWsv); !status.ok()) - log_->error("Unable to delete WSV. Description: {}", status.ToString()); + if (auto res = dropStore(common); expected::hasError(res)) + log_->error("Unable to delete Store. Description: {}", + res.assumeError().description); - if (auto status = common.filterDelete(fmtstrings::kPathStore); !status.ok()) - log_->error("Unable to delete STORE. Description: {}", status.ToString()); + if (auto res = dropWSV(common); expected::hasError(res)) + log_->error("Unable to delete WSV. Description: {}", + res.assumeError().description); } iroha::expected::Result RocksDbBlockStorage::forEach( diff --git a/irohad/ametsuchi/impl/rocksdb_common.hpp b/irohad/ametsuchi/impl/rocksdb_common.hpp index 6160887fed0..bea065a0640 100644 --- a/irohad/ametsuchi/impl/rocksdb_common.hpp +++ b/irohad/ametsuchi/impl/rocksdb_common.hpp @@ -10,10 +10,12 @@ #include #include #include +#include #include #include #include +#include #include #include #include @@ -502,6 +504,7 @@ namespace iroha::ametsuchi { private: expected::Result reinitDB() { assert(db_name_); + transaction_db_.reset(); rocksdb::BlockBasedTableOptions table_options; table_options.block_cache = @@ -509,9 +512,12 @@ namespace iroha::ametsuchi { table_options.block_size = 32 * 1024; // table_options.pin_l0_filter_and_index_blocks_in_cache = true; table_options.cache_index_and_filter_blocks = true; + table_options.filter_policy.reset( + rocksdb::NewBloomFilterPolicy(10, false)); rocksdb::Options options; options.create_if_missing = true; + 
options.max_open_files = 100; options.optimize_filters_for_hits = true; options.table_factory.reset( rocksdb::NewBlockBasedTableFactory(table_options)); @@ -530,6 +536,12 @@ namespace iroha::ametsuchi { return {}; } + void flushDB() { + assert(transaction_db_); + assert(transaction_db_->Flush(rocksdb::FlushOptions()).ok()); + assert(transaction_db_->FlushWAL(true).ok()); + } + template void printStatus(LoggerT &log) { if (transaction_db_) { @@ -588,8 +600,16 @@ namespace iroha::ametsuchi { void prepareTransaction(RocksDBContext &tx_context) { assert(transaction_db_); - tx_context.transaction.reset( - transaction_db_->BeginTransaction(rocksdb::WriteOptions())); + if (tx_context.transaction) { + auto result = transaction_db_->BeginTransaction( + rocksdb::WriteOptions(), + rocksdb::OptimisticTransactionOptions(), + tx_context.transaction.get()); + assert(result == tx_context.transaction.get()); + } else { + tx_context.transaction.reset( + transaction_db_->BeginTransaction(rocksdb::WriteOptions())); + } } }; @@ -641,6 +661,10 @@ namespace iroha::ametsuchi { return tx_context_->key_buffer; } + auto &context() { + return tx_context_; + } + private: auto &transaction() { if (!tx_context_->transaction) @@ -691,9 +715,19 @@ namespace iroha::ametsuchi { c->set(key, valueBuffer()); } + void storeCommit(std::string_view key) { + if (auto c = cache(); c && c->isCacheable(key)) + c->setCommit(key, valueBuffer()); + } + void dropCache() { if (auto c = cache()) - c->drop(); + c->rollback(); + } + + void commitCache() { + if (auto c = cache()) + c->commit(); } public: @@ -725,13 +759,21 @@ namespace iroha::ametsuchi { "rocksdb.block-cache-capacity"); } + auto reinit() { + return tx_context_->db_port->reinitDB(); + } + /// Makes commit to DB auto commit() { rocksdb::Status status; - if (isTransaction()) - status = transaction()->Commit(); + if (isTransaction()) { + while ((status = transaction()->Commit()).IsTryAgain()) + ; + context()->db_port->prepareTransaction(*tx_context_); + } 
+ commitCache(); - transaction().reset(); + assert(status.ok()); return status; } @@ -820,7 +862,7 @@ namespace iroha::ametsuchi { auto status = transaction()->Get(ro, slice, &valueBuffer()); if (status.ok()) - storeInCache(slice.ToStringView()); + storeCommit(slice.ToStringView()); return status; } @@ -890,20 +932,27 @@ namespace iroha::ametsuchi { /// Removes range of items by key-filter template - auto filterDelete(S const &fmtstring, Args &&... args) { + auto filterDelete(uint64_t delete_count, + S const &fmtstring, + Args &&... args) -> std::pair { auto it = seek(fmtstring, std::forward(args)...); if (!it->status().ok()) - return it->status(); + return std::make_pair(false, it->status()); rocksdb::Slice const key(keyBuffer().data(), keyBuffer().size()); if (auto c = cache(); c && c->isCacheable(key.ToStringView())) c->filterDelete(key.ToStringView()); - for (; it->Valid() && it->key().starts_with(key); it->Next()) + bool was_deleted = false; + for (; delete_count-- && it->Valid() && it->key().starts_with(key); + it->Next()) { if (auto status = transaction()->Delete(it->key()); !status.ok()) - return status; + return std::pair(was_deleted, status); + else + was_deleted = true; + } - return it->status(); + return std::pair(was_deleted, it->status()); } private: @@ -1938,13 +1987,28 @@ namespace iroha::ametsuchi { return result; } - inline expected::Result dropWSV(RocksDbCommon &common) { - if (auto status = common.filterDelete(fmtstrings::kPathWsv); !status.ok()) - return makeError(DbErrorCode::kOperationFailed, - "Clear WSV failed."); + template + inline expected::Result dropBranch(RocksDbCommon &common, + S const &fmtstring) { + std::pair status; + do { + status = common.filterDelete(1'000ull, fmtstring); + if (!status.second.ok()) + return makeError( + DbErrorCode::kOperationFailed, "Clear {} failed.", fmtstring); + common.commit(); + } while (status.first); return {}; } + inline expected::Result dropStore(RocksDbCommon &common) { + return dropBranch(common, 
fmtstrings::kPathStore); + } + + inline expected::Result dropWSV(RocksDbCommon &common) { + return dropBranch(common, fmtstrings::kPathWsv); + } + } // namespace iroha::ametsuchi #endif diff --git a/irohad/ametsuchi/impl/rocksdb_indexer.cpp b/irohad/ametsuchi/impl/rocksdb_indexer.cpp index 83d84b01367..5e0126269a5 100644 --- a/irohad/ametsuchi/impl/rocksdb_indexer.cpp +++ b/irohad/ametsuchi/impl/rocksdb_indexer.cpp @@ -22,8 +22,8 @@ void RocksDBIndexer::txHashStatus(const TxPosition &position, const HashType &tx_hash, bool is_committed) { RocksDbCommon common(db_context_); - common.valueBuffer() = - is_committed ? "TRUE" : "FALSE"; // TODO USE ONLY ONE BYTE T OR F + if (is_committed) + common.valueBuffer() = 'T'; common.valueBuffer() += '#'; common.valueBuffer() += std::to_string(position.height); common.valueBuffer() += '#'; diff --git a/irohad/ametsuchi/impl/rocksdb_query_executor.cpp b/irohad/ametsuchi/impl/rocksdb_query_executor.cpp index e7e0e502d38..47cbaa76b4b 100644 --- a/irohad/ametsuchi/impl/rocksdb_query_executor.cpp +++ b/irohad/ametsuchi/impl/rocksdb_query_executor.cpp @@ -5,9 +5,8 @@ #include "ametsuchi/impl/rocksdb_query_executor.hpp" -#include -#include #include "ametsuchi/impl/rocksdb_specific_query_executor.hpp" +#include "common/to_lower.hpp" #include "interfaces/iroha_internal/query_response_factory.hpp" #include "interfaces/queries/blocks_query.hpp" #include "interfaces/queries/query.hpp" @@ -42,16 +41,20 @@ namespace iroha::ametsuchi { auto const &[account, domain] = staticSplitId<2>(query.creatorAccountId()); RocksDbCommon common(tx_context_); - for (auto &signatory : query.signatures()) + std::string pk; + for (auto &signatory : query.signatures()) { + pk.clear(); + toLowerAppend(signatory.publicKey(), pk); if (auto result = forSignatory( - common, account, domain, signatory.publicKey()); + common, account, domain, pk); expected::hasError(result)) { log_->error("code:{}, description:{}", result.assumeError().code, 
result.assumeError().description); return false; } + } return true; } diff --git a/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.cpp b/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.cpp index 26d21ff030e..e463e6b6d11 100644 --- a/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.cpp +++ b/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.cpp @@ -7,8 +7,8 @@ #include "ametsuchi/impl/rocksdb_command_executor.hpp" #include "ametsuchi/impl/rocksdb_common.hpp" -#include "ametsuchi/impl/rocksdb_db_transaction.hpp" #include "ametsuchi/tx_executor.hpp" +#include "common/to_lower.hpp" #include "interfaces/commands/command.hpp" #include "interfaces/permission_to_string.hpp" #include "interfaces/transaction.hpp" @@ -42,16 +42,20 @@ namespace iroha::ametsuchi { else quorum = *result.assumeValue(); - for (auto &signatory : transaction.signatures()) + std::string pk; + for (auto &signatory : transaction.signatures()) { + pk.clear(); + toLowerAppend(signatory.publicKey(), pk); if (auto result = forSignatory( - common, account, domain, signatory.publicKey()); + common, account, domain, pk); expected::hasError(result)) return expected::makeError( validation::CommandError{"signatures validation", 1, result.assumeError().description, false}); + } if (boost::size(transaction.signatures()) < quorum) { auto error_str = "Transaction " + transaction.toString() diff --git a/irohad/ametsuchi/impl/wsv_restorer_impl.cpp b/irohad/ametsuchi/impl/wsv_restorer_impl.cpp index 149ea223888..2246261277a 100644 --- a/irohad/ametsuchi/impl/wsv_restorer_impl.cpp +++ b/irohad/ametsuchi/impl/wsv_restorer_impl.cpp @@ -166,7 +166,8 @@ namespace iroha::ametsuchi { std::shared_ptr bsf) { IROHA_EXPECTED_TRY_GET_VALUE(command_executor_uniq, storage.createCommandExecutor()); - std::shared_ptr command_executor(std::move(command_executor_uniq)); + std::shared_ptr command_executor( + std::move(command_executor_uniq)); std::shared_ptr block_storage_factory{ bsf ? 
std::move(bsf) : std::make_shared()}; @@ -226,12 +227,12 @@ namespace iroha::ametsuchi { wsv_ledger_height = 0; } - /// Commit reindexed blocks every 1000 blocks. For reliability. + /// Commit reindexed blocks every 10 blocks. For reliability. /// When doing reindex of huge blockchain and the procedure is interrupted /// it is important to continue from last commit point to save time. do { auto commit_height = - std::min(wsv_ledger_height + 1000, last_block_in_storage); + std::min(wsv_ledger_height + 10, last_block_in_storage); IROHA_EXPECTED_TRY_GET_VALUE( mutable_storage, storage.createMutableStorage(command_executor, diff --git a/irohad/main/application.cpp b/irohad/main/application.cpp index 849ed8df61b..a130e8cbe03 100644 --- a/irohad/main/application.cpp +++ b/irohad/main/application.cpp @@ -459,8 +459,8 @@ Irohad::RunResult Irohad::initStorage( cache->addCacheblePath(RDB_ROOT /**/ RDB_WSV /**/ RDB_ROLES); cache->addCacheblePath(RDB_ROOT /**/ RDB_WSV /**/ RDB_DOMAIN); - db_context_ = - std::make_shared(std::move(rdb_port)); + db_context_ = std::make_shared( + std::move(rdb_port), std::move(cache)); } break; default: diff --git a/irohad/main/impl/rocksdb_connection_init.cpp b/irohad/main/impl/rocksdb_connection_init.cpp index 6da2dec04a7..d5854cc8eb6 100644 --- a/irohad/main/impl/rocksdb_connection_init.cpp +++ b/irohad/main/impl/rocksdb_connection_init.cpp @@ -53,7 +53,10 @@ namespace { iroha::expected::Result, std::string> RdbConnectionInit::init(StartupWsvDataPolicy startup_wsv_data_policy, iroha::ametsuchi::RocksDbOptions const &opt, - logger::LoggerManagerTreePtr) { + logger::LoggerManagerTreePtr log_manager) { + log_manager->getLogger()->info( + "Working database prepare started(with 'drop_state' flag it can take a " + "long time)..."); return prepareWorkingDatabase(startup_wsv_data_policy, opt); } diff --git a/irohad/ordering/impl/batches_cache.cpp b/irohad/ordering/impl/batches_cache.cpp index cd3f049d1a4..df51261f511 100644 --- 
a/irohad/ordering/impl/batches_cache.cpp +++ b/irohad/ordering/impl/batches_cache.cpp @@ -29,7 +29,7 @@ namespace iroha::ordering { return tx_count_; } - BatchesContext::BatchesSetType const &BatchesContext::getBatchesSet() const { + BatchesContext::BatchesSetType &BatchesContext::getBatchesSet() { return batches_; } @@ -97,7 +97,7 @@ namespace iroha::ordering { }); } - bool BatchesCache::isEmpty() const { + bool BatchesCache::isEmpty() { std::shared_lock lock(batches_cache_cs_); return batches_cache_.getBatchesSet().empty(); } @@ -113,35 +113,9 @@ namespace iroha::ordering { } void BatchesCache::forCachedBatches( - std::function const &f) const { - std::shared_lock lock(batches_cache_cs_); - f(batches_cache_.getBatchesSet()); - } - - void BatchesCache::getTransactions( - size_t requested_tx_amount, - std::vector> - &collection) { - collection.clear(); - collection.reserve(requested_tx_amount); - + std::function const &f) { std::unique_lock lock(batches_cache_cs_); - uint32_t depth_counter = 0ul; - batches_cache_.remove([&](auto &batch, bool &process_iteration) { - auto const txs_count = batch->transactions().size(); - if (collection.size() + txs_count > requested_tx_amount) { - ++depth_counter; - process_iteration = (depth_counter < 8ull); - return false; - } - - collection.insert(std::end(collection), - std::begin(batch->transactions()), - std::end(batch->transactions())); - - used_batches_cache_.insert(batch); - return true; - }); + f(batches_cache_.getBatchesSet()); } void BatchesCache::processReceivedProposal( diff --git a/irohad/ordering/impl/batches_cache.hpp b/irohad/ordering/impl/batches_cache.hpp index 5ffcc20c2d0..e72218d3d22 100644 --- a/irohad/ordering/impl/batches_cache.hpp +++ b/irohad/ordering/impl/batches_cache.hpp @@ -46,7 +46,7 @@ namespace iroha::ordering { public: uint64_t getTxsCount() const; - BatchesSetType const &getBatchesSet() const; + BatchesSetType &getBatchesSet(); bool insert(std::shared_ptr const &batch); @@ -97,18 +97,43 @@ 
namespace iroha::ordering { void remove(const OnDemandOrderingService::HashesSetType &hashes); - bool isEmpty() const; + bool isEmpty(); uint64_t txsCount() const; uint64_t availableTxsCount() const; - void forCachedBatches( - std::function const &f) const; + void forCachedBatches(std::function const &f); + template void getTransactions( size_t requested_tx_amount, std::vector> - &txs); + &collection, + IsProcessedFunc &&is_processed) { + collection.clear(); + collection.reserve(requested_tx_amount); + + std::unique_lock lock(batches_cache_cs_); + uint32_t depth_counter = 0ul; + batches_cache_.remove([&](auto &batch, bool &process_iteration) { + if (std::forward(is_processed)(batch)) + return true; + + auto const txs_count = batch->transactions().size(); + if (collection.size() + txs_count > requested_tx_amount) { + ++depth_counter; + process_iteration = (depth_counter < 8ull); + return false; + } + + collection.insert(std::end(collection), + std::begin(batch->transactions()), + std::end(batch->transactions())); + + used_batches_cache_.insert(batch); + return true; + }); + } void processReceivedProposal( OnDemandOrderingService::CollectionType batches); diff --git a/irohad/ordering/impl/on_demand_ordering_gate.cpp b/irohad/ordering/impl/on_demand_ordering_gate.cpp index bb393470190..e2b03244d1d 100644 --- a/irohad/ordering/impl/on_demand_ordering_gate.cpp +++ b/irohad/ordering/impl/on_demand_ordering_gate.cpp @@ -11,14 +11,17 @@ #include #include #include + #include "ametsuchi/tx_presence_cache.hpp" #include "ametsuchi/tx_presence_cache_utils.hpp" #include "common/visitor.hpp" +#include "datetime/time.hpp" #include "interfaces/iroha_internal/transaction_batch.hpp" #include "interfaces/iroha_internal/transaction_batch_impl.hpp" #include "interfaces/iroha_internal/transaction_batch_parser_impl.hpp" #include "logger/logger.hpp" #include "ordering/impl/on_demand_common.hpp" +#include "validators/field_validator.hpp" using iroha::ordering::OnDemandOrderingGate; @@ 
-125,26 +128,39 @@ OnDemandOrderingGate::processProposalRequest(ProposalEvent const &event) const { void OnDemandOrderingGate::sendCachedTransactions() { assert(not stop_mutex_.try_lock()); // lock must be taken before // TODO iceseer 14.01.21 IR-958 Check that OS is remote - forLocalOS(&OnDemandOrderingService::forCachedBatches, - [this](auto const &batches) { - auto end_iterator = batches.begin(); - auto current_number_of_transactions = 0u; - for (; end_iterator != batches.end(); ++end_iterator) { - auto batch_size = (*end_iterator)->transactions().size(); - if (current_number_of_transactions + batch_size - <= transaction_limit_) { - current_number_of_transactions += batch_size; - } else { - break; - } - } - - if (not batches.empty()) { - network_client_->onBatches( - transport::OdOsNotification::CollectionType{ - batches.begin(), end_iterator}); - } - }); + forLocalOS(&OnDemandOrderingService::forCachedBatches, [this](auto &batches) { + auto end_iterator = batches.begin(); + auto current_number_of_transactions = 0u; + auto const now = iroha::time::now(); + + for (; end_iterator != batches.end();) { + if (std::any_of( + end_iterator->get()->transactions().begin(), + end_iterator->get()->transactions().end(), + [&](const auto &tx) { + return (uint64_t)now + > shared_model::validation::FieldValidator::kMaxDelay + + tx->createdTime(); + })) { + end_iterator = batches.erase(end_iterator); + continue; + } + + auto batch_size = (*end_iterator)->transactions().size(); + if (current_number_of_transactions + batch_size <= transaction_limit_) { + current_number_of_transactions += batch_size; + } else { + break; + } + + ++end_iterator; + } + + if (not batches.empty()) { + network_client_->onBatches(transport::OdOsNotification::CollectionType{ + batches.begin(), end_iterator}); + } + }); } std::shared_ptr diff --git a/irohad/ordering/impl/on_demand_ordering_service_impl.cpp b/irohad/ordering/impl/on_demand_ordering_service_impl.cpp index 5a59bde3f03..235867f1832 100644 --- 
a/irohad/ordering/impl/on_demand_ordering_service_impl.cpp +++ b/irohad/ordering/impl/on_demand_ordering_service_impl.cpp @@ -71,7 +71,7 @@ void OnDemandOrderingServiceImpl::removeFromBatchesCache( batches_cache_.remove(hashes); } -bool OnDemandOrderingServiceImpl::isEmptyBatchesCache() const { +bool OnDemandOrderingServiceImpl::isEmptyBatchesCache() { return batches_cache_.isEmpty(); } @@ -80,7 +80,7 @@ bool OnDemandOrderingServiceImpl::hasEnoughBatchesInCache() const { } void OnDemandOrderingServiceImpl::forCachedBatches( - std::function const &f) const { + std::function const &f) { batches_cache_.forCachedBatches(f); } @@ -144,7 +144,11 @@ OnDemandOrderingServiceImpl::packNextProposals(const consensus::Round &round) { auto now = iroha::time::now(); std::vector> txs; if (!isEmptyBatchesCache()) - batches_cache_.getTransactions(transaction_limit_, txs); + batches_cache_.getTransactions( + transaction_limit_, txs, [&](auto const &batch) { + assert(batch); + return batchAlreadyProcessed(*batch); + }); log_->debug("Packed proposal contains: {} transactions.", txs.size()); return tryCreateProposal(round, txs, now); diff --git a/irohad/ordering/impl/on_demand_ordering_service_impl.hpp b/irohad/ordering/impl/on_demand_ordering_service_impl.hpp index 983d8080108..a16bd72106e 100644 --- a/irohad/ordering/impl/on_demand_ordering_service_impl.hpp +++ b/irohad/ordering/impl/on_demand_ordering_service_impl.hpp @@ -108,12 +108,12 @@ namespace iroha { void removeFromBatchesCache( const OnDemandOrderingService::HashesSetType &hashes); - bool isEmptyBatchesCache() const override; + bool isEmptyBatchesCache() override; bool hasEnoughBatchesInCache() const override; void forCachedBatches( - std::function const &f) const override; + std::function const &f) override; bool hasProposal(consensus::Round round) const override; diff --git a/irohad/ordering/on_demand_ordering_service.hpp b/irohad/ordering/on_demand_ordering_service.hpp index 33727ab90c9..5bb7875d948 100644 --- 
a/irohad/ordering/on_demand_ordering_service.hpp +++ b/irohad/ordering/on_demand_ordering_service.hpp @@ -95,9 +95,9 @@ namespace iroha { * @param f - callback function */ virtual void forCachedBatches( - std::function const &f) const = 0; + std::function const &f) = 0; - virtual bool isEmptyBatchesCache() const = 0; + virtual bool isEmptyBatchesCache() = 0; virtual bool hasEnoughBatchesInCache() const = 0; diff --git a/libs/common/radix_tree.hpp b/libs/common/radix_tree.hpp index 407cd23cf8e..ebb45226d7d 100644 --- a/libs/common/radix_tree.hpp +++ b/libs/common/radix_tree.hpp @@ -75,8 +75,11 @@ namespace iroha { "Context must be with 0 offset."); using Alloc = typename std::allocator_traits::template rebind_alloc; + using AllocStr = + typename std::allocator_traits::template rebind_alloc; mutable NodeContext root_; + std::basic_string, AllocStr> key_name_; Alloc alloc_; template @@ -104,6 +107,16 @@ namespace iroha { return what; } + void createNodeKey(NodeContext const *const from) { + key_name_.clear(); + auto parent = from; + while (parent != &root_) { + key_name_.insert( + key_name_.begin(), parent->key, parent->key + parent->key_sz); + parent = parent->parent; + } + } + NodeContext *&getFromKey(NodeContext *const parent, CharT const *key) const { return parent->children[AlphabetT::position(key[0ul])]; @@ -206,6 +219,24 @@ namespace iroha { return nullptr; } + NodeContext *getChildAfter(NodeContext const *const node, + NodeContext const *const target = nullptr) { + if (!target) + return (node->children_count > 0) ? 
getFirstChild(node) : nullptr; + + assert(target->parent == node); + assert(target->key_sz > 0ul); + + for (auto pos = AlphabetT::position(target->key[0ul]) + 1; + pos < AlphabetT::size(); + ++pos) { + auto const child = node->children[pos]; + if (child != nullptr) + return child; + } + return nullptr; + } + bool compress(NodeContext *const parent, NodeContext *const target, NodeContext *const child) { @@ -315,7 +346,7 @@ namespace iroha { void eraseWithChildren(NodeContext *const from) { NodeContext *node = from; - NodeContext *parent = from->parent; + NodeContext *const parent = from->parent; while (node != parent) node = (node->children_count != 0ul) ? getFirstChild(node) @@ -335,7 +366,7 @@ namespace iroha { public: RadixTree() = default; - explicit RadixTree(AllocT &alloc) : alloc_(alloc) {} + explicit RadixTree(AllocT &alloc) : alloc_(alloc), key_name_(alloc_) {} ~RadixTree() { eraseWithChildren(&root_); @@ -410,6 +441,40 @@ namespace iroha { eraseWithChildren(getFromKey(context.node, context.target_begin)); } } + + template + void filterEnumerate(CharT const *key, uint32_t len, Func &&func) { + SearchContext context; + findNearest(&root_, key, len, context); + + if (context.prefix_remains_len == 0ul) { + auto const target_remains_len = context.target_end - context.target; + NodeContext *const from = (target_remains_len == 0ul) + ? 
context.node + : getFromKey(context.node, context.target_begin); + createNodeKey(from); + + NodeContext *child = nullptr; + NodeContext *node = from; + + do { + while ((child = getChildAfter(node, child))) { + node = child; + child = nullptr; + key_name_.append(node->key, node->key_sz); + } + if (node != &root_) { + if (Node *const n = nodeContextToNode(node); n->data) + std::forward(func)( + std::string_view(key_name_.data(), key_name_.size()), + &(*n->data)); + key_name_.resize(key_name_.size() - node->key_sz); + } + child = node; + node = node->parent; + } while (child != from); + } + } }; } // namespace iroha diff --git a/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.cpp b/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.cpp index 9547161dca6..b6ebeac983d 100644 --- a/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.cpp +++ b/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.cpp @@ -49,10 +49,11 @@ namespace integration_framework::fake_peer { void OnDemandOsNetworkNotifier::onDuplicates(const HashesSetType &hashes) {} void OnDemandOsNetworkNotifier::forCachedBatches( - std::function const &f) const {} + std::function const + &f) {} - bool OnDemandOsNetworkNotifier::isEmptyBatchesCache() const { + bool OnDemandOsNetworkNotifier::isEmptyBatchesCache() { return true; } diff --git a/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.hpp b/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.hpp index c3abf53bd24..b2d5234cea0 100644 --- a/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.hpp +++ b/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.hpp @@ -33,10 +33,11 @@ namespace integration_framework::fake_peer { void onDuplicates(const HashesSetType &hashes) override; void 
forCachedBatches( - std::function const &f) const override; + std::function const + &f) override; - bool isEmptyBatchesCache() const override; + bool isEmptyBatchesCache() override; bool hasEnoughBatchesInCache() const override; diff --git a/test/module/irohad/ametsuchi/rocksdb_common_test.cpp b/test/module/irohad/ametsuchi/rocksdb_common_test.cpp index 3a87ee3751c..9518270dadd 100644 --- a/test/module/irohad/ametsuchi/rocksdb_common_test.cpp +++ b/test/module/irohad/ametsuchi/rocksdb_common_test.cpp @@ -5,6 +5,7 @@ #include #include #include +#include #include "ametsuchi/impl/database_cache/cache.hpp" #include "ametsuchi/impl/rocksdb_common.hpp" @@ -119,9 +120,90 @@ TEST_F(RocksDBTest, DatabaseCacheTest) { ASSERT_EQ(counter, 2ull); } +TEST_F(RocksDBTest, RadixTreeFilterEnum2) { + iroha::RadixTree rt; + std::set expect; + auto insert = [&](std::string_view data, bool do_expected_insert) { + rt.insert(data.data(), data.size(), data.data()); + if (do_expected_insert) + expect.insert(std::string{data}); + }; + + insert("1", true); + insert("12578", true); + insert("125789", true); + insert("1257890000", true); + insert("123", true); + insert("124", true); + + auto filter = [&](std::string_view key, QQQ *data) { + ASSERT_NE(data, nullptr); + ASSERT_FALSE(data->s.empty()); + ASSERT_TRUE(key == data->s); + + auto it = expect.find(data->s); + ASSERT_NE(it, expect.end()); + + expect.erase(it); + }; + + rt.filterEnumerate(nullptr, 0ul, filter); + ASSERT_TRUE(expect.empty()); +} + +TEST_F(RocksDBTest, RadixTreeFilterEnum) { + iroha::RadixTree rt; + std::set expect; + auto insert = [&](std::string_view data, bool do_expected_insert) { + rt.insert(data.data(), data.size(), data.data()); + if (do_expected_insert) + expect.insert(std::string{data}); + }; + + auto filter = [&](std::string_view key, QQQ *data) { + ASSERT_NE(data, nullptr); + ASSERT_FALSE(data->s.empty()); + ASSERT_TRUE(key == data->s); + + auto it = expect.find(data->s); + ASSERT_NE(it, expect.end()); + + 
expect.erase(it); + }; + + insert("1", true); + rt.filterEnumerate("1", 1, filter); + ASSERT_TRUE(expect.empty()); + + insert("12", true); + insert("123", true); + insert("124", true); + rt.filterEnumerate("12", 2, filter); + ASSERT_TRUE(expect.empty()); + + insert("1256", true); + insert("1257", true); + rt.filterEnumerate("125", 3, filter); + ASSERT_TRUE(expect.empty()); + + insert("12578", true); + insert("125789", true); + insert("1257890000", true); + expect.insert("1257"); + rt.filterEnumerate("1257", 4, filter); + ASSERT_TRUE(expect.empty()); +} + TEST_F(RocksDBTest, RadixTreeTest) { iroha::RadixTree rt; + rt.insert("1234", 4, "9"); + rt.filterDelete("123", 3); + ASSERT_TRUE(rt.find("1", 1) == nullptr); + ASSERT_TRUE(rt.find("12", 2) == nullptr); + ASSERT_TRUE(rt.find("123", 3) == nullptr); + ASSERT_TRUE(rt.find("1234", 4) == nullptr); + rt.insert("123", 3, "d"); rt.filterDelete("12", 2); ASSERT_TRUE(rt.find("1", 1) == nullptr); @@ -299,6 +381,18 @@ TEST_F(RocksDBTest, SimpleDelete) { ASSERT_TRUE(status.IsNotFound()); } +TEST_F(RocksDBTest, SimpleInsert) { + RocksDbCommon common(tx_context_); + + common.valueBuffer() = "k777"; + common.put("k777"); + + common.valueBuffer().clear(); + auto status = common.get("k777"); + ASSERT_TRUE(status.ok()); + ASSERT_TRUE(common.valueBuffer() == "k777"); +} + TEST_F(RocksDBTest, SimpleSeek) { RocksDbCommon common(tx_context_); auto it = common.seek("key"); @@ -333,7 +427,45 @@ TEST_F(RocksDBTest, SimpleEnumerateKeys) { TEST_F(RocksDBTest, FilterDelete) { { RocksDbCommon common(tx_context_); - ASSERT_TRUE(common.filterDelete("keY").ok()); + insertDb("ab", "ab"); + insertDb("k", "121"); + ASSERT_TRUE(common.filterDelete(2ull, "keY").second.ok()); + ASSERT_TRUE(common.commit().ok()); + } + { + RocksDbCommon common(tx_context_); + ASSERT_TRUE(common.get(key1_).IsNotFound()); + ASSERT_TRUE(common.get(key2_).IsNotFound()); + } + { + ASSERT_TRUE(readDb(key3_) == value3_); + ASSERT_TRUE(readDb(key4_) == value4_); + 
ASSERT_TRUE(readDb(key5_) == value5_); + } +} + +TEST_F(RocksDBTest, FilterDelete2) { + { + RocksDbCommon common(tx_context_); + ASSERT_TRUE(common.filterDelete(1ull, "keY").second.ok()); + ASSERT_TRUE(common.commit().ok()); + } + { + RocksDbCommon common(tx_context_); + ASSERT_TRUE(common.get(key1_).IsNotFound()); + } + { + ASSERT_TRUE(readDb(key2_) == value2_); + ASSERT_TRUE(readDb(key3_) == value3_); + ASSERT_TRUE(readDb(key4_) == value4_); + ASSERT_TRUE(readDb(key5_) == value5_); + } +} + +TEST_F(RocksDBTest, FilterDelete3) { + { + RocksDbCommon common(tx_context_); + ASSERT_TRUE(common.filterDelete(1000ull, "keY").second.ok()); ASSERT_TRUE(common.commit().ok()); } { @@ -452,7 +584,7 @@ TEST_F(RocksDBTest, Quorum) { TEST_F(RocksDBTest, SortingOrder) { RocksDbCommon common(tx_context_); - common.filterDelete(""); + common.filterDelete(1ull, ""); common.valueBuffer().clear(); ASSERT_TRUE(common.put("5").ok()); @@ -486,7 +618,7 @@ TEST_F(RocksDBTest, SortingOrder) { TEST_F(RocksDBTest, LowerBoundSearch) { RocksDbCommon common(tx_context_); - common.filterDelete(""); + common.filterDelete(1ull, ""); char const *target = "wta1234569#1#2"; char const *target2 = "wta1234367#1#1"; diff --git a/test/module/irohad/ordering/on_demand_connection_manager_test.cpp b/test/module/irohad/ordering/on_demand_connection_manager_test.cpp index aa525834172..6080408a757 100644 --- a/test/module/irohad/ordering/on_demand_connection_manager_test.cpp +++ b/test/module/irohad/ordering/on_demand_connection_manager_test.cpp @@ -7,6 +7,8 @@ #include #include + +#include "datetime/time.hpp" #include "framework/test_logger.hpp" #include "interfaces/iroha_internal/proposal.hpp" #include "module/irohad/ordering/ordering_mocks.hpp" diff --git a/test/module/irohad/ordering/on_demand_os_test.cpp b/test/module/irohad/ordering/on_demand_os_test.cpp index ca23c74038b..d65523ac3be 100644 --- a/test/module/irohad/ordering/on_demand_os_test.cpp +++ b/test/module/irohad/ordering/on_demand_os_test.cpp @@ 
-262,7 +262,7 @@ TEST_F(OnDemandOsTest, PassMissingTransaction) { auto &batch = *batches.at(0); EXPECT_CALL(*mock_cache, check(batchRef(batch))) - .WillOnce(Return(std::vector{ + .WillRepeatedly(Return(std::vector{ iroha::ametsuchi::tx_cache_status_responses::Missing()})); os->onBatches(batches); @@ -288,13 +288,13 @@ TEST_F(OnDemandOsTest, SeveralTransactionsOneCommited) { auto &batch3 = *batches.at(2); EXPECT_CALL(*mock_cache, check(batchRef(batch1))) - .WillOnce(Return(std::vector{ + .WillRepeatedly(Return(std::vector{ iroha::ametsuchi::tx_cache_status_responses::Missing()})); EXPECT_CALL(*mock_cache, check(batchRef(batch2))) - .WillOnce(Return(std::vector{ + .WillRepeatedly(Return(std::vector{ iroha::ametsuchi::tx_cache_status_responses::Committed()})); EXPECT_CALL(*mock_cache, check(batchRef(batch3))) - .WillOnce(Return(std::vector{ + .WillRepeatedly(Return(std::vector{ iroha::ametsuchi::tx_cache_status_responses::Missing()})); os->onBatches(batches); diff --git a/test/module/irohad/ordering/ordering_mocks.hpp b/test/module/irohad/ordering/ordering_mocks.hpp index 250aacab52d..2af8aeb117f 100644 --- a/test/module/irohad/ordering/ordering_mocks.hpp +++ b/test/module/irohad/ordering/ordering_mocks.hpp @@ -35,11 +35,10 @@ namespace iroha::ordering { MOCK_METHOD(void, onCollaborationOutcome, (consensus::Round), (override)); MOCK_METHOD(void, onTxsCommitted, (const HashesSetType &), (override)); MOCK_METHOD(void, onDuplicates, (const HashesSetType &), (override)); - MOCK_CONST_METHOD1( - forCachedBatches, - void(std::function< - void(const OnDemandOrderingService::BatchesSetType &)> const &)); - MOCK_METHOD(bool, isEmptyBatchesCache, (), (const, override)); + MOCK_METHOD1(forCachedBatches, + void(std::function const &)); + MOCK_METHOD(bool, isEmptyBatchesCache, (), (override)); MOCK_METHOD(bool, hasEnoughBatchesInCache, (), (const, override)); MOCK_METHOD(bool, hasProposal, (consensus::Round), (const, override)); MOCK_METHOD(void, processReceivedProposal, 
(CollectionType), (override)); diff --git a/test/module/shared_model/interface_mocks.hpp b/test/module/shared_model/interface_mocks.hpp index 16aa25c03ed..a718c9cf0f4 100644 --- a/test/module/shared_model/interface_mocks.hpp +++ b/test/module/shared_model/interface_mocks.hpp @@ -7,6 +7,8 @@ #define IROHA_SHARED_MODEL_INTERFACE_MOCKS_HPP #include + +#include "datetime/time.hpp" #include "interfaces/commands/command.hpp" #include "interfaces/common_objects/common_objects_factory.hpp" #include "interfaces/common_objects/peer.hpp" @@ -86,7 +88,9 @@ inline auto createMockTransactionWithHash( auto res = std::make_shared>(); + auto now = iroha::time::now(); ON_CALL(*res, hash()).WillByDefault(ReturnRefOfCopy(hash)); + ON_CALL(*res, createdTime()).WillByDefault(testing::Return(now)); return res; } From ee98f0a6715ebea0214917ecb63c9a3eeaa9ef82 Mon Sep 17 00:00:00 2001 From: kuvaldini <47349143+kuvaldini@users.noreply.github.com> Date: Mon, 17 Jan 2022 22:47:50 +0200 Subject: [PATCH 08/14] gha docker tag latest (#1609) * GHA docker.meta: flavor: suffix=....onlatest=true * GHA no dockertag for release * GHA clean up Signed-off-by: kuvaldini Signed-off-by: kuvaldini <47349143+kuvaldini@users.noreply.github.com> --- .github/build-iroha1.src.yml | 112 ++++------------ .github/chatops-gen-matrix.sh | 24 +--- .github/workflows/build-iroha1.yml | 200 +++++++---------------------- 3 files changed, 72 insertions(+), 264 deletions(-) diff --git a/.github/build-iroha1.src.yml b/.github/build-iroha1.src.yml index 61d7aa7309c..8d824764f77 100644 --- a/.github/build-iroha1.src.yml +++ b/.github/build-iroha1.src.yml @@ -70,7 +70,6 @@ jobs: ## anyway please read .github/README.md check_workflow_yaml_coressponds_to_src_yaml: runs-on: ubuntu-20.04 #ubuntu-latest - #container: ubuntu:latest ## This is required as barrier between AWS-hosted runners and GitHub-hosted runners - they have different set of software, so run in container name: Check if github workflows were properly made from sources 
steps: - &step_detect_commented_pr @@ -232,14 +231,6 @@ jobs: matrix_dockerimage_release: ${{steps.matrixes.outputs.matrix_dockerimage_release}} matrix_dockerimage_debug: ${{steps.matrixes.outputs.matrix_dockerimage_debug}} - buildspec_ubuntu: ${{steps.matrixes.outputs.buildspec_ubuntu}} - buildspec_ubuntu_debug: ${{steps.matrixes.outputs.buildspec_ubuntu_release}} - buildspec_ubuntu_release: ${{steps.matrixes.outputs.buildspec_ubuntu_debug}} - buildspec_macos: ${{steps.matrixes.outputs.buildspec_macos}} - buildspec_windows: ${{steps.matrixes.outputs.buildspec_windows}} - buildspec_dockerimage_release: ${{steps.matrixes.outputs.buildspec_dockerimage_release}} - buildspec_dockerimage_debug: ${{steps.matrixes.outputs.buildspec_dockerimage_debug}} - ## Build docker image named 'hyperledger/iroha-builder' with all stuff to compile iroha and its dependancies ## The result docker image is pushed with tags :pr-NUMBER, :commit-HASH, :branch-name, :tag-name, ## and conditional tags :edge for development branch, and :latest for git-tags. 
@@ -318,18 +309,11 @@ jobs: type=ref,event=pr type=ref,event=tag type=schedule - type=edge,branch=develop - # type=semver,pattern={{version}} - # type=semver,pattern={{major}}.{{minor}} - # type=sha,prefix=commit-,format=short - # type=sha,prefix=commit-,format=long ## Docker image will be pushed with tags: ## - hash of file Dockerfile.builder ## - branchname, when branch is pushed ## - pr-NUMBER, when pushed to PR ## - git tag when tag is pushed - ## - semver like 1.2.3 and 1.2 when tag vX.X.X is pushed - ## - tag 'edge' when branch support/1.2.x is pushed ## - schedule, see the docs - &step_docker_meta_ghcr <<: *step_docker_meta @@ -380,29 +364,16 @@ jobs: name: Check if dockertaghash exists in remote registry id: dockertag_already run: | - ## FIXME page size is 100 and cannot be more, this needs to be extended in loop asking tags till the end - exists=$( curl -fL https://hub.docker.com/v2/repositories/$DOCKERHUB_ORG/iroha-builder/tags | - jq 'any( .results[]|.name == "${{env.dockertag}}" ; .)' ) - echo "::set-output name=exists::$exists" - - if test $exists = true ;then - tag=$dockertag - else - tag=edge - fi - container="$DOCKERHUB_ORG/iroha-builder:$tag" - echo "::set-output name=container::$container" - echo "::set-output name=container_tag::$tag" + echo "::set-output name=container::$DOCKERHUB_ORG/iroha-builder:$dockertag" - - if: ${{ steps.dockertag_already.outputs.container_tag != env.dockertag || - steps.dockertag_already.outputs.container == '' }} - name: Possible ERROR, edited but not pushed + name: Possible ERROR, Dockerfile edited but image cannot be pushed + if: ${{ steps.docker_login.outcome != 'success' || steps.build_and_push.outcome != 'success' }} env: container: ${{steps.dockertag_already.outputs.container}} dockertag: ${{env.dockertag}} run: | cat <buildspec -echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/' >buildspec_ubuntu -echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/ && /release/' >buildspec_ubuntu_release -echo "$MATRIX" | awk -v 
IGNORECASE=1 '/ubuntu/ && /debug/' >buildspec_ubuntu_debug -echo "$MATRIX" | awk -v IGNORECASE=1 '/macos/' >buildspec_macos -echo "$MATRIX" | awk -v IGNORECASE=1 '/windows/' >buildspec_windows -## Build Docker images only with GCC-9 (mainstream compiler) -echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/ && /release/ && /gcc-9/' >buildspec_dockerimage_release -echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/ && /debug/ && /gcc-9/' >buildspec_dockerimage_debug - echo "$MATRIX" | json_include >matrix echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/' | json_include >matrix_ubuntu echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/ && /release/' | json_include >matrix_ubuntu_release diff --git a/.github/workflows/build-iroha1.yml b/.github/workflows/build-iroha1.yml index 79136ea1068..43b5eda8f73 100644 --- a/.github/workflows/build-iroha1.yml +++ b/.github/workflows/build-iroha1.yml @@ -71,7 +71,6 @@ jobs: ## anyway please read .github/README.md check_workflow_yaml_coressponds_to_src_yaml: runs-on: ubuntu-20.04 #ubuntu-latest - #container: ubuntu:latest ## This is required as barrier between AWS-hosted runners and GitHub-hosted runners - they have different set of software, so run in container name: Check if github workflows were properly made from sources steps: - name: REF and SHA of commented PR to ENV @@ -237,13 +236,6 @@ jobs: matrix_windows: ${{steps.matrixes.outputs.matrix_windows}} matrix_dockerimage_release: ${{steps.matrixes.outputs.matrix_dockerimage_release}} matrix_dockerimage_debug: ${{steps.matrixes.outputs.matrix_dockerimage_debug}} - buildspec_ubuntu: ${{steps.matrixes.outputs.buildspec_ubuntu}} - buildspec_ubuntu_debug: ${{steps.matrixes.outputs.buildspec_ubuntu_release}} - buildspec_ubuntu_release: ${{steps.matrixes.outputs.buildspec_ubuntu_debug}} - buildspec_macos: ${{steps.matrixes.outputs.buildspec_macos}} - buildspec_windows: ${{steps.matrixes.outputs.buildspec_windows}} - buildspec_dockerimage_release: 
${{steps.matrixes.outputs.buildspec_dockerimage_release}} - buildspec_dockerimage_debug: ${{steps.matrixes.outputs.buildspec_dockerimage_debug}} ## Build docker image named 'hyperledger/iroha-builder' with all stuff to compile iroha and its dependancies ## The result docker image is pushed with tags :pr-NUMBER, :commit-HASH, :branch-name, :tag-name, ## and conditional tags :edge for development branch, and :latest for git-tags. @@ -338,19 +330,12 @@ jobs: type=ref,event=pr type=ref,event=tag type=schedule - type=edge,branch=develop - # type=semver,pattern={{version}} - # type=semver,pattern={{major}}.{{minor}} - # type=sha,prefix=commit-,format=short - # type=sha,prefix=commit-,format=long - ## Docker image will be pushed with tags: - ## - hash of file Dockerfile.builder - ## - branchname, when branch is pushed - ## - pr-NUMBER, when pushed to PR - ## - git tag when tag is pushed - ## - semver like 1.2.3 and 1.2 when tag vX.X.X is pushed - ## - tag 'edge' when branch support/1.2.x is pushed - ## - schedule, see the docs + ## Docker image will be pushed with tags: + ## - hash of file Dockerfile.builder + ## - branchname, when branch is pushed + ## - pr-NUMBER, when pushed to PR + ## - git tag when tag is pushed + ## - schedule, see the docs - uses: docker/metadata-action@v3 name: Docker meta GHCR id: meta_ghcr @@ -361,11 +346,12 @@ jobs: type=ref,event=pr type=ref,event=tag type=schedule - type=edge,branch=develop - # type=semver,pattern={{version}} - # type=semver,pattern={{major}}.{{minor}} - # type=sha,prefix=commit-,format=short - # type=sha,prefix=commit-,format=long + ## Docker image will be pushed with tags: + ## - hash of file Dockerfile.builder + ## - branchname, when branch is pushed + ## - pr-NUMBER, when pushed to PR + ## - git tag when tag is pushed + ## - schedule, see the docs images: ghcr.io/${{ github.repository }}-builder - name: Set up Docker Buildx @@ -406,27 +392,15 @@ jobs: - name: Check if dockertaghash exists in remote registry id: 
dockertag_already run: | - ## FIXME page size is 100 and cannot be more, this needs to be extended in loop asking tags till the end - exists=$( curl -fL https://hub.docker.com/v2/repositories/$DOCKERHUB_ORG/iroha-builder/tags | - jq 'any( .results[]|.name == "${{env.dockertag}}" ; .)' ) - echo "::set-output name=exists::$exists" - - if test $exists = true ;then - tag=$dockertag - else - tag=edge - fi - container="$DOCKERHUB_ORG/iroha-builder:$tag" - echo "::set-output name=container::$container" - echo "::set-output name=container_tag::$tag" - - if: ${{ steps.dockertag_already.outputs.container_tag != env.dockertag || steps.dockertag_already.outputs.container == '' }} - name: Possible ERROR, edited but not pushed + echo "::set-output name=container::$DOCKERHUB_ORG/iroha-builder:$dockertag" + - name: Possible ERROR, Dockerfile edited but image cannot be pushed + if: ${{ steps.docker_login.outcome != 'success' || steps.build_and_push.outcome != 'success' }} env: container: ${{steps.dockertag_already.outputs.container}} dockertag: ${{env.dockertag}} run: | cat </dev/null <<'END' ${{ toJson(needs) }} END - - run: test -n "$container" - env: - container: ${{needs.Docker-iroha-builder.outputs.container}} - - if: ${{ needs.Docker-iroha-builder.outputs.container_tag != needs.Docker-iroha-builder.outputs.dockertag || needs.Docker-iroha-builder.outputs.container == '' }} - name: Possible WARNING - env: + - env: container: ${{needs.Docker-iroha-builder.outputs.container}} - dockertag: ${{needs.Docker-iroha-builder.outputs.dockertag}} - run: | - cat </dev/null <<'END' ${{ toJson(needs) }} END - - run: test -n "$container" - env: - container: ${{needs.Docker-iroha-builder.outputs.container}} - - if: ${{ needs.Docker-iroha-builder.outputs.container_tag != needs.Docker-iroha-builder.outputs.dockertag || needs.Docker-iroha-builder.outputs.container == '' }} - name: Possible WARNING - env: + - env: container: ${{needs.Docker-iroha-builder.outputs.container}} - dockertag: 
${{needs.Docker-iroha-builder.outputs.dockertag}} - run: | - cat < Date: Sun, 30 Jan 2022 18:22:08 +0300 Subject: [PATCH 09/14] Fix/Iroha v1.4-rc.2 fixes (#1824) [Iroha] version fixup [RDB] optimistic db -> transactions db [RDB] cache size reduced to 500 Mb [RDB] column families Signed-off-by: Alexander Lednev <57529355+iceseer@users.noreply.github.com> Signed-off-by: iceseer --- CMakeLists.txt | 2 +- .../impl/rocksdb_command_executor.cpp | 5 +- irohad/ametsuchi/impl/rocksdb_common.hpp | 385 +++++++++++++----- .../ametsuchi/impl/rocksdb_db_transaction.hpp | 3 +- .../ametsuchi/impl/rocksdb_settings_query.cpp | 4 +- .../impl/rocksdb_specific_query_executor.cpp | 17 +- irohad/ametsuchi/impl/rocksdb_wsv_query.cpp | 30 +- irohad/iroha_wsv_diff/iroha_wsv_diff.cpp | 10 +- irohad/main/impl/pg_connection_init.cpp | 22 +- irohad/main/impl/pg_connection_init.hpp | 6 +- .../irohad/ametsuchi/rocksdb_common_test.cpp | 149 +++++-- .../ametsuchi/rocksdb_executor_test.cpp | 2 + .../irohad/ametsuchi/rocksdb_indexer_test.cpp | 4 + 13 files changed, 458 insertions(+), 181 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index dda17a72b48..c7544d27562 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -10,7 +10,7 @@ if(CCACHE_PROGRAM) endif() PROJECT(iroha - VERSION 1.2.0 + VERSION 1.4.0 LANGUAGES C CXX) SET(CMAKE_CXX_STANDARD 17) diff --git a/irohad/ametsuchi/impl/rocksdb_command_executor.cpp b/irohad/ametsuchi/impl/rocksdb_command_executor.cpp index 122df681754..5e943272490 100644 --- a/irohad/ametsuchi/impl/rocksdb_command_executor.cpp +++ b/irohad/ametsuchi/impl/rocksdb_command_executor.cpp @@ -760,6 +760,7 @@ RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( ++counter; return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathSignatories, domain_id, account_name); @@ -927,6 +928,7 @@ RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( ++counter; return true; }, + RocksDBPort::ColumnFamilyType::kWsv, 
fmtstrings::kPathSignatories, domain_id, account_name); @@ -1062,7 +1064,8 @@ RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( RDB_ERROR_CHECK(forAsset( common, asset_name, domain_id)); - auto status = common.get(fmtstrings::kSetting, + auto status = common.get(RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kSetting, iroha::ametsuchi::kMaxDescriptionSizeKey); RDB_ERROR_CHECK(canExist( status, [&] { return fmt::format("Max description size key"); })); diff --git a/irohad/ametsuchi/impl/rocksdb_common.hpp b/irohad/ametsuchi/impl/rocksdb_common.hpp index bea065a0640..c7bfa050eb7 100644 --- a/irohad/ametsuchi/impl/rocksdb_common.hpp +++ b/irohad/ametsuchi/impl/rocksdb_common.hpp @@ -7,6 +7,7 @@ #define IROHA_ROCKSDB_COMMON_HPP #include +#include #include #include #include @@ -17,8 +18,8 @@ #include #include #include -#include #include +#include #include "ametsuchi/impl/database_cache/cache.hpp" #include "ametsuchi/impl/executor_common.hpp" #include "common/disable_warnings.h" @@ -434,6 +435,11 @@ namespace iroha::ametsuchi { assert(db_port); } + ~RocksDBContext() { + transaction.reset(); + db_port.reset(); + } + private: friend class RocksDbCommon; friend struct RocksDBPort; @@ -491,7 +497,7 @@ namespace iroha::ametsuchi { /** * Port to provide access to RocksDB instance. 
*/ - struct RocksDBPort { + struct RocksDBPort final { RocksDBPort(RocksDBPort const &) = delete; RocksDBPort &operator=(RocksDBPort const &) = delete; RocksDBPort() = default; @@ -501,14 +507,54 @@ namespace iroha::ametsuchi { return reinitDB(); } + enum ColumnFamilyType { + kDefault, + kWsv, + kStore, + ////// + kTotal + }; + + ~RocksDBPort() { + closeDb(); + } + private: + struct { + std::string name; + rocksdb::ColumnFamilyHandle *handle; + } cf_handles[ColumnFamilyType::kTotal] = { + {rocksdb::kDefaultColumnFamilyName, nullptr}, + {"wsv", nullptr}, + {"store", nullptr}}; + + void closeDb() { + for (auto &cf : cf_handles) + if (nullptr != cf.handle) { + transaction_db_->DestroyColumnFamilyHandle(cf.handle); + cf.handle = nullptr; + } + transaction_db_.reset(); + } + + void dropColumnFamily(ColumnFamilyType type) { + assert(type < ColumnFamilyType::kTotal); + auto &cf = cf_handles[type]; + + if (cf.handle) { + assert(transaction_db_); + transaction_db_->DropColumnFamily(cf.handle); + transaction_db_->DestroyColumnFamilyHandle(cf.handle); + transaction_db_->CreateColumnFamily({}, cf.name, &cf.handle); + } + } + expected::Result reinitDB() { assert(db_name_); - transaction_db_.reset(); + closeDb(); rocksdb::BlockBasedTableOptions table_options; - table_options.block_cache = - rocksdb::NewLRUCache(1 * 1024 * 1024 * 1024LL); + table_options.block_cache = rocksdb::NewLRUCache(512 * 1024 * 1024LL); table_options.block_size = 32 * 1024; // table_options.pin_l0_filter_and_index_blocks_in_cache = true; table_options.cache_index_and_filter_blocks = true; @@ -517,14 +563,32 @@ namespace iroha::ametsuchi { rocksdb::Options options; options.create_if_missing = true; + options.create_missing_column_families = true; options.max_open_files = 100; options.optimize_filters_for_hits = true; options.table_factory.reset( rocksdb::NewBlockBasedTableFactory(table_options)); - rocksdb::OptimisticTransactionDB *transaction_db; - auto status = rocksdb::OptimisticTransactionDB::Open( - 
options, *db_name_, &transaction_db); + /// print all column families + std::vector colfam; + rocksdb::DB::ListColumnFamilies(options, *db_name_, &colfam); + std::cout << "RocksDB detected column families:" << std::endl; + for (auto const &cf : colfam) std::cout << cf << std::endl; + + std::vector column_families; + for (auto &cf : cf_handles) + column_families.emplace_back( + rocksdb::ColumnFamilyDescriptor{cf.name, {}}); + + std::vector handles; + rocksdb::TransactionDB *transaction_db; + auto status = + rocksdb::TransactionDB::Open(options, + rocksdb::TransactionDBOptions(), + *db_name_, + column_families, + &handles, + &transaction_db); if (!status.ok()) return makeError(DbErrorCode::kInitializeFailed, @@ -532,16 +596,15 @@ namespace iroha::ametsuchi { *db_name_, status.ToString()); + assert(ColumnFamilyType::kTotal == handles.size()); + for (uint32_t ix = 0; ix < handles.size(); ++ix) { + assert(handles[ix]->GetName() == cf_handles[ix].name); + cf_handles[ix].handle = handles[ix]; + } transaction_db_.reset(transaction_db); return {}; } - void flushDB() { - assert(transaction_db_); - assert(transaction_db_->Flush(rocksdb::FlushOptions()).ok()); - assert(transaction_db_->FlushWAL(true).ok()); - } - template void printStatus(LoggerT &log) { if (transaction_db_) { @@ -584,27 +647,18 @@ namespace iroha::ametsuchi { return std::nullopt; } - std::optional getPropStr(const rocksdb::Slice &property) { - if (transaction_db_) { - std::string value; - transaction_db_->GetProperty(property, &value); - return value; - } - return std::nullopt; - } - private: - std::unique_ptr transaction_db_; + std::unique_ptr transaction_db_; std::optional db_name_; friend class RocksDbCommon; void prepareTransaction(RocksDBContext &tx_context) { assert(transaction_db_); if (tx_context.transaction) { - auto result = transaction_db_->BeginTransaction( - rocksdb::WriteOptions(), - rocksdb::OptimisticTransactionOptions(), - tx_context.transaction.get()); + [[maybe_unused]] auto result = + 
transaction_db_->BeginTransaction(rocksdb::WriteOptions(), + rocksdb::TransactionOptions(), + tx_context.transaction.get()); assert(result == tx_context.transaction.get()); } else { tx_context.transaction.reset( @@ -665,6 +719,11 @@ namespace iroha::ametsuchi { return tx_context_; } + auto &port() { + assert(tx_context_); + return tx_context_->db_port; + } + private: auto &transaction() { if (!tx_context_->transaction) @@ -730,50 +789,53 @@ namespace iroha::ametsuchi { c->commit(); } + auto getHandle(RocksDBPort::ColumnFamilyType type) { + assert(type < RocksDBPort::ColumnFamilyType::kTotal); + assert(port()->cf_handles[type].handle != nullptr); + + return port()->cf_handles[type].handle; + } + public: template void printStatus(LoggerT &log) { - tx_context_->db_port->printStatus(log); + port()->printStatus(log); } auto propGetBlockCacheUsage() { - return tx_context_->db_port->getPropUInt64("rocksdb.block-cache-usage"); + return port()->getPropUInt64("rocksdb.block-cache-usage"); } auto propGetCurSzAllMemTables() { - return tx_context_->db_port->getPropUInt64( - "rocksdb.cur-size-all-mem-tables"); + return port()->getPropUInt64("rocksdb.cur-size-all-mem-tables"); } auto propGetNumSnapshots() { - return tx_context_->db_port->getPropUInt64("rocksdb.num-snapshots"); + return port()->getPropUInt64("rocksdb.num-snapshots"); } auto propGetTotalSSTFilesSize() { - return tx_context_->db_port->getPropUInt64( - "rocksdb.total-sst-files-size"); + return port()->getPropUInt64("rocksdb.total-sst-files-size"); } auto propGetBlockCacheCapacity() { - return tx_context_->db_port->getPropUInt64( - "rocksdb.block-cache-capacity"); + return port()->getPropUInt64("rocksdb.block-cache-capacity"); } auto reinit() { - return tx_context_->db_port->reinitDB(); + return port()->reinitDB(); } /// Makes commit to DB auto commit() { rocksdb::Status status; if (isTransaction()) { - while ((status = transaction()->Commit()).IsTryAgain()) - ; - context()->db_port->prepareTransaction(*tx_context_); 
+ if ((status = transaction()->Commit()); !status.ok()) + dropCache(); + else + commitCache(); } - commitCache(); - - assert(status.ok()); + transaction().reset(); return status; } @@ -842,7 +904,9 @@ namespace iroha::ametsuchi { /// Read data from database to @see valueBuffer template - auto get(S const &fmtstring, Args &&... args) { + auto get(RocksDBPort::ColumnFamilyType cf_type, + S const &fmtstring, + Args &&... args) { keyBuffer().clear(); fmt::format_to(keyBuffer(), fmtstring, std::forward(args)...); @@ -860,7 +924,8 @@ namespace iroha::ametsuchi { rocksdb::ReadOptions ro; ro.fill_cache = false; - auto status = transaction()->Get(ro, slice, &valueBuffer()); + auto status = + transaction()->Get(ro, getHandle(cf_type), slice, &valueBuffer()); if (status.ok()) storeCommit(slice.ToStringView()); @@ -869,12 +934,15 @@ namespace iroha::ametsuchi { /// Put data from @see valueBuffer to database template - auto put(S const &fmtstring, Args &&... args) { + auto put(RocksDBPort::ColumnFamilyType cf_type, + S const &fmtstring, + Args &&... args) { keyBuffer().clear(); fmt::format_to(keyBuffer(), fmtstring, std::forward(args)...); rocksdb::Slice const slice(keyBuffer().data(), keyBuffer().size()); - auto status = transaction()->Put(slice, valueBuffer()); + auto status = + transaction()->Put(getHandle(cf_type), slice, valueBuffer()); if (status.ok()) storeInCache(slice.ToStringView()); @@ -884,7 +952,9 @@ namespace iroha::ametsuchi { /// Delete database entry by the key template - auto del(S const &fmtstring, Args &&... args) { + auto del(RocksDBPort::ColumnFamilyType cf_type, + S const &fmtstring, + Args &&... 
args) { keyBuffer().clear(); fmt::format_to(keyBuffer(), fmtstring, std::forward(args)...); @@ -892,19 +962,22 @@ namespace iroha::ametsuchi { if (auto c = cache(); c && c->isCacheable(slice.ToStringView())) c->erase(slice.ToStringView()); - return transaction()->Delete(slice); + return transaction()->Delete(getHandle(cf_type), slice); } /// Searches for the first key that matches a prefix template - auto seek(S const &fmtstring, Args &&... args) { + auto seek(RocksDBPort::ColumnFamilyType cf_type, + S const &fmtstring, + Args &&... args) { keyBuffer().clear(); fmt::format_to(keyBuffer(), fmtstring, std::forward(args)...); rocksdb::ReadOptions ro; ro.fill_cache = false; - std::unique_ptr it(transaction()->GetIterator(ro)); + std::unique_ptr it( + transaction()->GetIterator(ro, getHandle(cf_type))); it->Seek(rocksdb::Slice(keyBuffer().data(), keyBuffer().size())); return it; @@ -925,17 +998,21 @@ namespace iroha::ametsuchi { /// Iterate over all the keys that matches a prefix and call lambda /// with key-value. To stop enumeration callback F must return false. template - auto enumerate(F &&func, S const &fmtstring, Args &&... args) { - auto it = seek(fmtstring, std::forward(args)...); + auto enumerate(F &&func, + RocksDBPort::ColumnFamilyType cf_type, + S const &fmtstring, + Args &&... args) { + auto it = seek(cf_type, fmtstring, std::forward(args)...); return enumerate(it, std::forward(func)); } /// Removes range of items by key-filter template auto filterDelete(uint64_t delete_count, + RocksDBPort::ColumnFamilyType cf_type, S const &fmtstring, Args &&... 
args) -> std::pair { - auto it = seek(fmtstring, std::forward(args)...); + auto it = seek(cf_type, fmtstring, std::forward(args)...); if (!it->status().ok()) return std::make_pair(false, it->status()); @@ -946,7 +1023,8 @@ namespace iroha::ametsuchi { bool was_deleted = false; for (; delete_count-- && it->Valid() && it->key().starts_with(key); it->Next()) { - if (auto status = transaction()->Delete(it->key()); !status.ok()) + if (auto status = transaction()->Delete(getHandle(cf_type), it->key()); + !status.ok()) return std::pair(was_deleted, status); else was_deleted = true; @@ -955,6 +1033,10 @@ namespace iroha::ametsuchi { return std::pair(was_deleted, it->status()); } + void dropTable(RocksDBPort::ColumnFamilyType cf_type) { + port()->dropColumnFamily(cf_type); + } + private: std::shared_ptr tx_context_; std::lock_guard context_guard_; @@ -986,6 +1068,7 @@ namespace iroha::ametsuchi { template inline auto enumerateKeys(RocksDbCommon &rdb, F &&func, + RocksDBPort::ColumnFamilyType cf_type, S const &strformat, Args &&... args) { static_assert( @@ -1002,6 +1085,7 @@ namespace iroha::ametsuchi { - fmtstrings::kDelimiterCountForAField * fmtstrings::kDelimiterSize)); }, + cf_type, strformat, std::forward(args)...); } @@ -1025,9 +1109,11 @@ namespace iroha::ametsuchi { template inline auto enumerateKeysAndValues(RocksDbCommon &rdb, F &&func, + RocksDBPort::ColumnFamilyType cf_type, S const &strformat, Args &&... args) { return rdb.enumerate(makeKVLambda(std::forward(func)), + cf_type, strformat, std::forward(args)...); } @@ -1114,14 +1200,15 @@ namespace iroha::ametsuchi { inline expected::Result executeOperation( RocksDbCommon &common, OperationDescribtionF &&op_formatter, + RocksDBPort::ColumnFamilyType cf_type, Args &&... 
args) { rocksdb::Status status; if constexpr (kOp == kDbOperation::kGet || kOp == kDbOperation::kCheck) - status = common.get(std::forward(args)...); + status = common.get(cf_type, std::forward(args)...); else if constexpr (kOp == kDbOperation::kPut) - status = common.put(std::forward(args)...); + status = common.put(cf_type, std::forward(args)...); else if constexpr (kOp == kDbOperation::kDel) - status = common.del(std::forward(args)...); + status = common.del(cf_type, std::forward(args)...); static_assert(kOp == kDbOperation::kGet || kOp == kDbOperation::kCheck || kOp == kDbOperation::kPut || kOp == kDbOperation::kDel, @@ -1267,10 +1354,13 @@ namespace iroha::ametsuchi { template inline expected::Result, DbError> dbCall( - RocksDbCommon &common, Args &&... args) { + RocksDbCommon &common, + RocksDBPort::ColumnFamilyType cf_type, + Args &&... args) { auto status = executeOperation( common, [&] { return fmt::format(std::forward(args)...); }, + cf_type, std::forward(args)...); RDB_ERROR_CHECK(status); return loadValue(common, status); @@ -1291,8 +1381,11 @@ namespace iroha::ametsuchi { forAccountDetailsCount(RocksDbCommon &common, std::string_view account, std::string_view domain) { - return dbCall( - common, fmtstrings::kAccountDetailsCount, domain, account); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kAccountDetailsCount, + domain, + account); } /** @@ -1306,7 +1399,10 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forStoreVersion(RocksDbCommon &common) { - return dbCall(common, fmtstrings::kStoreVersion); + return dbCall( + common, + RocksDBPort::ColumnFamilyType::kStore, + fmtstrings::kStoreVersion); } /** @@ -1320,7 +1416,8 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forWSVVersion( RocksDbCommon &common) { - return dbCall(common, fmtstrings::kWsvVersion); + return dbCall( + common, 
RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kWsvVersion); } /** @@ -1336,7 +1433,10 @@ namespace iroha::ametsuchi { inline expected::Result, DbError> forBlock( RocksDbCommon &common, uint64_t height) { return dbCall( - common, fmtstrings::kBlockDataInStore, height); + common, + RocksDBPort::ColumnFamilyType::kStore, + fmtstrings::kBlockDataInStore, + height); } /** @@ -1350,7 +1450,9 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forBlocksTotalCount( RocksDbCommon &common) { - return dbCall(common, fmtstrings::kBlocksTotalCount); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kStore, + fmtstrings::kBlocksTotalCount); } /** @@ -1368,8 +1470,11 @@ namespace iroha::ametsuchi { RocksDbCommon &common, std::string_view account, std::string_view domain) { - return dbCall( - common, fmtstrings::kQuorum, domain, account); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kQuorum, + domain, + account); } /** @@ -1384,8 +1489,10 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forTxsTotalCount( RocksDbCommon &common, std::string_view account_id) { - return dbCall( - common, fmtstrings::kTxsTotalCount, account_id); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTxsTotalCount, + account_id); } /** @@ -1399,7 +1506,9 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forTxsTotalCount( RocksDbCommon &common) { - return dbCall(common, fmtstrings::kAllTxsTotalCount); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kAllTxsTotalCount); } /** @@ -1413,7 +1522,9 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forDomainsTotalCount(RocksDbCommon &common) { - return dbCall(common, fmtstrings::kDomainsTotalCount); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, 
+ fmtstrings::kDomainsTotalCount); } /** @@ -1447,7 +1558,7 @@ namespace iroha::ametsuchi { Result, DbError> forRole(RocksDbCommon &common, std::string_view role) { return dbCall( - common, fmtstrings::kRole, role); + common, RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kRole, role); } /** @@ -1462,9 +1573,12 @@ namespace iroha::ametsuchi { inline expected::Result, DbError> forPeersCount( RocksDbCommon &common, bool is_syncing_peer) { if (is_syncing_peer) - return dbCall(common, fmtstrings::kSPeersCount); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kSPeersCount); - return dbCall(common, fmtstrings::kPeersCount); + return dbCall( + common, RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPeersCount); } /** @@ -1482,6 +1596,7 @@ namespace iroha::ametsuchi { shared_model::crypto::Hash const &tx_hash) { return dbCall( common, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kTransactionStatus, std::string_view((char const *)tx_hash.blob().data(), tx_hash.blob().size())); @@ -1506,7 +1621,13 @@ namespace iroha::ametsuchi { uint64_t height, uint64_t index) { return dbCall( - common, fmtstrings::kTransactionByPosition, account, height, index, ts); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTransactionByPosition, + account, + height, + index, + ts); } /** @@ -1527,7 +1648,13 @@ namespace iroha::ametsuchi { uint64_t height, uint64_t index) { return dbCall( - common, fmtstrings::kTransactionByTs, account, ts, height, index); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTransactionByTs, + account, + ts, + height, + index); } /** @@ -1543,7 +1670,7 @@ namespace iroha::ametsuchi { inline expected::Result, DbError> forSettings( RocksDbCommon &common, std::string_view key) { return dbCall( - common, fmtstrings::kSetting, key); + common, RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kSetting, key); } /** @@ -1563,10 +1690,16 @@ namespace iroha::ametsuchi { bool is_sync_peer) { if (is_sync_peer) 
return dbCall( - common, fmtstrings::kSPeerAddress, pubkey); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kSPeerAddress, + pubkey); return dbCall( - common, fmtstrings::kPeerAddress, pubkey); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kPeerAddress, + pubkey); } /** @@ -1584,10 +1717,16 @@ namespace iroha::ametsuchi { RocksDbCommon &common, std::string_view pubkey, bool is_sync_peer) { if (is_sync_peer) return dbCall( - common, fmtstrings::kSPeerTLS, pubkey); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kSPeerTLS, + pubkey); return dbCall( - common, fmtstrings::kPeerTLS, pubkey); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kPeerTLS, + pubkey); } /** @@ -1603,8 +1742,11 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forAsset( RocksDbCommon &common, std::string_view asset, std::string_view domain) { - return dbCall( - common, fmtstrings::kAsset, domain, asset); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kAsset, + domain, + asset); } /** @@ -1620,7 +1762,8 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> expected::Result, DbError> forTopBlockInfo( RocksDbCommon &common) { - return dbCall(common, fmtstrings::kTopBlock); + return dbCall( + common, RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kTopBlock); } /** @@ -1640,8 +1783,12 @@ namespace iroha::ametsuchi { std::string_view account, std::string_view domain, std::string_view role) { - return dbCall( - common, fmtstrings::kAccountRole, domain, account, role); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kAccountRole, + domain, + account, + role); } /** @@ -1665,7 +1812,13 @@ namespace iroha::ametsuchi { std::string_view creator_id, std::string_view key) { return dbCall( - common, fmtstrings::kAccountDetail, domain, account, creator_id, key); + common, + RocksDBPort::ColumnFamilyType::kWsv, + 
fmtstrings::kAccountDetail, + domain, + account, + creator_id, + key); } /** @@ -1685,8 +1838,12 @@ namespace iroha::ametsuchi { std::string_view account, std::string_view domain, std::string_view pubkey) { - return dbCall( - common, fmtstrings::kSignatory, domain, account, pubkey); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kSignatory, + domain, + account, + pubkey); } /** @@ -1702,7 +1859,10 @@ namespace iroha::ametsuchi { inline expected::Result, DbError> forDomain( RocksDbCommon &common, std::string_view domain) { return dbCall( - common, fmtstrings::kDomain, domain); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kDomain, + domain); } /** @@ -1720,8 +1880,11 @@ namespace iroha::ametsuchi { RocksDbCommon &common, std::string_view account, std::string_view domain) { - return dbCall( - common, fmtstrings::kAccountAssetSize, domain, account); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kAccountAssetSize, + domain, + account); } /** @@ -1743,7 +1906,12 @@ namespace iroha::ametsuchi { std::string_view domain, std::string_view asset) { return dbCall( - common, fmtstrings::kAccountAsset, domain, account, asset); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kAccountAsset, + domain, + account, + asset); } /** @@ -1767,7 +1935,12 @@ namespace iroha::ametsuchi { std::string_view domain, std::string_view grantee_account_id) { return dbCall( - common, fmtstrings::kGranted, domain, account, grantee_account_id); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kGranted, + domain, + account, + grantee_account_id); } /** @@ -1796,6 +1969,7 @@ namespace iroha::ametsuchi { } return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathAccountRoles, domain, account); @@ -1972,6 +2146,7 @@ namespace iroha::ametsuchi { return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathAccountDetail, domain, account); @@ -1987,26 +2162,14 @@ namespace 
iroha::ametsuchi { return result; } - template - inline expected::Result dropBranch(RocksDbCommon &common, - S const &fmtstring) { - std::pair status; - do { - status = common.filterDelete(1'000ull, fmtstring); - if (!status.second.ok()) - return makeError( - DbErrorCode::kOperationFailed, "Clear {} failed.", fmtstring); - common.commit(); - } while (status.first); - return {}; - } - inline expected::Result dropStore(RocksDbCommon &common) { - return dropBranch(common, fmtstrings::kPathStore); + common.dropTable(RocksDBPort::ColumnFamilyType::kStore); + return {}; } inline expected::Result dropWSV(RocksDbCommon &common) { - return dropBranch(common, fmtstrings::kPathWsv); + common.dropTable(RocksDBPort::ColumnFamilyType::kWsv); + return {}; } } // namespace iroha::ametsuchi diff --git a/irohad/ametsuchi/impl/rocksdb_db_transaction.hpp b/irohad/ametsuchi/impl/rocksdb_db_transaction.hpp index 19c40ed230a..f158457f8a6 100644 --- a/irohad/ametsuchi/impl/rocksdb_db_transaction.hpp +++ b/irohad/ametsuchi/impl/rocksdb_db_transaction.hpp @@ -39,7 +39,8 @@ namespace iroha::ametsuchi { void commit() override { RocksDbCommon common(tx_context_); - common.commit(); + if (!common.commit().ok()) + throw std::runtime_error("RocksDb commit failed."); } void rollback() override { diff --git a/irohad/ametsuchi/impl/rocksdb_settings_query.cpp b/irohad/ametsuchi/impl/rocksdb_settings_query.cpp index 78480de2a95..5a77c57ca8d 100644 --- a/irohad/ametsuchi/impl/rocksdb_settings_query.cpp +++ b/irohad/ametsuchi/impl/rocksdb_settings_query.cpp @@ -20,7 +20,9 @@ namespace { const shared_model::interface::types::SettingKeyType &key, uint64_t &destination) { RocksDbCommon common(db_context); - auto status = common.get(fmtstrings::kSetting, kMaxDescriptionSizeKey); + auto status = common.get(RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kSetting, + kMaxDescriptionSizeKey); if (auto result = iroha::ametsuchi::canExist( status, [&] { return fmt::format("Max description size key"); }); diff 
--git a/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp b/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp index 5232eabf555..4943098d1a5 100644 --- a/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp +++ b/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp @@ -187,6 +187,7 @@ operator()( roles.emplace_back(role.ToStringView()); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathAccountRoles, domain_id, account_name); @@ -262,6 +263,7 @@ operator()( signatories.emplace_back(signatory.ToStringView()); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathSignatories, domain_id, account_name); @@ -452,7 +454,8 @@ RocksDbSpecificQueryExecutor::readTxs( if (ordering_ptr->field == shared_model::interface::Ordering::Field::kCreatedTime) { - auto it = common.template seek(fmtstrings::kTransactionByTs, + auto it = common.template seek(RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTransactionByTs, query.accountId(), tx_ts, tx_height, @@ -463,7 +466,8 @@ RocksDbSpecificQueryExecutor::readTxs( fmtstrings::kPathTransactionByTs, query.accountId()); } else { - auto it = common.template seek(fmtstrings::kTransactionByPosition, + auto it = common.template seek(RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTransactionByPosition, query.accountId(), tx_height, tx_index, @@ -478,7 +482,8 @@ RocksDbSpecificQueryExecutor::readTxs( } else { if (ordering_ptr->field == shared_model::interface::Ordering::Field::kCreatedTime) { - auto it = common.template seek(fmtstrings::kTransactionByTsLowerBound, + auto it = common.template seek(RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTransactionByTsLowerBound, query.accountId(), bounds.tsFrom); status = enumerateKeysAndValues(common, @@ -487,7 +492,8 @@ RocksDbSpecificQueryExecutor::readTxs( fmtstrings::kPathTransactionByTs, query.accountId()); } else { - auto it = common.template seek(fmtstrings::kTransactionByHeight, + auto it = common.template 
seek(RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTransactionByHeight, query.accountId(), bounds.heightFrom); status = enumerateKeysAndValues(common, @@ -696,6 +702,7 @@ operator()( return false; } }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathAccountAssets, domain_id, account_name); @@ -804,6 +811,7 @@ operator()( } return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathRoles); RDB_ERROR_CHECK(canExist(status, [&] { return "Enumerate roles"; })); @@ -950,6 +958,7 @@ operator()( syncing_peer)); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, path); }; diff --git a/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp b/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp index 66ebc44fe79..659d555fc6e 100644 --- a/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp +++ b/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp @@ -61,6 +61,7 @@ namespace iroha::ametsuchi { signatories.emplace_back(signatory.ToStringView()); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathSignatories, domain_id, account_name); @@ -99,11 +100,15 @@ namespace iroha::ametsuchi { rocksdb::Status status; if (syncing_peers) - status = enumerateKeysAndValues( - common, std::move(callback), fmtstrings::kPathSPeers); + status = enumerateKeysAndValues(common, + std::move(callback), + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kPathSPeers); else - status = enumerateKeysAndValues( - common, std::move(callback), fmtstrings::kPathPeers); + status = enumerateKeysAndValues(common, + std::move(callback), + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kPathPeers); RDB_ERROR_CHECK(canExist( status, [&]() { return fmt::format("Enumerate peers"); })); @@ -194,12 +199,17 @@ namespace iroha::ametsuchi { assert(!hash_str.empty()); uint64_t number; - std::from_chars( - height_str.data(), height_str.data() + height_str.size(), number); - return iroha::TopBlockInfo( - number, - shared_model::crypto::Hash(shared_model::crypto::Blob::fromHexString( - 
std::string{hash_str}))); + auto [ptr, ec]{std::from_chars( + height_str.data(), height_str.data() + height_str.size(), number)}; + if (ec == std::errc()) + return iroha::TopBlockInfo( + number, + shared_model::crypto::Hash( + shared_model::crypto::Blob::fromHexString( + std::string{hash_str}))); + else + return expected::makeError( + "Height in top block info is not a valid number."); } } diff --git a/irohad/iroha_wsv_diff/iroha_wsv_diff.cpp b/irohad/iroha_wsv_diff/iroha_wsv_diff.cpp index c3651477181..fdfef728d10 100644 --- a/irohad/iroha_wsv_diff/iroha_wsv_diff.cpp +++ b/irohad/iroha_wsv_diff/iroha_wsv_diff.cpp @@ -130,7 +130,8 @@ expected::Result initialize() try { FLAGS_pg_opt, "iroha_default", log_manager->getChild("PostgresOptions")->getLogger()), - log_manager)); + log_manager, + true)); pg_pool_wrapper_ = std::move(pool_wrapper); IROHA_EXPECTED_TRY_GET_VALUE( @@ -650,7 +651,7 @@ bool Wsv::from_rocksdb(RocksDbCommon &rdbc) { if (key_starts_with_and_drop(RDB_F_VERSION)) { assert(key.empty()); schema_version = std::string{val}; - assert(schema_version == "1#2#0" && + assert(schema_version == "1#4#0" && "This version of iroha_wsv_diff can check WSV in RocksDB of version 1.2.0 only"); } else if (key_starts_with_and_drop(RDB_NETWORK)) { if (key_starts_with_and_drop(RDB_PEERS)) { @@ -838,6 +839,7 @@ bool Wsv::from_rocksdb(RocksDbCommon &rdbc) { assert(key.empty()); return true; }, + iroha::ametsuchi::RocksDBPort::ColumnFamilyType::kWsv, RDB_ROOT RDB_WSV); for (auto &[permaccid, gp_set] : grant_perms_map) { auto &acc = find_account_by_id(permaccid); @@ -1064,7 +1066,9 @@ int wsv_check() try { return 0; } else { cout << "~~~ WSV-s DIFFER!!! ~~~" << endl; - cout << "For future investigation use difftool on files rocksdb.wsv and postgres.wsv. Just like:" << endl; + cout << "For future investigation use difftool on files rocksdb.wsv and " + "postgres.wsv. 
Just like:" + << endl; cout << " diff <(tail -n+2 postgres.wsv) <(tail -n+2 rockdb.wsv)" << endl; cout << "(Here command tail is to drop first line.)" << endl; return 1; diff --git a/irohad/main/impl/pg_connection_init.cpp b/irohad/main/impl/pg_connection_init.cpp index 0c8f24ff44d..47c5be4a2ed 100644 --- a/irohad/main/impl/pg_connection_init.cpp +++ b/irohad/main/impl/pg_connection_init.cpp @@ -399,19 +399,23 @@ iroha::expected::Result, std::string> PgConnectionInit::init(StartupWsvDataPolicy startup_wsv_data_policy, iroha::ametsuchi::PostgresOptions const &pg_opt, - logger::LoggerManagerTreePtr log_manager) { - return prepareWorkingDatabase(startup_wsv_data_policy, pg_opt) | [&] { - return prepareConnectionPool(KTimesReconnectionStrategyFactory{10}, - pg_opt, - kDbPoolSize, - log_manager); - }; + logger::LoggerManagerTreePtr log_manager, + bool skip_schema_check) { + return prepareWorkingDatabase( + startup_wsv_data_policy, pg_opt, skip_schema_check) + | [&] { + return prepareConnectionPool(KTimesReconnectionStrategyFactory{10}, + pg_opt, + kDbPoolSize, + log_manager); + }; } iroha::expected::Result PgConnectionInit::prepareWorkingDatabase( StartupWsvDataPolicy startup_wsv_data_policy, - const PostgresOptions &options) { + const PostgresOptions &options, + bool skip_schema_check) { return getMaintenanceSession(options) | [&](auto maintenance_sql) { int work_db_exists; *maintenance_sql << "select exists(" @@ -428,7 +432,7 @@ PgConnectionInit::prepareWorkingDatabase( } else { // StartupWsvDataPolicy::kReuse return isSchemaCompatible(options) | [&](bool is_compatible) -> iroha::expected::Result { - if (not is_compatible) { + if (not is_compatible && !skip_schema_check) { return "The schema is not compatible. 
" "Either overwrite the ledger or use a compatible binary " "version."; diff --git a/irohad/main/impl/pg_connection_init.hpp b/irohad/main/impl/pg_connection_init.hpp index 47254f3df4f..53e3c417c62 100644 --- a/irohad/main/impl/pg_connection_init.hpp +++ b/irohad/main/impl/pg_connection_init.hpp @@ -34,11 +34,13 @@ namespace iroha { std::string> init(StartupWsvDataPolicy startup_wsv_data_policy, iroha::ametsuchi::PostgresOptions const &pg_opt, - logger::LoggerManagerTreePtr log_manager); + logger::LoggerManagerTreePtr log_manager, + bool skip_schema_check = false); static expected::Result prepareWorkingDatabase( StartupWsvDataPolicy startup_wsv_data_policy, - const PostgresOptions &options); + const PostgresOptions &options, + bool skip_schema_check = false); static expected::Result, std::string> prepareConnectionPool( diff --git a/test/module/irohad/ametsuchi/rocksdb_common_test.cpp b/test/module/irohad/ametsuchi/rocksdb_common_test.cpp index 9518270dadd..afccdfb6ad5 100644 --- a/test/module/irohad/ametsuchi/rocksdb_common_test.cpp +++ b/test/module/irohad/ametsuchi/rocksdb_common_test.cpp @@ -54,13 +54,13 @@ class RocksDBTest : public ::testing::Test { void insertDb(std::string_view key, std::string_view value) { RocksDbCommon common(tx_context_); common.valueBuffer() = value; - common.put(key); + common.put(RocksDBPort::ColumnFamilyType::kWsv, key); common.commit(); } std::string_view readDb(std::string_view key) { RocksDbCommon common(tx_context_); - common.get(key); + common.get(RocksDBPort::ColumnFamilyType::kWsv, key); return common.valueBuffer(); } @@ -375,33 +375,84 @@ TEST_F(RocksDBTest, SimpleOperation) { TEST_F(RocksDBTest, SimpleDelete) { RocksDbCommon common(tx_context_); - ASSERT_TRUE(common.del(key3_).ok()); + ASSERT_TRUE(common.del(RocksDBPort::ColumnFamilyType::kWsv, key3_).ok()); - auto status = common.get(key3_); + auto status = common.get(RocksDBPort::ColumnFamilyType::kWsv, key3_); ASSERT_TRUE(status.IsNotFound()); } +TEST_F(RocksDBTest, 
RemoveTableTest) { + { + RocksDbCommon common(tx_context_); + common.valueBuffer() = "aaa"; + ASSERT_TRUE( + common.put(RocksDBPort::ColumnFamilyType::kWsv, "test_key").ok()); + ASSERT_TRUE( + common.put(RocksDBPort::ColumnFamilyType::kStore, "test_key").ok()); + ASSERT_TRUE(common.commit().ok()); + + common.valueBuffer().clear(); + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kWsv, "test_key").ok()); + ASSERT_TRUE(common.valueBuffer() == "aaa"); + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kStore, "test_key").ok()); + ASSERT_TRUE(common.valueBuffer() == "aaa"); + ASSERT_TRUE(common.commit().ok()); + + common.dropTable(RocksDBPort::ColumnFamilyType::kWsv); + ASSERT_TRUE(common.get(RocksDBPort::ColumnFamilyType::kWsv, "test_key") + .IsNotFound()); + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kStore, "test_key").ok()); + ASSERT_TRUE(common.valueBuffer() == "aaa"); + + common.valueBuffer() = "bbb"; + ASSERT_TRUE( + common.put(RocksDBPort::ColumnFamilyType::kWsv, "test_key").ok()); + ASSERT_TRUE(common.commit().ok()); + + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kWsv, "test_key").ok()); + ASSERT_TRUE(common.valueBuffer() == "bbb"); + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kStore, "test_key").ok()); + ASSERT_TRUE(common.valueBuffer() == "aaa"); + } + { + RocksDbCommon common(tx_context_); + common.valueBuffer().clear(); + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kWsv, "test_key").ok()); + ASSERT_TRUE(common.valueBuffer() == "bbb"); + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kStore, "test_key").ok()); + ASSERT_TRUE(common.valueBuffer() == "aaa"); + ASSERT_TRUE(common.commit().ok()); + } +} + TEST_F(RocksDBTest, SimpleInsert) { RocksDbCommon common(tx_context_); common.valueBuffer() = "k777"; - common.put("k777"); + common.put(RocksDBPort::ColumnFamilyType::kWsv, "k777"); common.valueBuffer().clear(); - auto status = common.get("k777"); + auto status = 
common.get(RocksDBPort::ColumnFamilyType::kWsv, "k777"); ASSERT_TRUE(status.ok()); ASSERT_TRUE(common.valueBuffer() == "k777"); } TEST_F(RocksDBTest, SimpleSeek) { RocksDbCommon common(tx_context_); - auto it = common.seek("key"); + auto it = common.seek(RocksDBPort::ColumnFamilyType::kWsv, "key"); ASSERT_TRUE(it->status().ok()); ASSERT_TRUE(it->key().ToStringView() == key4_); ASSERT_TRUE(it->value().ToStringView() == value4_); - it = common.seek("ke1"); + it = common.seek(RocksDBPort::ColumnFamilyType::kWsv, "ke1"); ASSERT_TRUE(it->status().ok()); ASSERT_TRUE(it->key().ToStringView() == key3_); @@ -419,6 +470,7 @@ TEST_F(RocksDBTest, SimpleEnumerateKeys) { throw; return true; }, + RocksDBPort::ColumnFamilyType::kWsv, "keY"); ASSERT_TRUE(status.ok()); ASSERT_EQ(counter, 2); @@ -429,13 +481,17 @@ TEST_F(RocksDBTest, FilterDelete) { RocksDbCommon common(tx_context_); insertDb("ab", "ab"); insertDb("k", "121"); - ASSERT_TRUE(common.filterDelete(2ull, "keY").second.ok()); + ASSERT_TRUE( + common.filterDelete(2ull, RocksDBPort::ColumnFamilyType::kWsv, "keY") + .second.ok()); ASSERT_TRUE(common.commit().ok()); } { RocksDbCommon common(tx_context_); - ASSERT_TRUE(common.get(key1_).IsNotFound()); - ASSERT_TRUE(common.get(key2_).IsNotFound()); + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kWsv, key1_).IsNotFound()); + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kWsv, key2_).IsNotFound()); } { ASSERT_TRUE(readDb(key3_) == value3_); @@ -447,12 +503,15 @@ TEST_F(RocksDBTest, FilterDelete) { TEST_F(RocksDBTest, FilterDelete2) { { RocksDbCommon common(tx_context_); - ASSERT_TRUE(common.filterDelete(1ull, "keY").second.ok()); + ASSERT_TRUE( + common.filterDelete(1ull, RocksDBPort::ColumnFamilyType::kWsv, "keY") + .second.ok()); ASSERT_TRUE(common.commit().ok()); } { RocksDbCommon common(tx_context_); - ASSERT_TRUE(common.get(key1_).IsNotFound()); + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kWsv, key1_).IsNotFound()); } { 
ASSERT_TRUE(readDb(key2_) == value2_); @@ -465,13 +524,17 @@ TEST_F(RocksDBTest, FilterDelete2) { TEST_F(RocksDBTest, FilterDelete3) { { RocksDbCommon common(tx_context_); - ASSERT_TRUE(common.filterDelete(1000ull, "keY").second.ok()); + ASSERT_TRUE( + common.filterDelete(1000ull, RocksDBPort::ColumnFamilyType::kWsv, "keY") + .second.ok()); ASSERT_TRUE(common.commit().ok()); } { RocksDbCommon common(tx_context_); - ASSERT_TRUE(common.get(key1_).IsNotFound()); - ASSERT_TRUE(common.get(key2_).IsNotFound()); + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kWsv, key1_).IsNotFound()); + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kWsv, key2_).IsNotFound()); } { ASSERT_TRUE(readDb(key3_) == value3_); @@ -490,6 +553,7 @@ TEST_F(RocksDBTest, SimpleEnumerateKeys2) { throw; return true; }, + RocksDBPort::ColumnFamilyType::kWsv, "key"); ASSERT_TRUE(status.ok()); ASSERT_EQ(counter, 1); @@ -503,6 +567,7 @@ TEST_F(RocksDBTest, SimpleEnumerateKeys3) { throw; return false; }, + RocksDBPort::ColumnFamilyType::kWsv, "keyT") .ok()); ASSERT_TRUE(common @@ -511,6 +576,7 @@ TEST_F(RocksDBTest, SimpleEnumerateKeys3) { throw; return false; }, + RocksDBPort::ColumnFamilyType::kWsv, "ko") .ok()); } @@ -524,13 +590,15 @@ TEST_F(RocksDBTest, NumberRewrite) { { RocksDbCommon common(tx_context_); common.encode(55ull); - ASSERT_TRUE(common.put("{}", "123").ok()); + ASSERT_TRUE( + common.put(RocksDBPort::ColumnFamilyType::kWsv, "{}", "123").ok()); ASSERT_TRUE(common.commit().ok()); } uint64_t value; { RocksDbCommon common(tx_context_); - ASSERT_TRUE(common.get("{}", "123").ok()); + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kWsv, "{}", "123").ok()); common.decode(value); } ASSERT_TRUE(value == 55ull); @@ -540,13 +608,14 @@ TEST_F(RocksDBTest, Skip) { { RocksDbCommon common(tx_context_); common.encode(55ull); - ASSERT_TRUE(common.put("123").ok()); + ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "123").ok()); common.skip(); } { RocksDbCommon 
common(tx_context_); - ASSERT_FALSE(common.get("123").ok()); - ASSERT_TRUE(common.get("123").IsNotFound()); + ASSERT_FALSE(common.get(RocksDBPort::ColumnFamilyType::kWsv, "123").ok()); + ASSERT_TRUE( + common.get(RocksDBPort::ColumnFamilyType::kWsv, "123").IsNotFound()); } } @@ -584,17 +653,17 @@ TEST_F(RocksDBTest, Quorum) { TEST_F(RocksDBTest, SortingOrder) { RocksDbCommon common(tx_context_); - common.filterDelete(1ull, ""); + common.filterDelete(1ull, RocksDBPort::ColumnFamilyType::kWsv, ""); common.valueBuffer().clear(); - ASSERT_TRUE(common.put("5").ok()); - ASSERT_TRUE(common.put("3").ok()); - ASSERT_TRUE(common.put("11").ok()); - ASSERT_TRUE(common.put("6").ok()); - ASSERT_TRUE(common.put("27").ok()); - ASSERT_TRUE(common.put("1").ok()); - ASSERT_TRUE(common.put("144").ok()); - ASSERT_TRUE(common.put("2").ok()); + ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "5").ok()); + ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "3").ok()); + ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "11").ok()); + ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "6").ok()); + ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "27").ok()); + ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "1").ok()); + ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "144").ok()); + ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "2").ok()); std::vector s; common.enumerate( @@ -604,6 +673,7 @@ TEST_F(RocksDBTest, SortingOrder) { s.push_back(std::string(key.ToStringView())); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, ""); ASSERT_EQ(s[0], "1"); @@ -618,36 +688,38 @@ TEST_F(RocksDBTest, SortingOrder) { TEST_F(RocksDBTest, LowerBoundSearch) { RocksDbCommon common(tx_context_); - common.filterDelete(1ull, ""); + common.filterDelete(1ull, RocksDBPort::ColumnFamilyType::kWsv, ""); char const *target = "wta1234569#1#2"; char const *target2 = "wta1234367#1#1"; common.valueBuffer().clear(); - 
ASSERT_TRUE(common.put(target2).ok()); - ASSERT_TRUE(common.put(target).ok()); - ASSERT_TRUE(common.put("wta1234570#2#1").ok()); + ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, target2).ok()); + ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, target).ok()); + ASSERT_TRUE( + common.put(RocksDBPort::ColumnFamilyType::kWsv, "wta1234570#2#1").ok()); { - auto it = common.seek("wta0"); + auto it = common.seek(RocksDBPort::ColumnFamilyType::kWsv, "wta0"); ASSERT_TRUE(it->Valid()); ASSERT_TRUE(it->key().ToStringView() == target2); } { - auto it = common.seek("wta1234411#0#0"); + auto it = + common.seek(RocksDBPort::ColumnFamilyType::kWsv, "wta1234411#0#0"); ASSERT_TRUE(it->Valid()); ASSERT_TRUE(it->key().ToStringView() == target); } { - auto it = common.seek("wta1234411"); + auto it = common.seek(RocksDBPort::ColumnFamilyType::kWsv, "wta1234411"); ASSERT_TRUE(it->Valid()); ASSERT_TRUE(it->key().ToStringView() == target); } { - auto it = common.seek("wta1239411"); + auto it = common.seek(RocksDBPort::ColumnFamilyType::kWsv, "wta1239411"); ASSERT_FALSE(it->Valid()); } } @@ -698,6 +770,7 @@ TEST_F(RocksDBTest, Signatories) { ++counter; return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathSignatories, "dom", "acc"); diff --git a/test/module/irohad/ametsuchi/rocksdb_executor_test.cpp b/test/module/irohad/ametsuchi/rocksdb_executor_test.cpp index 61959ae70f7..a6f3344c795 100644 --- a/test/module/irohad/ametsuchi/rocksdb_executor_test.cpp +++ b/test/module/irohad/ametsuchi/rocksdb_executor_test.cpp @@ -101,6 +101,7 @@ namespace iroha::ametsuchi { roles.emplace_back(r.ToStringView()); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathAccountRoles, domain, account); @@ -234,6 +235,7 @@ namespace iroha::ametsuchi { return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathAccountDetail, domain, account); diff --git a/test/module/irohad/ametsuchi/rocksdb_indexer_test.cpp 
b/test/module/irohad/ametsuchi/rocksdb_indexer_test.cpp index b91e453cf2f..e8b7decbbe7 100644 --- a/test/module/irohad/ametsuchi/rocksdb_indexer_test.cpp +++ b/test/module/irohad/ametsuchi/rocksdb_indexer_test.cpp @@ -107,6 +107,7 @@ TEST_F(RocksDBIndexerTest, SimpleInsertTxByTs) { hash.ToStringView(); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathTransactionByTs, account_1_); ASSERT_TRUE(status.ok()); @@ -124,6 +125,7 @@ TEST_F(RocksDBIndexerTest, SimpleInsertTxByTs) { hash.ToStringView(); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathTransactionByTs, account_2_); ASSERT_TRUE(status.ok()); @@ -219,6 +221,7 @@ TEST_F(RocksDBIndexerTest, SimpleCheckTxByPos) { items[std::string(position.ToStringView())] = data.ToStringView(); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathTransactionByPosition, account_1_); @@ -242,6 +245,7 @@ TEST_F(RocksDBIndexerTest, SimpleCheckTxByPos) { items[std::string(position.ToStringView())] = data.ToStringView(); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathTransactionByPosition, account_2_); From 89906520120e928196cecc7f0a3734d0360a1ecb Mon Sep 17 00:00:00 2001 From: kuvaldini Date: Mon, 7 Feb 2022 15:14:59 +0200 Subject: [PATCH 10/14] Add .github/_README.md Signed-off-by: kuvaldini --- .github/.gitignore | 1 + .github/_README.make-workflows.md | 95 +++++++++++++++++++++++++++++++ .github/_README.md | 72 +++++++++++++---------- .github/chatops-gen-matrix.sh | 2 + 4 files changed, 141 insertions(+), 29 deletions(-) create mode 100644 .github/.gitignore create mode 100644 .github/_README.make-workflows.md diff --git a/.github/.gitignore b/.github/.gitignore new file mode 100644 index 00000000000..841f5dfee44 --- /dev/null +++ b/.github/.gitignore @@ -0,0 +1 @@ +/matrix* diff --git a/.github/_README.make-workflows.md b/.github/_README.make-workflows.md new file mode 100644 index 00000000000..e465cefbd14 --- /dev/null +++ 
b/.github/_README.make-workflows.md @@ -0,0 +1,95 @@ +make-workflows.sh for GitHub Actions +==================================== + +GitHub Workflow description in YAML does not support anchors. +There are several workarounds => anyway [they](#links) come to building-editing workflow yaml from source. +So I suggest yet another one `make-workflows.sh` based on YAML tool [`yq`](https://github.com/mikefarah/yq) version 4. + +\All these code and repo is written around `yq eval 'explode(.)'`\ + +### USAGE +0. [Install](#ways-to-install) `make-workflows.sh` to reachable place inside or outside of your repo, i.e. '.github/' +1. Put your workflows to `.github/*.src.yml` +2. (recommended) `pre-commit install` and edit [`.pre-commit-config.yaml`](/.pre-commit-config.yaml) according to where [`make-workflows.sh`](./make-workflows.sh) is placed. + (altenative optional) Copy or link `pre-commit-hook.sh` to `.git/hooks/pre-commit` + Like `ln -s ../../.github/pre-commit-hook.sh .git/hooks/pre-commit` + +``` +$ ./make-workflows.sh --help +make-workflows: + This script expands '*.src.yml' from $1..$[N-1] (default: REPO_ROOT/.github/) + to $N (default:REPO_ROOT/.github/workflows/) with corresponding name '*.yml' + Main goal is to dereference YAML anchors. + Deals only with Git cached/indexed files until --worktree passed. + DEBUG: use option -x + NOTE: spaces in filenames are not allowed to keep code simplicity. +Usage: + make-workflows.sh [--worktree] [dirs_from... [dir_to]] + make-workflows.sh [--help] +Options: + --worktree List files and get contents from working tree + instead of git index + -h, --help show this help + -x, --trace, +x, --no-trace enable/disable bash trace + -i, --install + --update + -V, --version +``` + +### Automate using pre-commit (recommended) +There is a nice tool [pre-commit](https://pre-commit.com) to do checks and some actions just before commit. The tool is called by Git pre-commit hook. 
+ +Making workflows is better being automated – just +```sh +$ pre-commit install +``` +and add next sample to [`.pre-commit-config.yaml`](/.pre-commit-config.yaml) +```yaml +repos: +- repo: local + hooks: + - id: make-workflows + name: Make GitHub workflows from *.src.yml + entry: bash -c '.github/make-workflows.sh && git add .github/workflows' + language: system + files: '.github/.*\.src\.ya?ml' + pass_filenames: false +``` +> NOTE: pay attention to path to `make-workflows.sh` + +> NOTE2: pay attention to path(s) where source files are stored `files: 'PATH_REGEXP'` + + +## Ways to install +1. raw by hand +``` +curl 'https://raw.githubusercontent.com/kuvaldini/make-workflows.sh/main/make-workflows.sh' -LsSf >make-workflows.sh && chmod +x make-workflows.sh +``` + +2. using own facility, installs to /usr/local/bin +``` +curl 'https://raw.githubusercontent.com/kuvaldini/make-workflows.sh/main/make-workflows.sh' -LsSf | bash -s -- --install +``` +> NOTE: may require `...| sudo bash...` + +3. node package manager +``` +npm install kuvaldini/make-workflows.sh +``` +or `npm install git+https://github.com/kuvaldini/make-workflows.sh` + +4. TODO webinstall + + +### Links +1. https://stackoverflow.com/questions/67368724/share-same-steps-for-different-github-actions-jobs +2. https://github.community/t/support-for-yaml-anchors/16128/60 +3. https://github.com/mithro/actions-includes +4. https://github.com/allejo/gha-workflows +5. this repo https://github.com/kuvaldini/make-workflows.sh + +## License, Authors +Authored by [@Kuvaldini](https://github.com/kuvaldini), 2021. 
+Please keep links to the source code [this repo](https://github.com/kuvaldini/make-workflows.sh) + +Creation of this repo was inspired by [@karfau](https://github.com/karfau) diff --git a/.github/_README.md b/.github/_README.md index 100b4aee2e7..925bb3c86ca 100644 --- a/.github/_README.md +++ b/.github/_README.md @@ -1,31 +1,45 @@ -GitHub Actions -============== +GitHub Actions CI for Iroha +=========================== -GitHub Workflow description in YAML does not support anchors. -There are several workarounds => anyway they come to building-editing workflow yaml from source. -So I suggest yet another one `make-workflows.sh` based on YAML tool `yq`. +> For the **smooth experience** please `pre-commit install` after clone. -### USAGE -0. Move your workflows to `.github/*.src.yml` -1. Put `make-workflows.sh` to directory `.github/` -2. (optional) Copy or link `pre-commit-hook.sh` to `.git/hooks/pre-commit` - Like `ln -s ../../.github/pre-commit-hook.sh .git/hooks/pre-commit` - -### Using pre-commit -```yaml -repos: -- repo: local - hooks: - - id: make-workflows - name: Make GitHub workflows from *.src.yml - entry: bash -c '.github/make-workflows.sh && git add .github/workflows' - language: system - types: [yaml] - pass_filenames: false -``` - -### Links -1. https://stackoverflow.com/questions/67368724/share-same-steps-for-different-github-actions-jobs -2. https://github.community/t/support-for-yaml-anchors/16128/60 -3. https://github.com/mithro/actions-includes -4. https://github.com/allejo/gha-workflows +### List of files +- `build-iroha1.src.yml` + Main file here. GitHub workflow YAML description with ANCHORS, code is not duplicated. + IMPORTANT: regeneration required after after edit, which is automated with pre-commit. +- `workflows/build-iroha1.yml` + Result worflow taken by GitHub and generated with make-workflows script. Long file of repeated code. DO NOT EDIT MANUALLY. 
+- `make-workflows.sh` + A tool to generate workflows/*.yml from *.src.yml - evaluates anchors. [Read the docs](_README.make-workflows.md). +- `chatops-gen-matrix.sh` + Generates build matrixes form convenient user input. See `--help` + ``` + USAGE: + chatops-gen-matrix.sh --help + echo /build [build_spec...] | chatops-gen-matrix.sh + EXAMPLE build_spec: + /build ubuntu release gcc10 + /build macos llvm release + /build all + /build ubuntu all ## build all possible configurations on Ubuntu + /build ubuntu burrow all ## build all possible configurations on Ubuntu with Burrow + AVAILABLE build_spec keywords: + ubuntu|linux + macos + windows + normal + burrow + ursa + release|Release + debug|Debug + gcc|gcc-9|gcc9 + gcc-10|gcc10 + clang|clang-10|clang10 + llvm + msvc + all|everything|beforemerge|before_merge|before-merge|readytomerge|ready-to-merge|ready_to_merge + ``` +- `pre-commit-hook.sh` + See docs of make-workflows. Use instead of pre-commit as `ln -s ../../.github/pre-commit-hook.sh .git/hooks/pre-commit`, reserv alternative. +- `TESTS_ALLOWED_TO_FAIL` + One day tests of Iroha become failing. To fix CI and postpone fixing tests, this file was invented. It allows CI to pass even when listed tests are failing. HACK. DO NOT USE UNLESS YOU DEFINITELY KNOW WHAT'S GOING. 
diff --git a/.github/chatops-gen-matrix.sh b/.github/chatops-gen-matrix.sh index a4494248b07..4997b7f5534 100755 --- a/.github/chatops-gen-matrix.sh +++ b/.github/chatops-gen-matrix.sh @@ -39,6 +39,7 @@ END cat < Date: Mon, 7 Feb 2022 20:17:21 +0200 Subject: [PATCH 11/14] /build skip_testing; And more docs Signed-off-by: kuvaldini --- .github/_README.md | 46 ++++++++++++++------ .github/build-iroha1.src.yml | 40 +++++++++++++++--- .github/chatops-gen-matrix.sh | 7 +-- .github/workflows/build-iroha1.yml | 68 ++++++++++++++++++++++++++---- 4 files changed, 131 insertions(+), 30 deletions(-) diff --git a/.github/_README.md b/.github/_README.md index 925bb3c86ca..e8b4c9e3e13 100644 --- a/.github/_README.md +++ b/.github/_README.md @@ -1,28 +1,49 @@ GitHub Actions CI for Iroha =========================== -> For the **smooth experience** please `pre-commit install` after clone. -### List of files -- `build-iroha1.src.yml` - Main file here. GitHub workflow YAML description with ANCHORS, code is not duplicated. +> ### For the **smooth experience** please `pre-commit install` after `git clone`. + +--------------------- + +USAGE +----- +GitHub Actions Workflow [`Iroha1`](build-iroha1.src.yml) solves task of automated build and deployment Iroha1. +There are events when it is running: +- on **pull request** to Iroha1 main and development branches +- on **push** to main or development branches including event when PR is **merged** +- on **workflow dispatch** to run WF manually on special branch with defined buildspec through web interface or via CLI tool +- **scheduled** every night +- _(under construction PR #XX) on **comment to PR** which contains buildspec._ + +Default `buildspec` is _`/build all`_ + +### Buildspec +Build matrix is a way to select among number of configurations to be built. +Build matrix is generated from buildspec string and handled by script [`chatops-gen-matrix.sh`](./chatops-gen-matrix.sh) + +## List of files +- `build-iroha1.src.yml` + Main file here. 
GitHub workflow YAML description with ANCHORS, code is not duplicated. IMPORTANT: regeneration required after after edit, which is automated with pre-commit. -- `workflows/build-iroha1.yml` +- `workflows/build-iroha1.yml` Result worflow taken by GitHub and generated with make-workflows script. Long file of repeated code. DO NOT EDIT MANUALLY. -- `make-workflows.sh` +- `make-workflows.sh` A tool to generate workflows/*.yml from *.src.yml - evaluates anchors. [Read the docs](_README.make-workflows.md). -- `chatops-gen-matrix.sh` +- `chatops-gen-matrix.sh` Generates build matrixes form convenient user input. See `--help` ``` USAGE: chatops-gen-matrix.sh --help + chatops-gen-matrix.sh /build ubuntu clang + chatops-gen-matrix.sh '/build ubuntu clang; /build macos release ursa' echo /build [build_spec...] | chatops-gen-matrix.sh EXAMPLE build_spec: /build ubuntu release gcc10 - /build macos llvm release + /build macos llvm release; /build macos clang ursa release /build all /build ubuntu all ## build all possible configurations on Ubuntu - /build ubuntu burrow all ## build all possible configurations on Ubuntu with Burrow + /build ubuntu burrow all ## build release and debug on Ubuntu with Burrow AVAILABLE build_spec keywords: ubuntu|linux macos @@ -37,9 +58,10 @@ GitHub Actions CI for Iroha clang|clang-10|clang10 llvm msvc + skip-testing|skip_testing all|everything|beforemerge|before_merge|before-merge|readytomerge|ready-to-merge|ready_to_merge ``` -- `pre-commit-hook.sh` +- `pre-commit-hook.sh` See docs of make-workflows. Use instead of pre-commit as `ln -s ../../.github/pre-commit-hook.sh .git/hooks/pre-commit`, reserv alternative. -- `TESTS_ALLOWED_TO_FAIL` - One day tests of Iroha become failing. To fix CI and postpone fixing tests, this file was invented. It allows CI to pass even when listed tests are failing. HACK. DO NOT USE UNLESS YOU DEFINITELY KNOW WHAT'S GOING. +- `TESTS_ALLOWED_TO_FAIL` + One day tests of Iroha became failing. 
To fix CI and postpone fixing tests, this file was invented. It allows CI to pass even when listed tests are failing. DO NOT USE UNLESS YOU DEFINITELY KNOW WHAT'S GOING. KEEP IT EMPTY. diff --git a/.github/build-iroha1.src.yml b/.github/build-iroha1.src.yml index 8d824764f77..b778e1e2b49 100644 --- a/.github/build-iroha1.src.yml +++ b/.github/build-iroha1.src.yml @@ -15,7 +15,7 @@ name: Iroha1 ## and from 2hrs to 27min without cache ## GitHub's default runners also idles much time before started when build often ## TODO 5. [speed,optimization,resources] Cancel previous runs if their duration is less than 10 minutes, protect almost done builds from being killed -## TODO [prettify,documentation] update status badges in README.md +## TODO [prettify,documentation] update status badges in /README.md ## TODO [minor] windows ## TODO actions/create-release for main branch and tags ## TODO [cmake,dockerimage,iroha-builder] To improve speed of vcpkg step install to iroha-builder @@ -55,9 +55,31 @@ on: ## NOTE: Able to run via cmdline: gh workflow run Iroha1 inputs: build_spec: - description: 'See chatops-gen-matrix.sh, example "/build ubuntu macos gcc-9 burrow"' - required: false - default: '/build' + description: | + See chatops-gen-matrix.sh, example "/build ubuntu macos gcc-9 burrow" + EXAMPLE build_spec: + /build ubuntu release gcc10 + /build macos llvm release; /build macos clang ursa release + /build all + /build ubuntu all ## build all possible configurations on Ubuntu + /build ubuntu burrow all ## build release and debug on Ubuntu with Burrow + AVAILABLE build_spec keywords: + ubuntu|linux + macos + windows + normal + burrow + ursa + release|Release + debug|Debug + gcc|gcc-9|gcc9 + gcc-10|gcc10 + clang|clang-10|clang10 + llvm + skip-testing|skip_testing + all|everything|beforemerge|before_merge|before-merge|readytomerge|ready-to-merge|ready_to_merge + required: true + default: '/build skip_testing ' # issue_comment: # types: [created, edited] schedule: @@ -447,6 
+469,11 @@ jobs: *) echo "::error::Unknown features '$features'"; false ;; esac echo >>$GITHUB_ENV features="$features" + echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fq skip_testing) + # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing + # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF + # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON + # fi - *step_detect_commented_pr - &step_checkout_full <<: *step_checkout @@ -511,8 +538,8 @@ jobs: -DCMAKE_BUILD_TYPE=${{ env.BuildType }} -GNinja $CMAKE_USE - -DTESTING=ON - -DBENCHMARKING=ON + -DTESTING=$( test "$skip_testing" = skip_testing && echo OFF || echo ON ) + -DBENCHMARKING=$( test "$skip_testing" = skip_testing && echo OFF || echo ON ) -DPACKAGE_DEB=ON #-DCMAKE_VERBOSE_MAKEFILE=ON ## Note: use for debug - &step_cmake_build @@ -574,6 +601,7 @@ jobs: build/test_data/** - &step_ctest timeout-minutes: 40 + if: env.skip_testing == '' name: CTest run: | echo ::group::'boilerplate' diff --git a/.github/chatops-gen-matrix.sh b/.github/chatops-gen-matrix.sh index 4997b7f5534..39b306e452a 100755 --- a/.github/chatops-gen-matrix.sh +++ b/.github/chatops-gen-matrix.sh @@ -40,6 +40,7 @@ END USAGE: $(basename $0) --help $(basename $0) /build ubuntu clang + $(basename $0) '/build ubuntu clang; /build macos release ursa' echo /build [build_spec...] 
| $(basename $0) END --help-buildspec @@ -73,7 +74,7 @@ handle_user_line(){ return fi shift - local oses compilers cmake_opts build_types + local oses compilers cmake_opts build_types skip_testing= while [[ $# > 0 ]] ;do case "$1" in @@ -91,7 +92,7 @@ handle_user_line(){ clang|clang-10|clang10) compilers+=" clang clang-10" ;; llvm) compilers+=" $1 " ;; msvc) compilers+=" $1 " ;; - skip-testing|skip_testing) skip_testing=yes ;; + skip-testing|skip_testing) skip_testing='skip_testing' ;; all|everything|beforemerge|before_merge|before-merge|readytomerge|ready-to-merge|ready_to_merge) oses=${oses:-"$ALL_oses"} build_types=${build_types:-"$ALL_build_types"} @@ -127,7 +128,7 @@ handle_user_line(){ for co in $cmake_opts ;do if test $os = macos -a $co = burrow; then continue; fi ##Reduce macos load on CI if test $os = macos -a $co = ursa; then continue; fi ##Reduce macos load on CI - MATRIX+="$os $cc $bt $co"$'\n' + MATRIX+="$os $cc $bt $co $skip_testing"$'\n' done done done diff --git a/.github/workflows/build-iroha1.yml b/.github/workflows/build-iroha1.yml index 43b5eda8f73..1e84431c791 100644 --- a/.github/workflows/build-iroha1.yml +++ b/.github/workflows/build-iroha1.yml @@ -17,7 +17,7 @@ name: Iroha1 ## and from 2hrs to 27min without cache ## GitHub's default runners also idles much time before started when build often ## TODO 5. 
[speed,optimization,resources] Cancel previous runs if their duration is less than 10 minutes, protect almost done builds from being killed -## TODO [prettify,documentation] update status badges in README.md +## TODO [prettify,documentation] update status badges in /README.md ## TODO [minor] windows ## TODO actions/create-release for main branch and tags ## TODO [cmake,dockerimage,iroha-builder] To improve speed of vcpkg step install to iroha-builder @@ -57,9 +57,31 @@ on: ## NOTE: Able to run via cmdline: gh workflow run Iroha1 inputs: build_spec: - description: 'See chatops-gen-matrix.sh, example "/build ubuntu macos gcc-9 burrow"' - required: false - default: '/build' + description: | + See chatops-gen-matrix.sh, example "/build ubuntu macos gcc-9 burrow" + EXAMPLE build_spec: + /build ubuntu release gcc10 + /build macos llvm release; /build macos clang ursa release + /build all + /build ubuntu all ## build all possible configurations on Ubuntu + /build ubuntu burrow all ## build release and debug on Ubuntu with Burrow + AVAILABLE build_spec keywords: + ubuntu|linux + macos + windows + normal + burrow + ursa + release|Release + debug|Debug + gcc|gcc-9|gcc9 + gcc-10|gcc10 + clang|clang-10|clang10 + llvm + skip-testing|skip_testing + all|everything|beforemerge|before_merge|before-merge|readytomerge|ready-to-merge|ready_to_merge + required: true + default: '/build skip_testing ' # issue_comment: # types: [created, edited] schedule: @@ -497,6 +519,11 @@ jobs: *) echo "::error::Unknown features '$features'"; false ;; esac echo >>$GITHUB_ENV features="$features" + echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fq skip_testing) + # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing + # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF + # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON + # fi - name: REF and SHA of commented PR to ENV if: 
github.event.comment run: > @@ -560,7 +587,7 @@ jobs: # sys time 0,70 secs 575,00 micros 0,70 secs - name: CMake configure ## Takes 13s on regular GitHub runner - run: cmake -B build -DCMAKE_TOOLCHAIN_FILE=$PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake -DCMAKE_BUILD_TYPE=${{ env.BuildType }} -GNinja $CMAKE_USE -DTESTING=ON -DBENCHMARKING=ON -DPACKAGE_DEB=ON + run: cmake -B build -DCMAKE_TOOLCHAIN_FILE=$PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake -DCMAKE_BUILD_TYPE=${{ env.BuildType }} -GNinja $CMAKE_USE -DTESTING=$( test "$skip_testing" = skip_testing && echo OFF || echo ON ) -DBENCHMARKING=$( test "$skip_testing" = skip_testing && echo OFF || echo ON ) -DPACKAGE_DEB=ON #-DCMAKE_VERBOSE_MAKEFILE=ON ## Note: use for debug - name: CMake build run: | @@ -612,6 +639,7 @@ jobs: build/test_bin/** build/test_data/** - timeout-minutes: 40 + if: env.skip_testing == '' name: CTest run: | echo ::group::'boilerplate' @@ -777,6 +805,11 @@ jobs: *) echo "::error::Unknown features '$features'"; false ;; esac echo >>$GITHUB_ENV features="$features" + echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fq skip_testing) + # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing + # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF + # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON + # fi - name: REF and SHA of commented PR to ENV if: github.event.comment run: > @@ -840,7 +873,7 @@ jobs: # sys time 0,70 secs 575,00 micros 0,70 secs - name: CMake configure ## Takes 13s on regular GitHub runner - run: cmake -B build -DCMAKE_TOOLCHAIN_FILE=$PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake -DCMAKE_BUILD_TYPE=${{ env.BuildType }} -GNinja $CMAKE_USE -DTESTING=ON -DBENCHMARKING=ON -DPACKAGE_DEB=ON + run: cmake -B build -DCMAKE_TOOLCHAIN_FILE=$PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake -DCMAKE_BUILD_TYPE=${{ env.BuildType }} -GNinja $CMAKE_USE -DTESTING=$( 
test "$skip_testing" = skip_testing && echo OFF || echo ON ) -DBENCHMARKING=$( test "$skip_testing" = skip_testing && echo OFF || echo ON ) -DPACKAGE_DEB=ON #-DCMAKE_VERBOSE_MAKEFILE=ON ## Note: use for debug - name: CMake build run: | @@ -892,6 +925,7 @@ jobs: build/test_bin/** build/test_data/** - timeout-minutes: 40 + if: env.skip_testing == '' name: CTest run: | echo ::group::'boilerplate' @@ -1070,6 +1104,11 @@ jobs: *) echo "::error::Unknown features '$features'"; false ;; esac echo >>$GITHUB_ENV features="$features" + echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fq skip_testing) + # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing + # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF + # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON + # fi - name: Homebrew run: brew install cmake ninja coreutils ccache bash ## Takes 22 seconds with default github runner @@ -1147,7 +1186,7 @@ jobs: # sys time 0,70 secs 575,00 micros 0,70 secs - name: CMake configure ## Takes 13s on regular GitHub runner - run: cmake -B build -DCMAKE_TOOLCHAIN_FILE=$PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake -DCMAKE_BUILD_TYPE=${{ env.BuildType }} -GNinja $CMAKE_USE -DTESTING=ON -DBENCHMARKING=ON -DPACKAGE_DEB=ON + run: cmake -B build -DCMAKE_TOOLCHAIN_FILE=$PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake -DCMAKE_BUILD_TYPE=${{ env.BuildType }} -GNinja $CMAKE_USE -DTESTING=$( test "$skip_testing" = skip_testing && echo OFF || echo ON ) -DBENCHMARKING=$( test "$skip_testing" = skip_testing && echo OFF || echo ON ) -DPACKAGE_DEB=ON #-DCMAKE_VERBOSE_MAKEFILE=ON ## Note: use for debug - name: CMake build run: | @@ -1192,7 +1231,8 @@ jobs: build/test_data/** - name: Install Postgres on MacOS run: brew install postgresql - - name: CTest + - if: env.skip_testing == '' + name: CTest run: | echo ::group::'boilerplate' set -euo pipefail @@ -1375,7 +1415,7 @@ jobs: 
# sys time 0,70 secs 575,00 micros 0,70 secs - name: CMake configure ## Takes 13s on regular GitHub runner - run: cmake -B build -DCMAKE_TOOLCHAIN_FILE=$PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake -DCMAKE_BUILD_TYPE=${{ env.BuildType }} -GNinja $CMAKE_USE -DTESTING=ON -DBENCHMARKING=ON -DPACKAGE_DEB=ON + run: cmake -B build -DCMAKE_TOOLCHAIN_FILE=$PWD/vcpkg-build/scripts/buildsystems/vcpkg.cmake -DCMAKE_BUILD_TYPE=${{ env.BuildType }} -GNinja $CMAKE_USE -DTESTING=$( test "$skip_testing" = skip_testing && echo OFF || echo ON ) -DBENCHMARKING=$( test "$skip_testing" = skip_testing && echo OFF || echo ON ) -DPACKAGE_DEB=ON #-DCMAKE_VERBOSE_MAKEFILE=ON ## Note: use for debug - name: CMake build run: | @@ -1460,6 +1500,11 @@ jobs: *) echo "::error::Unknown features '$features'"; false ;; esac echo >>$GITHUB_ENV features="$features" + echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fq skip_testing) + # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing + # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF + # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON + # fi - name: REF and SHA of commented PR to ENV if: github.event.comment run: > @@ -1635,6 +1680,11 @@ jobs: *) echo "::error::Unknown features '$features'"; false ;; esac echo >>$GITHUB_ENV features="$features" + echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fq skip_testing) + # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing + # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF + # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON + # fi - name: REF and SHA of commented PR to ENV if: github.event.comment run: > From d4d26e976926d691956d1a0dab0404a16f85132f Mon Sep 17 00:00:00 2001 From: kuvaldini Date: Mon, 7 Feb 2022 21:46:23 +0200 Subject: [PATCH 12/14] 
/build all skip_testing Signed-off-by: kuvaldini From 22c7f2720880752eb5b55352529f3b3b9546d28d Mon Sep 17 00:00:00 2001 From: kuvaldini Date: Mon, 7 Feb 2022 22:55:37 +0200 Subject: [PATCH 13/14] Fix 'check if docker image exists' /build all skip_testing Signed-off-by: kuvaldini --- .github/build-iroha1.src.yml | 8 +++++--- .github/workflows/build-iroha1.yml | 16 +++++++++------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/.github/build-iroha1.src.yml b/.github/build-iroha1.src.yml index b778e1e2b49..5dfbee00310 100644 --- a/.github/build-iroha1.src.yml +++ b/.github/build-iroha1.src.yml @@ -387,9 +387,11 @@ jobs: id: dockertag_already run: | echo "::set-output name=container::$DOCKERHUB_ORG/iroha-builder:$dockertag" + docker pull "$DOCKERHUB_ORG/iroha-builder:$dockertag" - - name: Possible ERROR, Dockerfile edited but image cannot be pushed - if: ${{ steps.docker_login.outcome != 'success' || steps.build_and_push.outcome != 'success' }} + name: Possible ERROR, Dockerfile edited, image was built, but seems not pushed, CANNOT PULL.
+ if: failure() + #if: ${{ steps.docker_login.outcome != 'success' || steps.build_and_push.outcome != 'success' }} env: container: ${{steps.dockertag_already.outputs.container}} dockertag: ${{env.dockertag}} @@ -469,7 +471,7 @@ jobs: *) echo "::error::Unknown features '$features'"; false ;; esac echo >>$GITHUB_ENV features="$features" - echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fq skip_testing) + echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fo skip_testing) # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON diff --git a/.github/workflows/build-iroha1.yml b/.github/workflows/build-iroha1.yml index 1e84431c791..ac64b0694af 100644 --- a/.github/workflows/build-iroha1.yml +++ b/.github/workflows/build-iroha1.yml @@ -415,8 +415,10 @@ jobs: id: dockertag_already run: | echo "::set-output name=container::$DOCKERHUB_ORG/iroha-builder:$dockertag" - - name: Possible ERROR, Dockerfile edited but image cannot be pushed - if: ${{ steps.docker_login.outcome != 'success' || steps.build_and_push.outcome != 'success' }} + docker pull "$DOCKERHUB_ORG/iroha-builder:$dockertag" + - name: Possible ERROR, Dockerfile edited, image was built, but seems not pushed, CANNOT PULL.
+ if: failure() + #if: ${{ steps.docker_login.outcome != 'success' || steps.build_and_push.outcome != 'success' }} env: container: ${{steps.dockertag_already.outputs.container}} dockertag: ${{env.dockertag}} @@ -519,7 +521,7 @@ jobs: *) echo "::error::Unknown features '$features'"; false ;; esac echo >>$GITHUB_ENV features="$features" - echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fq skip_testing) + echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fo skip_testing) # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON @@ -805,7 +807,7 @@ jobs: *) echo "::error::Unknown features '$features'"; false ;; esac echo >>$GITHUB_ENV features="$features" - echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fq skip_testing) + echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fo skip_testing) # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON @@ -1104,7 +1106,7 @@ jobs: *) echo "::error::Unknown features '$features'"; false ;; esac echo >>$GITHUB_ENV features="$features" - echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fq skip_testing) + echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fo skip_testing) # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON @@ -1500,7 +1502,7 @@ jobs: *) echo "::error::Unknown features '$features'"; false ;; 
esac echo >>$GITHUB_ENV features="$features" - echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fq skip_testing) + echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fo skip_testing) # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON @@ -1680,7 +1682,7 @@ jobs: *) echo "::error::Unknown features '$features'"; false ;; esac echo >>$GITHUB_ENV features="$features" - echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fq skip_testing) + echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fo skip_testing) # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON From 98aa6a3d3c9d4e5c4f365ef298bb70409b599259 Mon Sep 17 00:00:00 2001 From: kuvaldini Date: Tue, 8 Feb 2022 15:08:52 +0200 Subject: [PATCH 14/14] Reduce text, one important TODO Signed-off-by: kuvaldini --- .github/_README.make-workflows.md | 33 +++--------------------------- .github/build-iroha1.src.yml | 5 +---- .github/workflows/build-iroha1.yml | 21 +------------------ 3 files changed, 5 insertions(+), 54 deletions(-) diff --git a/.github/_README.make-workflows.md b/.github/_README.make-workflows.md index e465cefbd14..89154d7442a 100644 --- a/.github/_README.make-workflows.md +++ b/.github/_README.make-workflows.md @@ -11,7 +11,7 @@ So I suggest yet another one `make-workflows.sh` based on YAML tool [`yq`](https 0. [Install](#ways-to-install) `make-workflows.sh` to reachable place inside or outside of your repo, i.e. '.github/' 1. Put your workflows to `.github/*.src.yml` 2. 
(recommended) `pre-commit install` and edit [`.pre-commit-config.yaml`](/.pre-commit-config.yaml) according to where [`make-workflows.sh`](./make-workflows.sh) is placed. - (altenative optional) Copy or link `pre-commit-hook.sh` to `.git/hooks/pre-commit` + (alternative optional) Copy or link `pre-commit-hook.sh` to `.git/hooks/pre-commit` Like `ln -s ../../.github/pre-commit-hook.sh .git/hooks/pre-commit` ``` @@ -39,7 +39,7 @@ Options: ### Automate using pre-commit (recommended) There is a nice tool [pre-commit](https://pre-commit.com) to do checks and some actions just before commit. The tool is called by Git pre-commit hook. -Making workflows is better being automated – just +Making workflows is better being automated – just ```sh $ pre-commit install ``` @@ -60,36 +60,9 @@ repos: > NOTE2: pay attention to path(s) where source files are stored `files: 'PATH_REGEXP'` -## Ways to install -1. raw by hand -``` -curl 'https://raw.githubusercontent.com/kuvaldini/make-workflows.sh/main/make-workflows.sh' -LsSf >make-workflows.sh && chmod +x make-workflows.sh -``` - -2. using own facility, installs to /usr/local/bin -``` -curl 'https://raw.githubusercontent.com/kuvaldini/make-workflows.sh/main/make-workflows.sh' -LsSf | bash -s -- --install -``` -> NOTE: may require `...| sudo bash...` - -3. node package manager -``` -npm install kuvaldini/make-workflows.sh -``` -or `npm install git+https://github.com/kuvaldini/make-workflows.sh` - -4. TODO webinstall - - ### Links 1. https://stackoverflow.com/questions/67368724/share-same-steps-for-different-github-actions-jobs 2. https://github.community/t/support-for-yaml-anchors/16128/60 3. https://github.com/mithro/actions-includes 4. https://github.com/allejo/gha-workflows
-Please keep links to the source code [this repo](https://github.com/kuvaldini/make-workflows.sh) - -Creation of this repo was inspired by [@karfau](https://github.com/karfau) +5. dedicated repo https://github.com/kuvaldini/make-workflows.sh diff --git a/.github/build-iroha1.src.yml b/.github/build-iroha1.src.yml index 5dfbee00310..0870f13a111 100644 --- a/.github/build-iroha1.src.yml +++ b/.github/build-iroha1.src.yml @@ -1,5 +1,6 @@ name: Iroha1 +## TODO IMPORTANT DISALLOW deploying tags and main and develop builds where skip_testing was set. ## TODO 1. [vcpkg,optimization-space,optimization-speed] ## Build only Debug or only Release - reduce vcpkg build duration and output size 2times ## see directory triplets/, `vcpkg help triplets` and link: https://stackoverflow.com/a/52781832/3743145 @@ -472,10 +473,6 @@ jobs: esac echo >>$GITHUB_ENV features="$features" echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fo skip_testing) - # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing - # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF - # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON - # fi - *step_detect_commented_pr - &step_checkout_full <<: *step_checkout diff --git a/.github/workflows/build-iroha1.yml b/.github/workflows/build-iroha1.yml index ac64b0694af..08f72af2555 100644 --- a/.github/workflows/build-iroha1.yml +++ b/.github/workflows/build-iroha1.yml @@ -2,6 +2,7 @@ ## Generated from build-iroha1.src.yml with make-workflows.sh name: Iroha1 +## TODO IMPORTANT DISALLOW deploying tags and main and develop builds where skip_testing was set. ## TODO 1. 
[vcpkg,optimization-space,optimization-speed] ## Build only Debug or only Release - reduce vcpkg build duration and output size 2times ## see directory triplets/, `vcpkg help triplets` and link: https://stackoverflow.com/a/52781832/3743145 @@ -522,10 +523,6 @@ jobs: esac echo >>$GITHUB_ENV features="$features" echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fo skip_testing) - # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing - # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF - # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON - # fi - name: REF and SHA of commented PR to ENV if: github.event.comment run: > @@ -808,10 +805,6 @@ jobs: esac echo >>$GITHUB_ENV features="$features" echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fo skip_testing) - # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing - # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF - # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON - # fi - name: REF and SHA of commented PR to ENV if: github.event.comment run: > @@ -1107,10 +1100,6 @@ jobs: esac echo >>$GITHUB_ENV features="$features" echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fo skip_testing) - # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing - # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF - # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON - # fi - name: Homebrew run: brew install cmake ninja coreutils ccache bash ## Takes 22 seconds with default github runner @@ -1503,10 +1492,6 @@ jobs: esac echo >>$GITHUB_ENV features="$features" echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fo skip_testing) - # if [[ " 
${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing - # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF - # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON - # fi - name: REF and SHA of commented PR to ENV if: github.event.comment run: > @@ -1683,10 +1668,6 @@ jobs: esac echo >>$GITHUB_ENV features="$features" echo >>$GITHUB_ENV skip_testing=$(echo ${{matrix.buildspec}} | grep -Fo skip_testing) - # if [[ " ${{matrix.buildspec}} " = *" skip_testing "* ]] #echo ${{matrix.buildspec}} | grep -Fq skip_testing - # then echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=OFF CMAKE_BENCHMARKING=OFF - # else echo >>$GITHUB_ENV CMAKE_TESTING=-DTESTING=ON CMAKE_BENCHMARKING=ON - # fi - name: REF and SHA of commented PR to ENV if: github.event.comment run: >