diff --git a/.github/build-iroha1.src.yml b/.github/build-iroha1.src.yml index 61d7aa7309c..8d824764f77 100644 --- a/.github/build-iroha1.src.yml +++ b/.github/build-iroha1.src.yml @@ -70,7 +70,6 @@ jobs: ## anyway please read .github/README.md check_workflow_yaml_coressponds_to_src_yaml: runs-on: ubuntu-20.04 #ubuntu-latest - #container: ubuntu:latest ## This is required as barrier between AWS-hosted runners and GitHub-hosted runners - they have different set of software, so run in container name: Check if github workflows were properly made from sources steps: - &step_detect_commented_pr @@ -232,14 +231,6 @@ jobs: matrix_dockerimage_release: ${{steps.matrixes.outputs.matrix_dockerimage_release}} matrix_dockerimage_debug: ${{steps.matrixes.outputs.matrix_dockerimage_debug}} - buildspec_ubuntu: ${{steps.matrixes.outputs.buildspec_ubuntu}} - buildspec_ubuntu_debug: ${{steps.matrixes.outputs.buildspec_ubuntu_release}} - buildspec_ubuntu_release: ${{steps.matrixes.outputs.buildspec_ubuntu_debug}} - buildspec_macos: ${{steps.matrixes.outputs.buildspec_macos}} - buildspec_windows: ${{steps.matrixes.outputs.buildspec_windows}} - buildspec_dockerimage_release: ${{steps.matrixes.outputs.buildspec_dockerimage_release}} - buildspec_dockerimage_debug: ${{steps.matrixes.outputs.buildspec_dockerimage_debug}} - ## Build docker image named 'hyperledger/iroha-builder' with all stuff to compile iroha and its dependancies ## The result docker image is pushed with tags :pr-NUMBER, :commit-HASH, :branch-name, :tag-name, ## and conditional tags :edge for development branch, and :latest for git-tags. @@ -318,18 +309,11 @@ jobs: type=ref,event=pr type=ref,event=tag type=schedule - type=edge,branch=develop - # type=semver,pattern={{version}} - # type=semver,pattern={{major}}.{{minor}} - # type=sha,prefix=commit-,format=short - # type=sha,prefix=commit-,format=long ## Docker image will be pushed with tags: ## - hash of file Dockerfile.builder ## - branchname, when branch is pushed ## - pr-NUMBER, when pushed to PR ## - git tag when tag is pushed - ## - semver like 1.2.3 and 1.2 when tag vX.X.X is pushed - ## - tag 'edge' when branch support/1.2.x is pushed ## - schedule, see the docs - &step_docker_meta_ghcr <<: *step_docker_meta @@ -380,29 +364,16 @@ jobs: name: Check if dockertaghash exists in remote registry id: dockertag_already run: | - ## FIXME page size is 100 and cannot be more, this needs to be extended in loop asking tags till the end - exists=$( curl -fL https://hub.docker.com/v2/repositories/$DOCKERHUB_ORG/iroha-builder/tags | - jq 'any( .results[]|.name == "${{env.dockertag}}" ; .)' ) - echo "::set-output name=exists::$exists" - - if test $exists = true ;then - tag=$dockertag - else - tag=edge - fi - container="$DOCKERHUB_ORG/iroha-builder:$tag" - echo "::set-output name=container::$container" - echo "::set-output name=container_tag::$tag" + echo "::set-output name=container::$DOCKERHUB_ORG/iroha-builder:$dockertag" - - if: ${{ steps.dockertag_already.outputs.container_tag != env.dockertag || - steps.dockertag_already.outputs.container == '' }} - name: Possible ERROR, edited but not pushed + name: Possible ERROR, Dockerfile edited but image cannot be pushed + if: ${{ steps.docker_login.outcome != 'success' || steps.build_and_push.outcome != 'success' }} env: container: ${{steps.dockertag_already.outputs.container}} dockertag: ${{env.dockertag}} run: | cat <buildspec -echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/' >buildspec_ubuntu -echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/ && /release/' 
>buildspec_ubuntu_release -echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/ && /debug/' >buildspec_ubuntu_debug -echo "$MATRIX" | awk -v IGNORECASE=1 '/macos/' >buildspec_macos -echo "$MATRIX" | awk -v IGNORECASE=1 '/windows/' >buildspec_windows -## Build Docker images only with GCC-9 (mainstream compiler) -echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/ && /release/ && /gcc-9/' >buildspec_dockerimage_release -echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/ && /debug/ && /gcc-9/' >buildspec_dockerimage_debug - echo "$MATRIX" | json_include >matrix echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/' | json_include >matrix_ubuntu echo "$MATRIX" | awk -v IGNORECASE=1 '/ubuntu/ && /release/' | json_include >matrix_ubuntu_release diff --git a/.github/workflows/build-iroha1.yml b/.github/workflows/build-iroha1.yml index 79136ea1068..43b5eda8f73 100644 --- a/.github/workflows/build-iroha1.yml +++ b/.github/workflows/build-iroha1.yml @@ -71,7 +71,6 @@ jobs: ## anyway please read .github/README.md check_workflow_yaml_coressponds_to_src_yaml: runs-on: ubuntu-20.04 #ubuntu-latest - #container: ubuntu:latest ## This is required as barrier between AWS-hosted runners and GitHub-hosted runners - they have different set of software, so run in container name: Check if github workflows were properly made from sources steps: - name: REF and SHA of commented PR to ENV @@ -237,13 +236,6 @@ jobs: matrix_windows: ${{steps.matrixes.outputs.matrix_windows}} matrix_dockerimage_release: ${{steps.matrixes.outputs.matrix_dockerimage_release}} matrix_dockerimage_debug: ${{steps.matrixes.outputs.matrix_dockerimage_debug}} - buildspec_ubuntu: ${{steps.matrixes.outputs.buildspec_ubuntu}} - buildspec_ubuntu_debug: ${{steps.matrixes.outputs.buildspec_ubuntu_release}} - buildspec_ubuntu_release: ${{steps.matrixes.outputs.buildspec_ubuntu_debug}} - buildspec_macos: ${{steps.matrixes.outputs.buildspec_macos}} - buildspec_windows: ${{steps.matrixes.outputs.buildspec_windows}} - buildspec_dockerimage_release: ${{steps.matrixes.outputs.buildspec_dockerimage_release}} - buildspec_dockerimage_debug: ${{steps.matrixes.outputs.buildspec_dockerimage_debug}} ## Build docker image named 'hyperledger/iroha-builder' with all stuff to compile iroha and its dependancies ## The result docker image is pushed with tags :pr-NUMBER, :commit-HASH, :branch-name, :tag-name, ## and conditional tags :edge for development branch, and :latest for git-tags. 
@@ -338,19 +330,12 @@ jobs: type=ref,event=pr type=ref,event=tag type=schedule - type=edge,branch=develop - # type=semver,pattern={{version}} - # type=semver,pattern={{major}}.{{minor}} - # type=sha,prefix=commit-,format=short - # type=sha,prefix=commit-,format=long - ## Docker image will be pushed with tags: - ## - hash of file Dockerfile.builder - ## - branchname, when branch is pushed - ## - pr-NUMBER, when pushed to PR - ## - git tag when tag is pushed - ## - semver like 1.2.3 and 1.2 when tag vX.X.X is pushed - ## - tag 'edge' when branch support/1.2.x is pushed - ## - schedule, see the docs + ## Docker image will be pushed with tags: + ## - hash of file Dockerfile.builder + ## - branchname, when branch is pushed + ## - pr-NUMBER, when pushed to PR + ## - git tag when tag is pushed + ## - schedule, see the docs - uses: docker/metadata-action@v3 name: Docker meta GHCR id: meta_ghcr @@ -361,11 +346,12 @@ jobs: type=ref,event=pr type=ref,event=tag type=schedule - type=edge,branch=develop - # type=semver,pattern={{version}} - # type=semver,pattern={{major}}.{{minor}} - # type=sha,prefix=commit-,format=short - # type=sha,prefix=commit-,format=long + ## Docker image will be pushed with tags: + ## - hash of file Dockerfile.builder + ## - branchname, when branch is pushed + ## - pr-NUMBER, when pushed to PR + ## - git tag when tag is pushed + ## - schedule, see the docs images: ghcr.io/${{ github.repository }}-builder - name: Set up Docker Buildx @@ -406,27 +392,15 @@ jobs: - name: Check if dockertaghash exists in remote registry id: dockertag_already run: | - ## FIXME page size is 100 and cannot be more, this needs to be extended in loop asking tags till the end - exists=$( curl -fL https://hub.docker.com/v2/repositories/$DOCKERHUB_ORG/iroha-builder/tags | - jq 'any( .results[]|.name == "${{env.dockertag}}" ; .)' ) - echo "::set-output name=exists::$exists" - - if test $exists = true ;then - tag=$dockertag - else - tag=edge - fi - container="$DOCKERHUB_ORG/iroha-builder:$tag" - echo "::set-output name=container::$container" - echo "::set-output name=container_tag::$tag" - - if: ${{ steps.dockertag_already.outputs.container_tag != env.dockertag || steps.dockertag_already.outputs.container == '' }} - name: Possible ERROR, edited but not pushed + echo "::set-output name=container::$DOCKERHUB_ORG/iroha-builder:$dockertag" + - name: Possible ERROR, Dockerfile edited but image cannot be pushed + if: ${{ steps.docker_login.outcome != 'success' || steps.build_and_push.outcome != 'success' }} env: container: ${{steps.dockertag_already.outputs.container}} dockertag: ${{env.dockertag}} run: | cat </dev/null <<'END' ${{ toJson(needs) }} END - - run: test -n "$container" - env: - container: ${{needs.Docker-iroha-builder.outputs.container}} - - if: ${{ needs.Docker-iroha-builder.outputs.container_tag != needs.Docker-iroha-builder.outputs.dockertag || needs.Docker-iroha-builder.outputs.container == '' }} - name: Possible WARNING - env: + - env: container: ${{needs.Docker-iroha-builder.outputs.container}} - dockertag: ${{needs.Docker-iroha-builder.outputs.dockertag}} - run: | - cat </dev/null <<'END' ${{ toJson(needs) }} END - - run: test -n "$container" - env: - container: ${{needs.Docker-iroha-builder.outputs.container}} - - if: ${{ needs.Docker-iroha-builder.outputs.container_tag != needs.Docker-iroha-builder.outputs.dockertag || needs.Docker-iroha-builder.outputs.container == '' }} - name: Possible WARNING - env: + - env: container: ${{needs.Docker-iroha-builder.outputs.container}} - dockertag: 
           ${{needs.Docker-iroha-builder.outputs.dockertag}}
-        run: |
-          cat <`_ you will also need GO. Install it following the instructions on `the official website `_ and then use the following command:
diff --git a/example/config.docker b/example/config.docker
index b17cb2d7975..8be860ddc7e 100644
--- a/example/config.docker
+++ b/example/config.docker
@@ -2,12 +2,21 @@
   "block_store_path" : "/tmp/block_store/",
   "torii_port" : 50051,
   "internal_port" : 10001,
-  "pg_opt" : "host=some-postgres port=5432 user=postgres password=mysecretpassword",
+  "database": {
+    "type": "postgres",
+    "host": "some-postgres",
+    "port": 5432,
+    "user": "postgres",
+    "password": "mysecretpassword",
+    "working database": "iroha_default",
+    "maintenance database": "postgres"
+  },
   "max_proposal_size" : 10,
   "proposal_delay" : 5000,
   "vote_delay" : 5000,
   "mst_enable" : false,
   "mst_expiration_time" : 1440,
   "max_rounds_delay": 3000,
+  "proposal_creation_timeout": 3000,
   "stale_stream_max_rounds": 2
 }
diff --git a/example/config.postgres.sample b/example/config.postgres.sample
index d356b6f50ee..0ffd91cdbb8 100644
--- a/example/config.postgres.sample
+++ b/example/config.postgres.sample
@@ -16,7 +16,7 @@
   "vote_delay" : 5000,
   "mst_enable" : false,
   "mst_expiration_time" : 1440,
-  "max_rounds_delay": 3000,
+  "proposal_creation_timeout": 3000,
   "stale_stream_max_rounds": 2,
   "metrics": "127.0.0.1:8080",
   "healthcheck_port": 50508
diff --git a/example/config.sample b/example/config.sample
index 9289a65e8fe..e9e4f50d327 100644
--- a/example/config.sample
+++ b/example/config.sample
@@ -10,7 +10,7 @@
   "vote_delay": 5000,
   "mst_enable": false,
   "mst_expiration_time": 1440,
-  "max_rounds_delay": 3000,
+  "proposal_creation_timeout": 3000,
   "stale_stream_max_rounds": 2,
   "metrics": "0.0.0.0:7001",
   "healthcheck_port": 50508
diff --git a/irohad/ametsuchi/impl/database_cache/cache.hpp b/irohad/ametsuchi/impl/database_cache/cache.hpp
index afd8ebf22a0..d04282ca9ed 100644
--- a/irohad/ametsuchi/impl/database_cache/cache.hpp
+++ b/irohad/ametsuchi/impl/database_cache/cache.hpp
@@ -8,6 +8,7 @@
 #include
 #include
+#include <optional>
 #include
 
 #include "common/common.hpp"
@@ -21,6 +22,7 @@ namespace iroha::ametsuchi {
     CachebleSetType cacheable_paths_;
 
     std::unique_ptr<iroha::RadixTree<Type>> cache_;
+    std::unique_ptr<iroha::RadixTree<std::optional<Type>>> tmp_cache_;
 
     auto cachebleSearch(std::string_view key) const {
       auto it = std::lower_bound(
@@ -59,25 +61,54 @@ namespace iroha::ametsuchi {
     template <typename Func>
     bool get(std::string_view key, Func &&func) {
+      if (auto *ptr = tmp_cache_->find(key.data(), key.size()))
+        return *ptr ? std::forward<Func>(func)(**ptr) : false;
       if (auto *ptr = cache_->find(key.data(), key.size()))
         return std::forward<Func>(func)(*ptr);
       return false;
     }
 
     void set(std::string_view key, std::string_view const &value) {
+      assert(isCacheable(key));
+      tmp_cache_->template insert(key.data(), key.size(), value);
+    }
+
+    void setCommit(std::string_view key, std::string_view const &value) {
+      assert(isCacheable(key));
+      assert(tmp_cache_->find(key.data(), key.size()) == nullptr);
       cache_->template insert(key.data(), key.size(), value);
     }
 
     auto erase(std::string_view key) {
-      return cache_->erase(key.data(), key.size());
+      return tmp_cache_->template insert(key.data(), key.size(), std::nullopt);
+    }
+
+    void filterDelete(std::string_view filter) {
+      cache_->filterEnumerate(
+          filter.data(), filter.size(), [&](std::string_view key, Type *) {
+            tmp_cache_->template insert(key.data(), key.size(), std::nullopt);
+          });
+    }
+
+    void rollback() {
+      tmp_cache_ = std::make_unique<iroha::RadixTree<std::optional<Type>>>();
     }
 
-    auto filterDelete(std::string_view key) {
-      return cache_->filterDelete(key.data(), key.size());
+    void commit() {
+      tmp_cache_->filterEnumerate(
+          nullptr, 0ul, [&](std::string_view key, std::optional<Type> *value) {
+            if (*value)
+              cache_->template insert(
+                  key.data(), key.size(), std::move(**value));
+            else
+              cache_->erase(key.data(), key.size());
+          });
+      tmp_cache_ = std::make_unique<iroha::RadixTree<std::optional<Type>>>();
     }
 
     void drop() {
       cache_ = std::make_unique<iroha::RadixTree<Type>>();
+      tmp_cache_ = std::make_unique<iroha::RadixTree<std::optional<Type>>>();
     }
   };
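The new tmp_cache_ turns the database cache into a two-level structure: reads consult an uncommitted overlay first, where std::nullopt acts as a deletion tombstone, while set() and erase() touch only the overlay, and commit()/rollback() fold it into, or discard it from, the committed level. Below is a minimal standalone sketch of that pattern, assuming std::map in place of iroha::RadixTree; the names are illustrative, not the PR's API.

// Sketch of the tmp_cache_/cache_ overlay split, under the assumptions above.
#include <cassert>
#include <map>
#include <optional>
#include <string>
#include <utility>

class OverlayCache {
  std::map<std::string, std::string> base_;                // committed level
  std::map<std::string, std::optional<std::string>> tmp_;  // overlay; nullopt = tombstone

 public:
  void set(std::string const &key, std::string const &value) {
    tmp_[key] = value;  // buffered until commit()
  }

  void erase(std::string const &key) {
    tmp_[key] = std::nullopt;  // shadow the committed entry without touching it
  }

  // Reads see pending writes first, then fall through to the committed level.
  std::string const *get(std::string const &key) const {
    if (auto it = tmp_.find(key); it != tmp_.end())
      return it->second ? &*it->second : nullptr;  // tombstone hides base_
    if (auto it = base_.find(key); it != base_.end())
      return &it->second;
    return nullptr;
  }

  void commit() {  // fold the overlay into the committed level
    for (auto &[key, value] : tmp_)
      if (value)
        base_[key] = std::move(*value);
      else
        base_.erase(key);
    tmp_.clear();
  }

  void rollback() {  // drop pending writes, keep committed state intact
    tmp_.clear();
  }
};

int main() {
  OverlayCache cache;
  cache.set("key", "v1");
  cache.commit();
  cache.erase("key");
  assert(cache.get("key") == nullptr);  // tombstone visible before commit
  cache.rollback();
  assert(cache.get("key") && *cache.get("key") == "v1");  // committed value restored
}

The tombstone encoding is what lets RocksDbCommon::commit() further down roll the cache back to an exact pre-transaction state when the RocksDB commit fails, instead of dropping the whole cache.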
diff --git a/irohad/ametsuchi/impl/rocksdb_block_query.cpp b/irohad/ametsuchi/impl/rocksdb_block_query.cpp
index 64f29c95249..e83ce7887cd 100644
--- a/irohad/ametsuchi/impl/rocksdb_block_query.cpp
+++ b/irohad/ametsuchi/impl/rocksdb_block_query.cpp
@@ -33,7 +33,7 @@ namespace iroha::ametsuchi {
       return std::nullopt;
     } else if (status.assumeValue()) {
       auto const &[tx_status] = staticSplitId<1ull>(*status.assumeValue(), "#");
-      res = tx_status == "TRUE" ? 1 : 0;
+      res = (!tx_status.empty() && tx_status[0] == 'T') ? 1 : 0;
     }
 
     return res;
diff --git a/irohad/ametsuchi/impl/rocksdb_block_storage.cpp b/irohad/ametsuchi/impl/rocksdb_block_storage.cpp
index 0e4f6e9ce0d..9a1023ca114 100644
--- a/irohad/ametsuchi/impl/rocksdb_block_storage.cpp
+++ b/irohad/ametsuchi/impl/rocksdb_block_storage.cpp
@@ -110,11 +110,13 @@ void RocksDbBlockStorage::reload() {}
 
 void RocksDbBlockStorage::clear() {
   RocksDbCommon common(db_context_);
-  if (auto status = common.filterDelete(fmtstrings::kPathWsv); !status.ok())
-    log_->error("Unable to delete WSV. Description: {}", status.ToString());
+  if (auto res = dropStore(common); expected::hasError(res))
+    log_->error("Unable to delete Store. Description: {}",
+                res.assumeError().description);
 
-  if (auto status = common.filterDelete(fmtstrings::kPathStore); !status.ok())
-    log_->error("Unable to delete STORE. Description: {}", status.ToString());
+  if (auto res = dropWSV(common); expected::hasError(res))
+    log_->error("Unable to delete WSV. Description: {}",
+                res.assumeError().description);
 }
 
 iroha::expected::Result RocksDbBlockStorage::forEach(
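clear() now delegates to dropStore()/dropWSV(), which, as the rocksdb_common.hpp changes below show, drop and recreate a whole column family instead of deleting keys one by one through filterDelete. A rough sketch of that primitive against the real RocksDB API, assuming an already-open database and a valid handle; error handling is trimmed:

// Clear an entire keyspace by dropping and recreating its column family.
#include <rocksdb/db.h>

#include <string>

rocksdb::Status recreateColumnFamily(rocksdb::DB &db,
                                     rocksdb::ColumnFamilyHandle *&cf,
                                     std::string const &name) {
  // Dropping a column family is close to a metadata-only operation, so it is
  // far cheaper than iterating and deleting every key under a prefix.
  if (auto s = db.DropColumnFamily(cf); !s.ok())
    return s;
  if (auto s = db.DestroyColumnFamilyHandle(cf); !s.ok())
    return s;
  cf = nullptr;
  return db.CreateColumnFamily(rocksdb::ColumnFamilyOptions(), name, &cf);
}

This is the same drop/destroy/create sequence RocksDBPort::dropColumnFamily() performs below.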
Description: {}", + res.assumeError().description); } iroha::expected::Result RocksDbBlockStorage::forEach( diff --git a/irohad/ametsuchi/impl/rocksdb_command_executor.cpp b/irohad/ametsuchi/impl/rocksdb_command_executor.cpp index 122df681754..5e943272490 100644 --- a/irohad/ametsuchi/impl/rocksdb_command_executor.cpp +++ b/irohad/ametsuchi/impl/rocksdb_command_executor.cpp @@ -760,6 +760,7 @@ RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( ++counter; return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathSignatories, domain_id, account_name); @@ -927,6 +928,7 @@ RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( ++counter; return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathSignatories, domain_id, account_name); @@ -1062,7 +1064,8 @@ RocksDbCommandExecutor::ExecutionResult RocksDbCommandExecutor::operator()( RDB_ERROR_CHECK(forAsset( common, asset_name, domain_id)); - auto status = common.get(fmtstrings::kSetting, + auto status = common.get(RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kSetting, iroha::ametsuchi::kMaxDescriptionSizeKey); RDB_ERROR_CHECK(canExist( status, [&] { return fmt::format("Max description size key"); })); diff --git a/irohad/ametsuchi/impl/rocksdb_common.hpp b/irohad/ametsuchi/impl/rocksdb_common.hpp index 6160887fed0..c7bfa050eb7 100644 --- a/irohad/ametsuchi/impl/rocksdb_common.hpp +++ b/irohad/ametsuchi/impl/rocksdb_common.hpp @@ -7,16 +7,19 @@ #define IROHA_ROCKSDB_COMMON_HPP #include +#include #include #include #include +#include #include #include #include +#include #include -#include #include +#include #include "ametsuchi/impl/database_cache/cache.hpp" #include "ametsuchi/impl/executor_common.hpp" #include "common/disable_warnings.h" @@ -432,6 +435,11 @@ namespace iroha::ametsuchi { assert(db_port); } + ~RocksDBContext() { + transaction.reset(); + db_port.reset(); + } + private: friend class RocksDbCommon; friend struct RocksDBPort; @@ -489,7 +497,7 @@ namespace iroha::ametsuchi { /** * Port to provide access to RocksDB instance. 
*/ - struct RocksDBPort { + struct RocksDBPort final { RocksDBPort(RocksDBPort const &) = delete; RocksDBPort &operator=(RocksDBPort const &) = delete; RocksDBPort() = default; @@ -499,26 +507,88 @@ namespace iroha::ametsuchi { return reinitDB(); } + enum ColumnFamilyType { + kDefault, + kWsv, + kStore, + ////// + kTotal + }; + + ~RocksDBPort() { + closeDb(); + } + private: + struct { + std::string name; + rocksdb::ColumnFamilyHandle *handle; + } cf_handles[ColumnFamilyType::kTotal] = { + {rocksdb::kDefaultColumnFamilyName, nullptr}, + {"wsv", nullptr}, + {"store", nullptr}}; + + void closeDb() { + for (auto &cf : cf_handles) + if (nullptr != cf.handle) { + transaction_db_->DestroyColumnFamilyHandle(cf.handle); + cf.handle = nullptr; + } + transaction_db_.reset(); + } + + void dropColumnFamily(ColumnFamilyType type) { + assert(type < ColumnFamilyType::kTotal); + auto &cf = cf_handles[type]; + + if (cf.handle) { + assert(transaction_db_); + transaction_db_->DropColumnFamily(cf.handle); + transaction_db_->DestroyColumnFamilyHandle(cf.handle); + transaction_db_->CreateColumnFamily({}, cf.name, &cf.handle); + } + } + expected::Result reinitDB() { assert(db_name_); + closeDb(); rocksdb::BlockBasedTableOptions table_options; - table_options.block_cache = - rocksdb::NewLRUCache(1 * 1024 * 1024 * 1024LL); + table_options.block_cache = rocksdb::NewLRUCache(512 * 1024 * 1024LL); table_options.block_size = 32 * 1024; // table_options.pin_l0_filter_and_index_blocks_in_cache = true; table_options.cache_index_and_filter_blocks = true; + table_options.filter_policy.reset( + rocksdb::NewBloomFilterPolicy(10, false)); rocksdb::Options options; options.create_if_missing = true; + options.create_missing_column_families = true; + options.max_open_files = 100; options.optimize_filters_for_hits = true; options.table_factory.reset( rocksdb::NewBlockBasedTableFactory(table_options)); - rocksdb::OptimisticTransactionDB *transaction_db; - auto status = rocksdb::OptimisticTransactionDB::Open( - options, *db_name_, &transaction_db); + /// print all column families + std::vector colfam; + rocksdb::DB::ListColumnFamilies(options, *db_name_, &colfam); + std::cout << "RocksDB detected column families:" << std::endl; + for (auto const &cf : colfam) std::cout << cf << std::endl; + + std::vector column_families; + for (auto &cf : cf_handles) + column_families.emplace_back( + rocksdb::ColumnFamilyDescriptor{cf.name, {}}); + + std::vector handles; + rocksdb::TransactionDB *transaction_db; + auto status = + rocksdb::TransactionDB::Open(options, + rocksdb::TransactionDBOptions(), + *db_name_, + column_families, + &handles, + &transaction_db); if (!status.ok()) return makeError(DbErrorCode::kInitializeFailed, @@ -526,6 +596,11 @@ namespace iroha::ametsuchi { *db_name_, status.ToString()); + assert(ColumnFamilyType::kTotal == handles.size()); + for (uint32_t ix = 0; ix < handles.size(); ++ix) { + assert(handles[ix]->GetName() == cf_handles[ix].name); + cf_handles[ix].handle = handles[ix]; + } transaction_db_.reset(transaction_db); return {}; } @@ -572,24 +647,23 @@ namespace iroha::ametsuchi { return std::nullopt; } - std::optional getPropStr(const rocksdb::Slice &property) { - if (transaction_db_) { - std::string value; - transaction_db_->GetProperty(property, &value); - return value; - } - return std::nullopt; - } - private: - std::unique_ptr transaction_db_; + std::unique_ptr transaction_db_; std::optional db_name_; friend class RocksDbCommon; void prepareTransaction(RocksDBContext &tx_context) { assert(transaction_db_); - 
tx_context.transaction.reset( - transaction_db_->BeginTransaction(rocksdb::WriteOptions())); + if (tx_context.transaction) { + [[maybe_unused]] auto result = + transaction_db_->BeginTransaction(rocksdb::WriteOptions(), + rocksdb::TransactionOptions(), + tx_context.transaction.get()); + assert(result == tx_context.transaction.get()); + } else { + tx_context.transaction.reset( + transaction_db_->BeginTransaction(rocksdb::WriteOptions())); + } } }; @@ -641,6 +715,15 @@ namespace iroha::ametsuchi { return tx_context_->key_buffer; } + auto &context() { + return tx_context_; + } + + auto &port() { + assert(tx_context_); + return tx_context_->db_port; + } + private: auto &transaction() { if (!tx_context_->transaction) @@ -691,46 +774,67 @@ namespace iroha::ametsuchi { c->set(key, valueBuffer()); } + void storeCommit(std::string_view key) { + if (auto c = cache(); c && c->isCacheable(key)) + c->setCommit(key, valueBuffer()); + } + void dropCache() { if (auto c = cache()) - c->drop(); + c->rollback(); + } + + void commitCache() { + if (auto c = cache()) + c->commit(); + } + + auto getHandle(RocksDBPort::ColumnFamilyType type) { + assert(type < RocksDBPort::ColumnFamilyType::kTotal); + assert(port()->cf_handles[type].handle != nullptr); + + return port()->cf_handles[type].handle; } public: template void printStatus(LoggerT &log) { - tx_context_->db_port->printStatus(log); + port()->printStatus(log); } auto propGetBlockCacheUsage() { - return tx_context_->db_port->getPropUInt64("rocksdb.block-cache-usage"); + return port()->getPropUInt64("rocksdb.block-cache-usage"); } auto propGetCurSzAllMemTables() { - return tx_context_->db_port->getPropUInt64( - "rocksdb.cur-size-all-mem-tables"); + return port()->getPropUInt64("rocksdb.cur-size-all-mem-tables"); } auto propGetNumSnapshots() { - return tx_context_->db_port->getPropUInt64("rocksdb.num-snapshots"); + return port()->getPropUInt64("rocksdb.num-snapshots"); } auto propGetTotalSSTFilesSize() { - return tx_context_->db_port->getPropUInt64( - "rocksdb.total-sst-files-size"); + return port()->getPropUInt64("rocksdb.total-sst-files-size"); } auto propGetBlockCacheCapacity() { - return tx_context_->db_port->getPropUInt64( - "rocksdb.block-cache-capacity"); + return port()->getPropUInt64("rocksdb.block-cache-capacity"); + } + + auto reinit() { + return port()->reinitDB(); } /// Makes commit to DB auto commit() { rocksdb::Status status; - if (isTransaction()) - status = transaction()->Commit(); - + if (isTransaction()) { + if ((status = transaction()->Commit()); !status.ok()) + dropCache(); + else + commitCache(); + } transaction().reset(); return status; } @@ -800,7 +904,9 @@ namespace iroha::ametsuchi { /// Read data from database to @see valueBuffer template - auto get(S const &fmtstring, Args &&... args) { + auto get(RocksDBPort::ColumnFamilyType cf_type, + S const &fmtstring, + Args &&... args) { keyBuffer().clear(); fmt::format_to(keyBuffer(), fmtstring, std::forward(args)...); @@ -818,21 +924,25 @@ namespace iroha::ametsuchi { rocksdb::ReadOptions ro; ro.fill_cache = false; - auto status = transaction()->Get(ro, slice, &valueBuffer()); + auto status = + transaction()->Get(ro, getHandle(cf_type), slice, &valueBuffer()); if (status.ok()) - storeInCache(slice.ToStringView()); + storeCommit(slice.ToStringView()); return status; } /// Put data from @see valueBuffer to database template - auto put(S const &fmtstring, Args &&... args) { + auto put(RocksDBPort::ColumnFamilyType cf_type, + S const &fmtstring, + Args &&... 
args) { keyBuffer().clear(); fmt::format_to(keyBuffer(), fmtstring, std::forward(args)...); rocksdb::Slice const slice(keyBuffer().data(), keyBuffer().size()); - auto status = transaction()->Put(slice, valueBuffer()); + auto status = + transaction()->Put(getHandle(cf_type), slice, valueBuffer()); if (status.ok()) storeInCache(slice.ToStringView()); @@ -842,7 +952,9 @@ namespace iroha::ametsuchi { /// Delete database entry by the key template - auto del(S const &fmtstring, Args &&... args) { + auto del(RocksDBPort::ColumnFamilyType cf_type, + S const &fmtstring, + Args &&... args) { keyBuffer().clear(); fmt::format_to(keyBuffer(), fmtstring, std::forward(args)...); @@ -850,19 +962,22 @@ namespace iroha::ametsuchi { if (auto c = cache(); c && c->isCacheable(slice.ToStringView())) c->erase(slice.ToStringView()); - return transaction()->Delete(slice); + return transaction()->Delete(getHandle(cf_type), slice); } /// Searches for the first key that matches a prefix template - auto seek(S const &fmtstring, Args &&... args) { + auto seek(RocksDBPort::ColumnFamilyType cf_type, + S const &fmtstring, + Args &&... args) { keyBuffer().clear(); fmt::format_to(keyBuffer(), fmtstring, std::forward(args)...); rocksdb::ReadOptions ro; ro.fill_cache = false; - std::unique_ptr it(transaction()->GetIterator(ro)); + std::unique_ptr it( + transaction()->GetIterator(ro, getHandle(cf_type))); it->Seek(rocksdb::Slice(keyBuffer().data(), keyBuffer().size())); return it; @@ -883,27 +998,43 @@ namespace iroha::ametsuchi { /// Iterate over all the keys that matches a prefix and call lambda /// with key-value. To stop enumeration callback F must return false. template - auto enumerate(F &&func, S const &fmtstring, Args &&... args) { - auto it = seek(fmtstring, std::forward(args)...); + auto enumerate(F &&func, + RocksDBPort::ColumnFamilyType cf_type, + S const &fmtstring, + Args &&... args) { + auto it = seek(cf_type, fmtstring, std::forward(args)...); return enumerate(it, std::forward(func)); } /// Removes range of items by key-filter template - auto filterDelete(S const &fmtstring, Args &&... args) { - auto it = seek(fmtstring, std::forward(args)...); + auto filterDelete(uint64_t delete_count, + RocksDBPort::ColumnFamilyType cf_type, + S const &fmtstring, + Args &&... args) -> std::pair { + auto it = seek(cf_type, fmtstring, std::forward(args)...); if (!it->status().ok()) - return it->status(); + return std::make_pair(false, it->status()); rocksdb::Slice const key(keyBuffer().data(), keyBuffer().size()); if (auto c = cache(); c && c->isCacheable(key.ToStringView())) c->filterDelete(key.ToStringView()); - for (; it->Valid() && it->key().starts_with(key); it->Next()) - if (auto status = transaction()->Delete(it->key()); !status.ok()) - return status; + bool was_deleted = false; + for (; delete_count-- && it->Valid() && it->key().starts_with(key); + it->Next()) { + if (auto status = transaction()->Delete(getHandle(cf_type), it->key()); + !status.ok()) + return std::pair(was_deleted, status); + else + was_deleted = true; + } - return it->status(); + return std::pair(was_deleted, it->status()); + } + + void dropTable(RocksDBPort::ColumnFamilyType cf_type) { + port()->dropColumnFamily(cf_type); } private: @@ -937,6 +1068,7 @@ namespace iroha::ametsuchi { template inline auto enumerateKeys(RocksDbCommon &rdb, F &&func, + RocksDBPort::ColumnFamilyType cf_type, S const &strformat, Args &&... 
args) { static_assert( @@ -953,6 +1085,7 @@ namespace iroha::ametsuchi { - fmtstrings::kDelimiterCountForAField * fmtstrings::kDelimiterSize)); }, + cf_type, strformat, std::forward(args)...); } @@ -976,9 +1109,11 @@ namespace iroha::ametsuchi { template inline auto enumerateKeysAndValues(RocksDbCommon &rdb, F &&func, + RocksDBPort::ColumnFamilyType cf_type, S const &strformat, Args &&... args) { return rdb.enumerate(makeKVLambda(std::forward(func)), + cf_type, strformat, std::forward(args)...); } @@ -1065,14 +1200,15 @@ namespace iroha::ametsuchi { inline expected::Result executeOperation( RocksDbCommon &common, OperationDescribtionF &&op_formatter, + RocksDBPort::ColumnFamilyType cf_type, Args &&... args) { rocksdb::Status status; if constexpr (kOp == kDbOperation::kGet || kOp == kDbOperation::kCheck) - status = common.get(std::forward(args)...); + status = common.get(cf_type, std::forward(args)...); else if constexpr (kOp == kDbOperation::kPut) - status = common.put(std::forward(args)...); + status = common.put(cf_type, std::forward(args)...); else if constexpr (kOp == kDbOperation::kDel) - status = common.del(std::forward(args)...); + status = common.del(cf_type, std::forward(args)...); static_assert(kOp == kDbOperation::kGet || kOp == kDbOperation::kCheck || kOp == kDbOperation::kPut || kOp == kDbOperation::kDel, @@ -1218,10 +1354,13 @@ namespace iroha::ametsuchi { template inline expected::Result, DbError> dbCall( - RocksDbCommon &common, Args &&... args) { + RocksDbCommon &common, + RocksDBPort::ColumnFamilyType cf_type, + Args &&... args) { auto status = executeOperation( common, [&] { return fmt::format(std::forward(args)...); }, + cf_type, std::forward(args)...); RDB_ERROR_CHECK(status); return loadValue(common, status); @@ -1242,8 +1381,11 @@ namespace iroha::ametsuchi { forAccountDetailsCount(RocksDbCommon &common, std::string_view account, std::string_view domain) { - return dbCall( - common, fmtstrings::kAccountDetailsCount, domain, account); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kAccountDetailsCount, + domain, + account); } /** @@ -1257,7 +1399,10 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forStoreVersion(RocksDbCommon &common) { - return dbCall(common, fmtstrings::kStoreVersion); + return dbCall( + common, + RocksDBPort::ColumnFamilyType::kStore, + fmtstrings::kStoreVersion); } /** @@ -1271,7 +1416,8 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forWSVVersion( RocksDbCommon &common) { - return dbCall(common, fmtstrings::kWsvVersion); + return dbCall( + common, RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kWsvVersion); } /** @@ -1287,7 +1433,10 @@ namespace iroha::ametsuchi { inline expected::Result, DbError> forBlock( RocksDbCommon &common, uint64_t height) { return dbCall( - common, fmtstrings::kBlockDataInStore, height); + common, + RocksDBPort::ColumnFamilyType::kStore, + fmtstrings::kBlockDataInStore, + height); } /** @@ -1301,7 +1450,9 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forBlocksTotalCount( RocksDbCommon &common) { - return dbCall(common, fmtstrings::kBlocksTotalCount); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kStore, + fmtstrings::kBlocksTotalCount); } /** @@ -1319,8 +1470,11 @@ namespace iroha::ametsuchi { RocksDbCommon &common, std::string_view account, std::string_view domain) { - return dbCall( - common, fmtstrings::kQuorum, 
domain, account); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kQuorum, + domain, + account); } /** @@ -1335,8 +1489,10 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forTxsTotalCount( RocksDbCommon &common, std::string_view account_id) { - return dbCall( - common, fmtstrings::kTxsTotalCount, account_id); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTxsTotalCount, + account_id); } /** @@ -1350,7 +1506,9 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forTxsTotalCount( RocksDbCommon &common) { - return dbCall(common, fmtstrings::kAllTxsTotalCount); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kAllTxsTotalCount); } /** @@ -1364,7 +1522,9 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forDomainsTotalCount(RocksDbCommon &common) { - return dbCall(common, fmtstrings::kDomainsTotalCount); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kDomainsTotalCount); } /** @@ -1398,7 +1558,7 @@ namespace iroha::ametsuchi { Result, DbError> forRole(RocksDbCommon &common, std::string_view role) { return dbCall( - common, fmtstrings::kRole, role); + common, RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kRole, role); } /** @@ -1413,9 +1573,12 @@ namespace iroha::ametsuchi { inline expected::Result, DbError> forPeersCount( RocksDbCommon &common, bool is_syncing_peer) { if (is_syncing_peer) - return dbCall(common, fmtstrings::kSPeersCount); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kSPeersCount); - return dbCall(common, fmtstrings::kPeersCount); + return dbCall( + common, RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPeersCount); } /** @@ -1433,6 +1596,7 @@ namespace iroha::ametsuchi { shared_model::crypto::Hash const &tx_hash) { return dbCall( common, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kTransactionStatus, std::string_view((char const *)tx_hash.blob().data(), tx_hash.blob().size())); @@ -1457,7 +1621,13 @@ namespace iroha::ametsuchi { uint64_t height, uint64_t index) { return dbCall( - common, fmtstrings::kTransactionByPosition, account, height, index, ts); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTransactionByPosition, + account, + height, + index, + ts); } /** @@ -1478,7 +1648,13 @@ namespace iroha::ametsuchi { uint64_t height, uint64_t index) { return dbCall( - common, fmtstrings::kTransactionByTs, account, ts, height, index); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTransactionByTs, + account, + ts, + height, + index); } /** @@ -1494,7 +1670,7 @@ namespace iroha::ametsuchi { inline expected::Result, DbError> forSettings( RocksDbCommon &common, std::string_view key) { return dbCall( - common, fmtstrings::kSetting, key); + common, RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kSetting, key); } /** @@ -1514,10 +1690,16 @@ namespace iroha::ametsuchi { bool is_sync_peer) { if (is_sync_peer) return dbCall( - common, fmtstrings::kSPeerAddress, pubkey); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kSPeerAddress, + pubkey); return dbCall( - common, fmtstrings::kPeerAddress, pubkey); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kPeerAddress, + pubkey); } /** @@ -1535,10 +1717,16 @@ namespace iroha::ametsuchi { RocksDbCommon &common, std::string_view pubkey, bool is_sync_peer) { if 
(is_sync_peer) return dbCall( - common, fmtstrings::kSPeerTLS, pubkey); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kSPeerTLS, + pubkey); return dbCall( - common, fmtstrings::kPeerTLS, pubkey); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kPeerTLS, + pubkey); } /** @@ -1554,8 +1742,11 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> inline expected::Result, DbError> forAsset( RocksDbCommon &common, std::string_view asset, std::string_view domain) { - return dbCall( - common, fmtstrings::kAsset, domain, asset); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kAsset, + domain, + asset); } /** @@ -1571,7 +1762,8 @@ namespace iroha::ametsuchi { kDbEntry kSc = kDbEntry::kMustExist> expected::Result, DbError> forTopBlockInfo( RocksDbCommon &common) { - return dbCall(common, fmtstrings::kTopBlock); + return dbCall( + common, RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kTopBlock); } /** @@ -1591,8 +1783,12 @@ namespace iroha::ametsuchi { std::string_view account, std::string_view domain, std::string_view role) { - return dbCall( - common, fmtstrings::kAccountRole, domain, account, role); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kAccountRole, + domain, + account, + role); } /** @@ -1616,7 +1812,13 @@ namespace iroha::ametsuchi { std::string_view creator_id, std::string_view key) { return dbCall( - common, fmtstrings::kAccountDetail, domain, account, creator_id, key); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kAccountDetail, + domain, + account, + creator_id, + key); } /** @@ -1636,8 +1838,12 @@ namespace iroha::ametsuchi { std::string_view account, std::string_view domain, std::string_view pubkey) { - return dbCall( - common, fmtstrings::kSignatory, domain, account, pubkey); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kSignatory, + domain, + account, + pubkey); } /** @@ -1653,7 +1859,10 @@ namespace iroha::ametsuchi { inline expected::Result, DbError> forDomain( RocksDbCommon &common, std::string_view domain) { return dbCall( - common, fmtstrings::kDomain, domain); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kDomain, + domain); } /** @@ -1671,8 +1880,11 @@ namespace iroha::ametsuchi { RocksDbCommon &common, std::string_view account, std::string_view domain) { - return dbCall( - common, fmtstrings::kAccountAssetSize, domain, account); + return dbCall(common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kAccountAssetSize, + domain, + account); } /** @@ -1694,7 +1906,12 @@ namespace iroha::ametsuchi { std::string_view domain, std::string_view asset) { return dbCall( - common, fmtstrings::kAccountAsset, domain, account, asset); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kAccountAsset, + domain, + account, + asset); } /** @@ -1718,7 +1935,12 @@ namespace iroha::ametsuchi { std::string_view domain, std::string_view grantee_account_id) { return dbCall( - common, fmtstrings::kGranted, domain, account, grantee_account_id); + common, + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kGranted, + domain, + account, + grantee_account_id); } /** @@ -1747,6 +1969,7 @@ namespace iroha::ametsuchi { } return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathAccountRoles, domain, account); @@ -1923,6 +2146,7 @@ namespace iroha::ametsuchi { return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathAccountDetail, domain, account); @@ -1938,10 +2162,13 @@ namespace 
iroha::ametsuchi { return result; } + inline expected::Result dropStore(RocksDbCommon &common) { + common.dropTable(RocksDBPort::ColumnFamilyType::kStore); + return {}; + } + inline expected::Result dropWSV(RocksDbCommon &common) { - if (auto status = common.filterDelete(fmtstrings::kPathWsv); !status.ok()) - return makeError(DbErrorCode::kOperationFailed, - "Clear WSV failed."); + common.dropTable(RocksDBPort::ColumnFamilyType::kWsv); return {}; } diff --git a/irohad/ametsuchi/impl/rocksdb_db_transaction.hpp b/irohad/ametsuchi/impl/rocksdb_db_transaction.hpp index 19c40ed230a..f158457f8a6 100644 --- a/irohad/ametsuchi/impl/rocksdb_db_transaction.hpp +++ b/irohad/ametsuchi/impl/rocksdb_db_transaction.hpp @@ -39,7 +39,8 @@ namespace iroha::ametsuchi { void commit() override { RocksDbCommon common(tx_context_); - common.commit(); + if (!common.commit().ok()) + throw std::runtime_error("RocksDb commit failed."); } void rollback() override { diff --git a/irohad/ametsuchi/impl/rocksdb_indexer.cpp b/irohad/ametsuchi/impl/rocksdb_indexer.cpp index 83d84b01367..5e0126269a5 100644 --- a/irohad/ametsuchi/impl/rocksdb_indexer.cpp +++ b/irohad/ametsuchi/impl/rocksdb_indexer.cpp @@ -22,8 +22,8 @@ void RocksDBIndexer::txHashStatus(const TxPosition &position, const HashType &tx_hash, bool is_committed) { RocksDbCommon common(db_context_); - common.valueBuffer() = - is_committed ? "TRUE" : "FALSE"; // TODO USE ONLY ONE BYTE T OR F + if (is_committed) + common.valueBuffer() = 'T'; common.valueBuffer() += '#'; common.valueBuffer() += std::to_string(position.height); common.valueBuffer() += '#'; diff --git a/irohad/ametsuchi/impl/rocksdb_query_executor.cpp b/irohad/ametsuchi/impl/rocksdb_query_executor.cpp index e7e0e502d38..47cbaa76b4b 100644 --- a/irohad/ametsuchi/impl/rocksdb_query_executor.cpp +++ b/irohad/ametsuchi/impl/rocksdb_query_executor.cpp @@ -5,9 +5,8 @@ #include "ametsuchi/impl/rocksdb_query_executor.hpp" -#include -#include #include "ametsuchi/impl/rocksdb_specific_query_executor.hpp" +#include "common/to_lower.hpp" #include "interfaces/iroha_internal/query_response_factory.hpp" #include "interfaces/queries/blocks_query.hpp" #include "interfaces/queries/query.hpp" @@ -42,16 +41,20 @@ namespace iroha::ametsuchi { auto const &[account, domain] = staticSplitId<2>(query.creatorAccountId()); RocksDbCommon common(tx_context_); - for (auto &signatory : query.signatures()) + std::string pk; + for (auto &signatory : query.signatures()) { + pk.clear(); + toLowerAppend(signatory.publicKey(), pk); if (auto result = forSignatory( - common, account, domain, signatory.publicKey()); + common, account, domain, pk); expected::hasError(result)) { log_->error("code:{}, description:{}", result.assumeError().code, result.assumeError().description); return false; } + } return true; } diff --git a/irohad/ametsuchi/impl/rocksdb_settings_query.cpp b/irohad/ametsuchi/impl/rocksdb_settings_query.cpp index 78480de2a95..5a77c57ca8d 100644 --- a/irohad/ametsuchi/impl/rocksdb_settings_query.cpp +++ b/irohad/ametsuchi/impl/rocksdb_settings_query.cpp @@ -20,7 +20,9 @@ namespace { const shared_model::interface::types::SettingKeyType &key, uint64_t &destination) { RocksDbCommon common(db_context); - auto status = common.get(fmtstrings::kSetting, kMaxDescriptionSizeKey); + auto status = common.get(RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kSetting, + kMaxDescriptionSizeKey); if (auto result = iroha::ametsuchi::canExist( status, [&] { return fmt::format("Max description size key"); }); diff --git 
a/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp b/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp index 5232eabf555..4943098d1a5 100644 --- a/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp +++ b/irohad/ametsuchi/impl/rocksdb_specific_query_executor.cpp @@ -187,6 +187,7 @@ operator()( roles.emplace_back(role.ToStringView()); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathAccountRoles, domain_id, account_name); @@ -262,6 +263,7 @@ operator()( signatories.emplace_back(signatory.ToStringView()); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathSignatories, domain_id, account_name); @@ -452,7 +454,8 @@ RocksDbSpecificQueryExecutor::readTxs( if (ordering_ptr->field == shared_model::interface::Ordering::Field::kCreatedTime) { - auto it = common.template seek(fmtstrings::kTransactionByTs, + auto it = common.template seek(RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTransactionByTs, query.accountId(), tx_ts, tx_height, @@ -463,7 +466,8 @@ RocksDbSpecificQueryExecutor::readTxs( fmtstrings::kPathTransactionByTs, query.accountId()); } else { - auto it = common.template seek(fmtstrings::kTransactionByPosition, + auto it = common.template seek(RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTransactionByPosition, query.accountId(), tx_height, tx_index, @@ -478,7 +482,8 @@ RocksDbSpecificQueryExecutor::readTxs( } else { if (ordering_ptr->field == shared_model::interface::Ordering::Field::kCreatedTime) { - auto it = common.template seek(fmtstrings::kTransactionByTsLowerBound, + auto it = common.template seek(RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTransactionByTsLowerBound, query.accountId(), bounds.tsFrom); status = enumerateKeysAndValues(common, @@ -487,7 +492,8 @@ RocksDbSpecificQueryExecutor::readTxs( fmtstrings::kPathTransactionByTs, query.accountId()); } else { - auto it = common.template seek(fmtstrings::kTransactionByHeight, + auto it = common.template seek(RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kTransactionByHeight, query.accountId(), bounds.heightFrom); status = enumerateKeysAndValues(common, @@ -696,6 +702,7 @@ operator()( return false; } }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathAccountAssets, domain_id, account_name); @@ -804,6 +811,7 @@ operator()( } return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathRoles); RDB_ERROR_CHECK(canExist(status, [&] { return "Enumerate roles"; })); @@ -950,6 +958,7 @@ operator()( syncing_peer)); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, path); }; diff --git a/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.cpp b/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.cpp index 26d21ff030e..e463e6b6d11 100644 --- a/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.cpp +++ b/irohad/ametsuchi/impl/rocksdb_temporary_wsv_impl.cpp @@ -7,8 +7,8 @@ #include "ametsuchi/impl/rocksdb_command_executor.hpp" #include "ametsuchi/impl/rocksdb_common.hpp" -#include "ametsuchi/impl/rocksdb_db_transaction.hpp" #include "ametsuchi/tx_executor.hpp" +#include "common/to_lower.hpp" #include "interfaces/commands/command.hpp" #include "interfaces/permission_to_string.hpp" #include "interfaces/transaction.hpp" @@ -42,16 +42,20 @@ namespace iroha::ametsuchi { else quorum = *result.assumeValue(); - for (auto &signatory : transaction.signatures()) + std::string pk; + for (auto &signatory : transaction.signatures()) { + pk.clear(); + toLowerAppend(signatory.publicKey(), pk); if (auto result = forSignatory( - common, account, domain, 
signatory.publicKey()); + common, account, domain, pk); expected::hasError(result)) return expected::makeError( validation::CommandError{"signatures validation", 1, result.assumeError().description, false}); + } if (boost::size(transaction.signatures()) < quorum) { auto error_str = "Transaction " + transaction.toString() diff --git a/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp b/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp index 66ebc44fe79..659d555fc6e 100644 --- a/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp +++ b/irohad/ametsuchi/impl/rocksdb_wsv_query.cpp @@ -61,6 +61,7 @@ namespace iroha::ametsuchi { signatories.emplace_back(signatory.ToStringView()); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathSignatories, domain_id, account_name); @@ -99,11 +100,15 @@ namespace iroha::ametsuchi { rocksdb::Status status; if (syncing_peers) - status = enumerateKeysAndValues( - common, std::move(callback), fmtstrings::kPathSPeers); + status = enumerateKeysAndValues(common, + std::move(callback), + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kPathSPeers); else - status = enumerateKeysAndValues( - common, std::move(callback), fmtstrings::kPathPeers); + status = enumerateKeysAndValues(common, + std::move(callback), + RocksDBPort::ColumnFamilyType::kWsv, + fmtstrings::kPathPeers); RDB_ERROR_CHECK(canExist( status, [&]() { return fmt::format("Enumerate peers"); })); @@ -194,12 +199,17 @@ namespace iroha::ametsuchi { assert(!hash_str.empty()); uint64_t number; - std::from_chars( - height_str.data(), height_str.data() + height_str.size(), number); - return iroha::TopBlockInfo( - number, - shared_model::crypto::Hash(shared_model::crypto::Blob::fromHexString( - std::string{hash_str}))); + auto [ptr, ec]{std::from_chars( + height_str.data(), height_str.data() + height_str.size(), number)}; + if (ec == std::errc()) + return iroha::TopBlockInfo( + number, + shared_model::crypto::Hash( + shared_model::crypto::Blob::fromHexString( + std::string{hash_str}))); + else + return expected::makeError( + "Height in top block info is not a valid number."); } } diff --git a/irohad/ametsuchi/impl/wsv_restorer_impl.cpp b/irohad/ametsuchi/impl/wsv_restorer_impl.cpp index 149ea223888..2246261277a 100644 --- a/irohad/ametsuchi/impl/wsv_restorer_impl.cpp +++ b/irohad/ametsuchi/impl/wsv_restorer_impl.cpp @@ -166,7 +166,8 @@ namespace iroha::ametsuchi { std::shared_ptr bsf) { IROHA_EXPECTED_TRY_GET_VALUE(command_executor_uniq, storage.createCommandExecutor()); - std::shared_ptr command_executor(std::move(command_executor_uniq)); + std::shared_ptr command_executor( + std::move(command_executor_uniq)); std::shared_ptr block_storage_factory{ bsf ? std::move(bsf) : std::make_shared()}; @@ -226,12 +227,12 @@ namespace iroha::ametsuchi { wsv_ledger_height = 0; } - /// Commit reindexed blocks every 1000 blocks. For reliability. + /// Commit reindexed blocks every 10 blocks. For reliability. /// When doing reindex of huge blockchain and the procedure is interrupted /// it is important to continue from last commit point to save time. 
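The do-loop that follows implements this checkpointing. Its shape, reduced to the essentials with illustrative names (only the step value of 10 comes from this change):

// Checkpointed reindex: commit every kCommitStep blocks so an interrupted
// run resumes from the last committed height rather than from scratch.
#include <algorithm>
#include <cstdint>

constexpr uint64_t kCommitStep = 10;  // was 1000 before this change

template <typename ApplyChunkF>
void reindex(uint64_t wsv_height,
             uint64_t top_height,
             ApplyChunkF &&apply_and_commit) {
  while (wsv_height < top_height) {
    auto const commit_height = std::min(wsv_height + kCommitStep, top_height);
    apply_and_commit(wsv_height + 1, commit_height);  // one durable chunk
    wsv_height = commit_height;  // resume point advances with each commit
  }
}

A smaller step costs more commits on a full reindex, but it bounds the work redone after an interruption to at most kCommitStep blocks.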
do { auto commit_height = - std::min(wsv_ledger_height + 1000, last_block_in_storage); + std::min(wsv_ledger_height + 10, last_block_in_storage); IROHA_EXPECTED_TRY_GET_VALUE( mutable_storage, storage.createMutableStorage(command_executor, diff --git a/irohad/iroha_wsv_diff/iroha_wsv_diff.cpp b/irohad/iroha_wsv_diff/iroha_wsv_diff.cpp index c3651477181..fdfef728d10 100644 --- a/irohad/iroha_wsv_diff/iroha_wsv_diff.cpp +++ b/irohad/iroha_wsv_diff/iroha_wsv_diff.cpp @@ -130,7 +130,8 @@ expected::Result initialize() try { FLAGS_pg_opt, "iroha_default", log_manager->getChild("PostgresOptions")->getLogger()), - log_manager)); + log_manager, + true)); pg_pool_wrapper_ = std::move(pool_wrapper); IROHA_EXPECTED_TRY_GET_VALUE( @@ -650,7 +651,7 @@ bool Wsv::from_rocksdb(RocksDbCommon &rdbc) { if (key_starts_with_and_drop(RDB_F_VERSION)) { assert(key.empty()); schema_version = std::string{val}; - assert(schema_version == "1#2#0" && + assert(schema_version == "1#4#0" && "This version of iroha_wsv_diff can check WSV in RocksDB of version 1.2.0 only"); } else if (key_starts_with_and_drop(RDB_NETWORK)) { if (key_starts_with_and_drop(RDB_PEERS)) { @@ -838,6 +839,7 @@ bool Wsv::from_rocksdb(RocksDbCommon &rdbc) { assert(key.empty()); return true; }, + iroha::ametsuchi::RocksDBPort::ColumnFamilyType::kWsv, RDB_ROOT RDB_WSV); for (auto &[permaccid, gp_set] : grant_perms_map) { auto &acc = find_account_by_id(permaccid); @@ -1064,7 +1066,9 @@ int wsv_check() try { return 0; } else { cout << "~~~ WSV-s DIFFER!!! ~~~" << endl; - cout << "For future investigation use difftool on files rocksdb.wsv and postgres.wsv. Just like:" << endl; + cout << "For future investigation use difftool on files rocksdb.wsv and " + "postgres.wsv. Just like:" + << endl; cout << " diff <(tail -n+2 postgres.wsv) <(tail -n+2 rockdb.wsv)" << endl; cout << "(Here command tail is to drop first line.)" << endl; return 1; diff --git a/irohad/main/application.cpp b/irohad/main/application.cpp index 849ed8df61b..a130e8cbe03 100644 --- a/irohad/main/application.cpp +++ b/irohad/main/application.cpp @@ -459,8 +459,8 @@ Irohad::RunResult Irohad::initStorage( cache->addCacheblePath(RDB_ROOT /**/ RDB_WSV /**/ RDB_ROLES); cache->addCacheblePath(RDB_ROOT /**/ RDB_WSV /**/ RDB_DOMAIN); - db_context_ = - std::make_shared(std::move(rdb_port)); + db_context_ = std::make_shared( + std::move(rdb_port), std::move(cache)); } break; default: diff --git a/irohad/main/impl/pg_connection_init.cpp b/irohad/main/impl/pg_connection_init.cpp index 0c8f24ff44d..47c5be4a2ed 100644 --- a/irohad/main/impl/pg_connection_init.cpp +++ b/irohad/main/impl/pg_connection_init.cpp @@ -399,19 +399,23 @@ iroha::expected::Result, std::string> PgConnectionInit::init(StartupWsvDataPolicy startup_wsv_data_policy, iroha::ametsuchi::PostgresOptions const &pg_opt, - logger::LoggerManagerTreePtr log_manager) { - return prepareWorkingDatabase(startup_wsv_data_policy, pg_opt) | [&] { - return prepareConnectionPool(KTimesReconnectionStrategyFactory{10}, - pg_opt, - kDbPoolSize, - log_manager); - }; + logger::LoggerManagerTreePtr log_manager, + bool skip_schema_check) { + return prepareWorkingDatabase( + startup_wsv_data_policy, pg_opt, skip_schema_check) + | [&] { + return prepareConnectionPool(KTimesReconnectionStrategyFactory{10}, + pg_opt, + kDbPoolSize, + log_manager); + }; } iroha::expected::Result PgConnectionInit::prepareWorkingDatabase( StartupWsvDataPolicy startup_wsv_data_policy, - const PostgresOptions &options) { + const PostgresOptions &options, + bool skip_schema_check) { 
return getMaintenanceSession(options) | [&](auto maintenance_sql) { int work_db_exists; *maintenance_sql << "select exists(" @@ -428,7 +432,7 @@ PgConnectionInit::prepareWorkingDatabase( } else { // StartupWsvDataPolicy::kReuse return isSchemaCompatible(options) | [&](bool is_compatible) -> iroha::expected::Result { - if (not is_compatible) { + if (not is_compatible && !skip_schema_check) { return "The schema is not compatible. " "Either overwrite the ledger or use a compatible binary " "version."; diff --git a/irohad/main/impl/pg_connection_init.hpp b/irohad/main/impl/pg_connection_init.hpp index 47254f3df4f..53e3c417c62 100644 --- a/irohad/main/impl/pg_connection_init.hpp +++ b/irohad/main/impl/pg_connection_init.hpp @@ -34,11 +34,13 @@ namespace iroha { std::string> init(StartupWsvDataPolicy startup_wsv_data_policy, iroha::ametsuchi::PostgresOptions const &pg_opt, - logger::LoggerManagerTreePtr log_manager); + logger::LoggerManagerTreePtr log_manager, + bool skip_schema_check = false); static expected::Result prepareWorkingDatabase( StartupWsvDataPolicy startup_wsv_data_policy, - const PostgresOptions &options); + const PostgresOptions &options, + bool skip_schema_check = false); static expected::Result, std::string> prepareConnectionPool( diff --git a/irohad/main/impl/rocksdb_connection_init.cpp b/irohad/main/impl/rocksdb_connection_init.cpp index 6da2dec04a7..d5854cc8eb6 100644 --- a/irohad/main/impl/rocksdb_connection_init.cpp +++ b/irohad/main/impl/rocksdb_connection_init.cpp @@ -53,7 +53,10 @@ namespace { iroha::expected::Result, std::string> RdbConnectionInit::init(StartupWsvDataPolicy startup_wsv_data_policy, iroha::ametsuchi::RocksDbOptions const &opt, - logger::LoggerManagerTreePtr) { + logger::LoggerManagerTreePtr log_manager) { + log_manager->getLogger()->info( + "Working database prepare started(with 'drop_state' flag it can take a " + "long time)..."); return prepareWorkingDatabase(startup_wsv_data_policy, opt); } diff --git a/irohad/ordering/impl/batches_cache.cpp b/irohad/ordering/impl/batches_cache.cpp index d66f3924c8e..df51261f511 100644 --- a/irohad/ordering/impl/batches_cache.cpp +++ b/irohad/ordering/impl/batches_cache.cpp @@ -7,6 +7,7 @@ #include #include +#include #include "interfaces/iroha_internal/transaction_batch.hpp" #include "interfaces/transaction.hpp" @@ -28,7 +29,7 @@ namespace iroha::ordering { return tx_count_; } - BatchesContext::BatchesSetType const &BatchesContext::getBatchesSet() const { + BatchesContext::BatchesSetType &BatchesContext::getBatchesSet() { return batches_; } @@ -96,7 +97,7 @@ namespace iroha::ordering { }); } - bool BatchesCache::isEmpty() const { + bool BatchesCache::isEmpty() { std::shared_lock lock(batches_cache_cs_); return batches_cache_.getBatchesSet().empty(); } @@ -112,35 +113,9 @@ namespace iroha::ordering { } void BatchesCache::forCachedBatches( - std::function const &f) const { - std::shared_lock lock(batches_cache_cs_); - f(batches_cache_.getBatchesSet()); - } - - void BatchesCache::getTransactions( - size_t requested_tx_amount, - std::vector> - &collection) { - collection.clear(); - collection.reserve(requested_tx_amount); - + std::function const &f) { std::unique_lock lock(batches_cache_cs_); - uint32_t depth_counter = 0ul; - batches_cache_.remove([&](auto &batch, bool &process_iteration) { - auto const txs_count = batch->transactions().size(); - if (collection.size() + txs_count > requested_tx_amount) { - ++depth_counter; - process_iteration = (depth_counter < 8ull); - return false; - } - - 
collection.insert(std::end(collection), - std::begin(batch->transactions()), - std::end(batch->transactions())); - - used_batches_cache_.insert(batch); - return true; - }); + f(batches_cache_.getBatchesSet()); } void BatchesCache::processReceivedProposal( diff --git a/irohad/ordering/impl/batches_cache.hpp b/irohad/ordering/impl/batches_cache.hpp index 5ffcc20c2d0..e72218d3d22 100644 --- a/irohad/ordering/impl/batches_cache.hpp +++ b/irohad/ordering/impl/batches_cache.hpp @@ -46,7 +46,7 @@ namespace iroha::ordering { public: uint64_t getTxsCount() const; - BatchesSetType const &getBatchesSet() const; + BatchesSetType &getBatchesSet(); bool insert(std::shared_ptr const &batch); @@ -97,18 +97,43 @@ namespace iroha::ordering { void remove(const OnDemandOrderingService::HashesSetType &hashes); - bool isEmpty() const; + bool isEmpty(); uint64_t txsCount() const; uint64_t availableTxsCount() const; - void forCachedBatches( - std::function const &f) const; + void forCachedBatches(std::function const &f); + template void getTransactions( size_t requested_tx_amount, std::vector> - &txs); + &collection, + IsProcessedFunc &&is_processed) { + collection.clear(); + collection.reserve(requested_tx_amount); + + std::unique_lock lock(batches_cache_cs_); + uint32_t depth_counter = 0ul; + batches_cache_.remove([&](auto &batch, bool &process_iteration) { + if (std::forward(is_processed)(batch)) + return true; + + auto const txs_count = batch->transactions().size(); + if (collection.size() + txs_count > requested_tx_amount) { + ++depth_counter; + process_iteration = (depth_counter < 8ull); + return false; + } + + collection.insert(std::end(collection), + std::begin(batch->transactions()), + std::end(batch->transactions())); + + used_batches_cache_.insert(batch); + return true; + }); + } void processReceivedProposal( OnDemandOrderingService::CollectionType batches); diff --git a/irohad/ordering/impl/on_demand_ordering_gate.cpp b/irohad/ordering/impl/on_demand_ordering_gate.cpp index bb393470190..e2b03244d1d 100644 --- a/irohad/ordering/impl/on_demand_ordering_gate.cpp +++ b/irohad/ordering/impl/on_demand_ordering_gate.cpp @@ -11,14 +11,17 @@ #include #include #include + #include "ametsuchi/tx_presence_cache.hpp" #include "ametsuchi/tx_presence_cache_utils.hpp" #include "common/visitor.hpp" +#include "datetime/time.hpp" #include "interfaces/iroha_internal/transaction_batch.hpp" #include "interfaces/iroha_internal/transaction_batch_impl.hpp" #include "interfaces/iroha_internal/transaction_batch_parser_impl.hpp" #include "logger/logger.hpp" #include "ordering/impl/on_demand_common.hpp" +#include "validators/field_validator.hpp" using iroha::ordering::OnDemandOrderingGate; @@ -125,26 +128,39 @@ OnDemandOrderingGate::processProposalRequest(ProposalEvent const &event) const { void OnDemandOrderingGate::sendCachedTransactions() { assert(not stop_mutex_.try_lock()); // lock must be taken before // TODO iceseer 14.01.21 IR-958 Check that OS is remote - forLocalOS(&OnDemandOrderingService::forCachedBatches, - [this](auto const &batches) { - auto end_iterator = batches.begin(); - auto current_number_of_transactions = 0u; - for (; end_iterator != batches.end(); ++end_iterator) { - auto batch_size = (*end_iterator)->transactions().size(); - if (current_number_of_transactions + batch_size - <= transaction_limit_) { - current_number_of_transactions += batch_size; - } else { - break; - } - } - - if (not batches.empty()) { - network_client_->onBatches( - transport::OdOsNotification::CollectionType{ - batches.begin(), 
-                         batches.begin(), end_iterator});
-               }
-             });
+  forLocalOS(&OnDemandOrderingService::forCachedBatches, [this](auto &batches) {
+    auto end_iterator = batches.begin();
+    auto current_number_of_transactions = 0u;
+    auto const now = iroha::time::now();
+
+    for (; end_iterator != batches.end();) {
+      if (std::any_of(
+              end_iterator->get()->transactions().begin(),
+              end_iterator->get()->transactions().end(),
+              [&](const auto &tx) {
+                return (uint64_t)now
+                    > shared_model::validation::FieldValidator::kMaxDelay
+                        + tx->createdTime();
+              })) {
+        end_iterator = batches.erase(end_iterator);
+        continue;
+      }
+
+      auto batch_size = (*end_iterator)->transactions().size();
+      if (current_number_of_transactions + batch_size <= transaction_limit_) {
+        current_number_of_transactions += batch_size;
+      } else {
+        break;
+      }
+
+      ++end_iterator;
+    }
+
+    if (not batches.empty()) {
+      network_client_->onBatches(transport::OdOsNotification::CollectionType{
+          batches.begin(), end_iterator});
+    }
+  });
 }

 std::shared_ptr
diff --git a/irohad/ordering/impl/on_demand_ordering_service_impl.cpp b/irohad/ordering/impl/on_demand_ordering_service_impl.cpp
index 5a59bde3f03..235867f1832 100644
--- a/irohad/ordering/impl/on_demand_ordering_service_impl.cpp
+++ b/irohad/ordering/impl/on_demand_ordering_service_impl.cpp
@@ -71,7 +71,7 @@ void OnDemandOrderingServiceImpl::removeFromBatchesCache(
   batches_cache_.remove(hashes);
 }

-bool OnDemandOrderingServiceImpl::isEmptyBatchesCache() const {
+bool OnDemandOrderingServiceImpl::isEmptyBatchesCache() {
   return batches_cache_.isEmpty();
 }
@@ -80,7 +80,7 @@ bool OnDemandOrderingServiceImpl::hasEnoughBatchesInCache() const {
 }

 void OnDemandOrderingServiceImpl::forCachedBatches(
-    std::function const &f) const {
+    std::function const &f) {
   batches_cache_.forCachedBatches(f);
 }
@@ -144,7 +144,11 @@ OnDemandOrderingServiceImpl::packNextProposals(const consensus::Round &round) {
   auto now = iroha::time::now();
   std::vector<std::shared_ptr<shared_model::interface::Transaction>> txs;
   if (!isEmptyBatchesCache())
-    batches_cache_.getTransactions(transaction_limit_, txs);
+    batches_cache_.getTransactions(
+        transaction_limit_, txs, [&](auto const &batch) {
+          assert(batch);
+          return batchAlreadyProcessed(*batch);
+        });

   log_->debug("Packed proposal contains: {} transactions.", txs.size());
   return tryCreateProposal(round, txs, now);
diff --git a/irohad/ordering/impl/on_demand_ordering_service_impl.hpp b/irohad/ordering/impl/on_demand_ordering_service_impl.hpp
index 983d8080108..a16bd72106e 100644
--- a/irohad/ordering/impl/on_demand_ordering_service_impl.hpp
+++ b/irohad/ordering/impl/on_demand_ordering_service_impl.hpp
@@ -108,12 +108,12 @@ namespace iroha {
       void removeFromBatchesCache(
           const OnDemandOrderingService::HashesSetType &hashes);

-      bool isEmptyBatchesCache() const override;
+      bool isEmptyBatchesCache() override;

       bool hasEnoughBatchesInCache() const override;

       void forCachedBatches(
-          std::function const &f) const override;
+          std::function const &f) override;

       bool hasProposal(consensus::Round round) const override;
diff --git a/irohad/ordering/on_demand_ordering_service.hpp b/irohad/ordering/on_demand_ordering_service.hpp
index 33727ab90c9..5bb7875d948 100644
--- a/irohad/ordering/on_demand_ordering_service.hpp
+++ b/irohad/ordering/on_demand_ordering_service.hpp
@@ -95,9 +95,9 @@ namespace iroha {
        * @param f - callback function
        */
       virtual void forCachedBatches(
-          std::function const &f) const = 0;
+          std::function const &f) = 0;

-      virtual bool isEmptyBatchesCache() const = 0;
+      virtual bool isEmptyBatchesCache() = 0;

       virtual bool hasEnoughBatchesInCache() const = 0;
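Review note: the ordering changes above work together. getTransactions() now takes an is_processed predicate, so batches already seen by the node are dropped from the cache instead of being packed into the proposal, and sendCachedTransactions() erases any batch containing a transaction older than FieldValidator::kMaxDelay before rebroadcasting the rest. A minimal standalone sketch of the collection loop's shape follows; the Batch struct and collect() helper are hypothetical stand-ins, not the real shared_model interfaces:

    #include <cassert>
    #include <memory>
    #include <set>
    #include <string>
    #include <vector>

    // Hypothetical stand-in for shared_model::interface::TransactionBatch.
    struct Batch {
      std::vector<std::string> txs;  // the real code holds Transaction pointers
      bool processed = false;
    };
    using BatchPtr = std::shared_ptr<Batch>;

    // Mirrors the shape of BatchesCache::getTransactions above: collect up to
    // `limit` transactions, erasing batches the predicate reports as processed.
    template <typename IsProcessedFunc>
    std::vector<std::string> collect(std::set<BatchPtr> &cache,
                                     size_t limit,
                                     IsProcessedFunc &&is_processed) {
      std::vector<std::string> out;
      out.reserve(limit);
      for (auto it = cache.begin(); it != cache.end();) {
        if (is_processed(*it)) {  // already seen: drop it, do not pack it
          it = cache.erase(it);
          continue;
        }
        if (out.size() + (*it)->txs.size() > limit)
          break;  // the real code probes up to 8 further batches before stopping
        out.insert(out.end(), (*it)->txs.begin(), (*it)->txs.end());
        it = cache.erase(it);  // the real code moves it into used_batches_cache_
      }
      return out;
    }

    int main() {
      std::set<BatchPtr> cache{
          std::make_shared<Batch>(Batch{{"tx1", "tx2"}, false}),
          std::make_shared<Batch>(Batch{{"tx3"}, true})};
      auto txs = collect(cache, 100, [](BatchPtr const &b) { return b->processed; });
      assert(txs.size() == 2);  // the processed batch was skipped
    }

The predicate keeps the cache itself policy-free: packNextProposals() above supplies batchAlreadyProcessed() as the filter, while tests can supply anything.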
diff --git a/irohad/pending_txs_storage/impl/pending_txs_storage_impl.cpp b/irohad/pending_txs_storage/impl/pending_txs_storage_impl.cpp
index baa619852ec..7f7f3936cdd 100644
--- a/irohad/pending_txs_storage/impl/pending_txs_storage_impl.cpp
+++ b/irohad/pending_txs_storage/impl/pending_txs_storage_impl.cpp
@@ -5,6 +5,8 @@

 #include "pending_txs_storage/impl/pending_txs_storage_impl.hpp"

+#include
+
 #include "ametsuchi/tx_presence_cache_utils.hpp"
 #include "interfaces/transaction.hpp"
 #include "multi_sig_transactions/state/mst_state.hpp"
diff --git a/libs/common/common.hpp b/libs/common/common.hpp
index f29d5da4abe..de3a6cfe145 100644
--- a/libs/common/common.hpp
+++ b/libs/common/common.hpp
@@ -7,6 +7,7 @@
 #define IROHA_LIBS_COMMON_HPP

 #include
+#include
 #include
 #include
diff --git a/libs/common/radix_tree.hpp b/libs/common/radix_tree.hpp
index 6c5a4c23e48..ebb45226d7d 100644
--- a/libs/common/radix_tree.hpp
+++ b/libs/common/radix_tree.hpp
@@ -8,6 +8,7 @@

 #include
 #include
+#include
 #include
 #include
@@ -74,8 +75,11 @@ namespace iroha {
                   "Context must be with 0 offset.");

     using Alloc = typename std::allocator_traits::template rebind_alloc;
+    using AllocStr =
+        typename std::allocator_traits<AllocT>::template rebind_alloc<CharT>;

     mutable NodeContext root_;
+    std::basic_string<CharT, std::char_traits<CharT>, AllocStr> key_name_;
     Alloc alloc_;

     template
@@ -103,6 +107,16 @@
       return what;
     }

+    void createNodeKey(NodeContext const *const from) {
+      key_name_.clear();
+      auto parent = from;
+      while (parent != &root_) {
+        key_name_.insert(
+            key_name_.begin(), parent->key, parent->key + parent->key_sz);
+        parent = parent->parent;
+      }
+    }
+
     NodeContext *&getFromKey(NodeContext *const parent,
                              CharT const *key) const {
       return parent->children[AlphabetT::position(key[0ul])];
@@ -205,6 +219,24 @@
       return nullptr;
     }

+    NodeContext *getChildAfter(NodeContext const *const node,
+                               NodeContext const *const target = nullptr) {
+      if (!target)
+        return (node->children_count > 0) ? getFirstChild(node) : nullptr;
+
+      assert(target->parent == node);
+      assert(target->key_sz > 0ul);
+
+      for (auto pos = AlphabetT::position(target->key[0ul]) + 1;
+           pos < AlphabetT::size();
+           ++pos) {
+        auto const child = node->children[pos];
+        if (child != nullptr)
+          return child;
+      }
+      return nullptr;
+    }
+
     bool compress(NodeContext *const parent,
                   NodeContext *const target,
                   NodeContext *const child) {
@@ -314,7 +346,7 @@
     void eraseWithChildren(NodeContext *const from) {
       NodeContext *node = from;
-      NodeContext *parent = from->parent;
+      NodeContext *const parent = from->parent;

       while (node != parent)
         node = (node->children_count != 0ul)
                    ? getFirstChild(node)
@@ -334,7 +366,7 @@
    public:
     RadixTree() = default;
-    explicit RadixTree(AllocT &alloc) : alloc_(alloc) {}
+    explicit RadixTree(AllocT &alloc) : key_name_(alloc), alloc_(alloc) {}

     ~RadixTree() {
       eraseWithChildren(&root_);
@@ -409,6 +441,40 @@
         eraseWithChildren(getFromKey(context.node, context.target_begin));
       }
     }
+
+    template <typename Func>
+    void filterEnumerate(CharT const *key, uint32_t len, Func &&func) {
+      SearchContext context;
+      findNearest(&root_, key, len, context);
+
+      if (context.prefix_remains_len == 0ul) {
+        auto const target_remains_len = context.target_end - context.target;
+        NodeContext *const from = (target_remains_len == 0ul)
+            ? context.node
+            : getFromKey(context.node, context.target_begin);
+        createNodeKey(from);
+
+        NodeContext *child = nullptr;
+        NodeContext *node = from;
+
+        do {
+          while ((child = getChildAfter(node, child))) {
+            node = child;
+            child = nullptr;
+            key_name_.append(node->key, node->key_sz);
+          }
+          if (node != &root_) {
+            if (Node *const n = nodeContextToNode(node); n->data)
+              std::forward<Func>(func)(
+                  std::string_view(key_name_.data(), key_name_.size()),
+                  &(*n->data));
+            key_name_.resize(key_name_.size() - node->key_sz);
+          }
+          child = node;
+          node = node->parent;
+        } while (child != from);
+      }
+    }
   };
 }  // namespace iroha
diff --git a/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.cpp b/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.cpp
index 9547161dca6..b6ebeac983d 100644
--- a/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.cpp
+++ b/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.cpp
@@ -49,10 +49,11 @@ namespace integration_framework::fake_peer {
   void OnDemandOsNetworkNotifier::onDuplicates(const HashesSetType &hashes) {}

   void OnDemandOsNetworkNotifier::forCachedBatches(
-      std::function const &f) const {}
+      std::function const
+          &f) {}

-  bool OnDemandOsNetworkNotifier::isEmptyBatchesCache() const {
+  bool OnDemandOsNetworkNotifier::isEmptyBatchesCache() {
     return true;
   }
diff --git a/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.hpp b/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.hpp
index c3abf53bd24..b2d5234cea0 100644
--- a/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.hpp
+++ b/test/framework/integration_framework/fake_peer/network/on_demand_os_network_notifier.hpp
@@ -33,10 +33,11 @@ namespace integration_framework::fake_peer {
     void onDuplicates(const HashesSetType &hashes) override;

     void forCachedBatches(
-        std::function const &f) const override;
+        std::function const
+            &f) override;

-    bool isEmptyBatchesCache() const override;
+    bool isEmptyBatchesCache() override;

     bool hasEnoughBatchesInCache() const override;
diff --git a/test/module/irohad/ametsuchi/rocksdb_common_test.cpp b/test/module/irohad/ametsuchi/rocksdb_common_test.cpp
index 3a87ee3751c..afccdfb6ad5 100644
--- a/test/module/irohad/ametsuchi/rocksdb_common_test.cpp
+++ b/test/module/irohad/ametsuchi/rocksdb_common_test.cpp
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include

 #include "ametsuchi/impl/database_cache/cache.hpp"
 #include "ametsuchi/impl/rocksdb_common.hpp"
@@ -53,13 +54,13 @@ class RocksDBTest : public ::testing::Test {
   void insertDb(std::string_view key, std::string_view value) {
     RocksDbCommon common(tx_context_);
     common.valueBuffer() = value;
-    common.put(key);
+    common.put(RocksDBPort::ColumnFamilyType::kWsv, key);
     common.commit();
   }

   std::string_view readDb(std::string_view key) {
     RocksDbCommon common(tx_context_);
-    common.get(key);
+    common.get(RocksDBPort::ColumnFamilyType::kWsv, key);
     return common.valueBuffer();
   }
@@ -119,9 +120,90 @@ TEST_F(RocksDBTest, DatabaseCacheTest) {
   ASSERT_EQ(counter, 2ull);
 }

+TEST_F(RocksDBTest, RadixTreeFilterEnum2) {
+  iroha::RadixTree rt;
+  std::set<std::string> expect;
+  auto insert = [&](std::string_view data, bool do_expected_insert) {
+    rt.insert(data.data(), data.size(), data.data());
+    if (do_expected_insert)
+      expect.insert(std::string{data});
+  };
+
+  insert("1", true);
+  insert("12578", true);
+  insert("125789", true);
+  insert("1257890000", true);
+  insert("123", true);
+  insert("124", true);
+
+  auto filter = [&](std::string_view key, QQQ *data) {
+    ASSERT_NE(data, nullptr);
+    ASSERT_FALSE(data->s.empty());
+    ASSERT_TRUE(key == data->s);
+
+    auto it = expect.find(data->s);
+    ASSERT_NE(it, expect.end());
+
+    expect.erase(it);
+  };
+
+  rt.filterEnumerate(nullptr, 0ul, filter);
+  ASSERT_TRUE(expect.empty());
+}
+
+TEST_F(RocksDBTest, RadixTreeFilterEnum) {
+  iroha::RadixTree rt;
+  std::set<std::string> expect;
+  auto insert = [&](std::string_view data, bool do_expected_insert) {
+    rt.insert(data.data(), data.size(), data.data());
+    if (do_expected_insert)
+      expect.insert(std::string{data});
+  };
+
+  auto filter = [&](std::string_view key, QQQ *data) {
+    ASSERT_NE(data, nullptr);
+    ASSERT_FALSE(data->s.empty());
+    ASSERT_TRUE(key == data->s);
+
+    auto it = expect.find(data->s);
+    ASSERT_NE(it, expect.end());
+
+    expect.erase(it);
+  };
+
+  insert("1", true);
+  rt.filterEnumerate("1", 1, filter);
+  ASSERT_TRUE(expect.empty());
+
+  insert("12", true);
+  insert("123", true);
+  insert("124", true);
+  rt.filterEnumerate("12", 2, filter);
+  ASSERT_TRUE(expect.empty());
+
+  insert("1256", true);
+  insert("1257", true);
+  rt.filterEnumerate("125", 3, filter);
+  ASSERT_TRUE(expect.empty());
+
+  insert("12578", true);
+  insert("125789", true);
+  insert("1257890000", true);
+  expect.insert("1257");
+  rt.filterEnumerate("1257", 4, filter);
+  ASSERT_TRUE(expect.empty());
+}
+
 TEST_F(RocksDBTest, RadixTreeTest) {
   iroha::RadixTree rt;

+  rt.insert("1234", 4, "9");
+  rt.filterDelete("123", 3);
+  ASSERT_TRUE(rt.find("1", 1) == nullptr);
+  ASSERT_TRUE(rt.find("12", 2) == nullptr);
+  ASSERT_TRUE(rt.find("123", 3) == nullptr);
+  ASSERT_TRUE(rt.find("1234", 4) == nullptr);
+
   rt.insert("123", 3, "d");
   rt.filterDelete("12", 2);
   ASSERT_TRUE(rt.find("1", 1) == nullptr);
@@ -293,21 +375,84 @@ TEST_F(RocksDBTest, SimpleOperation) {

 TEST_F(RocksDBTest, SimpleDelete) {
   RocksDbCommon common(tx_context_);
-  ASSERT_TRUE(common.del(key3_).ok());
+  ASSERT_TRUE(common.del(RocksDBPort::ColumnFamilyType::kWsv, key3_).ok());

-  auto status = common.get(key3_);
+  auto status = common.get(RocksDBPort::ColumnFamilyType::kWsv, key3_);
   ASSERT_TRUE(status.IsNotFound());
 }

+TEST_F(RocksDBTest, RemoveTableTest) {
+  {
+    RocksDbCommon common(tx_context_);
+    common.valueBuffer() = "aaa";
+    ASSERT_TRUE(
+        common.put(RocksDBPort::ColumnFamilyType::kWsv, "test_key").ok());
+    ASSERT_TRUE(
+        common.put(RocksDBPort::ColumnFamilyType::kStore, "test_key").ok());
+    ASSERT_TRUE(common.commit().ok());
+
+    common.valueBuffer().clear();
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kWsv, "test_key").ok());
+    ASSERT_TRUE(common.valueBuffer() == "aaa");
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kStore, "test_key").ok());
+    ASSERT_TRUE(common.valueBuffer() == "aaa");
+    ASSERT_TRUE(common.commit().ok());
+
+    common.dropTable(RocksDBPort::ColumnFamilyType::kWsv);
+    ASSERT_TRUE(common.get(RocksDBPort::ColumnFamilyType::kWsv, "test_key")
+                    .IsNotFound());
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kStore, "test_key").ok());
+    ASSERT_TRUE(common.valueBuffer() == "aaa");
+
+    common.valueBuffer() = "bbb";
+    ASSERT_TRUE(
+        common.put(RocksDBPort::ColumnFamilyType::kWsv, "test_key").ok());
+    ASSERT_TRUE(common.commit().ok());
+
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kWsv, "test_key").ok());
+    ASSERT_TRUE(common.valueBuffer() == "bbb");
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kStore, "test_key").ok());
+    ASSERT_TRUE(common.valueBuffer() == "aaa");
+  }
+  {
+    RocksDbCommon common(tx_context_);
+    common.valueBuffer().clear();
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kWsv, "test_key").ok());
+    ASSERT_TRUE(common.valueBuffer() == "bbb");
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kStore, "test_key").ok());
+    ASSERT_TRUE(common.valueBuffer() == "aaa");
+    ASSERT_TRUE(common.commit().ok());
+  }
+}
+
+TEST_F(RocksDBTest, SimpleInsert) {
+  RocksDbCommon common(tx_context_);
+
+  common.valueBuffer() = "k777";
+  common.put(RocksDBPort::ColumnFamilyType::kWsv, "k777");
+
+  common.valueBuffer().clear();
+  auto status = common.get(RocksDBPort::ColumnFamilyType::kWsv, "k777");
+  ASSERT_TRUE(status.ok());
+  ASSERT_TRUE(common.valueBuffer() == "k777");
+}
+
 TEST_F(RocksDBTest, SimpleSeek) {
   RocksDbCommon common(tx_context_);
-  auto it = common.seek("key");
+  auto it = common.seek(RocksDBPort::ColumnFamilyType::kWsv, "key");
   ASSERT_TRUE(it->status().ok());

   ASSERT_TRUE(it->key().ToStringView() == key4_);
   ASSERT_TRUE(it->value().ToStringView() == value4_);

-  it = common.seek("ke1");
+  it = common.seek(RocksDBPort::ColumnFamilyType::kWsv, "ke1");
   ASSERT_TRUE(it->status().ok());

   ASSERT_TRUE(it->key().ToStringView() == key3_);
@@ -325,6 +470,7 @@ TEST_F(RocksDBTest, SimpleEnumerateKeys) {
           throw;
         return true;
       },
+      RocksDBPort::ColumnFamilyType::kWsv,
       "keY");
   ASSERT_TRUE(status.ok());
   ASSERT_EQ(counter, 2);
@@ -333,13 +479,62 @@ TEST_F(RocksDBTest, FilterDelete) {
   {
     RocksDbCommon common(tx_context_);
-    ASSERT_TRUE(common.filterDelete("keY").ok());
+    insertDb("ab", "ab");
+    insertDb("k", "121");
+    ASSERT_TRUE(
+        common.filterDelete(2ull, RocksDBPort::ColumnFamilyType::kWsv, "keY")
+            .second.ok());
     ASSERT_TRUE(common.commit().ok());
   }
   {
     RocksDbCommon common(tx_context_);
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kWsv, key1_).IsNotFound());
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kWsv, key2_).IsNotFound());
+  }
+  {
+    ASSERT_TRUE(readDb(key3_) == value3_);
+    ASSERT_TRUE(readDb(key4_) == value4_);
+    ASSERT_TRUE(readDb(key5_) == value5_);
+  }
+}
+
+TEST_F(RocksDBTest, FilterDelete2) {
+  {
+    RocksDbCommon common(tx_context_);
+    ASSERT_TRUE(
+        common.filterDelete(1ull, RocksDBPort::ColumnFamilyType::kWsv, "keY")
+            .second.ok());
+    ASSERT_TRUE(common.commit().ok());
+  }
+  {
+    RocksDbCommon common(tx_context_);
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kWsv, key1_).IsNotFound());
+  }
+  {
+    ASSERT_TRUE(readDb(key2_) == value2_);
+    ASSERT_TRUE(readDb(key3_) == value3_);
+    ASSERT_TRUE(readDb(key4_) == value4_);
+    ASSERT_TRUE(readDb(key5_) == value5_);
+  }
+}
+
+TEST_F(RocksDBTest, FilterDelete3) {
+  {
+    RocksDbCommon common(tx_context_);
+    ASSERT_TRUE(
+        common.filterDelete(1000ull, RocksDBPort::ColumnFamilyType::kWsv, "keY")
+            .second.ok());
     ASSERT_TRUE(common.commit().ok());
   }
   {
     RocksDbCommon common(tx_context_);
-    ASSERT_TRUE(common.get(key1_).IsNotFound());
-    ASSERT_TRUE(common.get(key2_).IsNotFound());
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kWsv, key1_).IsNotFound());
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kWsv, key2_).IsNotFound());
   }
   {
     ASSERT_TRUE(readDb(key3_) == value3_);
@@ -358,6 +553,7 @@ TEST_F(RocksDBTest, SimpleEnumerateKeys2) {
           throw;
         return true;
       },
+      RocksDBPort::ColumnFamilyType::kWsv,
       "key");
   ASSERT_TRUE(status.ok());
   ASSERT_EQ(counter, 1);
@@ -371,6 +567,7 @@ TEST_F(RocksDBTest, SimpleEnumerateKeys3) {
           throw;
         return false;
       },
+      RocksDBPort::ColumnFamilyType::kWsv,
       "keyT")
               .ok());
   ASSERT_TRUE(common
@@ -379,6 +576,7 @@ TEST_F(RocksDBTest, SimpleEnumerateKeys3) {
           throw;
         return false;
       },
+      RocksDBPort::ColumnFamilyType::kWsv,
       "ko")
               .ok());
 }
@@ -392,13 +590,15 @@ TEST_F(RocksDBTest, NumberRewrite) {
   {
     RocksDbCommon common(tx_context_);
     common.encode(55ull);
-    ASSERT_TRUE(common.put("{}", "123").ok());
+    ASSERT_TRUE(
+        common.put(RocksDBPort::ColumnFamilyType::kWsv, "{}", "123").ok());
     ASSERT_TRUE(common.commit().ok());
   }

   uint64_t value;
   {
     RocksDbCommon common(tx_context_);
-    ASSERT_TRUE(common.get("{}", "123").ok());
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kWsv, "{}", "123").ok());
     common.decode(value);
   }
   ASSERT_TRUE(value == 55ull);
@@ -408,13 +608,14 @@ TEST_F(RocksDBTest, Skip) {
   {
     RocksDbCommon common(tx_context_);
     common.encode(55ull);
-    ASSERT_TRUE(common.put("123").ok());
+    ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "123").ok());
     common.skip();
   }
   {
     RocksDbCommon common(tx_context_);
-    ASSERT_FALSE(common.get("123").ok());
-    ASSERT_TRUE(common.get("123").IsNotFound());
+    ASSERT_FALSE(common.get(RocksDBPort::ColumnFamilyType::kWsv, "123").ok());
+    ASSERT_TRUE(
+        common.get(RocksDBPort::ColumnFamilyType::kWsv, "123").IsNotFound());
   }
 }
@@ -452,17 +653,17 @@ TEST_F(RocksDBTest, Quorum) {

 TEST_F(RocksDBTest, SortingOrder) {
   RocksDbCommon common(tx_context_);
-  common.filterDelete("");
+  common.filterDelete(1ull, RocksDBPort::ColumnFamilyType::kWsv, "");

   common.valueBuffer().clear();
-  ASSERT_TRUE(common.put("5").ok());
-  ASSERT_TRUE(common.put("3").ok());
-  ASSERT_TRUE(common.put("11").ok());
-  ASSERT_TRUE(common.put("6").ok());
-  ASSERT_TRUE(common.put("27").ok());
-  ASSERT_TRUE(common.put("1").ok());
-  ASSERT_TRUE(common.put("144").ok());
-  ASSERT_TRUE(common.put("2").ok());
+  ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "5").ok());
+  ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "3").ok());
+  ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "11").ok());
+  ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "6").ok());
+  ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "27").ok());
+  ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "1").ok());
+  ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "144").ok());
+  ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, "2").ok());

   std::vector<std::string> s;
   common.enumerate(
@@ -472,6 +673,7 @@ TEST_F(RocksDBTest, SortingOrder) {
         s.push_back(std::string(key.ToStringView()));
         return true;
       },
+      RocksDBPort::ColumnFamilyType::kWsv,
       "");

   ASSERT_EQ(s[0], "1");
@@ -486,36 +688,38 @@ TEST_F(RocksDBTest, LowerBoundSearch) {
   RocksDbCommon common(tx_context_);
-  common.filterDelete("");
+  common.filterDelete(1ull, RocksDBPort::ColumnFamilyType::kWsv, "");

   char const *target = "wta1234569#1#2";
   char const *target2 = "wta1234367#1#1";

   common.valueBuffer().clear();
-  ASSERT_TRUE(common.put(target2).ok());
-  ASSERT_TRUE(common.put(target).ok());
-  ASSERT_TRUE(common.put("wta1234570#2#1").ok());
+  ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, target2).ok());
+  ASSERT_TRUE(common.put(RocksDBPort::ColumnFamilyType::kWsv, target).ok());
+  ASSERT_TRUE(
+      common.put(RocksDBPort::ColumnFamilyType::kWsv, "wta1234570#2#1").ok());

   {
-    auto it = common.seek("wta0");
+    auto it = common.seek(RocksDBPort::ColumnFamilyType::kWsv, "wta0");
     ASSERT_TRUE(it->Valid());
     ASSERT_TRUE(it->key().ToStringView() == target2);
   }
   {
common.seek("wta1234411#0#0"); + auto it = + common.seek(RocksDBPort::ColumnFamilyType::kWsv, "wta1234411#0#0"); ASSERT_TRUE(it->Valid()); ASSERT_TRUE(it->key().ToStringView() == target); } { - auto it = common.seek("wta1234411"); + auto it = common.seek(RocksDBPort::ColumnFamilyType::kWsv, "wta1234411"); ASSERT_TRUE(it->Valid()); ASSERT_TRUE(it->key().ToStringView() == target); } { - auto it = common.seek("wta1239411"); + auto it = common.seek(RocksDBPort::ColumnFamilyType::kWsv, "wta1239411"); ASSERT_FALSE(it->Valid()); } } @@ -566,6 +770,7 @@ TEST_F(RocksDBTest, Signatories) { ++counter; return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathSignatories, "dom", "acc"); diff --git a/test/module/irohad/ametsuchi/rocksdb_executor_test.cpp b/test/module/irohad/ametsuchi/rocksdb_executor_test.cpp index 61959ae70f7..a6f3344c795 100644 --- a/test/module/irohad/ametsuchi/rocksdb_executor_test.cpp +++ b/test/module/irohad/ametsuchi/rocksdb_executor_test.cpp @@ -101,6 +101,7 @@ namespace iroha::ametsuchi { roles.emplace_back(r.ToStringView()); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathAccountRoles, domain, account); @@ -234,6 +235,7 @@ namespace iroha::ametsuchi { return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathAccountDetail, domain, account); diff --git a/test/module/irohad/ametsuchi/rocksdb_indexer_test.cpp b/test/module/irohad/ametsuchi/rocksdb_indexer_test.cpp index b91e453cf2f..e8b7decbbe7 100644 --- a/test/module/irohad/ametsuchi/rocksdb_indexer_test.cpp +++ b/test/module/irohad/ametsuchi/rocksdb_indexer_test.cpp @@ -107,6 +107,7 @@ TEST_F(RocksDBIndexerTest, SimpleInsertTxByTs) { hash.ToStringView(); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathTransactionByTs, account_1_); ASSERT_TRUE(status.ok()); @@ -124,6 +125,7 @@ TEST_F(RocksDBIndexerTest, SimpleInsertTxByTs) { hash.ToStringView(); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathTransactionByTs, account_2_); ASSERT_TRUE(status.ok()); @@ -219,6 +221,7 @@ TEST_F(RocksDBIndexerTest, SimpleCheckTxByPos) { items[std::string(position.ToStringView())] = data.ToStringView(); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathTransactionByPosition, account_1_); @@ -242,6 +245,7 @@ TEST_F(RocksDBIndexerTest, SimpleCheckTxByPos) { items[std::string(position.ToStringView())] = data.ToStringView(); return true; }, + RocksDBPort::ColumnFamilyType::kWsv, fmtstrings::kPathTransactionByPosition, account_2_); diff --git a/test/module/irohad/ordering/on_demand_connection_manager_test.cpp b/test/module/irohad/ordering/on_demand_connection_manager_test.cpp index aa525834172..6080408a757 100644 --- a/test/module/irohad/ordering/on_demand_connection_manager_test.cpp +++ b/test/module/irohad/ordering/on_demand_connection_manager_test.cpp @@ -7,6 +7,8 @@ #include #include + +#include "datetime/time.hpp" #include "framework/test_logger.hpp" #include "interfaces/iroha_internal/proposal.hpp" #include "module/irohad/ordering/ordering_mocks.hpp" diff --git a/test/module/irohad/ordering/on_demand_os_test.cpp b/test/module/irohad/ordering/on_demand_os_test.cpp index ca23c74038b..d65523ac3be 100644 --- a/test/module/irohad/ordering/on_demand_os_test.cpp +++ b/test/module/irohad/ordering/on_demand_os_test.cpp @@ -262,7 +262,7 @@ TEST_F(OnDemandOsTest, PassMissingTransaction) { auto &batch = *batches.at(0); EXPECT_CALL(*mock_cache, check(batchRef(batch))) - .WillOnce(Return(std::vector{ + .WillRepeatedly(Return(std::vector{ 
           iroha::ametsuchi::tx_cache_status_responses::Missing()}));

   os->onBatches(batches);
@@ -288,13 +288,13 @@ TEST_F(OnDemandOsTest, SeveralTransactionsOneCommited) {
   auto &batch3 = *batches.at(2);

   EXPECT_CALL(*mock_cache, check(batchRef(batch1)))
-      .WillOnce(Return(std::vector{
+      .WillRepeatedly(Return(std::vector{
           iroha::ametsuchi::tx_cache_status_responses::Missing()}));
   EXPECT_CALL(*mock_cache, check(batchRef(batch2)))
-      .WillOnce(Return(std::vector{
+      .WillRepeatedly(Return(std::vector{
           iroha::ametsuchi::tx_cache_status_responses::Committed()}));
   EXPECT_CALL(*mock_cache, check(batchRef(batch3)))
-      .WillOnce(Return(std::vector{
+      .WillRepeatedly(Return(std::vector{
           iroha::ametsuchi::tx_cache_status_responses::Missing()}));

   os->onBatches(batches);
diff --git a/test/module/irohad/ordering/ordering_mocks.hpp b/test/module/irohad/ordering/ordering_mocks.hpp
index 250aacab52d..2af8aeb117f 100644
--- a/test/module/irohad/ordering/ordering_mocks.hpp
+++ b/test/module/irohad/ordering/ordering_mocks.hpp
@@ -35,11 +35,10 @@ namespace iroha::ordering {
   MOCK_METHOD(void, onCollaborationOutcome, (consensus::Round), (override));
   MOCK_METHOD(void, onTxsCommitted, (const HashesSetType &), (override));
   MOCK_METHOD(void, onDuplicates, (const HashesSetType &), (override));
-  MOCK_CONST_METHOD1(
-      forCachedBatches,
-      void(std::function<
-           void(const OnDemandOrderingService::BatchesSetType &)> const &));
-  MOCK_METHOD(bool, isEmptyBatchesCache, (), (const, override));
+  MOCK_METHOD1(forCachedBatches,
+               void(std::function const &));
+  MOCK_METHOD(bool, isEmptyBatchesCache, (), (override));
   MOCK_METHOD(bool, hasEnoughBatchesInCache, (), (const, override));
   MOCK_METHOD(bool, hasProposal, (consensus::Round), (const, override));
   MOCK_METHOD(void, processReceivedProposal, (CollectionType), (override));
diff --git a/test/module/shared_model/interface_mocks.hpp b/test/module/shared_model/interface_mocks.hpp
index 16aa25c03ed..a718c9cf0f4 100644
--- a/test/module/shared_model/interface_mocks.hpp
+++ b/test/module/shared_model/interface_mocks.hpp
@@ -7,6 +7,8 @@
 #define IROHA_SHARED_MODEL_INTERFACE_MOCKS_HPP

 #include
+
+#include "datetime/time.hpp"
 #include "interfaces/commands/command.hpp"
 #include "interfaces/common_objects/common_objects_factory.hpp"
 #include "interfaces/common_objects/peer.hpp"
@@ -86,7 +88,9 @@ inline auto createMockTransactionWithHash(
   auto res = std::make_shared>();

+  auto now = iroha::time::now();
   ON_CALL(*res, hash()).WillByDefault(ReturnRefOfCopy(hash));
+  ON_CALL(*res, createdTime()).WillByDefault(testing::Return(now));

   return res;
 }
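Review note: for context on the new RadixTree::filterEnumerate exercised by the tests above, it walks every data-bearing node under the given prefix, rebuilding the full key into key_name_ so the callback sees the (full key, data) pair. A usage sketch under stated assumptions: the Payload type mirrors the tests' QQQ, whose definition is outside this diff, and the tree's template arguments and default alphabet are assumptions, not confirmed by the source:

    #include <cassert>
    #include <string>
    #include <string_view>

    #include "common/radix_tree.hpp"

    // Assumed payload mirroring the tests' QQQ: the value remembers its own key.
    struct Payload {
      std::string s;
      Payload(char const *c) : s(c) {}
    };

    int main() {
      iroha::RadixTree<Payload> tree;  // template arguments are an assumption
      tree.insert("1257", 4, "1257");
      tree.insert("125789", 6, "125789");

      // Enumerate everything under the "125" prefix; the callback receives the
      // reconstructed full key plus a pointer to the stored payload.
      size_t visited = 0;
      tree.filterEnumerate("125", 3, [&](std::string_view key, Payload *data) {
        assert(key == data->s);  // key is rebuilt from the node chain
        ++visited;
      });
      assert(visited == 2);  // both "1257" and "125789" lie under "125"

      // As in RadixTreeFilterEnum2, (nullptr, 0) enumerates the whole tree.
      tree.filterEnumerate(nullptr, 0ul, [](std::string_view, Payload *) {});
    }

The traversal is iterative (getChildAfter() resumes a parent's child scan after backtracking), so enumeration needs no recursion and no per-node heap allocation beyond the shared key_name_ buffer.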