diff --git a/CMakeLists.txt b/CMakeLists.txt index 249a536e74..c6c42df616 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -13,7 +13,7 @@ include(GNUInstallDirs) # ---- Project ---- project(QualityControl - VERSION 1.162.0 + VERSION 1.164.0 DESCRIPTION "O2 Data Quality Control Framework" LANGUAGES C CXX) diff --git a/Framework/CMakeLists.txt b/Framework/CMakeLists.txt index 104115d9b9..5f48077f10 100644 --- a/Framework/CMakeLists.txt +++ b/Framework/CMakeLists.txt @@ -5,8 +5,22 @@ configure_file("include/QualityControl/Version.h.in" "${CMAKE_CURRENT_BINARY_DIR}/include/QualityControl/Version.h" @ONLY) -# ---- Library for the types ---- + # ---- Library for IL ---- +add_library(O2QualityControlInfoLogger STATIC + src/QcInfoLogger.cxx +) + +target_include_directories(O2QualityControlInfoLogger + PUBLIC + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include> +) + +target_link_libraries(O2QualityControlInfoLogger + PUBLIC + AliceO2::InfoLogger +) +# ---- Library for the types ---- add_library(O2QualityControlTypes src/MonitorObject.cxx src/QualityObject.cxx @@ -21,11 +35,13 @@ target_include_directories( ) target_link_libraries(O2QualityControlTypes + PRIVATE + O2QualityControlInfoLogger PUBLIC - ROOT::Hist + AliceO2::BookkeepingApi AliceO2::Common O2::DataFormatsQualityControl - AliceO2::BookkeepingApi + ROOT::Hist ) add_root_dictionary(O2QualityControlTypes @@ -70,7 +86,6 @@ add_library(O2QualityControl src/AggregatorInterface.cxx src/DatabaseFactory.cxx src/CcdbDatabase.cxx - src/QcInfoLogger.cxx src/TaskFactory.cxx src/TaskRunner.cxx src/TaskRunnerFactory.cxx @@ -141,7 +156,6 @@ target_link_libraries(O2QualityControl ROOT::Hist ROOT::TreePlayer AliceO2::Common - AliceO2::InfoLogger AliceO2::Monitoring AliceO2::Configuration AliceO2::Occ @@ -159,6 +173,7 @@ target_link_libraries(O2QualityControl ${RDKAFKA_LIB} PRIVATE Boost::system CURL::libcurl + O2QualityControlInfoLogger ) add_root_dictionary(O2QualityControl diff --git a/Framework/include/QualityControl/MonitorObject.h
b/Framework/include/QualityControl/MonitorObject.h index 8f3ba7ac91..0f045e41a3 100644 --- a/Framework/include/QualityControl/MonitorObject.h +++ b/Framework/include/QualityControl/MonitorObject.h @@ -50,12 +50,12 @@ class MonitorObject : public TObject /// Destructor ~MonitorObject() override; - /// Copy constructor - MonitorObject(const MonitorObject& other) = default; + // /// Copy constructor + MonitorObject(const MonitorObject& other); /// Move constructor MonitorObject(MonitorObject&& other) /*noexcept*/ = default; /// Copy assignment operator - MonitorObject& operator=(const MonitorObject& other) = default; + MonitorObject& operator=(const MonitorObject& other); /// Move assignment operator MonitorObject& operator=(MonitorObject&& other) /*noexcept*/ = default; @@ -69,19 +69,19 @@ class MonitorObject : public TObject /// \brief Return joined task name and name of the encapsulated object (if any). /// @return The name as "{getTaskName()}/{getName())}. - const std::string getFullName() const { return getTaskName() + "/" + getName(); } + std::string getFullName() const; - TObject* getObject() const { return mObject; } - void setObject(TObject* object) { mObject = object; } + TObject* getObject() const; + void setObject(TObject* object); - bool isIsOwner() const { return mIsOwner; } - void setIsOwner(bool isOwner) { mIsOwner = isOwner; } + bool isIsOwner() const; + void setIsOwner(bool isOwner); - const std::string& getTaskName() const { return mTaskName; } - void setTaskName(const std::string& taskName) { mTaskName = taskName; } + const std::string& getTaskName() const; + void setTaskName(const std::string& taskName); - const std::string& getDetectorName() const { return mDetectorName; } - void setDetectorName(const std::string& detectorName) { mDetectorName = detectorName; } + const std::string& getDetectorName() const; + void setDetectorName(const std::string& detectorName); const std::string& getTaskClass() const; void setTaskClass(const std::string& 
taskClass); @@ -117,6 +117,8 @@ class MonitorObject : public TObject void Draw(Option_t* option) override; TObject* DrawClone(Option_t* option) const override; + void Copy(TObject& object) const override; + /// \brief Build the path to this object. /// Build the path to this object as it will appear in the GUI. /// \return A string containing the path. @@ -126,7 +128,7 @@ class MonitorObject : public TObject void setDescription(const std::string& description); private: - TObject* mObject; + std::unique_ptr<TObject> mObject; std::string mTaskName; std::string mTaskClass; std::string mDetectorName; @@ -141,7 +143,10 @@ class MonitorObject : public TObject // tells Merger to create an object with data from the last cycle only on the side of the complete object bool mCreateMovingWindow = false; - ClassDefOverride(MonitorObject, 12); + void releaseObject(); + void cloneAndSetObject(const MonitorObject&); + + ClassDefOverride(MonitorObject, 13); }; } // namespace o2::quality_control::core diff --git a/Framework/include/QualityControl/ObjectsManager.h b/Framework/include/QualityControl/ObjectsManager.h index 64c659be89..dbd8968cae 100644 --- a/Framework/include/QualityControl/ObjectsManager.h +++ b/Framework/include/QualityControl/ObjectsManager.h @@ -21,12 +21,14 @@ #include "QualityControl/Activity.h" #include "QualityControl/MonitorObject.h" #include "QualityControl/MonitorObjectCollection.h" +#include <Mergers/MergeInterface.h> // stl +#include <concepts> #include <string> #include <memory> +#include <type_traits> class TObject; -class TObjArray; namespace o2::quality_control::core { @@ -76,10 +78,26 @@ class ObjectsManager /** * Start publishing the object obj, i.e. it will be pushed forward in the workflow at regular intervals. * The ownership remains to the caller. + * @param IgnoreMergeable if you want to ignore static_assert check for Mergeable + * @param T type of object that we want to publish. * @param obj The object to publish.
* @throws DuplicateObjectError */ - void startPublishing(TObject* obj, PublicationPolicy = PublicationPolicy::Forever); + template <bool IgnoreMergeable = false, typename T> + void startPublishing(T obj, PublicationPolicy policy = PublicationPolicy::Forever) + { + // We don't want to do this compile time check in PostProcessing, and we want to turn off runtime check as well + bool ignoreMergeableRuntime = IgnoreMergeable; +#ifndef QUALITYCONTROL_POSTPROCESSINTERFACE_H + static_assert(std::same_as<std::remove_pointer_t<T>, TObject> || + IgnoreMergeable || mergers::Mergeable<std::remove_pointer_t<T>>, + "you are trying to startPublishing object that is not mergeable." + " If you know what you are doing use startPublishing<true>(...)"); +#else + ignoreMergeableRuntime = true; +#endif + startPublishingImpl(obj, policy, ignoreMergeableRuntime); + } /** * Stop publishing this object @@ -223,6 +241,8 @@ bool mUpdateServiceDiscovery; Activity mActivity; std::vector<std::string> mMovingWindowsList; + + void startPublishingImpl(TObject* obj, PublicationPolicy, bool ignoreMergeableWarning); }; } // namespace o2::quality_control::core diff --git a/Framework/include/QualityControl/QualitiesToFlagCollectionConverter.h b/Framework/include/QualityControl/QualitiesToFlagCollectionConverter.h index c39fe79f90..f50571cfcc 100644 --- a/Framework/include/QualityControl/QualitiesToFlagCollectionConverter.h +++ b/Framework/include/QualityControl/QualitiesToFlagCollectionConverter.h @@ -46,6 +46,7 @@ class QualitiesToFlagCollectionConverter size_t getQOsIncluded() const; size_t getWorseThanGoodQOs() const; + int getRunNumber() const; /// Sets the provided validity interval, trims affected flags and fills extensions with UnknownQuality void updateValidityInterval(const ValidityInterval validityInterval); diff --git a/Framework/include/QualityControl/Quality.h b/Framework/include/QualityControl/Quality.h index b05871aee5..cac8c85632 100644 --- a/Framework/include/QualityControl/Quality.h +++ b/Framework/include/QualityControl/Quality.h @@ -35,7 +35,7 @@ class Quality { public: ///
Default constructor - Quality(unsigned int level = Quality::NullLevel, std::string name = ""); + explicit Quality(unsigned int level = Quality::NullLevel, std::string name = ""); /// Destructor virtual ~Quality() = default; diff --git a/Framework/script/RepoCleaner/README.md b/Framework/script/RepoCleaner/README.md index 748166e829..173ef56349 100644 --- a/Framework/script/RepoCleaner/README.md +++ b/Framework/script/RepoCleaner/README.md @@ -33,10 +33,26 @@ There can be any number of these rules. The order is important as we use the fir The configuration for ccdb-test is described [here](../../../doc/DevelopersTips.md). +## Setup virtual environment for development and test (venv) + +1. cd Framework/script/RepoCleaner +2. python3 -m venv env +3. source env/bin/activate +4. python -m pip install -r requirements.txt +5. python3 -m pip install . +6. You can execute and work. Next time just do "activate" and then you are good to go + ## Unit Tests -`cd QualityControl/Framework/script/RepoCleaner ; python3 -m unittest discover` -and to test only one of them: `python3 -m unittest tests/test_NewProduction.py -k test_2_runs` +``` +cd Framework/script/RepoCleaner +source env/bin/activate + +# Run a test: +python -m unittest tests.test_Ccdb.TestCcdb.test_getObjectsList +``` + +`cd QualityControl/Framework/script/RepoCleaner ; python3 -m unittest discover` In particular there is a test for the `production` rule that is pretty extensive. It hits the ccdb though and it needs the following path to be truncated: ` @@ -75,11 +91,3 @@ Create new version 2. `python3 setup.py sdist bdist_wheel` 3. `python3 -m twine upload --repository pypi dist/*` -## Use venv - -1. cd Framework/script/RepoCleaner -2. python3 -m venv env -3. source env/bin/activate -4. python -m pip install -r requirements.txt -5. python3 -m pip install . -6. You can execute and work. 
Next time just do "activate" and then you are good to go \ No newline at end of file diff --git a/Framework/script/RepoCleaner/qcrepocleaner/Ccdb.py b/Framework/script/RepoCleaner/qcrepocleaner/Ccdb.py index ead669180a..407cec3416 100644 --- a/Framework/script/RepoCleaner/qcrepocleaner/Ccdb.py +++ b/Framework/script/RepoCleaner/qcrepocleaner/Ccdb.py @@ -26,6 +26,7 @@ def __init__(self, path: str, validFrom, validTo, createdAt, uuid=None, metadata :param uuid: unique id of the object :param validFrom: validity range smaller limit (in ms) :param validTo: validity range bigger limit (in ms) + :param createdAt: creation timestamp of the object ''' self.path = path self.uuid = uuid @@ -72,7 +73,8 @@ def getObjectsList(self, added_since: int = 0, path: str = "", no_wildcard: bool :return A list of strings, each containing a path to an object in the CCDB. ''' url_for_all_obj = self.url + '/latest/' + path - url_for_all_obj += '/' if no_wildcard else '/.*' + url_for_all_obj += '/' if path else '' + url_for_all_obj += '' if no_wildcard else '.*' logger.debug(f"Ccdb::getObjectsList -> {url_for_all_obj}") headers = {'Accept': 'application/json', 'If-Not-Before':str(added_since)} r = requests.get(url_for_all_obj, headers=headers) diff --git a/Framework/script/RepoCleaner/requirements.txt b/Framework/script/RepoCleaner/requirements.txt new file mode 100644 index 0000000000..bcd694321d --- /dev/null +++ b/Framework/script/RepoCleaner/requirements.txt @@ -0,0 +1,12 @@ +certifi==2024.7.4 +chardet==5.2.0 +charset-normalizer==3.3.2 +dryable==1.2.0 +idna==3.7 +psutil==6.1.0 +python-consul==1.1.0 +PyYAML==6.0.1 +requests==2.32.2 +responses==0.25.0 +six==1.16.0 +urllib3==2.2.2 diff --git a/Framework/script/RepoCleaner/qcrepocleaner/config-test.yaml b/Framework/script/RepoCleaner/tests/config-test.yaml similarity index 100% rename from Framework/script/RepoCleaner/qcrepocleaner/config-test.yaml rename to Framework/script/RepoCleaner/tests/config-test.yaml diff --git 
a/Framework/script/RepoCleaner/qcrepocleaner/objectsList.json b/Framework/script/RepoCleaner/tests/objectsList.json similarity index 100% rename from Framework/script/RepoCleaner/qcrepocleaner/objectsList.json rename to Framework/script/RepoCleaner/tests/objectsList.json diff --git a/Framework/script/RepoCleaner/tests/test_1_per_hour.py b/Framework/script/RepoCleaner/tests/test_1_per_hour.py index 0b5d01341e..5d783363b4 100644 --- a/Framework/script/RepoCleaner/tests/test_1_per_hour.py +++ b/Framework/script/RepoCleaner/tests/test_1_per_hour.py @@ -1,26 +1,12 @@ import logging import time import unittest -from datetime import timedelta, date, datetime - -from Ccdb import Ccdb, ObjectVersion -from rules import last_only -import os -import sys -import importlib - -def import_path(path): # needed because o2-qc-repo-cleaner has no suffix - module_name = os.path.basename(path).replace('-', '_') - spec = importlib.util.spec_from_loader( - module_name, - importlib.machinery.SourceFileLoader(module_name, path) - ) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - sys.modules[module_name] = module - return module - -one_per_hour = import_path("../qcrepocleaner/rules/1_per_hour.py") +from importlib import import_module +from qcrepocleaner.Ccdb import Ccdb +from tests import test_utils +from tests.test_utils import CCDB_TEST_URL + +one_per_hour = import_module(".1_per_hour", "qcrepocleaner.rules") # file names should not start with a number... 
class Test1PerHour(unittest.TestCase): """ @@ -35,7 +21,7 @@ class Test1PerHour(unittest.TestCase): one_minute = 60000 def setUp(self): - self.ccdb = Ccdb('http://ccdb-test.cern.ch:8080') + self.ccdb = Ccdb(CCDB_TEST_URL) # ccdb-test but please use IP to avoid DNS alerts self.path = "qc/TST/MO/repo/test" self.run = 124321 self.extra = {} @@ -43,10 +29,10 @@ def setUp(self): def test_1_per_hour(self): """ - 60 versions, 2 minutes apart + 120 versions grace period of 15 minutes - First version is preserved (always). 7 are preserved during the grace period at the end. - One more is preserved after 1 hour. --> 9 preserved + First version is preserved (always). 14 are preserved during the grace period at the end. + One more is preserved after 1 hour. --> 16 preserved """ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S') @@ -54,24 +40,26 @@ def test_1_per_hour(self): # Prepare data test_path = self.path + "/test_1_per_hour" - self.prepare_data(test_path, 60, 2) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [120], [0], 123) stats = one_per_hour.process(self.ccdb, test_path, 15, 1, self.in_ten_years, self.extra) - self.assertEqual(stats["deleted"], 51) - self.assertEqual(stats["preserved"], 9) + logging.info(stats) + self.assertEqual(stats["deleted"], 104) + self.assertEqual(stats["preserved"], 16) objects_versions = self.ccdb.getVersionsList(test_path) - self.assertEqual(len(objects_versions), 9) + self.assertEqual(len(objects_versions), 16) def test_1_per_hour_period(self): """ - 60 versions, 2 minutes apart + 120 versions no grace period period of acceptance: 1 hour in the middle - We have therefore 30 versions in the acceptance period. + We have therefore 60 versions in the acceptance period. Only 1 of them, the one 1 hour after the first version in the set, will be preserved, the others are deleted. - Thus we have 29 deletion. 
Everything outside the acceptance period is kept. + Thus we have 59 deletion. Everything outside the acceptance period is kept. """ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S') @@ -79,42 +67,18 @@ def test_1_per_hour_period(self): # Prepare data test_path = self.path + "/test_1_per_hour_period" - self.prepare_data(test_path, 60, 2) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [120], [0], 123) current_timestamp = int(time.time() * 1000) - logging.debug(f"{current_timestamp} - {datetime.today()}") - - objects_versions = self.ccdb.getVersionsList(test_path) - created = len(objects_versions) stats = one_per_hour.process(self.ccdb, test_path, 15, current_timestamp-90*60*1000, current_timestamp-30*60*1000, self.extra) - self.assertEqual(stats["deleted"], 29) - self.assertEqual(stats["preserved"], 31) + logging.info(stats) + self.assertEqual(stats["deleted"], 59) + self.assertEqual(stats["preserved"], 61) objects_versions = self.ccdb.getVersionsList(test_path) - self.assertEqual(len(objects_versions), 31) - - - def prepare_data(self, path, number_versions, minutes_between): - """ - Prepare a data set starting `since_minutes` in the past. 
- 1 version per minute - """ - - current_timestamp = int(time.time() * 1000) - data = {'part': 'part'} - run = 1234 - counter = 0 - - for x in range(number_versions+1): - counter = counter + 1 - from_ts = current_timestamp - minutes_between * x * 60 * 1000 - to_ts = current_timestamp - metadata = {'RunNumber': str(run)} - version_info = ObjectVersion(path=path, validFrom=from_ts, validTo=to_ts, metadata=metadata) - self.ccdb.putVersion(version=version_info, data=data) - - logging.debug(f"counter : {counter}") + self.assertEqual(len(objects_versions), 61) if __name__ == '__main__': diff --git a/Framework/script/RepoCleaner/tests/test_1_per_run.py b/Framework/script/RepoCleaner/tests/test_1_per_run.py index c2f6a8e4ec..887de75608 100644 --- a/Framework/script/RepoCleaner/tests/test_1_per_run.py +++ b/Framework/script/RepoCleaner/tests/test_1_per_run.py @@ -1,29 +1,13 @@ import logging import time import unittest -from datetime import timedelta, date, datetime +from importlib import import_module -from Ccdb import Ccdb, ObjectVersion -from rules import last_only -import os -import sys -import importlib - - -def import_path(path): # needed because o2-qc-repo-cleaner has no suffix - module_name = os.path.basename(path).replace('-', '_') - spec = importlib.util.spec_from_loader( - module_name, - importlib.machinery.SourceFileLoader(module_name, path) - ) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - sys.modules[module_name] = module - return module - - -one_per_run = import_path("../qcrepocleaner/rules/1_per_run.py") +from qcrepocleaner.Ccdb import Ccdb +from tests import test_utils +from tests.test_utils import CCDB_TEST_URL +one_per_run = import_module(".1_per_run", "qcrepocleaner.rules") # file names should not start with a number... 
class Test1PerRun(unittest.TestCase): """ @@ -38,15 +22,14 @@ class Test1PerRun(unittest.TestCase): one_minute = 60000 def setUp(self): - self.ccdb = Ccdb('http://ccdb-test.cern.ch:8080') + self.ccdb = Ccdb(CCDB_TEST_URL) self.path = "qc/TST/MO/repo/test" self.run = 124321 self.extra = {} def test_1_per_run(self): """ - 60 versions, 1 minute apart - 6 runs + 6 runs of 10 versions, versions 1 minute apart grace period of 15 minutes Preserved: 14 at the end (grace period), 6 for the runs, but 2 are in both sets --> 14+6-2=18 preserved """ @@ -56,7 +39,8 @@ def test_1_per_run(self): # Prepare data test_path = self.path + "/test_1_per_run" - self.prepare_data(test_path, 60) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [10, 10, 10, 10, 10, 10], [0, 0, 0, 0, 0, 0], 123) objects_versions = self.ccdb.getVersionsList(test_path) created = len(objects_versions) @@ -71,8 +55,7 @@ def test_1_per_run(self): def test_1_per_run_period(self): """ - 60 versions 1 minute apart - 6 runs + 6 runs of 10 versions each, versions 1 minute apart no grace period acceptance period is only the 38 minutes in the middle preserved: 6 runs + 11 first and 11 last, with an overlap of 2 --> 26 @@ -83,7 +66,8 @@ def test_1_per_run_period(self): # Prepare data test_path = self.path + "/test_1_per_run_period" - self.prepare_data(test_path, 60) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [10, 10, 10, 10, 10, 10], [0, 0, 0, 0, 0, 0], 123) current_timestamp = int(time.time() * 1000) stats = one_per_run.process(self.ccdb, test_path, 0, current_timestamp - 49 * 60 * 1000, @@ -94,29 +78,5 @@ def test_1_per_run_period(self): objects_versions = self.ccdb.getVersionsList(test_path) self.assertEqual(len(objects_versions), 26) - def prepare_data(self, path, since_minutes): - """ - Prepare a data set starting `since_minutes` in the past. 
- 1 version per minute, 1 run every 10 versions - """ - - current_timestamp = int(time.time() * 1000) - data = {'part': 'part'} - run = 1234 - counter = 0 - - for x in range(since_minutes + 1): - counter = counter + 1 - from_ts = current_timestamp - x * 60 * 1000 - to_ts = current_timestamp - metadata = {'RunNumber': str(run)} - version_info = ObjectVersion(path=path, validFrom=from_ts, validTo=to_ts, metadata=metadata) - self.ccdb.putVersion(version=version_info, data=data) - if x % 10 == 0: - run = run + 1 - - logging.debug(f"counter : {counter}") - - if __name__ == '__main__': unittest.main() diff --git a/Framework/script/RepoCleaner/tests/test_Ccdb.py b/Framework/script/RepoCleaner/tests/test_Ccdb.py index 7f04f27902..03030a9b0d 100644 --- a/Framework/script/RepoCleaner/tests/test_Ccdb.py +++ b/Framework/script/RepoCleaner/tests/test_Ccdb.py @@ -1,45 +1,48 @@ import logging import unittest -import requests +from typing import List + import responses -from Ccdb import Ccdb, ObjectVersion -from rules import production +from qcrepocleaner.Ccdb import Ccdb, ObjectVersion +from tests.test_utils import CCDB_TEST_URL + class TestCcdb(unittest.TestCase): def setUp(self): - with open('../qcrepocleaner/objectsList.json') as f: # will close() when we leave this block + with open('objectsList.json') as f: # will close() when we leave this block self.content_objectslist = f.read() - with open('../versionsList.json') as f: # will close() when we leave this block + with open('versionsList.json') as f: # will close() when we leave this block self.content_versionslist = f.read() - self.ccdb = Ccdb('http://ccdb-test.cern.ch:8080') + self.ccdb = Ccdb(CCDB_TEST_URL) + logging.getLogger().setLevel(logging.DEBUG) @responses.activate def test_getObjectsList(self): # Prepare mock response - responses.add(responses.GET, 'http://ccdb-test.cern.ch:8080/latest/.*', + responses.add(responses.GET, CCDB_TEST_URL + '/latest/.*', self.content_objectslist, status=200) # get list of objects - 
objectsList = self.ccdb.getObjectsList() - print(f"{objectsList}") - self.assertEqual(len(objectsList), 3) - self.assertEqual(objectsList[0], 'Test') - self.assertEqual(objectsList[1], 'ITSQcTask/ChipStaveCheck') + objects_list = self.ccdb.getObjectsList() + print(f"{objects_list}") + self.assertEqual(len(objects_list), 3) + self.assertEqual(objects_list[0], 'Test') + self.assertEqual(objects_list[1], 'ITSQcTask/ChipStaveCheck') @responses.activate def test_getVersionsList(self): # Prepare mock response object_path='asdfasdf/example' - responses.add(responses.GET, 'http://ccdb-test.cern.ch:8080/browse/'+object_path, + responses.add(responses.GET, CCDB_TEST_URL + '/browse/'+object_path, self.content_versionslist, status=200) # get versions for object - versionsList: List[ObjectVersion] = self.ccdb.getVersionsList(object_path) - print(f"{versionsList}") - self.assertEqual(len(versionsList), 2) - self.assertEqual(versionsList[0].path, object_path) - self.assertEqual(versionsList[1].path, object_path) - self.assertEqual(versionsList[1].metadata["custom"], "34") + versions_list: List[ObjectVersion] = self.ccdb.getVersionsList(object_path) + print(f"{versions_list}") + self.assertEqual(len(versions_list), 2) + self.assertEqual(versions_list[0].path, object_path) + self.assertEqual(versions_list[1].path, object_path) + self.assertEqual(versions_list[1].metadata["custom"], "34") if __name__ == '__main__': unittest.main() diff --git a/Framework/script/RepoCleaner/tests/test_MultiplePerRun.py b/Framework/script/RepoCleaner/tests/test_MultiplePerRun.py index 8a3f53ce0f..7bbb759e7b 100644 --- a/Framework/script/RepoCleaner/tests/test_MultiplePerRun.py +++ b/Framework/script/RepoCleaner/tests/test_MultiplePerRun.py @@ -1,14 +1,13 @@ import logging import time import unittest -from datetime import timedelta, date, datetime -from typing import List -from qcrepocleaner.Ccdb import Ccdb, ObjectVersion +import test_utils +from qcrepocleaner.Ccdb import Ccdb from qcrepocleaner.rules 
import multiple_per_run -class TestProduction(unittest.TestCase): +class TestMultiplePerRun(unittest.TestCase): """ This test pushes data to the CCDB and then run the Rule Production and then check. It does it for several use cases. @@ -21,7 +20,7 @@ class TestProduction(unittest.TestCase): one_minute = 60000 def setUp(self): - self.ccdb = Ccdb('http://137.138.47.222:8080') + self.ccdb = Ccdb(test_utils.CCDB_TEST_URL) # ccdb-test but please use IP to avoid DNS alerts self.extra = {"interval_between_versions": "90", "migrate_to_EOS": False} self.path = "qc/TST/MO/repo/test" @@ -36,7 +35,8 @@ def test_1_finished_run(self): # Prepare data test_path = self.path + "/test_1_finished_run" - self.prepare_data(test_path, [150], [22*60], 123) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [150], [22*60], 123) stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=1, to_timestamp=self.in_ten_years, extra_params=self.extra) @@ -56,7 +56,8 @@ def test_2_runs(self): # Prepare data test_path = self.path + "/test_2_runs" - self.prepare_data(test_path, [150, 150], [3*60, 20*60], 123) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [150, 150], [3 * 60, 20 * 60], 123) stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=1, to_timestamp=self.in_ten_years, extra_params=self.extra) @@ -77,7 +78,8 @@ def test_5_runs(self): # Prepare data test_path = self.path + "/test_5_runs" - self.prepare_data(test_path, [1*60, 2*60, 3*60+10, 4*60, 5*60], + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [1*60, 2*60, 3*60+10, 4*60, 5*60], [60, 120, 190, 240, 24*60], 123) stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=1, @@ -85,11 +87,11 @@ def test_5_runs(self): self.assertEqual(stats["deleted"], 60+120+190+240+300-18) self.assertEqual(stats["preserved"], 18) 
self.assertEqual(stats["updated"], 0) - + # and now re-run it to make sure we preserve the state stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=1, to_timestamp=self.in_ten_years, extra_params=self.extra) - + self.assertEqual(stats["deleted"], 0) self.assertEqual(stats["preserved"], 18) self.assertEqual(stats["updated"], 0) @@ -105,7 +107,8 @@ def test_run_one_object(self): # Prepare data test_path = self.path + "/test_run_one_object" - self.prepare_data(test_path, [1], [25*60], 123) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [1], [25*60], 123) stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=1, to_timestamp=self.in_ten_years, extra_params=self.extra) @@ -125,7 +128,8 @@ def test_run_two_object(self): # Prepare data test_path = self.path + "/test_run_two_object" - self.prepare_data(test_path, [2], [25*60], 123) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [2], [25*60], 123) stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=1, to_timestamp=self.in_ten_years, extra_params=self.extra) @@ -145,7 +149,8 @@ def test_3_runs_with_period(self): # Prepare data test_path = self.path + "/test_3_runs_with_period" - self.prepare_data(test_path, [30,30, 30], [120,120,25*60], 123) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [30,30, 30], [120,120,25*60], 123) current_timestamp = int(time.time() * 1000) stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=current_timestamp-29*60*60*1000, @@ -160,49 +165,8 @@ def test_asdf(self): datefmt='%d-%b-%y %H:%M:%S') logging.getLogger().setLevel(int(10)) test_path = self.path + "/asdf" - self.prepare_data(test_path, [70, 70, 70], [6*60, 6*60, 25*60], 55555) - - def prepare_data(self, path, run_durations: List[int], time_till_next_run: List[int], first_run_number: 
int): - """ - Prepare a data set populated with a number of runs. - run_durations contains the duration of each of these runs in minutes - time_till_next_run is the time between two runs in minutes. - The first element of time_till_next_run is used to separate the first two runs. - Both lists must have the same number of elements. - """ - - if len(run_durations) != len(time_till_next_run): - logging.error(f"run_durations and time_till_next_run must have the same length") - exit(1) - - total_duration = 0 - for a, b in zip(run_durations, time_till_next_run): - total_duration += a + b - logging.info(f"Total duration : {total_duration}") - - current_timestamp = int(time.time() * 1000) - cursor = current_timestamp - total_duration * 60 * 1000 - first_ts = cursor - data = {'part': 'part'} - run = first_run_number - - for run_duration, time_till_next in zip(run_durations, time_till_next_run): - metadata = {'RunNumber': str(run)} - logging.debug(f"cursor: {cursor}") - logging.debug(f"time_till_next: {time_till_next}") - - for i in range(run_duration): - to_ts = cursor + 24 * 60 * 60 * 1000 # a day - metadata2 = {**metadata, 'Created': str(cursor)} - version_info = ObjectVersion(path=path, validFrom=cursor, validTo=to_ts, metadata=metadata2, - createdAt=cursor) - self.ccdb.putVersion(version=version_info, data=data) - cursor += 1 * 60 * 1000 - - run += 1 - cursor += time_till_next * 60 * 1000 - - return first_ts + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [70, 70, 70], [6*60, 6*60, 25*60], 55555) if __name__ == '__main__': diff --git a/Framework/script/RepoCleaner/tests/test_MultiplePerRun_deleteFirstLast.py b/Framework/script/RepoCleaner/tests/test_MultiplePerRun_deleteFirstLast.py index d636c1e370..3b7780a570 100644 --- a/Framework/script/RepoCleaner/tests/test_MultiplePerRun_deleteFirstLast.py +++ b/Framework/script/RepoCleaner/tests/test_MultiplePerRun_deleteFirstLast.py @@ -1,14 +1,14 @@ import logging import time 
import unittest -from datetime import timedelta, date, datetime -from typing import List -from qcrepocleaner.Ccdb import Ccdb, ObjectVersion +from qcrepocleaner.Ccdb import Ccdb from qcrepocleaner.rules import multiple_per_run +from tests import test_utils +from tests.test_utils import CCDB_TEST_URL -class TestProduction(unittest.TestCase): +class TestMultiplePerRunDeleteFirstLast(unittest.TestCase): """ This test pushes data to the CCDB and then run the Rule Production and then check. It does it for several use cases. @@ -21,7 +21,7 @@ class TestProduction(unittest.TestCase): one_minute = 60000 def setUp(self): - self.ccdb = Ccdb('http://137.138.47.222:8080') + self.ccdb = Ccdb(CCDB_TEST_URL) # ccdb-test but please use IP to avoid DNS alerts self.extra = {"interval_between_versions": "90", "migrate_to_EOS": False, "delete_first_last": True} self.path = "qc/TST/MO/repo/test" @@ -36,7 +36,8 @@ def test_1_finished_run(self): # Prepare data test_path = self.path + "/test_1_finished_run" - self.prepare_data(test_path, [150], [22*60], 123) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [150], [22*60], 123) objectsBefore = self.ccdb.getVersionsList(test_path) stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=1, @@ -61,7 +62,8 @@ def test_2_runs(self): # Prepare data test_path = self.path + "/test_2_runs" - self.prepare_data(test_path, [150, 150], [3*60, 20*60], 123) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [150, 150], [3*60, 20*60], 123) stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=1, to_timestamp=self.in_ten_years, extra_params=self.extra) @@ -82,7 +84,8 @@ def test_5_runs(self): # Prepare data test_path = self.path + "/test_5_runs" - self.prepare_data(test_path, [1*60, 2*60, 3*60+10, 4*60, 5*60], + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [1*60, 2*60, 
3*60+10, 4*60, 5*60], [60, 120, 190, 240, 24*60], 123) stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=1, @@ -90,11 +93,11 @@ def test_5_runs(self): self.assertEqual(stats["deleted"], 60+120+190+240+300-18) self.assertEqual(stats["preserved"], 18) self.assertEqual(stats["updated"], 0) - + # and now re-run it to make sure we preserve the state stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=1, to_timestamp=self.in_ten_years, extra_params=self.extra) - + self.assertEqual(stats["deleted"], 0) self.assertEqual(stats["preserved"], 18) self.assertEqual(stats["updated"], 0) @@ -110,7 +113,8 @@ def test_run_one_object(self): # Prepare data test_path = self.path + "/test_run_one_object" - self.prepare_data(test_path, [1], [25*60], 123) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [1], [25*60], 123) stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=1, to_timestamp=self.in_ten_years, extra_params=self.extra) @@ -130,7 +134,8 @@ def test_run_two_object(self): # Prepare data test_path = self.path + "/test_run_two_object" - self.prepare_data(test_path, [2], [25*60], 123) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [2], [25*60], 123) stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=1, to_timestamp=self.in_ten_years, extra_params=self.extra) @@ -150,7 +155,8 @@ def test_3_runs_with_period(self): # Prepare data test_path = self.path + "/test_3_runs_with_period" - self.prepare_data(test_path, [30,30, 30], [120,120,25*60], 123) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [30,30, 30], [120,120,25*60], 123) current_timestamp = int(time.time() * 1000) stats = multiple_per_run.process(self.ccdb, test_path, delay=60*24, from_timestamp=current_timestamp-29*60*60*1000, @@ -165,49 +171,8 @@ def test_asdf(self): 
datefmt='%d-%b-%y %H:%M:%S') logging.getLogger().setLevel(int(10)) test_path = self.path + "/asdf" - self.prepare_data(test_path, [70, 70, 70], [6*60, 6*60, 25*60], 55555) - - def prepare_data(self, path, run_durations: List[int], time_till_next_run: List[int], first_run_number: int): - """ - Prepare a data set populated with a number of runs. - run_durations contains the duration of each of these runs in minutes - time_till_next_run is the time between two runs in minutes. - The first element of time_till_next_run is used to separate the first two runs. - Both lists must have the same number of elements. - """ - - if len(run_durations) != len(time_till_next_run): - logging.error(f"run_durations and time_till_next_run must have the same length") - exit(1) - - total_duration = 0 - for a, b in zip(run_durations, time_till_next_run): - total_duration += a + b - logging.info(f"Total duration : {total_duration}") - - current_timestamp = int(time.time() * 1000) - cursor = current_timestamp - total_duration * 60 * 1000 - first_ts = cursor - data = {'part': 'part'} - run = first_run_number - - for run_duration, time_till_next in zip(run_durations, time_till_next_run): - metadata = {'RunNumber': str(run)} - logging.debug(f"cursor: {cursor}") - logging.debug(f"time_till_next: {time_till_next}") - - for i in range(run_duration): - to_ts = cursor + 24 * 60 * 60 * 1000 # a day - metadata2 = {**metadata, 'Created': str(cursor)} - version_info = ObjectVersion(path=path, validFrom=cursor, validTo=to_ts, metadata=metadata2, - createdAt=cursor) - self.ccdb.putVersion(version=version_info, data=data) - cursor += 1 * 60 * 1000 - - run += 1 - cursor += time_till_next * 60 * 1000 - - return first_ts + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [70, 70, 70], [6*60, 6*60, 25*60], 55555) if __name__ == '__main__': diff --git a/Framework/script/RepoCleaner/tests/test_Production.py b/Framework/script/RepoCleaner/tests/test_Production.py index 
d0d8163a2a..24aa73ca09 100644 --- a/Framework/script/RepoCleaner/tests/test_Production.py +++ b/Framework/script/RepoCleaner/tests/test_Production.py @@ -1,10 +1,12 @@ import logging import time import unittest -from datetime import timedelta, date, datetime +from datetime import timedelta, datetime -from Ccdb import Ccdb, ObjectVersion -from rules import production +from qcrepocleaner.Ccdb import Ccdb, ObjectVersion +from qcrepocleaner.rules import production +from tests import test_utils +from tests.test_utils import CCDB_TEST_URL class TestProduction(unittest.TestCase): @@ -20,7 +22,7 @@ class TestProduction(unittest.TestCase): one_minute = 60000 def setUp(self): - self.ccdb = Ccdb('http://ccdb-test.cern.ch:8080') + self.ccdb = Ccdb(CCDB_TEST_URL) self.extra = {"delay_first_trimming": "30", "period_btw_versions_first": "10", "delay_final_trimming": "60", "period_btw_versions_final": "60"} self.path = "qc/TST/MO/repo/test" @@ -40,7 +42,8 @@ def test_start_run(self): # Prepare data test_path = self.path + "/test_start_run" - self.prepare_data(test_path, 25, 30, True, 60, False) + test_utils.clean_data(self.ccdb, test_path) + self.prepare_data_for_prod_test(test_path, 25, 30, True, 60, False) production.eor_dict.pop(int(self.run), None) stats = production.process(self.ccdb, test_path, 30, 1, self.in_ten_years, self.extra) @@ -74,7 +77,8 @@ def test_start_run_period(self): # Prepare data test_path = self.path + "/test_start_run_period" - first_ts = self.prepare_data(test_path, 25, 30, True, 60, False) + test_utils.clean_data(self.ccdb, test_path) + first_ts = self.prepare_data_for_prod_test(test_path, 25, 30, True, 60, False) logging.getLogger().debug(f"first_ts : {first_ts}") # everything outside the period @@ -111,7 +115,8 @@ def test_mid_run(self): # Prepare data test_path = self.path + "/test_mid_run" - self.prepare_data(test_path, 90) + test_utils.clean_data(self.ccdb, test_path) + self.prepare_data_for_prod_test(test_path, 90) 
production.eor_dict.pop(int(self.run), None) stats = production.process(self.ccdb, test_path, 30, 1, self.in_ten_years, self.extra) @@ -141,7 +146,8 @@ def test_mid_run_period(self): # Prepare data test_path = self.path + "/test_mid_run_period" - first_ts = self.prepare_data(test_path, 90) + test_utils.clean_data(self.ccdb, test_path) + first_ts = self.prepare_data_for_prod_test(test_path, 90) logging.getLogger().debug(f"first_ts : {first_ts}") objects_versions = self.ccdb.getVersionsList(test_path) @@ -171,7 +177,8 @@ def test_run_finished(self): # Prepare data test_path = self.path + "/test_run_finished" - self.prepare_data(test_path, 290, 190, False, 0, True) + test_utils.clean_data(self.ccdb, test_path) + self.prepare_data_for_prod_test(test_path, 290, 190, False, 0, True) production.eor_dict[int(self.run)] = datetime.now() - timedelta(minutes=100) stats = production.process(self.ccdb, test_path, 30, 1, self.in_ten_years, self.extra) @@ -198,7 +205,8 @@ def test_run_finished_period(self): # Prepare data test_path = self.path + "/test_run_finished_period" - first_ts = self.prepare_data(test_path, 290, 190, False, 0, True) + test_utils.clean_data(self.ccdb, test_path) + first_ts = self.prepare_data_for_prod_test(test_path, 290, 190, False, 0, True) logging.getLogger().debug(f"first_ts : {first_ts}") production.eor_dict[int(self.run)] = datetime.now() - timedelta(minutes=100) @@ -214,8 +222,9 @@ def test_run_finished_period(self): self.assertTrue("trim1" not in objects_versions[6].metadata) self.assertTrue("preservation" in objects_versions[6].metadata) - def prepare_data(self, path, minutes_since_sor, duration_first_part=30, skip_first_part=False, - minutes_second_part=60, skip_second_part=False): + + def prepare_data_for_prod_test(self, path, minutes_since_sor, duration_first_part=30, skip_first_part=False, + minutes_second_part=60, skip_second_part=False): """ Prepare a data set starting `minutes_since_sor` in the past. 
The data is layed out in two parts @@ -242,7 +251,7 @@ def prepare_data(self, path, minutes_since_sor, duration_first_part=30, skip_fir if first_ts > from_ts: first_ts = from_ts to_ts = from_ts + 24 * 60 * 60 * 1000 # a day - version_info = ObjectVersion(path=path, validFrom=from_ts, validTo=to_ts, metadata=metadata) + version_info = ObjectVersion(path=path, validFrom=from_ts, createdAt=from_ts, validTo=to_ts, metadata=metadata) self.ccdb.putVersion(version=version_info, data=data) cursor = cursor + duration_first_part * 60 * 1000 @@ -257,7 +266,7 @@ def prepare_data(self, path, minutes_since_sor, duration_first_part=30, skip_fir if first_ts > from_ts: first_ts = from_ts to_ts = from_ts + 24 * 60 * 60 * 1000 # a day - version_info = ObjectVersion(path=path, validFrom=from_ts, validTo=to_ts, metadata=metadata) + version_info = ObjectVersion(path=path, validFrom=from_ts, createdAt=from_ts, validTo=to_ts, metadata=metadata) self.ccdb.putVersion(version=version_info, data=data) return first_ts diff --git a/Framework/script/RepoCleaner/tests/test_last_only.py b/Framework/script/RepoCleaner/tests/test_last_only.py index a8e1228789..5a58fd1a97 100644 --- a/Framework/script/RepoCleaner/tests/test_last_only.py +++ b/Framework/script/RepoCleaner/tests/test_last_only.py @@ -1,12 +1,11 @@ import logging import time import unittest -from datetime import timedelta, date, datetime - -from Ccdb import Ccdb, ObjectVersion -from rules import last_only - +from qcrepocleaner.Ccdb import Ccdb +from qcrepocleaner.rules import last_only +from tests import test_utils +from tests.test_utils import CCDB_TEST_URL class TestLastOnly(unittest.TestCase): @@ -22,7 +21,7 @@ class TestLastOnly(unittest.TestCase): one_minute = 60000 def setUp(self): - self.ccdb = Ccdb('http://ccdb-test.cern.ch:8080') + self.ccdb = Ccdb(CCDB_TEST_URL) # ccdb-test but please use IP to avoid DNS alerts self.extra = {} self.path = "qc/TST/MO/repo/test" self.run = 124321 @@ -30,7 +29,7 @@ def setUp(self): def 
test_last_only(self): """ - 59 versions + 60 versions grace period of 30 minutes """ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', @@ -39,10 +38,11 @@ def test_last_only(self): # Prepare data test_path = self.path + "/test_last_only" - self.prepare_data(test_path, 60) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [60], [0], 123) stats = last_only.process(self.ccdb, test_path, 30, 1, self.in_ten_years, self.extra) - self.assertEqual(stats["deleted"], 30) + self.assertEqual(stats["deleted"], 31) # 31 because between the time we produced the 60 versions and now, there is a shift self.assertEqual(stats["preserved"], 29) objects_versions = self.ccdb.getVersionsList(test_path) @@ -51,7 +51,7 @@ def test_last_only(self): def test_last_only_period(self): """ - 59 versions + 60 versions no grace period only 20 minutes in the middle are in the period """ @@ -61,40 +61,17 @@ def test_last_only_period(self): # Prepare data test_path = self.path + "/test_last_only_period" - self.prepare_data(test_path, 60) + test_utils.clean_data(self.ccdb, test_path) + test_utils.prepare_data(self.ccdb, test_path, [60], [0], 123) current_timestamp = int(time.time() * 1000) - stats = last_only.process(self.ccdb, test_path, 0, current_timestamp-41*60*1000, current_timestamp-19*60*1000, self.extra) - self.assertEqual(stats["deleted"], 19) + stats = last_only.process(self.ccdb, test_path, 0, current_timestamp-40*60*1000, current_timestamp-20*60*1000, self.extra) + self.assertEqual(stats["deleted"], 20) self.assertEqual(stats["preserved"], 40) objects_versions = self.ccdb.getVersionsList(test_path) self.assertEqual(len(objects_versions), 40) - def prepare_data(self, path, since_minutes): - """ - Prepare a data set starting `since_minutes` in the past. - 1 version per minute - Each data has a different run number. 
- """ - - current_timestamp = int(time.time() * 1000) - data = {'part': 'part'} - run = 1234 - counter = 0 - - for x in range(since_minutes): - counter = counter + 1 - from_ts = current_timestamp - x * 60 * 1000 - to_ts = current_timestamp - metadata = {'RunNumber': str(run)} - run = run + 1 - version_info = ObjectVersion(path=path, validFrom=from_ts, validTo=to_ts, metadata=metadata) - self.ccdb.putVersion(version=version_info, data=data) - - logging.debug(f"counter : {counter}" ) - - if __name__ == '__main__': unittest.main() diff --git a/Framework/script/RepoCleaner/tests/test_repoCleaner.py b/Framework/script/RepoCleaner/tests/test_repoCleaner.py index 8c907dd6e1..a3a518630b 100644 --- a/Framework/script/RepoCleaner/tests/test_repoCleaner.py +++ b/Framework/script/RepoCleaner/tests/test_repoCleaner.py @@ -1,11 +1,12 @@ -import unittest -import yaml - import importlib -from importlib.util import spec_from_loader, module_from_spec -from importlib.machinery import SourceFileLoader import os import sys +import unittest +from importlib.machinery import SourceFileLoader +from importlib.util import spec_from_loader + +import yaml + def import_path(path): # needed because o2-qc-repo-cleaner has no suffix module_name = os.path.basename(path).replace('-', '_') @@ -18,11 +19,10 @@ def import_path(path): # needed because o2-qc-repo-cleaner has no suffix sys.modules[module_name] = module return module - repoCleaner = import_path("../qcrepocleaner/o2-qc-repo-cleaner") parseConfig = repoCleaner.parseConfig Rule = repoCleaner.Rule -findMatchingRule = repoCleaner.findMatchingRule +findMatchingRules = repoCleaner.findMatchingRules class TestRepoCleaner(unittest.TestCase): @@ -64,12 +64,12 @@ def test_findMatchingRule(self): rules.append(Rule('task1/obj1', '120', 'policy1')) rules.append(Rule('task1/obj1', '120', 'policy2')) rules.append(Rule('task2/.*', '120', 'policy3')) - self.assertEqual(findMatchingRule(rules, 'task1/obj1').policy, 'policy1') - 
self.assertNotEqual(findMatchingRule(rules, 'task1/obj1').policy, 'policy2') - self.assertEqual(findMatchingRule(rules, 'task3/obj1'), None) - self.assertEqual(findMatchingRule(rules, 'task2/obj1/obj1').policy, 'policy3') + self.assertEqual(findMatchingRules(rules, 'task1/obj1')[0].policy, 'policy1') + self.assertNotEqual(findMatchingRules(rules, 'task1/obj1')[0].policy, 'policy2') + self.assertEqual(findMatchingRules(rules, 'task3/obj1'), []) + self.assertEqual(findMatchingRules(rules, 'task2/obj1/obj1')[0].policy, 'policy3') rules.append(Rule('.*', '0', 'policyAll')) - self.assertEqual(findMatchingRule(rules, 'task3/obj1').policy, 'policyAll') + self.assertEqual(findMatchingRules(rules, 'task3/obj1')[0].policy, 'policyAll') if __name__ == '__main__': diff --git a/Framework/script/RepoCleaner/tests/test_utils.py b/Framework/script/RepoCleaner/tests/test_utils.py new file mode 100644 index 0000000000..9abaf7b069 --- /dev/null +++ b/Framework/script/RepoCleaner/tests/test_utils.py @@ -0,0 +1,55 @@ +import logging +import time +from typing import List + +from qcrepocleaner.Ccdb import ObjectVersion + +CCDB_TEST_URL = 'http://128.142.249.62:8080' + +def clean_data(ccdb, path): + versions = ccdb.getVersionsList(path) + for v in versions: + ccdb.deleteVersion(v) + + +def prepare_data(ccdb, path, run_durations: List[int], time_till_next_run: List[int], first_run_number: int): + """ + Prepare a data set populated with a number of runs. + run_durations contains the duration of each of these runs in minutes + time_till_next_run is the time between two runs in minutes. + The first element of time_till_next_run is used to separate the first two runs. + Both lists must have the same number of elements. 
+ """ + + if len(run_durations) != len(time_till_next_run): + logging.error(f"run_durations and time_till_next_run must have the same length") + exit(1) + + total_duration = 0 + for a, b in zip(run_durations, time_till_next_run): + total_duration += a + b + logging.info(f"Total duration : {total_duration}") + + current_timestamp = int(time.time() * 1000) + cursor = current_timestamp - total_duration * 60 * 1000 + first_ts = cursor + data = {'part': 'part'} + run = first_run_number + + for run_duration, time_till_next in zip(run_durations, time_till_next_run): + metadata = {'RunNumber': str(run)} + logging.debug(f"cursor: {cursor}") + logging.debug(f"time_till_next: {time_till_next}") + + for i in range(run_duration): + to_ts = cursor + 24 * 60 * 60 * 1000 # a day + metadata2 = {**metadata, 'Created': str(cursor)} + version_info = ObjectVersion(path=path, validFrom=cursor, validTo=to_ts, metadata=metadata2, + createdAt=cursor) + ccdb.putVersion(version=version_info, data=data) + cursor += 1 * 60 * 1000 + + run += 1 + cursor += time_till_next * 60 * 1000 + + return first_ts diff --git a/Framework/script/RepoCleaner/versionsList.json b/Framework/script/RepoCleaner/tests/versionsList.json similarity index 95% rename from Framework/script/RepoCleaner/versionsList.json rename to Framework/script/RepoCleaner/tests/versionsList.json index ae3e9a2aae..f5f0a8887a 100644 --- a/Framework/script/RepoCleaner/versionsList.json +++ b/Framework/script/RepoCleaner/tests/versionsList.json @@ -3,6 +3,7 @@ { "id": "0c576bb0-7304-11e9-8d02-200114580202", "validFrom": "1557479683554", + "Created": "1557479683554", "validUntil": "1872839683554", "initialValidity": "1872839683554", "createTime": "1557479683563", @@ -20,6 +21,7 @@ { "id": "06fb1e80-72f7-11e9-8d02-200114580202", "validFrom": "1557474091106", + "Created": "1557474091106", "validUntil": "1557479683553", "initialValidity": "1872834091106", "createTime": "1557474091112", diff --git a/Framework/src/BookkeepingQualitySink.cxx 
b/Framework/src/BookkeepingQualitySink.cxx index 003ca9e35c..4900cd0b94 100644 --- a/Framework/src/BookkeepingQualitySink.cxx +++ b/Framework/src/BookkeepingQualitySink.cxx @@ -26,6 +26,7 @@ #include "QualityControl/QcInfoLogger.h" #include #include +#include #include #include @@ -51,13 +52,18 @@ void BookkeepingQualitySink::send(const std::string& grpcUri, const BookkeepingQ std::optional periodName; for (auto& [detector, qoMap] : flags) { - ILOG(Info, Support) << "Sending " << flags.size() << " flags for detector: " << detector << ENDM; + ILOG(Info, Support) << "Processing flags for detector: " << detector << ENDM; std::vector bkpQcFlags{}; for (auto& [qoName, converter] : qoMap) { if (converter == nullptr) { continue; } + if (provenance == Provenance::AsyncQC || provenance == Provenance::MCQC) { + auto runDuration = ccdb::BasicCCDBManager::instance().getRunDuration(converter->getRunNumber(), false); + converter->updateValidityInterval({ static_cast(runDuration.first), static_cast(runDuration.second) }); + } + auto flagCollection = converter->getResult(); if (flagCollection == nullptr) { continue; @@ -86,9 +92,9 @@ void BookkeepingQualitySink::send(const std::string& grpcUri, const BookkeepingQ } if (bkpQcFlags.empty()) { + ILOG(Info, Support) << "No flags for detector '" << detector << "', skipping" << ENDM; continue; } - try { switch (provenance) { case Provenance::SyncQC: @@ -105,6 +111,7 @@ void BookkeepingQualitySink::send(const std::string& grpcUri, const BookkeepingQ ILOG(Error, Support) << "Failed to send flags for detector: " << detector << " with error: " << err.what() << ENDM; } + ILOG(Info, Support) << "Sent " << bkpQcFlags.size() << " flags for detector: " << detector << ENDM; } } diff --git a/Framework/src/FlagHelpers.cxx b/Framework/src/FlagHelpers.cxx index 4168a15096..e5d40844f9 100644 --- a/Framework/src/FlagHelpers.cxx +++ b/Framework/src/FlagHelpers.cxx @@ -64,7 +64,7 @@ std::optional intersection(const QualityControlFlag& flag, V return flag; 
} auto intersection = flag.getInterval().getOverlap(interval); - if (intersection.isInvalid()) { + if (intersection.isInvalid() || intersection.isZeroLength()) { return std::nullopt; } return QualityControlFlag{ intersection.getMin(), intersection.getMax(), flag.getFlag(), flag.getComment(), flag.getSource() }; diff --git a/Framework/src/MonitorObject.cxx b/Framework/src/MonitorObject.cxx index 2e1a0db2f5..bde7ceb7db 100644 --- a/Framework/src/MonitorObject.cxx +++ b/Framework/src/MonitorObject.cxx @@ -15,9 +15,11 @@ /// #include "QualityControl/MonitorObject.h" +#include +#include "QualityControl/RepoPathUtils.h" +#include "QualityControl/QcInfoLogger.h" #include -#include "QualityControl/RepoPathUtils.h" using namespace std; @@ -25,11 +27,8 @@ namespace o2::quality_control::core { MonitorObject::MonitorObject() - : TObject(), - mObject(nullptr), - mTaskName(""), - mDetectorName(""), - mIsOwner(true) + : TObject{}, + mIsOwner{ true } { mActivity.mProvenance = "qc"; mActivity.mId = 0; @@ -37,28 +36,71 @@ MonitorObject::MonitorObject() } MonitorObject::MonitorObject(TObject* object, const std::string& taskName, const std::string& taskClass, const std::string& detectorName, int runNumber, const std::string& periodName, const std::string& passName, const std::string& provenance) - : TObject(), - mObject(object), - mTaskName(taskName), - mTaskClass(taskClass), - mDetectorName(detectorName), - mActivity(runNumber, "NONE", periodName, passName, provenance, gInvalidValidityInterval), - mIsOwner(true) + : TObject{}, + mObject{ object }, + mTaskName{ taskName }, + mTaskClass{ taskClass }, + mDetectorName{ detectorName }, + mActivity{ runNumber, "NONE", periodName, passName, provenance, gInvalidValidityInterval }, + mIsOwner{ true } +{ +} + +MonitorObject::MonitorObject(const MonitorObject& other) + : TObject{ other }, + mObject{}, + mTaskName{ other.mTaskName }, + mTaskClass{ other.mTaskClass }, + mDetectorName{ other.mDetectorName }, + mUserMetadata{ other.mUserMetadata }, 
+ mDescription{ other.mDescription }, + mActivity{ other.mActivity }, + mCreateMovingWindow{ other.mCreateMovingWindow } +{ + cloneAndSetObject(other); +} + +MonitorObject& MonitorObject::operator=(const MonitorObject& other) +{ + TObject::operator=(other); + mTaskName = other.mTaskName; + mTaskClass = other.mTaskClass; + mDetectorName = other.mDetectorName; + mUserMetadata = other.mUserMetadata; + mDescription = other.mDescription; + mActivity = other.mActivity; + mCreateMovingWindow = other.mCreateMovingWindow; + cloneAndSetObject(other); + + return *this; +} + +void MonitorObject::Copy(TObject& object) const { + static_cast(object) = *this; } MonitorObject::~MonitorObject() { - if (mIsOwner) { - delete mObject; - mObject = nullptr; - } + releaseObject(); } -void MonitorObject::Draw(Option_t* option) { mObject->Draw(option); } +void MonitorObject::Draw(Option_t* option) +{ + if (mObject) { + mObject->Draw(option); + } else { + ILOG(Error, Devel) << "MonitorObject::Draw() : You are trying to draw MonitorObject with no internal TObject" << ENDM; + } +} TObject* MonitorObject::DrawClone(Option_t* option) const { + if (!mObject) { + ILOG(Error, Devel) << "MonitorObject::DrawClone() : You are trying to draw MonitorObject with no internal TObject" << ENDM; + return nullptr; + } + auto* clone = new MonitorObject(); clone->setTaskName(this->getTaskName()); clone->setObject(mObject->DrawClone(option)); @@ -72,10 +114,9 @@ const std::string MonitorObject::getName() const const char* MonitorObject::GetName() const { - if (mObject == nullptr) { - cerr << "MonitorObject::getName() : No object in this MonitorObject, returning empty string" << endl; - static char empty[] = ""; - return empty; + if (!mObject) { + ILOG(Error, Ops) << "MonitorObject::getName() : No object in this MonitorObject, returning empty string" << ENDM; + return ""; } return mObject->GetName(); } @@ -160,6 +201,52 @@ void MonitorObject::updateValidity(validity_time_t value) 
mActivity.mValidity.update(value); } +std::string MonitorObject::getFullName() const +{ + return getTaskName() + "/" + getName(); +} + +TObject* MonitorObject::getObject() const +{ + return mObject.get(); +} + +void MonitorObject::setObject(TObject* object) +{ + releaseObject(); + mObject.reset(object); +} + +bool MonitorObject::isIsOwner() const +{ + return mIsOwner; +} + +void MonitorObject::setIsOwner(bool isOwner) +{ + mIsOwner = isOwner; +} + +const std::string& MonitorObject::getTaskName() const +{ + return mTaskName; +} + +void MonitorObject::setTaskName(const std::string& taskName) +{ + mTaskName = taskName; +} + +const std::string& MonitorObject::getDetectorName() const +{ + return mDetectorName; +} + +void MonitorObject::setDetectorName(const std::string& detectorName) +{ + mDetectorName = detectorName; +} + ValidityInterval MonitorObject::getValidity() const { return mActivity.mValidity; @@ -185,4 +272,23 @@ bool MonitorObject::getCreateMovingWindow() const return mCreateMovingWindow; } +void MonitorObject::releaseObject() +{ + if (!mIsOwner) { + void(mObject.release()); + } +} + +void MonitorObject::cloneAndSetObject(const MonitorObject& other) +{ + releaseObject(); + + if (auto* otherObject = other.getObject(); otherObject != nullptr && other.isIsOwner()) { + mObject.reset(otherObject->Clone()); + } else { + mObject.reset(otherObject); + } + mIsOwner = other.isIsOwner(); +} + } // namespace o2::quality_control::core diff --git a/Framework/src/MonitorObjectCollection.cxx b/Framework/src/MonitorObjectCollection.cxx index 31b021289f..abfc4572be 100644 --- a/Framework/src/MonitorObjectCollection.cxx +++ b/Framework/src/MonitorObjectCollection.cxx @@ -46,16 +46,32 @@ void MonitorObjectCollection::merge(mergers::MergeInterface* const other) if (!otherMO || !targetMO) { throw std::runtime_error("The target object or the other object could not be casted to MonitorObject."); } - if (!reportedMismatchingRunNumbers && targetMO->getActivity().mId != 
otherMO->getActivity().mId) { + + if (otherMO->getActivity().mId > targetMO->getActivity().mId) { + ILOG(Error, Ops) << "The run number of the input object '" << otherMO->GetName() << "' (" + << otherMO->getActivity().mId << ") " + << "is higher than the one of the target object '" + << targetMO->GetName() << "' (" << targetMO->getActivity().mId + << "). Replacing the merged object with input, " + << "but THIS SHOULD BE IMMEDIATELY ADDRESSED IN PRODUCTION. " + << "QC objects from other setups are reaching this one." + << ENDM; + otherMO->Copy(*targetMO); + continue; + } + + if (!reportedMismatchingRunNumbers && otherMO->getActivity().mId < targetMO->getActivity().mId) { ILOG(Error, Ops) << "The run number of the input object '" << otherMO->GetName() << "' (" << otherMO->getActivity().mId << ") " << "does not match the run number of the target object '" << targetMO->GetName() << "' (" << targetMO->getActivity().mId - << "). Trying to continue, but THIS SHOULD BE IMMEDIATELY ADDRESSED IN PRODUCTION. " + << "). Ignoring this object and trying to continue, but THIS SHOULD BE IMMEDIATELY ADDRESSED IN PRODUCTION. " << "QC objects from other setups are reaching this one. Will not report more mismatches in this collection." << ENDM; reportedMismatchingRunNumbers = true; + continue; } + // That might be another collection or a concrete object to be merged, we walk on the collection recursively. 
algorithm::merge(targetMO->getObject(), otherMO->getObject()); if (otherMO->getValidity().isValid()) { diff --git a/Framework/src/ObjectsManager.cxx b/Framework/src/ObjectsManager.cxx index eec6b25f4c..0345dac7c1 100644 --- a/Framework/src/ObjectsManager.cxx +++ b/Framework/src/ObjectsManager.cxx @@ -60,16 +60,22 @@ ObjectsManager::~ObjectsManager() ILOG(Debug, Devel) << "ObjectsManager destructor" << ENDM; } -void ObjectsManager::startPublishing(TObject* object, PublicationPolicy publicationPolicy) +void ObjectsManager::startPublishingImpl(TObject* object, PublicationPolicy publicationPolicy, bool ignoreMergeableWarning) { if (!object) { ILOG(Warning, Support) << "A nullptr provided to ObjectManager::startPublishing" << ENDM; return; } + if (mMonitorObjects->FindObject(object->GetName()) != nullptr) { ILOG(Warning, Support) << "Object is already being published (" << object->GetName() << "), will remove it and add the new one" << ENDM; stopPublishing(object->GetName()); } + + if (!ignoreMergeableWarning && !mergers::isMergeable(object)) { + ILOG(Warning, Support) << "Object '" + std::string(object->GetName()) + "' with type '" + std::string(object->ClassName()) + "' is not one of the mergeable types, it will not be correctly merged in distributed setups, such as P2 and Grid" << ENDM; + } + auto* newObject = new MonitorObject(object, mTaskName, mTaskClass, mDetectorName); newObject->setIsOwner(false); newObject->setActivity(mActivity); diff --git a/Framework/src/QualitiesToFlagCollectionConverter.cxx b/Framework/src/QualitiesToFlagCollectionConverter.cxx index ed3d4ed9f8..bfabc2862e 100644 --- a/Framework/src/QualitiesToFlagCollectionConverter.cxx +++ b/Framework/src/QualitiesToFlagCollectionConverter.cxx @@ -289,4 +289,9 @@ void QualitiesToFlagCollectionConverter::updateValidityInterval(const ValidityIn mConverted->setInterval(interval); } +int QualitiesToFlagCollectionConverter::getRunNumber() const +{ + return mConverted ? 
mConverted->getRunNumber() : -1; +} + } // namespace o2::quality_control::core diff --git a/Framework/src/SliceTrendingTask.cxx b/Framework/src/SliceTrendingTask.cxx index eaba14cd61..e7638c70f0 100644 --- a/Framework/src/SliceTrendingTask.cxx +++ b/Framework/src/SliceTrendingTask.cxx @@ -671,4 +671,4 @@ std::string SliceTrendingTask::beautifyTitle(const std::string_view rawtitle, co } return beautified; -} \ No newline at end of file +} diff --git a/Framework/src/Triggers.cxx b/Framework/src/Triggers.cxx index 775d639ad0..0a0dea29c4 100644 --- a/Framework/src/Triggers.cxx +++ b/Framework/src/Triggers.cxx @@ -212,9 +212,9 @@ TriggerFcn NewObject(const std::string& databaseUrl, const std::string& database const auto& objects = listing.get_child("objects"); if (objects.empty()) { // We don't make a fuss over it, because we might be just waiting for the first version of such object. - // It should not happen often though, so having a warning makes sense. - ILOG(Warning, Devel) << "Could not find the file '" << fullObjectPath << "' in the db '" - << databaseUrl << "' for given Activity settings (" << activity << "). Zeroes and empty strings are treated as wildcards." << ENDM; + // Apparently it happens always for a few iterations at SOR, so Warnings might be too annoying. + ILOG(Debug, Devel) << "Could not find the file '" << fullObjectPath << "' in the db '" + << databaseUrl << "' for given Activity settings (" << activity << "). Zeroes and empty strings are treated as wildcards." << ENDM; return gInvalidValidityInterval; } else if (objects.size() > 1) { ILOG(Warning, Support) << "Expected just one metadata entry for object '" << fullObjectPath << "'. Trying to continue by using the first." 
<< ENDM; diff --git a/Framework/test/testAggregatorInterface.cxx b/Framework/test/testAggregatorInterface.cxx index f2cd1924dc..e71cc57f57 100644 --- a/Framework/test/testAggregatorInterface.cxx +++ b/Framework/test/testAggregatorInterface.cxx @@ -78,10 +78,10 @@ TEST_CASE("test_invoke_all_methods") test::SimpleTestAggregator testAggregator; // prepare data - std::shared_ptr qo_null = make_shared(0, "testCheckNull", "TST"); - std::shared_ptr qo_good = make_shared(1, "testCheckGood", "TST"); - std::shared_ptr qo_medium = make_shared(2, "testCheckMedium", "TST"); - std::shared_ptr qo_bad = make_shared(3, "testCheckBad", "TST"); + std::shared_ptr qo_null = make_shared(Quality::Null, "testCheckNull", "TST"); + std::shared_ptr qo_good = make_shared(Quality::Good, "testCheckGood", "TST"); + std::shared_ptr qo_medium = make_shared(Quality::Medium, "testCheckMedium", "TST"); + std::shared_ptr qo_bad = make_shared(Quality::Bad, "testCheckBad", "TST"); QualityObjectsMapType input; std::map result1 = testAggregator.aggregate(input); diff --git a/Framework/test/testBookkeepingQualitySink.cxx b/Framework/test/testBookkeepingQualitySink.cxx index 319a3e366f..2862d6627d 100644 --- a/Framework/test/testBookkeepingQualitySink.cxx +++ b/Framework/test/testBookkeepingQualitySink.cxx @@ -15,13 +15,11 @@ /// #include -#include #include "QualityControl/BookkeepingQualitySink.h" #include "QualityControl/InfrastructureGenerator.h" using namespace o2; using namespace o2::framework; -using namespace o2::utilities; void customize(std::vector& policies) { @@ -30,12 +28,8 @@ void customize(std::vector& policies) #include #include -#include -#include -#include -#include - -using namespace o2::configuration; +#include "QualityControl/QualityObject.h" +#include "QualityControl/Quality.h" void compareFatal(const quality_control::QualityControlFlag& got, const quality_control::QualityControlFlag& expected) { @@ -57,7 +51,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const&) Inputs{}, 
Outputs{ { { "tst-qo" }, "TST", "DATA" } }, AlgorithmSpec{ [](ProcessingContext& ctx) { - auto obj = std::make_unique(0, "testCheckNull", "TST"); + auto obj = std::make_unique(core::Quality::Null, "testCheckNull", "TST"); obj->getActivity().mValidity = core::ValidityInterval{ 10, 500 }; obj->addFlag(FlagTypeFactory::Good(), "I am comment"); ctx.outputs().snapshot(Output{ "TST", "DATA", 0 }, *obj); diff --git a/Framework/test/testFlagHelpers.cxx b/Framework/test/testFlagHelpers.cxx index 3bb5b4cbff..4c5991d3b7 100644 --- a/Framework/test/testFlagHelpers.cxx +++ b/Framework/test/testFlagHelpers.cxx @@ -295,4 +295,10 @@ TEST_CASE("intersection") REQUIRE(result->getComment() == qcFlag.getComment()); REQUIRE(result->getSource() == qcFlag.getSource()); } + SECTION("Returns nullopt if the flag and the interval are adjacent") + { + QualityControlFlag qcFlag{ 15, 25, FlagTypeFactory::BadTracking(), "comment", "source" }; + REQUIRE_FALSE(intersection(qcFlag, { 10, 15 }).has_value()); + REQUIRE_FALSE(intersection(qcFlag, { 25, 30 }).has_value()); + } } diff --git a/Framework/test/testMonitorObject.cxx b/Framework/test/testMonitorObject.cxx index 70d05981c3..b35d3e96cc 100644 --- a/Framework/test/testMonitorObject.cxx +++ b/Framework/test/testMonitorObject.cxx @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -67,6 +68,207 @@ TEST_CASE("mo_save") gSystem->Unlink(filename.data()); } +TEST_CASE("mo_clone") +{ + auto th1 = std::make_unique("name", "title"); + TObject* obj = th1.get(); + auto* cloned = obj->Clone(); + auto* secondCloned = cloned->Clone(); + delete cloned; + delete secondCloned; +} + +TEST_CASE("mo_copy") +{ + auto compareWithoutObject = [](const MonitorObject& lhs, const MonitorObject& rhs) { + REQUIRE(lhs.getName() == rhs.getName()); + REQUIRE(lhs.getTaskName() == rhs.getTaskName()); + REQUIRE(lhs.getDetectorName() == rhs.getDetectorName()); + REQUIRE(lhs.getTaskClass() == rhs.getTaskClass()); + REQUIRE(lhs.isIsOwner() == 
rhs.isIsOwner()); + REQUIRE(lhs.getActivity() == rhs.getActivity()); + REQUIRE(lhs.getCreateMovingWindow() == rhs.getCreateMovingWindow()); + REQUIRE(lhs.getDescription() == rhs.getDescription()); + REQUIRE(lhs.getMetadataMap() == rhs.getMetadataMap()); + }; + + SECTION("Empty orignal MO") + { + MonitorObject original{}; + + SECTION("copy of non owning object returns same pointer") + { + auto compareShallowNonOwning = [&compareWithoutObject](const MonitorObject& lhs, const MonitorObject& rhs) { + compareWithoutObject(lhs, rhs); + // we expect shallow rhs when lhs does not own the underlying object + REQUIRE(lhs.getObject() == nullptr); + REQUIRE(rhs.getObject() == nullptr); + }; + + SECTION("copy constructor") + { + MonitorObject copy{ original }; + compareShallowNonOwning(original, copy); + } + + SECTION("copy assignment operator") + { + MonitorObject copy{}; + copy = original; + compareShallowNonOwning(original, copy); + } + + SECTION("Copy method") + { + MonitorObject copy{}; + original.Copy(copy); + compareShallowNonOwning(original, copy); + } + } + + SECTION("copy of object owned by MO returns deep copy, non nullptr init") + { + auto compareTNamed = [](const MonitorObject& lhs, const MonitorObject& rhs) { + REQUIRE(lhs.getObject() == nullptr); + REQUIRE(rhs.getObject() == nullptr); + }; + + SECTION("copy owning object before") + { + MonitorObject copy{}; + copy.setObject(new TNamed("copy named", "title copy")); + copy.setIsOwner(true); + + SECTION("copy assignment operator") + { + copy = original; + compareTNamed(original, copy); + } + + SECTION("Copy method") + { + original.Copy(copy); + compareTNamed(original, copy); + } + } + } + } + + SECTION("original MO with data") + { + + MonitorObject original{}; + + original.setTaskName("taskName"); + original.setTaskClass("taskClass"); + original.setDescription("description"); + original.setDetectorName("TST"); + original.setActivity({ 123, "type", "periodName", "passName", "provenance", gFullValidityInterval, 
"beamType", "partitionName", 2 }); + original.setCreateMovingWindow(true); + + SECTION("copy of non owning object returns same pointer") + { + auto compareShallowNonOwning = [&compareWithoutObject](const MonitorObject& lhs, const MonitorObject& rhs) { + compareWithoutObject(lhs, rhs); + // we expect shallow rhs when lhs does not own the underlying object + REQUIRE((lhs.getObject() != nullptr && lhs.getObject() == rhs.getObject())); + }; + + auto th1 = TH1I("name", "title", 10, 0, 10); + th1.Fill(8); + original.setObject(&th1); + original.setIsOwner(false); + + SECTION("copy constructor") + { + MonitorObject copy{ original }; + compareShallowNonOwning(original, copy); + } + + SECTION("copy assignment operator") + { + MonitorObject copy{}; + copy = original; + compareShallowNonOwning(original, copy); + } + + SECTION("Copy method") + { + MonitorObject copy{}; + original.Copy(copy); + compareShallowNonOwning(original, copy); + } + } + + SECTION("copy of object owned by MO returns deep copy, init from nullptr") + { + auto compareTNamed = [](const MonitorObject& lhs, const MonitorObject& rhs) { + auto* namedoriginal = static_cast(lhs.getObject()); + auto* namedcopy = static_cast(rhs.getObject()); + REQUIRE(std::string(namedoriginal->GetName()) == std::string(namedcopy->GetName())); + REQUIRE(std::string(namedoriginal->GetTitle()) == std::string(namedcopy->GetTitle())); + }; + + auto* named = new TNamed("named", "title"); + original.setObject(named); + original.setIsOwner(true); + + SECTION("copy constructor") + { + MonitorObject copy{ original }; + compareTNamed(original, copy); + } + + SECTION("copy assignment operator") + { + MonitorObject copy{}; + copy = original; + compareTNamed(original, copy); + } + + SECTION("Copy method") + { + MonitorObject copy{}; + original.Copy(copy); + compareTNamed(original, copy); + } + } + + SECTION("copy of object owned by MO returns deep copy, non nullptr init") + { + auto compareTNamed = [](const MonitorObject& lhs, const 
MonitorObject& rhs) { + auto* namedoriginal = static_cast(lhs.getObject()); + auto* namedcopy = static_cast(rhs.getObject()); + REQUIRE(std::string(namedoriginal->GetName()) == std::string(namedcopy->GetName())); + REQUIRE(std::string(namedoriginal->GetTitle()) == std::string(namedcopy->GetTitle())); + }; + + auto* named = new TNamed("named", "title"); + original.setObject(named); + original.setIsOwner(true); + + SECTION("copy owning object before") + { + MonitorObject copy{}; + copy.setObject(new TNamed("copy named", "title copy")); + copy.setIsOwner(true); + + SECTION("copy assignment operator") + { + copy = original; + compareTNamed(original, copy); + } + + SECTION("Copy method") + { + original.Copy(copy); + compareTNamed(original, copy); + } + } + } + } +} + TEST_CASE("metadata") { string objectName = "asdf"; diff --git a/Framework/test/testMonitorObjectCollection.cxx b/Framework/test/testMonitorObjectCollection.cxx index 2c38208229..b097056eb5 100644 --- a/Framework/test/testMonitorObjectCollection.cxx +++ b/Framework/test/testMonitorObjectCollection.cxx @@ -16,8 +16,8 @@ #include "QualityControl/MonitorObjectCollection.h" #include "QualityControl/MonitorObject.h" -#include "QualityControl/QcInfoLogger.h" +#include #include #include #include @@ -96,6 +96,77 @@ TEST_CASE("monitor_object_collection_merge") delete target; } +TEST_CASE("monitor_object_collection_merge_different_id") +{ + const auto toHisto = [](std::unique_ptr& collection) -> TH1I* { + return dynamic_cast(dynamic_cast(collection->At(0))->getObject()); + }; + + constexpr size_t bins = 10; + constexpr size_t min = 0; + constexpr size_t max = 10; + + SECTION("other has higher run number than target") + { + auto target = std::make_unique(); + + auto* targetTH1I = new TH1I("histo 1d", "original", bins, min, max); + targetTH1I->Fill(5); + auto* targetMoTH1I = new MonitorObject(targetTH1I, "histo 1d", "class", "DET"); + targetMoTH1I->setActivity({ 123, "PHYSICS", "LHC32x", "apass2", "qc_async", { 10, 20 
} }); + targetMoTH1I->setIsOwner(true); + target->Add(targetMoTH1I); + + auto other = std::make_unique(); + other->SetOwner(true); + + auto* otherTH1I = new TH1I("histo 1d", "input", bins, min, max); + otherTH1I->Fill(2); + auto* otherMoTH1I = new MonitorObject(otherTH1I, "histo 1d", "class", "DET"); + otherMoTH1I->setActivity({ 1234, "PHYSICS", "LHC32x", "apass2", "qc_async", { 43, 60 } }); + otherMoTH1I->setIsOwner(true); + other->Add(otherMoTH1I); + + CHECK_NOTHROW(algorithm::merge(target.get(), other.get())); + auto* h1orig = toHisto(target); + auto* h1other = toHisto(other); + REQUIRE(h1orig->GetAt(3) == 1); + for (size_t i = 0; i != h1orig->GetSize(); ++i) { + REQUIRE(h1orig->GetAt(i) == h1other->GetAt(i)); + } + } + + SECTION("other has lower run number than target") + { + auto target = std::make_unique(); + + auto* targetTH1I = new TH1I("histo 1d", "original", bins, min, max); + targetTH1I->Fill(5); + auto* targetMoTH1I = new MonitorObject(targetTH1I, "histo 1d", "class", "DET"); + targetMoTH1I->setActivity({ 1234, "PHYSICS", "LHC32x", "apass2", "qc_async", { 10, 20 } }); + targetMoTH1I->setIsOwner(true); + target->Add(targetMoTH1I); + + auto other = std::make_unique(); + other->SetOwner(true); + + auto* otherTH1I = new TH1I("histo 1d", "input", bins, min, max); + otherTH1I->Fill(2); + auto* otherMoTH1I = new MonitorObject(otherTH1I, "histo 1d", "class", "DET"); + otherMoTH1I->setActivity({ 123, "PHYSICS", "LHC32x", "apass2", "qc_async", { 43, 60 } }); + otherMoTH1I->setIsOwner(true); + other->Add(otherMoTH1I); + + CHECK_NOTHROW(algorithm::merge(target.get(), other.get())); + auto* h1orig = toHisto(target); + auto* h1other = toHisto(other); + REQUIRE(h1orig->At(h1orig->FindBin(5)) == 1); + REQUIRE(h1other->At(h1other->FindBin(5)) == 0); + REQUIRE(h1orig->At(h1orig->FindBin(2)) == 0); + REQUIRE(h1other->At(h1other->FindBin(2)) == 1); + } +} + TEST_CASE("monitor_object_collection_post_deserialization") { const size_t bins = 10; @@ -171,4 +242,4 @@ 
TEST_CASE("monitor_object_collection_clone_mw") delete mwMOC2; } -} // namespace o2::quality_control::core \ No newline at end of file +} // namespace o2::quality_control::core diff --git a/Framework/test/testObjectsManager.cxx b/Framework/test/testObjectsManager.cxx index 1844bba155..0d9618eb0e 100644 --- a/Framework/test/testObjectsManager.cxx +++ b/Framework/test/testObjectsManager.cxx @@ -53,12 +53,12 @@ BOOST_AUTO_TEST_CASE(duplicate_object_test) config.consulUrl = ""; ObjectsManager objectsManager(config.taskName, config.taskClass, config.detectorName, config.consulUrl, 0, true); TObjString s("content"); - objectsManager.startPublishing(&s, PublicationPolicy::Forever); - BOOST_CHECK_NO_THROW(objectsManager.startPublishing(&s, PublicationPolicy::Forever)); + objectsManager.startPublishing(&s, PublicationPolicy::Forever); + BOOST_CHECK_NO_THROW(objectsManager.startPublishing(&s, PublicationPolicy::Forever)); BOOST_REQUIRE(objectsManager.getMonitorObject("content") != nullptr); TObjString s2("content"); - BOOST_CHECK_NO_THROW(objectsManager.startPublishing(&s2, PublicationPolicy::Forever)); + BOOST_CHECK_NO_THROW(objectsManager.startPublishing(&s2, PublicationPolicy::Forever)); auto mo2 = objectsManager.getMonitorObject("content"); BOOST_REQUIRE(mo2 != nullptr); BOOST_REQUIRE(mo2->getObject() != &s); @@ -73,8 +73,8 @@ BOOST_AUTO_TEST_CASE(is_being_published_test) ObjectsManager objectsManager(config.taskName, config.taskClass, config.detectorName, config.consulUrl, 0, true); TObjString s("content"); BOOST_CHECK(!objectsManager.isBeingPublished("content")); - objectsManager.startPublishing(&s, PublicationPolicy::Forever); - BOOST_CHECK_NO_THROW(objectsManager.startPublishing(&s, PublicationPolicy::Forever)); + objectsManager.startPublishing(&s, PublicationPolicy::Forever); + BOOST_CHECK_NO_THROW(objectsManager.startPublishing(&s, PublicationPolicy::Forever)); BOOST_CHECK(objectsManager.isBeingPublished("content")); } @@ -84,11 +84,11 @@ 
BOOST_AUTO_TEST_CASE(unpublish_test) config.taskName = "test"; ObjectsManager objectsManager(config.taskName, config.taskClass, config.detectorName, config.consulUrl, 0, true); TObjString s("content"); - objectsManager.startPublishing(&s, PublicationPolicy::Forever); + objectsManager.startPublishing(&s, PublicationPolicy::Forever); BOOST_CHECK_EQUAL(objectsManager.getNumberPublishedObjects(), 1); objectsManager.stopPublishing(&s); BOOST_CHECK_EQUAL(objectsManager.getNumberPublishedObjects(), 0); - objectsManager.startPublishing(&s, PublicationPolicy::Forever); + objectsManager.startPublishing(&s, PublicationPolicy::Forever); BOOST_CHECK_EQUAL(objectsManager.getNumberPublishedObjects(), 1); objectsManager.stopPublishing("content"); BOOST_CHECK_EQUAL(objectsManager.getNumberPublishedObjects(), 0); @@ -96,7 +96,7 @@ BOOST_AUTO_TEST_CASE(unpublish_test) BOOST_CHECK_THROW(objectsManager.stopPublishing("asdf"), ObjectNotFoundError); // unpublish all - objectsManager.startPublishing(&s, PublicationPolicy::Forever); + objectsManager.startPublishing(&s, PublicationPolicy::Forever); BOOST_CHECK_EQUAL(objectsManager.getNumberPublishedObjects(), 1); objectsManager.stopPublishingAll(); BOOST_CHECK_EQUAL(objectsManager.getNumberPublishedObjects(), 0); @@ -104,7 +104,7 @@ BOOST_AUTO_TEST_CASE(unpublish_test) // unpublish after deletion auto s2 = new TObjString("content"); - objectsManager.startPublishing(s2, PublicationPolicy::Forever); + objectsManager.startPublishing(s2, PublicationPolicy::Forever); BOOST_CHECK_EQUAL(objectsManager.getNumberPublishedObjects(), 1); delete s2; objectsManager.stopPublishing(s2); @@ -112,18 +112,18 @@ BOOST_AUTO_TEST_CASE(unpublish_test) // unpublish for publication policy auto s3 = new TObjString("content3"); - objectsManager.startPublishing(s3, PublicationPolicy::Once); + objectsManager.startPublishing(s3, PublicationPolicy::Once); auto s4 = new TObjString("content4"); - objectsManager.startPublishing(s4, PublicationPolicy::Once); + 
objectsManager.startPublishing(s4, PublicationPolicy::Once); auto s5 = new TObjString("content5"); - objectsManager.startPublishing(s5, PublicationPolicy::ThroughStop); + objectsManager.startPublishing(s5, PublicationPolicy::ThroughStop); BOOST_CHECK_EQUAL(objectsManager.getNumberPublishedObjects(), 3); objectsManager.stopPublishing(PublicationPolicy::Once); BOOST_CHECK_EQUAL(objectsManager.getNumberPublishedObjects(), 1); objectsManager.stopPublishing(PublicationPolicy::ThroughStop); BOOST_CHECK_EQUAL(objectsManager.getNumberPublishedObjects(), 0); - objectsManager.startPublishing(s3, PublicationPolicy::Once); + objectsManager.startPublishing(s3, PublicationPolicy::Once); objectsManager.stopPublishing(s3); BOOST_CHECK_EQUAL(objectsManager.getNumberPublishedObjects(), 0); BOOST_CHECK_NO_THROW(objectsManager.stopPublishing(PublicationPolicy::Once)); @@ -145,8 +145,8 @@ BOOST_AUTO_TEST_CASE(getters_test) TObjString s("content"); TH1F h("histo", "h", 100, 0, 99); - objectsManager.startPublishing(&s, PublicationPolicy::Forever); - objectsManager.startPublishing(&h, PublicationPolicy::Forever); + objectsManager.startPublishing(&s, PublicationPolicy::Forever); + objectsManager.startPublishing(&h, PublicationPolicy::Forever); // basic gets BOOST_CHECK_NO_THROW(objectsManager.getMonitorObject("content")); @@ -174,8 +174,8 @@ BOOST_AUTO_TEST_CASE(metadata_test) TObjString s("content"); TH1F h("histo", "h", 100, 0, 99); - objectsManager.startPublishing(&s, PublicationPolicy::Forever); - objectsManager.startPublishing(&h, PublicationPolicy::Forever); + objectsManager.startPublishing(&s, PublicationPolicy::Forever); + objectsManager.startPublishing(&h, PublicationPolicy::Forever); objectsManager.addMetadata("content", "aaa", "bbb"); BOOST_CHECK_EQUAL(objectsManager.getMonitorObject("content")->getMetadataMap().at("aaa"), "bbb"); @@ -211,7 +211,7 @@ BOOST_AUTO_TEST_CASE(feed_with_nullptr) config.consulUrl = ""; ObjectsManager objectsManager(config.taskName, config.taskClass, 
config.detectorName, config.consulUrl, 0, true); - BOOST_CHECK_NO_THROW(objectsManager.startPublishing(nullptr, PublicationPolicy::Forever)); + BOOST_CHECK_NO_THROW(objectsManager.startPublishing(nullptr, PublicationPolicy::Forever)); BOOST_CHECK_NO_THROW(objectsManager.setDefaultDrawOptions(nullptr, "")); BOOST_CHECK_NO_THROW(objectsManager.setDisplayHint(nullptr, "")); BOOST_CHECK_NO_THROW(objectsManager.stopPublishing(nullptr)); diff --git a/Framework/test/testPublisher.cxx b/Framework/test/testPublisher.cxx index 96597dcc8e..713a52da39 100644 --- a/Framework/test/testPublisher.cxx +++ b/Framework/test/testPublisher.cxx @@ -38,7 +38,7 @@ BOOST_AUTO_TEST_CASE(publisher_test) std::string consulUrl = "invalid"; ObjectsManager objectsManager(taskName, "taskClass", detectorName, consulUrl, 0, true); TObjString s("content"); - objectsManager.startPublishing(&s, PublicationPolicy::Forever); + objectsManager.startPublishing(&s, PublicationPolicy::Forever); TObjString* s2 = (TObjString*)(objectsManager.getMonitorObject("content")->getObject()); BOOST_CHECK_EQUAL(s.GetString(), s2->GetString()); diff --git a/Modules/CTP/src/CountersQcTask.cxx b/Modules/CTP/src/CountersQcTask.cxx index 8b9b28a3cb..4e3942146f 100644 --- a/Modules/CTP/src/CountersQcTask.cxx +++ b/Modules/CTP/src/CountersQcTask.cxx @@ -62,7 +62,7 @@ void CTPCountersTask::initialize(o2::framework::InitContext& /*ctx*/) mHistInputRate[i]->Draw(); mHistInputRate[i]->SetBit(TObject::kCanDelete); } - getObjectsManager()->startPublishing(mTCanvasInputs); + getObjectsManager()->startPublishing(mTCanvasInputs); } { @@ -79,7 +79,7 @@ void CTPCountersTask::initialize(o2::framework::InitContext& /*ctx*/) mHistClassRate[i]->Draw(); mHistClassRate[i]->SetBit(TObject::kCanDelete); } - getObjectsManager()->startPublishing(mTCanvasClasses); + getObjectsManager()->startPublishing(mTCanvasClasses); } { @@ -100,7 +100,7 @@ void CTPCountersTask::initialize(o2::framework::InitContext& /*ctx*/) mHistClassRate[k]->Draw(); 
mHistClassRate[k]->SetBit(TObject::kCanDelete); }*/ - getObjectsManager()->startPublishing(mTCanvasClassRates[j]); + getObjectsManager()->startPublishing(mTCanvasClassRates[j]); } } @@ -136,7 +136,7 @@ void CTPCountersTask::initialize(o2::framework::InitContext& /*ctx*/) mHistClassTotalCounts[i]->Draw(); mHistClassTotalCounts[i]->SetBit(TObject::kCanDelete); } - getObjectsManager()->startPublishing(mTCanvasTotalCountsClasses); + getObjectsManager()->startPublishing(mTCanvasTotalCountsClasses); } } diff --git a/Modules/CTP/src/RawDataQcTask.cxx b/Modules/CTP/src/RawDataQcTask.cxx index a1b2f79b05..c3c2599e88 100644 --- a/Modules/CTP/src/RawDataQcTask.cxx +++ b/Modules/CTP/src/RawDataQcTask.cxx @@ -226,11 +226,13 @@ void CTPRawDataReaderTask::monitorData(o2::framework::ProcessingContext& ctx) mHistoInputs->getNum()->Fill(i); mHistoInputRatios->getNum()->Fill(i); if (i == indexMB1 - 1) { - mHistoBCMinBias1->Fill((bcid - mShiftInput1) % 3564, 1. / mScaleInput1); + int bc = bcid - mShiftInput1 >= 0 ? bcid - mShiftInput1 : bcid - mShiftInput1 + 3564; + mHistoBCMinBias1->Fill(bc, 1. / mScaleInput1); mHistoInputRatios->getDen()->Fill(0., 1); } if (i == indexMB2 - 1) { - mHistoBCMinBias2->Fill((bcid - mShiftInput2) % 3564, 1. / mScaleInput2); + int bc = bcid - mShiftInput2 >= 0 ? bcid - mShiftInput2 : bcid - mShiftInput2 + 3564; + mHistoBCMinBias2->Fill(bc, 1. 
/ mScaleInput2); } } } diff --git a/Modules/CTP/src/RawDataReaderCheck.cxx b/Modules/CTP/src/RawDataReaderCheck.cxx index 69c8167347..e5e0671059 100644 --- a/Modules/CTP/src/RawDataReaderCheck.cxx +++ b/Modules/CTP/src/RawDataReaderCheck.cxx @@ -92,7 +92,7 @@ Quality RawDataReaderCheck::check(std::mapGetEntries() / mLHCBCs.count(); + float average = h->Integral() / mLHCBCs.count(); mThreshold = average - mNSigBC * sqrt(average); if (mThreshold < std::sqrt(average)) { mThreshold = average / 2; diff --git a/Modules/Example/src/EveryObject.cxx b/Modules/Example/src/EveryObject.cxx index ddf8d5633f..c8a198ee56 100644 --- a/Modules/Example/src/EveryObject.cxx +++ b/Modules/Example/src/EveryObject.cxx @@ -82,7 +82,7 @@ void EveryObject::initialize(o2::framework::InitContext& /*ctx*/) mTCanvasMembers[i]->Draw(); mTCanvasMembers[i]->SetBit(TObject::kCanDelete); } - getObjectsManager()->startPublishing(mTCanvas, PublicationPolicy::Forever); + getObjectsManager()->startPublishing(mTCanvas, PublicationPolicy::Forever); } } diff --git a/Modules/GLO/src/DataCompressionQcTask.cxx b/Modules/GLO/src/DataCompressionQcTask.cxx index dd06e70ef5..503a4f03f1 100644 --- a/Modules/GLO/src/DataCompressionQcTask.cxx +++ b/Modules/GLO/src/DataCompressionQcTask.cxx @@ -70,8 +70,8 @@ void DataCompressionQcTask::initialize(o2::framework::InitContext&) mEntropyCompressionCanvas->DivideSquare(mCompressionHists.size()); mCompressionCanvas->DivideSquare(mCompressionHists.size()); - getObjectsManager()->startPublishing(mEntropyCompressionCanvas.get()); - getObjectsManager()->startPublishing(mCompressionCanvas.get()); + getObjectsManager()->startPublishing(mEntropyCompressionCanvas.get()); + getObjectsManager()->startPublishing(mCompressionCanvas.get()); } } diff --git a/Modules/HMPID/src/HmpidTask.cxx b/Modules/HMPID/src/HmpidTask.cxx index 84f177ad43..f22ada1ac7 100644 --- a/Modules/HMPID/src/HmpidTask.cxx +++ b/Modules/HMPID/src/HmpidTask.cxx @@ -194,7 +194,7 @@ void 
HmpidTask::initialize(o2::framework::InitContext& /*ctx*/) // Error messages CheckerMessages = new TCanvas("CheckerMessages"); - getObjectsManager()->startPublishing(CheckerMessages); + getObjectsManager()->startPublishing(CheckerMessages); // TH2 to check HV hCheckHV = new TH2F("hCheckHV", "hCheckHV", 42, -0.5, 41.5, 4, 0, 4); diff --git a/Modules/ITS/include/ITS/ITSClusterTask.h b/Modules/ITS/include/ITS/ITSClusterTask.h index febab72dda..6122176fe4 100644 --- a/Modules/ITS/include/ITS/ITSClusterTask.h +++ b/Modules/ITS/include/ITS/ITSClusterTask.h @@ -92,11 +92,15 @@ class ITSClusterTask : public TaskInterface std::shared_ptr hAverageClusterOccupancySummaryOB[NLayer]; std::shared_ptr hAverageClusterSizeSummaryOB[NLayer]; - // Layer synnary + // Layer summary TH1D* hClusterSizeLayerSummary[NLayer] = { nullptr }; TH1D* hClusterTopologyLayerSummary[NLayer] = { nullptr }; TH1D* hGroupedClusterSizeLayerSummary[NLayer] = { nullptr }; + // Anomalies plots + TH2D* hLongClustersPerChip[3] = { nullptr }; + TH2D* hMultPerChipWhenLongClusters[3] = { nullptr }; + // General TH2D* hClusterVsBunchCrossing = nullptr; std::unique_ptr mGeneralOccupancy = nullptr; @@ -133,13 +137,16 @@ class ITSClusterTask : public TaskInterface std::string mLaneStatusFlag[NFlags] = { "IB", "ML", "OL", "Total" }; int mDoPublishDetailedSummary = 0; - const int mNStaves[NLayer] = { 12, 16, 20, 24, 30, 42, 48 }; - const int mNHicPerStave[NLayer] = { 1, 1, 1, 8, 8, 14, 14 }; - const int mNChipsPerHic[NLayer] = { 9, 9, 9, 14, 14, 14, 14 }; - const int mNLanePerHic[NLayer] = { 3, 3, 3, 2, 2, 2, 2 }; - const int ChipBoundary[NLayer + 1] = { 0, 108, 252, 432, 3120, 6480, 14712, 24120 }; - const int StaveBoundary[NLayer + 1] = { 0, 12, 28, 48, 72, 102, 144, 192 }; - const float mLength[NLayer] = { 14., 14., 14., 43., 43., 74., 74. 
}; + int minColSpanLongCluster = 128; // driven by o2::itsmft::ClusterPattern::MaxColSpan = 128 + int maxRowSpanLongCluster = 29; + + static constexpr int mNStaves[NLayer] = { 12, 16, 20, 24, 30, 42, 48 }; + static constexpr int mNHicPerStave[NLayer] = { 1, 1, 1, 8, 8, 14, 14 }; + static constexpr int mNChipsPerHic[NLayer] = { 9, 9, 9, 14, 14, 14, 14 }; + static constexpr int mNLanePerHic[NLayer] = { 3, 3, 3, 2, 2, 2, 2 }; + static constexpr int ChipBoundary[NLayer + 1] = { 0, 108, 252, 432, 3120, 6480, 14712, 24120 }; + static constexpr int StaveBoundary[NLayer + 1] = { 0, 12, 28, 48, 72, 102, 144, 192 }; + static constexpr float mLength[NLayer] = { 14., 14., 14., 43., 43., 74., 74. }; const std::string mYlabels[NLayer * 2] = { "L6B(S24#rightarrow47)", "L5B(S21#rightarrow41)", "L4B(S15#rightarrow29)", "L3B(S12#rightarrow23)", "L2B(S10#rightarrow19)", "L1B(S08#rightarrow15)", "L0B(S06#rightarrow11)", "L0T(S00#rightarrow05)", "L1T(S00#rightarrow07)", "L2T(S00#rightarrow09)", "L3T(S00#rightarrow11)", "L4T(S00#rightarrow14)", "L5T(S00#rightarrow20)", "L6T(S00#rightarrow23)" }; int mEnableLayers[NLayer] = { 0 }; diff --git a/Modules/ITS/src/ITSClusterTask.cxx b/Modules/ITS/src/ITSClusterTask.cxx index f2612a2fea..4825f9b4cc 100644 --- a/Modules/ITS/src/ITSClusterTask.cxx +++ b/Modules/ITS/src/ITSClusterTask.cxx @@ -47,6 +47,10 @@ ITSClusterTask::~ITSClusterTask() if (!mEnableLayers[iLayer]) continue; + if (iLayer < NLayerIB) { + delete hLongClustersPerChip[iLayer]; + delete hMultPerChipWhenLongClusters[iLayer]; + } delete hClusterSizeLayerSummary[iLayer]; delete hClusterTopologyLayerSummary[iLayer]; delete hGroupedClusterSizeLayerSummary[iLayer]; @@ -137,8 +141,6 @@ void ITSClusterTask::monitorData(o2::framework::ProcessingContext& ctx) auto clusPatternArr = ctx.inputs().get>("patterns"); auto pattIt = clusPatternArr.begin(); - int ChipIDprev = -1; - // Reset this histo to have the latest picture hEmptyLaneFractionGlobal->Reset("ICES"); @@ -153,6 +155,9 @@ void 
ITSClusterTask::monitorData(o2::framework::ProcessingContext& ctx) const auto& ROF = clusRofArr[iROF]; const auto bcdata = ROF.getBCData(); int nClustersForBunchCrossing = 0; + int nLongClusters[ChipBoundary[NLayerIB]] = {}; + int nHitsFromClusters[ChipBoundary[NLayerIB]] = {}; // only IB is implemented at the moment + for (int icl = ROF.getFirstEntry(); icl < ROF.getFirstEntry() + ROF.getNEntries(); icl++) { auto& cluster = clusArr[icl]; @@ -160,13 +165,15 @@ void ITSClusterTask::monitorData(o2::framework::ProcessingContext& ctx) int ClusterID = cluster.getPatternID(); // used for normal (frequent) cluster shapes int lay, sta, ssta, mod, chip, lane; - if (ChipID != ChipIDprev) { - mGeom->getChipId(ChipID, lay, sta, ssta, mod, chip); - mod = mod + (ssta * (mNHicPerStave[lay] / 2)); - int chipIdLocal = (ChipID - ChipBoundary[lay]) % (14 * mNHicPerStave[lay]); - lane = (chipIdLocal % (14 * mNHicPerStave[lay])) / (14 / 2); - } + // TODO: avoid call Geom if ChipID is the same as previous cluster + mGeom->getChipId(ChipID, lay, sta, ssta, mod, chip); + mod = mod + (ssta * (mNHicPerStave[lay] / 2)); + int chipIdLocal = (ChipID - ChipBoundary[lay]) % (14 * mNHicPerStave[lay]); + lane = (chipIdLocal % (14 * mNHicPerStave[lay])) / (14 / 2); + int npix = -1; + int colspan = -1; + int rowspan = -1; int isGrouped = -1; o2::math_utils::Point3D locC; // local coordinates @@ -174,6 +181,11 @@ void ITSClusterTask::monitorData(o2::framework::ProcessingContext& ctx) if (ClusterID != o2::itsmft::CompCluster::InvalidPatternID) { // Normal (frequent) cluster shapes if (!mDict->isGroup(ClusterID)) { npix = mDict->getNpixels(ClusterID); + + // TODO: is there way other than calling the pattern? 
+ colspan = mDict->getPattern(ClusterID).getColumnSpan(); + rowspan = mDict->getPattern(ClusterID).getRowSpan(); + if (mDoPublishDetailedSummary == 1) { locC = mDict->getClusterCoordinates(cluster); } @@ -181,6 +193,8 @@ void ITSClusterTask::monitorData(o2::framework::ProcessingContext& ctx) } else { o2::itsmft::ClusterPattern patt(pattIt); npix = patt.getNPixels(); + colspan = patt.getColumnSpan(); + rowspan = patt.getRowSpan(); if (mDoPublishDetailedSummary == 1) { locC = mDict->getClusterCoordinates(cluster, patt, true); } @@ -190,6 +204,8 @@ void ITSClusterTask::monitorData(o2::framework::ProcessingContext& ctx) } else { // invalid pattern o2::itsmft::ClusterPattern patt(pattIt); npix = patt.getNPixels(); + colspan = patt.getColumnSpan(); + rowspan = patt.getRowSpan(); isGrouped = 0; if (mDoPublishDetailedSummary == 1) { locC = mDict->getClusterCoordinates(cluster, patt, false); @@ -200,6 +216,15 @@ void ITSClusterTask::monitorData(o2::framework::ProcessingContext& ctx) nClustersForBunchCrossing++; } + if (lay < NLayerIB) { + nHitsFromClusters[ChipID] += npix; + } + + if (lay < NLayerIB && colspan >= minColSpanLongCluster && rowspan <= maxRowSpanLongCluster) { + // definition of long cluster + nLongClusters[ChipID]++; + } + if (lay < NLayerIB) { hAverageClusterOccupancySummaryIB[lay]->getNum()->Fill(chip, sta); hAverageClusterSizeSummaryIB[lay]->getNum()->Fill(chip, sta, (double)npix); @@ -249,6 +274,21 @@ void ITSClusterTask::monitorData(o2::framework::ProcessingContext& ctx) } } hClusterVsBunchCrossing->Fill(bcdata.bc, nClustersForBunchCrossing); // we count only the number of clusters, not their sizes + + // filling these anomaly plots once per ROF, ignoring chips w/o long clusters + for (int ichip = 0; ichip < ChipBoundary[NLayerIB]; ichip++) { + + int nLong = TMath::Min(nLongClusters[ichip], 21); + if (nLong < 1) { + continue; + } + int ilayer = -1; + while (ichip >= ChipBoundary[ilayer + 1]) { + ilayer++; + } + hLongClustersPerChip[ilayer]->Fill(ichip, 
nLong); + hMultPerChipWhenLongClusters[ilayer]->Fill(ichip, nHitsFromClusters[ichip]); + } } if ((int)clusRofArr.size() > 0) { @@ -364,6 +404,8 @@ void ITSClusterTask::reset() hClusterTopologyLayerSummary[iLayer]->Reset(); if (iLayer < NLayerIB) { + hLongClustersPerChip[iLayer]->Reset(); + hMultPerChipWhenLongClusters[iLayer]->Reset(); hAverageClusterOccupancySummaryIB[iLayer]->Reset(); hAverageClusterSizeSummaryIB[iLayer]->Reset(); if (mDoPublish1DSummary == 1) { @@ -396,7 +438,7 @@ void ITSClusterTask::reset() void ITSClusterTask::createAllHistos() { - hClusterVsBunchCrossing = new TH2D("BunchCrossingIDvsClusters", "BunchCrossingIDvsClusters", nBCbins, 0, 4095, 100, 0, 1000); + hClusterVsBunchCrossing = new TH2D("BunchCrossingIDvsClusters", "BunchCrossingIDvsClusters", nBCbins, 0, 4095, 100, 0, 2000); hClusterVsBunchCrossing->SetTitle("#clusters vs BC id for clusters with npix > 2"); addObject(hClusterVsBunchCrossing); formatAxes(hClusterVsBunchCrossing, "Bunch Crossing ID", "Number of clusters with npix > 2 in ROF", 1, 1.10); @@ -419,6 +461,17 @@ void ITSClusterTask::createAllHistos() if (!mEnableLayers[iLayer]) continue; + if (iLayer < NLayerIB) { + hLongClustersPerChip[iLayer] = new TH2D(Form("Anomalies/Layer%d/LongClusters", iLayer), Form("Layer%d/LongClusters", iLayer), ChipBoundary[iLayer + 1] - ChipBoundary[iLayer], ChipBoundary[iLayer], ChipBoundary[iLayer + 1], 21, 0, 21); + hMultPerChipWhenLongClusters[iLayer] = new TH2D(Form("Anomalies/Layer%d/HitsWhenLongClusters", iLayer), Form("Layer%d/HitsWhenLongClusters", iLayer), ChipBoundary[iLayer + 1] - ChipBoundary[iLayer], ChipBoundary[iLayer], ChipBoundary[iLayer + 1], 200, 0, 20000); + addObject(hLongClustersPerChip[iLayer]); + formatAxes(hLongClustersPerChip[iLayer], "Chip ID", "number of long clusters", 1, 1.10); + hLongClustersPerChip[iLayer]->SetStats(0); + addObject(hMultPerChipWhenLongClusters[iLayer]); + formatAxes(hMultPerChipWhenLongClusters[iLayer], "Chip ID", "Sum of clusters size (events w/ 
long clus)", 1, 1.10); + hMultPerChipWhenLongClusters[iLayer]->SetStats(0); + } + hClusterSizeLayerSummary[iLayer] = new TH1D(Form("Layer%d/AverageClusterSizeSummary", iLayer), Form("Layer%dAverageClusterSizeSummary", iLayer), 100, 0, 100); hClusterSizeLayerSummary[iLayer]->SetTitle(Form("Cluster size summary for Layer %d", iLayer)); addObject(hClusterSizeLayerSummary[iLayer]); diff --git a/Modules/ITS/src/ITSTrackCheck.cxx b/Modules/ITS/src/ITSTrackCheck.cxx index 5489e0cbeb..5bba810093 100644 --- a/Modules/ITS/src/ITSTrackCheck.cxx +++ b/Modules/ITS/src/ITSTrackCheck.cxx @@ -33,7 +33,7 @@ Quality ITSTrackCheck::check(std::map(mCustomParameters, "EtaRatio", mEtaRatio); mPhiRatio = o2::quality_control_modules::common::getFromConfig(mCustomParameters, "PhiRatio", mPhiRatio); - Quality result = 0; + Quality result; Int_t id = 0; std::map>::iterator iter; for (iter = moMap->begin(); iter != moMap->end(); ++iter) { diff --git a/Modules/MUON/MCH/src/PedestalsTask.cxx b/Modules/MUON/MCH/src/PedestalsTask.cxx index e5dc8a5a47..221501a574 100644 --- a/Modules/MUON/MCH/src/PedestalsTask.cxx +++ b/Modules/MUON/MCH/src/PedestalsTask.cxx @@ -168,7 +168,7 @@ void PedestalsTask::initialize(o2::framework::InitContext& /*ctx*/) } mCanvasCheckerMessages = std::make_unique("CheckerMessages", "Checker Messages", 800, 600); - getObjectsManager()->startPublishing(mCanvasCheckerMessages.get()); + getObjectsManager()->startPublishing(mCanvasCheckerMessages.get()); mPrintLevel = 0; } diff --git a/Modules/TPC/CMakeLists.txt b/Modules/TPC/CMakeLists.txt index e8e9b53a95..a5c22a3444 100644 --- a/Modules/TPC/CMakeLists.txt +++ b/Modules/TPC/CMakeLists.txt @@ -38,7 +38,8 @@ target_sources(O2QcTPC PRIVATE src/PID.cxx src/TrackClusters.cxx src/VDriftCalibReductor.cxx src/SeparationPowerReductor.cxx - src/TimeGainCalibReductor.cxx) + src/TimeGainCalibReductor.cxx + src/DCSPTempReductor.cxx) target_include_directories( O2QcTPC @@ -97,6 +98,7 @@ add_root_dictionary(O2QcTPC 
include/TPC/VDriftCalibReductor.h include/TPC/SeparationPowerReductor.h include/TPC/TimeGainCalibReductor.h + include/TPC/DCSPTempReductor.h LINKDEF include/TPC/LinkDef.h) install(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/include/TPC @@ -180,4 +182,5 @@ install(FILES run/tpcQCPID_sampled.json run/tpcQCvDriftTrending.json run/tpcQCTrending_separationpower.json run/tpcQCTimeGainCalibTrending.json + run/tpcDCSPTempTrending.json DESTINATION etc) diff --git a/Modules/TPC/include/TPC/DCSPTempReductor.h b/Modules/TPC/include/TPC/DCSPTempReductor.h new file mode 100644 index 0000000000..0cdad5f1d6 --- /dev/null +++ b/Modules/TPC/include/TPC/DCSPTempReductor.h @@ -0,0 +1,64 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. + +/// +/// \file DCSPTempReductor.h +/// \author Marcel Lesch +/// + +#ifndef QUALITYCONTROL_DCSPTEMPREDUCTOR_H +#define QUALITYCONTROL_DCSPTEMPREDUCTOR_H + +#include "QualityControl/ReductorConditionAny.h" +#include + +namespace o2::quality_control_modules::tpc +{ + +/// \brief A Reductor for calibration objects of the TPC DCS temperatures +/// +/// A Reductor for TPC DCS temperatures. 
+/// It produces a branch in the format: +/// "tempSensor[18]/F:tempSensorErr[18]:tempMeanPerSide[2]:tempMeanPerSideErr[2]:tempGradXPerSide[2]:tempGradXPerSideErr[2]:tempGradYPerSide[2]:tempGradYPerSideErr[2]" +/// tempSensor[i] is the raw sensor temperature for each of the 18 sensores +/// tempMeanPerSide[i] is the mean temperature per TPC-Side (0: A-Side, 1: C-Side) +/// tempGradXPerSide[i] is the temperature gradient in x direction per TPC-Side (0: A-Side, 1: C-Side) +/// tempGradYPerSide[i] is the temperature gradient in y direction per TPC-Side (0: A-Side, 1: C-Side) + +class DCSPTempReductor : public quality_control::postprocessing::ReductorConditionAny +{ + public: + DCSPTempReductor() = default; + ~DCSPTempReductor() = default; + + void* getBranchAddress() override; + const char* getBranchLeafList() override; + bool update(ConditionRetriever& retriever) override; + + private: + struct { + Float_t tempSensor[18]; + Float_t tempSensorErr[18]; // uncertainties + + Float_t tempMeanPerSide[2]; + Float_t tempMeanPerSideErr[2]; // uncertainties + + Float_t tempGradXPerSide[2]; + Float_t tempGradXPerSideErr[2]; // uncertainties + + Float_t tempGradYPerSide[2]; + Float_t tempGradYPerSideErr[2]; // uncertainties + } mStats; +}; + +} // namespace o2::quality_control_modules::tpc + +#endif // QUALITYCONTROL_DCSPTEMPREDUCTOR_H diff --git a/Modules/TPC/include/TPC/LinkDef.h b/Modules/TPC/include/TPC/LinkDef.h index 93756d99cc..21565f1ee4 100644 --- a/Modules/TPC/include/TPC/LinkDef.h +++ b/Modules/TPC/include/TPC/LinkDef.h @@ -43,6 +43,7 @@ #pragma link C++ class o2::quality_control_modules::tpc::VDriftCalibReductor + ; #pragma link C++ class o2::quality_control_modules::tpc::SeparationPowerReductor + ; #pragma link C++ class o2::quality_control_modules::tpc::TimeGainCalibReductor + ; +#pragma link C++ class o2::quality_control_modules::tpc::DCSPTempReductor + ; #pragma link C++ function o2::quality_control_modules::tpc::addAndPublish + ; #pragma link C++ function 
o2::quality_control_modules::tpc::toVector + ; diff --git a/Modules/TPC/include/TPC/Utility.h b/Modules/TPC/include/TPC/Utility.h index 59474b5c51..bfc17727a0 100644 --- a/Modules/TPC/include/TPC/Utility.h +++ b/Modules/TPC/include/TPC/Utility.h @@ -107,5 +107,11 @@ void calculateStatistics(const double* yValues, const double* yErrors, bool useE /// \param mean double&, reference to double that should store mean /// \param stddevOfMean double&, reference to double that should store stddev of mean void retrieveStatistics(std::vector& values, std::vector& errors, bool useErrors, double& mean, double& stddevOfMean); + +/// \brief Calculates mean and stddev from a vector +/// \param values std::vector& vector that contains the data points +/// \param mean float&, reference to float that should store mean +/// \param stddev float&, reference to float that should store stddev of mean +void calcMeanAndStddev(const std::vector& values, float& mean, float& stddev); } // namespace o2::quality_control_modules::tpc #endif // QUALITYCONTROL_TPCUTILITY_H \ No newline at end of file diff --git a/Modules/TPC/run/tpcDCSPTempTrending.json b/Modules/TPC/run/tpcDCSPTempTrending.json new file mode 100644 index 0000000000..943d8dc8c8 --- /dev/null +++ b/Modules/TPC/run/tpcDCSPTempTrending.json @@ -0,0 +1,116 @@ +{ + "qc": { + "config": { + "database": { + "implementation": "CCDB", + "host": "ccdb-test.cern.ch:8080", + "username": "not_applicable", + "password": "not_applicable", + "name": "not_applicable" + }, + "Activity": { + "number": "", + "type": "", + "start": "", + "end": "" + }, + "monitoring": { + "url": "infologger:///debug?qc" + }, + "consul": { + "url": "" + }, + "conditionDB": { + "url": "ccdb-test.cern.ch:8080" + }, + "postprocessing": { + "periodSeconds": "10" + } + }, + "postprocessing": { + "TemperatureQC": { + "active": "true", + "resumeTrend": "false", + "className": "o2::quality_control::postprocessing::TrendingTask", + "moduleName": "QualityControl", + 
"detectorName": "TPC", + "producePlotsOnUpdate": "true", + "dataSources": [ + { + "type": "condition", + "path": "TPC/Calib/", + "names": [ "Temperature" ], + "reductorName": "o2::quality_control_modules::tpc::DCSPTempReductor", + "moduleName": "QcTPC" + } + ], + "plots": [ + { + "name": "Temp_Mean_ASide", + "title": "Mean Temperature A Side", + "varexp": "Temperature.tempMeanPerSide[0]:time", + "selection": "", + "option": "*L", + "graphAxisLabel": "mean temp A Side:time", + "graphErrors": "Temperature.tempMeanPerSideErr[0]:0" + }, + { + "name": "Temp_GradX_ASide", + "title": "GradX Temperature A Side", + "varexp": "Temperature.tempGradXPerSide[0]:time", + "selection": "", + "option": "*L", + "graphAxisLabel": "gradX temp A Side:time", + "graphErrors": "Temperature.tempGradXPerSideErr[0]:0" + }, + { + "name": "Temp_GradY_ASide", + "title": "GradY Temperature A Side", + "varexp": "Temperature.tempGradYPerSide[0]:time", + "selection": "", + "option": "*L", + "graphAxisLabel": "gradY temp A Side:time", + "graphErrors": "Temperature.tempGradYPerSideErr[0]:0" + }, + { + "name": "Temp_Mean_CSide", + "title": "Mean Temperature C Side", + "varexp": "Temperature.tempMeanPerSide[1]:time", + "selection": "", + "option": "*L", + "graphAxisLabel": "mean temp C Side:time", + "graphErrors": "Temperature.tempMeanPerSideErr[1]:0" + }, + { + "name": "Temp_GradX_CSide", + "title": "GradX Temperature C Side", + "varexp": "Temperature.tempGradXPerSide[1]:time", + "selection": "", + "option": "*L", + "graphAxisLabel": "gradX temp C Side:time", + "graphErrors": "Temperature.tempGradXPerSideErr[1]:0" + }, + { + "name": "Temp_GradY_CSide", + "title": "GradY Temperature C Side", + "varexp": "Temperature.tempGradYPerSide[1]:time", + "selection": "", + "option": "*L", + "graphAxisLabel": "gradY temp C Side:time", + "graphErrors": "Temperature.tempGradYPerSideErr[1]:0" + } + ], + "initTrigger": [ + "userorcontrol" + ], + "updateTrigger": [ + "foreachlatest:ccdb:TPC/Calib/Temperature/" + ], + 
"stopTrigger": [ + "userorcontrol" + ] + } + } + } + } + \ No newline at end of file diff --git a/Modules/TPC/src/Clusters.cxx b/Modules/TPC/src/Clusters.cxx index fe14ab4f6b..065ac17a10 100644 --- a/Modules/TPC/src/Clusters.cxx +++ b/Modules/TPC/src/Clusters.cxx @@ -82,7 +82,7 @@ void Clusters::initialize(InitContext& /*ctx*/) addAndPublish(getObjectsManager(), mTimeBinCanvasVec, { "c_Sides_Time_Bin", "c_ROCs_Time_Bin_1D", "c_ROCs_Time_Bin_2D" }); for (auto& wrapper : mWrapperVector) { - getObjectsManager()->startPublishing(&wrapper); + getObjectsManager()->startPublishing(&wrapper); } } } diff --git a/Modules/TPC/src/DCSPTempReductor.cxx b/Modules/TPC/src/DCSPTempReductor.cxx new file mode 100644 index 0000000000..717f98510f --- /dev/null +++ b/Modules/TPC/src/DCSPTempReductor.cxx @@ -0,0 +1,83 @@ +// Copyright 2019-2020 CERN and copyright holders of ALICE O2. +// See https://alice-o2.web.cern.ch/copyright for details of the copyright holders. +// All rights not expressly granted are reserved. +// +// This software is distributed under the terms of the GNU General Public +// License v3 (GPL Version 3), copied verbatim in the file "COPYING". +// +// In applying this license CERN does not waive the privileges and immunities +// granted to it by virtue of its status as an Intergovernmental Organization +// or submit itself to any jurisdiction. 
+ +/// +/// \file DCSPTempReductor.cxx +/// \author Marcel Lesch +/// + +#include "TPC/DCSPTempReductor.h" +#include "DataFormatsTPC/DCS.h" +#include "TPC/Utility.h" + +namespace o2::quality_control_modules::tpc +{ + +void* DCSPTempReductor::getBranchAddress() +{ + return &mStats; +} + +const char* DCSPTempReductor::getBranchLeafList() +{ + return "tempSensor[18]/F:tempSensorErr[18]:tempMeanPerSide[2]:tempMeanPerSideErr[2]:tempGradXPerSide[2]:tempGradXPerSideErr[2]:tempGradYPerSide[2]:tempGradYPerSideErr[2]"; +} + +bool DCSPTempReductor::update(ConditionRetriever& retriever) +{ + if (auto dcstemp = retriever.retrieve()) { + + int sensorCounter = 0; + std::vector sensorData[18]; + for (const auto& sensor : dcstemp->raw) { + for (const auto& value : sensor.data) { + sensorData[sensorCounter].emplace_back(value.value); + } + calcMeanAndStddev(sensorData[sensorCounter], mStats.tempSensor[sensorCounter], mStats.tempSensorErr[sensorCounter]); + sensorCounter++; + if (sensorCounter > 17) + break; + } + + std::vector sideData[3]; // 0 mean, 1 gradX, 2 gradY + + // A-Side + for (const auto& value : dcstemp->statsA.data) { + sideData[0].emplace_back(value.value.mean); + sideData[1].emplace_back(value.value.gradX); + sideData[2].emplace_back(value.value.gradY); + } + + calcMeanAndStddev(sideData[0], mStats.tempMeanPerSide[0], mStats.tempMeanPerSideErr[0]); + calcMeanAndStddev(sideData[1], mStats.tempGradXPerSide[0], mStats.tempGradXPerSideErr[0]); + calcMeanAndStddev(sideData[2], mStats.tempGradYPerSide[0], mStats.tempGradYPerSideErr[0]); + + for (int iCount = 0; iCount < 3; iCount++) { + sideData[iCount].clear(); + } + + // C-Side + for (const auto& value : dcstemp->statsC.data) { + sideData[0].emplace_back(value.value.mean); + sideData[1].emplace_back(value.value.gradX); + sideData[2].emplace_back(value.value.gradY); + } + + calcMeanAndStddev(sideData[0], mStats.tempMeanPerSide[1], mStats.tempMeanPerSideErr[1]); + calcMeanAndStddev(sideData[1], mStats.tempGradXPerSide[1], 
mStats.tempGradXPerSideErr[1]); + calcMeanAndStddev(sideData[2], mStats.tempGradYPerSide[1], mStats.tempGradYPerSideErr[1]); + + return true; + } + return false; +} + +} // namespace o2::quality_control_modules::tpc \ No newline at end of file diff --git a/Modules/TPC/src/JunkDetection.cxx b/Modules/TPC/src/JunkDetection.cxx index ea7c8c29d0..b89f2f2c97 100644 --- a/Modules/TPC/src/JunkDetection.cxx +++ b/Modules/TPC/src/JunkDetection.cxx @@ -45,7 +45,7 @@ void JunkDetection::initialize(o2::framework::InitContext&) mJDHistos.emplace_back(new TH2F("h_removed_Strategy_B", "Removed Strategy (B)", 1, 0, 1, 1, 0, 1)); // dummy for the objectsManager if (!mIsMergeable) { - getObjectsManager()->startPublishing(mJDCanv.get()); + getObjectsManager()->startPublishing(mJDCanv.get()); } for (const auto& hist : mJDHistos) { getObjectsManager()->startPublishing(hist); diff --git a/Modules/TPC/src/RawDigits.cxx b/Modules/TPC/src/RawDigits.cxx index ce0fb6c696..f71d3b75af 100644 --- a/Modules/TPC/src/RawDigits.cxx +++ b/Modules/TPC/src/RawDigits.cxx @@ -73,7 +73,7 @@ void RawDigits::initialize(o2::framework::InitContext& /*ctx*/) addAndPublish(getObjectsManager(), mTimeBinCanvasVec, { "c_Sides_Time_Bin", "c_ROCs_Time_Bin_1D", "c_ROCs_Time_Bin_2D" }); for (auto& wrapper : mWrapperVector) { - getObjectsManager()->startPublishing(&wrapper); + getObjectsManager()->startPublishing(&wrapper); } } diff --git a/Modules/TPC/src/Utility.cxx b/Modules/TPC/src/Utility.cxx index dffb2b4c75..a9b6300eb0 100644 --- a/Modules/TPC/src/Utility.cxx +++ b/Modules/TPC/src/Utility.cxx @@ -60,7 +60,7 @@ void addAndPublish(std::shared_ptr ob for (const auto& canvName : canvNames) { canVec.emplace_back(std::make_unique(canvName.data())); auto canvas = canVec.back().get(); - objectsManager->startPublishing(canvas); + objectsManager->startPublishing(canvas); if (metaData.size() != 0) { for (const auto& [key, value] : metaData) { objectsManager->addMetadata(canvas->GetName(), key, value); @@ -341,4 +341,27 @@ 
void retrieveStatistics(std::vector& values, std::vector& errors } } +void calcMeanAndStddev(const std::vector& values, float& mean, float& stddev) +{ + if (values.size() == 0) { + mean = 0.; + stddev = 0.; + return; + } + + // Mean + const float sum = std::accumulate(values.begin(), values.end(), 0.0); + mean = sum / values.size(); + + // Stddev + if (values.size() == 1) { // we only have one point -> no stddev + stddev = 0.; + } else { // for >= 2 points, we calculate the spread + std::vector diff(values.size()); + std::transform(values.begin(), values.end(), diff.begin(), [mean](auto x) { return x - mean; }); + const auto sq_sum = std::inner_product(diff.begin(), diff.end(), diff.begin(), 0.f); + stddev = std::sqrt(sq_sum / (values.size() * (values.size() - 1.))); + } +} + } // namespace o2::quality_control_modules::tpc \ No newline at end of file diff --git a/Modules/TRD/src/PulsePositionCheck.cxx b/Modules/TRD/src/PulsePositionCheck.cxx index 7fefd4d7f7..4787d1276d 100644 --- a/Modules/TRD/src/PulsePositionCheck.cxx +++ b/Modules/TRD/src/PulsePositionCheck.cxx @@ -229,9 +229,6 @@ void PulsePositionCheck::beautify(std::shared_ptr mo, Quality che } else if (checkResult == Quality::Null) { ILOG(Debug, Devel) << "Quality::Null, setting to Blue" << ENDM; h->SetFillColor(kBlue); - } else if (checkResult == Quality::NullLevel) { - ILOG(Debug, Devel) << "Quality::Null, setting to Pink" << ENDM; - h->SetFillColor(kPink); } h->SetLineColor(kBlack); h->Draw(); diff --git a/README.md b/README.md index 5a9f3a3ac0..1565a828ef 100644 --- a/README.md +++ b/README.md @@ -38,6 +38,7 @@ For a general overview of our (O2) software, organization and processes, please * [Check](doc/ModulesDevelopment.md#check) * [Configuration](doc/ModulesDevelopment.md#configuration) * [Implementation](doc/ModulesDevelopment.md#implementation) + * [Results](doc/ModulesDevelopment.md#results) * [Quality Aggregation](doc/ModulesDevelopment.md#quality-aggregation) * [Quick 
try](doc/ModulesDevelopment.md#quick-try) * [Configuration](doc/ModulesDevelopment.md#configuration-1) @@ -76,6 +77,8 @@ For a general overview of our (O2) software, organization and processes, please * [Critical, resilient and non-critical tasks](doc/Advanced.md#critical-resilient-and-non-critical-tasks) * [QC with DPL Analysis](doc/Advanced.md#qc-with-dpl-analysis) * [Uploading objects to QCDB](doc/Advanced.md#uploading-objects-to-qcdb) + * [Propagating Check results to RCT in Bookkeeping](doc/Advanced.md#propagating-check-results-to-rct-in-bookkeeping) + * [Conversion details](doc/Advanced.md#conversion-details) * [Solving performance issues](doc/Advanced.md#solving-performance-issues) * [Dispatcher](doc/Advanced.md#dispatcher) * [QC Tasks](doc/Advanced.md#qc-tasks-1) diff --git a/doc/Advanced.md b/doc/Advanced.md index 9800958c16..b21df552b9 100644 --- a/doc/Advanced.md +++ b/doc/Advanced.md @@ -22,6 +22,8 @@ Advanced topics * [Critical, resilient and non-critical tasks](#critical-resilient-and-non-critical-tasks) * [QC with DPL Analysis](#qc-with-dpl-analysis) * [Uploading objects to QCDB](#uploading-objects-to-qcdb) + * [Propagating Check results to RCT in Bookkeeping](#propagating-check-results-to-rct-in-bookkeeping) + * [Conversion details](#conversion-details) * [Solving performance issues](#solving-performance-issues) * [Dispatcher](#dispatcher) * [QC Tasks](#qc-tasks-1) @@ -585,6 +587,72 @@ the directories listed in the logs: Notice that by default the executable will ignore the directory structure in the input file and upload all objects to one directory. If you need the directory structure preserved, add the argument `--preserve-directories`. +## Propagating Check results to RCT in Bookkeeping + +The framework allows to propagate Quality Objects (QOs) produced by Checks and Aggregators to RCT in Bookkeeping. +The synchronisation is done once, at the end of workflow runtime, i.e. at the End of Run or in the last stage of QC merging on Grid. 
+Propagation can be enabled by adding the following key-value pair to Check/Aggregator configuration: +```json + "exportToBookkeeping": "true" +``` +Using it for Aggregators is discouraged, as the information on which exact Check failed is lost or at least obfuscated. + +Check results are converted into Flags, which are documented in [O2/DataFormats/QualityControl](https://github.com/AliceO2Group/AliceO2/tree/dev/DataFormats/QualityControl). +Information about the object validity is preserved, which allows for time-based flagging of good/bad data. + +### Conversion details + +Below we describe some details of how the conversion is done. +Good QOs are marked with green, Medium QOs are marked with orange and Bad QOs are marked with red. +Null QOs are marked with purple. + +- **Good QOs with no Flags associated are not converted to any Flags.** + According to the preliminary design for Data Tagging, "bad" Flags always win, thus there is no need for explicit "good" Flags. + It also implies that there is no need to explicitly add Good Flag to Good Quality. + +![](images/qo_flag_conversion_01.svg) + +- **Bad and Medium QOs with no Flags are converted to Flag 14 (Unknown).** + This means that Medium Quality data is by default bad for Analysis. + +![](images/qo_flag_conversion_02.svg) + +- **Null QOs with no Flags are converted to Flag 1 (Unknown Quality).** + +![](images/qo_flag_conversion_03.svg) + +- **All QOs with Flags are converted to Flags, while the Quality is ignored.** + As a consequence, one can customize the meaning of any Quality (Medium in particular) in terms of data usability. + A warning is printed if a Check associates a good Flag to bad Quality or a bad Flag to good Quality. + +![](images/qo_flag_conversion_04.svg) + +- **Timespans not covered by a given QO are filled with Flag 1 (Unknown Quality).** + In other words, if an object was missing during a part of the run, we can state that the data quality is not known. 
+ +![](images/qo_flag_conversion_05.svg) + +- **Overlapping or adjacent Flags with the same ID, comment and source (QO name) are merged.**. + This happens even if they were associated with different Qualities, e.g. Bad and Medium. + Order of Flag arrival does not matter. + +![](images/qo_flag_conversion_06.svg) +![](images/qo_flag_conversion_07.svg) + +- **Flag 1 (Unknown Quality) is overwritten by any other Flag.** + This allows us to return Null Quality when there is not enough statistics to determine data quality, but it can be suppressed later, once we can return Good/Medium/Bad. + +![](images/qo_flag_conversion_08.svg) + +- **Good and Bad flags do not affect each other, they may coexist.** + +![](images/qo_flag_conversion_09.svg) + +- **Flags for different QOs (QO names) do not affect each other. + Flag 1 (Unknown Quality) is added separately for each.** + +![](images/qo_flag_conversion_10.svg) + # Solving performance issues Problems with performance in message passing systems like QC usually manifest in backpressure seen in input channels of processes which are too slow. @@ -1389,6 +1457,9 @@ This is the global structure of the configuration in QC. }, "checks": { + }, + "aggregators": { + }, "postprocessing": { @@ -1401,7 +1472,7 @@ This is the global structure of the configuration in QC. } ``` -There are four QC-related components: +There are six QC-related components: - "config" - contains global configuration of QC which apply to any component. It is required in any configuration file. - "tasks" - contains declarations of QC Tasks. It is mandatory for running topologies with Tasks and @@ -1409,6 +1480,7 @@ There are four QC-related components: - "externalTasks" - contains declarations of external devices which sends objects to the QC to be checked and stored. - "checks" - contains declarations of QC Checks. It is mandatory for running topologies with Tasks and Checks. +- "aggregators" - contains declarations of QC Aggregators. It is not mandatory. 
- "postprocessing" - contains declarations of PostProcessing Tasks. It is only needed only when Post-Processing is run. diff --git a/doc/ModulesDevelopment.md b/doc/ModulesDevelopment.md index 8959b90c55..75e5ec6cba 100644 --- a/doc/ModulesDevelopment.md +++ b/doc/ModulesDevelopment.md @@ -22,6 +22,7 @@ * [Check](#check) * [Configuration](#configuration) * [Implementation](#implementation) + * [Results](#results) * [Quality Aggregation](#quality-aggregation) * [Quick try](#quick-try) * [Configuration](#configuration-1) @@ -300,7 +301,8 @@ A Check is a function (actually `Check::check()`) that determines the quality of "type": "Task", "name": "QcTask", "MOs": ["example", "other"] - }] + }], + "exportToBookkeeping": "false" }, "QcCheck": { ... @@ -323,6 +325,7 @@ A Check is a function (actually `Check::check()`) that determines the quality of * _type_ - currently only supported are _Task_ and _ExternalTask_ * _name_ - name of the _Task_ * _MOs_ - list of MonitorObjects names or can be omitted to mean that all objects should be taken. +* __exportToBookkeeping__ - allows to propagate the results of this Check to Bookkeeping, where they are visualized as time-based Flags (disabled by default). ### Implementation After the creation of the module described in the above section, every Check functionality requires a separate implementation. The module might implement several Check classes. @@ -333,12 +336,21 @@ void beautify(std::shared_ptr mo, Quality = Quality::Null) {} ``` -The `check()` function is called whenever the _policy_ is satisfied. It gets a map with all declared MonitorObjects. It is expected to return Quality of the given MonitorObjects. +The `check()` function is called whenever the _policy_ is satisfied. It gets a map with all declared MonitorObjects. +It is expected to return Quality of the given MonitorObjects. +Optionally one can associate one or more Flags to a Quality by using `addFlag` on it. 
For each MO or group of MOs, `beautify()` is invoked after `check()` if 1. the check() did not raise an exception 2. there is a single `dataSource` in the configuration of the check +### Results + +Checks return Qualities with associated Flags. +The framework wraps them with a QualityObject, then makes it available to Aggregators (see the next section) and stores them in the repository. +It is also possible to propagate Check results to the Run Condition Table (RCT) in Bookkeeping. +Details are explained at [Propagating Check results to RCT in Bookkeeping](Advanced.md#propagating-check-results-to-rct-in-bookkeeping) + ## Quality Aggregation The _Aggregators_ are able to collect the QualityObjects produced by the checks or other _Aggregators_ and to produce new Qualities. This is especially useful to determine the overall quality of a detector or a set of detectors. diff --git a/doc/images/qo_flag_conversion_01.svg b/doc/images/qo_flag_conversion_01.svg new file mode 100644 index 0000000000..169a1fa953 --- /dev/null +++ b/doc/images/qo_flag_conversion_01.svg @@ -0,0 +1,135 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SOR + + + + + + + + + + + + EOR + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + QO1 + + + + + + Flags for QO1 + + + + + diff --git a/doc/images/qo_flag_conversion_02.svg b/doc/images/qo_flag_conversion_02.svg new file mode 100644 index 0000000000..abd7555792 --- /dev/null +++ b/doc/images/qo_flag_conversion_02.svg @@ -0,0 +1,175 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SOR + + + + + + + + + + + + EOR + + + + + + + + + + + + + + + + + + + + + + + + + + + 14, “Unexplained Bad Quality” + + + + + + + + + + + + + + + + + + + + + + + + + QO1 + + + + + + Flags for QO1 + + + + + + + + 
14, “Unexplained Medium Quality” + + + + + \ No newline at end of file diff --git a/doc/images/qo_flag_conversion_03.svg b/doc/images/qo_flag_conversion_03.svg new file mode 100644 index 0000000000..91c9b79fb4 --- /dev/null +++ b/doc/images/qo_flag_conversion_03.svg @@ -0,0 +1,157 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SOR + + + + + + + + + + + + EOR + + + + + + + + + + + + + + + + + + + + + + + + + + + 1, “Unexplained Null Quality” + + + + + + + + + + + + + + + + + + QO1 + + + + + + Flags for QO1 + + + + + \ No newline at end of file diff --git a/doc/images/qo_flag_conversion_04.svg b/doc/images/qo_flag_conversion_04.svg new file mode 100644 index 0000000000..5ed7115691 --- /dev/null +++ b/doc/images/qo_flag_conversion_04.svg @@ -0,0 +1,245 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SOR + + + + + + + + + + + + EOR + + + + + + + + 6, 9 + + + + + + + + + + + + + + + + + + + + 9 + + + + + + + + + + + + + + 9 + + + + + + + + 9 + + + + + + + + 5 + + + + + + + + + + + + + + + + + + + + + + + + + + 10 + + + + + + + + + + + + + + + + + + + + 9 + + + + + + + + 9 + + + + + + + + 6 + + + + + + + + 10 + + + + + + + + 5 + + + + + + QO1 + + + + + + Flags for QO1 + + + + + \ No newline at end of file diff --git a/doc/images/qo_flag_conversion_05.svg b/doc/images/qo_flag_conversion_05.svg new file mode 100644 index 0000000000..01f3d0d84c --- /dev/null +++ b/doc/images/qo_flag_conversion_05.svg @@ -0,0 +1,200 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SOR + + + + + + + + + + + + EOR + + + + + + + + 6 + + + + + + + + + + + + + + + + + + + + 1 + + + + + + + + + + + + + + + + + + + + + 6 + + + + + + + + 1 + + + + + + 
+ + 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + QO1 + + + + + + Flags for QO1 + + + + + \ No newline at end of file diff --git a/doc/images/qo_flag_conversion_06.svg b/doc/images/qo_flag_conversion_06.svg new file mode 100644 index 0000000000..c3e7e0bf08 --- /dev/null +++ b/doc/images/qo_flag_conversion_06.svg @@ -0,0 +1,198 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SOR + + + + + + + + + + + + EOR + + + + + + + + 14 + + + + + + + + + + + + + + + + + + + + 14 + + + + + + + + + + + + + + 6 + + + + + + + + + + + + + + + + + + + + 14 + + + + + + + + 14 + + + + + + + + 6 + + + + + + + + 6 + + + + + + + + + + + + QO1 + + + + + + Flags for QO1 + + + + + \ No newline at end of file diff --git a/doc/images/qo_flag_conversion_07.svg b/doc/images/qo_flag_conversion_07.svg new file mode 100644 index 0000000000..631d85cabb --- /dev/null +++ b/doc/images/qo_flag_conversion_07.svg @@ -0,0 +1,175 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SOR + + + + + + + + + + + + EOR + + + + + + + + + + + + + + + + + + + + 6, “Sector A off” + + + + + + + + + + + + + + 6, “Sector B off” + + + + + + + + + + + + + + + + + + + + 6, “Sector B off” + + + + + + + + 6, “Sector A off” + + + + + + QO1 + + + + + + Flags for QO1 + + + + + \ No newline at end of file diff --git a/doc/images/qo_flag_conversion_08.svg b/doc/images/qo_flag_conversion_08.svg new file mode 100644 index 0000000000..8ac438dcd3 --- /dev/null +++ b/doc/images/qo_flag_conversion_08.svg @@ -0,0 +1,202 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SOR + + + + + + + + + + + + EOR + + + + + + + + + + + + + + + + + + + + 14 + + + + + + + + + + + + + + 1 + + + + + 
+ + + + + + + + + + + + + + + 14 + + + + + + + + 1 + + + + + + + + + + + + + + + 1 + + + + + + + + 1 + + + + + + + + + + + + + + + + + + QO1 + + + + + + Flags for QO1 + + + + + \ No newline at end of file diff --git a/doc/images/qo_flag_conversion_09.svg b/doc/images/qo_flag_conversion_09.svg new file mode 100644 index 0000000000..1b7a18a466 --- /dev/null +++ b/doc/images/qo_flag_conversion_09.svg @@ -0,0 +1,203 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SOR + + + + + + + + + + + + EOR + + + + + + + + + + + + + + + + + + + + 3 + + + + + + + + + + + + + + 6 + + + + + + + + + + + + + + + + + + + + 3 + + + + + + + + 6 + + + + + + + + 9 + + + + + + + + + + + + + + 9 + + + + + + + + + + + + + + + + + + QO1 + + + + + + Flags for QO1 + + + + + \ No newline at end of file diff --git a/doc/images/qo_flag_conversion_10.svg b/doc/images/qo_flag_conversion_10.svg new file mode 100644 index 0000000000..749e7f44fd --- /dev/null +++ b/doc/images/qo_flag_conversion_10.svg @@ -0,0 +1,228 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + SOR + + + + + + + + + + + + EOR + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 5 + + + + + + + + + + + + + + + + + + + + + + + + + + + 1 + + + + + + + + 1 + + + + + + + + 5 + + + + + + + + + + + + + + 5 + + + + + + + + + + + + + + + 5 + + + + + + QO1 + + + + + + QO2 + + + + + + Flags for QO1 + + + + + + Flags for QO2 + + + + + \ No newline at end of file