Skip to content

Commit

Permalink
UL results no longer break; dummy jsonFile names are created if none are provided
Browse files Browse the repository at this point in the history
  • Loading branch information
APMDSLHC committed Apr 15, 2024
1 parent dc81d34 commit a03f9d7
Show file tree
Hide file tree
Showing 4 changed files with 20 additions and 12 deletions.
13 changes: 8 additions & 5 deletions smodels/matching/modelTester.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,9 @@ def testPoint(inputFile, outputDir, parser, database):
""" Set BSM model, if necessary """
if parser.has_option("particles","model"):
runtime.modelFile = parser.get( "particles", "model" )
else:
logger.debug('Model file has not been defined. Using input file %s to read quantum numbers.' %inputFile)
runtime.modelFile = inputFile

"""Get run parameters and options from the parser"""
sigmacut = parser.getfloat("parameters", "sigmacut") * fb
Expand Down Expand Up @@ -122,7 +125,7 @@ def testPoint(inputFile, outputDir, parser, database):
stableWidth=stableWidth,
ignorePromptQNumbers=ignorePromptQNumbers)
except SModelSError as e:
print("Exception %s %s" % (e, type(e)))
logger.error("Exception %s %s" % (e, type(e)))
""" Update status to fail, print error message and exit """
outputStatus.updateStatus(-1)
return {os.path.basename(inputFile): masterPrinter}
Expand Down Expand Up @@ -410,10 +413,10 @@ def testPoints(fileList, inDir, outputDir, parser, database,
outputDict.update(p.get())

# Collect output to build global summary:
summaryFile = os.path.join(outputDir, 'summary.txt')
logger.info("A summary of the results can be found in %s" %
summaryFile)
printScanSummary(outputDict, summaryFile)
scanSummaryFile = os.path.join(outputDir, 'summary.txt')
logger.info("A summary of the scan results can be found in %s" %
scanSummaryFile)
printScanSummary(outputDict, scanSummaryFile)
# Remove summary log from logger
logger.removeHandler(fileLog)
fileLog.close()
Expand Down
13 changes: 7 additions & 6 deletions smodels/matching/theoryPrediction.py
Original file line number Diff line number Diff line change
Expand Up @@ -368,11 +368,12 @@ def computeStatistics(self, expected=False):
# Compute likelihoods and related parameters:
llhdDict = self.statsComputer.get_five_values(expected = expected,
return_nll = True )
self.cachedObjs[expected]["nll"] = llhdDict["lbsm"]
self.cachedObjs[expected]["nll_sm"] = llhdDict["lsm"]
self.cachedObjs[expected]["nllmax"] = llhdDict["lmax"]
self.cachedObjs[expected]["muhat"] = llhdDict["muhat"]
self.cachedObjs[expected]["sigma_mu"] = llhdDict["sigma_mu"]
if llhdDict not in [ None, {} ]:
self.cachedObjs[expected]["nll"] = llhdDict["lbsm"]
self.cachedObjs[expected]["nll_sm"] = llhdDict["lsm"]
self.cachedObjs[expected]["nllmax"] = llhdDict["lmax"]
self.cachedObjs[expected]["muhat"] = llhdDict["muhat"]
self.cachedObjs[expected]["sigma_mu"] = llhdDict["sigma_mu"]


class TheoryPredictionsCombiner(TheoryPrediction):
Expand Down Expand Up @@ -691,7 +692,7 @@ def theoryPredictionsFor(database : Database, smsTopDict : Dict,
for theoPred in expResults:
theoPred.expResult = expResult
theoPred.deltas_rel = deltas_rel
if not isinstance(theoPred.dataset,CombinedDataSet) and "CR" in theoPred.dataset.dataInfo.dataId: # Individual CRs shouldn't give results
if not isinstance(theoPred.dataset,CombinedDataSet) and not theoPred.dataset.dataInfo.dataId is None and "CR" in theoPred.dataset.dataInfo.dataId: # Individual CRs shouldn't give results
theoPred.upperLimit = None
else:
theoPred.upperLimit = theoPred.getUpperLimit()
Expand Down
4 changes: 3 additions & 1 deletion smodels/statistics/pyhfInterface.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,6 +96,8 @@ def __init__(self, nsignals, inputJsons, jsonFiles=None, includeCRs=False, signa
self.cached_likelihoods = {} ## cache of likelihoods (actually twice_nlls)
self.cached_lmaxes = {} # cache of lmaxes (actually twice_nlls)
self.cachedULs = {False: {}, True: {}, "posteriori": {}}
if jsonFiles is None: # If no name has been provided for the json file(s) and the channels, use fake ones
jsonFiles = dict( zip( [ "dummy%d" % i for i in range(len(inputJsons)) ], [ "" for i in range(len(inputJsons)) ] ) )
self.jsonFiles = jsonFiles
self.includeCRs = includeCRs
self.signalUncertainty = signalUncertainty
Expand Down Expand Up @@ -148,7 +150,7 @@ def getWSInfo(self):
if "CR" in ch["name"]:
nbCRinWS += 1
if nbCRwithEM and nbCRwithEM != nbCRinWS:
logger.warning(f"Number of CRs in workspace: {nbCRwithEM} but number of CRs with EM: {nbCRwithEM}. Signal in CRs will not be patched.")
logger.warning(f"Number of CRs in workspace: {nbCRinWS} but number of CRs with EM: {nbCRwithEM}. Signal in CRs will not be patched.")
if nbCRwithEM != 0 and not self.includeCRs:
logger.warning("EM in CRs but includeCRs == False. Signal in CRs will not be patched.")
for i_ch, ch in enumerate(ws["channels"]):
Expand Down
2 changes: 2 additions & 0 deletions unittests/timeout.ini
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,8 @@ sigmacut = 0.03
minmassgap = 5.0
maxcond = 0.2
ncpus = 1
[particles]
model=share.models.mssm
[database]
path = official
analyses = all
Expand Down

0 comments on commit a03f9d7

Please sign in to comment.