diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..82f32db --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,27 @@ +name: Release + +on: + release: + types: [published] + +jobs: + build_release: + runs-on: ubuntu-18.04 + steps: + - name: Install Kerberos + run: | + sudo apt-get update + sudo DEBIAN_FRONTEND=noninteractive apt-get install -y krb5-user + cat << \EOF > krb5.conf + ${{secrets.KRB5CONF}} + EOF + grep rdns krb5.conf + sudo mv -f krb5.conf /etc/krb5.conf + + - name: Trigger release in jenkins + run: | + echo ${{github.event.release.tag_name}} | grep -e 'v20[0-9][0-9][0-1][0-9][0-9][0-9]' + echo ${{secrets.JENKINS_BOT_PASS}} | kinit ${{secrets.PRINCIPAL}} + curl -X POST -k --negotiate -u : '${{secrets.API_URL}}' -H 'Content-Type: application/x-www-form-urlencoded' -d 'DELPHESO2_TAG=${{github.event.release.tag_name}}' + klist + kdestroy diff --git a/.gitignore b/.gitignore index 9512bc7..4df198e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,5 @@ *~ *.root +*.so +*.d +*.pcm diff --git a/CMakeLists.txt b/CMakeLists.txt index d4f03bb..721f852 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -13,14 +13,16 @@ set(CMAKE_CXX_EXTENSIONS OFF) set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake) +include(FindPackageHandleStandardArgs) find_package(fmt REQUIRED) find_package(ROOT REQUIRED) -find_package(Delphes REQUIRED) find_package(FairLogger REQUIRED) find_package(FairRoot REQUIRED) find_package(AliceO2 REQUIRED) +find_package(Delphes REQUIRED) include("${ROOT_DIR}/RootMacros.cmake") add_subdirectory(src) +add_subdirectory(rpythia8) add_subdirectory(examples) diff --git a/README.md b/README.md index 138d7bb..27e788c 100644 --- a/README.md +++ b/README.md @@ -13,8 +13,11 @@ The workflow is in general the following ## Build and enviroment The build of DelphesO2 is supported via the `aliBuild` framework. -The receipt is on the other hand not in the official `alidist` repository yet. 
-You must grap the `delpheso2.sh` file from `https://github.com/preghenella/alidist/blob/delpheso2/delpheso2.sh` and put it in your own `alidist` directory. +The recipe is now in the official `alidist` repository. Update `alidist` tree in case you do not find `delpheso2.sh` in there +``` +cd alidist +git pull --rebase origin master +``` Afterwards you should be able to build with ``` diff --git a/cmake/FindDelphes.cmake b/cmake/FindDelphes.cmake index fec5656..c0fc421 100644 --- a/cmake/FindDelphes.cmake +++ b/cmake/FindDelphes.cmake @@ -9,7 +9,7 @@ find_path(Delphes_INCLUDE_DIR PATH_SUFFIXES "include") find_library(Delphes_LIBRARY - NAMES libDelphes.so + NAMES libDelphes.so libDelphes.dylib HINTS ${DELPHES_ROOT}/lib ENV LD_LIBRARY_PATH) @@ -31,6 +31,5 @@ if(${Delphes_FOUND}) INTERFACE_LINK_LIBRARIES "${Delphes_LIBRARIES}" ) endif() - -endif() +endif() diff --git a/cmake/FindHepMC.cmake b/cmake/FindHepMC.cmake new file mode 100644 index 0000000..65dcfff --- /dev/null +++ b/cmake/FindHepMC.cmake @@ -0,0 +1,24 @@ +# Copyright CERN and copyright holders of ALICE O2. This software is distributed +# under the terms of the GNU General Public License v3 (GPL Version 3), copied +# verbatim in the file "COPYING". +# +# See http://alice-o2.web.cern.ch/license for full licensing information. +# +# In applying this license CERN does not waive the privileges and immunities +# granted to it by virtue of its status as an Intergovernmental Organization or +# submit itself to any jurisdiction. + +find_path(HepMC_INCLUDE_DIR + NAMES IO_BaseClass.h + PATH_SUFFIXES HepMC + PATHS $ENV{HEPMC_ROOT}/include) + +set(HepMC_INCLUDE_DIR ${HepMC_INCLUDE_DIR}/..) 
+ +find_library(HepMC_LIBRARIES + NAMES libHepMC.so libHepMCfio.so libHepMC.dylib libHepMCfio.dylib + PATHS $ENV{HEPMC_ROOT}/lib) + +find_package_handle_standard_args(HepMC + REQUIRED_VARS HepMC_INCLUDE_DIR HepMC_LIBRARIES + FAIL_MESSAGE "HepMC could not be found") diff --git a/cmake/FindPythia.cmake b/cmake/FindPythia.cmake new file mode 100644 index 0000000..3bc6ad1 --- /dev/null +++ b/cmake/FindPythia.cmake @@ -0,0 +1,24 @@ +# Copyright CERN and copyright holders of ALICE O2. This software is distributed +# under the terms of the GNU General Public License v3 (GPL Version 3), copied +# verbatim in the file "COPYING". +# +# See http://alice-o2.web.cern.ch/license for full licensing information. +# +# In applying this license CERN does not waive the privileges and immunities +# granted to it by virtue of its status as an Intergovernmental Organization or +# submit itself to any jurisdiction. + +find_path(Pythia_INCLUDE_DIR + NAMES Pythia.h + PATH_SUFFIXES Pythia8 + PATHS $ENV{PYTHIA_ROOT}/include) + +set(Pythia_INCLUDE_DIR ${Pythia_INCLUDE_DIR}/..) 
+ +find_library(Pythia_LIBRARIES + NAMES libpythia8.so libpythia8.dylib + PATHS $ENV{PYTHIA_ROOT}/lib) + +find_package_handle_standard_args(Pythia + REQUIRED_VARS Pythia_INCLUDE_DIR Pythia_LIBRARIES + FAIL_MESSAGE "Pythia could not be found") diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 2ed8316..98bfad9 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -29,13 +29,16 @@ file(GLOB SMEARING file(GLOB AOD aod/*.C aod/*.h + aod/*.root + scripts/common.py + scripts/createO2tables.py ) -install(FILES ${PYTHIA8} DESTINATION examples/pythia8) -install(FILES ${PYTHIA8DECAYS} DESTINATION examples/pythia8/decays) -install(FILES ${CARDS} DESTINATION examples/cards) -install(FILES ${LUTS} DESTINATION examples/smearing/luts) -install(FILES ${SCRIPTS} DESTINATION examples/scripts) -install(FILES ${SMEARING} DESTINATION examples/smearing) -install(FILES ${AOD} DESTINATION examples/aod) +install(FILES ${PYTHIA8} DESTINATION examples/pythia8) +install(FILES ${PYTHIA8DECAYS} DESTINATION examples/pythia8/decays) +install(FILES ${CARDS} DESTINATION examples/cards) +install(FILES ${LUTS} DESTINATION examples/smearing/luts) +install(PROGRAMS ${SCRIPTS} DESTINATION examples/scripts) +install(FILES ${SMEARING} DESTINATION examples/smearing) +install(FILES ${AOD} DESTINATION examples/aod) diff --git a/examples/aod/.clang-format b/examples/aod/.clang-format new file mode 100644 index 0000000..93ba0f7 --- /dev/null +++ b/examples/aod/.clang-format @@ -0,0 +1,56 @@ +BasedOnStyle: Google +AccessModifierOffset: -1 +AlignEscapedNewlinesLeft: true +AlignTrailingComments: true +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortFunctionsOnASingleLine: true +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +#AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: true +BinPackParameters: true +BreakBeforeBinaryOperators: false +BreakBeforeBraces: Linux +BreakBeforeTernaryOperators: true 
+BreakConstructorInitializersBeforeComma: false +ColumnLimit: 0 +CommentPragmas: '^ IWYU pragma:' +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 2 +ContinuationIndentWidth: 2 +Cpp11BracedListStyle: true +DerivePointerBinding: false +ExperimentalAutoDetectBinPacking: false +IndentCaseLabels: true +IndentFunctionDeclarationAfterType: true +IndentWidth: 2 +# It is broken on windows. Breaks all #include "header.h" +--- +Language: Cpp +MaxEmptyLinesToKeep: 1 +KeepEmptyLinesAtTheStartOfBlocks: true +NamespaceIndentation: None +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: false +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 200 +SortIncludes: false +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +Standard: Cpp11 +TabWidth: 2 +UseTab: Never +--- +# Do not format protobuf files +Language: Proto +DisableFormat: true diff --git a/examples/aod/check_consistency_with_AP.py b/examples/aod/check_consistency_with_AP.py new file mode 100755 index 0000000..6e9524b --- /dev/null +++ b/examples/aod/check_consistency_with_AP.py @@ -0,0 +1,118 @@ +#!/usr/bin/env python3 + +# from ROOT import gROOT +import wget +import os + + +def get_variable(f, v="TreeName"): + variable = "" + f = open(f) + for i in f: + if v in i: + variable += " " + i.strip() + continue + if variable != "": + variable += " " + i.strip() + if ";" in variable: + break + return variable.replace(" ", "\n") + + +def count_trees(f): + f = open(f) + t = [] + for i in f: + if "->Branch(" in i: + i = i.strip().split("->")[0] + if i not in t: + t.append(i) + return t + + +def check_tree(f1, f2, tn="Kinematics", 
verbose=True): + print("Checking tree", tn, "in", f1, "and", f2) + f1 = open(f1) + f2 = open(f2) + + def get(f): + t = [] + for i in f: + i = i.strip() + i = i.replace(",", ", ").replace(" ", " ") + if f"{tn}->Branch(" in i: + i = i.replace("aod_track", "tracks") + t.append(i) + return t + + t1 = get(f1) + t2 = get(f2) + r = t2 + for i in t1: + if i in t2: + r.pop(r.index(i)) + if len(r) > 0: + print("Remaining", r) + + +def count_structs(f): + nw = " " + if "AliAnalysisTaskAO2Dconverter" in f: + nw = " " + print("Checking", f) + f = open(f) + t = {} + start = False + dm = [] + for i in f: + if "struct {" in i: + start = True + dm = [] + continue + if start and "}" in i and not i.startswith(nw): + if i not in t: + t[i.replace("}", "").split(";")[0].strip().replace( + "aod_track", "tracks")] = dm + start = False + elif start: + i = i.strip() + while " " in i: + i = i.replace(" ", " ") + if not i.startswith("//"): + dm.append(i) + return t + + +def get_and_check(url, to_check="createO2tables.h"): + filename = wget.download(url, out="/tmp/", bar=None) + filename_header = wget.download( + url.replace(".cxx", ".h"), out="/tmp/", bar=None) + # print(get_variable(to_check)) + t1 = count_trees(to_check) + s1 = count_structs(to_check) + # print(s1) + # print(get_variable(filename)) + t2 = count_trees(filename) + s2 = count_structs(filename_header) + # print(s2) + + for i in s1: + if i in s2: + for j in s1[i]: + if j not in s2[i]: + print(j, "Not here") + else: + print(i, "not in s2") + for i in t1: + if i in ["tRICH", "tMID", "tFTOF"]: + continue + check_tree(to_check, filename, tn=i) + if i not in t2: + print("Tree", i, "is not in", t2) + + os.remove(filename) + os.remove(filename_header) + + +get_and_check( + "https://raw.githubusercontent.com/alisw/AliPhysics/master/RUN3/AliAnalysisTaskAO2Dconverter.cxx") diff --git a/examples/aod/createO2tables.C b/examples/aod/createO2tables.C index 7a77f6b..524b2c4 100644 --- a/examples/aod/createO2tables.C +++ 
b/examples/aod/createO2tables.C @@ -1,207 +1,688 @@ R__LOAD_LIBRARY(libDelphes) R__LOAD_LIBRARY(libDelphesO2) +// std::shuffle +#include +// std::default_random_engine +#include +// std::chrono::system_clock +#include + +// ROOT includes +#include "TMath.h" +#include "TChain.h" +#include "TClonesArray.h" +#include "TRandom3.h" +#include "TDatabasePDG.h" +#include "TH1F.h" + +// Delphes includes +#include "ExRootAnalysis/ExRootTreeReader.h" + +// O2 includes +#include "DetectorsVertexing/PVertexer.h" +#include "DetectorsVertexing/PVertexerHelpers.h" +#include "Steer/InteractionSampler.h" +#include "CommonDataFormat/BunchFilling.h" +#include "DetectorsBase/Propagator.h" +#include "DetectorsBase/GeometryManager.h" +#include "DataFormatsFT0/RecPoints.h" +#include "Framework/DataTypes.h" + +// DelphesO2 includes +#include "TrackSmearer.hh" +#include "TOFLayer.hh" +#include "RICHdetector.hh" +#include "ECALdetector.hh" +#include "PhotonConversion.hh" +#include "MIDdetector.hh" +#include "TrackUtils.hh" + #include "createO2tables.h" -double Bz = 0.2; -double tof_radius = 100.; // [cm] -double tof_length = 200.; // [cm] -double tof_sigmat = 0.02; // [ns] +// Detector parameters +const double Bz = 0.2; // [T] +// TOF +constexpr double tof_radius = 100.; // [cm] Radius of the TOF detector (used to compute acceptance) +const double tof_length = 200.; // [cm] Length of the TOF detector (used to compute acceptance) +const double tof_sigmat = 0.02; // [ns] Resolution of the TOF detector +const double tof_sigmat0 = 0.2; // [ns] Time spread of the vertex +const char* tof_mismatch_file = "tofMM.root"; +// Forward TOF +const double forward_tof_radius = 100.; // [cm] Radius of the Forward TOF detector (used to compute acceptance) +const double forward_tof_radius_in = 10.; // [cm] Inner radius of the Forward TOF detector (used to compute acceptance) +const double forward_tof_length = 200.; // [cm] Length of the Forward TOF detector (used to compute acceptance) +const double 
forward_tof_sigmat = 0.02; // [ns] Resolution of the Forward TOF detector +const double forward_tof_sigmat0 = 0.2; // [ns] Time spread of the vertex +// RICH +constexpr double rich_radius = 100.; // [cm] Radius of the RICH detector (used to compute acceptance) +const double rich_length = 200.; // [cm] Length of the RICH detector (used to compute acceptance) +const double rich_index = 1.03; // Refraction index of the RICH detector +const double rich_radiator_length = 2.; // [cm] Radiator length of the RICH detector +const double rich_efficiency = 0.4; // Efficiency of the RICH detector +const double rich_sigma = 7.e-3; // [rad] Resolution of the RICH detector +// Forward RICH +const double forward_rich_radius = 100.; // [cm] Radius of the Forward RICH detector (used to compute acceptance) +const double forward_rich_radius_in = 10.; // [cm] Inner radius of the Forward RICH detector (used to compute acceptance) +const double forward_rich_length = 200.; // [cm] Length of the Forward RICH detector (used to compute acceptance) +const double forward_rich_index = 1.0014; // Refraction index of the Forward RICH detector +const double forward_rich_radiator_length = 95; // [cm] Radiator length of the Forward RICH detector +const double forward_rich_efficiency = 0.2; // Efficiency of the Forward RICH detector +const double forward_rich_sigma = 1.5e-3; // [rad] Resolution of the Forward RICH detector +// MID +const char* inputFileAccMuonPID = "muonAccEffPID.root"; + +// Simulation parameters +constexpr bool do_vertexing = true; // Vertexing with the O2 +constexpr bool enable_nuclei = true; // Nuclei LUTs +constexpr bool enable_ecal = true; // Enable ECAL filling +constexpr bool debug_qa = false; // Debug QA histograms +constexpr int tof_mismatch = 0; // Flag to configure the TOF mismatch running mode: 0 off, 1 create, 2 use -void -createO2tables(const char *inputFile = "delphes.root", - const char *outputFile = "AODRun5.root", - int eventOffset = 0) +int createO2tables(const 
char* inputFile = "delphes.root", + const char* outputFile = "AODRun5.root", + int eventOffset = 0) { - + if ((inputFile != NULL) && (inputFile[0] == '\0')) { + Printf("input file is empty, returning"); + return 0; + } + + // Defining particles to transport + TDatabasePDG::Instance()->AddParticle("deuteron", "deuteron", 1.8756134, kTRUE, 0.0, 3, "Nucleus", 1000010020); + TDatabasePDG::Instance()->AddAntiParticle("anti-deuteron", -1000010020); + + TDatabasePDG::Instance()->AddParticle("triton", "triton", 2.8089218, kTRUE, 0.0, 3, "Nucleus", 1000010030); + TDatabasePDG::Instance()->AddAntiParticle("anti-triton", -1000010030); + + TDatabasePDG::Instance()->AddParticle("helium3", "helium3", 2.80839160743, kTRUE, 0.0, 6, "Nucleus", 1000020030); + TDatabasePDG::Instance()->AddAntiParticle("anti-helium3", -1000020030); + + if constexpr (do_vertexing) { // Load files for the vertexing + o2::base::GeometryManager::loadGeometry("./", false); + o2::base::Propagator::initFieldFromGRP("o2sim_grp.root"); + } + + // Debug histograms + std::map debugHisto; + std::map debugEffNum; + std::map debugEffDen; + std::map debugEffDenPart; + if constexpr (debug_qa) { // Create histograms for debug QA + debugHisto["Multiplicity"] = new TH1F("Multiplicity", "Multiplicity", 1000, 0, 5000); + } + // Create chain of root trees TChain chain("Delphes"); chain.Add(inputFile); - + // Create object of class ExRootTreeReader auto treeReader = new ExRootTreeReader(&chain); - auto numberOfEntries = treeReader->GetEntries(); - + const auto numberOfEntries = treeReader->GetEntries(); + // Get pointers to branches used in this analysis - auto events = treeReader->UseBranch("Event"); - auto tracks = treeReader->UseBranch("Track"); - auto particles = treeReader->UseBranch("Particle"); - + const auto events = treeReader->UseBranch("Event"); + const auto tracks = treeReader->UseBranch("Track"); + const auto particles = treeReader->UseBranch("Particle"); + // smearer o2::delphes::TrackSmearer smearer; - if (Bz 
== 0.2) { - smearer.loadTable(11, "lutCovm.el.2kG.dat"); - smearer.loadTable(13, "lutCovm.mu.2kG.dat"); - smearer.loadTable(211, "lutCovm.pi.2kG.dat"); - smearer.loadTable(321, "lutCovm.ka.2kG.dat"); - smearer.loadTable(2212, "lutCovm.pr.2kG.dat"); - } else if (Bz == 0.5) { - smearer.loadTable(11, "lutCovm.el.5kG.dat"); - smearer.loadTable(13, "lutCovm.mu.5kG.dat"); - smearer.loadTable(211, "lutCovm.pi.5kG.dat"); - smearer.loadTable(321, "lutCovm.ka.5kG.dat"); - smearer.loadTable(2212, "lutCovm.pr.5kG.dat"); - } else { - std::cout << " --- invalid Bz field: " << Bz << std::endl; - return; + std::map mapPdgLut; + mapPdgLut.insert(std::make_pair(11, "lutCovm.el.dat")); + mapPdgLut.insert(std::make_pair(13, "lutCovm.mu.dat")); + mapPdgLut.insert(std::make_pair(211, "lutCovm.pi.dat")); + mapPdgLut.insert(std::make_pair(321, "lutCovm.ka.dat")); + mapPdgLut.insert(std::make_pair(2212, "lutCovm.pr.dat")); + if constexpr (enable_nuclei) { + mapPdgLut.insert(std::make_pair(1000010020, "lutCovm.de.dat")); + mapPdgLut.insert(std::make_pair(1000010030, "lutCovm.tr.dat")); + mapPdgLut.insert(std::make_pair(1000020030, "lutCovm.he3.dat")); + } + for (auto e : mapPdgLut) { + if (!smearer.loadTable(e.first, e.second)) { + Printf("Having issue with loading the LUT %i '%s'", e.first, e.second); + return 1; + } } // TOF layer - o2::delphes::TOFLayer toflayer; - toflayer.setup(tof_radius, tof_length, tof_sigmat); - + o2::delphes::TOFLayer tof_layer; + tof_layer.setup(tof_radius, tof_length, tof_sigmat, tof_sigmat0); + TH1F* hTOFMismatchTemplate = nullptr; + if constexpr (tof_mismatch == 1) { // Create mode + hTOFMismatchTemplate = new TH1F("hTOFMismatchTemplate", "", 3000., -5., 25.); + } else if (tof_mismatch == 2) { // User mode + TFile f(tof_mismatch_file, "READ"); + if (!f.IsOpen()) { + Printf("Did not find file for input TOF mismatch distribution"); + return 1; + } + f.GetObject("hTOFMismatchTemplate", hTOFMismatchTemplate); + hTOFMismatchTemplate->SetDirectory(0); + f.Close(); + 
} + + // Forward TOF layer + o2::delphes::TOFLayer forward_tof_layer; + forward_tof_layer.setup(forward_tof_radius, forward_tof_length, forward_tof_sigmat, forward_tof_sigmat0); + forward_tof_layer.setType(o2::delphes::TOFLayer::kForward); + forward_tof_layer.setRadiusIn(forward_tof_radius_in); + + // RICH layer + o2::delphes::RICHdetector rich_detector; + rich_detector.setup(rich_radius, rich_length); + rich_detector.setIndex(rich_index); + rich_detector.setRadiatorLength(rich_radiator_length); + rich_detector.setEfficiency(rich_efficiency); + rich_detector.setSigma(rich_sigma); + + // Forward RICH layer + o2::delphes::RICHdetector forward_rich_detector; + forward_rich_detector.setup(forward_rich_radius, forward_rich_length); + forward_rich_detector.setIndex(forward_rich_index); + forward_rich_detector.setRadiatorLength(forward_rich_radiator_length); + forward_rich_detector.setEfficiency(forward_rich_efficiency); + forward_rich_detector.setSigma(forward_rich_sigma); + forward_rich_detector.setType(o2::delphes::RICHdetector::kForward); + forward_rich_detector.setRadiusIn(forward_rich_radius_in); + + // ECAL detector + o2::delphes::ECALdetector ecal_detector; + + // Photon Conversion Method + o2::delphes::PhotonConversion photon_conversion; + TLorentzVector photonConv; + + + // MID detector + o2::delphes::MIDdetector mid_detector; + const bool isMID = mid_detector.setup(inputFileAccMuonPID); + if (isMID) { + Printf("creating MID detector"); + } + // create output auto fout = TFile::Open(outputFile, "RECREATE"); - TTree* tBC = MakeTreeO2bc(); - TTree* fTracks = MakeTreeO2track(); - TTree* tEvents = MakeTreeO2collision(); - TTree* tMCvtx = MakeTreeO2mccollision(); - TTree* tKinematics = MakeTreeO2mcparticle(); - TTree* tLabels = MakeTreeO2mctracklabel(); - TTree* tCollisionLabels = MakeTreeO2mccollisionlabel(); - - UInt_t mTrackX = 0xFFFFFFFF; - UInt_t mTrackAlpha = 0xFFFFFFFF; - UInt_t mtrackSnp = 0xFFFFFFFF; - UInt_t mTrackTgl = 0xFFFFFFFF; - UInt_t mTrack1Pt = 
0xFFFFFFFF; // Including the momentun at the inner wall of TPC - UInt_t mTrackCovDiag = 0xFFFFFFFF; // Including the chi2 - UInt_t mTrackCovOffDiag = 0xFFFFFFFF; - UInt_t mTrackSignal = 0xFFFFFFFF; // PID signals and track length - + // Make output Trees + MakeTreeO2bc(); + MakeTreeO2track(); + MakeTreeO2trackCov(); + MakeTreeO2trackExtra(); + MakeTreeO2ftof(); + MakeTreeO2rich(); + MakeTreeO2ecal(); + MakeTreeO2frich(); + MakeTreeO2photon(); + MakeTreeO2mid(); + MakeTreeO2collision(); + MakeTreeO2collisionExtra(); + MakeTreeO2mccollision(); + MakeTreeO2mcparticle(); + MakeTreeO2mctracklabel(); + MakeTreeO2mccollisionlabel(); + + const UInt_t mTrackX = 0xFFFFFFFF; + const UInt_t mTrackAlpha = 0xFFFFFFFF; + const UInt_t mtrackSnp = 0xFFFFFFFF; + const UInt_t mTrackTgl = 0xFFFFFFFF; + const UInt_t mTrack1Pt = 0xFFFFFFFF; // Including the momentun at the inner wall of TPC + const UInt_t mTrackCovDiag = 0xFFFFFFFF; // Including the chi2 + const UInt_t mTrackCovOffDiag = 0xFFFFFFFF; + const UInt_t mTrackSignal = 0xFFFFFFFF; // PID signals and track length + + // Counters int fOffsetLabel = 0; - for (Int_t ientry = 0; ientry < numberOfEntries; ++ientry) { - + int fTrackCounter = 0; // Counter for the track index, needed for derived tables e.g. RICH. To be incremented at every track filled! 
+ + // Random generator for reshuffling tracks when reading them + std::default_random_engine e(std::chrono::system_clock::now().time_since_epoch().count()); // time-based seed: + + // Define the PVertexer and its utilities + o2::steer::InteractionSampler irSampler; + irSampler.setInteractionRate(10000); + irSampler.init(); + + o2::vertexing::PVertexer vertexer; + vertexer.setValidateWithIR(kFALSE); + vertexer.setBunchFilling(irSampler.getBunchFilling()); + vertexer.init(); + + for (Int_t ientry = 0; ientry < numberOfEntries; ++ientry) { // Loop over events + // Adjust start indices for this event in all trees by adding the number of entries of the previous event + for (auto i = 0; i < kTrees; ++i) { + eventextra.fStart[i] += eventextra.fNentries[i]; + eventextra.fNentries[i] = 0; + } + // Load selected branches with data from specified event treeReader->ReadEntry(ientry); + constexpr float multEtaRange = 2.f; // Range in eta to count the charged particles + float dNdEta = 0.f; // Charged particle multiplicity to use in the efficiency evaluation + TLorentzVector pECAL; // 4-momentum of photon in ECAL + + for (Int_t iparticle = 0; iparticle < particles->GetEntries(); ++iparticle) { // Loop over particles + auto particle = (GenParticle*)particles->At(iparticle); - // loop over particles - for (Int_t iparticle = 0; iparticle < particles->GetEntries(); ++iparticle) { - auto particle = (GenParticle *)particles->At(iparticle); - particle->SetUniqueID(iparticle + fOffsetLabel); // not sure this is needed, to be sure - - mcparticle.fMcCollisionsID = ientry + eventOffset; + + mcparticle.fIndexMcCollisions = ientry + eventOffset; mcparticle.fPdgCode = particle->PID; mcparticle.fStatusCode = particle->Status; mcparticle.fFlags = 0; - mcparticle.fMother0 = particle->M1; - if (mcparticle.fMother0 > -1) mcparticle.fMother0 += fOffsetLabel; - mcparticle.fMother1 = particle->M2; - if (mcparticle.fMother1 > -1) mcparticle.fMother1 += fOffsetLabel; - mcparticle.fDaughter0 = 
particle->D1; - if (mcparticle.fDaughter0 > -1) mcparticle.fDaughter0 += fOffsetLabel; - mcparticle.fDaughter1 = particle->D2; - if (mcparticle.fDaughter1 > -1) mcparticle.fDaughter1 += fOffsetLabel; + if (IsSecondary(particles, iparticle)) { + mcparticle.fFlags |= o2::aod::mcparticle::enums::ProducedByTransport; + } else { + mcparticle.fFlags |= o2::aod::mcparticle::enums::PhysicalPrimary; + } + mcparticle.fIndexMcParticles_Mother0 = particle->M1; + if (mcparticle.fIndexMcParticles_Mother0 > -1) + mcparticle.fIndexMcParticles_Mother0 += fOffsetLabel; + mcparticle.fIndexMcParticles_Mother1 = particle->M2; + if (mcparticle.fIndexMcParticles_Mother1 > -1) + mcparticle.fIndexMcParticles_Mother1 += fOffsetLabel; + mcparticle.fIndexMcParticles_Daughter0 = particle->D1; + if (mcparticle.fIndexMcParticles_Daughter0 > -1) + mcparticle.fIndexMcParticles_Daughter0 += fOffsetLabel; + mcparticle.fIndexMcParticles_Daughter1 = particle->D2; + if (mcparticle.fIndexMcParticles_Daughter1 > -1) + mcparticle.fIndexMcParticles_Daughter1 += fOffsetLabel; mcparticle.fWeight = 1.; mcparticle.fPx = particle->Px; mcparticle.fPy = particle->Py; mcparticle.fPz = particle->Pz; - mcparticle.fE = particle->E; + mcparticle.fE = particle->E; - mcparticle.fVx = particle->X; - mcparticle.fVy = particle->Y; - mcparticle.fVz = particle->Z; + mcparticle.fVx = particle->X * 0.1; + mcparticle.fVy = particle->Y * 0.1; + mcparticle.fVz = particle->Z * 0.1; mcparticle.fVt = particle->T; - - tKinematics->Fill(); + if (TMath::Abs(particle->Eta) <= multEtaRange && particle->D1 < 0 && particle->D2 < 0 && particle->Charge != 0) { + dNdEta += 1.f; + } + FillTree(kMcParticle); + + // info for the ECAL + if constexpr (enable_ecal) { + float posZ, posPhi; + if (ecal_detector.makeSignal(*particle, pECAL, posZ, posPhi)) { // to be updated 13.09.2021 + ecal.fIndexCollisions = ientry + eventOffset; + ecal.fIndexMcParticles = TMath::Abs(iparticle + fOffsetLabel); + ecal.fPx = pECAL.Px(); + ecal.fPy = pECAL.Py(); + 
ecal.fPz = pECAL.Pz(); + ecal.fE = pECAL.E(); + ecal.fPosZ = posZ; + ecal.fPosPhi = posPhi; + FillTree(kA3ECAL); + } + } + + // fill debug information + + // info for the PhotonConversion + TLorentzVector photonConv; + + if (photon_conversion.hasPhotonConversion(*particle)) { + if (photon_conversion.makeSignal(*particle, photonConv)) { + photon.fIndexCollisions = ientry + eventOffset; + photon.fIndexMcParticles = TMath::Abs(iparticle + fOffsetLabel); + photon.fPx = photonConv.Px(); + photon.fPy = photonConv.Py(); + photon.fPz = photonConv.Pz(); + FillTree(kA3Photon); + } + } + + if constexpr (debug_qa) { + if (!debugEffDenPart[particle->PID]) { + debugEffDenPart[particle->PID] = new TH1F(Form("denPart%i", particle->PID), Form("denPart%i;#it{p}_{T} (GeV/#it{c})", particle->PID), 1000, 0, 10); + } + debugEffDenPart[particle->PID]->Fill(particle->PT); + } + } + dNdEta = 0.5f * dNdEta / multEtaRange; + if constexpr (debug_qa) { + debugHisto["Multiplicity"]->Fill(dNdEta); } fOffsetLabel += particles->GetEntries(); - - // loop over tracks - std::vector tof_tracks; - for (Int_t itrack = 0; itrack < tracks->GetEntries(); ++itrack) { + + // For vertexing + std::vector tracks_for_vertexing; + std::vector bcData; + o2::InteractionRecord ir = irSampler.generateCollisionTime(); // Generate IR + + // Tracks used for the T0 evaluation + std::vector tof_tracks; + std::vector ftof_tracks; + std::vector> ftof_tracks_indices; + const int multiplicity = tracks->GetEntries(); + + // Build index array of tracks to randomize track writing order + std::vector tracks_indices(tracks->GetEntries()); // vector with tracks->GetEntries() + std::iota(std::begin(tracks_indices), std::end(tracks_indices), 0); // Fill with 0, 1, ... 
+ std::shuffle(tracks_indices.begin(), tracks_indices.end(), e); + + // Flags to check that all the indices are written + bool did_first = tracks->GetEntries() == 0; + bool did_last = tracks->GetEntries() == 0; + for (Int_t itrack : tracks_indices) { // Loop over tracks + if (itrack == 0) { + did_first = true; + } + if (itrack == tracks->GetEntries() - 1) { + did_last = true; + } + if (itrack < 0) { + Printf("Got a negative index!"); + return 1; + } // get track and corresponding particle - auto track = (Track *)tracks->At(itrack); - auto particle = (GenParticle *)track->Particle.GetObject(); + const auto track = (Track*)tracks->At(itrack); + auto particle = (GenParticle*)track->Particle.GetObject(); - // fill the label tree - Int_t alabel = particle->GetUniqueID(); - mctracklabel.fLabel = TMath::Abs(alabel); - mctracklabel.fLabelMask = 0; - tLabels->Fill(); - O2Track o2track; // tracks in internal O2 format o2::delphes::TrackUtils::convertTrackToO2Track(*track, o2track, true); - smearer.smearTrack(o2track, track->PID); + if constexpr (debug_qa) { + if (!debugEffDen[track->PID]) { + debugEffDen[track->PID] = new TH1F(Form("den%i", track->PID), Form("den%i;#it{p}_{T} (GeV/#it{c})", track->PID), 1000, 0, 10); + } + debugEffDen[track->PID]->Fill(track->PT); + } + if (!smearer.smearTrack(o2track, track->PID, dNdEta)) { // Skipping inefficient/not correctly smeared tracks + continue; + } + if constexpr (debug_qa) { + if (!debugEffNum[track->PID]) { + debugEffNum[track->PID] = new TH1F(Form("num%i", track->PID), Form("num%i;#it{p}_{T} (GeV/#it{c})", track->PID), 1000, 0, 10); + } + debugEffNum[track->PID]->Fill(track->PT); + } o2::delphes::TrackUtils::convertO2TrackToTrack(o2track, *track, true); - + + // fill the label tree + Int_t alabel = particle->GetUniqueID(); + mctracklabel.fIndexMcParticles = TMath::Abs(alabel); + mctracklabel.fMcMask = 0; + FillTree(kMcTrackLabel); + // set track information - mytracks.fCollisionsID = ientry + eventOffset; - mytracks.fX = 
o2track.getX(); - mytracks.fAlpha = o2track.getAlpha(); - mytracks.fY = o2track.getY(); - mytracks.fZ = o2track.getZ(); - mytracks.fSnp = o2track.getSnp(); - mytracks.fTgl = o2track.getTgl(); - mytracks.fSigned1Pt = o2track.getQ2Pt(); - + aod_track.fIndexCollisions = ientry + eventOffset; + aod_track.fX = o2track.getX(); + aod_track.fAlpha = o2track.getAlpha(); + aod_track.fY = o2track.getY(); + aod_track.fZ = o2track.getZ(); + aod_track.fSnp = o2track.getSnp(); + aod_track.fTgl = o2track.getTgl(); + aod_track.fSigned1Pt = o2track.getQ2Pt(); + // Modified covariance matrix // First sigmas on the diagonal - mytracks.fSigmaY = TMath::Sqrt(o2track.getSigmaY2()); - mytracks.fSigmaZ = TMath::Sqrt(o2track.getSigmaZ2()); - mytracks.fSigmaSnp = TMath::Sqrt(o2track.getSigmaSnp2()); - mytracks.fSigmaTgl = TMath::Sqrt(o2track.getSigmaTgl2()); - mytracks.fSigma1Pt = TMath::Sqrt(o2track.getSigma1Pt2()); - - mytracks.fRhoZY = (Char_t)(128.*o2track.getSigmaZY()/mytracks.fSigmaZ/mytracks.fSigmaY); - mytracks.fRhoSnpY = (Char_t)(128.*o2track.getSigmaSnpY()/mytracks.fSigmaSnp/mytracks.fSigmaY); - mytracks.fRhoSnpZ = (Char_t)(128.*o2track.getSigmaSnpZ()/mytracks.fSigmaSnp/mytracks.fSigmaZ); - mytracks.fRhoTglY = (Char_t)(128.*o2track.getSigmaTglY()/mytracks.fSigmaTgl/mytracks.fSigmaY); - mytracks.fRhoTglZ = (Char_t)(128.*o2track.getSigmaTglZ()/mytracks.fSigmaTgl/mytracks.fSigmaZ); - mytracks.fRhoTglSnp = (Char_t)(128.*o2track.getSigmaTglSnp()/mytracks.fSigmaTgl/mytracks.fSigmaSnp); - mytracks.fRho1PtY = (Char_t)(128.*o2track.getSigma1PtY()/mytracks.fSigma1Pt/mytracks.fSigmaY); - mytracks.fRho1PtZ = (Char_t)(128.*o2track.getSigma1PtZ()/mytracks.fSigma1Pt/mytracks.fSigmaZ); - mytracks.fRho1PtSnp = (Char_t)(128.*o2track.getSigma1PtSnp()/mytracks.fSigma1Pt/mytracks.fSigmaSnp); - mytracks.fRho1PtTgl = (Char_t)(128.*o2track.getSigma1PtTgl()/mytracks.fSigma1Pt/mytracks.fSigmaTgl); + aod_track.fSigmaY = TMath::Sqrt(o2track.getSigmaY2()); + aod_track.fSigmaZ = 
TMath::Sqrt(o2track.getSigmaZ2()); + aod_track.fSigmaSnp = TMath::Sqrt(o2track.getSigmaSnp2()); + aod_track.fSigmaTgl = TMath::Sqrt(o2track.getSigmaTgl2()); + aod_track.fSigma1Pt = TMath::Sqrt(o2track.getSigma1Pt2()); + + aod_track.fRhoZY = (Char_t)(128. * o2track.getSigmaZY() / aod_track.fSigmaZ / aod_track.fSigmaY); + aod_track.fRhoSnpY = (Char_t)(128. * o2track.getSigmaSnpY() / aod_track.fSigmaSnp / aod_track.fSigmaY); + aod_track.fRhoSnpZ = (Char_t)(128. * o2track.getSigmaSnpZ() / aod_track.fSigmaSnp / aod_track.fSigmaZ); + aod_track.fRhoTglY = (Char_t)(128. * o2track.getSigmaTglY() / aod_track.fSigmaTgl / aod_track.fSigmaY); + aod_track.fRhoTglZ = (Char_t)(128. * o2track.getSigmaTglZ() / aod_track.fSigmaTgl / aod_track.fSigmaZ); + aod_track.fRhoTglSnp = (Char_t)(128. * o2track.getSigmaTglSnp() / aod_track.fSigmaTgl / aod_track.fSigmaSnp); + aod_track.fRho1PtY = (Char_t)(128. * o2track.getSigma1PtY() / aod_track.fSigma1Pt / aod_track.fSigmaY); + aod_track.fRho1PtZ = (Char_t)(128. * o2track.getSigma1PtZ() / aod_track.fSigma1Pt / aod_track.fSigmaZ); + aod_track.fRho1PtSnp = (Char_t)(128. * o2track.getSigma1PtSnp() / aod_track.fSigma1Pt / aod_track.fSigmaSnp); + aod_track.fRho1PtTgl = (Char_t)(128. 
* o2track.getSigma1PtTgl() / aod_track.fSigma1Pt / aod_track.fSigmaTgl); //FIXME this needs to be fixed - mytracks.fITSClusterMap = 3; - mytracks.fFlags = 4; + aod_track.fITSClusterMap = 3; + aod_track.fFlags = 4; + //FIXME this also needs to be fixed + aod_track.fTrackEtaEMCAL = 0; //track->GetTrackEtaOnEMCal(); + aod_track.fTrackPhiEMCAL = 0; //track->GetTrackPhiOnEMCal(); + + aod_track.fLength = track->L * 0.1; // [cm] // check if has hit the TOF - if (toflayer.hasTOF(*track)) { - tof_tracks.push_back(track); - mytracks.fLength = track->L * 0.1; // [cm] - mytracks.fTOFSignal = track->TOuter * 1.e12; // [ps] - mytracks.fTOFExpMom = track->P * 0.029979246; - } - else { - mytracks.fLength = -999.f; - mytracks.fTOFSignal = -999.f; - mytracks.fTOFExpMom = -999.f; - } - - fTracks->Fill(); + if (tof_layer.hasTOF(*track)) { + + if constexpr (tof_mismatch != 0) { + const auto L = std::sqrt(track->XOuter * track->XOuter + track->YOuter * track->YOuter + track->ZOuter * track->ZOuter); + if constexpr (tof_mismatch == 1) { // Created mode: fill output mismatch template + hTOFMismatchTemplate->Fill(track->TOuter * 1.e9 - L / 299.79246); + } else if constexpr (tof_mismatch == 2) { // User mode: do some random mismatch + auto lutEntry = smearer.getLUTEntry(track->PID, dNdEta, 0., o2track.getEta(), 1. / o2track.getQ2Pt()); + if (lutEntry && lutEntry->valid) { // Check that LUT entry is valid + if constexpr (tof_radius < 50.) 
{ // Inner TOF + if (gRandom->Uniform() < (1.f - lutEntry->itof)) { + track->TOuter = (hTOFMismatchTemplate->GetRandom() + L / 299.79246) * 1.e-9; + } + } else { // Outer TOF + if (gRandom->Uniform() < (1.f - lutEntry->otof)) { + track->TOuter = (hTOFMismatchTemplate->GetRandom() + L / 299.79246) * 1.e-9; + } + } + } + } + } + + aod_track.fTOFChi2 = 1.f; // Negative if TOF is not available + aod_track.fTOFSignal = track->TOuter * 1.e12; // [ps] + aod_track.fTrackTime = track->TOuter * 1.e9; // [ns] + aod_track.fTrackTimeRes = 200 * 1.e9; // [ns] + aod_track.fTOFExpMom = track->P * 0.029979246; + // if primary push to TOF tracks + if (fabs(aod_track.fY) < 3. * aod_track.fSigmaY && fabs(aod_track.fZ) < 3. * aod_track.fSigmaZ) + tof_tracks.push_back(track); + } else { + aod_track.fTOFChi2 = -1.f; + aod_track.fTOFSignal = -999.f; + aod_track.fTrackTime = -999.f; + aod_track.fTrackTimeRes = 2000 * 1.e9; + aod_track.fTOFExpMom = -999.f; + } + + // check if has hit on RICH + if (rich_detector.hasRICH(*track)) { + const auto measurement = rich_detector.getMeasuredAngle(*track); + rich.fIndexCollisions = ientry + eventOffset; + rich.fIndexTracks = fTrackCounter; // Index in the Track table + rich.fRICHSignal = measurement.first; + rich.fRICHSignalError = measurement.second; + std::array deltaangle, nsigma; + rich_detector.makePID(*track, deltaangle, nsigma); + rich.fRICHDeltaEl = deltaangle[0]; + rich.fRICHDeltaMu = deltaangle[1]; + rich.fRICHDeltaPi = deltaangle[2]; + rich.fRICHDeltaKa = deltaangle[3]; + rich.fRICHDeltaPr = deltaangle[4]; + rich.fRICHNsigmaEl = nsigma[0]; + rich.fRICHNsigmaMu = nsigma[1]; + rich.fRICHNsigmaPi = nsigma[2]; + rich.fRICHNsigmaKa = nsigma[3]; + rich.fRICHNsigmaPr = nsigma[4]; + FillTree(kRICH); + } + + // check if has hit on the forward RICH + if (forward_rich_detector.hasRICH(*track)) { + const auto measurement = forward_rich_detector.getMeasuredAngle(*track); + frich.fIndexCollisions = ientry + eventOffset; + frich.fIndexTracks = 
fTrackCounter; // Index in the Track table + frich.fRICHSignal = measurement.first; + frich.fRICHSignalError = measurement.second; + std::array deltaangle, nsigma; + forward_rich_detector.makePID(*track, deltaangle, nsigma); + frich.fRICHDeltaEl = deltaangle[0]; + frich.fRICHDeltaMu = deltaangle[1]; + frich.fRICHDeltaPi = deltaangle[2]; + frich.fRICHDeltaKa = deltaangle[3]; + frich.fRICHDeltaPr = deltaangle[4]; + frich.fRICHNsigmaEl = nsigma[0]; + frich.fRICHNsigmaMu = nsigma[1]; + frich.fRICHNsigmaPi = nsigma[2]; + frich.fRICHNsigmaKa = nsigma[3]; + frich.fRICHNsigmaPr = nsigma[4]; + FillTree(kFRICH); + } + + // check if has Forward TOF + if (forward_tof_layer.hasTOF(*track)) { + ftof_tracks.push_back(track); + ftof_tracks_indices.push_back(std::pair{ientry + eventOffset, fTrackCounter}); + } + + + // check if it is within the acceptance of the MID + if (isMID) { + if (mid_detector.hasMID(*track)) { + mid.fIndexCollisions = ientry + eventOffset; + mid.fIndexTracks = fTrackCounter; // Index in the Track table + mid.fMIDIsMuon = mid_detector.isMuon(*track, multiplicity); + FillTree(kMID); + } + } + if constexpr (do_vertexing) { + const float t = (ir.bc2ns() + gRandom->Gaus(0., 100.)) * 1e-3; + tracks_for_vertexing.push_back(TrackAlice3{o2track, t, 100.f * 1e-3, TMath::Abs(alabel)}); + } + FillTree(kTracks); + FillTree(kTracksCov); + FillTree(kTracksExtra); + fTrackCounter++; // fill histograms } + // Filling the fTOF tree after computing its T0 + std::array ftzero; + + forward_tof_layer.eventTime(ftof_tracks, ftzero); + for (unsigned int i = 0; i < ftof_tracks.size(); i++) { + auto track = ftof_tracks[i]; + ftof.fIndexCollisions = ftof_tracks_indices[i].first; + ftof.fIndexTracks = ftof_tracks_indices[i].second; // Index in the Track table + + ftof.fFTOFLength = track->L * 0.1; // [cm] + ftof.fFTOFSignal = track->TOuter * 1.e12; // [ps] + + std::array deltat, nsigma; + forward_tof_layer.makePID(*track, deltat, nsigma); + ftof.fFTOFDeltaEl = deltat[0]; + 
ftof.fFTOFDeltaMu = deltat[1]; + ftof.fFTOFDeltaPi = deltat[2]; + ftof.fFTOFDeltaKa = deltat[3]; + ftof.fFTOFDeltaPr = deltat[4]; + ftof.fFTOFNsigmaEl = nsigma[0]; + ftof.fFTOFNsigmaMu = nsigma[1]; + ftof.fFTOFNsigmaPi = nsigma[2]; + ftof.fFTOFNsigmaKa = nsigma[3]; + ftof.fFTOFNsigmaPr = nsigma[4]; + FillTree(kFTOF); + } + + if (eventextra.fNentries[kTracks] != eventextra.fNentries[kTracksCov] || eventextra.fNentries[kTracks] != eventextra.fNentries[kTracksExtra]) { + Printf("Issue with the counters"); + return 1; + } + if (!did_first) { + Printf("Did not read first track"); + return 1; + } + if (!did_last) { + Printf("Did not read last track"); + return 1; + } + // compute the event time std::array tzero; - toflayer.eventTime(tof_tracks, tzero); + if (!tof_layer.eventTime(tof_tracks, tzero) && tof_tracks.size() > 0) { + Printf("Issue when evaluating the start time"); + return 1; + } // fill collision information - collision.fBCsID = ientry + eventOffset; + collision.fIndexBCs = ientry + eventOffset; bc.fGlobalBC = ientry + eventOffset; - collision.fPosX = 0.; - collision.fPosY = 0.; - collision.fPosZ = 0.; - collision.fCovXX = 0.01; - collision.fCovXY = 0.01; - collision.fCovXZ = 0.01; - collision.fCovYY = 0.01; - collision.fCovYZ = 0.01; - collision.fCovZZ = 0.01; - collision.fChi2 = 0.01; - collision.fCollisionTime = tzero[0] * 1.e3; // [ps] - collision.fCollisionTimeRes = tzero[1] * 1.e3; // [ps] - tEvents->Fill(); - tBC->Fill(); - - mccollision.fBCsID = ientry + eventOffset; + if constexpr (do_vertexing) { // Performing vertexing + std::vector lblTracks; + std::vector vertices; + std::vector vertexTrackIDs; + std::vector v2tRefs; + std::vector lblVtx; + lblVtx.emplace_back(ientry + eventOffset, 1); + std::vector idxVec; // here we will the global IDs of all used tracks + idxVec.reserve(tracks_for_vertexing.size()); + for (unsigned i = 0; i < tracks_for_vertexing.size(); i++) { + lblTracks.emplace_back(tracks_for_vertexing[i].mLabel, ientry + eventOffset, 1, 
false); + idxVec.emplace_back(i, o2::dataformats::GlobalTrackID::ITS); + } + const int n_vertices = vertexer.process(tracks_for_vertexing, + idxVec, + gsl::span{bcData}, + vertices, + vertexTrackIDs, + v2tRefs, + gsl::span{lblTracks}, + lblVtx); + if (n_vertices == 0) { + collision.fPosX = 0.f; + collision.fPosY = 0.f; + collision.fPosZ = 0.f; + collision.fCovXX = 0.f; + collision.fCovXY = 0.f; + collision.fCovXZ = 0.f; + collision.fCovYY = 0.f; + collision.fCovYZ = 0.f; + collision.fCovZZ = 0.f; + collision.fFlags = 0; + collision.fChi2 = 0.01f; + collision.fN = 0; + } else { + int index = 0; + int hm = 0; + for (int i = 0; i < n_vertices; i++) { + //in case of multiple vertices select the vertex with the higher multiplicities + if (vertices[i].getNContributors() > hm) { + hm = vertices[i].getNContributors(); + index = i; + } + } + collision.fPosX = vertices[index].getX(); + collision.fPosY = vertices[index].getY(); + collision.fPosZ = vertices[index].getZ(); + collision.fCovXX = vertices[index].getSigmaX2(); + collision.fCovXY = vertices[index].getSigmaXY(); + collision.fCovXZ = vertices[index].getSigmaXZ(); + collision.fCovYY = vertices[index].getSigmaY2(); + collision.fCovYZ = vertices[index].getSigmaYZ(); + collision.fCovZZ = vertices[index].getSigmaZ2(); + collision.fFlags = 0; + collision.fChi2 = vertices[index].getChi2(); + collision.fN = vertices[index].getNContributors(); + } + } else { + collision.fPosX = 0.f; + collision.fPosY = 0.f; + collision.fPosZ = 0.f; + collision.fCovXX = 0.f; + collision.fCovXY = 0.f; + collision.fCovXZ = 0.f; + collision.fCovYY = 0.f; + collision.fCovYZ = 0.f; + collision.fCovZZ = 0.f; + collision.fFlags = 0; + collision.fChi2 = 0.01f; + collision.fN = tracks->GetEntries(); + } + collision.fCollisionTime = tzero[0]; // [ns] + collision.fCollisionTimeRes = tzero[1]; // [ns] + FillTree(kEvents); + FillTree(kBC); + + mccollision.fIndexBCs = ientry + eventOffset; mccollision.fGeneratorsID = 0; mccollision.fPosX = 0.; 
mccollision.fPosY = 0.; @@ -209,20 +690,50 @@ createO2tables(const char *inputFile = "delphes.root", mccollision.fT = 0.; mccollision.fWeight = 0.; mccollision.fImpactParameter = 0.; - tMCvtx->Fill(); + FillTree(kMcCollision); - mccollisionlabel.fLabel = ientry + eventOffset; - mccollisionlabel.fLabelMask = 0; - tCollisionLabels->Fill(); + mccollisionlabel.fIndexMcCollisions = ientry + eventOffset; + mccollisionlabel.fMcMask = 0; + FillTree(kMcCollisionLabel); + + FillTree(kEventsExtra); + } + + Printf("Writing tables for %i events", eventextra.fStart[kEvents] + 1); + TString out_dir = outputFile; + const TObjArray* out_tag = out_dir.Tokenize("."); + out_dir = out_tag->GetEntries() > 1 ? out_tag->At(1)->GetName() : ""; + if (!out_dir.IsDec()) { + out_dir = "DF_0"; + } else { + out_dir = Form("DF_%i", out_dir.Atoi()); + } + fout->mkdir(out_dir); + fout->cd(out_dir); + for (int i = 0; i < kTrees; i++) { + if (Trees[i]) + Trees[i]->Write(); } - - fTracks->Write(); - tEvents->Write(); - tMCvtx->Write(); - tBC->Write(); - tKinematics->Write(); - tLabels->Write(); - tCollisionLabels->Write(); + fout->cd(); + for (auto e : debugHisto) { + e.second->Write(); + } + for (auto e : debugEffNum) { + e.second->Write(); + } + for (auto e : debugEffDen) { + e.second->Write(); + } + for (auto e : debugEffDenPart) { + e.second->Write(); + } + fout->ls(); fout->Close(); - + + Printf("AOD written!"); + if constexpr (tof_mismatch == 1) { + Printf("Writing the template for TOF mismatch"); + hTOFMismatchTemplate->SaveAs(Form("tof_mismatch_template_%s.root", out_dir.Data())); + } + return 0; } diff --git a/examples/aod/createO2tables.h b/examples/aod/createO2tables.h index 84b8216..ed63a78 100644 --- a/examples/aod/createO2tables.h +++ b/examples/aod/createO2tables.h @@ -1,20 +1,140 @@ +#include "TTree.h" + +enum TreeIndex { // Index of the output trees + kEvents = 0, + kEventsExtra, + kTracks, + kTracksCov, + kTracksExtra, + kFwdTrack, + kFwdTrackCov, + kCalo, + kCaloTrigger, + kMuonCls, 
+ kZdc, + kFV0A, + kFV0C, + kFT0, + kFDD, + kV0s, + kCascades, + kTOF, + kMcParticle, + kMcCollision, + kMcTrackLabel, + kMcCaloLabel, + kMcCollisionLabel, + kBC, + kRun2BCInfo, + kOrigin, + kHMPID, + kRICH, + kFRICH, + kMID, + kFTOF, + kA3ECAL, + kA3Photon, + kTrees +}; + +const int fBasketSizeEvents = 1000000; // Maximum basket size of the trees for events +const int fBasketSizeTracks = 10000000; // Maximum basket size of the trees for tracks + +const TString TreeName[kTrees] = {"O2collision", + "DbgEventExtra", + "O2track", + "O2trackcov", + "O2trackextra", + "O2fwdtrack", + "O2fwdtrackcov", + "O2calo", + "O2calotrigger", + "O2muoncluster", + "O2zdc", + "O2fv0a", + "O2fv0c", + "O2ft0", + "O2fdd", + "O2v0", + "O2cascade", + "O2tof", + "O2mcparticle", + "O2mccollision", + "O2mctracklabel", + "O2mccalolabel", + "O2mccollisionlabel", + "O2bc", + "O2run2bcinfo", + "O2origin", + "O2hmpid", + "O2rich", + "O2frich", + "O2mid", + "O2ftof", + "O2a3ecal", + "O2photonconv"}; + +const TString TreeTitle[kTrees] = {"Collision tree", + "Collision extra", + "Barrel tracks Parameters", + "Barrel tracks Covariance", + "Barrel tracks Extra", + "Forward tracks Parameters", + "Forward tracks Covariances", + "Calorimeter cells", + "Calorimeter triggers", + "MUON clusters", + "ZDC", + "FV0A", + "FV0C", + "FT0", + "FDD", + "V0s", + "Cascades", + "TOF hits", + "Kinematics", + "MC collisions", + "MC track labels", + "MC calo labels", + "MC collision labels", + "BC info", + "Run 2 BC Info", + "DF ids", + "HMPID info", + "RICH info", + "Forward RICH info", + "MID info", + "Forward TOF info", + "ALICE3 ECAL", + "PhotonConversion"}; + +TTree* Trees[kTrees] = {nullptr}; // Array of created TTrees +TTree* CreateTree(TreeIndex t) +{ + TTree* tree = new TTree(TreeName[t], TreeTitle[t]); + tree->SetAutoFlush(0); + Trees[t] = tree; + return tree; +} + struct { // Event data - Int_t fBCsID = 0u; /// Index to BC table + Int_t fIndexBCs = 0u; /// Index to BC table // Primary vertex position - Float_t 
fPosX = -999.f; /// Primary vertex x coordinate - Float_t fPosY = -999.f; /// Primary vertex y coordinate - Float_t fPosZ = -999.f; /// Primary vertex z coordinate + Float_t fPosX = -999.f; /// Primary vertex x coordinate + Float_t fPosY = -999.f; /// Primary vertex y coordinate + Float_t fPosZ = -999.f; /// Primary vertex z coordinate // Primary vertex covariance matrix - Float_t fCovXX = 999.f; /// cov[0] - Float_t fCovXY = 0.f; /// cov[1] - Float_t fCovXZ = 0.f; /// cov[2] - Float_t fCovYY = 999.f; /// cov[3] - Float_t fCovYZ = 0.f; /// cov[4] - Float_t fCovZZ = 999.f; /// cov[5] + Float_t fCovXX = 999.f; /// cov[0] + Float_t fCovXY = 0.f; /// cov[1] + Float_t fCovXZ = 0.f; /// cov[2] + Float_t fCovYY = 999.f; /// cov[3] + Float_t fCovYZ = 0.f; /// cov[4] + Float_t fCovZZ = 999.f; /// cov[5] // Quality parameters - Float_t fChi2 = 999.f; /// Chi2 of the vertex - UInt_t fN = 0u; /// Number of contributors + UShort_t fFlags = 0; /// Vertex type + Float_t fChi2 = 999.f; /// Chi2 of the vertex + UShort_t fN = 0u; /// Number of contributors // The calculation of event time certainly will be modified in Run3 // The prototype below can be switched on request @@ -24,10 +144,10 @@ struct { } collision; //! 
structure to keep the primary vertex (avoid name conflicts) -TTree* MakeTreeO2collision() +void MakeTreeO2collision() { - TTree* tEvents = new TTree("O2collision", "Collision tree"); - tEvents->Branch("fBCsID", &collision.fBCsID, "fBCsID/I"); + TTree* tEvents = CreateTree(kEvents); + tEvents->Branch("fIndexBCs", &collision.fIndexBCs, "fIndexBCs/I"); tEvents->Branch("fPosX", &collision.fPosX, "fPosX/F"); tEvents->Branch("fPosY", &collision.fPosY, "fPosY/F"); tEvents->Branch("fPosZ", &collision.fPosZ, "fPosZ/F"); @@ -37,31 +157,49 @@ TTree* MakeTreeO2collision() tEvents->Branch("fCovYY", &collision.fCovYY, "fCovYY/F"); tEvents->Branch("fCovYZ", &collision.fCovYZ, "fCovYZ/F"); tEvents->Branch("fCovZZ", &collision.fCovZZ, "fCovZZ/F"); + tEvents->Branch("fFlags", &collision.fFlags, "fFlags/s"); tEvents->Branch("fChi2", &collision.fChi2, "fChi2/F"); - tEvents->Branch("fNumContrib", &collision.fN, "fNumContrib/i"); + tEvents->Branch("fNumContrib", &collision.fN, "fNumContrib/s"); tEvents->Branch("fCollisionTime", &collision.fCollisionTime, "fCollisionTime/F"); tEvents->Branch("fCollisionTimeRes", &collision.fCollisionTimeRes, "fCollisionTimeRes/F"); tEvents->Branch("fCollisionTimeMask", &collision.fCollisionTimeMask, "fCollisionTimeMask/b"); - return tEvents; + tEvents->SetBasketSize("*", fBasketSizeEvents); +} + +struct { + // Start indices and numbers of elements for data in the other trees matching this vertex. + // Needed for random access of collision-related data, allowing skipping data discarded by the user + Int_t fStart[kTrees] = {0}; /// Start entry indices for data in the other trees matching this vertex + Int_t fNentries[kTrees] = {0}; /// Numbers of entries for data in the other trees matching this vertex +} eventextra; //! 
structure for benchmarking information + +void MakeTreeO2collisionExtra() +{ + TTree* tEventsExtra = CreateTree(kEventsExtra); + TString sstart = TString::Format("fStart[%d]/I", kTrees); + TString sentries = TString::Format("fNentries[%d]/I", kTrees); + tEventsExtra->Branch("fStart", eventextra.fStart, sstart.Data()); + tEventsExtra->Branch("fNentries", eventextra.fNentries, sentries.Data()); + tEventsExtra->SetBasketSize("*", fBasketSizeEvents); } struct { // MC collision - Int_t fBCsID = 0u; /// Index to BC table + Int_t fIndexBCs = 0u; /// Index to BC table Short_t fGeneratorsID = 0u; /// Generator ID used for the MC - Float_t fPosX = -999.f; /// Primary vertex x coordinate from MC - Float_t fPosY = -999.f; /// Primary vertex y coordinate from MC - Float_t fPosZ = -999.f; /// Primary vertex z coordinate from MC - Float_t fT = -999.f; /// Time of the collision from MC - Float_t fWeight = -999.f; /// Weight from MC + Float_t fPosX = -999.f; /// Primary vertex x coordinate from MC + Float_t fPosY = -999.f; /// Primary vertex y coordinate from MC + Float_t fPosZ = -999.f; /// Primary vertex z coordinate from MC + Float_t fT = -999.f; /// Time of the collision from MC + Float_t fWeight = -999.f; /// Weight from MC // Generation details (HepMC3 in the future) Float_t fImpactParameter = -999.f; /// Impact parameter from MC -} mccollision; //! MC collisions = vertices +} mccollision; //! 
MC collisions = vertices -TTree* MakeTreeO2mccollision() +void MakeTreeO2mccollision() { - TTree* tMCvtx = new TTree("O2mccollision", "MC Collision tree"); - tMCvtx->Branch("fBCsID", &mccollision.fBCsID, "fBCsID/I"); + TTree* tMCvtx = CreateTree(kMcCollision); + tMCvtx->Branch("fIndexBCs", &mccollision.fIndexBCs, "fIndexBCs/I"); tMCvtx->Branch("fGeneratorsID", &mccollision.fGeneratorsID, "fGeneratorsID/S"); tMCvtx->Branch("fPosX", &mccollision.fPosX, "fPosX/F"); tMCvtx->Branch("fPosY", &mccollision.fPosY, "fPosY/F"); @@ -69,28 +207,30 @@ TTree* MakeTreeO2mccollision() tMCvtx->Branch("fT", &mccollision.fT, "fT/F"); tMCvtx->Branch("fWeight", &mccollision.fWeight, "fWeight/F"); tMCvtx->Branch("fImpactParameter", &mccollision.fImpactParameter, "fImpactParameter/F"); - return tMCvtx; + tMCvtx->SetBasketSize("*", fBasketSizeEvents); } + struct { int fRunNumber = -1; /// Run number ULong64_t fGlobalBC = 0u; /// Unique bunch crossing id. Contains period, orbit and bunch crossing numbers ULong64_t fTriggerMask = 0u; /// Trigger class mask -} bc; //! structure to keep trigger-related info +} bc; //! structure to keep trigger-related info -TTree* MakeTreeO2bc() +void MakeTreeO2bc() { - TTree* tBC = new TTree("O2bc", "BC info"); + TTree* tBC = CreateTree(kBC); tBC->Branch("fRunNumber", &bc.fRunNumber, "fRunNumber/I"); tBC->Branch("fGlobalBC", &bc.fGlobalBC, "fGlobalBC/l"); tBC->Branch("fTriggerMask", &bc.fTriggerMask, "fTriggerMask/l"); - return tBC; + tBC->SetBasketSize("*", fBasketSizeEvents); } struct { // Track data - Int_t fCollisionsID = -1; /// The index of the collision vertex in the TF, to which the track is attached - uint8_t fTrackType = 0; // Type of track: global, ITS standalone, tracklet, ... + Int_t fIndexCollisions = -1; /// The index of the collision vertex in the TF, to which the track is attached + + uint8_t fTrackType = 0; // Type of track: global, ITS standalone, tracklet, ... 
// In case we need connection to TOF clusters, activate next lines // Int_t fTOFclsIndex; /// The index of the associated TOF cluster @@ -101,45 +241,45 @@ struct { Float_t fAlpha = -999.f; /// Local <--> global coor.system rotation angle // Track parameters - Float_t fY = -999.f; /// fP[0] local Y-coordinate of a track (cm) - Float_t fZ = -999.f; /// fP[1] local Z-coordinate of a track (cm) - Float_t fSnp = -999.f; /// fP[2] local sine of the track momentum azimuthal angle - Float_t fTgl = -999.f; /// fP[3] tangent of the track momentum dip angle - Float_t fSigned1Pt = -999.f; /// fP[4] 1/pt (1/(GeV/c)) + Float_t fY = -999.f; /// fP[0] local Y-coordinate of a track (cm) + Float_t fZ = -999.f; /// fP[1] local Z-coordinate of a track (cm) + Float_t fSnp = -999.f; /// fP[2] local sine of the track momentum azimuthal angle + Float_t fTgl = -999.f; /// fP[3] tangent of the track momentum dip angle + Float_t fSigned1Pt = -999.f; /// fP[4] 1/pt (1/(GeV/c)) // "Covariance matrix" // The diagonal elements represent the errors = Sqrt(C[i,i]) // The off-diagonal elements are the correlations = C[i,j]/Sqrt(C[i,i])/Sqrt(C[j,j]) // The off-diagonal elements are multiplied by 128 (7bits) and packed in Char_t - Float_t fSigmaY = -999.f; /// Sqrt(fC[0]) - Float_t fSigmaZ = -999.f; /// Sqrt(fC[2]) - Float_t fSigmaSnp = -999.f; /// Sqrt(fC[5]) - Float_t fSigmaTgl = -999.f; /// Sqrt(fC[9]) - Float_t fSigma1Pt = -999.f; /// Sqrt(fC[14]) - Char_t fRhoZY = 0; /// 128*fC[1]/SigmaZ/SigmaY - Char_t fRhoSnpY = 0; /// 128*fC[3]/SigmaSnp/SigmaY - Char_t fRhoSnpZ = 0; /// 128*fC[4]/SigmaSnp/SigmaZ - Char_t fRhoTglY = 0; /// 128*fC[6]/SigmaTgl/SigmaY - Char_t fRhoTglZ = 0; /// 128*fC[7]/SigmaTgl/SigmaZ - Char_t fRhoTglSnp = 0; /// 128*fC[8]/SigmaTgl/SigmaSnp - Char_t fRho1PtY = 0; /// 128*fC[10]/Sigma1Pt/SigmaY - Char_t fRho1PtZ = 0; /// 128*fC[11]/Sigma1Pt/SigmaZ - Char_t fRho1PtSnp = 0; /// 128*fC[12]/Sigma1Pt/SigmaSnp - Char_t fRho1PtTgl = 0; /// 128*fC[13]/Sigma1Pt/SigmaTgl + Float_t 
fSigmaY = -999.f; /// Sqrt(fC[0]) + Float_t fSigmaZ = -999.f; /// Sqrt(fC[2]) + Float_t fSigmaSnp = -999.f; /// Sqrt(fC[5]) + Float_t fSigmaTgl = -999.f; /// Sqrt(fC[9]) + Float_t fSigma1Pt = -999.f; /// Sqrt(fC[14]) + Char_t fRhoZY = 0; /// 128*fC[1]/SigmaZ/SigmaY + Char_t fRhoSnpY = 0; /// 128*fC[3]/SigmaSnp/SigmaY + Char_t fRhoSnpZ = 0; /// 128*fC[4]/SigmaSnp/SigmaZ + Char_t fRhoTglY = 0; /// 128*fC[6]/SigmaTgl/SigmaY + Char_t fRhoTglZ = 0; /// 128*fC[7]/SigmaTgl/SigmaZ + Char_t fRhoTglSnp = 0; /// 128*fC[8]/SigmaTgl/SigmaSnp + Char_t fRho1PtY = 0; /// 128*fC[10]/Sigma1Pt/SigmaY + Char_t fRho1PtZ = 0; /// 128*fC[11]/Sigma1Pt/SigmaZ + Char_t fRho1PtSnp = 0; /// 128*fC[12]/Sigma1Pt/SigmaSnp + Char_t fRho1PtTgl = 0; /// 128*fC[13]/Sigma1Pt/SigmaTgl // Additional track parameters Float_t fTPCinnerP = -999.f; /// Full momentum at the inner wall of TPC for dE/dx PID // Track quality parameters - ULong64_t fFlags = 0u; /// Reconstruction status flags + UInt_t fFlags = 0u; /// Reconstruction status flags // Clusters and tracklets - UChar_t fITSClusterMap = 0u; /// ITS map of clusters, one bit per a layer - UChar_t fTPCNClsFindable = 0u; /// number of clusters that could be assigned in the TPC + UChar_t fITSClusterMap = 0u; /// ITS map of clusters, one bit per a layer + UChar_t fTPCNClsFindable = 0u; /// number of clusters that could be assigned in the TPC Char_t fTPCNClsFindableMinusFound = 0; /// difference between foundable and found clusters Char_t fTPCNClsFindableMinusCrossedRows = 0; /// difference between foundable clsuters and crossed rows - UChar_t fTPCNClsShared = 0u; /// Number of shared clusters - UChar_t fTRDPattern = 0u; /// Bit 0-5 if tracklet from TRD layer used for this track + UChar_t fTPCNClsShared = 0u; /// Number of shared clusters + UChar_t fTRDPattern = 0u; /// Bit 0-5 if tracklet from TRD layer used for this track // Chi2 Float_t fITSChi2NCl = -999.f; /// chi2/Ncl ITS @@ -148,141 +288,397 @@ struct { Float_t fTOFChi2 = -999.f; /// chi2 TOF match 
(?) // PID - Float_t fTPCSignal = -999.f; /// dE/dX TPC - Float_t fTRDSignal = -999.f; /// dE/dX TRD - Float_t fTOFSignal = -999.f; /// TOFsignal - Float_t fLength = -999.f; /// Int.Lenght @ TOF - Float_t fTOFExpMom = -999.f; /// TOF Expected momentum based on the expected time of pions -} mytracks; //! structure to keep track information - -TTree* MakeTreeO2track() + Float_t fTPCSignal = -999.f; /// dE/dX TPC + Float_t fTRDSignal = -999.f; /// dE/dX TRD + Float_t fTOFSignal = -999.f; /// TOFsignal + Float_t fTrackTime = -999.f; /// fTrackTime + Float_t fTrackTimeRes = -999.f; /// fTrackTimeRes + Float_t fLength = -999.f; /// Int.Lenght @ TOF + Float_t fTOFExpMom = -999.f; /// TOF Expected momentum based on the expected time of pions + + // Track extrapolation to EMCAL surface + Float_t fTrackEtaEMCAL = -999.f; /// Track eta at the EMCAL surface + Float_t fTrackPhiEMCAL = -999.f; /// Track phi at the EMCAL surface +} aod_track; //! structure to keep track information + +void MakeTreeO2track() { - TTree* fTracks = new TTree("O2track", "Barrel tracks"); - fTracks->Branch("fCollisionsID", &mytracks.fCollisionsID, "fCollisionsID/I"); - fTracks->Branch("fTrackType", &mytracks.fTrackType, "fTrackType/b"); - // fTracks->Branch("fTOFclsIndex", &mytracks.fTOFclsIndex, "fTOFclsIndex/I"); - // fTracks->Branch("fNTOFcls", &mytracks.fNTOFcls, "fNTOFcls/I"); - fTracks->Branch("fX", &mytracks.fX, "fX/F"); - fTracks->Branch("fAlpha", &mytracks.fAlpha, "fAlpha/F"); - fTracks->Branch("fY", &mytracks.fY, "fY/F"); - fTracks->Branch("fZ", &mytracks.fZ, "fZ/F"); - fTracks->Branch("fSnp", &mytracks.fSnp, "fSnp/F"); - fTracks->Branch("fTgl", &mytracks.fTgl, "fTgl/F"); - fTracks->Branch("fSigned1Pt", &mytracks.fSigned1Pt, "fSigned1Pt/F"); + TTree* tTracks = CreateTree(kTracks); + tTracks->Branch("fIndexCollisions", &aod_track.fIndexCollisions, "fIndexCollisions/I"); + tTracks->Branch("fTrackType", &aod_track.fTrackType, "fTrackType/b"); + tTracks->Branch("fX", &aod_track.fX, "fX/F"); + 
tTracks->Branch("fAlpha", &aod_track.fAlpha, "fAlpha/F"); + tTracks->Branch("fY", &aod_track.fY, "fY/F"); + tTracks->Branch("fZ", &aod_track.fZ, "fZ/F"); + tTracks->Branch("fSnp", &aod_track.fSnp, "fSnp/F"); + tTracks->Branch("fTgl", &aod_track.fTgl, "fTgl/F"); + tTracks->Branch("fSigned1Pt", &aod_track.fSigned1Pt, "fSigned1Pt/F"); + tTracks->SetBasketSize("*", fBasketSizeTracks); +} + +void MakeTreeO2trackCov() +{ + TTree* tTracksCov = CreateTree(kTracksCov); // Modified covariance matrix - fTracks->Branch("fSigmaY", &mytracks.fSigmaY, "fSigmaY/F"); - fTracks->Branch("fSigmaZ", &mytracks.fSigmaZ, "fSigmaZ/F"); - fTracks->Branch("fSigmaSnp", &mytracks.fSigmaSnp, "fSigmaSnp/F"); - fTracks->Branch("fSigmaTgl", &mytracks.fSigmaTgl, "fSigmaTgl/F"); - fTracks->Branch("fSigma1Pt", &mytracks.fSigma1Pt, "fSigma1Pt/F"); - fTracks->Branch("fRhoZY", &mytracks.fRhoZY, "fRhoZY/B"); - fTracks->Branch("fRhoSnpY", &mytracks.fRhoSnpY, "fRhoSnpY/B"); - fTracks->Branch("fRhoSnpZ", &mytracks.fRhoSnpZ, "fRhoSnpZ/B"); - fTracks->Branch("fRhoTglY", &mytracks.fRhoTglY, "fRhoTglY/B"); - fTracks->Branch("fRhoTglZ", &mytracks.fRhoTglZ, "fRhoTglZ/B"); - fTracks->Branch("fRhoTglSnp", &mytracks.fRhoTglSnp, "fRhoTglSnp/B"); - fTracks->Branch("fRho1PtY", &mytracks.fRho1PtY, "fRho1PtY/B"); - fTracks->Branch("fRho1PtZ", &mytracks.fRho1PtZ, "fRho1PtZ/B"); - fTracks->Branch("fRho1PtSnp", &mytracks.fRho1PtSnp, "fRho1PtSnp/B"); - fTracks->Branch("fRho1PtTgl", &mytracks.fRho1PtTgl, "fRho1PtTgl/B"); - // - fTracks->Branch("fTPCInnerParam", &mytracks.fTPCinnerP, "fTPCInnerParam/F"); - fTracks->Branch("fFlags", &mytracks.fFlags, "fFlags/l"); - fTracks->Branch("fITSClusterMap", &mytracks.fITSClusterMap, "fITSClusterMap/b"); - fTracks->Branch("fTPCNClsFindable", &mytracks.fTPCNClsFindable, "fTPCNClsFindable/b"); - fTracks->Branch("fTPCNClsFindableMinusFound",&mytracks.fTPCNClsFindableMinusFound, "fTPCNClsFindableMinusFound/B"); - fTracks->Branch("fTPCNClsFindableMinusCrossedRows", 
&mytracks.fTPCNClsFindableMinusCrossedRows, "fTPCNClsFindableMinusCrossedRows/B"); - fTracks->Branch("fTPCNClsShared", &mytracks.fTPCNClsShared, "fTPCNClsShared/b"); - fTracks->Branch("fTRDPattern", &mytracks.fTRDPattern, "fTRDPattern/b"); - fTracks->Branch("fITSChi2NCl", &mytracks.fITSChi2NCl, "fITSChi2NCl/F"); - fTracks->Branch("fTPCChi2NCl", &mytracks.fTPCChi2NCl, "fTPCChi2NCl/F"); - fTracks->Branch("fTRDChi2", &mytracks.fTRDChi2, "fTRDChi2/F"); - fTracks->Branch("fTOFChi2", &mytracks.fTOFChi2, "fTOFChi2/F"); - fTracks->Branch("fTPCSignal", &mytracks.fTPCSignal, "fTPCSignal/F"); - fTracks->Branch("fTRDSignal", &mytracks.fTRDSignal, "fTRDSignal/F"); - fTracks->Branch("fTOFSignal", &mytracks.fTOFSignal, "fTOFSignal/F"); - fTracks->Branch("fLength", &mytracks.fLength, "fLength/F"); - fTracks->Branch("fTOFExpMom", &mytracks.fTOFExpMom, "fTOFExpMom/F"); - return fTracks; + tTracksCov->Branch("fSigmaY", &aod_track.fSigmaY, "fSigmaY/F"); + tTracksCov->Branch("fSigmaZ", &aod_track.fSigmaZ, "fSigmaZ/F"); + tTracksCov->Branch("fSigmaSnp", &aod_track.fSigmaSnp, "fSigmaSnp/F"); + tTracksCov->Branch("fSigmaTgl", &aod_track.fSigmaTgl, "fSigmaTgl/F"); + tTracksCov->Branch("fSigma1Pt", &aod_track.fSigma1Pt, "fSigma1Pt/F"); + tTracksCov->Branch("fRhoZY", &aod_track.fRhoZY, "fRhoZY/B"); + tTracksCov->Branch("fRhoSnpY", &aod_track.fRhoSnpY, "fRhoSnpY/B"); + tTracksCov->Branch("fRhoSnpZ", &aod_track.fRhoSnpZ, "fRhoSnpZ/B"); + tTracksCov->Branch("fRhoTglY", &aod_track.fRhoTglY, "fRhoTglY/B"); + tTracksCov->Branch("fRhoTglZ", &aod_track.fRhoTglZ, "fRhoTglZ/B"); + tTracksCov->Branch("fRhoTglSnp", &aod_track.fRhoTglSnp, "fRhoTglSnp/B"); + tTracksCov->Branch("fRho1PtY", &aod_track.fRho1PtY, "fRho1PtY/B"); + tTracksCov->Branch("fRho1PtZ", &aod_track.fRho1PtZ, "fRho1PtZ/B"); + tTracksCov->Branch("fRho1PtSnp", &aod_track.fRho1PtSnp, "fRho1PtSnp/B"); + tTracksCov->Branch("fRho1PtTgl", &aod_track.fRho1PtTgl, "fRho1PtTgl/B"); + tTracksCov->SetBasketSize("*", fBasketSizeTracks); +} + +void 
MakeTreeO2trackExtra() +{ + TTree* tTracksExtra = CreateTree(kTracksExtra); + //Extra + tTracksExtra->Branch("fTPCInnerParam", &aod_track.fTPCinnerP, "fTPCInnerParam/F"); + tTracksExtra->Branch("fFlags", &aod_track.fFlags, "fFlags/i"); + tTracksExtra->Branch("fITSClusterMap", &aod_track.fITSClusterMap, "fITSClusterMap/b"); + tTracksExtra->Branch("fTPCNClsFindable", &aod_track.fTPCNClsFindable, "fTPCNClsFindable/b"); + tTracksExtra->Branch("fTPCNClsFindableMinusFound", &aod_track.fTPCNClsFindableMinusFound, "fTPCNClsFindableMinusFound/B"); + tTracksExtra->Branch("fTPCNClsFindableMinusCrossedRows", &aod_track.fTPCNClsFindableMinusCrossedRows, "fTPCNClsFindableMinusCrossedRows/B"); + tTracksExtra->Branch("fTPCNClsShared", &aod_track.fTPCNClsShared, "fTPCNClsShared/b"); + tTracksExtra->Branch("fTRDPattern", &aod_track.fTRDPattern, "fTRDPattern/b"); + tTracksExtra->Branch("fITSChi2NCl", &aod_track.fITSChi2NCl, "fITSChi2NCl/F"); + tTracksExtra->Branch("fTPCChi2NCl", &aod_track.fTPCChi2NCl, "fTPCChi2NCl/F"); + tTracksExtra->Branch("fTRDChi2", &aod_track.fTRDChi2, "fTRDChi2/F"); + tTracksExtra->Branch("fTOFChi2", &aod_track.fTOFChi2, "fTOFChi2/F"); + tTracksExtra->Branch("fTPCSignal", &aod_track.fTPCSignal, "fTPCSignal/F"); + tTracksExtra->Branch("fTRDSignal", &aod_track.fTRDSignal, "fTRDSignal/F"); + tTracksExtra->Branch("fTOFSignal", &aod_track.fTOFSignal, "fTOFSignal/F"); + tTracksExtra->Branch("fTrackTime", &aod_track.fTrackTime, "fTrackTime/F"); + tTracksExtra->Branch("fTrackTimeRes", &aod_track.fTrackTimeRes, "fTrackTimeRes/F"); + tTracksExtra->Branch("fLength", &aod_track.fLength, "fLength/F"); + tTracksExtra->Branch("fTOFExpMom", &aod_track.fTOFExpMom, "fTOFExpMom/F"); + tTracksExtra->Branch("fTrackEtaEMCAL", &aod_track.fTrackEtaEMCAL, "fTrackEtaEMCAL/F"); + tTracksExtra->Branch("fTrackPhiEMCAL", &aod_track.fTrackPhiEMCAL, "fTrackPhiEMCAL/F"); + tTracksExtra->SetBasketSize("*", fBasketSizeTracks); +} + +struct { + // RICH data + Int_t fIndexCollisions = -1; /// 
Collision ID + Int_t fIndexTracks = -1; /// Track ID + + Float_t fRICHSignal = -999.f; /// RICH signal + Float_t fRICHSignalError = -999.f; /// RICH signal error + Float_t fRICHDeltaEl = -999.f; /// Delta for El + Float_t fRICHDeltaMu = -999.f; /// Delta for Mu + Float_t fRICHDeltaPi = -999.f; /// Delta for Pi + Float_t fRICHDeltaKa = -999.f; /// Delta for Ka + Float_t fRICHDeltaPr = -999.f; /// Delta for Pr + Float_t fRICHNsigmaEl = -999.f; /// Nsigma for El + Float_t fRICHNsigmaMu = -999.f; /// Nsigma for Mu + Float_t fRICHNsigmaPi = -999.f; /// Nsigma for Pi + Float_t fRICHNsigmaKa = -999.f; /// Nsigma for Ka + Float_t fRICHNsigmaPr = -999.f; /// Nsigma for Pr +} rich, frich; //! structure to keep RICH info + +void MakeTreeO2rich() +{ + TTree* tRICH = CreateTree(kRICH); + tRICH->Branch("fIndexCollisions", &rich.fIndexCollisions, "fIndexCollisions/I"); + tRICH->Branch("fIndexTracks", &rich.fIndexTracks, "fIndexTracks/I"); + tRICH->Branch("fRICHSignal", &rich.fRICHSignal, "fRICHSignal/F"); + tRICH->Branch("fRICHSignalError", &rich.fRICHSignalError, "fRICHSignalError/F"); + tRICH->Branch("fRICHDeltaEl", &rich.fRICHDeltaEl, "fRICHDeltaEl/F"); + tRICH->Branch("fRICHDeltaMu", &rich.fRICHDeltaMu, "fRICHDeltaMu/F"); + tRICH->Branch("fRICHDeltaPi", &rich.fRICHDeltaPi, "fRICHDeltaPi/F"); + tRICH->Branch("fRICHDeltaKa", &rich.fRICHDeltaKa, "fRICHDeltaKa/F"); + tRICH->Branch("fRICHDeltaPr", &rich.fRICHDeltaPr, "fRICHDeltaPr/F"); + tRICH->Branch("fRICHNsigmaEl", &rich.fRICHNsigmaEl, "fRICHNsigmaEl/F"); + tRICH->Branch("fRICHNsigmaMu", &rich.fRICHNsigmaMu, "fRICHNsigmaMu/F"); + tRICH->Branch("fRICHNsigmaPi", &rich.fRICHNsigmaPi, "fRICHNsigmaPi/F"); + tRICH->Branch("fRICHNsigmaKa", &rich.fRICHNsigmaKa, "fRICHNsigmaKa/F"); + tRICH->Branch("fRICHNsigmaPr", &rich.fRICHNsigmaPr, "fRICHNsigmaPr/F"); + tRICH->SetBasketSize("*", fBasketSizeTracks); +} + +void MakeTreeO2frich() +{ + TTree* tFRICH = CreateTree(kFRICH); + tFRICH->Branch("fIndexCollisions", &frich.fIndexCollisions, 
"fIndexCollisions/I"); + tFRICH->Branch("fIndexTracks", &frich.fIndexTracks, "fIndexTracks/I"); + tFRICH->Branch("fFRICHSignal", &frich.fRICHSignal, "fFRICHSignal/F"); + tFRICH->Branch("fFRICHSignalError", &frich.fRICHSignalError, "fFRICHSignalError/F"); + tFRICH->Branch("fFRICHDeltaEl", &frich.fRICHDeltaEl, "fFRICHDeltaEl/F"); + tFRICH->Branch("fFRICHDeltaMu", &frich.fRICHDeltaMu, "fFRICHDeltaMu/F"); + tFRICH->Branch("fFRICHDeltaPi", &frich.fRICHDeltaPi, "fFRICHDeltaPi/F"); + tFRICH->Branch("fFRICHDeltaKa", &frich.fRICHDeltaKa, "fFRICHDeltaKa/F"); + tFRICH->Branch("fFRICHDeltaPr", &frich.fRICHDeltaPr, "fFRICHDeltaPr/F"); + tFRICH->Branch("fFRICHNsigmaEl", &frich.fRICHNsigmaEl, "fFRICHNsigmaEl/F"); + tFRICH->Branch("fFRICHNsigmaMu", &frich.fRICHNsigmaMu, "fFRICHNsigmaMu/F"); + tFRICH->Branch("fFRICHNsigmaPi", &frich.fRICHNsigmaPi, "fFRICHNsigmaPi/F"); + tFRICH->Branch("fFRICHNsigmaKa", &frich.fRICHNsigmaKa, "fFRICHNsigmaKa/F"); + tFRICH->Branch("fFRICHNsigmaPr", &frich.fRICHNsigmaPr, "fFRICHNsigmaPr/F"); + tFRICH->SetBasketSize("*", fBasketSizeTracks); +} + + + +struct { + // MID data + Int_t fIndexCollisions = -1; /// Collision ID + Int_t fIndexTracks = -1; /// Track ID + Bool_t fMIDIsMuon = kFALSE; /// MID response for the muon hypothesis +} mid; //! 
structure to keep MID info + +void MakeTreeO2mid() +{ + TTree* tMID = CreateTree(kMID); + tMID->Branch("fIndexCollisions", &mid.fIndexCollisions, "fIndexCollisions/I"); + tMID->Branch("fIndexTracks", &mid.fIndexTracks, "fIndexTracks/I"); + tMID->Branch("fMIDIsMuon", &mid.fMIDIsMuon, "fMIDIsMuon/b"); + tMID->SetBasketSize("*", fBasketSizeTracks); +} + +struct { + // Forward TOF data + Int_t fIndexCollisions = -1; /// Collision ID + Int_t fIndexTracks = -1; /// Track ID + + Float_t fFTOFLength = -999.f; /// Forward TOF signal + Float_t fFTOFSignal = -999.f; /// Forward TOF signal + Float_t fFTOFDeltaEl = -999.f; /// Delta for El + Float_t fFTOFDeltaMu = -999.f; /// Delta for Mu + Float_t fFTOFDeltaPi = -999.f; /// Delta for Pi + Float_t fFTOFDeltaKa = -999.f; /// Delta for Ka + Float_t fFTOFDeltaPr = -999.f; /// Delta for Pr + Float_t fFTOFNsigmaEl = -999.f; /// Nsigma for El + Float_t fFTOFNsigmaMu = -999.f; /// Nsigma for Mu + Float_t fFTOFNsigmaPi = -999.f; /// Nsigma for Pi + Float_t fFTOFNsigmaKa = -999.f; /// Nsigma for Ka + Float_t fFTOFNsigmaPr = -999.f; /// Nsigma for Pr +} ftof; //! 
structure to keep Forward TOF info + +void MakeTreeO2ftof() +{ + TTree* tFTOF = CreateTree(kFTOF); + tFTOF->Branch("fIndexCollisions", &ftof.fIndexCollisions, "fIndexCollisions/I"); + tFTOF->Branch("fIndexTracks", &ftof.fIndexTracks, "fIndexTracks/I"); + tFTOF->Branch("fFTOFLength", &ftof.fFTOFLength, "fFTOFLength/F"); + tFTOF->Branch("fFTOFSignal", &ftof.fFTOFSignal, "fFTOFSignal/F"); + tFTOF->Branch("fFTOFDeltaEl", &ftof.fFTOFDeltaEl, "fFTOFDeltaEl/F"); + tFTOF->Branch("fFTOFDeltaMu", &ftof.fFTOFDeltaMu, "fFTOFDeltaMu/F"); + tFTOF->Branch("fFTOFDeltaPi", &ftof.fFTOFDeltaPi, "fFTOFDeltaPi/F"); + tFTOF->Branch("fFTOFDeltaKa", &ftof.fFTOFDeltaKa, "fFTOFDeltaKa/F"); + tFTOF->Branch("fFTOFDeltaPr", &ftof.fFTOFDeltaPr, "fFTOFDeltaPr/F"); + tFTOF->Branch("fFTOFNsigmaEl", &ftof.fFTOFNsigmaEl, "fFTOFNsigmaEl/F"); + tFTOF->Branch("fFTOFNsigmaMu", &ftof.fFTOFNsigmaMu, "fFTOFNsigmaMu/F"); + tFTOF->Branch("fFTOFNsigmaPi", &ftof.fFTOFNsigmaPi, "fFTOFNsigmaPi/F"); + tFTOF->Branch("fFTOFNsigmaKa", &ftof.fFTOFNsigmaKa, "fFTOFNsigmaKa/F"); + tFTOF->Branch("fFTOFNsigmaPr", &ftof.fFTOFNsigmaPr, "fFTOFNsigmaPr/F"); + tFTOF->SetBasketSize("*", fBasketSizeTracks); +} + +struct { + // ALICE3 PhotonConversion + Int_t fIndexCollisions = -1; /// Collision ID + Int_t fIndexMcParticles = -1; /// Particle ID + Int_t fIndexTracks = -1; /// Track ID + + Float_t fPx = -999.f; /// x component of momentum + Float_t fPy = -999.f; /// y component of momentum + Float_t fPz = -999.f; /// z component of momentum + +} photon; //! 
structure to keep PhotonConversion info + +void MakeTreeO2photon() +{ + TTree* tPhoton = CreateTree(kA3Photon); + tPhoton->Branch("fIndexCollisions", &photon.fIndexCollisions, "fIndexCollisions/I"); + tPhoton->Branch("fIndexMcParticles", &photon.fIndexMcParticles, "fIndexMcParticles/I"); + tPhoton->Branch("fIndexTracks", &photon.fIndexTracks, "fIndexTracks/I"); + tPhoton->Branch("fPx", &photon.fPx, "fPx/F"); + tPhoton->Branch("fPy", &photon.fPy, "fPy/F"); + tPhoton->Branch("fPz", &photon.fPz, "fPz/F"); + tPhoton->SetBasketSize("*", fBasketSizeTracks); +} + +struct { + // ALICE3 ECAL data + Int_t fIndexCollisions = -1; /// Collision ID + Int_t fIndexMcParticles = -1; /// Particle ID + Int_t fIndexTracks = -1; /// Track ID + + Double_t fPx = -1.e10; /// px + Double_t fPy = -1.e10; /// py + Double_t fPz = -1.e10; /// pz + Double_t fE = -1.e10; /// E + Float_t fPosZ = -999.f; /// Position in Z + Float_t fPosPhi = -999.f; /// Position in phi +} ecal; //! structure to keep ECAL info + +void MakeTreeO2ecal() +{ + TTree* tECAL = CreateTree(kA3ECAL); + tECAL->Branch("fIndexCollisions", &ecal.fIndexCollisions, "fIndexCollisions/I"); + tECAL->Branch("fIndexMcParticles", &ecal.fIndexMcParticles, "fIndexMcParticles/I"); + tECAL->Branch("fIndexTracks", &ecal.fIndexTracks, "fIndexTracks/I"); + tECAL->Branch("fPx", &ecal.fPx, "fPx/D"); + tECAL->Branch("fPy", &ecal.fPy, "fPy/D"); + tECAL->Branch("fPz", &ecal.fPz, "fPz/D"); + tECAL->Branch("fE", &ecal.fE, "fE/D"); + tECAL->Branch("fPosZ", &ecal.fPosZ, "fPosZ/F"); + tECAL->Branch("fPosPhi", &ecal.fPosPhi, "fPosPhi/F"); + tECAL->SetBasketSize("*", fBasketSizeTracks); } struct { // MC particle - Int_t fMcCollisionsID = -1; /// The index of the MC collision vertex + Int_t fIndexMcCollisions = -1; /// The index of the MC collision vertex // MC information (modified version of TParticle - Int_t fPdgCode = -99999; /// PDG code of the particle - Int_t fStatusCode = -99999; /// generation status code - uint8_t fFlags = 0; /// See enum 
MCParticleFlags - Int_t fMother0 = 0; /// Indices of the mother particles - Int_t fMother1 = 0; - Int_t fDaughter0 = 0; /// Indices of the daughter particles - Int_t fDaughter1 = 0; - Float_t fWeight = 1; /// particle weight from the generator or ML + Int_t fPdgCode = -99999; /// PDG code of the particle + Int_t fStatusCode = -99999; /// generation status code + uint8_t fFlags = 0; /// See enum MCParticleFlags + Int_t fIndexMcParticles_Mother0 = 0; /// Indices of the mother particles + Int_t fIndexMcParticles_Mother1 = 0; + Int_t fIndexMcParticles_Daughter0 = 0; /// Indices of the daughter particles + Int_t fIndexMcParticles_Daughter1 = 0; + Float_t fWeight = 1; /// particle weight from the generator or ML Float_t fPx = -999.f; /// x component of momentum Float_t fPy = -999.f; /// y component of momentum Float_t fPz = -999.f; /// z component of momentum - Float_t fE = -999.f; /// Energy (covers the case of resonances, no need for calculated mass) + Float_t fE = -999.f; /// Energy (covers the case of resonances, no need for calculated mass) Float_t fVx = -999.f; /// x of production vertex Float_t fVy = -999.f; /// y of production vertex Float_t fVz = -999.f; /// z of production vertex Float_t fVt = -999.f; /// t of production vertex // We do not use the polarisation so far -} mcparticle; //! MC particles from the kinematics tree +} mcparticle; //! 
MC particles from the kinematics tree -TTree* MakeTreeO2mcparticle() +void MakeTreeO2mcparticle() { - TTree* tKinematics = new TTree("O2mcparticle", "Kinematics"); - tKinematics->Branch("fMcCollisionsID", &mcparticle.fMcCollisionsID, "fMcCollisionsID/I"); - tKinematics->Branch("fPdgCode", &mcparticle.fPdgCode, "fPdgCode/I"); - tKinematics->Branch("fStatusCode", &mcparticle.fStatusCode, "fStatusCode/I"); - tKinematics->Branch("fFlags", &mcparticle.fFlags, "fFlags/b"); - tKinematics->Branch("fMother0", &mcparticle.fMother0, "fMother0/I"); - tKinematics->Branch("fMother1", &mcparticle.fMother1, "fMother1/I"); - tKinematics->Branch("fDaughter0", &mcparticle.fDaughter0, "fDaughter0/I"); - tKinematics->Branch("fDaughter1", &mcparticle.fDaughter1, "fDaughter1/I"); - tKinematics->Branch("fWeight", &mcparticle.fWeight, "fWeight/F"); - tKinematics->Branch("fPx", &mcparticle.fPx, "fPx/F"); - tKinematics->Branch("fPy", &mcparticle.fPy, "fPy/F"); - tKinematics->Branch("fPz", &mcparticle.fPz, "fPz/F"); - tKinematics->Branch("fE", &mcparticle.fE, "fE/F"); - tKinematics->Branch("fVx", &mcparticle.fVx, "fVx/F"); - tKinematics->Branch("fVy", &mcparticle.fVy, "fVy/F"); - tKinematics->Branch("fVz", &mcparticle.fVz, "fVz/F"); - tKinematics->Branch("fVt", &mcparticle.fVt, "fVt/F"); - return tKinematics; + TTree* Kinematics = CreateTree(kMcParticle); + Kinematics->Branch("fIndexMcCollisions", &mcparticle.fIndexMcCollisions, "fIndexMcCollisions/I"); + Kinematics->Branch("fPdgCode", &mcparticle.fPdgCode, "fPdgCode/I"); + Kinematics->Branch("fStatusCode", &mcparticle.fStatusCode, "fStatusCode/I"); + Kinematics->Branch("fFlags", &mcparticle.fFlags, "fFlags/b"); + Kinematics->Branch("fIndexMcParticles_Mother0", &mcparticle.fIndexMcParticles_Mother0, "fIndexMcParticles_Mother0/I"); + Kinematics->Branch("fIndexMcParticles_Mother1", &mcparticle.fIndexMcParticles_Mother1, "fIndexMcParticles_Mother1/I"); + Kinematics->Branch("fIndexMcParticles_Daughter0", &mcparticle.fIndexMcParticles_Daughter0, 
"fIndexMcParticles_Daughter0/I"); + Kinematics->Branch("fIndexMcParticles_Daughter1", &mcparticle.fIndexMcParticles_Daughter1, "fIndexMcParticles_Daughter1/I"); + Kinematics->Branch("fWeight", &mcparticle.fWeight, "fWeight/F"); + Kinematics->Branch("fPx", &mcparticle.fPx, "fPx/F"); + Kinematics->Branch("fPy", &mcparticle.fPy, "fPy/F"); + Kinematics->Branch("fPz", &mcparticle.fPz, "fPz/F"); + Kinematics->Branch("fE", &mcparticle.fE, "fE/F"); + Kinematics->Branch("fVx", &mcparticle.fVx, "fVx/F"); + Kinematics->Branch("fVy", &mcparticle.fVy, "fVy/F"); + Kinematics->Branch("fVz", &mcparticle.fVz, "fVz/F"); + Kinematics->Branch("fVt", &mcparticle.fVt, "fVt/F"); + Kinematics->SetBasketSize("*", fBasketSizeTracks); } struct { // Track label to find the corresponding MC particle - UInt_t fLabel = 0; /// Track label - UShort_t fLabelMask = 0; /// Bit mask to indicate detector mismatches (bit ON means mismatch) - /// Bit 0-6: mismatch at ITS layer - /// Bit 7-9: # of TPC mismatches in the ranges 0, 1, 2-3, 4-7, 8-15, 16-31, 32-63, >64 - /// Bit 10: TRD, bit 11: TOF, bit 15: negative label sign -} mctracklabel; //! Track labels - -TTree *MakeTreeO2mctracklabel() + Int_t fIndexMcParticles = 0; /// Track label + UShort_t fMcMask = 0; /// Bit mask to indicate detector mismatches (bit ON means mismatch) + /// Bit 0-6: mismatch at ITS layer + /// Bit 7-9: # of TPC mismatches in the ranges 0, 1, 2-3, 4-7, 8-15, 16-31, 32-63, >64 + /// Bit 10: TRD, bit 11: TOF, bit 15: negative label sign +} mctracklabel; //! 
Track labels + +void MakeTreeO2mctracklabel() { - TTree* tLabels = new TTree("O2mctracklabel", "MC track labels"); - tLabels->Branch("fLabel", &mctracklabel.fLabel, "fLabel/i"); - tLabels->Branch("fLabelMask", &mctracklabel.fLabelMask, "fLabelMask/s"); - return tLabels; + TTree* tLabels = CreateTree(kMcTrackLabel); + tLabels->Branch("fIndexMcParticles", &mctracklabel.fIndexMcParticles, "fIndexMcParticles/I"); + tLabels->Branch("fMcMask", &mctracklabel.fMcMask, "fMcMask/s"); + tLabels->SetBasketSize("*", fBasketSizeTracks); } + struct { // MC collision label - UInt_t fLabel = 0; /// Collision label - UShort_t fLabelMask = 0; /// Bit mask to indicate collision mismatches (bit ON means mismatch) - /// bit 15: negative label sign -} mccollisionlabel; //! Collision labels + Int_t fIndexMcCollisions = 0; /// Collision label + UShort_t fMcMask = 0; /// Bit mask to indicate collision mismatches (bit ON means mismatch) + /// bit 15: negative label sign +} mccollisionlabel; //! Collision labels + +void MakeTreeO2mccollisionlabel() +{ + TTree* tCollisionLabels = CreateTree(kMcCollisionLabel); + tCollisionLabels->Branch("fIndexMcCollisions", &mccollisionlabel.fIndexMcCollisions, "fIndexMcCollisions/I"); + tCollisionLabels->Branch("fMcMask", &mccollisionlabel.fMcMask, "fMcMask/s"); + tCollisionLabels->SetBasketSize("*", fBasketSizeEvents); +} -TTree* MakeTreeO2mccollisionlabel() +void FillTree(TreeIndex t) { - TTree* tCollisionLabels = new TTree("O2mccollisionlabel", "MC collision labels"); - tCollisionLabels->Branch("fLabel", &mccollisionlabel.fLabel, "fLabel/i"); - tCollisionLabels->Branch("fLabelMask", &mccollisionlabel.fLabelMask, "fLabelMask/s"); - return tCollisionLabels; + Trees[t]->Fill(); + eventextra.fNentries[t]++; +} + +// Class to hold the track information for the O2 vertexing +class TrackAlice3 : public o2::track::TrackParCov +{ + using TimeEst = o2::dataformats::TimeStampWithError; + + public: + TrackAlice3() = default; + ~TrackAlice3() = default; + 
TrackAlice3(const TrackAlice3& src) = default; + TrackAlice3(const o2::track::TrackParCov& src, const float t = 0, const float te = 1, const int label = 0) : o2::track::TrackParCov(src), timeEst{t, te}, mLabel{label} {} + const TimeEst& getTimeMUS() const { return timeEst; } + const int mLabel = 0; + const TimeEst timeEst = {}; ///< time estimate in ns +}; + +// Function to check if a particle is a secondary based on its history +template +bool IsSecondary(const T& particleTree, const int& index) +{ + auto particle = (GenParticle*)particleTree->At(index); + if (particle->M1 < 0) { + return false; + } + + auto mother = (GenParticle*)particleTree->At(particle->M1); + if (!mother) { + return false; + } + // Ancore di salvezza :) + if ((particle->M1 == particle->M2) && (particle->M1 == 0)) { + return false; + } + if (abs(mother->PID) <= 8) { + return false; + } + // 100% secondaries if true here + switch (abs(mother->PID)) { + // K0S + case 310: + // Lambda + case 3122: + // Sigma0 + case 3212: + // Sigma- + case 3112: + // Sigma+ + case 3222: + // Xi- + case 3312: + // Xi0 + case 3322: + // Omega- + case 3334: + return true; + break; + } + + return IsSecondary(particleTree, particle->M1); } diff --git a/examples/aod/inspectTOF.C b/examples/aod/inspectTOF.C new file mode 100644 index 0000000..3aa0857 --- /dev/null +++ b/examples/aod/inspectTOF.C @@ -0,0 +1,91 @@ +R__LOAD_LIBRARY(libDelphes) +R__LOAD_LIBRARY(libDelphesO2) + +#include "createO2tables.h" + +using O2Track = o2::track::TrackParCov; + +void inspectTOF(const char* filename) +{ + auto fin = TFile::Open(filename); + + auto tcollision = (TTree*)fin->Get("O2collision"); + auto ncollision = tcollision->GetEntries(); + ConnectTreeO2collision(tcollision); + + auto ttracks = (TTree*)fin->Get("O2track"); + auto ntracks = ttracks->GetEntries(); + ConnectTreeO2track(ttracks); + + /** histograms **/ + auto hTZero = new TH1F("hTZero", ";t_{0} (ns)", 1000, -1., 1.); + auto hBetaP = new TH2F("hBetaP", ";#it{p} 
(GeV/#it{c});#it{v}/#it{c}", 500., 0., 5., 500, 0.1, 1.1); + auto hNsigmaP = new TH2F("hNsigmaP", ";#it{p} (GeV/#it{c});n#sigma_{K}", 500., 0., 5., 500, -100., 100.); + + /** loop over collisions **/ + for (int icollision = 0; icollision < ncollision; ++icollision) { + tcollision->GetEntry(icollision); + auto t0 = collision.fCollisionTime; + auto t0e = collision.fCollisionTimeRes; + hTZero->Fill(t0 * 0.001); + } + + /** loop over tracks **/ + for (int itrack = 0; itrack < ntracks; ++itrack) { + + /** get track **/ + ttracks->GetEntry(itrack); + + /** get collision and start time **/ + auto icollision = mytracks.fCollisionsID; + tcollision->GetEntry(icollision); + auto t0 = collision.fCollisionTime; + auto t0e = collision.fCollisionTimeRes; + + /** create o2 track **/ + O2Track o2track; + o2track.setX(mytracks.fX); + o2track.setAlpha(mytracks.fAlpha); + o2track.setY(mytracks.fY); + o2track.setZ(mytracks.fZ); + o2track.setSnp(mytracks.fSnp); + o2track.setTgl(mytracks.fTgl); + o2track.setQ2Pt(mytracks.fSigned1Pt); + + /** get track information **/ + auto p = o2track.getP(); + auto p2 = p * p; + auto pe = p * mytracks.fSigma1Pt; // this is wrong, needs to add pz contribution + auto d0 = mytracks.fY; + auto d0e = mytracks.fSigmaY; + + /** 3-sigma DCA cut on primaries **/ + if (fabs(d0 / d0e) > 1.) + continue; + + /** get TOF information **/ + auto L = mytracks.fLength; + auto t = mytracks.fTOFSignal; + auto tof = t - t0; + auto beta = L / tof / 0.029979246; + auto mass = 0.49367700; + auto mass2 = mass * mass; + auto texp = L / p / 0.029979246 * hypot(mass, p); + auto texpe = L / 0.029979246 / p / p * mass * mass * hypot(mass, p) * pe; + auto sigma = hypot(hypot(20., t0e), texpe); + auto nsigma = (tof - texp) / sigma; + + hBetaP->Fill(p, beta); + hNsigmaP->Fill(p, nsigma); + } + + /** write output **/ + auto fout = TFile::Open(std::string("inspectTOF." 
+ std::string(filename)).c_str(), "RECREATE"); + hTZero->Write(); + hBetaP->Write(); + hNsigmaP->Write(); + fout->Close(); + + /** close input **/ + fin->Close(); +} diff --git a/examples/aod/muonAccEffPID.root b/examples/aod/muonAccEffPID.root new file mode 100644 index 0000000..4d83486 Binary files /dev/null and b/examples/aod/muonAccEffPID.root differ diff --git a/examples/cards/propagate.2kG.photons.tcl b/examples/cards/propagate.2kG.photons.tcl new file mode 100644 index 0000000..dabe33a --- /dev/null +++ b/examples/cards/propagate.2kG.photons.tcl @@ -0,0 +1,71 @@ +set barrel_Bz 0.2 +set barrel_Radius 100.e-2 +set barrel_HalfLength 200.e-2 +set barrel_TimeResolution 0.020e-9 +set barrel_Acceptance { 1.0 + 1.0 * fabs(eta) < 1.443 } + +set ExecutionPath { + + ParticlePropagator + + Merger + Acceptance + DecayFilter + TimeSmearing + + Acceptance_neutral + + TreeWriter +} + +# module Module Name +module ParticlePropagator ParticlePropagator { + set InputArray Delphes/stableParticles + set OutputArray stableParticles + set ChargedHadronOutputArray chargedHadrons + set ElectronOutputArray electrons + set MuonOutputArray muons + set NeutralOutputArray neutral + + set Bz $barrel_Bz + set Radius $barrel_Radius + set HalfLength $barrel_HalfLength +} + +module Merger Merger { + add InputArray ParticlePropagator/chargedHadrons + add InputArray ParticlePropagator/electrons + add InputArray ParticlePropagator/muons + set OutputArray tracks +} + +module Efficiency Acceptance { + add InputArray Merger/tracks + add OutputArray tracks + set EfficiencyFormula $barrel_Acceptance +} + +module Efficiency Acceptance_neutral { + add InputArray ParticlePropagator/neutral + add OutputArray neutral + set EfficiencyFormula $barrel_Acceptance +} + +module DecayFilter DecayFilter { + set InputArray Acceptance/tracks + set OutputArray tracks +} + +module TimeSmearing TimeSmearing { + add InputArray DecayFilter/tracks + add OutputArray tracks + set TimeResolution $barrel_TimeResolution +} + 
+module TreeWriter TreeWriter { + # add Branch InputArray BranchName BranchClass + add Branch Delphes/allParticles Particle GenParticle + add Branch TimeSmearing/tracks Track Track + add Branch Acceptance_neutral/neutral Neutral Track +} + diff --git a/examples/cards/propagate.2kG.tails.tcl b/examples/cards/propagate.2kG.tails.tcl new file mode 100644 index 0000000..71a482e --- /dev/null +++ b/examples/cards/propagate.2kG.tails.tcl @@ -0,0 +1,62 @@ +set barrel_Bz 0.2 +set barrel_Radius 100.e-2 +set barrel_HalfLength 200.e-2 +set barrel_TimeResolution 0.020e-9 +set barrel_TailRight 1.0 +set barrel_TailLeft 1.0 +set barrel_Acceptance { 0.0 + 1.0 * fabs(eta) < 1.443 } + +set ExecutionPath { + ParticlePropagator + Merger + Acceptance + DecayFilter + TimeSmearingTail + TreeWriter +} + +# module Module Name +module ParticlePropagator ParticlePropagator { + set InputArray Delphes/stableParticles + set OutputArray stableParticles + set ChargedHadronOutputArray chargedHadrons + set ElectronOutputArray electrons + set MuonOutputArray muons + + set Bz $barrel_Bz + set Radius $barrel_Radius + set HalfLength $barrel_HalfLength +} + +module Merger Merger { + add InputArray ParticlePropagator/chargedHadrons + add InputArray ParticlePropagator/electrons + add InputArray ParticlePropagator/muons + set OutputArray tracks +} + +module Efficiency Acceptance { + add InputArray Merger/tracks + add OutputArray tracks + set EfficiencyFormula $barrel_Acceptance +} + +module DecayFilter DecayFilter { + set InputArray Acceptance/tracks + set OutputArray tracks +} + +module TimeSmearingTail TimeSmearingTail { + add InputArray DecayFilter/tracks + add OutputArray tracks + set TimeResolution $barrel_TimeResolution + set TailRight $barrel_TailRight + set TailLeft $barrel_TailLeft +} + +module TreeWriter TreeWriter { + # add Branch InputArray BranchName BranchClass + add Branch Delphes/allParticles Particle GenParticle + add Branch TimeSmearingTail/tracks Track Track +} + diff --git 
a/examples/cards/propagate.2kG.tcl b/examples/cards/propagate.2kG.tcl index 8e0a90e..308aa72 100644 --- a/examples/cards/propagate.2kG.tcl +++ b/examples/cards/propagate.2kG.tcl @@ -8,6 +8,7 @@ set ExecutionPath { ParticlePropagator Merger Acceptance + DecayFilter TimeSmearing TreeWriter } @@ -38,8 +39,13 @@ module Efficiency Acceptance { set EfficiencyFormula $barrel_Acceptance } +module DecayFilter DecayFilter { + set InputArray Acceptance/tracks + set OutputArray tracks +} + module TimeSmearing TimeSmearing { - add InputArray Acceptance/tracks + add InputArray DecayFilter/tracks add OutputArray tracks set TimeResolution $barrel_TimeResolution } diff --git a/examples/o2kine/run.sh b/examples/o2kine/run.sh new file mode 100755 index 0000000..41548a2 --- /dev/null +++ b/examples/o2kine/run.sh @@ -0,0 +1,38 @@ +#! /usr/bin/env bash + +MODULES="TRK" +MODULES="CAVE PIPE ITS TPC" +NEVENTS=100 +NWORKERS=4 + +## create the transport.C macro +cat < transport.C +o2::data::Stack::TransportFcn +transport() +{ + return [](const TParticle& p, const std::vector& particles) -> bool { + auto eta = p.Eta(); + if (std::fabs(eta) > 1.0) return false; + auto pdg = std::abs(p.GetPdgCode()); + if (pdg == 11) return false; + if (pdg == 13) return false; + if (pdg == 22) return false; + if (pdg == 211) return false; + if (pdg == 321) return false; + if (pdg == 2212) return false; + return true; + }; +} +EOF + +## create the bkg_config.ini file with the required specs +cat < config.ini +[Stack] +transportPrimary = external +transportPrimaryFileName = transport.C +transportPrimaryFuncName = transport() +transportPrimaryInvert = false +EOF + +o2-sim -j ${NWORKERS} -n ${NEVENTS} -g pythia8hi -m ${MODULES} --configFile config.ini + diff --git a/examples/o2kine/smear_o2_kine.C b/examples/o2kine/smear_o2_kine.C new file mode 100644 index 0000000..deb8246 --- /dev/null +++ b/examples/o2kine/smear_o2_kine.C @@ -0,0 +1,64 @@ +class SmearO2KineGenerator : public o2::eventgen::GeneratorFromO2Kine +{ 
+ +public: + + SmearO2KineGenerator(const char *name) : GeneratorFromO2Kine(name) { }; + bool Init() override { + auto retval = o2::eventgen::GeneratorFromO2Kine::Init(); + setContinueMode(true); + return retval; }; + + // bool importParticles() override { + +protected: + + +}; + + +void +smear_o2_kine(const char *o2kinefilename) +{ + + o2::delphes::TrackSmearer smearer; + smearer.loadTable(11, "lutCovm.el.dat"); + smearer.loadTable(13, "lutCovm.mu.dat"); + smearer.loadTable(211, "lutCovm.pi.dat"); + smearer.loadTable(321, "lutCovm.ka.dat"); + smearer.loadTable(2212, "lutCovm.pr.dat"); + + auto gen = new SmearO2KineGenerator(o2kinefilename); + gen->Init(); + + // loop over events + while (gen->importParticles()) { + auto particles = gen->getParticles(); + + // loop over particles + for (auto & particle : particles) { + + // we did not transport them before, we do not smear them either + if (std::fabs(particle.Eta()) > 1.0) continue; + + // only particles to be transported, which are flagged as status code = 1 + // the particles that have been transported already have status code = 0 + if (particle.GetStatusCode() != 1) continue; + + // only particles that we know how to smear + // namely el, mu, pi, ka, pr + auto pdg = std::abs(particle.GetPdgCode()); + if (pdg != 11 && pdg != 13 && pdg != 211 && pdg != 321 && pdg != 2212) continue; + + // convert particle to o2 track and smear it + O2Track o2track; + o2::delphes::TrackUtils::convertTParticleToO2Track(particle, o2track); + float nch = 1600.; + o2track.print(); + if (!smearer.smearTrack(o2track, pdg, nch)) continue; + o2track.print(); + + } + } + +} diff --git a/examples/pythia8/decays/force_hadronic_B.cfg b/examples/pythia8/decays/force_hadronic_B.cfg new file mode 100644 index 0000000..266abf5 --- /dev/null +++ b/examples/pythia8/decays/force_hadronic_B.cfg @@ -0,0 +1,18 @@ +### Force golden B-hadron hadronic decay modes +### Based on AliRoot AliDecayerPythia8::ForceBeautyUpgrade, latest commit 3154cf8 on Nov 30, 
2020 + +# Bs -> Ds- pi+ +531:onMode = off +531:onIfMatch = 431 211 +# Lb: 50% to Lc any, 50% to Lc pion +#FIXME: we should find a way to generate 50% of inclusive or Lc decays +#FIXME: currently all the Lb -> Lc + pi +5122:onMode = off +#5122:onIfAll = 4122 +5122:onIfMatch = 4122 211 + +511:onMode = off +511:onIfMatch = 413 211 + +521:onMode = off +521:onIfMatch = 421 211 diff --git a/examples/pythia8/pythia8_KrKr.cfg b/examples/pythia8/pythia8_KrKr.cfg new file mode 100644 index 0000000..85375f0 --- /dev/null +++ b/examples/pythia8/pythia8_KrKr.cfg @@ -0,0 +1,19 @@ +### main +Main:numberOfEvents 1000 + +### beams +Beams:idA 1000822080 # Pb +Beams:idB 1000822080 # Pb +1000360840:all 84Kr 84Krbar 10 108 0 83.798 +Beams:eCM 6460. # GeV + +### heavy-ion settings (valid for Kr-Kr 6460 only) +HeavyIon:SigFitNGen 0 +HeavyIon:SigFitDefPar 13.88,1.84,0.22,0.0,0.0,0.0,0.0,0.0 +HeavyIon:bWidth 11. # impact parameter from 0-x [fm] + +### processes (apparently not to be defined) + +### decays +#ParticleDecays:limitTau0 on +#ParticleDecays:tau0Max 10. diff --git a/examples/pythia8/pythia8_OO.cfg b/examples/pythia8/pythia8_OO.cfg new file mode 100644 index 0000000..352e93a --- /dev/null +++ b/examples/pythia8/pythia8_OO.cfg @@ -0,0 +1,18 @@ +### main +Main:numberOfEvents 1000 + +### beams +Beams:idA 1000080160 # O +Beams:idB 1000080160 # O +Beams:eCM 7000. # GeV + +### heavy-ion settings (valid for O-O 7000) +HeavyIon:SigFitNGen = 0 +HeavyIon:SigFitDefPar = 14.30,1.87,0.23,0.0,0.0,0.0,0.0,0.0 +#HeavyIon::6.72 + +### processes (apparently not to be defined) + +### decays +ParticleDecays:limitTau0 off +ParticleDecays:tau0Max = 10 diff --git a/examples/pythia8/pythia8_PbPb.cfg b/examples/pythia8/pythia8_PbPb.cfg new file mode 100644 index 0000000..12e7276 --- /dev/null +++ b/examples/pythia8/pythia8_PbPb.cfg @@ -0,0 +1,18 @@ +### main +Main:numberOfEvents 100 + +### beams +Beams:idA 1000822080 # Pb +Beams:idB 1000822080 # Pb +Beams:eCM 5520. 
# GeV + +### heavy-ion settings (valid for Pb-Pb 5520 only) +HeavyIon:SigFitNGen = 0 +HeavyIon:SigFitDefPar = 13.88,1.84,0.22,0.0,0.0,0.0,0.0,0.0 +#HeavyIon:bWidth = 14.48 + +### processes (apparently not to be defined) + +### decays +ParticleDecays:limitTau0 off +ParticleDecays:tau0Max = 10 diff --git a/examples/pythia8/pythia8_XeXe.cfg b/examples/pythia8/pythia8_XeXe.cfg new file mode 100644 index 0000000..9c23e26 --- /dev/null +++ b/examples/pythia8/pythia8_XeXe.cfg @@ -0,0 +1,18 @@ +### main +Main:numberOfEvents 100 + +### beams +Beams:idA 1000541290 # Xe +Beams:idB 1000541290 # Xe +Beams:eCM 5860. # GeV + +### heavy-ion settings (valid for Xe-Xe 5860 only) +HeavyIon:SigFitNGen = 0 +HeavyIon:SigFitDefPar = 13.85,1.82,0.22,0.0,0.0,0.0,0.0,0.0 +#HeavyIon:bWidth = 12.53 + +### processes (apparently not to be defined) + +### decays +ParticleDecays:limitTau0 off +ParticleDecays:tau0Max = 10 diff --git a/examples/pythia8/pythia8_bbbar.cfg b/examples/pythia8/pythia8_bbbar.cfg new file mode 100644 index 0000000..a447d50 --- /dev/null +++ b/examples/pythia8/pythia8_bbbar.cfg @@ -0,0 +1,17 @@ +### main +Main:numberOfEvents 10000 + +### service +Next:numberShowEvent = 0 + +### random +Random:setSeed = on +Random:seed = 123456789 + +### beams +Beams:idA 2212 # proton +Beams:idB 2212 # proton +Beams:eCM 14000. 
# GeV + +### processes +HardQCD:hardbbbar on # g-g / q-qbar -> b-bbar diff --git a/examples/pythia8/pythia_nuclei.cfg b/examples/pythia8/pythia_nuclei.cfg new file mode 100644 index 0000000..db4a140 --- /dev/null +++ b/examples/pythia8/pythia_nuclei.cfg @@ -0,0 +1,7 @@ +#Config file to define the nuclei species that are not in vanilla pythia +1000020030:all = 3He 3Hebar 1 6 0 2.8094 +1000010030:all = 3Tr 3Trbar 1 3 0 2.8089218 +12345:all = 2CDeuteron 2CDeuteronbar 1 3 0 3.226 +# c-deuteron -> deuteron K- pi+ +12345:tau0 = 0.06000000000 +12345:addChannel = 1 .1 0 1000010020 -321 211 diff --git a/examples/pythia8/pythia_onia_X.cfg b/examples/pythia8/pythia_onia_X.cfg new file mode 100644 index 0000000..ed5d406 --- /dev/null +++ b/examples/pythia8/pythia_onia_X.cfg @@ -0,0 +1,64 @@ +### beams +Beams:idA 2212 # proton +Beams:idB 2212 # proton +Beams:eCM 14000. # GeV +#663:addChannel = 1 0. 0 1 -1 +#9920443:addParticle = X(3872) 3 0 0 3.87196 0.00012 +Onia:all = on +Random:setSeed = on + +Charmonium:states(3PJ) = 10441,20443,445,9920443 +Charmonium:O(3PJ)[3P0(1)] = 0.05,0.05,0.05,0.05 +Charmonium:O(3PJ)[3S1(8)] = 0.0031,0.0031,0.0031,0.0031 +Charmonium:gg2ccbar(3PJ)[3PJ(1)]g = off,off,off,on +Charmonium:qg2ccbar(3PJ)[3PJ(1)]q = off,off,off,on +Charmonium:qqbar2ccbar(3PJ)[3PJ(1)]g = off,off,off,on +Charmonium:gg2ccbar(3PJ)[3S1(8)]g = off,off,off,on +Charmonium:qg2ccbar(3PJ)[3S1(8)]q = off,off,off,on +Charmonium:qqbar2ccbar(3PJ)[3S1(8)]g = off,off,off,on + +### B0 -> J/psi X +511:onMode = off +511:onIfAny = 443 + +### B+/- -> J/psi X +521:onMode = off +521:onIfAny = 443 + +###B_s -> J/psi X +531:onMode = off +531:onIfAny = 443 + +##Lambda_b -> J/psi X +5122:onMode = off +5122:onIfAny = 443 + + +###J/psi -> mu+ mu- +443:onMode = off +443:onIfAll = 13 -13 +443:onIfAll = 11 -11 //e+e- + +###Psi(2S) -> mu+ mu- +100443:onMode = off +100443:onIfAll = 13 -13 +100443:onIfAll = 11 -11 //e+e- + +###Upsilon -> mu+ mu- +553:onMode = off +553:onIfAll = 13 -13 +553:onIfAll = 11 -11 
//e+e- + +###Upsilon(2S) -> mu+ mu- +100553:onMode = off +100553:onIfAll = 13 -13 +100553:onIfAll = 11 -11 //e+e- + +###Upsilon(3S) -> mu+ mu- +200553:onMode = off +200553:onIfAll = 13 -13 +200553:onIfAll = 11 -11 //e+e- + +9920443:addChannel = 1 1 0 443 211 -211 +9920443:onMode = off +9920443:onIfMatch = 443 211 -211 diff --git a/examples/pythia8/pythia_onia_chic.cfg b/examples/pythia8/pythia_onia_chic.cfg new file mode 100644 index 0000000..00d4028 --- /dev/null +++ b/examples/pythia8/pythia_onia_chic.cfg @@ -0,0 +1,79 @@ +### beams +Beams:idA 2212 # proton +Beams:idB 2212 # proton +Beams:eCM 14000. # GeV +#663:addChannel = 1 0. 0 1 -1 +#9920443:addParticle = X(3872) 3 0 0 3.87196 0.00012 +Onia:all = on +Random:setSeed = on + +Charmonium:states(3PJ) = 10441,20443,445,9920443 +Charmonium:O(3PJ)[3P0(1)] = 0.05,0.05,0.05,0.05 +Charmonium:O(3PJ)[3S1(8)] = 0.0031,0.0031,0.0031,0.0031 +Charmonium:gg2ccbar(3PJ)[3PJ(1)]g = on,on,on,off +Charmonium:qg2ccbar(3PJ)[3PJ(1)]q = on,on,on,off +Charmonium:qqbar2ccbar(3PJ)[3PJ(1)]g = on,on,on,off +Charmonium:gg2ccbar(3PJ)[3S1(8)]g = on,on,on,off +Charmonium:qg2ccbar(3PJ)[3S1(8)]q = on,on,on,off +Charmonium:qqbar2ccbar(3PJ)[3S1(8)]g = on,on,on,off + +### B0 -> J/psi X +511:onMode = off +#511:onIfAny = 443 + +### B+/- -> J/psi X +521:onMode = off +#521:onIfAny = 443 + +###B_s -> J/psi X +531:onMode = off +#531:onIfAny = 443 + +##Lambda_b -> J/psi X +5122:onMode = off +#5122:onIfAny = 443 + + +###J/psi -> mu+ mu- +443:onMode = off +#443:onIfAll = 13 -13 +443:onIfAll = 11 -11 //e+e- + +###Psi(2S) -> mu+ mu- +100443:onMode = off +#100443:onIfAll = 13 -13 +#100443:onIfAll = 11 -11 //e+e- + +###Upsilon -> mu+ mu- +553:onMode = off +#553:onIfAll = 13 -13 +#553:onIfAll = 11 -11 //e+e- + +###Upsilon(2S) -> mu+ mu- +100553:onMode = off +#100553:onIfAll = 13 -13 +#100553:onIfAll = 11 -11 //e+e- + +###Upsilon(3S) -> mu+ mu- +200553:onMode = off +#200553:onIfAll = 13 -13 +#200553:onIfAll = 11 -11 //e+e- + +#9920443:addChannel = 1 1 0 443 211 
-211 +#9920443:onMode = off +#9920443:onIfMatch = 443 211 -211 + + +### Chi_c0 +10441:onMode = off +10441:onIfAny = 443 22 + +### chi_c1 +20443:onMode = off +20443:onIfAny = 443 22 + +### chi_c2 +445:onMode = off +445:onIfAny = 443 22 + + diff --git a/examples/scripts/.gitignore b/examples/scripts/.gitignore new file mode 100644 index 0000000..958ff86 --- /dev/null +++ b/examples/scripts/.gitignore @@ -0,0 +1,24 @@ +createO2tables.h +createO2tables.C +*.log +*.so +*.d +*.pcm +*.json +*.cfg +*.txt +*.dat +*.tcl +*ACLiC* +tmpscript*.sh +runner*.sh +*.pyc +__pycache__ +DetectorK.* +create_libs.C +create_luts.C +fwdRes/fwdRes.C +lutWrite*.cc +*.hepmc +HistoManager.h +lutCovm.hh diff --git a/examples/scripts/D0-gun.sh b/examples/scripts/D0-gun.sh new file mode 100755 index 0000000..967372e --- /dev/null +++ b/examples/scripts/D0-gun.sh @@ -0,0 +1,18 @@ +#! /usr/bin/env bash + +NRUNS=10 +NEVENTS=10000 + +for I in $(seq 1 $NRUNS); do + + rpythia8-gun -n $NEVENTS \ + --output D0-gun.hepmc \ + --pdg 421 \ + --px 1. --py 0. --pz 0. \ + --xProd 1. --yProx 0. --zProd 0. \ + --config ~/alice/O2DPG/MC/config/PWGHF/pythia8/decayer/force_hadronic_D.cfg \ + --decay + + DelphesHepMC propagate.2kG.tcl delphes.root D0-gun.hepmc && rm -rf delphes.root + +done diff --git a/examples/scripts/README.md b/examples/scripts/README.md new file mode 100644 index 0000000..3c6b738 --- /dev/null +++ b/examples/scripts/README.md @@ -0,0 +1,12 @@ +# Running your simulation and creating your O2 tables +In order to run the simulation and create your own tables you need: +- Configuration file e.g. 
`default_configfile.ini` where you specify the running configuration +- The LUTs for your detector configuration (specified in the configuration file) +That's it, you are now able to run your simulation and get your tables + +As an example with 1000 events in 10 runs split across 10 jobs: +`./createO2tables.py default_configfile.ini --entry CCBAR --nevents 1000 --nruns 10 --njobs 10 -t` + +Simple QA tasks can be run on tables: +`./doanalysis.py 0` + diff --git a/examples/scripts/clean.sh b/examples/scripts/clean.sh old mode 100644 new mode 100755 index e64f5ac..7ce0e4f --- a/examples/scripts/clean.sh +++ b/examples/scripts/clean.sh @@ -1,6 +1,23 @@ -rm *.root +#!/usr/bin/env bash + +rm delphes*.root +rm AOD*.root rm *.cfg *.log *.txt +rm *.so *.d *.pcm +rm *ACLiC* rm *.tcl -rm *.dat +rm -f ./lutCovm.*.dat rm *.C *.h rm *.pdf *.swp +rm dpl-config.json imgui.ini +rm dpl-config_std.json +rm *.hepmc +rm runner*.sh +rm tmpscript*.sh +rm lutWrite*.cc +rm DetectorK.* +rm HistoManager.* +rm -r fwdRes +rm -r __pycache__ +rm lutCovm.hh +rm tof_mismatch_template_DF*.root diff --git a/examples/scripts/common.py b/examples/scripts/common.py new file mode 100644 index 0000000..56286ad --- /dev/null +++ b/examples/scripts/common.py @@ -0,0 +1,167 @@ +#! 
/usr/bin/env python3 + +""" +Common header for AOD python scripts +Author: Nicolò Jacazio, nicolo.jacazio@cern.ch +""" + +import argparse +import time +import multiprocessing +import sys +import os +import datetime + + +# Global running flags +verbose_mode = False + + +def set_verbose_mode(parser): + global verbose_mode + verbose_mode = parser.verbose + + +def get_default_parser(description, njobs=True): + parser = argparse.ArgumentParser(description=description) + parser.add_argument("--verbose", "-v", + action="store_true", help="Verbose mode.") + if njobs: + parser.add_argument("--njobs", "--jobs", "-j", type=int, + default=10, + help="Number of concurrent jobs, by default 10.") + return parser + + +class bcolors: + # Colors for bash + BOLD = "\033[1m" + UNDERLINE = "\033[4m" + HEADER = "\033[95m" + OKBLUE = "\033[94m" + BOKBLUE = BOLD + OKBLUE + OKGREEN = "\033[92m" + BOKGREEN = BOLD + OKGREEN + WARNING = "\033[93m" + BWARNING = BOLD + WARNING + FAIL = "\033[91m" + BFAIL = BOLD + FAIL + ENDC = "\033[0m" + + +def verbose_msg(*args, color=bcolors.OKBLUE): + if verbose_mode: + print("** ", color, *args, bcolors.ENDC) + + +def msg(*args, color=bcolors.BOKBLUE): + print(color, *args, bcolors.ENDC) + + +def fatal_msg(*args, fatal_message="Fatal Error!"): + msg("[FATAL]", *args, color=bcolors.BFAIL) + raise RuntimeError(fatal_message) + + +list_of_warnings = [] +try: + list_of_warnings = multiprocessing.Manager().list() +except: + verbose_msg("Could not load warnings from manager", + "Will not be printed from parallel processing") + + +def warning_msg(*args, add=True): + global list_of_warnings + if add: + list_of_warnings.append(args) + msg("[WARNING]", *args, color=bcolors.BWARNING) + + +def print_all_warnings(): + if len(list_of_warnings) > 0: + warning_msg("There were some warnings", add=False) + for i in list_of_warnings: + warning_msg(*i, add=False) + + +try: + import tqdm +except ImportError as e: + verbose_msg("Module tqdm is not imported.", + "Progress bar 
will not be available (you can install tqdm for the progress bar)") + + +def run_in_parallel(processes, job_runner, job_arguments, job_message, linearize_single_core=False, force_no_progress_line=False): + """ + In parallel processer of functions with a nice progress printing + If linearize_single_core is True and processes is 1 then the processing is not on multiple cores + """ + if processes == 1 and linearize_single_core: + msg(job_message, "using 1 core i.e. no multicores") + if "tqdm" not in sys.modules or force_no_progress_line: + for i in enumerate(job_arguments): + msg(f"Done: {i[0]+1},", len(job_arguments)-i[0]-1, "to go") + job_runner(i[1]) + else: + for i in tqdm.tqdm(job_arguments, bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}'): + job_runner(i) + return + + with multiprocessing.Pool(processes=processes) as pool: + msg(job_message) + result = [] + if "tqdm" not in sys.modules or force_no_progress_line: + for i in enumerate(pool.imap(job_runner, job_arguments)): + msg(f"Done: {i[0]+1},", len(job_arguments)-i[0]-1, "to go") + result.append(i) + else: + result = list(tqdm.tqdm(pool.imap(job_runner, job_arguments), + total=len(job_arguments), + bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}')) + return result + + +def run_cmd(cmd, comment="", check_status=True, log_file=None, print_output=False, time_it=False, throw_fatal=True): + """ + Function to run a command in bash, allows to check the status of the command and to log the command output. 
+ If throw_fatal is true and check_status is true then it will throw a fatal message if the command is not OK + If throw_fatal is true and check_status is true then it will return False if the command is not OK + """ + verbose_msg("Running", f"'{cmd}'", bcolors.BOKBLUE + comment) + try: + if time_it: + processing_time = time.time() + to_run = cmd + if check_status: + to_run = f"{cmd} && echo OK" + content = os.popen(to_run).read() + if content: + content = content.strip() + for i in content.strip().split("\n"): + verbose_msg("++", i) + if print_output: + for i in content.strip().split("\n"): + msg(i) + if log_file is not None: + with open(log_file, "a") as f_log: + f_log.write(f" -- {datetime.datetime.now()}\n") + f_log.write(f" '{cmd}'\n") + for i in content.strip().split("\n"): + f_log.write(i + "\n") + if "Encountered error" in content: + warning_msg("Error encountered runtime error in", cmd) + if check_status: + if "OK" not in content and "root" not in cmd: + if throw_fatal: + fatal_msg("Command", cmd, + "does not have the OK tag", content) + else: + return False + if time_it: + processing_time = time.time() - processing_time + msg(f"-- took {processing_time} seconds --", + color=bcolors.BOKGREEN) + return content + except: + fatal_msg("Error while running", f"'{cmd}'") diff --git a/examples/scripts/createO2tables.py b/examples/scripts/createO2tables.py new file mode 100755 index 0000000..5351090 --- /dev/null +++ b/examples/scripts/createO2tables.py @@ -0,0 +1,569 @@ +#! /usr/bin/env python3 + +""" +Handler to run the DelphesO2 framework and to create O2 analysis tables. 
+Author: Nicolò Jacazio, nicolo.jacazio@cern.ch +""" + +import configparser +import os +import shutil +import time +import glob +import random +from datetime import datetime +from common import bcolors, msg, fatal_msg, verbose_msg, run_in_parallel, set_verbose_mode, get_default_parser, run_cmd + + +def process_run(run_number): + processing_time = time.time() + verbose_msg("> starting run", run_number) + run_cmd(f"bash runner{run_number}.sh") + aod_name = f"AODRun5.{run_number}.root" + if not os.path.isfile(aod_name): + msg(f"++ something went wrong for run {run_number}, no output AOD file {aod_name} found.", + f"Please check: 'AODRun5.{run_number}.log'", + color=bcolors.FAIL) + verbose_msg("< complete run", run_number) + processing_time = time.time() - processing_time + verbose_msg(f"-- took {processing_time} seconds --", + color=bcolors.BOKGREEN) + + +def main(configuration_file, + config_entry, + njobs, + nruns, + nevents, + qa, + output_path, + clean_delphes_files, + create_luts, + turn_off_vertexing, + append_production, + use_nuclei, + avoid_file_copy, + debug_aod, + tof_mismatch): + arguments = locals() # List of arguments to put into the log + parser = configparser.RawConfigParser() + parser.read(configuration_file) + if config_entry not in parser.keys(): + k = list(parser.keys()) + k.sort() + fatal_msg(f"Did not find configuration entry '{config_entry}' in config file", + configuration_file + "\n\t Available entries:\n\t\t" + "\n\t\t".join(k)) + + run_cmd("./clean.sh > /dev/null 2>&1", check_status=False) + # Dictionary of fetched options + running_options = {} + for i in arguments: + running_options["ARG "+i] = arguments[i] + + def opt(entry, require=True): + try: + o = parser.get(config_entry, entry) + b = ['yes', 'no', 'on', 'off', 'true', 'false'] + for i in b: + if o.lower() == i: + o = parser.getboolean(config_entry, entry) + break + verbose_msg("Got option", entry, "=", f"'{o}'") + running_options[entry] = o + return o + except: + if require: + 
fatal_msg("Missing entry", f"'{entry}'", + "in configuration file", f"'{configuration_file}'") + return None + + # Config from the config file + # simulation configuration + if output_path is None: + output_path = "" + output_path = os.path.join(os.getcwd(), output_path) + msg("Output will be found in", f"'{output_path}'") + if not os.path.isdir(output_path): + msg("Creating output path") + os.makedirs(output_path) + if not os.path.isdir(output_path): + raise RuntimeError("Cannot find output path", output_path) + + # detector configuration + bField = opt("bField") + sigmaT = opt("sigmaT") + sigmaT0 = opt("sigmaT0") + tof_radius = opt("tof_radius") + rich_radius = opt("rich_radius") + rich_index = opt("rich_index") + forward_rich_index = opt("forward_rich_index") + minimum_track_radius = opt("minimum_track_radius") + etaMax = opt("etamax") + barrel_half_length = opt("barrel_half_length") + + # copy relevant files in the working directory + def do_copy(in_file, out_file=None, in_path=None): + """Function to copy files""" + in_file = os.path.normpath(in_file) # Normalize path + if out_file is None: + # If left unconfigured use the same name but put in the current path + out_file = os.path.basename(in_file) + out_file = os.path.normpath(out_file) # Normalize path + if in_path is not None: + in_file = os.path.join(in_path, in_file) + in_file = os.path.expanduser(os.path.expandvars(in_file)) + if avoid_file_copy: + if os.path.isfile(out_file) or (in_file == out_file): + verbose_msg("Skipping copy of", in_file, "to", + out_file, "because of --avoid-config-copy") + else: + verbose_msg("Copying", in_file, "to", + out_file, "because of --avoid-config-copy") + run_cmd(f"cp {in_file} {out_file}", comment="Copying files without python") + return + verbose_msg("Copying", in_file, "to", out_file) + shutil.copy2(in_file, out_file) + + # Fetching the propagation card + do_copy(opt("propagate_card"), "propagate.tcl", in_path=opt("card_path")) + + lut_path = opt("lut_path") + lut_tag 
= opt("lut_tag") + lut_tag = f"rmin{int(float(minimum_track_radius))}.{lut_tag}" + lut_particles = ["el", "mu", "pi", "ka", "pr"] + if use_nuclei: + lut_particles += ["de", "tr", "he3"] + if create_luts: + # Creating LUTs + verbose_msg("Creating LUTs") + lut_path = os.path.join(lut_path, "create_luts.sh") + run_cmd(f"{lut_path} -p {lut_path} -t {lut_tag} -B {float(bField)*0.1} -R {minimum_track_radius} -P \"0 1 2 3 4 5 6\" -j 1 -F 2>&1", + f"Creating the lookup tables with tag {lut_tag} from {lut_path} script") + else: + # Fetching LUTs + verbose_msg(f"Fetching LUTs with tag {lut_tag} from path {lut_path}") + for i in lut_particles: + lut_bg = "{}kG".format(bField).replace(".", "") + do_copy(f"lutCovm.{i}.{lut_bg}.{lut_tag}.dat", + f"lutCovm.{i}.dat", in_path=lut_path) + + # Checking that we actually have LUTs + for i in lut_particles: + i = f"lutCovm.{i}.dat" + if not os.path.isfile(i): + fatal_msg("Did not find LUT file", i) + + custom_gen = opt("custom_gen", require=False) + if custom_gen is None: + # Checking that the generators are defined + generators = opt("generators", require=False) + if generators is None: + fatal_msg("Did not find any generator configuration corresponding to the entry", + config_entry, "in your configuration file", configuration_file) + generators = generators.split(" ") + for i in generators: + do_copy(i) + msg("Using pythia with configuration", generators) + else: + def check_duplicate(option_name): + if f" {option_name}" in custom_gen: + fatal_msg(f"Remove '{option_name}' from", custom_gen, + "as it will be automatically set") + for i in ["--output", "-o", "--nevents", "-n"]: + check_duplicate(i) + if "INPUT_FILES" in custom_gen: + input_hepmc_files = custom_gen.replace("INPUT_FILES", + "").strip().split(" ") + input_hepmc_file_list = [] + for i in input_hepmc_files: + input_hepmc_file_list += glob.glob(os.path.normpath(i)) + + if len(input_hepmc_file_list) >= nruns: + input_hepmc_file_list = input_hepmc_file_list[0:nruns] + else: + 
nruns = len(input_hepmc_file_list) + + if len(input_hepmc_file_list) <= 0: + fatal_msg("Did not find any input file matching to the request:", + custom_gen) + custom_gen = f"INPUT_FILES "+" ".join(input_hepmc_file_list) + msg("Using", len(input_hepmc_file_list), + "input HepMC file" + + ("" if len(input_hepmc_file_list) == 1 else "s"), + input_hepmc_file_list) + else: + msg("Using custom generator", custom_gen) + + # Printing configuration + msg(" --- running createO2tables.py", color=bcolors.HEADER) + msg(" n. jobs =", njobs) + msg(" n. runs =", nruns) + msg(" events per run =", nevents) + msg(" tot. events =", "{:.0e}".format(nevents*nruns)) + msg(" LUT path =", f"'{lut_path}'") + msg(" --- with detector configuration", color=bcolors.HEADER) + msg(" B field =", bField, "[kG]") + msg(" Barrel radius =", minimum_track_radius, "[cm]") + msg(" Barrel half length =", barrel_half_length, "[cm]") + if create_luts: + msg(" Minimum track radius =", minimum_track_radius, "[cm]") + msg(" LUT =", lut_tag) + msg(" etaMax =", etaMax) + msg(" --- with TOF configuration", color=bcolors.HEADER) + msg(" sigmaT =", sigmaT, "[ns]") + msg(" sigmaT0 =", sigmaT0, "[ns]") + msg(" tof_radius =", tof_radius, "[cm]") + msg(" --- with RICH configuration", color=bcolors.HEADER) + msg(" rich_radius =", rich_radius, "[cm]") + msg(" rich_index =", rich_index) + msg(" --- with Forward RICH configuration", color=bcolors.HEADER) + msg(" forward_rich_index =", forward_rich_index) + + aod_path = opt("aod_path") + do_copy("createO2tables.h", in_path=aod_path) + do_copy("createO2tables.C", in_path=aod_path) + do_copy("muonAccEffPID.root", in_path=aod_path) + if qa: + do_copy("diagnostic_tools/dpl-config_std.json") + + def set_config(config_file, config, value): + config = config.strip() + value = value.strip() + config_string = f"{config} {value}" + run_cmd("sed -i -e \"" f"s/{config} .*$/{config_string}" "\" " + + config_file) + # Checking that the file has the correct configuration + with 
open(config_file) as f: + has_it = False + config_string = config_string.replace("\\", "").strip("/") + for lineno, line in enumerate(f): + if line.strip() == config_string: + verbose_msg(f"Found config string '{config_string}'", + f"at line #{lineno} '{line.strip()}'") + has_it = True + break + if not has_it: + fatal_msg("Configuration file", config_file, + f"does not have config string '{config_string}'") + + # set magnetic field + set_config("propagate.tcl", "set barrel_Bz", f"{bField}""e\-1/") + set_config("createO2tables.C", "const double Bz = ", f"{bField}""e\-1\;/") + if turn_off_vertexing: + set_config("createO2tables.C", + "constexpr bool do_vertexing = ", "false\;/") + else: # Check that the geometry file for the vertexing is there + if not os.path.isfile("o2sim_grp.root") or not os.path.isfile("o2sim_geometry.root"): + run_cmd("mkdir tmpo2sim && cd tmpo2sim && o2-sim -m PIPE ITS MFT -g boxgen -n 1 -j 1 --configKeyValues 'BoxGun.number=1' && cp o2sim_grp.root .. && cp o2sim_geometry.root .. && cd .. 
&& rm -r tmpo2sim") + if use_nuclei: + set_config("createO2tables.C", + "constexpr bool enable_nuclei = ", "true\;/") + if debug_aod: + set_config("createO2tables.C", + "constexpr bool debug_qa = ", "true\;/") + if tof_mismatch: + if not tof_mismatch in [1, 2]: + fatal_msg("tof_mismatch", tof_mismatch, "is not 1 or 2") + set_config("createO2tables.C", + "constexpr int tof_mismatch = ", f"{tof_mismatch}\;/") + if qa: + set_config("dpl-config_std.json", "\\\"d_bz\\\":", + "\\\""f"{bField}""\\\"\,/") + # set barrel_radius + set_config("propagate.tcl", "set barrel_Radius", + f"{minimum_track_radius}""e\-2/") + # set barrel_half_length + set_config("propagate.tcl", "set barrel_HalfLength", + f"{barrel_half_length}""e\-2/") + # set tof_radius + set_config("createO2tables.C", + "constexpr double tof_radius =", f"{tof_radius}""\;/") + # set tof_length + set_config("createO2tables.C", + "const double tof_length =", f"{barrel_half_length}""\;/") + # set rich_radius + set_config("createO2tables.C", + "constexpr double rich_radius =", f"{rich_radius}""\;/") + # set rich_index + set_config("createO2tables.C", + "const double rich_index =", f"{rich_index}""\;/") + # set forward_rich_index + set_config("createO2tables.C", + "const double forward_rich_index =", f"{forward_rich_index}""\;/") + # set acceptance + set_config("propagate.tcl", "set barrel_Acceptance", + "\{ 0.0 + 1.0 * fabs(eta) < "f"{etaMax}"" \}/") + # set time resolution + set_config("propagate.tcl", "set barrel_TimeResolution", + f"{sigmaT}""e\-9/") + set_config("createO2tables.C", + "const double tof_sigmat =", f"{sigmaT}""\;/") + set_config("createO2tables.C", + "const double tof_sigmat0 =", f"{sigmaT0}""\;/") + run_list = range(nruns) + if append_production: + if output_path is None: + fatal_msg("Output path is not defined, cannot append") + last_preexisting_aod = [each for each in os.listdir(output_path) + if each.endswith('.root') and "AODRun5" in each] + if len(last_preexisting_aod) == 0: + 
fatal_msg("Appending to a non existing production") + last_preexisting_aod = sorted([int(each.replace("AODRun5.", "").replace(".root", "")) + for each in last_preexisting_aod])[-1] + 1 + msg(f" Appending to production with {last_preexisting_aod} AODs", + color=bcolors.BWARNING) + run_list = range(last_preexisting_aod, + last_preexisting_aod + nruns) + + def configure_run(run_number): + # Create executable that runs Generation, Delphes and analysis + runner_file = f"runner{run_number}.sh" + with open(runner_file, "w") as f_run: + + def write_to_runner(line, log_file=None, check_status=False): + """ + Writes commands to runner + """ + log_line = "" + if log_file is not None: + log_line = f" &> {log_file} 2>&1" + line += log_line + line += "\n" + f_run.write(line) + if check_status: + f_run.write("\nReturnValue=$?\n") + f_run.write("if [[ $ReturnValue != 0 ]]; then\n") + f_run.write(" echo \"Encountered error with command: '") + line = line.replace(log_line, "") + f_run.write(line.replace("\"", "\\\"").strip()) + f_run.write("'\"\n") + if log_file is not None: + f_run.write(" echo \"Check log: '") + f_run.write(log_file.strip() + "'\"\n") + f_run.write(" exit $ReturnValue\n") + f_run.write("fi\n") + + def copy_and_link(file_name): + """ + In runner, copies file to output path (if different from current) and links it to current + """ + if os.path.normpath(output_path) != os.getcwd(): + write_to_runner(f"mv {file_name} {output_path} \n") + write_to_runner( + f"ln -s {os.path.join(output_path, file_name)} . \n") + + write_to_runner("#! 
/usr/bin/env bash\n") + delphes_file = f"delphes.{run_number}.root" + delphes_log_file = delphes_file.replace(".root", ".log") + hepmc_file = None + mc_seed = random.randint(1, 800000000) + if custom_gen: # Using HEPMC + hepmc_file = f"hepmcfile.{run_number}.hepmc" + if "INPUT_FILES" in custom_gen: + input_hepmc_file = custom_gen.replace("INPUT_FILES", + "").strip().split(" ") + input_hepmc_file = input_hepmc_file[run_number] + write_to_runner(f"ln -s {input_hepmc_file}" + f" {hepmc_file} \n") + else: + gen_log_file = f"gen.{run_number}.log" + custom_gen_option = f" --output {hepmc_file} --nevents {nevents} --seed {mc_seed}" + write_to_runner(custom_gen + custom_gen_option, + log_file=gen_log_file, check_status=True) + write_to_runner(f"DelphesHepMC propagate.tcl {delphes_file} {hepmc_file}", + log_file=delphes_log_file, check_status=True) + else: # Using DelphesPythia + # copy generator configuration + generator_cfg = f"generator.{run_number}.cfg" + generator_orig = generators[0].split("/")[-1] + do_copy(generator_orig, generator_cfg) + # Adjust configuration file + with open(generator_cfg, "a") as f_cfg: + # number of events and random seed + f_cfg.write(f"\n\n\n#### Additional part ###\n\n\n\n") + f_cfg.write(f"Main:numberOfEvents {nevents}\n") + f_cfg.write(f"Random:setSeed = on\n") + f_cfg.write(f"Random:seed = {mc_seed}\n") + # collision time spread [mm/c] + f_cfg.write("Beams:allowVertexSpread on \n") + f_cfg.write("Beams:sigmaTime 60.\n") + for i in generators[1:]: + with open(i.split("/")[-1], "r") as f_append: + f_cfg.write(f_append.read()) + write_to_runner(f"DelphesPythia8 propagate.tcl {generator_cfg} {delphes_file}", + log_file=delphes_log_file, + check_status=True) + aod_file = f"AODRun5.{run_number}.root" + aod_log_file = aod_file.replace(".root", ".log") + write_to_runner(f"root -l -b -q 'createO2tables.C+(\"{delphes_file}\", \"tmp_{aod_file}\", 0)'", + log_file=aod_log_file, + check_status=True) + # Check that there were no O2 errors + 
write_to_runner( + f"if grep -q \"\[ERROR\]\" {aod_log_file}; then echo \": got some errors in '{aod_log_file}'\" && echo \"Found some ERROR in this log\" >> {aod_log_file}; fi") + write_to_runner( + f"if grep -q \"\[FATAL\]\" {aod_log_file}; then echo \": got some fatals in '{aod_log_file}'\" && echo \"Found some FATAL in this log\" >> {aod_log_file} && exit 1; fi") + # Rename the temporary AODs to standard AODs + write_to_runner(f"mv tmp_{aod_file} {aod_file}", check_status=True) + if not clean_delphes_files: + copy_and_link(delphes_file) + if hepmc_file is not None: + copy_and_link(hepmc_file) + copy_and_link(aod_file) + if clean_delphes_files: + write_to_runner(f"rm {delphes_file}") + write_to_runner(f"rm {generator_cfg}") + if hepmc_file is not None: + write_to_runner(f"rm {hepmc_file}") + write_to_runner("exit 0\n") + + # Configuring all the runs + for i in run_list: + configure_run(i) + + # Compiling the table creator macro once for all + run_cmd("root -l -b -q 'createO2tables.C+(\"\")' > /dev/null 2>&1", + comment="to compile the table creator only once, before running") + if not os.path.isfile("createO2tables_C.so"): + run_cmd("root -l -b -q 'createO2tables.C+(\"\")'", + comment="to compile with full log") + fatal_msg("'createO2tables.C' did not compile!") + total_processing_time = time.time() + msg(" --- start processing the runs ", color=bcolors.HEADER) + run_in_parallel(processes=njobs, job_runner=process_run, + job_arguments=run_list, job_message="Running production") + + # merge runs when all done + msg(" --- all runs are processed, so long", color=bcolors.HEADER) + total_processing_time = time.time() - total_processing_time + msg(f"-- took {total_processing_time} seconds in total --", + color=bcolors.BOKGREEN) + + # Writing the list of produced AODs + output_list_file = "listfiles.txt" + with open(output_list_file, "w") as listfiles: + for i in os.listdir("."): + if "AODRun5." 
in i and i.endswith(".root"): + listfiles.write(f"{os.getcwd()}/{i}\n") + + # Writing summary of production + summaryfile = "summary.txt" + with open(summaryfile, "w") as f: + f.write("\n## Summary of last run ##\n") + now = datetime.now() + dt_string = now.strftime("%d/%m/%Y %H:%M:%S") + f.write(f"Finished at {dt_string}\n") + f.write(f"Took {total_processing_time} seconds\n") + + def write_config(entry, prefix=""): + f.write(prefix + entry.strip("ARG ") + + f" = {running_options[entry]}\n") + + f.write("\n## Configuration ##\n") + for i in running_options: + if "ARG" in i: + write_config(i, prefix=" - ") + + f.write("\n## Options ##\n") + for i in running_options: + if "ARG" not in i: + write_config(i, prefix=" * ") + + output_size = sum(os.path.getsize(os.path.join(output_path, f)) + for f in os.listdir(output_path) + if os.path.isfile(os.path.join(output_path, f))) + f.write("\n## Size of the ouput ##\n") + f.write(f" - {output_size} bytes\n") + f.write(f" - {output_size/1e6} MB\n") + f.write(f" - {output_size/1e9} GB\n") + run_cmd("echo >> " + summaryfile) + run_cmd("echo + DelphesO2 Version + >> " + summaryfile) + run_cmd("git rev-parse HEAD >> " + summaryfile, check_status=False) + + if os.path.normpath(output_path) != os.getcwd(): + if append_production: + s = os.path.join(output_path, summaryfile) + run_cmd(f"echo '' >> {s}") + run_cmd(f"echo ' **' >> {s}") + run_cmd(f"echo 'Appended production' >> {s}") + run_cmd(f"echo ' **' >> {s}") + run_cmd(f"echo '' >> {s}") + run_cmd(f"cat {summaryfile} >> {s}") + else: + run_cmd(f"mv {summaryfile} {output_path}") + run_cmd(f"ln -s {os.path.join(output_path, summaryfile)} ./") + + if qa: + msg(" --- running test analysis", color=bcolors.HEADER) + run_cmd( + f"./diagnostic_tools/doanalysis.py TrackQA RICH TOF -i {output_list_file} -M 25 -B 25") + if tof_mismatch == 1: # TOF mismatch in create mode + run_cmd( + f"hadd -j {njobs} -f tofMM.root tof_mismatch_template_DF_*.root && rm tof_mismatch_template_DF_*.root") + + 
+if __name__ == "__main__": + parser = get_default_parser(description=__doc__) + parser.add_argument("configuration_file", type=str, + help="Input configuration file e.g. you can use the provided default_configfile.ini or variations of it.") + parser.add_argument("--entry", "-e", type=str, + default="DEFAULT", + help="Entry in the configuration file, e.g. the INEL or CCBAR entries in the configuration file.") + parser.add_argument("--output-path", "--output_path", "-o", type=str, + default=None, + help="Output path, by default the current path is used as output.") + parser.add_argument("--nevents", "--ev", type=int, + default=1000, + help="Number of simulated events, by default 1000.") + parser.add_argument("--nruns", "--runs", "-r", type=int, + default=10, + help="Number of runs, by default 10.") + parser.add_argument("--qa", "-qa", action="store_true", + help="QA mode: runs basic tasks at the end to assess QA.") + parser.add_argument("--clean-delphes", "-c", + action="store_true", + help="Option to clean the delphes files in output and keep only the AODs, by default everything is kept.") + parser.add_argument("--no-vertexing", + action="store_true", + help="Option turning off the vertexing.") + parser.add_argument("--append", "-a", + action="store_true", + help="Option to append the results instead of starting over by shifting the AOD indexing. N.B. the user is responsible of the compatibility between appended AODs. 
Only works in conjuction by specifying an output path (option '-o')") + parser.add_argument("--no_nuclei", "--no-nuclei", + action="store_true", + help="Option use nuclei LUTs") + parser.add_argument("--debug", "-d", + action="store_true", + help="Option to use the debug flag for the AOD making") + parser.add_argument("--tof-mismatch", "--tof_mismatch", "--use_tof_mismatch", "-t", + type=int, + default=0, + help="Option to use the TOF mismatch in simulation, accepted values 0, 1, 2") + parser.add_argument("--avoid-config-copy", "--avoid_config_copy", "--grid", + action="store_true", + help="Option to avoid copying the configuration files and to use the ones directly in the current path e.g. for grid use") + parser.add_argument("--use-preexisting-luts", "-l", + action="store_true", + help="Option to use preexisting LUTs instead of creating new ones, in this case LUTs with the requested tag are fetched from the LUT path. By default new LUTs are created at each run.") + args = parser.parse_args() + set_verbose_mode(args) + + # Check arguments + if args.append and args.output_path is None: + fatal_msg( + "Asked to append production but did not specify output path (option '-o')") + main(configuration_file=args.configuration_file, + config_entry=args.entry, + njobs=args.njobs, + nevents=args.nevents, + nruns=args.nruns, + output_path=args.output_path, + clean_delphes_files=args.clean_delphes, + qa=args.qa, + create_luts=not args.use_preexisting_luts, + turn_off_vertexing=args.no_vertexing, + append_production=args.append, + use_nuclei=not args.no_nuclei, + avoid_file_copy=args.avoid_config_copy, + debug_aod=args.debug, + tof_mismatch=args.tof_mismatch) diff --git a/examples/scripts/createO2tables.sh b/examples/scripts/createO2tables.sh deleted file mode 100755 index 7ef2e3c..0000000 --- a/examples/scripts/createO2tables.sh +++ /dev/null @@ -1,126 +0,0 @@ -#! 
/usr/bin/env bash - -### run configuration -NJOBS=1 # number of max parallel runs -NRUNS=1 # number of runs -NEVENTS=1000 # number of events in a run -DOANALYSIS=0 # run O2 analysis - -### detector configuration -BFIELD=5. # magnetic field [kG] -SIGMAT=0.020 # time resolution [ns] -RADIUS=100. # radius [cm] -LENGTH=200. # half length [cm] -ETAMAX=1.443 # max pseudorapidity - -### calculate max eta from geometry -ETAMAX=`awk -v a=$RADIUS -v b=$LENGTH 'BEGIN {th=atan2(a,b)*0.5; sth=sin(th); cth=cos(th); print -log(sth/cth)}'` - -### verbose -echo " --- running createO2tables.sh " -echo " nJobs = $NJOBS " -echo " nRuns = $NRUNS " -echo " nEvents = $NEVENTS " -echo " --- with detector configuration " -echo " bField = $BFIELD [kG] " -echo " sigmaT = $SIGMAT [ns] " -echo " radius = $RADIUS [cm] " -echo " length = $LENGTH [cm] " -echo " etaMax = $ETAMAX " -echo " --- start processing the runs " - -### copy relevant files in the working directory -cp $DELPHESO2_ROOT/examples/cards/propagate.2kG.tcl propagate.tcl -cp $DELPHESO2_ROOT/examples/smearing/luts/lutCovm.* . -cp $DELPHESO2_ROOT/examples/pythia8/pythia8_ccbar.cfg . -cp $DELPHESO2_ROOT/examples/pythia8/decays/force_hadronic_D.cfg . -cp $DELPHESO2_ROOT/examples/aod/createO2tables.h . -cp $DELPHESO2_ROOT/examples/aod/createO2tables.C . -cp $DELPHESO2_ROOT/examples/scripts/dpl-config_std.json . 
- -### set magnetic field -sed -i -e "s/set barrel_Bz .*$/set barrel_Bz ${BFIELD}e\-1/" propagate.tcl -sed -i -e "s/double Bz = .*$/double Bz = ${BFIELD}e\-1\;/" createO2tables.C -sed -i -e "s/\"d_bz\": .*$/\"d_bz\": \"${BFIELD}\"\,/" dpl-config_std.json -### set radius -sed -i -e "s/set barrel_Radius .*$/set barrel_Radius ${RADIUS}e\-2/" propagate.tcl -sed -i -e "s/double tof_radius = .*$/double tof_radius = ${RADIUS}\;/" createO2tables.C -### set length -sed -i -e "s/set barrel_HalfLength .*$/set barrel_HalfLength ${LENGTH}e\-2/" propagate.tcl -sed -i -e "s/double tof_length = .*$/double tof_length = ${LENGTH}\;/" createO2tables.C -### set acceptance -sed -i -e "s/set barrel_Acceptance .*$/set barrel_Acceptance \{ 0.0 + 1.0 * fabs(eta) < ${ETAMAX} \}/" propagate.tcl -### set time resolution -sed -i -e "s/set barrel_TimeResolution .*$/set barrel_TimeResolution ${SIGMAT}e\-9/" propagate.tcl -sed -i -e "s/double tof_sigmat = .*$/double tof_sigmat = ${SIGMAT}\;/" createO2tables.C - -### make sure we are clean to run -rm -rf .running* delphes*.root *.log - -### loop over runs -for I in $(seq 0 $(($NRUNS - 1))); do - - ### wait for a free slot - while [ $(ls .running.* 2> /dev/null | wc -l) -ge $NJOBS ]; do - echo " --- waiting for a free slot" - sleep 1 - done - - ### book the slot - echo " --- starting run $I" - touch .running.$I - - ### copy pythia8 configuration and adjust it - cp pythia8_ccbar.cfg pythia8.$I.cfg - ### number of events and random seed - echo "Main:numberOfEvents $NEVENTS" >> pythia8.$I.cfg - echo "Random:seed = $I" >> pythia8.$I.cfg - ### collision time spread [mm/c] - echo "Beams:allowVertexSpread on " >> pythia8.$I.cfg - echo "Beams:sigmaTime 60." 
>> pythia8.$I.cfg - - ### force hadronic D decays - cat force_hadronic_D.cfg >> pythia8.$I.cfg - - ### run Delphes and analysis - DelphesPythia8 propagate.tcl pythia8.$I.cfg delphes.$I.root &> delphes.$I.log && \ - root -b -q -l "createO2tables.C(\"delphes.$I.root\", \"AODRun5.$I.root\", $(($I*$NEVENTS)))" &> createO2tables.$I.log && \ - rm -rf delphes.root && \ - rm -rf .running.$I && \ - echo " --- complete run $I" & - -done - -### merge runs when all done -echo " --- waiting for runs to be completed " -wait -echo " --- all runs are processed, merging " -hadd -f AODRun5Tot.root AODRun5.*.root && rm -rf AODRun5.*.root - -FILEOUTO2="AnalysisResults.root" -AOD3NAME=AODRun5Tot.root - -### perform O2 analysis -if [ $DOANALYSIS -eq 1 ]; then - LOGFILE="log_o2.log" - echo -e "\nRunning the tasks with O2... (logfile: $LOGFILE)" - rm -f $FILEOUTO2 - if [ ! -f "$AOD3NAME" ]; then - echo "Error: File $AOD3NAME does not exist." - exit 1 - fi - O2ARGS="--shm-segment-size 16000000000 --configuration json://$PWD/dpl-config_std.json --aod-file $AOD3NAME" - O2EXEC="o2-analysis-hftrackindexskimscreator $O2ARGS | o2-analysis-hfcandidatecreator2prong $O2ARGS | o2-analysis-taskdzero $O2ARGS | o2-analysis-qatask $AOD3NAME -b" - TMPSCRIPT="tmpscript.sh" - cat << EOF > $TMPSCRIPT # Create a temporary script with the full O2 commands. -#!/bin/bash -$O2EXEC -EOF - $ENVO2 bash $TMPSCRIPT # Run the script in the O2 environment. - #$ENVO2 bash $TMPSCRIPT > $LOGFILE 2>&1 # Run the script in the O2 environment. - #if [ ! $? -eq 0 ]; then echo "Error"; exit 1; fi # Exit if error. - rm -f $TMPSCRIPT -fi - -### clean -rm *.tcl *.cfg *.dat *.C diff --git a/examples/scripts/create_luts.sh b/examples/scripts/create_luts.sh new file mode 100755 index 0000000..d9b5e5b --- /dev/null +++ b/examples/scripts/create_luts.sh @@ -0,0 +1,233 @@ +#! /usr/bin/env bash + +# Configuration variables +WHAT=default +FIELD=0.5 +RMIN=100. 
+WRITER_PATH=${DELPHESO2_ROOT}/lut/ +if [[ -z ${DELPHESO2_ROOT} ]]; then + WRITER_PATH="../../src/" +fi +OUT_PATH=. +OUT_TAG= +PARALLEL_JOBS=1 +PARTICLES="0 1 2 3 4" +AUTOTAG="Yes" +DIPOLE="No" +FLATDIPOLE="No" +VERBOSE="No" + +# List of arguments expected in the input +optstring=":ht:B:R:p:o:T:P:j:vFDd" +# Get the options +while getopts ${optstring} option; do + case ${option} in + h) # display Help + echo "Script to generate LUTs from LUT writer, arguments:" + echo "Syntax: ./create_luts.sh [${optstring}]" + echo "options:" + echo "-t tag of the LUT writer [default]" + echo "-B Magnetic field in T [0.5]" + echo "-R Minimum radius of the track in cm [100]" + echo "-p Path where the LUT writers are located [\$DELPHESO2_ROOT/lut/]" + echo "-o Output path where to write the LUTs [.]" + echo "-T Tag to append to LUTs [\"\"]" + echo "-P Particles to consider [\"0 1 2 3 4\"]" + echo "-j Number of parallel processes to use [1]" + echo "-F Don't use the automatic tagging and use only the one provided instead for the naming of the output files" + echo "-D Use dipole" + echo "-d Use dipole flat dipole parametrization" + echo "-v Verbose mode" + echo "-h Show this help" + exit 0 + ;; + t) + WHAT=$OPTARG + echo " > Setting LUT writer to ${WHAT}" + ;; + B) + FIELD=$OPTARG + echo " > Setting B field to ${FIELD} T" + ;; + R) + RMIN=$OPTARG + echo " > Setting minimum radius to ${RMIN} cm" + ;; + p) + WRITER_PATH=$OPTARG + echo " > Setting LUT writer path to ${WRITER_PATH}" + ;; + o) + OUT_PATH=$OPTARG + echo " > Setting LUT output path to ${OUT_PATH}" + ;; + T) + OUT_TAG=$OPTARG + echo " > Setting LUT output tag to ${OUT_TAG}" + ;; + P) + PARTICLES=$OPTARG + echo " > Setting LUT particles to ${PARTICLES}" + ;; + j) + PARALLEL_JOBS=$OPTARG + echo " > Setting parallel jobs to ${PARALLEL_JOBS}" + ;; + F) + AUTOTAG="No" + echo " > Disabling autotagging mode" + ;; + D) + DIPOLE="Yes" + echo " > Enabling dipole" + ;; + d) + FLATDIPOLE="Yes" + echo " > Enabling flat dipole" + ;; + v) + 
VERBOSE="Yes" + echo " > Enabling verbose mode" + ;; + \?) # Invalid option + echo "$0: Error: Invalid option, use [${optstring}]" + exit + ;; + :) # Empty argument + echo "$0: Error: ust supply an argument to -$OPTARG." >&2 + exit 1 + ;; + esac +done + +if [[ ${AUTOTAG} == "Yes" ]]; then + # Defining the output tag based on the input + FIELDT=$(echo "${FIELD}*10" | bc) + FIELDT=${FIELDT%.0} + if [[ ${DIPOLE} == "Yes" ]]; then + OUT_TAG="${OUT_TAG}_Dipole" + fi + if [[ ${FLATDIPOLE} == "Yes" ]]; then + OUT_TAG="${OUT_TAG}_FlatDipole" + fi + OUT_TAG=".${FIELDT}kG.rmin${RMIN}.${WHAT}${OUT_TAG}" +fi + +if [[ ${DIPOLE} == "Yes" ]]; then + DIPOLE="useDipole = 1;" +else + DIPOLE="" +fi + +if [[ ${FLATDIPOLE} == "Yes" ]]; then + FLATDIPOLE="useFlatDipole = 1;" +else + FLATDIPOLE="" +fi + +if [[ ${VERBOSE} == "Yes" ]]; then + echo "WHAT='${WHAT}'" + echo "FIELD='${FIELD}'" + echo "RMIN='${RMIN}'" + echo "WRITER_PATH='${WRITER_PATH}'" + echo "OUT_PATH='${OUT_PATH}'" + echo "OUT_TAG='${OUT_TAG}'" + echo "PARALLEL_JOBS='${PARALLEL_JOBS}'" + echo "PARTICLES='${PARTICLES}'" + echo "AUTOTAG='${AUTOTAG}'" +fi + +if [[ -z ${WRITER_PATH} ]]; then + echo "Path of the LUT writers not defined, cannot continue" + exit 1 +fi + +function do_copy() { + cp "${1}" . || { + echo "Cannot find $2: ${1}" + exit 1 + } +} + +do_copy "${WRITER_PATH}/lutWrite.$WHAT.cc" "lut writer" +do_copy "${WRITER_PATH}/DetectorK/HistoManager.cxx" +do_copy "${WRITER_PATH}/DetectorK/HistoManager.h" +do_copy "${WRITER_PATH}/DetectorK/DetectorK.cxx" +do_copy "${WRITER_PATH}/DetectorK/DetectorK.h" +do_copy "${WRITER_PATH}/lutWrite.cc" +if [[ -z ${DELPHESO2_ROOT} ]]; then + do_copy "${WRITER_PATH}/lutCovm.hh" +fi +cp -r "${WRITER_PATH}/fwdRes" . 
+ +echo " --- creating LUTs: config = ${WHAT}, field = ${FIELD} T, min tracking radius = ${RMIN} cm" + +function do_lut_for_particle() { + root -l -b <AddParticle("deuteron", "deuteron", 1.8756134, kTRUE, 0.0, 3, "Nucleus", 1000010020); + TDatabasePDG::Instance()->AddAntiParticle("anti-deuteron", -1000010020); + + TDatabasePDG::Instance()->AddParticle("triton", "triton", 2.8089218, kTRUE, 0.0, 3, "Nucleus", 1000010030); + TDatabasePDG::Instance()->AddAntiParticle("anti-triton", -1000010030); + + TDatabasePDG::Instance()->AddParticle("helium3", "helium3", 2.80839160743, kTRUE, 0.0, 6, "Nucleus", 1000020030); + TDatabasePDG::Instance()->AddAntiParticle("anti-helium3", -1000020030); + + const int N = 8; + const TString pn[N] = {"el", "mu", "pi", "ka", "pr", "de", "tr", "he3"}; + const int pc[N] = {11, 13, 211, 321, 2212, 1000010020, 1000010030, 1000020030 }; + const float field = ${FIELD}; + const float rmin = ${RMIN}; + const int i = ${1}; + const TString out_file = "${OUT_PATH}/lutCovm." + pn[i] + "${OUT_TAG}.dat"; + Printf("Creating LUT for particle ID %i: %s with pdg code %i to %s", i, pn[i].Data(), pc[i], out_file.Data()); + if(i >= 0 && i < N){ + lutWrite_${WHAT}(out_file, pc[i], field, rmin); + } else{ + Printf("Particle ID %i is too large or too small", i); + } + +EOF + +} + +root -l -b <(flag); + //Printf("%i %c", f, flag); + return (f & PhysicalPrimary) == PhysicalPrimary; + } + bool producedTransport(UChar_t flag) { + return (flag & ProducedByTransport) == ProducedByTransport; + uint8_t f = static_cast(flag); + //Printf("%i %c", f, flag); + return (f & ProducedByTransport) == ProducedByTransport; + } + """) +# Function to check eta and phi of the particle +gInterpreter.Declare(""" + float pMag(float px, float py, float pz) { + return TMath::Sqrt(px * px + py * py + pz * pz); + } + float etaValue(float p, float pz){ + return 0.5*TMath::Log((p + pz)/(p - pz)); + } + """) + + +def main(filename, verbose=True, pdg_of_interest=[421], event_filters=None, 
summary=True, continue_on_inconsistency=True): + def get_frame(file_name, df_index=0, tree_name="O2mcparticle_001"): + """ + Getter of the frame from the file + """ + if not path.isfile(file_name): + raise ValueError("Did not find AOD file", file_name) + sub_names = run_cmd(f"rootls {file_name}").strip().split() + df_name = [] + for i in sub_names: + if not i.startswith("DF_") and not i.startswith("TF_"): + continue + df_name.append(i) + df_name = df_name[df_index] + print(df_name) + frame = RDataFrame(f"{df_name}/{tree_name}", file_name) + if verbose: + colNames = frame.GetColumnNames() + for j in enumerate(colNames): + print(j, frame.GetColumnType(j[1])) + return frame + + df = get_frame(filename) + df = df.Define("part_index", + "index_maker(fIndexMcCollisions)") + df = df.Define("isPhysicalPrimary", + "physPrim(fFlags)") + df = df.Define("isProducedByTransport", + "producedTransport(fFlags)") + df = df.Define("p", + "pMag(fPx, fPy, fPz)") + df = df.Define("eta", + "etaValue(p, fPz)") + counters = {} + + def count(label, index): + if not summary: + return + c = counters.setdefault(label, []) + if index not in c: + c.append(index) + return False + return True + + def print_evt(event_filter=">= 0"): + pdg_db = TDatabasePDG() + ev_df = df.Filter(f"fIndexMcCollisions {event_filter}") + npy = ev_df.AsNumpy() + print() + lastmother = 0 + for i, part_index in enumerate(npy["part_index"]): + ev = npy["fIndexMcCollisions"][i] + count("events", ev) + if 0: + m0 = npy["fMother0"][i] + m1 = npy["fMother1"][i] + d0 = npy["fDaughter0"][i] + d1 = npy["fDaughter1"][i] + else: + m_arr = npy["fIndexArray_Mothers"][i] + d_arr = npy["fIndexSlice_Daughters"][i] + m_size = npy["fIndexArray_Mothers_size"][i] + # print(m_size) + # print("Mothers", m_arr) + # print("Daughters", d_arr) + + if len(m_arr) == 0: + m0 = -1 + m1 = -1 + else: + m0 = m_arr[0] + m1 = m_arr[int(m_size)-1] + d0 = d_arr[0] + d1 = d_arr[1] + # print(d_arr) + pdg = npy["fPdgCode"][i] + px = npy["fPx"][i] + py = 
npy["fPy"][i] + pz = npy["fPz"][i] + eta = npy["eta"][i] + is_ps = bool(npy["isPhysicalPrimary"][i]) + is_pt = bool(npy["isProducedByTransport"][i]) + process = npy["fStatusCode"][i] + + def getpname(pdg_code): + p = pdg_db.GetParticle(int(pdg_code)) + if p: + p = p.GetName() + else: + p = "Undef" + return p + part = getpname(pdg) + summary_line = f" ({part_index}) ev {ev} m0 {m0} m1 {m1}, d0 {d0} d1 {d1}, pdg {pdg} '{part}', physical primary {is_ps}, in transport {is_pt}, process {process}" + if abs(pdg) not in [21, 2101, 2103, 2203, 1, 2, 3, 4, 5] and m0 > -1: + if lastmother != m0 and count("mothers", m0): + raise ValueError("Duplicate mothers for ", summary_line) + lastmother = m0 + if d1 > -1 and d0 > d1: + if not continue_on_inconsistency: + raise ValueError("d0 > d1:", summary_line) + else: + warning_msg("d0 > d1 for", part_index) + + def get_the_daughters(): + idaughters = [] + if d0 > -1 and d1 > -1: + for j in range(d0, d1+1): + entry = numpy.where(npy["part_index"] == j)[0] + if len(entry) > 1: + raise ValueError("Entry size is too high!") + if len(entry) == 0: + raise ValueError("Entry size is too low!") + entry = entry[0] + if 0: + d_m0 = npy["fMother0"][entry] + d_m1 = npy["fMother1"][entry] + else: + d_m0 = npy["fIndexArray_Mothers"][entry][0] + d_m1 = npy["fIndexArray_Mothers"][entry][int( + npy["fIndexArray_Mothers_size"][entry])-1] + + if d_m0 != part_index and d_m1 != part_index: + if not continue_on_inconsistency: + raise ValueError("Daughter", j, + "has a different mother!", + "d_m0", d_m0, "d_m1", d_m1, "w.r.t.", part_index) + else: + warning_msg("Daughter", j, + "has a different mother!", + "d_m0", d_m0, "d_m1", d_m1, "w.r.t.", part_index) + if d_m0 == d_m1 and 0: + raise ValueError("Daughter has same mother!", + d_m0, d_m1) + idaughters.append(entry) + if len(idaughters) == 0: + warning_msg("Found no daughters") + return idaughters + # Checking that indices are increasing + if sorted(idaughters) != idaughters: + raise ValueError("Daughters 
are not in order!") + # Checking that indices have no holes + if idaughters != [*range(idaughters[0], idaughters[-1]+1)]: + raise ValueError("Daughters have hole in indices!", + idaughters) + return idaughters + + def daughters_pxpypz(daughters): + d_px = 0 + d_py = 0 + d_pz = 0 + if len(daughters) == 0: + return None + for j in daughters: + d_px += npy["fPx"][j] + d_py += npy["fPy"][j] + d_pz += npy["fPz"][j] + return d_px, d_py, d_pz + + def daughters_pdg(daughters): + d_pdgs = [] + for j in daughters: + d_pdgs.append(npy["fPdgCode"][j]) + return d_pdgs + + def check_momentum(daughters): + d_p = daughters_pxpypz(daughters) + if d_p is None: + return + m_p = [px, py, pz] + m_p_d = {0: "Px", 1: "Py", 2: "Pz"} + momentum_format = "(px={:.5f}, py={:.5f}, pz={:.5f})" + for j in enumerate(m_p): + if abs(j[1] - d_p[j[0]]) > 0.001: + e_msg = ["Non-closure in", m_p_d[j[0]], + "=", momentum_format.format(*d_p)] + if not continue_on_inconsistency: + raise ValueError(*e_msg) + else: + warning_msg(*e_msg) + warning_msg(" mother =", + momentum_format.format(*m_p)) + + def is_decay_channel(desired_pdg_codes, daughters, fill_counter=True, min_prongs=0, max_prongs=10): + d_pdgs = daughters_pdg(daughters) + if len(daughters) >= min_prongs and len(daughters) <= max_prongs: + print(pdg, part, "decaying in", len(d_pdgs), "particles") + for i, j in enumerate(d_pdgs): + if 0: + this_m0 = npy["fMother0"][daughters[i]] + this_m1 = npy["fMother1"][daughters[i]] + else: + this_m0 = npy["fIndexArray_Mothers"][daughters[i]][0] + this_m1 = npy["fIndexArray_Mothers"][daughters[i]][int( + npy["fIndexArray_Mothers_size"][daughters[i]])-1] + + print(" >", j, getpname(j), + "index", daughters[i], npy["part_index"][daughters[i]], "m0", this_m0, "m1", this_m1, " -> physical primary", npy["isPhysicalPrimary"][daughters[i]]) + if desired_pdg_codes is not None: + for i in desired_pdg_codes: + if i not in d_pdgs: + return False + if fill_counter: + count( + f"{bcolors.BOKGREEN} {pdg} {part} 
{bcolors.ENDC} in {d_pdgs}", part_index) + return True + + extra = [] + if m0 < 0 and m1 < 0 and d0 < 1 and d1 < 0: + extra.append("Sterile") + if d1 < 0 and d1 != d0: + extra.append(bcolors.BWARNING + "Problematic" + bcolors.ENDC) + if pdg in pdg_of_interest: + extra.append( + ", px={:.3f} py={:.2f} pz={:.2f}".format(px, py, pz)) + extra.append(", eta={:.4f}".format(eta)) + extra.append(bcolors.BOKGREEN + + "PDG of interest" + bcolors.ENDC) + extra = " ".join(extra) + extra = extra.strip() + + count(part, part_index) + if verbose or pdg in pdg_of_interest: + print(summary_line, extra) + if pdg in pdg_of_interest: + daughters = get_the_daughters() + check_momentum(daughters) + is_decay_channel(None, daughters=daughters, fill_counter=True) + + if event_filters is None: + print_evt() + else: + for i in event_filters: + if i.isdigit(): + i = f"== {i}" + print_evt(i) + for i in counters: + print(i, ":", len(counters[i])) + if not summary: + print("Processed", filename) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("file_list", type=str, nargs="+", + help="Input configuration file") + parser.add_argument("--events", "-e", type=str, nargs="+", default=None, + help="Events to analyze e.g. 
0 1 2 or < 10") + parser.add_argument("--njobs", type=int, default=10, help="Number of jobs") + parser.add_argument("--pdg", "-p", nargs="+", type=int, + default=[0], help="PDG of interest") + parser.add_argument("-s", "--summary", action="store_true", + help="Flag to show summary after processing a file") + parser.add_argument("-b", action="store_true", help="Background mode") + parser.add_argument("-v", action="store_true", help="Verbose mode") + args = parser.parse_args() + file_list = args.file_list + if len(file_list) < 3: + for i in file_list: + main(i, + verbose=args.v, + event_filters=args.events, + summary=args.summary, + pdg_of_interest=args.pdg) + else: + with multiprocessing.Pool(processes=args.njobs) as pool: + pool.map(main, file_list) diff --git a/examples/scripts/diagnostic_tools/check_table_consistency.py b/examples/scripts/diagnostic_tools/check_table_consistency.py new file mode 100755 index 0000000..9690879 --- /dev/null +++ b/examples/scripts/diagnostic_tools/check_table_consistency.py @@ -0,0 +1,238 @@ +#!/usr/bin/env python3 + +""" +Script to check the consistency between O2 tables and delphes. +This allows the user to check the consistency of variables in the AOD and in the delphes file. 
+Author: Nicolò Jacazio, nicolo.jacazio@cern.ch +""" + +from ROOT import TFile +from sys import argv +from ROOT import RDataFrame, TCanvas, RDF, gPad, TLegend, gInterpreter +import argparse +import numpy + + +def check_trees(file_list): + """ + Function to check that the file is correctly written and has only one folder with the tree + """ + print("Checking file") + for i in file_list: + f = TFile(i, "READ") + l = f.GetListOfKeys() + if l.GetEntries() > 2: + print(i, "Bad file") + + +def check_corresponding(file_list, + origin="AODRun5", + friend="delphes", + verbose=False, + show=False): + """ + Function to check that the delphes and AODs are consistent + """ + print("Checking correspondance between O2 tables and delphes output") + for i in file_list: + def get_frame(tree_name, file_name): + frame = RDataFrame(tree_name, file_name) + if verbose: + colNames = frame.GetColumnNames() + for j in colNames: + print(j) + return frame + + df = get_frame("TF_0/O2mcparticle", i) + df = df.Define("fP", "TMath::Sqrt(fPx*fPx + fPy*fPy + fPz*fPz)") + df = df.Define("fEta", "-0.5*TMath::Log((fP+fPz)/(fP-fPz))") + # + df_reco = get_frame("TF_0/O2track", i) + df_reco = df_reco.Define("fEta", + "-1.f * TMath::Log(TMath::Tan(0.25f * TMath::Pi() - 0.5f * TMath::ATan(fTgl)))") + df_reco = df_reco.Define("fPt", "1./TMath::Abs(fSigned1Pt)") + # + df_delphes = get_frame("Delphes", i.replace(origin, friend)) + gInterpreter.Declare(""" + auto p(ROOT::VecOps::RVec px, ROOT::VecOps::RVec py, ROOT::VecOps::RVec pz) { + std::vector v; + int counter = 0; + for(auto i : px){ + v.push_back(TMath::Sqrt(px[counter]*px[counter] + py[counter]*py[counter] + pz[counter]*pz[counter])); + counter++; + } + return v; + } + """) + gInterpreter.Declare(""" + auto eta(ROOT::VecOps::RVec p, ROOT::VecOps::RVec pz) { + std::vector v; + int counter = 0; + for(auto i : p){ + v.push_back(0.5*TMath::Log((p[counter]+pz[counter])/(p[counter]-pz[counter]))); + counter++; + } + return v; + } + """) + 
gInterpreter.Declare(""" + auto recomc(ROOT::VecOps::RVec mc, ROOT::VecOps::RVec mclabel, ROOT::VecOps::RVec recolabel) { + std::vector v; + int counter = 0; + for(auto i : mc){ + for(auto j : recolabel){ + if (mclabel[counter] == j.GetUniqueID()) {v.push_back(i);} + } + counter++; + } + return v; + } + """) + gInterpreter.Declare(""" + auto diff(ROOT::VecOps::RVec a, ROOT::VecOps::RVec b) { + std::vector v; + int counter = 0; + for(auto i : a){ + v.push_back(i-b[counter]); + counter++; + } + return v; + } + """) + + df_delphes = df_delphes.Define("P", + "p(Particle.Px, Particle.Py, Particle.Pz)") + df_delphes = df_delphes.Define("Eta", + "eta(P, Particle.Pz)") + df_delphes = df_delphes.Define("RecoEta", + "recomc(Eta, Particle.fUniqueID, Track.Particle)") + df_delphes = df_delphes.Define( + "EtaDiff", "diff(RecoEta, Particle.Eta)") + df_delphes = df_delphes.Define("RecoPhi", + "recomc(Particle.Phi, Particle.fUniqueID, Track.Particle)") + df_delphes = df_delphes.Define("PhiDiff", "diff(RecoPhi, Track.Phi)") + + canvas_list = [] + + def canvas(name, diff=False): + can = TCanvas(name, name, 800, 1280) + can.Divide(1, 2) + canvas_list.append(can) + return can + + def check(var, nbins, low, up, friend_var="Particle.{}", frame=df): + h = f"{origin} {var}" + h = frame.Histo1D(RDF.TH1DModel(h, + h, + nbins, low, up), + f"f{var}") + friend_var = friend_var.format(var) + h2 = f"{friend} {friend_var}" + h2 = df_delphes.Histo1D(RDF.TH1DModel(h2, + h2, + nbins, low, up), + friend_var) + h2.SetLineColor(2) + h2.SetLineStyle(3) + can = canvas(var, diff=True) + can.cd(1) + h.SetDirectory(0) + h2.SetDirectory(0) + hdrawn = [h.DrawCopy(), h2.DrawCopy("same")] + for i in hdrawn: + i.SetDirectory(0) + leg = TLegend(.7, .5, .9, .75) + leg.AddEntry(h.GetValue()) + leg.AddEntry(h2.GetValue()) + leg.Draw() + gPad.Update() + can.cd(2) + hdiff = h.DrawCopy() + hdiff.SetDirectory(0) + hdiff.SetName("hdiff") + hdiff.SetTitle("diff") + hdiff.Add(h2.GetValue(), -1) + 
hdiff.GetYaxis().SetRangeUser(-1, 1) + gPad.Update() + for i in range(1, hdiff.GetNbinsX()+1): + diff = hdiff.GetBinContent(i) + if diff != 0: + return False + return True + + def correlate(frame, x, y): + hn = f"{x[0]}_vs_{y[0]}" + ht = f";{x[0]};{y[0]}" + h = frame.Histo2D(RDF.TH2DModel(hn, + ht, + x[1], x[2], x[3], + y[1], y[2], y[3]), + x[0], + y[0]) + can = canvas(hn) + can.SetLeftMargin(0.15) + h.Draw("COLZ") + can.Update() + + def plot(frame, x): + hn = f"{x[0]}" + ht = f";{x[0]}" + h = frame.Histo1D(RDF.TH1DModel(hn, + ht, + x[1], x[2], x[3]), + x[0]) + can = canvas(hn) + can.SetLeftMargin(0.15) + h.Draw("COLZ") + can.Update() + + # Comparison of Delphes Particles and O2 Particles + variables = {"Px": [1000, -100, 100], + "Py": [1000, -100, 100], + "Pz": [1000, -100, 100], + "P": [1000, 0, 100], + "Vx": [1000, -100, 100, "Particle.X"], + "Vy": [1000, -100, 100, "Particle.Y"], + "Vz": [1000, -100, 100, "Particle.Z"], + "Eta": [1000, -10, 10], + "E": [1000, 0, 1000]} + for i in variables: + x = variables[i] + if not check(i, *x): + print("Something is wrong for", i) + # Comparison of Delphes Tracks and O2 Tracks + check("Eta", 1000, -10, 10, frame=df_reco, friend_var="Track.{}") + check("Pt", 1000, 0, 30, frame=df_reco, friend_var="Track.PT") + # check("Pt", 1000, 0, 30, frame=df_reco, friend_var="Particle.PT") + # Correlation of Delphes variables + correlate(df_delphes, ["Eta", 1000, -10, 10], + ["Particle.Eta", 1000, -10, 10]) + correlate(df_delphes, ["P", 1000, -1, 10], + ["Particle.P", 1000, -1, 10]) + correlate(df_delphes, ["Track.Eta", 1000, -10, 10], + ["EtaDiff", 1000, -10, 10]) + # Plot of Delphes variables + plot(df_delphes, ["EtaDiff", 1000, -2, 2]) + plot(df_delphes, ["PhiDiff", 1000, -2, 2]) + if show: + input("Press enter to continue") + fout = TFile("table_check.root", "RECREATE") + fout.cd() + canvas_list[0].SaveAs("table_check.pdf[") + for i in canvas_list: + i.SaveAs("table_check.pdf") + i.Write() + canvas_list[0].SaveAs("table_check.pdf]") 
+ fout.Close() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("file_list", type=str, nargs="+", + help="Space separated list of the AODs to analyze e.g. /tmp/AODRun5.0.root") + parser.add_argument("-b", action="store_true", help="Background mode") + parser.add_argument("-v", action="store_true", help="Verbose mode") + args = parser.parse_args() + file_list = args.file_list + check_trees(file_list) + check_corresponding(file_list, verbose=args.v, show=not args.b) diff --git a/examples/scripts/diagnostic_tools/check_table_indices.py b/examples/scripts/diagnostic_tools/check_table_indices.py new file mode 100755 index 0000000..7bd3e75 --- /dev/null +++ b/examples/scripts/diagnostic_tools/check_table_indices.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python3 + + +""" +Script to test the indices of the AODs +Author: Nicolò Jacazio, nicolo.jacazio@cern.ch +""" + +from ROOT import TFile, TCanvas, TH1F +import ROOT +import argparse + +ROOT.gInterpreter.Declare(""" + int Count(int x) { + if(x >= 998) + return 1; + return 0; + } + """) + + +def main(input_name, draw=True, vvv=False, tree_names=None): + f = TFile(input_name, "READ") + # f.ls() + lk = f.GetListOfKeys() + + histograms = {} + + def gethisto(k, n=len(lk), l=0, h=len(lk), y="", set_labels=True): + if histograms.setdefault(k, None) is None: + h = TH1F(k, k, n, l, h) + h.GetYaxis().SetTitle(y) + h.SetDirectory(0) + if set_labels: + for i in enumerate(lk): + h.GetXaxis().SetBinLabel(i[0]+1, i[1].GetName()) + histograms[k] = h + return histograms[k] + empty_dirs = {} + + def check_dir(directory, verbose=False): + if verbose: + print(directory, type(directory)) + directory.ls() + available_trees = ["O2bc", "O2track", "O2collision", "O2mccollision", + "O2mcparticle", "O2mctracklabel", "O2mccollisionlabel"] + if tree_names is not None: + to_remove = [] + for i in available_trees: + if i not in tree_names: + to_remove.append(i) + for i in to_remove: + 
available_trees.pop(available_trees.index(i)) + for i in available_trees: + tree = directory.Get(i) + if verbose: + print(i, ":") + tree.Print() + + def check_tree(tn): + df = ROOT.RDataFrame(directory.GetName()+"/"+tn, input_name) + + def fill_histo(v, y, tag=""): + h = gethisto(tn+tag, y=y) + h.Fill(directory.GetName(), v) + return v + + if df.Count().GetValue() < 1: + print(directory, "in", input_name, "has", + df.Count().GetValue(), f"entries in '{tn}'") + empty_dirs.setdefault(tn, []).append( + f"{input_name}:{directory.GetName()}") + if tn == "O2track": + print("In", directory, tn, "has", + fill_histo(df.Count().GetValue(), "#Tracks", "amount"), + "tracks") + # df = df.Define("HowMany", "Count(fIndexCollisions)").Sum( + # "HowMany").GetValue() + # print("HowMany:", df) + elif tn == "O2mctracklabel": + print("In", directory, tn, "has", df.Count().GetValue(), + "mc track labels with mean", + fill_histo(df.Mean("fIndexMcParticles").GetValue(), "")) + elif tn == "O2mccollisionlabel": + print("In", directory, tn, "has", df.Count().GetValue(), + "mc collision labels with mean", + fill_histo(df.Mean("fIndexMcCollisions").GetValue(), "")) + elif tn == "O2mcparticle": + print("In", directory, tn, "has", + fill_histo(df.Count().GetValue(), "#Particles"), + "particles") + + # check_tree("O2track") + # check_tree("O2mcparticle") + # check_tree("O2mctracklabel") + # check_tree("O2mccollisionlabel") + for i in available_trees: + check_tree(i) + + for i in lk: + d = f.Get(i.GetName()) + if "TDirectoryFile" not in d.ClassName(): + continue + check_dir(d, verbose=(i == lk[0] and vvv)) + + if draw: + for i in histograms: + can = TCanvas() + histograms[i].Draw("HIST") + can.Update() + input("Press enter to continue") + return empty_dirs + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description=__name__) + parser.add_argument("--verbose", "-v", + action="store_true", help="Verbose mode.") + parser.add_argument("input_file", + type=str, + nargs="+", + 
help="Input files") + parser.add_argument("--trees", "-t", + type=str, + nargs="+", + default=None, + help="Name of the trees") + + args = parser.parse_args() + + results = [main(i, tree_names=args.trees) for i in args.input_file] + empty_dirs = {} + for i in results: + for j in i: + empty_dirs.setdefault(j, []).append(i[j]) + for i in empty_dirs: + print("\t-", i, empty_dirs[i], "\n") diff --git a/examples/scripts/diagnostic_tools/common.py b/examples/scripts/diagnostic_tools/common.py new file mode 120000 index 0000000..a11703e --- /dev/null +++ b/examples/scripts/diagnostic_tools/common.py @@ -0,0 +1 @@ +../common.py \ No newline at end of file diff --git a/examples/scripts/diagnostic_tools/compare_two_aods.py b/examples/scripts/diagnostic_tools/compare_two_aods.py new file mode 100755 index 0000000..af02b53 --- /dev/null +++ b/examples/scripts/diagnostic_tools/compare_two_aods.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python3 + +""" +Script to compare two AOD files. +This can be used to compare the consistency of the branches of AODs +Author: Nicolò Jacazio, nicolo.jacazio@cern.ch +""" + +from ROOT import TFile +import argparse +from sys import argv + + +def main(file1, file2, dir1, dir2): + print("Comparing content of", file1, file2, + "the first file is considered the reference") + + def get_file(f, d, verbose=False): + f = TFile(f, "READ") + if not f.IsOpen(): + raise RuntimeError("File", f, "is not open!") + if d is None: + f.ls() + else: + if verbose: + f.Get(d).ls() + if not f.Get(d): + f.ls() + raise RuntimeError("file", f, "does not have directory", d) + l = f.Get(d).GetListOfKeys() + t = {} + for i in l: + i = i.GetName() + t[i] = f.Get(f"{d}/{i}") + return f, t + + tree1 = get_file(file1, dir1) + + missing_trees = {} + tree2 = get_file(file2, dir2) + for i in tree2[1]: + missing_trees[i] = tree2[1][i] + + for i in tree1[1]: + print("Checking", f"'{i}'") + if i not in tree2[1]: + print(i, "not present in", file2) + continue + missing_trees.pop(i) + branches1 
= tree1[1][i].GetListOfBranches() + branches2 = tree2[1][i].GetListOfBranches() + missing_branches = {} + for k in branches2: + missing_branches[k] = [k.GetName(), k.GetTitle()] + for j in branches1: + print(" > Branch", j.GetName(), j.GetTitle()) + has_it = False + for k in branches2: + if (j.GetName() == k.GetName()) and (j.GetTitle() == k.GetTitle()): + has_it = True + missing_branches.pop(k) + break + if not has_it: + print(" *** Branch", j, "is not in", file2, "***") + if len(missing_branches) > 0: + print("!!!!!!! Missing branches that are in", file2, + "but not in", file1, "are:", missing_branches) + print(i, "is consistent") + + if len(missing_trees) > 0: + print("!!!!!!! Missing Trees that are in", file2, + "but not in", file1, ":") + for i in missing_trees: + print(missing_trees[i]) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("file1", type=str, help="First file to check") + parser.add_argument("file2", type=str, help="Second file to check") + parser.add_argument("--dir1", type=str, default=None, + help="First directory to check") + parser.add_argument("--dir2", type=str, default=None, + help="Second directory to check") + parser.add_argument("-b", action="store_true", help="Background mode") + parser.add_argument("-v", action="store_true", help="Verbose mode") + args = parser.parse_args() + main(args.file1, args.file2, args.dir1, args.dir2) diff --git a/examples/scripts/diagnostic_tools/doanalysis.py b/examples/scripts/diagnostic_tools/doanalysis.py new file mode 100755 index 0000000..56b5855 --- /dev/null +++ b/examples/scripts/diagnostic_tools/doanalysis.py @@ -0,0 +1,561 @@ +#!/usr/bin/env python3 + +""" +Script to run o2 analyses on AODs. +This script is used to run the basic QA checks on the productions. 
+Several analyses are implemented already, you can pick yours and run e.g.: +`./doanalysis.py TrackQA -i ../AODRun5.0.root` +Results will be available for each batch of files in the `AnalysisResults` directory. +You can check the help of the script (i.e. `./doanalysis.py --h`) to have information on the available options and workflows. +Author: Nicolò Jacazio, nicolo.jacazio@cern.ch +""" + +import configparser +from itertools import islice +import os +from common import bcolors, msg, fatal_msg, verbose_msg, run_in_parallel, set_verbose_mode, get_default_parser, warning_msg, run_cmd, print_all_warnings +from ROOT import TFile +import datetime +from sys import argv + + +def set_o2_analysis(o2_analyses=["o2-analysis-hf-task-d0 --pipeline qa-tracking-kine:4,qa-tracking-resolution:4"], + o2_arguments="--shm-segment-size 16000000000 --readers 4 --configuration json://$PWD/dpl-config_std.json", + input_file="listfiles.txt", + tag="QA", + output_files=["AnalysisResults.root", + "AnalysisResults_trees.root", + "QAResults.root"], + dpl_configuration_file=None, + resume_previous_analysis=False, + write_runner_script=True, + allow_errors_in_logs=True): + """ + Function to prepare everything you need for your O2 analysis. + From the output folder to the script containing the O2 workflow. + The output can be found in the same directory as the input data. 
+ """ + # Creating output directory + output_path = os.path.dirname(os.path.abspath(input_file)) + # Creating the script to run O2 + tmp_script_name = os.path.join(output_path, f"tmpscript_{tag.lower()}.sh") + if not write_runner_script: # Returning in case write_runner_script is False + return tmp_script_name + # Defining log file + log_file = f"log_{tag.lower()}.log" + verbose_msg("Configuring the tasks with O2", color=bcolors.BOKBLUE) + # Checking input file + verbose_msg("Using", input_file, "as input file") + if not input_file.endswith(".root"): + input_file = f"@{os.path.join(os.getcwd(), input_file)}" + + # Writing instructions to runner script + with open(tmp_script_name, "w") as tmp_script: + + verbose_msg("Writing o2 instructions to", f"'{tmp_script_name}'") + + def write_instructions(instructions, n=1, check_status=False): + verbose_msg("--\t", instructions.strip()) + tmp_script.write(f"{instructions}" + "".join(["\n"]*n)) + if check_status: + tmp_script.write("\nReturnValue=$?\n") + tmp_script.write("if [[ $ReturnValue != 0 ]]; then\n") + tmp_script.write(" echo \"Encountered error with command: '") + tmp_script.write(instructions.replace("\"", "\\\"").strip()) + tmp_script.write("'\"\n") + tmp_script.write(" exit $ReturnValue\n") + tmp_script.write("fi\n\n") + + write_instructions(f"#!/bin/bash", n=2) + # Move to run dir + write_instructions(f"cd {output_path} || exit 1", n=2) + # Print run dir + write_instructions(f"pwd", n=2) + write_instructions(f"echo Running \"$0\"", n=2) + + def get_tagged_output_file(output_file_name): + return output_file_name.replace(".root", f"_{tag}.root") + + for i in output_files: # Removing old output + write_instructions(f"[ -f {i} ] && rm -v {i} 2>&1") + i = get_tagged_output_file(i) + if resume_previous_analysis: + write_instructions( + f"[ -f {i} ] && echo 'file {i} already present, continuing' && exit 0") + else: + write_instructions(f"[ -f {i} ] && rm -v {i} 2>&1") + write_instructions("") + + o2_workflow = "" + for 
i in o2_analyses: + line = f"{i} {o2_arguments}" + if i == o2_analyses[0]: + line += f" --aod-file {input_file}" + if dpl_configuration_file is not None: + line += f" --configuration json://{os.path.normpath(dpl_configuration_file)}" + if len(o2_analyses) > 1 and i != o2_analyses[-1]: + line = f"{line} | \\\n \t" + else: + line = f"{line}" + if line.count("configuration") > 1: + fatal_msg("Cannot have more than one configuration") + o2_workflow += line + + write_instructions(f"O2Workflow=\"{o2_workflow}\"", n=2) + write_instructions("if [[ -z \"${1}\" ]]; then", n=2) + write_instructions(" echo \"Running: \n \t ${O2Workflow}\"" + f" > {log_file}") + write_instructions(" eval \"${O2Workflow}\"" + f" >> {log_file}", check_status=True) + write_instructions("else") + write_instructions(" eval \"${O2Workflow}\"") + write_instructions("fi") + + # Print run dir + write_instructions("pwd") + + if not allow_errors_in_logs: + for i in ["ERROR", "FATAL", "crash"]: + write_instructions( + f"if grep -q \"\[{i}\]\" {log_file}; then echo \": got some {i}s in '{log_file}'\" && exit 1; fi") + write_instructions("") + + for i in output_files: # renaming output with tag + write_instructions( + f"[ -f {i} ] && mv {i} {get_tagged_output_file(i)} 2>&1") + + write_instructions(f"date", n=2) + write_instructions(f"echo Completed \"$0\"", n=2) + + write_instructions("\nexit 0") + return tmp_script_name + + +do_bash_script = False +bash_parallel_jobs = 1 + + +def run_o2_analysis(tmp_script_name, + remove_tmp_script=False, + explore_bad_files=False, + time_it=True): + global number_of_runs + verbose_msg("> starting run with", tmp_script_name) + cmd = f"bash {tmp_script_name}" + if do_bash_script: + if 1: + with open("listofscripts.sh", "a") as fout: + fout.write(f"{tmp_script_name}\n") + else: + with open("parallelbash.sh", "a") as fout: + with open("parallelbash.sh", "r") as fin: + lastline = fin.readlines()[-1] + if lastline.startswith("#"): + lastline = int(lastline.strip("#")) + else: + 
lastline = 0 + fout.write(f"echo Running {lastline}\n") + fout.write(f"{cmd} &\n") + lastline += 1 + if lastline % (bash_parallel_jobs+1) == 0: + fout.write(f"wait\n") + fout.write(f"\n#{lastline}\n") + + return + + if explore_bad_files: + if run_cmd(cmd, check_status=True, throw_fatal=False, time_it=time_it) == False: + list_name = os.listdir(os.path.dirname(tmp_script_name)) + for i in list_name: + if "ListForRun5Analysis" in i: + list_name = i + break + if type(list_name) != list: + with open(os.path.join(os.path.dirname(tmp_script_name), list_name)) as f: + list_name = [] + for i in f: + list_name.append(i) + warning_msg("Issue when running", + tmp_script_name, "with", list_name) + else: + run_cmd(cmd, log_file=f"{tmp_script_name}.log", time_it=time_it) + if remove_tmp_script: + os.remove(tmp_script_name) + verbose_msg("< end run with", tmp_script_name) + return tmp_script_name + + +analyses = {} # List of all known analyses, taken from configuration file + + +def main(mode, + input_file, + out_path, + out_tag="", + batch_size=4, + n_max_files=100, + dpl_configuration_file=None, + njobs=1, + merge_output=True, + merge_only=False, + shm_mem_size=16000000000, + rate_lim=1000000000, + readers=1, + avoid_overwriting_merge=False, + clean_localhost_after_running=True, + extra_arguments="", + resume_previous_analysis=False, + check_input_file_integrity=True, + analysis_timeout=None, + linearize_single_core=True): + if do_bash_script: + njobs = 1 + linearize_single_core = True + + if len(input_file) == 1: + input_file = input_file[0] + elif n_max_files >= 0: + input_file = input_file[0:n_max_files] + + if not merge_only: + msg("Running", f"'{mode}'", "analysis on", + f"'{input_file}'", color=bcolors.BOKBLUE) + msg("Maximum", n_max_files, "files with batch size", + batch_size, "and", njobs, "jobs" if njobs > 1 else "job", color=bcolors.BOKBLUE) + else: + msg("Merging output of", f"'{mode}'", + "analysis", color=bcolors.BOKBLUE) + if analysis_timeout is not None: + 
msg("Using analysis timeout of", analysis_timeout, + "seconds", color=bcolors.BOKBLUE) + analysis_timeout = f"--time-limit {analysis_timeout}" + else: + analysis_timeout = "" + + o2_arguments = f"-b --shm-segment-size {shm_mem_size} --aod-memory-rate-limit {rate_lim} --readers {readers} {analysis_timeout}" + o2_arguments += extra_arguments + if mode not in analyses: + raise ValueError("Did not find analyses matching mode", + mode, ", please choose in", ", ".join(analyses.keys())) + an = analyses[mode] + tag = mode + out_tag + # Build input file list + input_file_list = [] + + def is_root_file_sane(file_name_to_check): + file_name_to_check = file_name_to_check.strip() + if not os.path.isfile(file_name_to_check): + warning_msg("File", file_name_to_check, "does not exist") + return "Does not exist" + file_to_check = TFile(file_name_to_check, "READ") + if not file_to_check.IsOpen(): + warning_msg("Cannot open AOD file:", file_name_to_check) + return "Cannot be open" + elif file_to_check.TestBit(TFile.kRecovered): + verbose_msg(file_name_to_check, "was a recovered file") + return "Was recovered" + else: + verbose_msg(file_name_to_check, "is OK") + return "Is Ok" + + def build_list_of_files(file_list): + verbose_msg("Building list of files from", file_list) + # Check that runlist does not have duplicates + unique_file_list = set(file_list) + if len(file_list) != len(unique_file_list): + # for i in file_list + fatal_msg("Runlist has duplicated entries, fix runlist!", + len(unique_file_list), "unique files, while got", len(file_list), "files") + file_status = {"Does not exist": [], + "Cannot be open": [], + "Was recovered": [], + "Is Ok": []} + if check_input_file_integrity: # Check that input files can be open + for i in file_list: + verbose_msg("Checking that TFile", + i.strip(), "can be processed") + file_status[is_root_file_sane(i)] = i + recovered_files = file_status["Was recovered"] + not_readable = [] + for i in file_status: + if i == "Is Ok": + continue + 
not_readable += file_status[i] + if len(recovered_files) > 0: + msg("Recovered", len(recovered_files), + "files:\n", ) + if len(not_readable) > 0: + warning_msg(len(not_readable), "over", len(file_list), + "files cannot be read and will be skipped") + for i in not_readable: + if i not in file_list: + warning_msg("did not find file to remove", f"'{i}'") + file_list.remove(i) + + files_per_batch = [] + iter_file_list = iter(file_list) + for i in range(0, len(file_list)): + sub_set = list(islice(iter_file_list, batch_size)) + if len(sub_set) <= 0: + continue + files_per_batch.append(sub_set) + run_list = [] + if len(files_per_batch) > 0: + for i, lines in enumerate(files_per_batch): + p = os.path.join(out_path, f"{i}") + if not os.path.isdir(p): + os.makedirs(p) + run_list.append(os.path.join( + p, f"ListForRun5Analysis.{i}.txt")) + with open(run_list[-1], "w") as f: + for j in lines: + f.write(j.strip() + "\n") + msg("Number of runs:", len(run_list)) + return run_list + + if type(input_file) is list: + input_file = [os.path.join(os.getcwd(), i) for i in input_file] + input_file_list = build_list_of_files(input_file) + elif not input_file.endswith(".root"): + with open(input_file, "r") as f: + lines = f.readlines() + msg("Building input list from", len(lines), + "inputs, limiting to", n_max_files) + if len(lines) > n_max_files and n_max_files > 0: + lines = lines[0:n_max_files] + lines = [os.path.join(os.path.dirname(os.path.abspath(input_file)), i) + for i in lines] + input_file_list = build_list_of_files(lines) + else: + input_file_list = [os.path.join(os.getcwd(), input_file)] + + if dpl_configuration_file is not None: + dpl_configuration_file = os.path.join(os.getcwd(), + dpl_configuration_file) + + run_list = [] + for i, j in enumerate(input_file_list): + run_list.append(set_o2_analysis(an, + o2_arguments=o2_arguments, + input_file=j, + tag=tag, + dpl_configuration_file=dpl_configuration_file, + resume_previous_analysis=resume_previous_analysis, + 
write_runner_script=not merge_only)) + if not merge_only: + if do_bash_script: + with open("listofscripts.sh", "w") as f: + pass + with open("parallelbash.sh", "w") as f: + f.write(f"#!/bin/bash\n\n") + f.write(f"# To repeat this script run\n\n") + f.write("# `{}`\n\n".format(" ".join(argv))) + f.write(f"echo \"Start running\"\n\n") + f.write(f"date\n\n") + f.write("""function trap_ctrlc (){ + # perform cleanup here + """) + f.write(""" echo "Ctrl-C caught...performing clean up"\n""") + for i in an: + i = i.split(" ")[0] + i = i.strip() + if "o2-analysis-" not in i: + continue + f.write( + f" killall -9 -u $(whoami) {i}\n") + f.write(""" + exit 2 + }\n\n""") + f.write("""trap "trap_ctrlc" 2\n\n\n""") + f.write(f"time parallel -j {bash_parallel_jobs} -a listofscripts.sh bash\n") + run_in_parallel(processes=njobs, job_runner=run_o2_analysis, + job_arguments=run_list, job_message=f"Running analysis, it's {datetime.datetime.now()}", + linearize_single_core=linearize_single_core) + if do_bash_script: + with open("parallelbash.sh", "a") as f: + f.write(f"\n\nwait\n\n") + f.write(f"date\n\n") + if 1: + f.write(f"echo now merging!\n\n") + merge_cmd = " ".join(argv) + f.write(f"{merge_cmd} --merge-only \n\n") + f.write(f"date\n\n") + os.popen("chmod +x parallelbash.sh") + msg("Now run bash script `./parallelbash.sh`") + return + if clean_localhost_after_running: + run_cmd( + "find /tmp/ -maxdepth 1 -name localhost* -user $(whoami) | xargs rm -v 2>&1", + check_status=False) + + if (merge_output or merge_only) and len(run_list) > 1: + files_to_merge = [] + for i in input_file_list: + p = os.path.dirname(os.path.abspath(i)) + for j in os.listdir(p): + if j.endswith(f"_{tag}.root"): + files_to_merge.append(os.path.join(p, j)) + if len(files_to_merge) == 0: + warning_msg("Did not find any file to merge for tag", tag) + return + files_per_type = {} # List of files to be merged per type + # List of files to be merged per type that are not declared sane + non_sane_files_per_type = 
{} + for i in files_to_merge: + if is_root_file_sane(i) != "Is Ok": + non_sane_files_per_type[fn].setdefault(fn, []).append(i) + warning_msg("Result file", i, "is not sane") + continue + fn = os.path.basename(i) + files_per_type.setdefault(fn, []) + files_per_type[fn].append(i) + for i in non_sane_files_per_type: + warning_msg("Non sane files for type", i) + for j in non_sane_files_per_type[i]: + msg(j) + merged_files = [] + for i in files_per_type: + merged_file = os.path.join(out_path, i) + if avoid_overwriting_merge and os.path.isfile(merged_file): + warning_msg("file", merged_file, + "is already found, remove it before merging, you can use the --mergeonly flag to avoid running the analysis again") + continue + merged_files.append(merged_file) + merge_file_list = os.path.join(os.path.dirname(os.path.abspath(merged_file)), + "tomerge_" + "".join(i.split(".")[:-1])+".txt") + verbose_msg("List of files to be merged:", merge_file_list) + with open(merge_file_list, "w") as fmerge: + for j in files_per_type[i]: + fmerge.write(j+"\n") + if len(files_per_type[i]) > len(run_list): + fatal_msg("Trying to merge too many files of type", i, "for tag", tag, ":", + len(files_per_type[i]), "vs", len(run_list), "runs") + msg("Merging", len(files_per_type[i]), "files to", merged_file) + run_cmd(f"hadd -j {njobs} -f {merged_file} `cat {merge_file_list}`", + log_file=merge_file_list.replace(".txt", ".log"), time_it=True, comment=f"Merging to {merged_file}") + if len(merged_files) == 0: + warning_msg("Merged no files") + else: + m = [os.path.abspath(i) for i in merged_files] + msg("Merging completed, merged:", *m, + color=bcolors.BOKGREEN) + + +if __name__ == "__main__": + parser = get_default_parser(description="Runner for O2 analyses") + parser.add_argument("modes", + type=str, + nargs="+", + help="Running modes, as defined in the input configuration file") + parser.add_argument("--input", "-i", + type=str, + nargs="+", + default=["listfiles.txt"], + help="Input file, can be in 
form of a list of AODs or a list of text files with the list of AODs") + parser.add_argument("--out_path", "-o", + type=str, + default="AnalysisResults", + help="Output path") + parser.add_argument("--tag", "-t", + type=str, + default="", + help="Tag for output files") + parser.add_argument("--timeout", "-T", + type=int, + default=None, + help="Timeout to give to the analyses. If negative no timeout is used") + parser.add_argument("--batch-size", "-B", + type=int, + default=1, + help="Size of the batch of files to analyze for multiple threads") + parser.add_argument("--max-files", "-M", + type=int, + default=-1, + help="Maximum files to process") + parser.add_argument("--configuration", "--dpl", "-D", + type=str, + default=None, + help="Name of the dpl configuration file e.g. dpl-config_std.json") + parser.add_argument("--workflows", "-w", + type=str, + nargs="+", + default=[os.path.join(os.path.dirname(os.path.abspath(__file__)), + "o2_analysis_workflows.ini")], + help="Configuration file with all the known workflows") + parser.add_argument("--readers", "-r", + default=1, type=int, + help="Number of parallel readers") + parser.add_argument("--mem", "-m", + default=16000000000, type=int, + help="Size of the shared memory to allocate") + parser.add_argument("--extra_arguments", "-e", + default="", type=str, + help="Extra arguments to feed to the workflow") + parser.add_argument("--no_merge", "--no_merge_output", "--no_merge-output", "--nomerge", + action="store_true", help="Flag to merge the output files into one") + parser.add_argument("--avoid_overwriting_merge", "--no_overwrite", "-a", + action="store_true", help="Flag to check that the old merged files are not overwritten") + parser.add_argument("--merge_only", "--merge-only", "--mergeonly", + action="store_true", help="Flag avoid running the analysis and to merge the output files into one") + parser.add_argument("--show", "-s", + action="store_true", help="Flag to show the workflow of the current tag") + 
parser.add_argument("--no_clean", "--noclean", "-nc", + action="store_true", help="Flag to avoid cleaning the localhost files after running") + parser.add_argument("--do_bash_script", "-P", + action="store_true", help="Flag to create a bash script that runs all the tasks in cascade") + parser.add_argument("--resume_previous_analysis", "--continue_analysis", "--resume_analysis", "--continue", + action="store_true", + help="Flag to continue the analysis from the input files that have been already built and not overwriting the output results") + parser.add_argument("--dont_check_input_integrity", "--no_check_input_integrity", "--NC", "--nocheck", + action="store_true", + help="Flag to avoid checking the input file integrity so as to gain time") + args = parser.parse_args() + set_verbose_mode(args) + + # Set bash script mode + do_bash_script = args.do_bash_script + bash_parallel_jobs = args.njobs + + # Load analysis workflows + workflows = configparser.RawConfigParser() + msg("Analysis configuration from", args.workflows) + for i in args.workflows: + if not os.path.isfile(i): + fatal_msg(f"Did not fid configuration file '{i}'") + workflows.read(i) + for i in workflows.sections(): + full_workflow = workflows.get(i, "w").split("\n") + analyses[i] = full_workflow + if "|" in full_workflow: + fatal_msg("`|` present in workflow", i) + if len(analyses[i]) == 0: + fatal_msg("Empty workflow for analysis", i) + + for i in args.modes: + if i not in analyses.keys(): + fatal_msg("Analysis", i, "not in", + " ".join(workflows.sections()), "from configuration files:", args.workflows) + if args.show: + msg(i, "workflow:") + for j in enumerate(analyses[i]): + msg(" - ", *j) + main(mode=i, + input_file=args.input, + dpl_configuration_file=args.configuration, + batch_size=args.batch_size, + n_max_files=args.max_files, + njobs=args.njobs, + out_tag=args.tag, + merge_output=not args.no_merge, + out_path=args.out_path, + merge_only=args.merge_only, + readers=args.readers, + 
extra_arguments=args.extra_arguments, + avoid_overwriting_merge=args.avoid_overwriting_merge, + shm_mem_size=args.mem, + clean_localhost_after_running=not args.no_clean, + resume_previous_analysis=args.resume_previous_analysis, + check_input_file_integrity=not args.dont_check_input_integrity, + analysis_timeout=args.timeout) + + print_all_warnings() diff --git a/examples/scripts/dpl-config_std.json b/examples/scripts/diagnostic_tools/dpl-config_std.json similarity index 69% rename from examples/scripts/dpl-config_std.json rename to examples/scripts/diagnostic_tools/dpl-config_std.json index 18ad271..be77c5a 100644 --- a/examples/scripts/dpl-config_std.json +++ b/examples/scripts/diagnostic_tools/dpl-config_std.json @@ -1,40 +1,40 @@ { "internal-dpl-clock": "", "internal-dpl-aod-reader": { - "aod-file": "AODRun5Tot.root", + "aod-file": "@listfiles.txt", "start-value-enumeration": "0", "end-value-enumeration": "-1", "step-value-enumeration": "1" }, - "produce-sel-track": { + "hf-produce-sel-track": { "dovalplots": "true", "ptmintrack": "0.3", - "d_tpcnclsfound": "-9999", - "dcatoprimxymin": 0.03 + "d_tpcnclsfound": "-999999", + "dcatoprimxymin": 0.02 }, - "vertexerhf-hftrackindexskimscreator": { + "hf-track-index-skims-creator": { "dovalplots": "true", "triggerindex": "-1", "do3prong": "0", - "d_bz": "2", + "d_bz": "5.", "b_propdca": "true", "d_maxr": "200", "d_maxdzini": "4", "d_minparamchange": "0.001", "d_minrelchi2change": "0.90000000000000002", - "d_minmassDp": 1.5, - "d_maxmassDp": 2.1 + "d_minmassDp": 1.7, + "d_maxmassDp": 2.05 }, - "vertexerhf-hfcandcreator2prong": { + "hf-cand-creator-2prong": { "dovalplots": "true", - "d_bz": "2", + "d_bz": "5.", "b_propdca": "true", "d_maxr": "200", "d_maxdzini": "4", "d_minparamchange": "0.001", "d_minrelchi2change": "0.90000000000000002" }, - "hf-taskdzero": "", + "hf-task-d0": "", "internal-dpl-aod-writer": { } } diff --git a/examples/scripts/diagnostic_tools/grid_downloader.py 
b/examples/scripts/diagnostic_tools/grid_downloader.py new file mode 100755 index 0000000..b2b01d4 --- /dev/null +++ b/examples/scripts/diagnostic_tools/grid_downloader.py @@ -0,0 +1,476 @@ +#! /usr/bin/env python3 + +""" +Utility script to download files from grid +Author: Nicolò Jacazio, nicolo.jacazio@cern.ch +""" + + +import os +from os import path +from common import bcolors, msg, run_cmd, run_in_parallel, verbose_msg, get_default_parser, set_verbose_mode, warning_msg +import getpass +import datetime +import inspect +from ROOT import TFile + +alienprefix = "alien://" + + +class InputArgument: + default = "" + helper = "" + aliases = [] + thistype = str + nargs = "?" + + def __init__(self, default, helper="", aliases=[], thistype=str, nargs="?"): + self.default = default + self.helper = helper + self.aliases = aliases + if type(self.aliases) is not list: + self.aliases = [self.aliases] + self.thistype = thistype + self.nargs = nargs + + def print(self): + print(f"default = {self.default}", + f"helper = {self.helper}", + f"aliases = {self.aliases}", + f"thistype = {self.thistype}", + f"nargs = {self.nargs}") + + +def print_now(): + now = datetime.datetime.now() + msg("- Current date and time:", str(now), color=bcolors.OKBLUE) + + +def listfiles(Path=None, + What=InputArgument("AO2D.root", + "Name of the file to look for", "-w"), + MakeXML=False, + MustHave=InputArgument(None, + "String that must be in good files path", [ + "-m"], + nargs="+"), + MustHaveCount=InputArgument(1, + "How many times the MustHave string must be present", + ["-nm"], thistype=int), + MustNotHave=InputArgument(None, + "String that must not be in good files path", ["-M"], + nargs="+"), + MustNotHaveCount=InputArgument(1, + "How many times the MustHave string must be present", + ["-NM"], thistype=int), + SubDirs="", + User=None, + MainPath=""): + """ + Lists the content of the path given in input. + Puts the content to file if required. 
+ Can also form the output in the xml format so as to run on grid, this is done if the output filename has the xml extension. + """ + verbose_msg("Listing files", What, "in path", Path) + if Path is None or Path == "": + raise ValueError("Passed empty path", Path) + if User is None: + User = getpass.getuser() + msg("Getting user:", User) + Path = path.normpath(Path) + msg("Using path:", Path) + PathToScan = path.join(MainPath, User[0], User, Path) + if What == None: + for i in run_cmd("alien_ls {}".format(PathToScan), check_status=False): + print(i) + return + + bashCommand = "alien_find " + # Printing name of output list + if MakeXML: + bashCommand += " -x collection " + bashCommand += "{} {} ".format(PathToScan, What) + bashCommand = bashCommand.strip() + verbose_msg("This is the list of found files:") + list_of_found_files = run_cmd( + bashCommand, print_output=False, check_status=False).split("\n") + FilterList = [] + for i in list_of_found_files: + if not MakeXML and What not in i: + continue + if MustHave is not None: + hasit = True + if type(MustHave) is not list: + raise ValueError("Musthave is not a list!", MustHave) + for e in MustHave: + if e not in i: + hasit = False + if i.count(e) < MustHaveCount: + hasit = False + if not hasit: + msg(f"Discarding line '{i}' as it doesn't have '{MustHave}' {MustHaveCount} times", + color=bcolors.OKBLUE) + continue + if MustNotHave: + if type(MustNotHave) is not list: + if MustNotHave in i: + if i.count(MustNotHave) >= MustNotHaveCount: + msg(f"Discarding line '{i}' as it has '{MustNotHave}' {MustNotHaveCount} times", + color=bcolors.OKBLUE) + continue + else: + doskip = False + for t in MustNotHave: + if t in i and i.count(t) >= MustNotHaveCount: + msg(f"Discarding line '{i}' as it has '{t}' {MustNotHaveCount} times", + color=bcolors.OKBLUE) + doskip = True + break + if doskip: + continue + if SubDirs: + istrip = i.replace(PathToScan, "").strip().strip("/") + verbose_msg(istrip) + istrip = istrip.split("/") + istrip = 
istrip[:-1] + verbose_msg("here:", istrip, len(istrip)) + if len(istrip) != int(SubDirs): + continue + FilterList.append(i) + msg(f"Found {len(FilterList)} files responding to all criteria") + return FilterList + + +def writefiles(FileList="", Outfile=InputArgument("listoffiles.txt", + "Output file", "-o"), + append="Append to output file or create a new one"): + """ + Writes the list of file to the output file given content of the path given in input. + Can also form the output in the xml format so as to run on grid, this is done if the output filename has the xml extension. + """ + # Printing name of output list + msg(f"Output will be into file '{Outfile}'") + # Check on existing list file of this name + if path.isfile(Outfile) and not append: + msg("List file already existing, replace it? (y/[n])") + if "y" not in input(): + return + fw = open(Outfile, "a" if append else "w") + written = 0 + for i in FileList: + fw.writelines(i.strip() + "\n") + written += 1 + msg(f"Written {written} files to {Outfile}") + fw.close() + + +def check_root_file(file_name): + if not file_name.endswith(".root"): + warning_msg("Testing a non root file:", file_name) + return True + if not path.isfile(file_name): + warning_msg("Testing a non existing file:", file_name) + return True + try: + f = TFile(file_name, "READ") + if f.TestBit(TFile.kRecovered): + msg("File", file_name, "was recovered", color=bcolors.WARNING) + return False + if not f.IsOpen(): + msg("File", file_name, "is not open", color=bcolors.WARNING) + return False + except OSError: + msg("Issue when checking file", file_name, color=bcolors.WARNING) + return False + verbose_msg(file_name, "is ok and has size", + os.path.getsize(file_name)*1e-6, "MB") + return True + + +def copyfile(toget="Full path of the file to get", + Version=None, + replace_preexisting=False, + n_retry_root_files=4): + """Copies a file from grid and puts it in the same path as the grid one. + The version lets you choose between old and new alien. 
Versions==None means that it will autoset it""" + toget = toget.strip() + if Version == None: + stream = os.popen("which aliensh 2>/dev/null") + stream = stream.read() + stream = stream.strip() + print(stream) + if "aliensh" in stream: + Version = 0 + else: + Version = 1 + + try: + if "" == toget: + raise ValueError("Empty input") + if "/" not in toget: + raise ValueError("Input has no path") + if "." not in toget or toget.rfind("/") > toget.rfind("."): + raise ValueError("Input has no extension") + if Version == 0: + if alienprefix not in toget: + toget = alienprefix + toget + elif Version == 1: + while toget[0] == ".": + toget = toget[1:] + while "//" in toget: + toget = toget.replace("//", "/") + if toget[0] != "/": + raise ValueError(toget, "does not start with /") + else: + raise ValueError("Version is unknown", Version) + tofile = path.basename(toget) + todir = path.normpath("./" + path.dirname(toget.replace(alienprefix, + ""))) + out_file = path.join(todir, tofile) + verbose_msg( + f" --copyfile: Output dir. 
is '{todir}', file is '{tofile}'") + + if not path.isdir(todir): + msg("Directory '{}' does not exist - creating it".format(todir)) + os.makedirs(todir) + if path.isfile(out_file) and check_root_file(out_file): + if replace_preexisting: + msg("File '{}' already copied, overwriting".format(out_file)) + else: + msg("File '{}' already copied".format(out_file)) + return + + def proceed(handle_exit=True): + msg(f"Downloading '{toget}'", color=bcolors.OKGREEN) + print_now() + if Version == 0: + cpycmd = "alien_cp -v {} file:{}".format(toget, todir) + else: + cpycmd = "alien_cp -v {} file://{}".format(toget, todir) + verbose_msg("Running command", cpycmd) + if handle_exit: + try: + run_cmd(cpycmd) + except KeyboardInterrupt: + return False + else: + run_cmd(cpycmd) + return True + + for i in range(n_retry_root_files): + if not proceed(): + return + if check_root_file(out_file): + break + + except ValueError as err: + msg(err.args, color=bcolors.BWARNING) + msg("Input: " + toget, color=bcolors.BWARNING) + + +def copied(fname="", extra_msg="", last_time=None, check_root_files=True, + ListOfBad=InputArgument("badfiles.txt", + "Name of the file where to write the bad files", "-o")): + """Checks if how many files of a text list were correctly copied from grid to the PC""" + verbose_msg("Checking how many files were copied from from list", fname) + fname = fname.strip() + f = open(fname, "r") + n_to_copy = 0 + n_copied = 0 + not_sane = [] + for line in f: + if "%" in line: + break + if "#" in line: + continue + line = path.normpath("./" + line.strip()) + n_to_copy += 1 + if path.isfile(line): + n_copied += 1 + if check_root_files: + if not check_root_file(line): + msg(f"'{line}' downloaded but with issues", + color=bcolors.WARNING) + not_sane.append(line) + else: + msg(f"'{line}' yet to download", color=bcolors.OKBLUE) + if last_time is not None: + n_copied -= last_time[1] + msg(extra_msg, "downloaded {}/{}, {:.1f}%".format(n_copied, + n_to_copy, 100 * float(n_copied) / 
float(n_to_copy)), + f" -- copied {n_copied} files more, in total copied {last_time[1] + n_copied} files" if last_time is not None else "", f"{len(not_sane)} are not OK" if len(not_sane) > 0 else "") + if ListOfBad is not None and len(not_sane) >= 1: + with open(ListOfBad, "w") as f: + for i in not_sane: + f.write(i + "\n") + + return n_to_copy, n_copied + + +def copylist(fname="", + jobs=InputArgument(1, "Number of parallel jobs to use", ["--njobs", "-j"], int)): + """Takes a text file and downloads the files from grid""" + if jobs is None: + jobs = 1 + verbose_msg("Copying files from list", fname, "with", jobs, "jobs") + fname = path.normpath(fname) + if not path.isfile(fname): + warning_msg("Input file not provided! Aborting") + return + sofar = copied(fname, "So far") + f = open(fname, "r") + Group = [] + for line in f: + if "%" in line: + msg("Character % encountered! Aborting") + break + if "#" in line: + msg("Character # encountered! Skipping") + continue + line = "./" + line + if jobs == 1: + copyfile(line) + else: + Group.append(line) + if jobs > 1: + msg("Copying list in parallel with", jobs, "jobs") + run_in_parallel(processes=jobs, job_runner=copyfile, + job_arguments=Group, job_message="Downloading files", + linearize_single_core=True) + copied(fname, extra_msg="In recent run", last_time=sofar) + + +def merge_aod(in_path="", out_path="./", input_file="AO2D.root", must_have="ctf", bunch_size=50, skip_already_existing=True): + in_path = os.path.normpath(in_path) + out_path = os.path.normpath(out_path) + file_list = [] + for root, dirs, files in os.walk(in_path): + for file in files: + if file == input_file: + to_merge = os.path.abspath(os.path.join(root, file)) + print(to_merge) + if must_have is not None and must_have in to_merge: + file_list.append(to_merge) + verbose_msg("Found", len(file_list), "files called", input_file) + # Divide it in bunches + file_list = [file_list[i:i+bunch_size] + for i in range(0, len(file_list), bunch_size)] + for i in 
enumerate(file_list): + bunch_size = 0 + with open("inputfile.txt", "w") as f: + for j in i[1]: + f.write(f"{j}\n") + bunch_size += os.path.getsize(j) + out_aod = os.path.join(out_path, f"AO2D_{i[0]}.root") + verbose_msg("Merging bunch of", len(i[1]), + "files. I.e.", bunch_size*1e-6, "MB") + if skip_already_existing and os.path.isfile(out_aod): + verbose_msg(out_aod, "already existing, skipping") + continue + tmp_aod = os.path.join(out_path, "MergedAOD.root") + run_cmd( + f"o2-aod-merger --input inputfile.txt --output {tmp_aod} --skip-non-existing-files", + comment=f"Merging AODs into {out_aod}") + os.rename(tmp_aod, out_aod) + merged_size = os.path.getsize(out_aod) + msg("Produced a merged file of", + merged_size*1e-6, "MB from", bunch_size*1e-6, "MB, compression:", merged_size/bunch_size) + + +def main(input_files, + args=None): + if type(input_files) is not list: + input_files = [input_files] + if len(input_files) <= 0: + warning_msg("Passed no input, use: --input_files") + return + if args.command == "listfiles": + for i in input_files: + list_of_files = [] + if os.path.isfile(i): + paths_to_list = [] + with open(i) as fsecondary: + for j in fsecondary: + j = j.strip().strip(" ").strip(",") + if j == "": + continue + for k in j.split(","): + paths_to_list.append(k) + for j in paths_to_list: + list_of_files += listfiles(Path=j, + What=args.what, + MustHave=args.musthave, + MustHaveCount=args.musthavecount, + MustNotHaveCount=args.mustnothavecount, + MustNotHave=args.mustnothave) + else: + list_of_files = listfiles(Path=i, + What=args.what, + MustHave=args.musthave, + MustHaveCount=args.musthavecount, + MustNotHaveCount=args.mustnothavecount, + MustNotHave=args.mustnothave) + append = args.append + do_write_files = args.outfile + if len(list_of_files) > 0 and do_write_files: + writefiles(list_of_files, do_write_files, + append=(i == list_of_files[0]) or append) + elif args.command == "copyfile": + for i in input_files: + copyfile(i) + elif args.command == 
"copylist": + for i in input_files: + copylist(i, jobs=args.jobs) + elif args.command == "copied": + for i in input_files: + print(copied(i)) + elif args.command == "merge_aod": + for i in input_files: + merge_aod(i, + input_file=args.what) + else: + warning_msg("Did not do anything") + + +if __name__ == "__main__": + parser = get_default_parser(description=__doc__, njobs=False) + parser.add_argument("input_files", type=str, # nargs="+", + help="List of files in .txt file or files to download") + # parser.add_argument("--input_files", "--input", "-i", type=str,# nargs="+", + # default=[], + # help="List of files in .txt file or files to download") + subparsers = parser.add_subparsers(dest='command', help='sub-commands') + + def add_subp(fn, g=None): + if g is None: + g = subparsers.add_parser(fn.__name__, help=fn.__doc__) + a = inspect.getfullargspec(fn) + for i, j in enumerate(a.args): + d = a.defaults[i] + # print(fn, i, j, d) + if type(d) is str: + if d == "": + continue + # print("Add argument without defaults") + g.add_argument(f"--{j.lower()}", help=a.defaults[i]) + elif type(d) is InputArgument: + # print("Add argument", j, "with defaults") + g.add_argument(f"--{j.lower()}", + *d.aliases, help=d.helper, + default=d.default, + type=d.thistype, + nargs=d.nargs) + return g + + gl = add_subp(listfiles) + add_subp(writefiles, gl) + add_subp(copyfile) + add_subp(copylist) + add_subp(copied) + add_subp(merge_aod) + + args = parser.parse_args() + + set_verbose_mode(args) + main(args.input_files, + args=args) diff --git a/examples/scripts/diagnostic_tools/inspect_hepmc.py b/examples/scripts/diagnostic_tools/inspect_hepmc.py new file mode 100755 index 0000000..ff9cb84 --- /dev/null +++ b/examples/scripts/diagnostic_tools/inspect_hepmc.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 + +""" +Inspector of the HepMC file +Author: Nicolò Jacazio, nicolo.jacazio@cern.ch +""" + +import pyhepmc_ng as hep +import argparse + + +poi = {} +poi_names = {2212: "Proton", -2212: 
"AntiProton", + 2112: "Neutron", -2112: "AntiNeutron", + 3122: "Lambda0", 1000010020: "Deuteron", + 1000010030: "Triton", 1000020040: "Alpha", + 1000020030: "Helium3"} + +for i in poi_names: + poi[i] = 0 + + +def main(file_name, min_event, max_event, verbose): + print("Reading", file_name, "between", + min_event, "and", max_event, "events") + + def print_evt(evt): + def msg(*m): + if verbose: + print(*m) + msg("event_number:", evt.event_number) + msg("Units:", "momentum_unit:", evt.momentum_unit, + "length_unit:", evt.length_unit) + msg(len(evt.particles), "particles:") + for i in enumerate(evt.particles): + pdg = i[1].pid + if pdg in poi: + poi[pdg] = poi[pdg]+1 + pdg = f"{pdg} is of interest!!!" + msg(i, "PDG code", pdg) + msg(len(evt.vertices), "vertices:") + for i in enumerate(evt.vertices): + msg("Vertex:", i) + vertex_pdgs = [] + msg("Input particles") + for j in i[1].particles_in: + msg("\t", j, "pdg", j.pid) + msg("Output particles") + for j in i[1].particles_out: + msg("\t", j, "pdg", j.pid) + vertex_pdgs.append(j.pid) + if 2212 in vertex_pdgs and 2112 in vertex_pdgs: + print(evt.event_number, "Has both") + print(i) + for j in i[1].particles_out: + print(j) + + with hep.open(file_name) as f: + while True: + e = f.read() + if not e: + break + if e.event_number < min_event: + continue + print_evt(e) + if e.event_number >= max_event: + break + for i in poi: + print("Number of", poi_names[i]+"s", poi[i]) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("hepmcfile", type=str, + help="Input hepmc file.") + parser.add_argument("--start", type=int, default=0, + help="Start of the event counter.") + parser.add_argument("--stop", type=int, default=100, + help="Stop of the event counter.") + parser.add_argument("-v", action="store_true", + help="Verbose mode.") + args = parser.parse_args() + main(args.hepmcfile, min_event=args.start, + max_event=args.stop, verbose=args.v) diff --git 
a/examples/scripts/diagnostic_tools/is_aod_sane.py b/examples/scripts/diagnostic_tools/is_aod_sane.py new file mode 100755 index 0000000..aa85647 --- /dev/null +++ b/examples/scripts/diagnostic_tools/is_aod_sane.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 + +""" +Script to check if an AOD is sane and can be used for analysis or not +Author: Nicolò Jacazio, nicolo.jacazio@cern.ch +""" + +from common import get_default_parser, msg, print_all_warnings, run_in_parallel, verbose_msg, warning_msg, set_verbose_mode +from ROOT import TFile +import multiprocessing +import os + +bad_files = multiprocessing.Manager().dict() + + +def main(input_file_name="/tmp/AO2D.root", verbose=False): + global bad_files + verbose_msg("Checking file", input_file_name) + input_file = TFile(input_file_name, "READ") + if verbose: + input_file.ls() + list_of_keys = input_file.GetListOfKeys() + + def inspect(name, tree_name): + tree_name = f"{name}/{tree_name}" + t = input_file.Get(tree_name) + if not t: + warning_msg("Did not get tree", tree_name) + return -1 + if verbose: + input_file.Get(name).ls() + verbose_msg(tree_name, t.GetEntries()) + return t.GetEntries() + + for df_index, i in enumerate(list_of_keys): + if i.GetName() == "metaData": + continue + + def add_bad(): + # print(i.GetName()) + bad_files.setdefault(input_file_name, []).append(i.GetName()) + + dictionary_of_counts = {"O2bc": None, + "O2collision": None, + "O2track": None, + "O2trackcov": None, + "O2trackextra": None} + for j in dictionary_of_counts: + dictionary_of_counts[j] = inspect(i.GetName(), j) + if dictionary_of_counts[j] < 0: + add_bad() + + def must_be_same(*args): + counts = [] + names = [] + for k in args: + counts.append(dictionary_of_counts[k]) + names.append(k) + if len(set(counts)) != 1: + add_bad() + warning_msg("Did not get equal counts for", ", ".join(names), + counts, "in DF", df_index, "/", len(list_of_keys), ":", i.GetName()) + must_be_same("O2track", "O2trackcov", "O2trackextra") + + +if __name__ == 
"__main__": + parser = get_default_parser(description=__doc__) + parser.add_argument("input_files", + type=str, + nargs="+", + help="Input files to check") + parser.add_argument("--output", "-o", + type=str, + default=None, + help="Output file with good files only") + args = parser.parse_args() + set_verbose_mode(args) + + input_files = [] + for i in args.input_files: + i = os.path.normpath(i) + if i.endswith(".root"): + input_files.append(i) + elif i.endswith(".txt"): + with open(i, "r") as f: + for j in f: + j = j.strip() + input_files.append(os.path.join(os.path.abspath(os.path.dirname(i)), + os.path.normpath(j))) + + run_in_parallel(args.njobs, main, input_files, + "Checking file", linearize_single_core=True) + if len(bad_files) > 0: + warning_msg("There were", len(bad_files), "bad files") + for i in bad_files: + msg(i) + + if args.output is not None: + msg("Writing good files to", args.output) + with open(args.output, "w") as f: + for i in input_files: + if not i in bad_files: + f.write(i+"\n") diff --git a/examples/scripts/diagnostic_tools/merge_aods.py b/examples/scripts/diagnostic_tools/merge_aods.py new file mode 100755 index 0000000..261fc54 --- /dev/null +++ b/examples/scripts/diagnostic_tools/merge_aods.py @@ -0,0 +1,122 @@ +#!/usr/bin/env python3 + + +""" +Script to scan paths and merge AODs +Author: Nicolò Jacazio, nicolo.jacazio@cern.ch +""" + +from common import fatal_msg, get_default_parser, run_cmd, run_in_parallel, set_verbose_mode, verbose_msg, warning_msg, msg +import os +import multiprocessing + +bunched_aod_names = multiprocessing.Manager().dict() + + +def run_merge(input_list_name): + out_aod = bunched_aod_names[input_list_name]["out_aod"] + file_index = bunched_aod_names[input_list_name]["file_index"] + total_files = bunched_aod_names[input_list_name]["total_files"] + input_size = bunched_aod_names[input_list_name]["input_size"] + run_cmd(f"o2-aod-merger --input {input_list_name} --output {out_aod}", + time_it=True, print_output=False) + 
msg(f"Merged #{file_index}/{total_files-1} ({input_size} MB) to", out_aod, + os.path.getsize(out_aod)*1E-6, "MB") + + +def main(input_files, do_merge=True, + sanity_file=None, max_bunch_size=200, + out_path="./", over_write_lists=False, jobs=1): + msg("Merging to", out_path, "with maximum input size", max_bunch_size) + out_path = os.path.normpath(out_path) + if not os.path.exists(out_path): + warning_msg("Output path", out_path, "does not exist") + ans = input("Create it? (Y/[N])") + if ans == "Y": + os.makedirs(out_path) + else: + msg("Exit") + return + sane_files = None + if sanity_file is not None: + msg("Using sanity file", sanity_file) + sane_files = [] + with open(sanity_file, "r") as f: + for i in f: + sane_files.append(os.path.abspath(os.path.normpath(i.strip()))) + size_of_files = {} + for i in input_files: + i = os.path.normpath(i.strip()) + if sane_files is not None and os.path.abspath(i) not in sane_files: + msg("Skipping", i, "because not in sanity file") + continue + size_of_files[i] = os.path.getsize(i)*1e-6 + bunched_files = [[]] + bunched_sizes = [] + bunch_size = [] + for i in size_of_files: + verbose_msg("Checking file", i, "of size", size_of_files[i], "MB") + if sum(bunch_size) > max_bunch_size: + verbose_msg("Bunch size", sum(bunch_size), "reached limit with", + len(bunch_size), "files", + max_bunch_size, "MB", + "preparing next bunch!") + bunched_files.append([]) + bunched_sizes.append(sum(bunch_size)) + bunch_size = [] + bunch_size.append(size_of_files[i]) + bunched_files[-1].append(i) + bunched_sizes.append(sum(bunch_size)) + verbose_msg("Got", len(bunched_files), "bunches") + for i, j in enumerate(bunched_files): + verbose_msg(f"{i})", bunched_sizes[i], "MB, with", len(j), j) + + msg("Preparing", len(bunched_files), "bunched lists") + bunched_aod_names.clear() + for i, j in enumerate(bunched_files): + fn = f"aod_merge_list_bunch{i}.txt" + verbose_msg("Writing bunch", i, "to", fn) + if not over_write_lists: + if os.path.isfile(fn): + 
fatal_msg(fn, "already present, remove it first") + with open(fn, "w") as f: + for k in j: + f.write(k+"\n") + if do_merge: + out_aod = os.path.join(out_path, f"AO2D_Merge_{i}.root") + if os.path.isfile(out_aod): + fatal_msg(out_aod, "already present") + bunched_aod_names[fn] = {"out_aod": out_aod, "file_index": i, + "total_files": len(bunched_files), "input_size": bunched_sizes[i]} + + run_in_parallel(jobs, run_merge, list(bunched_aod_names.keys()), job_message="Running AOD merging", + linearize_single_core=True) + + +if __name__ == "__main__": + parser = get_default_parser(__doc__) + parser.add_argument("input_files", + type=str, + nargs="+", + help="Input files to merge") + parser.add_argument("--max_bunch_size", "--max", "-m", + default=1000, + type=float, + help="Approximate maximum size of the bunch to merge in MB") + parser.add_argument("--output_path", "-o", + default="./", + type=str, + help="Output path for merged AODs") + parser.add_argument("--sanity_file", "-s", + default=None, + type=str, + help="Sanity file with the files to filter") + parser.add_argument("--overwrite", + action="store_true", help="Flag to overwrite the lists of files that are to be merged") + + args = parser.parse_args() + set_verbose_mode(args) + + main(args.input_files, max_bunch_size=args.max_bunch_size, + out_path=args.output_path, sanity_file=args.sanity_file, + over_write_lists=args.overwrite, jobs=args.njobs) diff --git a/examples/scripts/diagnostic_tools/o2_analysis_workflows.ini b/examples/scripts/diagnostic_tools/o2_analysis_workflows.ini new file mode 100644 index 0000000..f4687d7 --- /dev/null +++ b/examples/scripts/diagnostic_tools/o2_analysis_workflows.ini @@ -0,0 +1,79 @@ +[DEFAULT] +w = + +[TrackQA] +w = o2-analysis-qa-event-track + o2-analysis-qa-efficiency --make-eff 1 --eff-pi 1 --eff-el 1 --eff-ka 1 --eff-pr 1 --eta-min -0.8 --eta-max 0.8 + o2-analysis-trackextension + o2-analysis-alice3-trackselection + +[TOFQA] +w = o2-analysis-alice3-pid-tof --add-qa 1 + 
o2-analysis-pid-tof-beta --add-qa 1 + o2-analysis-alice3-trackselection + o2-analysis-alice3-trackextension + +[TOFQAMC] +w = o2-analysis-alice3-pid-tof --add-qa 1 + o2-analysis-pid-tof-beta --add-qa 1 + o2-analysis-alice3-trackselection + o2-analysis-alice3-trackextension + o2-analysis-pid-tof-qa-mc --qa-nuclei 1 + +[TOF] +w = o2-analysis-spectra-tof + o2-analysis-alice3-pid-tof --add-qa 1 + o2-analysis-pid-tof-beta --add-qa 1 + o2-analysis-alice3-trackselection + o2-analysis-alice3-trackextension + +[RICH] +w = o2-analysis-alice3-pid-rich-qa --minEta -4 --maxEta 4 + o2-analysis-alice3-pid-tof + +[RICHSmallEta] +w = o2-analysis-alice3-pid-rich-qa --minEta -0.25 --maxEta 0.25 + o2-analysis-alice3-pid-tof + +[RICHLargeEta] +w = o2-analysis-alice3-pid-rich-qa --minEta 0.75 --maxEta 1.25 + o2-analysis-alice3-pid-tof + +[Efficiency] +w = o2-analysis-mc-spectra-efficiency + o2-analysis-alice3-trackextension + o2-analysis-alice3-trackselection + +[TPC] +w = o2-analysis-pid-tpc --add-qa 1 + +[TreeD0] +w = o2-analysis-hf-tree-creator-d0-tokpi --aod-writer-keep AOD/HFCANDP2Full/0,AOD/HFCANDP2FullE/0,AOD/HFCANDP2FullP/0 + o2-analysis-pid-tpc + o2-analysis-pid-tof + o2-analysis-hf-candidate-creator-2prong --doMC + o2-analysis-hf-track-index-skims-creator + o2-analysis-hf-d0-candidate-selector + +[TreeLC] +w = o2-analysis-hf-tree-creator-lc-topkpi --aod-writer-keep AOD/HFCANDP3Full/0,AOD/HFCANDP3FullE/0,AOD/HFCANDP3FullP/0 + o2-analysis-pid-tpc + o2-analysis-pid-tof + o2-analysis-hf-candidate-creator-2prong --doMC + o2-analysis-hf-track-index-skims-creator + o2-analysis-hf-d0-candidate-selector + +[He3] +w = o2-analysis-qa-event-track --pdgCodeSel 1000020030 + o2-analysis-trackextension + o2-analysis-alice3-trackselection + +[LUT] +w = o2-analysis-alice3-lutmaker + +[DATA] +w = o2-analysis-trackextension --configuration json:///tmp/PilotBeamAODs/use.json + o2-analysis-trackselection --isRun3 + o2-analysis-pid-tof-full-run3 --add-qa 1 + o2-analysis-pid-tpc-full --add-qa 1 
--logAxis 1 --minP 0.01 + o2-analysis-qa-efficiency --log-pt 1 --make-eff 1 --eta-min -1 --eta-max 1 --pt-min 1 --configuration json:///tmp/PilotBeamAODs/use.json diff --git a/examples/scripts/diagnostic_tools/omogenize_output.py b/examples/scripts/diagnostic_tools/omogenize_output.py new file mode 100755 index 0000000..1542f10 --- /dev/null +++ b/examples/scripts/diagnostic_tools/omogenize_output.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 + +""" +Script to split the directories of a file into several ones with the same structure, useful for ML processing +Author: Nicolò Jacazio, nicolo.jacazio@cern.ch +""" + +from multiprocessing import Pool +from ROOT import TFile +import os +import time +import argparse + + +g_verbose = False +g_out_path = None +g_base_dir = None +g_tag_dir = True + + +def split_file(input_file): + processing_time = time.time() + print(" > Processing file", input_file) + if g_verbose: + print("Omogenizing file", f"'{input_file}'", "for ML processing") + f = TFile(input_file, "READ") + if g_verbose: + f.ls() + lk = f.GetListOfKeys() + files_created = 0 + for i in lk: + obj = f.Get(i.GetName()) + if obj.ClassName() == "TDirectoryFile": + in_path = os.path.dirname(input_file) + fout_name = input_file.replace( + ".root", f"_sub{files_created}.root") + if g_tag_dir: + tag = in_path.split("/")[-1] + fout_name = input_file.replace( + ".root", f"_{tag}_sub{files_created}.root") + if g_out_path is not None: + fout_name = os.path.join( + g_out_path, os.path.basename(fout_name)) + if os.path.isfile(fout_name): + raise RuntimeError("File", fout_name, "already there!") + fout = TFile(fout_name, "RECREATE") + if not fout.IsOpen(): + raise RuntimeError("File", fout_name, "is not open!") + if g_verbose: + print("Creating omogenized file to", fout) + files_created += 1 + fout.mkdir(g_base_dir+"0") + fout.cd(g_base_dir+"0") + for j in obj.GetListOfKeys(): + if g_verbose: + print("Writing", j.ClassName(), j) + t = obj.Get(j.GetName()) + if t.ClassName() == 
"TTree": + t.CloneTree().Write() + else: + t.Clone().Write() + if g_verbose: + fout.ls() + fout.Close() + print(" < Processed file", input_file, + "split into", files_created, "files, in", time.time() - processing_time, "seconds") + + +def main(input_files, verbose=True, base_dir="DF_", out_path=None, jobs=10): + global g_verbose + g_verbose = verbose + global g_out_path + g_out_path = out_path + global g_base_dir + g_base_dir = base_dir + + print("Omogenizing", len(input_files), "files") + processing_time = time.time() + with Pool(jobs) as p: + p.map(split_file, input_files) + print("Done, in", time.time() - processing_time, "seconds") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Omogenizer for ML processing") + parser.add_argument("input_files", + type=str, + nargs="+", + help="Input files") + parser.add_argument("--base_dir", + type=str, + default="TF_", + help="Name of the base directory, usually `TF_` or `DF_`") + parser.add_argument("--out_dir", "-o", + type=str, + default=None, + help="Name of the output path, by default it is the same path of the input file") + parser.add_argument("--sub_dir_tag", "-S", + action="store_true", + help="Option to tag files with their directory, useful when processing files with the same name in a path and using an output path") + parser.add_argument("--jobs", "-j", + type=int, + default=10, + help="Number of parallel jobs") + parser.add_argument("-v", + action="store_true", help="Verbose mode") + args = parser.parse_args() + main(args.input_files, verbose=args.v, + out_path=args.out_dir, + base_dir=args.base_dir, jobs=args.jobs) diff --git a/examples/scripts/emcal.sh b/examples/scripts/emcal.sh new file mode 100755 index 0000000..0e38cea --- /dev/null +++ b/examples/scripts/emcal.sh @@ -0,0 +1,65 @@ +#! /usr/bin/env bash + +NJOBS=5 # number of max parallel runs +NRUNS=10 # number of runs +NEVENTS=10000 # number of events in a run + +BFIELD=5. # magnetic field [kG] +EMCRAD=100. 
# EMCAL radius [cm] +EMCLEN=200. # EMCAL half length [cm] +EMCETA=1.443 # EMCAL max pseudorapidity + +### calculate max eta from geometry +EMCETA=`awk -v a=$EMCRAD -v b=$EMCLEN 'BEGIN {th=atan2(a,b)*0.5; sth=sin(th); cth=cos(th); print -log(sth/cth)}'` +echo "maxEta = $EMCETA" + +### copy relevant files in the working directory +cp $DELPHESO2_ROOT/examples/cards/propagate.2kG.photons.tcl propagate.tcl +cp $DELPHESO2_ROOT/examples/pythia8/pythia8_inel.cfg . +cp $DELPHESO2_ROOT/examples/smearing/emcal.C . + +### set magnetic field +sed -i -e "s/set barrel_Bz .*$/set barrel_Bz ${BFIELD}e\-1/" propagate.tcl +sed -i -e "s/double Bz = .*$/double Bz = ${BFIELD}e\-1\;/" emcal.C +### set EMCAL radius +sed -i -e "s/set barrel_Radius .*$/set barrel_Radius ${EMCRAD}e\-2/" propagate.tcl +sed -i -e "s/double emcal_radius = .*$/double emcal_radius = ${EMCRAD}\;/" emcal.C +### set EMCAL length +sed -i -e "s/set barrel_HalfLength .*$/set barrel_HalfLength ${EMCLEN}e\-2/" propagate.tcl +sed -i -e "s/double emcal_length = .*$/double emcal_length = ${EMCLEN}\;/" emcal.C +### set EMCAL acceptance +sed -i -e "s/set barrel_Acceptance .*$/set barrel_Acceptance \{ 0.0 + 1.0 * fabs(eta) < ${EMCETA} \}/" propagate.tcl + +### make sure we are clean to run +rm -rf .running* delphes*.root *.log + +### loop over runs +for I in $(seq 1 $NRUNS); do + + ### wait for a free slot + while [ $(ls .running.* 2> /dev/null | wc -l) -ge $NJOBS ]; do + echo " --- waiting for a free slot" + sleep 1 + done + + ### book the slot + echo " --- starting run $I" + touch .running.$I + + ### copy pythia8 configuration and adjust it + cp pythia8_inel.cfg pythia8.$I.cfg + echo "Main:numberOfEvents $NEVENTS" >> pythia8.$I.cfg + echo "Random:seed = $I" >> pythia8.$I.cfg + + ### run Delphes and analysis + DelphesPythia8 propagate.tcl pythia8.$I.cfg delphes.$I.root &> delphes.$I.log && + root -b -q -l "emcal.C(\"delphes.$I.root\", \"emcal.$I.root\")" &> emcal.$I.log && + rm -rf delphes.$I.root && + rm -rf .running.$I && + 
echo " --- complete run $I" & + +done + +### merge runs when all done +wait +hadd -f emcal.root emcal.*.root && rm -rf emcal.*.root diff --git a/examples/scripts/ftof.sh b/examples/scripts/ftof.sh new file mode 100755 index 0000000..4da5e9d --- /dev/null +++ b/examples/scripts/ftof.sh @@ -0,0 +1,77 @@ +#! /usr/bin/env bash + +NJOBS=5 # number of max parallel runs +NRUNS=10 # number of runs +NEVENTS=10000 # number of events in a run + +BFIELD=5. # magnetic field [kG] +SIGMAT=0.020 # time resolution [ns] +TAILLX=1.0 # tail on left [q] +TAILRX=1.3 # tail on right [q] +TOFRAD=100. # TOF radius [cm] +TOFRADIN=10. # TOF inner radius [cm] +TOFLEN=200. # TOF half length [cm] +TOFETA=1.443 # TOF max pseudorapidity + +### calculate max eta from geometry +TOFETA=`awk -v a=$TOFRADIN -v b=$TOFLEN 'BEGIN {th=atan2(a,b)*0.5; sth=sin(th); cth=cos(th); print -log(sth/cth)}'` +echo "maxEta = $TOFETA" + +### copy relevant files in the working directory +cp $DELPHESO2_ROOT/examples/cards/propagate.2kG.tails.tcl propagate.tcl +cp $DELPHESO2_ROOT/examples/pythia8/pythia8_inel.cfg . +cp $DELPHESO2_ROOT/examples/smearing/ftof.C . 
+ +### set magnetic field +sed -i -e "s/set barrel_Bz .*$/set barrel_Bz ${BFIELD}e\-1/" propagate.tcl +### set TOF radius +sed -i -e "s/set barrel_Radius .*$/set barrel_Radius ${TOFRAD}e\-2/" propagate.tcl +sed -i -e "s/double tof_radius = .*$/double tof_radius = ${TOFRAD}\;/" ftof.C +### set TOF length +sed -i -e "s/set barrel_HalfLength .*$/set barrel_HalfLength ${TOFLEN}e\-2/" propagate.tcl +sed -i -e "s/double tof_length = .*$/double tof_length = ${TOFLEN}\;/" ftof.C +### set TOF acceptance +sed -i -e "s/set barrel_Acceptance .*$/set barrel_Acceptance \{ 0.0 + 1.0 * fabs(eta) < ${TOFETA} \}/" propagate.tcl +### set TOF time resolution and tails +sed -i -e "s/set barrel_TimeResolution .*$/set barrel_TimeResolution ${SIGMAT}e\-9/" propagate.tcl +sed -i -e "s/set barrel_TailRight .*$/set barrel_TailRight ${TAILRX}/" propagate.tcl +sed -i -e "s/set barrel_TailLeft .*$/set barrel_TailLeft ${TAILLX}/" propagate.tcl +sed -i -e "s/double tof_sigmat = .*$/double tof_sigmat = ${SIGMAT}\;/" ftof.C + +### create LUTs +BFIELDT=`awk -v a=$BFIELD 'BEGIN {print a*0.1}'` +$DELPHESO2_ROOT/examples/scripts/create_luts.sh werner $BFIELDT $TOFRAD + +### loop over runs +rm -f .running.* delphes.*.root +for I in $(seq 1 $NRUNS); do + + ### wait for a free slot + while [ $(ls .running.* 2> /dev/null | wc -l) -ge $NJOBS ]; do + echo " --- waiting for a free slot" + sleep 1 + done + + ### book the slot + echo " --- starting run $I" + touch .running.$I + + ### copy pythia8 configuration and adjust it + cp pythia8_inel.cfg pythia8.$I.cfg + echo "Main:numberOfEvents $NEVENTS" >> pythia8.$I.cfg + echo "Random:seed = $I" >> pythia8.$I.cfg + echo "Beams:allowVertexSpread on " >> pythia8.$I.cfg + echo "Beams:sigmaTime 60." 
>> pythia8.$I.cfg + + ### run Delphes and analysis + DelphesPythia8 propagate.tcl pythia8.$I.cfg delphes.$I.root &> delphes.$I.log && + root -b -q -l "ftof.C(\"delphes.$I.root\", \"ftof.$I.root\")" &> ftof.$I.log && + rm -rf delphes.$I.root && + rm -rf .running.$I && + echo " --- complete run $I" & + +done + +### merge runs when all done +wait +hadd -f ftof.root ftof.*.root && rm -rf ftof.*.root diff --git a/examples/scripts/grid/.gitignore b/examples/scripts/grid/.gitignore new file mode 100644 index 0000000..10ddfb3 --- /dev/null +++ b/examples/scripts/grid/.gitignore @@ -0,0 +1,3 @@ +*.jdl +*.sh +*.ini diff --git a/examples/scripts/grid/make_grid_files.py b/examples/scripts/grid/make_grid_files.py new file mode 100755 index 0000000..41a6b68 --- /dev/null +++ b/examples/scripts/grid/make_grid_files.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python3 + +""" +Handler script to prepare the working environment for the grid submission of fast simulation + AOD creation with private jobs +""" + +import argparse +import os +import configparser + + +def run_cmd(cmd): + os.popen(cmd).read() + + +def main(jdl_file, + grid_path, + source_path, + number_of_events, + user_mail, + user, + njobs, + configuration_file, + config_tag, + delphes_version, + o2dpg_version, + make_alien_directory): + """ + Function to create the working environment for GRID job submission + """ + if make_alien_directory: + print("Making directory on alien:", grid_path) + run_cmd(f"alien_mkdir alien://{grid_path}") + run_cmd(f"alien_cp {configuration_file} alien://{grid_path}" + + configuration_file.split("/")[-1]) + parser = configparser.RawConfigParser() + parser.read(configuration_file) + configuration_file = configuration_file.split("/")[-1] + + print("Writing JDL file to", jdl_file) + with open(jdl_file, "w") as f: + def write_line(line): + f.write(line + "\n") + write_line("# Simulation + AOD creation JDL") + write_line("#\n") + write_line(f"Executable = \"{source_path}/starter.sh\";") + 
write_line(f"Validationcommand = \"{source_path}/validation.sh\";") + write_line(f"Arguments = \"python3 createO2tables.py {configuration_file} -j 1 -r 1 " + + f"--ev {number_of_events} -e {config_tag} -l -v --no-vertexing\";\n") + write_line("Requirements = ( other.Type == \"machine\" );\n") + write_line("Packages = {") + write_line(f"\t\"VO_ALICE@{delphes_version}\",") + write_line(f"\t\"VO_ALICE@{o2dpg_version}\"") + write_line("};\n") + write_line("JDLVariables = {") + write_line("\t\"Packages\",") + write_line("\t\"OutputDir\"") + write_line("};\n") + write_line("Type = \"Job\";") + write_line(f"User = \"{user}\";") + write_line("Jobtag = {") + write_line("\t\"comment: DelphesO2 Simulation + AOD jdl\"") + write_line("};") + if user_mail is None: + user_mail = f"{user}@cern.ch" + write_line(f"EMail = \"{user_mail}\";") + write_line("TTL = \"86400\";") + write_line("Price = 1;") + write_line("Workdirectorysize = {") + write_line("\t\"12000MB\"") + write_line("};\n") + write_line(f"Split = \"production:1-{njobs}\";") + write_line("SplitArguments = \"\";\n") + write_line("InputFile = {") + + lut_tag = parser.get("DEFAULT", "lut_tag") + bfield = parser.get("DEFAULT", "bfield").replace(".", "") + for i in ["el", "mu", "pi", "ka", "pr"]: + write_line( + f"\t\"LF:{source_path}/lutCovm.{i}.{int(bfield)}kG.{lut_tag}.dat\"") + files = ["createO2tables.py", + "o2sim_grp.root", + "o2sim_geometry.root"] + for i in files: + write_line(f"\t\"LF:{source_path}/{i}\",") + write_line(f"\t\"LF:{grid_path}/{configuration_file}\"") + write_line("};\n") + write_line("OutputArchive = {") + write_line("\t\"log_archive.zip:stdout,stderr,*.log,*.sh@\",") + write_line("\t\"root_archive.zip:AODRun5*.root@\"") + write_line("};\n") + write_line(f"OutputDir = \"{grid_path}/output/#alien_counter_03i#\";") + + if make_alien_directory: + run_cmd(f"alien_cp {jdl_file} alien://{grid_path}" + + jdl_file.split("/")[-1]) + + +if __name__ == "__main__": + parser = 
argparse.ArgumentParser(description=__doc__) + parser.add_argument("--jdl_file", "-o", type=str, + default="run5aod.jdl", + help="Name of the jdl to create") + parser.add_argument("grid_path", type=str, + help="Path on the grid where to store the configurations and output files") + parser.add_argument("source_path", type=str, + help="Path on the grid where to fetch the input files from e.g. LUTs, geometry files for vertexing, ") + parser.add_argument("--user_mail", "--mail", + type=str, default=None, + help="Mail address of the user for notification purposes") + parser.add_argument("--user", "-u", type=str, + default=os.getlogin(), + help="Name of the user") + parser.add_argument("--configuration_file", + type=str, default="../default_configfile.ini", + help="Input configuration file e.g. you can use the provided default_configfile.ini or variations of it.") + parser.add_argument("--entry", "-e", + type=str, default="INEL", + help="Entry in the configuration file, e.g. the INEL or CCBAR entries in the configuration file.") + parser.add_argument("--njobs", "-j", + type=int, default=2, + help="Number of concurrent jobs, by default 2.") + parser.add_argument("--nevents", "--ev", + type=int, default=1000, + help="Number of simulated events, by default 1000.") + parser.add_argument("--delphes_version", "--delphes", + type=str, default="DelphesO2::v20210409-1", + help="Version of DelphesO2 to use") + parser.add_argument("--o2dpg_version", "--o2dpg", + type=str, default="O2DPG::prod-202104-27-1", + help="Version of O2DPG to use") + parser.add_argument("--no-vertexing", + action="store_true", + help="Option turning off the vertexing.") + parser.add_argument("--make-alien-dir", "--make-dir", + action="store_true", + help="Option make the directory and upload files to it.") + args = parser.parse_args() + main(jdl_file=args.jdl_file, + configuration_file=args.configuration_file, + number_of_events=args.nevents, + grid_path=args.grid_path, + source_path=args.source_path, + 
user_mail=args.user_mail, + user=args.user, + config_tag=args.entry, + njobs=args.njobs, + delphes_version=args.delphes_version, + o2dpg_version=args.o2dpg_version, + make_alien_directory=args.make_alien_dir) diff --git a/examples/scripts/muon.sh b/examples/scripts/muon.sh new file mode 100644 index 0000000..24c64b9 --- /dev/null +++ b/examples/scripts/muon.sh @@ -0,0 +1,89 @@ +#! /usr/bin/env bash + +NJOBS=1 # number of max parallel runs +NRUNS=1 # number of runs +NEVENTS=100 # number of events in a run + +LUTTAG="werner" # LUT tag name +PY8CFG="pythia8_ccbar" # pythia8 configuration +BFIELD=5. # magnetic field [kG] +SIGMAT=0.020 # time resolution [ns] +SIGMA0=0.200 # vertex time spread [ns] +TAILLX=1.0 # tail on left [q] +TAILRX=1.0 # tail on right [q] +TOFRAD=100. # TOF radius [cm] +TOFLEN=200. # TOF half length [cm] +TOFETA=1.443 # TOF max pseudorapidity + +### calculate max eta from geometry +TOFETA=`awk -v a=$TOFRAD -v b=$TOFLEN 'BEGIN {th=atan2(a,b)*0.5; sth=sin(th); cth=cos(th); print -log(sth/cth)}'` +echo "maxEta = $TOFETA" + +### copy relevant files in the working directory +cp $DELPHESO2_ROOT/examples/cards/propagate.2kG.tails.tcl propagate.tcl +cp $DELPHESO2_ROOT/examples/pythia8/$PY8CFG.cfg pythia8.cfg +#cp $DELPHESO2_ROOT/examples/smearing/muon.C . +cp ../smearing/muon.C . + +### adjust pythia8 configuration +echo "" >> pythia8.cfg +echo "### run time configuration" >> pythia8.cfg +echo "Main:numberOfEvents $NEVENTS" >> pythia8.cfg +echo "Beams:allowVertexSpread on " >> pythia8.cfg +echo "Beams:sigmaTime 60." 
>> pythia8.cfg + +### set magnetic field +sed -i -e "s/set barrel_Bz .*$/set barrel_Bz ${BFIELD}e\-1/" propagate.tcl +### set TOF radius +sed -i -e "s/set barrel_Radius .*$/set barrel_Radius ${TOFRAD}e\-2/" propagate.tcl +sed -i -e "s/double tof_radius = .*$/double tof_radius = ${TOFRAD}\;/" muon.C +### set TOF length +sed -i -e "s/set barrel_HalfLength .*$/set barrel_HalfLength ${TOFLEN}e\-2/" propagate.tcl +sed -i -e "s/double tof_length = .*$/double tof_length = ${TOFLEN}\;/" muon.C +### set TOF acceptance +sed -i -e "s/set barrel_Acceptance .*$/set barrel_Acceptance \{ 0.0 + 1.0 * fabs(eta) < ${TOFETA} \}/" propagate.tcl +### set TOF time resolution and tails +sed -i -e "s/set barrel_TimeResolution .*$/set barrel_TimeResolution ${SIGMAT}e\-9/" propagate.tcl +sed -i -e "s/set barrel_TailRight .*$/set barrel_TailRight ${TAILRX}/" propagate.tcl +sed -i -e "s/set barrel_TailLeft .*$/set barrel_TailLeft ${TAILLX}/" propagate.tcl +sed -i -e "s/double tof_sigmat = .*$/double tof_sigmat = ${SIGMAT}\;/" muon.C +sed -i -e "s/double tof_sigma0 = .*$/double tof_sigma0 = ${SIGMA0}\;/" muon.C +### set TOF mismatch information +sed -i -e "s/double tof_mismatch.*$/double tof_mismatch = 0.01;/" muon.C +#sed -i -e "s/std::string tof_mismatch_fname.*$/std::string tof_mismatch_fname = \"tof_mismatch_template.root\";/" muon.C + +### create LUTs +BFIELDT=`awk -v a=$BFIELD 'BEGIN {print a*0.1}'` +$DELPHESO2_ROOT/examples/scripts/create_luts.sh $LUTTAG $BFIELDT $TOFRAD + +### loop over runs +rm -f .running.* delphes.*.root +for I in $(seq 1 $NRUNS); do + + ### wait for a free slot + while [ $(ls .running.* 2> /dev/null | wc -l) -ge $NJOBS ]; do + echo " --- waiting for a free slot" + sleep 1 + done + + ### book the slot + echo " --- starting run $I" + touch .running.$I + + ### copy pythia8 configuration and set random seed + cp pythia8.cfg pythia8.$I.cfg + echo "Random:setSeed on" >> pythia8.$I.cfg + echo "Random:seed = $I" >> pythia8.$I.cfg + + ### run Delphes and analysis + 
DelphesPythia8 propagate.tcl pythia8.$I.cfg delphes.$I.root &> delphes.$I.log && + root -b -q -l "muon.C(\"delphes.$I.root\",\"muonAccEffPID.root\",\"muon.$I.root\")" &> muon.$I.log && + rm -rf delphes.$I.root && + rm -rf .running.$I && + echo " --- complete run $I" & + +done + +### merge runs when all done +wait +hadd -f muon.root muon.*.root && rm -rf muon.*.root diff --git a/examples/scripts/rich.sh b/examples/scripts/rich.sh new file mode 100755 index 0000000..022d4fe --- /dev/null +++ b/examples/scripts/rich.sh @@ -0,0 +1,75 @@ +#! /usr/bin/env bash + +NJOBS=5 # number of max parallel runs +NRUNS=5 # number of runs +NEVENTS=10000 # number of events in a run + +LUTTAG="werner" # LUT tag name +PY8CFG="pythia8_ccbar" # pythia8 configuration +BFIELD=5. # magnetic field [kG] +RICHRAD=100. # RICH radius [cm] +RICHLEN=200. # RICH half length [cm] +RICHETA=1.443 # RICH max pseudorapidity + +### calculate max eta from geometry +RICHETA=`awk -v a=$RICHRAD -v b=$RICHLEN 'BEGIN {th=atan2(a,b)*0.5; sth=sin(th); cth=cos(th); print -log(sth/cth)}'` +echo "maxEta = $RICHETA" + +### copy relevant files in the working directory +cp $DELPHESO2_ROOT/examples/cards/propagate.2kG.tcl propagate.tcl +cp $DELPHESO2_ROOT/examples/pythia8/$PY8CFG.cfg pythia8.cfg +cp $DELPHESO2_ROOT/examples/smearing/rich.C . + +### adjust pythia8 configuration +echo "" >> pythia8.cfg +echo "### run time configuration" >> pythia8.cfg +echo "Main:numberOfEvents $NEVENTS" >> pythia8.cfg +echo "Beams:allowVertexSpread off " >> pythia8.cfg +echo "Beams:sigmaTime 60." 
>> pythia8.cfg + +### set magnetic field +sed -i -e "s/set barrel_Bz .*$/set barrel_Bz ${BFIELD}e\-1/" propagate.tcl +### set RICH radius +sed -i -e "s/set barrel_Radius .*$/set barrel_Radius ${RICHRAD}e\-2/" propagate.tcl +sed -i -e "s/double rich_radius = .*$/double rich_radius = ${RICHRAD}\;/" rich.C +### set RICH length +sed -i -e "s/set barrel_HalfLength .*$/set barrel_HalfLength ${RICHLEN}e\-2/" propagate.tcl +sed -i -e "s/double rich_length = .*$/double rich_length = ${RICHLEN}\;/" rich.C +### set RICH acceptance +sed -i -e "s/set barrel_Acceptance .*$/set barrel_Acceptance \{ 0.0 + 1.0 * fabs(eta) < ${RICHETA} \}/" propagate.tcl + +### create LUTs +BFIELDT=`awk -v a=$BFIELD 'BEGIN {print a*0.1}'` +$DELPHESO2_ROOT/examples/scripts/create_luts.sh $LUTTAG $BFIELDT $RICHRAD + +### loop over runs +rm -f .running.* delphes.*.root +for I in $(seq 1 $NRUNS); do + + ### wait for a free slot + while [ $(ls .running.* 2> /dev/null | wc -l) -ge $NJOBS ]; do + echo " --- waiting for a free slot" + sleep 1 + done + + ### book the slot + echo " --- starting run $I" + touch .running.$I + + ### copy pythia8 configuration and adjust it + cp pythia8.cfg pythia8.$I.cfg + echo "Random:setSeed on" >> pythia8.$I.cfg + echo "Random:seed = $I" >> pythia8.$I.cfg + + ### run Delphes and analysis + DelphesPythia8 propagate.tcl pythia8.$I.cfg delphes.$I.root &> delphes.$I.log && + root -b -q -l "rich.C(\"delphes.$I.root\", \"rich.$I.root\")" &> rich.$I.log && + rm -rf delphes.$I.root && + rm -rf .running.$I && + echo " --- complete run $I" & + +done + +### merge runs when all done +wait +hadd -f rich.root rich.*.root && rm -rf rich.*.root diff --git a/examples/scripts/tof.sh b/examples/scripts/tof.sh index e63fc32..3630960 100755 --- a/examples/scripts/tof.sh +++ b/examples/scripts/tof.sh @@ -1,28 +1,38 @@ #!
/usr/bin/env bash -NJOBS=5 # number of max parallel runs -NRUNS=10 # number of runs -NEVENTS=10000 # number of events in a run +NJOBS=5 # number of max parallel runs +NRUNS=10 # number of runs +NEVENTS=10000 # number of events in a run -BFIELD=5. # magnetic field [kG] -SIGMAT=0.020 # time resolution [ns] -TOFRAD=100. # TOF radius [cm] -TOFLEN=200. # TOF half length [cm] -TOFETA=1.443 # TOF max pseudorapidity +LUTTAG="werner" # LUT tag name +PY8CFG="pythia8_ccbar" # pythia8 configuration +BFIELD=5. # magnetic field [kG] +SIGMAT=0.020 # time resolution [ns] +SIGMA0=0.200 # vertex time spread [ns] +TAILLX=1.0 # tail on left [q] +TAILRX=1.0 # tail on right [q] +TOFRAD=100. # TOF radius [cm] +TOFLEN=200. # TOF half length [cm] +TOFETA=1.443 # TOF max pseudorapidity ### calculate max eta from geometry TOFETA=`awk -v a=$TOFRAD -v b=$TOFLEN 'BEGIN {th=atan2(a,b)*0.5; sth=sin(th); cth=cos(th); print -log(sth/cth)}'` echo "maxEta = $TOFETA" ### copy relevant files in the working directory -cp $DELPHESO2_ROOT/examples/cards/propagate.2kG.tcl propagate.tcl -cp $DELPHESO2_ROOT/examples/smearing/luts/lutCovm.* . -cp $DELPHESO2_ROOT/examples/pythia8/pythia8_inel.cfg . +cp $DELPHESO2_ROOT/examples/cards/propagate.2kG.tails.tcl propagate.tcl +cp $DELPHESO2_ROOT/examples/pythia8/$PY8CFG.cfg pythia8.cfg cp $DELPHESO2_ROOT/examples/smearing/tof.C . +### adjust pythia8 configuration +echo "" >> pythia8.cfg +echo "### run time configuration" >> pythia8.cfg +echo "Main:numberOfEvents $NEVENTS" >> pythia8.cfg +echo "Beams:allowVertexSpread on " >> pythia8.cfg +echo "Beams:sigmaTime 60." 
>> pythia8.cfg + ### set magnetic field sed -i -e "s/set barrel_Bz .*$/set barrel_Bz ${BFIELD}e\-1/" propagate.tcl -sed -i -e "s/double Bz = .*$/double Bz = ${BFIELD}e\-1\;/" tof.C ### set TOF radius sed -i -e "s/set barrel_Radius .*$/set barrel_Radius ${TOFRAD}e\-2/" propagate.tcl sed -i -e "s/double tof_radius = .*$/double tof_radius = ${TOFRAD}\;/" tof.C @@ -31,11 +41,22 @@ sed -i -e "s/set barrel_HalfLength .*$/set barrel_HalfLength ${TOFLEN}e\-2/" pro sed -i -e "s/double tof_length = .*$/double tof_length = ${TOFLEN}\;/" tof.C ### set TOF acceptance sed -i -e "s/set barrel_Acceptance .*$/set barrel_Acceptance \{ 0.0 + 1.0 * fabs(eta) < ${TOFETA} \}/" propagate.tcl -### set TOF time resolution +### set TOF time resolution and tails sed -i -e "s/set barrel_TimeResolution .*$/set barrel_TimeResolution ${SIGMAT}e\-9/" propagate.tcl +sed -i -e "s/set barrel_TailRight .*$/set barrel_TailRight ${TAILRX}/" propagate.tcl +sed -i -e "s/set barrel_TailLeft .*$/set barrel_TailLeft ${TAILLX}/" propagate.tcl sed -i -e "s/double tof_sigmat = .*$/double tof_sigmat = ${SIGMAT}\;/" tof.C +sed -i -e "s/double tof_sigma0 = .*$/double tof_sigma0 = ${SIGMA0}\;/" tof.C +### set TOF mismatch information +sed -i -e "s/double tof_mismatch.*$/double tof_mismatch = 0.01;/" tof.C +#sed -i -e "s/std::string tof_mismatch_fname.*$/std::string tof_mismatch_fname = \"tof_mismatch_template.root\";/" tof.C + +### create LUTs +BFIELDT=`awk -v a=$BFIELD 'BEGIN {print a*0.1}'` +$DELPHESO2_ROOT/examples/scripts/create_luts.sh $LUTTAG $BFIELDT $TOFRAD ### loop over runs +rm -f .running.* delphes.*.root for I in $(seq 1 $NRUNS); do ### wait for a free slot @@ -48,12 +69,10 @@ for I in $(seq 1 $NRUNS); do echo " --- starting run $I" touch .running.$I - ### copy pythia8 configuration and adjust it - cp pythia8_inel.cfg pythia8.$I.cfg - echo "Main:numberOfEvents $NEVENTS" >> pythia8.$I.cfg + ### copy pythia8 configuration and set random seed + cp pythia8.cfg pythia8.$I.cfg + echo "Random:setSeed on" >> 
pythia8.$I.cfg echo "Random:seed = $I" >> pythia8.$I.cfg - echo "Beams:allowVertexSpread on " >> pythia8.$I.cfg - echo "Beams:sigmaTime 60." >> pythia8.$I.cfg ### run Delphes and analysis DelphesPythia8 propagate.tcl pythia8.$I.cfg delphes.$I.root &> delphes.$I.log && diff --git a/examples/smearing/.clang-format b/examples/smearing/.clang-format new file mode 120000 index 0000000..e1a5a36 --- /dev/null +++ b/examples/smearing/.clang-format @@ -0,0 +1 @@ +../aod/.clang-format \ No newline at end of file diff --git a/examples/smearing/draw.py b/examples/smearing/draw.py new file mode 100755 index 0000000..ee6a05f --- /dev/null +++ b/examples/smearing/draw.py @@ -0,0 +1,378 @@ +#!/usr/bin/env python3 + +""" +Script to plot the content of the LUT in terms of pointing resolution, efficiency, momentum resolution +""" + +from ROOT import gROOT, TLatex, TCanvas, TLegend, TColor, gPad, TGraph +from ROOT import TFile +from os import path +import argparse + + +def main(reader_name, + tags, + lut_path, + xmin, + xmax, + ymin=None, + ymax=None, + tags_name=None, + logx=False, + logy=False, + leg_pos=[0.74, 0.2, 0.90, 0.4], + particles=None, + ind_var=0, + dnch_deta=100, + rmin=None, + add_eta_label=True, + add_alice3_label=True, + save=None, + background=False, + use_p_over_z=True, + aod=None, + styles=None, + study_label="ALICE 3 study"): + gROOT.LoadMacro(reader_name) + gROOT.LoadMacro("style.C") + reader_name = reader_name.split(".")[-2] + reader = getattr(__import__('ROOT', fromlist=[reader_name]), + reader_name) + getattr(__import__('ROOT', fromlist=["style"]), + "style")() + p = {"el": "e", "pi": "#pi", "ka": "K", "pr": "p", + "de": "d", "tr": "t", "he3": "^{3}He"} + charge = {"el": 1, "pi": 1, "ka": 1, "pr": 1, + "de": 1, "tr": 1, "he3": 2} + p_colors = {"el": "#e41a1c", "pi": "#377eb8", + "ka": "#4daf4a", "pr": "#984ea3", + "de": "#ff7f00", "tr": "#999999", + "he3": "#a65628"} + + if particles is not None: + to_remove = [] + for i in p: + if i not in particles: + 
to_remove.append(i) + for i in to_remove: + p.pop(i) + + latex = TLatex() + latex.SetTextAlign(33) + canvas = reader_name + canvas = canvas.replace("lutRead_", "") + canvas = TCanvas(canvas, canvas, 800, 800) + canvas.Divide(2, 2) + drawn = [canvas] + drawn_graphs = {} + drawn_frames = {} + + def set_limit(l, v=0): + if l is None: + return v + return l + if "_dca" in reader_name: + ymin = set_limit(ymin, 0.1) + ymax = set_limit(ymax, 1e4) + elif "_pt" in reader_name: + ymin = set_limit(ymin, 1) + ymax = set_limit(ymax, 100) + elif "_eff" in reader_name: + ymin = set_limit(ymin, 0) + ymax = set_limit(ymax, 115) + + def adjust_pad(): + if logx: + gPad.SetLogx() + if logy: + gPad.SetLogy() + + counter = 1 + leg = None + if tags_name is not None: + leg = TLegend(*leg_pos) + if add_eta_label: + label = f"#eta = {int(ind_var)}" + if "vs_eta" in reader_name: + label = "#it{p}_{T} "f"= {int(ind_var)} ""GeV/#it{c}" + label += " dN_{Ch}/d#eta =" + label += f" {int(dnch_deta)}" + if rmin is not None: + label += " R_{min} = " + rmin + else: + leg.SetHeader() + leg.SetHeader(label) + leg.SetLineColor(0) + drawn.append(leg) + + def draw_study_label(x=0.5, y=0.9): + latex = TLatex() + latex.SetTextAlign(13) + drawn.append(latex.DrawLatexNDC(x, y, " ".join(study_label))) + for i in p: # Drawing one canvas per particle species + c = f"{canvas.GetName()}_{i}" + c = TCanvas(c, c, 800, 800) + drawn.append(c) + adjust_pad() + + frame = c.DrawFrame(xmin, ymin, + xmax, ymax, "") + frame.SetDirectory(0) + if leg is not None: + leg.Draw() + drawn_frames[i] = frame + g_list = [] + extra = {} + cols = ['#e41a1c', '#377eb8', '#4daf4a', + '#984ea3', '#ff7f00', '#ffff33'] + for k, j in enumerate(tags): + lut = f"{lut_path}/lutCovm.{i}.{j}.dat" + if j == "": + lut = f"{lut_path}/lutCovm.{i}.dat" + if not path.isfile(lut): + print("LUT file", lut, "does not exist") + return + g = reader(lut, ind_var, dnch_deta) + if g.GetN() <= 0: + print("Skipping", g.GetName(), "because empty graph") + continue 
+ if len(g_list) == 0: + frame.GetXaxis().SetTitle(g.GetXaxis().GetTitle()) + frame.GetYaxis().SetTitle(g.GetYaxis().GetTitle()) + if use_p_over_z: + for j in range(g.GetN()): + if "_pt" in reader_name: + g.SetPoint(j, + g.GetPointX(j)/charge[i], + g.GetPointY(j)/charge[i]) + else: + g.SetPoint(j, + g.GetPointX(j)/charge[i], g.GetPointY(j)) + frame.GetXaxis().SetTitle("#it{p}_{T}/z (GeV/#it{c})") + col = TColor.GetColor(cols[len(g_list)]) + g.SetLineColor(col) + g.SetLineStyle(1) + g.SetLineWidth(3) + g.Draw("samel") + if aod is not None: + f_aod = TFile(aod, "READ") + if "_eff" in reader_name: + extra[g.GetName()] = f_aod.Get( + "qa-tracking-efficiency-kaon/pt/num") + extra[g.GetName()].Divide(f_aod.Get("qa-tracking-efficiency-kaon/pt/num"), + f_aod.Get("qa-tracking-efficiency-kaon/pt/den"), 1, 1, "B") + extra[g.GetName()].Scale(100) + extra[g.GetName()].Draw("SAME") + extra[g.GetName()].SetDirectory(0) + elif "_pt" in reader_name: + extra[g.GetName()] = f_aod.Get( + "qa-tracking-efficiency-kaon/pt/num") + extra[g.GetName()].Divide(f_aod.Get("qa-tracking-efficiency-kaon/pt/num"), + f_aod.Get("qa-tracking-efficiency-kaon/pt/den"), 1, 1, "B") + extra[g.GetName()].Scale(100) + extra[g.GetName()].Draw("SAME") + extra[g.GetName()].SetDirectory(0) + f_aod.Close() + + print("Drawing", g.GetName()) + if tags_name is not None and counter == 1: + leg.AddEntry(g, tags_name[k], "l") + g_list.append(g) + drawn_graphs[i] = g_list + if len(g_list) <= 0: + print("Nothing drawn!") + continue + drawn.append(latex.DrawLatexNDC(0.9, 0.9, p[i])) + draw_study_label(.4, .91) + gPad.Update() + canvas.cd(counter) + clone = c.DrawClonePad() + if counter != 1: + l = gPad.GetListOfPrimitives() + for i in l: + cn = i.ClassName() + if cn == "TLegend": + l.Remove(i) + elif cn == "TLatex": + if "ALICE" in i.GetTitle(): + l.Remove(i) + drawn.append(clone) + c.SaveAs(f"/tmp/{c.GetName()}.png") + gPad.Update() + counter += 1 + canvas_all_species = None + if len(tags) == 1 or styles is not None: + 
canvas_all_species = "all_spec_"+canvas.GetName() + canvas_all_species = TCanvas(canvas_all_species, + canvas_all_species, + 800, 800) + drawn.append(canvas_all_species) + canvas_all_species.cd() + drawn_graphs_all_spec = {} + leg_all_spec = TLegend(*leg_pos) + leg_all_spec.SetNColumns(2) + leg_all_spec.SetLineColor(0) + drawn.append(leg_all_spec) + for i in drawn_graphs: + if canvas_all_species.GetListOfPrimitives().GetEntries() == 0: + drawn_frames[i].Draw() + leg_all_spec.Draw() + drawn_graphs_all_spec[i] = [] + for k, g in enumerate(drawn_graphs[i]): + g = g.Clone() + drawn_graphs_all_spec[i].append(g) + g.SetName(g.GetName()+"_color") + g.SetLineColor(TColor.GetColor(p_colors[i])) + if styles is not None: + g.SetLineStyle(styles[k]) + g.Draw("same") + if k == 0: + leg_all_spec.AddEntry(g, p[i], "L") + if styles is not None: + for j, k in enumerate(tags_name): + g = TGraph() + g.SetLineWidth(3) + g.SetLineColor(1) + g.SetLineStyle(styles[j]) + leg_all_spec.AddEntry(g, k, "L") + drawn_graphs_all_spec[i].append(g) + for i in drawn_graphs_all_spec: + drawn_graphs[i+"_allspec"] = drawn_graphs_all_spec[i] + if add_alice3_label: + draw_study_label(.2, .91) + latex = TLatex() + latex.SetTextAlign(13) + latex.SetTextSize(0.04) + if tags_name is not None and styles is None: + drawn.append(latex.DrawLatexNDC(0.5, 0.80, tags_name[0])) + if "vs_eta" in reader_name: + drawn.append(latex.DrawLatexNDC(0.42, 0.82, "#splitline{" + + "#it{p}_{T}" + + " = {:.1f} GeV/c".format(ind_var) + + " dN_{Ch}/d#eta =" + + f" {int(dnch_deta)}" + "}" + + ("{R_{min} = " + rmin + "}" if rmin is not None else ""))) + else: + # drawn.append(latex.DrawLatexNDC(0.55, 0.82, "#splitline{" + + drawn.append(latex.DrawLatexNDC(0.55, 0.45, "#splitline{" + + f"#eta = {int(ind_var)}" + + " dN_{Ch}/d#eta =" + + f" {int(dnch_deta)}" + "}" + + ("{R_{min} = " + rmin + "}" if rmin is not None else ""))) + + adjust_pad() + canvas_all_species.Update() + 
canvas_all_species.SaveAs(f"/tmp/{canvas_all_species.GetName()}.png") + canvas_all_species.SaveAs(f"/tmp/{canvas_all_species.GetName()}.pdf") + canvas.SaveAs(f"/tmp/lut_{canvas.GetName()}.root") + if save is not None: + print("Saving to", save) + fo = TFile(save, "RECREATE") + fo.cd() + for i in drawn_graphs: + for j in drawn_graphs[i]: + j.Write(j.GetName().split("/")[-1]) + if not background: + input("Done, press enter to continue") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("reader", type=str, + help="Reader macro to access the information e.g. lutRead_eff.C") + parser.add_argument("--path", "-p", + type=str, + default="/tmp/myluts/", + help="Path of the LUTs") + parser.add_argument("--particles", "-P", + type=str, + nargs="+", + default=None, + help="Particles to show e.g. el pi ka mu pr") + parser.add_argument("--xmin", + type=float, + default=1e-2, + help="Minimum pT of the plot") + parser.add_argument("--xmax", + type=float, + default=100., + help="Maximum pT of the plot") + parser.add_argument("--tags", "-t", type=str, nargs="+", + default=[".5kG.20cm.its3", ".5kG.50cm.its3", + ".5kG.100cm.its3", ".5kG.20cm.scenario3"], + help="Tags to collect") + parser.add_argument("--tags_name", "-T", + type=str, nargs="+", + default=None, + help="Title of the tags that can be used in legend making") + parser.add_argument("--ind_var", "--ind", "-i", + type=float, + default=0, + help="Value of the indepentend variable, i.e. 
eta if plotting vs pT or pT if plotting against eta") + parser.add_argument("--styles", + type=int, + default=None, + nargs="+", + help="Plotting style of different analyses") + parser.add_argument("--nch", "--dndeta", + type=float, + default=100, + help="Value of the charged particle multiplicity") + parser.add_argument("--ymin", + type=float, + default=None, + help="Minimum y") + parser.add_argument("--ymax", + type=float, + default=None, + help="Maximum y") + parser.add_argument("--rmin", + type=str, + default=None, + help="Label for the minimum radius") + parser.add_argument("--aod", + type=str, + default=None, + help="Results from aod to show") + parser.add_argument("--save", + type=str, + default=None, + help="Name to save the figure to") + parser.add_argument("--leg-pos", "-l", + type=float, nargs="+", + default=[0.74, 0.2, 0.90, 0.4], + help="Position of the legend in NDC coordinates") + parser.add_argument("--study_label", "--label", "-L", + type=str, nargs="+", + default=["ALICE 3 study"], + help="Label to write into the label box") + parser.add_argument("--logx", action="store_true", + help="Log x") + parser.add_argument("--logy", action="store_true", + help="Log y") + parser.add_argument("--pt_over_z", action="store_true", + help="Plot pt over z") + parser.add_argument("-b", action="store_true", + help="Background mode") + args = parser.parse_args() + main(args.reader, + args.tags, + lut_path=args.path, + xmin=args.xmin, + xmax=args.xmax, + logx=args.logx, + logy=args.logy, + tags_name=args.tags_name, + particles=args.particles, + leg_pos=args.leg_pos, + ymin=args.ymin, + ymax=args.ymax, + rmin=args.rmin, + ind_var=args.ind_var, + save=args.save, + background=args.b, + aod=args.aod, + dnch_deta=args.nch, + use_p_over_z=args.pt_over_z, + study_label=args.study_label, + styles=args.styles) diff --git a/examples/smearing/draw_dNdeta.C b/examples/smearing/draw_dNdeta.C new file mode 100644 index 0000000..61edb24 --- /dev/null +++ 
b/examples/smearing/draw_dNdeta.C @@ -0,0 +1,61 @@ +#include "style.C" +#include "lutRead_eff.C" + +void +draw_dNdeta() +{ + + style(); + + std::vector<std::string> name = {"el", "pi", "ka", "pr"}; + // std::vector<std::string> title = {"electron", "pion", "kaon", "proton"}; + std::vector<std::string> title = {"e", "#pi", "K", "p"}; + + TLatex latex; + latex.SetTextAlign(33); + + auto cc = new TCanvas("cc", "cc", 800, 800); + cc->Divide(2, 2); + + for (int i = 0; i < 4; ++i) { + + auto c = new TCanvas((std::string("c") + name[i]).c_str(), + (std::string("c") + name[i]).c_str(), + 800, 800); + c->SetLogx(); + c->DrawFrame(1.e-2, 0., 1., 110., ";#it{p}_{T} (GeV/#it{c});efficiency (%)"); + latex.DrawLatexNDC(0.9, 0.25, title[i].c_str()); + + auto g10 = lutRead_eff((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.dNdeta10.dat")).c_str()); + g10->SetLineColor(kAzure-3); + g10->SetLineStyle(kSolid); + g10->SetLineWidth(3); + g10->Draw("samel"); + + auto g100 = lutRead_eff((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.dNdeta100.dat")).c_str()); + g100->SetLineColor(kGreen+2); + g100->SetLineStyle(kSolid); + g100->SetLineWidth(3); + g100->Draw("samel"); + + auto g1000 = lutRead_eff((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.dNdeta1000.dat")).c_str()); + g1000->SetLineColor(kRed+1); + g1000->SetLineStyle(kSolid); + g1000->SetLineWidth(3); + g1000->Draw("samel"); + + auto gdef = lutRead_eff((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.default.dat")).c_str()); + gdef->SetLineColor(kBlack); + gdef->SetLineStyle(kDashed); + gdef->SetLineWidth(2); + gdef->Draw("samel"); + + c->SaveAs((std::string("draw_dNdeta.") + name[i] + std::string(".png")).c_str()); + + cc->cd(i + 1); + c->DrawClonePad(); + } + + cc->SaveAs("draw_dNdeta.png"); + +} diff --git a/examples/smearing/draw_dca.C b/examples/smearing/draw_dca.C new file mode 100644 index 0000000..379470d --- /dev/null +++ b/examples/smearing/draw_dca.C
+ +void +draw_dca() +{ + + style(); + + std::vector name = {"el", "pi", "ka", "pr"}; + // std::vector title = {"electron", "pion", "kaon", "proton"}; + std::vector title = {"e", "#pi", "K", "p"}; + + TLatex latex; + latex.SetTextAlign(33); + + auto cc = new TCanvas("cc", "cc", 800, 800); + cc->Divide(2, 2); + + for (int i = 0; i < 4; ++i) { + + auto c = new TCanvas((std::string("c") + name[i]).c_str(), + (std::string("c") + name[i]).c_str(), + 800, 800); + c->SetLogx(); + c->SetLogy(); + c->DrawFrame(1.e-2, 0.1, 100., 10000., ";#it{p}_{T} (GeV/#it{c});pointing resolution (#mum)"); + latex.DrawLatexNDC(0.9, 0.9, title[i].c_str()); + + auto g2a = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".2kG.20cm.dat")).c_str()); + g2a->SetLineColor(kRed+1); + g2a->SetLineStyle(kDashed); + g2a->SetLineWidth(3); + g2a->Draw("samel"); + + auto g2b = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".2kG.100cm.dat")).c_str()); + g2b->SetLineColor(kRed+1); + g2b->SetLineStyle(kSolid); + g2b->SetLineWidth(3); + g2b->Draw("samel"); + + auto g5a = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.dat")).c_str()); + g5a->SetLineColor(kAzure-3); + g5a->SetLineStyle(kDashed); + g5a->SetLineWidth(3); + g5a->Draw("samel"); + + auto g5b = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".5kG.100cm.dat")).c_str()); + g5b->SetLineColor(kAzure-3); + g5b->SetLineStyle(kSolid); + g5b->SetLineWidth(3); + g5b->Draw("samel"); + + cc->cd(i + 1); + c->DrawClonePad(); + } + +} diff --git a/examples/smearing/draw_dca_scenarios.C b/examples/smearing/draw_dca_scenarios.C new file mode 100644 index 0000000..c5def41 --- /dev/null +++ b/examples/smearing/draw_dca_scenarios.C @@ -0,0 +1,62 @@ +#include "style.C" +#include "lutRead_dca.C" + +void +draw_scenarios() +{ + + style(); + + std::vector name = {"el", "pi", "ka", "pr"}; + // std::vector title = {"electron", "pion", "kaon", "proton"}; + std::vector title = {"e", "#pi", "K", "p"}; + + TLatex 
latex; + latex.SetTextAlign(33); + + auto cc = new TCanvas("cc", "cc", 800, 800); + cc->Divide(2, 2); + + for (int i = 0; i < 4; ++i) { + + auto c = new TCanvas((std::string("c") + name[i]).c_str(), + (std::string("c") + name[i]).c_str(), + 800, 800); + c->SetLogx(); + c->SetLogy(); + c->DrawFrame(1.e-2, 0.1, 100., 10000., ";#it{p}_{T} (GeV/#it{c});pointing resolution (#mum)"); + latex.DrawLatexNDC(0.9, 0.9, title[i].c_str()); + + auto g1 = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.scenario1.dat")).c_str()); + g1->SetLineColor(kRed+1); + g1->SetLineStyle(kSolid); + g1->SetLineWidth(3); + g1->Draw("samel"); + + auto g2 = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.scenario2.dat")).c_str()); + g2->SetLineColor(kRed+1); + g2->SetLineStyle(kDashed); + g2->SetLineWidth(3); + g2->Draw("samel"); + + auto g3 = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.scenario3.dat")).c_str()); + g3->SetLineColor(kAzure-3); + g3->SetLineStyle(kSolid); + g3->SetLineWidth(3); + g3->Draw("samel"); + + auto g4 = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.scenario4.dat")).c_str()); + g4->SetLineColor(kAzure-3); + g4->SetLineStyle(kDashed); + g4->SetLineWidth(3); + g4->Draw("samel"); + + c->SaveAs((std::string("draw_scenarios.") + name[i] + std::string(".png")).c_str()); + + cc->cd(i + 1); + c->DrawClonePad(); + } + + cc->SaveAs("draw_scenarios.png"); + +} diff --git a/examples/smearing/draw_dca_tof.C b/examples/smearing/draw_dca_tof.C new file mode 100644 index 0000000..fb930c5 --- /dev/null +++ b/examples/smearing/draw_dca_tof.C @@ -0,0 +1,56 @@ +#include "style.C" +#include "lutRead_dca.C" + +void +draw_dca_tof() +{ + + style(); + + std::vector name = {"el", "pi", "ka", "pr"}; + // std::vector title = {"electron", "pion", "kaon", "proton"}; + std::vector title = {"e", "#pi", "K", "p"}; + + TLatex latex; + latex.SetTextAlign(33); + + auto cc = new TCanvas("cc", "cc", 800, 800); 
+ cc->Divide(2, 2); + + for (int i = 0; i < 4; ++i) { + + auto c = new TCanvas((std::string("c") + name[i]).c_str(), + (std::string("c") + name[i]).c_str(), + 800, 800); + c->SetLogx(); + c->SetLogy(); + c->DrawFrame(1.e-2, 0.1, 100., 10000., ";#it{p}_{T} (GeV/#it{c});pointing resolution (#mum)"); + latex.DrawLatexNDC(0.9, 0.9, title[i].c_str()); + + auto g1 = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.tof1.dat")).c_str()); + g1->SetLineColor(kRed+1); + g1->SetLineStyle(kSolid); + g1->SetLineWidth(3); + g1->Draw("samel"); + + auto g2 = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.tof2.dat")).c_str()); + g2->SetLineColor(kAzure-3); + g2->SetLineStyle(kSolid); + g2->SetLineWidth(3); + g2->Draw("samel"); + + auto gdef = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.default.dat")).c_str()); + gdef->SetLineColor(kBlack); + gdef->SetLineStyle(kDashed); + gdef->SetLineWidth(2); + gdef->Draw("samel"); + + c->SaveAs((std::string("draw_dca_tof.") + name[i] + std::string(".png")).c_str()); + + cc->cd(i + 1); + c->DrawClonePad(); + } + + cc->SaveAs("draw_dca_tof.png"); + +} diff --git a/examples/smearing/draw_eff.C b/examples/smearing/draw_eff.C new file mode 100644 index 0000000..53ca41e --- /dev/null +++ b/examples/smearing/draw_eff.C @@ -0,0 +1,61 @@ +#include "style.C" +#include "lutRead_eff.C" + +void +draw_eff() +{ + + style(); + + std::vector name = {"el", "pi", "ka", "pr"}; + // std::vector title = {"electron", "pion", "kaon", "proton"}; + std::vector title = {"e", "#pi", "K", "p"}; + + TLatex latex; + latex.SetTextAlign(33); + + auto cc = new TCanvas("cc", "cc", 800, 800); + cc->Divide(2, 2); + + for (int i = 0; i < 4; ++i) { + + auto c = new TCanvas((std::string("c") + name[i]).c_str(), + (std::string("c") + name[i]).c_str(), + 800, 800); + c->SetLogx(); + c->DrawFrame(1.e-2, 0., 1., 110., ";#it{p}_{T} (GeV/#it{c});efficiency (%)"); + latex.DrawLatexNDC(0.9, 0.25, title[i].c_str()); + + 
auto g2a = lutRead_eff((std::string("lutCovm.") + name[i] + std::string(".2kG.20cm.default.dat")).c_str()); + g2a->SetLineColor(kRed+1); + g2a->SetLineStyle(kDashed); + g2a->SetLineWidth(3); + g2a->Draw("samel"); + + auto g2b = lutRead_eff((std::string("lutCovm.") + name[i] + std::string(".2kG.100cm.default.dat")).c_str()); + g2b->SetLineColor(kRed+1); + g2b->SetLineStyle(kSolid); + g2b->SetLineWidth(3); + g2b->Draw("samel"); + + auto g5a = lutRead_eff((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.default.dat")).c_str()); + g5a->SetLineColor(kAzure-3); + g5a->SetLineStyle(kDashed); + g5a->SetLineWidth(3); + g5a->Draw("samel"); + + auto g5b = lutRead_eff((std::string("lutCovm.") + name[i] + std::string(".5kG.100cm.default.dat")).c_str()); + g5b->SetLineColor(kAzure-3); + g5b->SetLineStyle(kSolid); + g5b->SetLineWidth(3); + g5b->Draw("samel"); + + c->SaveAs((std::string("draw_eff.") + name[i] + std::string(".png")).c_str()); + + cc->cd(i + 1); + c->DrawClonePad(); + } + + cc->SaveAs("draw_eff.png"); + +} diff --git a/examples/smearing/draw_eff_tof.C b/examples/smearing/draw_eff_tof.C new file mode 100644 index 0000000..03bdfde --- /dev/null +++ b/examples/smearing/draw_eff_tof.C @@ -0,0 +1,55 @@ +#include "style.C" +#include "lutRead_eff.C" + +void +draw_eff_tof() +{ + + style(); + + std::vector name = {"el", "pi", "ka", "pr"}; + // std::vector title = {"electron", "pion", "kaon", "proton"}; + std::vector title = {"e", "#pi", "K", "p"}; + + TLatex latex; + latex.SetTextAlign(33); + + auto cc = new TCanvas("cc", "cc", 800, 800); + cc->Divide(2, 2); + + for (int i = 0; i < 4; ++i) { + + auto c = new TCanvas((std::string("c") + name[i]).c_str(), + (std::string("c") + name[i]).c_str(), + 800, 800); + c->SetLogx(); + c->DrawFrame(1.e-2, 0., 1., 110., ";#it{p}_{T} (GeV/#it{c});efficiency (%)"); + latex.DrawLatexNDC(0.9, 0.25, title[i].c_str()); + + auto g1 = lutRead_eff((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.tof1.dat")).c_str()); + 
g1->SetLineColor(kRed+1); + g1->SetLineStyle(kSolid); + g1->SetLineWidth(3); + g1->Draw("samel"); + + auto g2 = lutRead_eff((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.tof2.dat")).c_str()); + g2->SetLineColor(kAzure-3); + g2->SetLineStyle(kSolid); + g2->SetLineWidth(3); + g2->Draw("samel"); + + auto gdef = lutRead_eff((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.default.dat")).c_str()); + gdef->SetLineColor(kBlack); + gdef->SetLineStyle(kDashed); + gdef->SetLineWidth(2); + gdef->Draw("samel"); + + c->SaveAs((std::string("draw_eff_tof.") + name[i] + std::string(".png")).c_str()); + + cc->cd(i + 1); + c->DrawClonePad(); + } + + cc->SaveAs("draw_eff_tof.png"); + +} diff --git a/examples/smearing/draw_etadep.C b/examples/smearing/draw_etadep.C new file mode 100644 index 0000000..4a42777 --- /dev/null +++ b/examples/smearing/draw_etadep.C @@ -0,0 +1,111 @@ +#include "style.C" +#include "lutRead_allres_vseta.C" + +void +draw_etadep() +{ + + style(); + + //std::vector name = {"el", "pi", "ka", "pr"}; + // std::vector title = {"electron", "pion", "kaon", "proton"}; + //std::vector title = {"e", "#pi", "K", "p"}; + + float ptvals[] = {1, 5, 10, 20}; + Color_t colors[] = {kRed+1, kRed+1, kRed+1, kRed+1}; + Color_t linestyles[] = {1, 2, 3, 4}; + + int nPtVals = 4; + + TLatex latex; + latex.SetTextAlign(33); + + auto c1 = new TCanvas("c1", "c1: pt reso", 800, 600); + c1->SetLogy(); + c1->DrawFrame(0, 1., 4, 200., ";#eta;momentum resolution (%)"); + + TLegend *leg = new TLegend(0.18,0.6,0.3,0.85); + leg->SetBorderSize(0); + auto c2 = new TCanvas("c2", "c2: dca xy", 800, 600); + c2->SetLogy(); + c2->DrawFrame(0, 1., 4, 200., ";#eta;dca_{xy} resolution (#mum)"); + + auto c3 = new TCanvas("c3", "c3: dca z", 800, 600); + c3->SetLogy(); + c3->DrawFrame(0, 1., 4, 200., ";#eta;dca_{z} resolution (#mum)"); + + auto c4 = new TCanvas("c4", "c4: sin phi", 800, 600); + c4->SetLogy(); + c4->DrawFrame(0, 1e-5, 4, 1e-2, ";#eta;sin(#phi) resolution"); + + auto c5 
= new TCanvas("c5", "c5: tan lambda", 800, 600); + c5->SetLogy(); + c5->DrawFrame(0, 1e-5, 4, 1e-2, ";#eta;tan(#lambda) resolution"); + + + for (int i = 0; i < nPtVals; ++i) { + + /* + auto c = new TCanvas((std::string("c") + name[i]).c_str(), + (std::string("c") + name[i]).c_str(), + 800, 800); + c->SetLogx(); + c->SetLogy(); + c->DrawFrame(1.e-2, 1., 100., 100., ";#it{p}_{T} (GeV/#it{c});momentum resolution (%)"); + latex.DrawLatexNDC(0.9, 0.9, title[i].c_str()); + */ + + //auto g2a = lutRead_pt((std::string("lutCovm.") + name[i] + std::string(".2kG.20cm.default.dat")).c_str()); + auto ptres = new TGraph(); + auto dca_xy = new TGraph(); + auto dca_z = new TGraph(); + auto sinp = new TGraph(); + auto tanl = new TGraph(); + lutRead_allres_vseta("luts/lutCovm.v12.dat",dca_xy,dca_z,sinp,tanl,ptres,ptvals[i]); + ptres->SetLineColor(colors[i]); + ptres->SetLineStyle(i+1); + ptres->SetLineWidth(3); + leg->AddEntry(ptres,Form("p_{T} = %.1f GeV",ptvals[i])); + c1->cd(); + ptres->Draw("samel"); + + dca_xy->SetLineColor(colors[i]); + dca_xy->SetLineStyle(i+1); + dca_xy->SetLineWidth(3); + c2->cd(); + dca_xy->Draw("samel"); + + dca_z->SetLineColor(colors[i]); + dca_z->SetLineStyle(i+1); + dca_z->SetLineWidth(3); + c3->cd(); + dca_z->Draw("samel"); + + sinp->SetLineColor(colors[i]); + sinp->SetLineStyle(i+1); + sinp->SetLineWidth(3); + c4->cd(); + sinp->Draw("samel"); + + tanl->SetLineColor(colors[i]); + tanl->SetLineStyle(i+1); + tanl->SetLineWidth(3); + c5->cd(); + tanl->Draw("samel"); + } + c1->cd(); + leg->Draw(); + c1->Print("ptres_etadep.pdf"); + c2->cd(); + leg->Draw(); + c2->Print("dcaxyres_etadep.pdf"); + c3->cd(); + leg->Draw(); + c3->Print("dcazres_etadep.pdf"); + c4->cd(); + leg->Draw(); + c4->Print("sinphires_etadep.pdf"); + c5->cd(); + leg->Draw(); + c5->Print("tanlres_etadep.pdf"); +} diff --git a/examples/smearing/draw_mid_fwd.C b/examples/smearing/draw_mid_fwd.C new file mode 100644 index 0000000..e2becf0 --- /dev/null +++ b/examples/smearing/draw_mid_fwd.C 
@@ -0,0 +1,120 @@ +#include "style.C" +#include "lutRead_allres_vspt.C" + +void +draw_mid_fwd() +{ + + style(); + + //std::vector name = {"el", "pi", "ka", "pr"}; + // std::vector title = {"electron", "pion", "kaon", "proton"}; + //std::vector title = {"e", "#pi", "K", "p"}; + + float etavals[] = {0, 1, 2, 3, 4}; + Color_t colors[] = {kRed+1, kRed+1, kBlue, kBlue, kBlue}; + Color_t linestyles[] = {1, 2, 3, 4, 5}; + + int nEtaVals = 5; + + TLatex latex; + latex.SetTextAlign(33); + + auto c1 = new TCanvas("c1", "c1: pt reso", 800, 600); + //cc->Divide(2, 2); + c1->SetLogx(); + c1->SetLogy(); + c1->DrawFrame(1.e-2, 1., 100., 200., ";#it{p}_{T} (GeV/#it{c});momentum resolution (%)"); + + TLegend *leg = new TLegend(0.18,0.18,0.3,0.35); + leg->SetBorderSize(0); + auto c2 = new TCanvas("c2", "c2: dca xy", 800, 600); + c2->SetLogx(); + c2->SetLogy(); + c2->DrawFrame(1.e-2, 1., 100., 200., ";#it{p}_{T} (GeV/#it{c});dca_{xy} resolution (#mum)"); + + auto c3 = new TCanvas("c3", "c3: dca z", 800, 600); + c3->SetLogx(); + c3->SetLogy(); + c3->DrawFrame(1.e-2, 1., 100., 200., ";#it{p}_{T} (GeV/#it{c});dca_{z} resolution (#mum)"); + + auto c4 = new TCanvas("c4", "c4: sin phi", 800, 600); + c4->SetLogx(); + c4->SetLogy(); + c4->DrawFrame(1.e-2, 0.00001, 100., 5e-2, ";#it{p}_{T} (GeV/#it{c});sin(#phi) resolution"); + + auto c5 = new TCanvas("c5", "c5: tan lambda", 800, 600); + c5->SetLogx(); + c5->SetLogy(); + c5->DrawFrame(1.e-2, 0.00001, 100., 5e-2, ";#it{p}_{T} (GeV/#it{c});tan(#lambda) resolution"); + + + for (int i = 0; i < nEtaVals; ++i) { + + /* + auto c = new TCanvas((std::string("c") + name[i]).c_str(), + (std::string("c") + name[i]).c_str(), + 800, 800); + c->SetLogx(); + c->SetLogy(); + c->DrawFrame(1.e-2, 1., 100., 100., ";#it{p}_{T} (GeV/#it{c});momentum resolution (%)"); + latex.DrawLatexNDC(0.9, 0.9, title[i].c_str()); + */ + + //auto g2a = lutRead_pt((std::string("lutCovm.") + name[i] + std::string(".2kG.20cm.default.dat")).c_str()); + auto ptres = new TGraph(); + 
auto dca_xy = new TGraph(); + auto dca_z = new TGraph(); + auto sinp = new TGraph(); + auto tanl = new TGraph(); + lutRead_allres_vspt("luts/lutCovm.v12.dat",dca_xy,dca_z,sinp,tanl,ptres,etavals[i]); + ptres->SetLineColor(colors[i]); + ptres->SetLineStyle(i+1); + ptres->SetLineWidth(3); + leg->AddEntry(ptres,Form("#eta = %.1f",etavals[i])); + c1->cd(); + ptres->Draw("samel"); + + dca_xy->SetLineColor(colors[i]); + dca_xy->SetLineStyle(i+1); + dca_xy->SetLineWidth(3); + c2->cd(); + dca_xy->Draw("samel"); + + dca_z->SetLineColor(colors[i]); + dca_z->SetLineStyle(i+1); + dca_z->SetLineWidth(3); + c3->cd(); + dca_z->Draw("samel"); + + sinp->SetLineColor(colors[i]); + sinp->SetLineStyle(i+1); + sinp->SetLineWidth(3); + c4->cd(); + sinp->Draw("samel"); + + tanl->SetLineColor(colors[i]); + tanl->SetLineStyle(i+1); + tanl->SetLineWidth(3); + c5->cd(); + tanl->Draw("samel"); + } + c1->cd(); + leg->Draw(); + c1->Print("ptres_etabins.pdf"); + c2->cd(); + leg->Draw(); + c2->Print("dcaxyres_etabins.pdf"); + c3->cd(); + leg->Draw(); + c3->Print("dcazres_etabins.pdf"); + c4->cd(); + leg->Draw(); + c4->Print("sinphires_etabins.pdf"); + c5->cd(); + leg->Draw(); + c5->Print("tanlres_etabins.pdf"); + + //cc->SaveAs("draw_pt_mid_fwd.png"); + +} diff --git a/examples/smearing/draw_pt.C b/examples/smearing/draw_pt.C new file mode 100644 index 0000000..0647504 --- /dev/null +++ b/examples/smearing/draw_pt.C @@ -0,0 +1,62 @@ +#include "style.C" +#include "lutRead_pt.C" + +void +draw_pt() +{ + + style(); + + std::vector name = {"el", "pi", "ka", "pr"}; + // std::vector title = {"electron", "pion", "kaon", "proton"}; + std::vector title = {"e", "#pi", "K", "p"}; + + TLatex latex; + latex.SetTextAlign(33); + + auto cc = new TCanvas("cc", "cc", 800, 800); + cc->Divide(2, 2); + + for (int i = 0; i < 4; ++i) { + + auto c = new TCanvas((std::string("c") + name[i]).c_str(), + (std::string("c") + name[i]).c_str(), + 800, 800); + c->SetLogx(); + c->SetLogy(); + c->DrawFrame(1.e-2, 1., 100., 100., 
";#it{p}_{T} (GeV/#it{c});momentum resolution (%)"); + latex.DrawLatexNDC(0.9, 0.9, title[i].c_str()); + + auto g2a = lutRead_pt((std::string("lutCovm.") + name[i] + std::string(".2kG.20cm.default.dat")).c_str()); + g2a->SetLineColor(kRed+1); + g2a->SetLineStyle(kDashed); + g2a->SetLineWidth(3); + g2a->Draw("samel"); + + auto g2b = lutRead_pt((std::string("lutCovm.") + name[i] + std::string(".2kG.100cm.default.dat")).c_str()); + g2b->SetLineColor(kRed+1); + g2b->SetLineStyle(kSolid); + g2b->SetLineWidth(3); + g2b->Draw("samel"); + + auto g5a = lutRead_pt((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.default.dat")).c_str()); + g5a->SetLineColor(kAzure-3); + g5a->SetLineStyle(kDashed); + g5a->SetLineWidth(3); + g5a->Draw("samel"); + + auto g5b = lutRead_pt((std::string("lutCovm.") + name[i] + std::string(".5kG.100cm.default.dat")).c_str()); + g5b->SetLineColor(kAzure-3); + g5b->SetLineStyle(kSolid); + g5b->SetLineWidth(3); + g5b->Draw("samel"); + + c->SaveAs((std::string("draw_pt.") + name[i] + std::string(".png")).c_str()); + + cc->cd(i + 1); + c->DrawClonePad(); + } + + cc->SaveAs("draw_pt.png"); + +} diff --git a/examples/smearing/draw_pt_scenarios.C b/examples/smearing/draw_pt_scenarios.C new file mode 100644 index 0000000..d7a9cdf --- /dev/null +++ b/examples/smearing/draw_pt_scenarios.C @@ -0,0 +1,62 @@ +#include "style.C" +#include "lutRead_pt.C" + +void +draw_pt_scenarios() +{ + + style(); + + std::vector name = {"el", "pi", "ka", "pr"}; + // std::vector title = {"electron", "pion", "kaon", "proton"}; + std::vector title = {"e", "#pi", "K", "p"}; + + TLatex latex; + latex.SetTextAlign(33); + + auto cc = new TCanvas("cc", "cc", 800, 800); + cc->Divide(2, 2); + + for (int i = 0; i < 4; ++i) { + + auto c = new TCanvas((std::string("c") + name[i]).c_str(), + (std::string("c") + name[i]).c_str(), + 800, 800); + c->SetLogx(); + c->SetLogy(); + c->DrawFrame(1.e-2, 1., 100., 100., ";#it{p}_{T} (GeV/#it{c});momentum resolution (%)"); + 
latex.DrawLatexNDC(0.9, 0.9, title[i].c_str()); + + auto g1 = lutRead_pt((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.scenario1.dat")).c_str()); + g1->SetLineColor(kRed+1); + g1->SetLineStyle(kSolid); + g1->SetLineWidth(3); + g1->Draw("samel"); + + auto g2 = lutRead_pt((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.scenario2.dat")).c_str()); + g2->SetLineColor(kRed+1); + g2->SetLineStyle(kDashed); + g2->SetLineWidth(3); + g2->Draw("samel"); + + auto g3 = lutRead_pt((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.scenario3.dat")).c_str()); + g3->SetLineColor(kAzure-3); + g3->SetLineStyle(kSolid); + g3->SetLineWidth(3); + g3->Draw("samel"); + + auto g4 = lutRead_pt((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.scenario4.dat")).c_str()); + g4->SetLineColor(kAzure-3); + g4->SetLineStyle(kDashed); + g4->SetLineWidth(3); + g4->Draw("samel"); + + c->SaveAs((std::string("draw_pt_scenarios.") + name[i] + std::string(".png")).c_str()); + + cc->cd(i + 1); + c->DrawClonePad(); + } + + cc->SaveAs("draw_pt_scenarios.png"); + +} diff --git a/examples/smearing/draw_pt_tof.C b/examples/smearing/draw_pt_tof.C new file mode 100644 index 0000000..4034188 --- /dev/null +++ b/examples/smearing/draw_pt_tof.C @@ -0,0 +1,56 @@ +#include "style.C" +#include "lutRead_pt.C" + +void +draw_pt_tof() +{ + + style(); + + std::vector name = {"el", "pi", "ka", "pr"}; + // std::vector title = {"electron", "pion", "kaon", "proton"}; + std::vector title = {"e", "#pi", "K", "p"}; + + TLatex latex; + latex.SetTextAlign(33); + + auto cc = new TCanvas("cc", "cc", 800, 800); + cc->Divide(2, 2); + + for (int i = 0; i < 4; ++i) { + + auto c = new TCanvas((std::string("c") + name[i]).c_str(), + (std::string("c") + name[i]).c_str(), + 800, 800); + c->SetLogx(); + c->SetLogy(); + c->DrawFrame(1.e-2, 1., 100., 100., ";#it{p}_{T} (GeV/#it{c});momentum resolution (%)"); + latex.DrawLatexNDC(0.9, 0.9, title[i].c_str()); + + auto g1 = 
lutRead_pt((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.tof1.dat")).c_str()); + g1->SetLineColor(kRed+1); + g1->SetLineStyle(kSolid); + g1->SetLineWidth(3); + g1->Draw("samel"); + + auto g2 = lutRead_pt((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.tof2.dat")).c_str()); + g2->SetLineColor(kAzure-3); + g2->SetLineStyle(kSolid); + g2->SetLineWidth(3); + g2->Draw("samel"); + + auto gdef = lutRead_pt((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.default.dat")).c_str()); + gdef->SetLineColor(kBlack); + gdef->SetLineStyle(kDashed); + gdef->SetLineWidth(2); + gdef->Draw("samel"); + + c->SaveAs((std::string("draw_pt_tof.") + name[i] + std::string(".png")).c_str()); + + cc->cd(i + 1); + c->DrawClonePad(); + } + + cc->SaveAs("draw_pt_tof.png"); + +} diff --git a/examples/smearing/draw_scenarios.C b/examples/smearing/draw_scenarios.C new file mode 100644 index 0000000..c5def41 --- /dev/null +++ b/examples/smearing/draw_scenarios.C @@ -0,0 +1,62 @@ +#include "style.C" +#include "lutRead_dca.C" + +void +draw_scenarios() +{ + + style(); + + std::vector name = {"el", "pi", "ka", "pr"}; + // std::vector title = {"electron", "pion", "kaon", "proton"}; + std::vector title = {"e", "#pi", "K", "p"}; + + TLatex latex; + latex.SetTextAlign(33); + + auto cc = new TCanvas("cc", "cc", 800, 800); + cc->Divide(2, 2); + + for (int i = 0; i < 4; ++i) { + + auto c = new TCanvas((std::string("c") + name[i]).c_str(), + (std::string("c") + name[i]).c_str(), + 800, 800); + c->SetLogx(); + c->SetLogy(); + c->DrawFrame(1.e-2, 0.1, 100., 10000., ";#it{p}_{T} (GeV/#it{c});pointing resolution (#mum)"); + latex.DrawLatexNDC(0.9, 0.9, title[i].c_str()); + + auto g1 = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.scenario1.dat")).c_str()); + g1->SetLineColor(kRed+1); + g1->SetLineStyle(kSolid); + g1->SetLineWidth(3); + g1->Draw("samel"); + + auto g2 = lutRead_dca((std::string("lutCovm.") + name[i] + 
std::string(".5kG.20cm.scenario2.dat")).c_str()); + g2->SetLineColor(kRed+1); + g2->SetLineStyle(kDashed); + g2->SetLineWidth(3); + g2->Draw("samel"); + + auto g3 = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.scenario3.dat")).c_str()); + g3->SetLineColor(kAzure-3); + g3->SetLineStyle(kSolid); + g3->SetLineWidth(3); + g3->Draw("samel"); + + auto g4 = lutRead_dca((std::string("lutCovm.") + name[i] + std::string(".5kG.20cm.scenario4.dat")).c_str()); + g4->SetLineColor(kAzure-3); + g4->SetLineStyle(kDashed); + g4->SetLineWidth(3); + g4->Draw("samel"); + + c->SaveAs((std::string("draw_scenarios.") + name[i] + std::string(".png")).c_str()); + + cc->cd(i + 1); + c->DrawClonePad(); + } + + cc->SaveAs("draw_scenarios.png"); + +} diff --git a/examples/smearing/emcal.C b/examples/smearing/emcal.C new file mode 100644 index 0000000..00c6da4 --- /dev/null +++ b/examples/smearing/emcal.C @@ -0,0 +1,93 @@ +R__LOAD_LIBRARY(libDelphes) +R__LOAD_LIBRARY(libDelphesO2) + +double Bz = 0.2; // [T] +double emcal_radius = 100.; // [cm] +double emcal_length = 200.; // [cm] + +void +emcal(const char *inputFile = "delphes.root", + const char *outputFile = "emcal.root") +{ + + // Create chain of root trees + TChain chain("Delphes"); + chain.Add(inputFile); + + // Create object of class ExRootTreeReader + auto treeReader = new ExRootTreeReader(&chain); + auto numberOfEntries = treeReader->GetEntries(); + + // Get pointers to branches used in this analysis + auto events = treeReader->UseBranch("Event"); + auto tracks = treeReader->UseBranch("Track"); + auto neutrals = treeReader->UseBranch("Neutral"); + auto particles = treeReader->UseBranch("Particle"); + + // histograms + auto hE_el = new TH1F("hE_el", ";log_{10}(energy/GeV);", 400, -2., 2.); + auto hE_ga = new TH1F("hE_ga", ";log_{10}(energy/GeV);", 400, -2., 2.); + + for (Int_t ientry = 0; ientry < numberOfEntries; ++ientry) { + + // Load selected branches with data from specified event + 
treeReader->ReadEntry(ientry); + + // loop over charged tracks + for (Int_t itrack = 0; itrack < tracks->GetEntries(); ++itrack) { + + // get track and corresponding particle + auto track = (Track *)tracks->At(itrack); + auto particle = (GenParticle *)track->Particle.GetObject(); + + // only electron tracks + if (particle->PID != 11) continue; + + // check if has hit the calorimeter + auto x = track->XOuter * 0.1; // [cm] + auto y = track->YOuter * 0.1; // [cm] + auto z = track->ZOuter * 0.1; // [cm] + auto hasHit = fabs(hypot(x, y) - emcal_radius) < 0.001 && fabs(z) < emcal_length; + if (!hasHit) continue; + + // get energy, smear and fill histogram + auto E = track->P4().Energy(); + auto Ee = 0.01 + 0.05 / sqrt(E); + E += gRandom->Gaus(0., E * Ee); + hE_el->Fill(log10(E)); + + } + + // loop over neutral tracks + for (Int_t itrack = 0; itrack < neutrals->GetEntries(); ++itrack) { + + // get track and corresponding particle + auto track = (Track *)neutrals->At(itrack); + auto particle = (GenParticle *)track->Particle.GetObject(); + + // only photon tracks + if (particle->PID != 22) continue; + + // check if has hit the calorimeter + auto x = track->XOuter * 0.1; // [cm] + auto y = track->YOuter * 0.1; // [cm] + auto z = track->ZOuter * 0.1; // [cm] + auto hasHit = fabs(hypot(x, y) - emcal_radius) < 0.001 && fabs(z) < emcal_length; + if (!hasHit) continue; + + // get energy, smear and fill histogram + auto E = track->P4().Energy(); + auto Ee = 0.01 + 0.05 / sqrt(E); + E += gRandom->Gaus(0., E * Ee); + hE_ga->Fill(log10(E)); + + } + + } + + auto fout = TFile::Open(outputFile, "RECREATE"); + hE_el->Write(); + hE_ga->Write(); + fout->Close(); + +} diff --git a/examples/smearing/ftof.C b/examples/smearing/ftof.C new file mode 100644 index 0000000..e924fdc --- /dev/null +++ b/examples/smearing/ftof.C @@ -0,0 +1,119 @@ +R__LOAD_LIBRARY(libDelphes) +R__LOAD_LIBRARY(libDelphesO2) + +double tof_radius = 100.; // [cm] +double tof_length = 200.; // [cm] +double tof_sigmat = 
0.02; // [ns] + +void +ftof(const char *inputFile = "delphes.root", + const char *outputFile = "ftof.root") +{ + + // Create chain of root trees + TChain chain("Delphes"); + chain.Add(inputFile); + + // Create object of class ExRootTreeReader + auto treeReader = new ExRootTreeReader(&chain); + auto numberOfEntries = treeReader->GetEntries(); + + // Get pointers to branches used in this analysis + auto events = treeReader->UseBranch("Event"); + auto tracks = treeReader->UseBranch("Track"); + auto particles = treeReader->UseBranch("Particle"); + + // TOF layer + o2::delphes::TOFLayer toflayer; + toflayer.setup(tof_radius, tof_length, tof_sigmat); + toflayer.setType(o2::delphes::TOFLayer::kForward); + toflayer.setRadiusIn(10.); + + // smearer + o2::delphes::TrackSmearer smearer; + smearer.useEfficiency(true); + smearer.loadTable(11, "lutCovm.el.dat"); + smearer.loadTable(13, "lutCovm.mu.dat"); + smearer.loadTable(211, "lutCovm.pi.dat"); + smearer.loadTable(321, "lutCovm.ka.dat"); + smearer.loadTable(2212, "lutCovm.pr.dat"); + + // logx binning + const Int_t nbins = 80; + double xmin = 1.e-2; + double xmax = 1.e2; + double logxmin = std::log10(xmin); + double logxmax = std::log10(xmax); + double binwidth = (logxmax - logxmin) / nbins; + double xbins[nbins + 1]; + xbins[0] = xmin; + for (Int_t i = 1; i <= nbins; ++i) + xbins[i] = xmin + std::pow(10., logxmin + i * binwidth); + + // histograms + auto hTime0 = new TH1F("hTime0", ";t_{0} (ns)", 1000, -1., 1.); + auto hBetaP = new TH2F("hBetaP", ";#it{p} (GeV/#it{c});#beta", nbins, xbins, 1000, 0.1, 1.1); + TH2 *hNsigmaP[5]; + const char *pname[5] = {"el", "mu", "pi", "ka", "pr"}; + const char *plabel[5] = {"e", "#mu", "#pi", "K", "p"}; + for (int i = 0; i < 5; ++i) + hNsigmaP[i] = new TH2F(Form("hNsigmaP_%s", pname[i]), Form("#it{p} (GeV/#it{c});n#sigma_{%s}", plabel[i]), nbins, xbins, 200, -10., 10.); + + for (Int_t ientry = 0; ientry < numberOfEntries; ++ientry) { + + // Load selected branches with data from specified 
event + treeReader->ReadEntry(ientry); + + // loop over tracks, smear and store TOF tracks + std::vector tof_tracks; + for (Int_t itrack = 0; itrack < tracks->GetEntries(); ++itrack) { + + // get track and corresponding particle + auto track = (Track *)tracks->At(itrack); + auto particle = (GenParticle *)track->Particle.GetObject(); + + // smear track + if (!smearer.smearTrack(*track)) continue; + + // select primaries based on 3 sigma DCA cuts + if (fabs(track->D0 / track->ErrorD0) > 3.) continue; + if (fabs(track->DZ / track->ErrorDZ) > 3.) continue; + + // check if has TOF + if (!toflayer.hasTOF(*track)) continue; + + // push track + tof_tracks.push_back(track); + + } + + // compute the event time + std::array tzero; + toflayer.eventTime(tof_tracks, tzero); + hTime0->Fill(tzero[0]); + + // loop over tracks and do PID + for (auto track : tof_tracks) { + + // fill beta-p + auto p = track->P; + auto beta = toflayer.getBeta(*track); + hBetaP->Fill(p, beta); + + // fill nsigma + std::array deltat, nsigma; + toflayer.makePID(*track, deltat, nsigma); + for (int i = 0; i < 5; ++i) + hNsigmaP[i]->Fill(p, nsigma[i]); + + } + } + + auto fout = TFile::Open(outputFile, "RECREATE"); + hTime0->Write(); + hBetaP->Write(); + for (int i = 0; i < 5; ++i) + hNsigmaP[i]->Write(); + fout->Close(); + +} diff --git a/examples/smearing/lutCovm.hh b/examples/smearing/lutCovm.hh new file mode 120000 index 0000000..c4b241a --- /dev/null +++ b/examples/smearing/lutCovm.hh @@ -0,0 +1 @@ +../../src/lutCovm.hh \ No newline at end of file diff --git a/examples/smearing/lutRead.C b/examples/smearing/lutRead.C new file mode 100644 index 0000000..9226aa1 --- /dev/null +++ b/examples/smearing/lutRead.C @@ -0,0 +1,61 @@ +enum EWhat { + kEfficiency, + kEfficiency2, + kEfficiencyInnerTOF, + kEfficiencyOuterTOF, + kPtResolution, + kRPhiResolution, + kZResolution +}; + +enum EVs { + kNch, + kEta, + kPt +}; + +TGraph * +lutRead(int pdg, const char *filename, int what, int vs, float nch = 0., float radius 
= 0., float eta = 0., float pt = 0.) +{ + o2::delphes::TrackSmearer smearer; + smearer.loadTable(pdg, filename); + auto lutHeader = smearer.getLUTHeader(pdg); + map_t lutMap; + if (vs == kNch) lutMap = lutHeader->nchmap; + if (vs == kEta) lutMap = lutHeader->etamap; + if (vs == kPt) lutMap = lutHeader->ptmap; + auto nbins = lutMap.nbins; + auto g = new TGraph(); + + bool canBeInvalid = true; + for (int i = 0; i < nbins; ++i) { + if (vs == kNch) nch = lutMap.eval(i); + if (vs == kEta) eta = lutMap.eval(i); + if (vs == kPt) pt = lutMap.eval(i); + auto lutEntry = smearer.getLUTEntry(pdg, nch, radius, eta , pt); + if (!lutEntry->valid || lutEntry->eff == 0.) { + if (!canBeInvalid) std::cout << " --- warning: it cannot be invalid \n" << std::endl; + continue; + } + canBeInvalid = false; + + double cen = 0.; + if (vs == kNch) cen = lutEntry->nch; + if (vs == kEta) cen = lutEntry->eta; + if (vs == kPt) cen = lutEntry->pt; + double val = 0.; + if (what == kEfficiency) val = lutEntry->eff * 100.; // efficiency (%) + if (what == kEfficiency2) val = lutEntry->eff2 * 100.; // efficiency (%) + if (what == kEfficiencyInnerTOF) val = lutEntry->itof * 100.; // efficiency (%) + if (what == kEfficiencyOuterTOF) val = lutEntry->otof * 100.; // efficiency (%) + if (what == kPtResolution) val = sqrt(lutEntry->covm[14]) * lutEntry->pt * 100.; // pt resolution (%) + if (what == kRPhiResolution) val = sqrt(lutEntry->covm[0]) * 1.e4; // rphi resolution (um) + if (what == kZResolution) val = sqrt(lutEntry->covm[1]) * 1.e4; // z resolution (um) + if (val < 0.) 
continue; + g->SetPoint(g->GetN(), cen, val); + } + + return g; + +} + diff --git a/examples/smearing/lutRead_allres_vseta.C b/examples/smearing/lutRead_allres_vseta.C new file mode 100644 index 0000000..565c4d9 --- /dev/null +++ b/examples/smearing/lutRead_allres_vseta.C @@ -0,0 +1,106 @@ +#include "lutCovm.hh" + +void lutRead_allres_vseta(const char *filename = "lutCovm.dat", + TGraph *dca_xy = 0, TGraph *dca_z = 0, TGraph *sinp = 0, TGraph *tanl = 0, TGraph* ptres = 0, double pt = 1.) +{ + + // input file + ifstream lutFile(filename, std::ofstream::binary); + + // read header + lutHeader_t lutHeader; + lutFile.read(reinterpret_cast(&lutHeader), sizeof(lutHeader)); + lutHeader.print(); + + // entries + const int nnch = lutHeader.nchmap.nbins; + const int nrad = lutHeader.radmap.nbins; + const int neta = lutHeader.etamap.nbins; + const int npt = lutHeader.ptmap.nbins; + + auto nchbin = lutHeader.nchmap.find(0.); + auto radbin = lutHeader.radmap.find(0.); + auto ptbin = lutHeader.ptmap.find(pt); + + lutEntry_t lutTable[neta]; + + // read entries + for (int inch = 0; inch < nnch; ++inch) { + for (int irad = 0; irad < nrad; ++irad) { + for (int ieta = 0; ieta < neta; ++ieta) { + for (int ipt = 0; ipt < npt; ++ipt) { + if (inch==nchbin && irad==radbin && ipt == ptbin) { + lutFile.read(reinterpret_cast(&lutTable[ieta]), sizeof(lutEntry_t)); + //lutTable[ieta].print(); + } + else { + lutEntry_t dummy; + lutFile.read(reinterpret_cast(&dummy), sizeof(lutEntry_t)); + } + } + } + } + } + + lutFile.close(); + + if (ptres) { + ptres->SetName(filename); + ptres->SetTitle(filename); + ptres->GetXaxis()->SetTitle("#eta"); + ptres->GetYaxis()->SetTitle("momentum resolution (%)"); + } + + if (dca_xy) { + dca_xy->SetName(filename); + dca_xy->SetTitle(filename); + dca_xy->GetXaxis()->SetTitle("#eta"); + dca_xy->GetYaxis()->SetTitle("#sigma(d_{xy}) (#mum)"); + } + + if (dca_z) { + dca_z->SetName(filename); + dca_z->SetTitle(filename); + dca_z->GetXaxis()->SetTitle("#eta"); + 
dca_z->GetYaxis()->SetTitle("#sigma(d_{z}) (#mum)"); + } + + if (sinp) { + sinp->SetName(filename); + sinp->SetTitle(filename); + sinp->GetXaxis()->SetTitle("#eta"); + sinp->GetYaxis()->SetTitle("#sigma(sin(#phi))"); + } + + if (tanl) { + tanl->SetName(filename); + tanl->SetTitle(filename); + tanl->GetXaxis()->SetTitle("#eta"); + tanl->GetYaxis()->SetTitle("#sigma(tam(#lambda))"); + } + for (int ieta = 0; ieta < neta/2; ++ieta) { + auto lutEntry = &lutTable[neta/2+ieta]; + if (!lutEntry->valid) continue; + auto cen = lutEntry->eta; + if (ptres) { + auto val = sqrt(lutEntry->covm[14]) * lutEntry->pt * 100.; + ptres->SetPoint(ptres->GetN(), cen, val); + } + if (dca_xy) { + auto val = sqrt(lutEntry->covm[0]) * 1e4; + dca_xy->SetPoint(dca_xy->GetN(), cen, val); + } + if (dca_z) { + auto val = sqrt(lutEntry->covm[2]) * 1e4; + dca_z->SetPoint(dca_z->GetN(), cen, val); + } + if (sinp) { + auto val = sqrt(lutEntry->covm[5]); + sinp->SetPoint(sinp->GetN(), cen, val); + } + if (tanl) { + auto val = sqrt(lutEntry->covm[9]); + tanl->SetPoint(tanl->GetN(), cen, val); + } + } +} diff --git a/examples/smearing/lutRead_allres_vspt.C b/examples/smearing/lutRead_allres_vspt.C new file mode 100644 index 0000000..0cb8776 --- /dev/null +++ b/examples/smearing/lutRead_allres_vspt.C @@ -0,0 +1,106 @@ +#include "lutCovm.hh" + +void lutRead_allres_vspt(const char *filename = "lutCovm.dat", + TGraph *dca_xy = 0, TGraph *dca_z = 0, TGraph *sinp = 0, TGraph *tanl = 0, TGraph* ptres = 0, double eta = 0.) 
+{ + + // input file + ifstream lutFile(filename, std::ofstream::binary); + + // read header + lutHeader_t lutHeader; + lutFile.read(reinterpret_cast(&lutHeader), sizeof(lutHeader)); + lutHeader.print(); + + // entries + const int nnch = lutHeader.nchmap.nbins; + const int nrad = lutHeader.radmap.nbins; + const int neta = lutHeader.etamap.nbins; + const int npt = lutHeader.ptmap.nbins; + + auto nchbin = lutHeader.nchmap.find(0.); + auto radbin = lutHeader.radmap.find(0.); + auto etabin = lutHeader.etamap.find(eta); + + lutEntry_t lutTable[npt]; + + // read entries + for (int inch = 0; inch < nnch; ++inch) { + for (int irad = 0; irad < nrad; ++irad) { + for (int ieta = 0; ieta < neta; ++ieta) { + for (int ipt = 0; ipt < npt; ++ipt) { + if (inch==nchbin && irad==radbin && ieta == etabin) { + lutFile.read(reinterpret_cast(&lutTable[ipt]), sizeof(lutEntry_t)); + //lutTable[ipt].print(); + } + else { + lutEntry_t dummy; + lutFile.read(reinterpret_cast(&dummy), sizeof(lutEntry_t)); + } + } + } + } + } + + lutFile.close(); + + if (ptres) { + ptres->SetName(filename); + ptres->SetTitle(filename); + ptres->GetXaxis()->SetTitle("#it{p}_{T} (GeV/#it{c})"); + ptres->GetYaxis()->SetTitle("momentum resolution (%)"); + } + + if (dca_xy) { + dca_xy->SetName(filename); + dca_xy->SetTitle(filename); + dca_xy->GetXaxis()->SetTitle("#it{p}_{T} (GeV/#it{c})"); + dca_xy->GetYaxis()->SetTitle("#sigma(d_{xy}) (#mum)"); + } + + if (dca_z) { + dca_z->SetName(filename); + dca_z->SetTitle(filename); + dca_z->GetXaxis()->SetTitle("#it{p}_{T} (GeV/#it{c})"); + dca_z->GetYaxis()->SetTitle("#sigma(d_{z}) (#mum)"); + } + + if (sinp) { + sinp->SetName(filename); + sinp->SetTitle(filename); + sinp->GetXaxis()->SetTitle("#it{p}_{T} (GeV/#it{c})"); + sinp->GetYaxis()->SetTitle("#sigma(sin(#phi))"); + } + + if (tanl) { + tanl->SetName(filename); + tanl->SetTitle(filename); + tanl->GetXaxis()->SetTitle("#it{p}_{T} (GeV/#it{c})"); + tanl->GetYaxis()->SetTitle("#sigma(tam(#lambda))"); + } + for (int ipt = 
0; ipt < npt; ++ipt) { + auto lutEntry = &lutTable[ipt]; + if (!lutEntry->valid) continue; + auto cen = lutEntry->pt; + if (ptres) { + auto val = sqrt(lutEntry->covm[14]) * lutEntry->pt * 100.; + ptres->SetPoint(ptres->GetN(), cen, val); + } + if (dca_xy) { + auto val = sqrt(lutEntry->covm[0]) * 1e4; + dca_xy->SetPoint(dca_xy->GetN(), cen, val); + } + if (dca_z) { + auto val = sqrt(lutEntry->covm[2]) * 1e4; + dca_z->SetPoint(dca_z->GetN(), cen, val); + } + if (sinp) { + auto val = sqrt(lutEntry->covm[5]); + sinp->SetPoint(sinp->GetN(), cen, val); + } + if (tanl) { + auto val = sqrt(lutEntry->covm[9]); + tanl->SetPoint(tanl->GetN(), cen, val); + } + } +} diff --git a/examples/smearing/lutRead_dca.C b/examples/smearing/lutRead_dca.C new file mode 100644 index 0000000..3cf86f6 --- /dev/null +++ b/examples/smearing/lutRead_dca.C @@ -0,0 +1,66 @@ +#include "lutCovm.hh" + +TGraph* lutRead_dca(const char* filename = "lutCovm.dat", + double eta = 0., + double nch = 100.) +{ + + // input file + ifstream lutFile(filename, std::ofstream::binary); + + // read header + lutHeader_t lutHeader; + lutFile.read(reinterpret_cast(&lutHeader), sizeof(lutHeader)); + lutHeader.print(); + + // entries + const int nnch = lutHeader.nchmap.nbins; + const int nrad = lutHeader.radmap.nbins; + const int neta = lutHeader.etamap.nbins; + const int npt = lutHeader.ptmap.nbins; + lutEntry_t lutTable; + + const int nch_bin = lutHeader.nchmap.find(nch); + const int rad_bin = lutHeader.radmap.find(0.); + const int eta_bin = lutHeader.etamap.find(eta); + + // create graph of pt resolution at eta = 0 + auto gpt = new TGraph(); + gpt->SetName(filename); + gpt->SetTitle(filename); + gpt->GetXaxis()->SetTitle("#it{p}_{T} (GeV/#it{c})"); + gpt->GetYaxis()->SetTitle("pointing resolution (#mum)"); + + // read entries + for (int inch = 0; inch < nnch; ++inch) { + for (int irad = 0; irad < nrad; ++irad) { + for (int ieta = 0; ieta < neta; ++ieta) { + for (int ipt = 0; ipt < npt; ++ipt) { + 
lutFile.read(reinterpret_cast(&lutTable), sizeof(lutEntry_t)); + // lutTable.print(); + auto lutEntry = &lutTable; + if (!lutEntry->valid) { + std::cout << " ipt = " << ipt << " is not valid " << std::endl; + continue; + } + if (nch_bin != inch) { + continue; + } + if (eta_bin != ieta) { + continue; + } + if (rad_bin != irad) { + continue; + } + auto cen = lutEntry->pt; + auto val = sqrt(lutEntry->covm[0]) * 1.e4; + gpt->SetPoint(gpt->GetN(), cen, val); + } + } + } + } + + lutFile.close(); + + return gpt; +} diff --git a/examples/smearing/lutRead_dca_vs_eta.C b/examples/smearing/lutRead_dca_vs_eta.C new file mode 100644 index 0000000..738fe26 --- /dev/null +++ b/examples/smearing/lutRead_dca_vs_eta.C @@ -0,0 +1,56 @@ +#include "lutCovm.hh" + +TGraph* lutRead_dca_vs_eta(const char* filename = "lutCovm.dat", + double pt = 1., + double nch = 100.) +{ + + // input file + ifstream lutFile(filename, std::ofstream::binary); + + // read header + lutHeader_t lutHeader; + lutFile.read(reinterpret_cast(&lutHeader), sizeof(lutHeader)); + lutHeader.print(); + + // entries + const int nnch = lutHeader.nchmap.nbins; + const int nrad = lutHeader.radmap.nbins; + const int neta = lutHeader.etamap.nbins; + const int npt = lutHeader.ptmap.nbins; + + const int nch_bin = lutHeader.nchmap.find(nch); + const int rad_bin = lutHeader.radmap.find(0.); + const int pt_bin = lutHeader.ptmap.find(pt); + + lutEntry_t lutTable; + // create graph of pt resolution at eta = 0 + auto gpt = new TGraph(); + gpt->SetName(filename); + gpt->SetTitle(filename); + gpt->GetXaxis()->SetTitle("#it{#eta}"); + gpt->GetYaxis()->SetTitle("pointing resolution (#mum)"); + + // read entries + for (int inch = 0; inch < nnch; ++inch) { + for (int irad = 0; irad < nrad; ++irad) { + for (int ieta = 0; ieta < neta; ++ieta) { + for (int ipt = 0; ipt < npt; ++ipt) { + lutFile.read(reinterpret_cast(&lutTable), sizeof(lutEntry_t)); + // lutTable.print(); + if (inch != nch_bin || irad != rad_bin || ipt != pt_bin) { + continue; 
+ } + auto lutEntry = &lutTable; + if (!lutEntry->valid) continue; + auto val = sqrt(lutEntry->covm[0]) * 1.e4; + gpt->SetPoint(gpt->GetN(), lutEntry->eta, val); + } + } + } + } + + lutFile.close(); + + return gpt; +} diff --git a/examples/smearing/lutRead_eff.C b/examples/smearing/lutRead_eff.C new file mode 100644 index 0000000..b783514 --- /dev/null +++ b/examples/smearing/lutRead_eff.C @@ -0,0 +1,66 @@ +#include "lutCovm.hh" + +TGraph* lutRead_eff(const char* filename = "lutCovm.dat", + double eta = 0., + double nch = 100.) +{ + + // input file + ifstream lutFile(filename, std::ofstream::binary); + + // read header + lutHeader_t lutHeader; + lutFile.read(reinterpret_cast(&lutHeader), sizeof(lutHeader)); + lutHeader.print(); + + // entries + const int nnch = lutHeader.nchmap.nbins; + const int nrad = lutHeader.radmap.nbins; + const int neta = lutHeader.etamap.nbins; + const int npt = lutHeader.ptmap.nbins; + lutEntry_t lutTable; + + const int nch_bin = lutHeader.nchmap.find(nch); + const int rad_bin = lutHeader.radmap.find(0.); + const int eta_bin = lutHeader.etamap.find(eta); + + // create graph of pt resolution at eta = 0 + auto gpt = new TGraph(); + gpt->SetName(filename); + gpt->SetTitle(filename); + gpt->GetXaxis()->SetTitle("#it{p}_{T} (GeV/#it{c})"); + gpt->GetYaxis()->SetTitle("efficiency (%)"); + + // read entries + for (int inch = 0; inch < nnch; ++inch) { + for (int irad = 0; irad < nrad; ++irad) { + for (int ieta = 0; ieta < neta; ++ieta) { + for (int ipt = 0; ipt < npt; ++ipt) { + lutFile.read(reinterpret_cast(&lutTable), sizeof(lutEntry_t)); + // lutTable.print(); + auto lutEntry = &lutTable; + if (!lutEntry->valid) { + std::cout << " ipt = " << ipt << " is not valid " << std::endl; + continue; + } + if (nch_bin != inch) { + continue; + } + if (eta_bin != ieta) { + continue; + } + if (rad_bin != irad) { + continue; + } + auto cen = lutEntry->pt; + auto val = lutEntry->eff * 1.e2; + gpt->SetPoint(gpt->GetN(), cen, val); + } + } + } + } + + 
lutFile.close(); + + return gpt; +} diff --git a/examples/smearing/lutRead_eff_vs_eta.C b/examples/smearing/lutRead_eff_vs_eta.C new file mode 100644 index 0000000..a3fedce --- /dev/null +++ b/examples/smearing/lutRead_eff_vs_eta.C @@ -0,0 +1,56 @@ +#include "lutCovm.hh" + +TGraph* lutRead_eff_vs_eta(const char* filename = "lutCovm.dat", + double pt = 1., + double nch = 100.) +{ + + // input file + ifstream lutFile(filename, std::ofstream::binary); + + // read header + lutHeader_t lutHeader; + lutFile.read(reinterpret_cast(&lutHeader), sizeof(lutHeader)); + lutHeader.print(); + + // entries + const int nnch = lutHeader.nchmap.nbins; + const int nrad = lutHeader.radmap.nbins; + const int neta = lutHeader.etamap.nbins; + const int npt = lutHeader.ptmap.nbins; + + const int nch_bin = lutHeader.nchmap.find(nch); + const int rad_bin = lutHeader.radmap.find(0.); + const int pt_bin = lutHeader.ptmap.find(pt); + + lutEntry_t lutTable; + // create graph of pt resolution at eta = 0 + auto gpt = new TGraph(); + gpt->SetName(filename); + gpt->SetTitle(Form("%s pt=%f nch=%f", filename, pt, nch)); + gpt->GetXaxis()->SetTitle("#it{#eta}"); + gpt->GetYaxis()->SetTitle("efficiency (%)"); + + // read entries + for (int inch = 0; inch < nnch; ++inch) { + for (int irad = 0; irad < nrad; ++irad) { + for (int ieta = 0; ieta < neta; ++ieta) { + for (int ipt = 0; ipt < npt; ++ipt) { + lutFile.read(reinterpret_cast(&lutTable), sizeof(lutEntry_t)); + // lutTable.print(); + if (inch != nch_bin || irad != rad_bin || ipt != pt_bin) { + continue; + } + auto lutEntry = &lutTable; + if (!lutEntry->valid) continue; + auto val = lutEntry->eff * 1.e2; + gpt->SetPoint(gpt->GetN(), lutEntry->eta, val); + } + } + } + } + + lutFile.close(); + + return gpt; +} diff --git a/examples/smearing/lutRead_pt.C b/examples/smearing/lutRead_pt.C new file mode 100644 index 0000000..c13f85e --- /dev/null +++ b/examples/smearing/lutRead_pt.C @@ -0,0 +1,62 @@ +#include "lutCovm.hh" + +TGraph* lutRead_pt(const char* 
filename = "lutCovm.dat", + double eta = 0., + double nch = 100.) +{ + + // input file + ifstream lutFile(filename, std::ofstream::binary); + + // read header + lutHeader_t lutHeader; + lutFile.read(reinterpret_cast(&lutHeader), sizeof(lutHeader)); + lutHeader.print(); + cout << "header done" << endl; + // entries + const int nnch = lutHeader.nchmap.nbins; + const int nrad = lutHeader.radmap.nbins; + const int neta = lutHeader.etamap.nbins; + const int npt = lutHeader.ptmap.nbins; + + auto nchbin = lutHeader.nchmap.find(0.); + auto radbin = lutHeader.radmap.find(0.); + auto etabin = lutHeader.etamap.find(eta); + + lutEntry_t lutTable[npt]; + + // read entries + for (int inch = 0; inch < nnch; ++inch) { + for (int irad = 0; irad < nrad; ++irad) { + for (int ieta = 0; ieta < neta; ++ieta) { + for (int ipt = 0; ipt < npt; ++ipt) { + if (inch == nchbin && irad == radbin && ieta == etabin) { + lutFile.read(reinterpret_cast(&lutTable[ipt]), sizeof(lutEntry_t)); + //lutTable[ipt].print(); + } else { + lutEntry_t dummy; + lutFile.read(reinterpret_cast(&dummy), sizeof(lutEntry_t)); + } + } + } + } + } + + lutFile.close(); + // create graph of pt resolution at eta = 0 + auto gpt = new TGraph(); + gpt->SetName(filename); + gpt->SetTitle(filename); + gpt->GetXaxis()->SetTitle("#it{p}_{T} (GeV/#it{c})"); + gpt->GetYaxis()->SetTitle("momentum resolution (%)"); + for (int ipt = 0; ipt < npt; ++ipt) { + auto lutEntry = &lutTable[ipt]; + if (!lutEntry->valid) + continue; + auto cen = lutEntry->pt; + auto val = sqrt(lutEntry->covm[14]) * lutEntry->pt * 100.; + gpt->SetPoint(gpt->GetN(), cen, val); + } + + return gpt; +} diff --git a/examples/smearing/lutRead_pt_vs_eta.C b/examples/smearing/lutRead_pt_vs_eta.C new file mode 100644 index 0000000..c47225a --- /dev/null +++ b/examples/smearing/lutRead_pt_vs_eta.C @@ -0,0 +1,56 @@ +#include "lutCovm.hh" + +TGraph* lutRead_pt_vs_eta(const char* filename = "lutCovm.dat", + double pt = 1., + double nch = 100.) 
+{ + + // input file + ifstream lutFile(filename, std::ofstream::binary); + + // read header + lutHeader_t lutHeader; + lutFile.read(reinterpret_cast(&lutHeader), sizeof(lutHeader)); + lutHeader.print(); + + // entries + const int nnch = lutHeader.nchmap.nbins; + const int nrad = lutHeader.radmap.nbins; + const int neta = lutHeader.etamap.nbins; + const int npt = lutHeader.ptmap.nbins; + + const int nch_bin = lutHeader.nchmap.find(nch); + const int rad_bin = lutHeader.radmap.find(0.); + const int pt_bin = lutHeader.ptmap.find(pt); + + lutEntry_t lutTable; + // create graph of pt resolution at eta = 0 + auto gpt = new TGraph(); + gpt->SetName(filename); + gpt->SetTitle(filename); + gpt->GetXaxis()->SetTitle("#it{#eta}"); + gpt->GetYaxis()->SetTitle("momentum resolution (%)"); + + // read entries + for (int inch = 0; inch < nnch; ++inch) { + for (int irad = 0; irad < nrad; ++irad) { + for (int ieta = 0; ieta < neta; ++ieta) { + for (int ipt = 0; ipt < npt; ++ipt) { + lutFile.read(reinterpret_cast(&lutTable), sizeof(lutEntry_t)); + // lutTable.print(); + if (inch != nch_bin || irad != rad_bin || ipt != pt_bin) { + continue; + } + auto lutEntry = &lutTable; + if (!lutEntry->valid) continue; + auto val = sqrt(lutEntry->covm[14]) * lutEntry->pt * 100.; + gpt->SetPoint(gpt->GetN(), lutEntry->eta, val); + } + } + } + } + + lutFile.close(); + + return gpt; +} diff --git a/examples/smearing/luts/.gitignore b/examples/smearing/luts/.gitignore new file mode 100644 index 0000000..52ac371 --- /dev/null +++ b/examples/smearing/luts/.gitignore @@ -0,0 +1 @@ +lutCovm* diff --git a/examples/smearing/luts/README.md b/examples/smearing/luts/README.md new file mode 100644 index 0000000..4690c7e --- /dev/null +++ b/examples/smearing/luts/README.md @@ -0,0 +1,2 @@ +# Directory to store lookup tables for the covariance matrices +- you can create yours or get them from the repository mantainers diff --git a/examples/smearing/luts/lutCovm.el.2kG.dat 
b/examples/smearing/luts/lutCovm.el.2kG.dat deleted file mode 100644 index 81e97d3..0000000 Binary files a/examples/smearing/luts/lutCovm.el.2kG.dat and /dev/null differ diff --git a/examples/smearing/luts/lutCovm.el.5kG.dat b/examples/smearing/luts/lutCovm.el.5kG.dat deleted file mode 100644 index 6bfd5e5..0000000 Binary files a/examples/smearing/luts/lutCovm.el.5kG.dat and /dev/null differ diff --git a/examples/smearing/luts/lutCovm.el.5kG.fwd.dat b/examples/smearing/luts/lutCovm.el.5kG.fwd.dat deleted file mode 100644 index ad2c6b3..0000000 Binary files a/examples/smearing/luts/lutCovm.el.5kG.fwd.dat and /dev/null differ diff --git a/examples/smearing/luts/lutCovm.ka.2kG.dat b/examples/smearing/luts/lutCovm.ka.2kG.dat deleted file mode 100644 index 89cf058..0000000 Binary files a/examples/smearing/luts/lutCovm.ka.2kG.dat and /dev/null differ diff --git a/examples/smearing/luts/lutCovm.ka.5kG.dat b/examples/smearing/luts/lutCovm.ka.5kG.dat deleted file mode 100644 index 8d6c676..0000000 Binary files a/examples/smearing/luts/lutCovm.ka.5kG.dat and /dev/null differ diff --git a/examples/smearing/luts/lutCovm.ka.5kG.fwd.dat b/examples/smearing/luts/lutCovm.ka.5kG.fwd.dat deleted file mode 100644 index ea79572..0000000 Binary files a/examples/smearing/luts/lutCovm.ka.5kG.fwd.dat and /dev/null differ diff --git a/examples/smearing/luts/lutCovm.mu.2kG.dat b/examples/smearing/luts/lutCovm.mu.2kG.dat deleted file mode 100644 index b504536..0000000 Binary files a/examples/smearing/luts/lutCovm.mu.2kG.dat and /dev/null differ diff --git a/examples/smearing/luts/lutCovm.mu.5kG.dat b/examples/smearing/luts/lutCovm.mu.5kG.dat deleted file mode 100644 index 2edfc2e..0000000 Binary files a/examples/smearing/luts/lutCovm.mu.5kG.dat and /dev/null differ diff --git a/examples/smearing/luts/lutCovm.mu.5kG.fwd.dat b/examples/smearing/luts/lutCovm.mu.5kG.fwd.dat deleted file mode 100644 index cde40b3..0000000 Binary files a/examples/smearing/luts/lutCovm.mu.5kG.fwd.dat and /dev/null 
differ diff --git a/examples/smearing/luts/lutCovm.pi.2kG.dat b/examples/smearing/luts/lutCovm.pi.2kG.dat deleted file mode 100644 index 1f4d4f0..0000000 Binary files a/examples/smearing/luts/lutCovm.pi.2kG.dat and /dev/null differ diff --git a/examples/smearing/luts/lutCovm.pi.5kG.dat b/examples/smearing/luts/lutCovm.pi.5kG.dat deleted file mode 100644 index 5550723..0000000 Binary files a/examples/smearing/luts/lutCovm.pi.5kG.dat and /dev/null differ diff --git a/examples/smearing/luts/lutCovm.pi.5kG.fwd.dat b/examples/smearing/luts/lutCovm.pi.5kG.fwd.dat deleted file mode 100644 index ec7dccb..0000000 Binary files a/examples/smearing/luts/lutCovm.pi.5kG.fwd.dat and /dev/null differ diff --git a/examples/smearing/luts/lutCovm.pr.2kG.dat b/examples/smearing/luts/lutCovm.pr.2kG.dat deleted file mode 100644 index 20a496f..0000000 Binary files a/examples/smearing/luts/lutCovm.pr.2kG.dat and /dev/null differ diff --git a/examples/smearing/luts/lutCovm.pr.5kG.dat b/examples/smearing/luts/lutCovm.pr.5kG.dat deleted file mode 100644 index 2f9db7d..0000000 Binary files a/examples/smearing/luts/lutCovm.pr.5kG.dat and /dev/null differ diff --git a/examples/smearing/luts/lutCovm.pr.5kG.fwd.dat b/examples/smearing/luts/lutCovm.pr.5kG.fwd.dat deleted file mode 100644 index 1188330..0000000 Binary files a/examples/smearing/luts/lutCovm.pr.5kG.fwd.dat and /dev/null differ diff --git a/examples/smearing/muon.C b/examples/smearing/muon.C new file mode 100644 index 0000000..6201248 --- /dev/null +++ b/examples/smearing/muon.C @@ -0,0 +1,78 @@ +R__LOAD_LIBRARY(libDelphes) +R__LOAD_LIBRARY(libDelphesO2) + +void muon(const char *inputFile = "delphes.root", + const char *inputFileAccMuonPID = "muonAccEffPID.root", + const char *outputFile = "muon.root") { + + // Create chain of root trees + TChain chain("Delphes"); + chain.Add(inputFile); + + // Create object of class ExRootTreeReader + auto treeReader = new ExRootTreeReader(&chain); + auto numberOfEntries = treeReader->GetEntries(); + 
+ // Get pointers to branches used in this analysis + auto events = treeReader->UseBranch("Event"); + auto tracks = treeReader->UseBranch("Track"); + auto particles = treeReader->UseBranch("Particle"); + + // MID detector + o2::delphes::MIDdetector mid; + mid.setup(inputFileAccMuonPID); + + // smearer + // o2::delphes::TrackSmearer smearer; + // smearer.useEfficiency(true); + // smearer.loadTable(11, "lutCovm.el.dat"); + // smearer.loadTable(13, "lutCovm.mu.dat"); + // smearer.loadTable(211, "lutCovm.pi.dat"); + // smearer.loadTable(321, "lutCovm.ka.dat"); + // smearer.loadTable(2212, "lutCovm.pr.dat"); + + // logx binning + const Int_t nbins = 80; + double xmin = 1.e-2; + double xmax = 1.e2; + double logxmin = std::log10(xmin); + double logxmax = std::log10(xmax); + double binwidth = (logxmax - logxmin) / nbins; + double xbins[nbins + 1]; + for (Int_t i=0; i<=nbins; ++i) xbins[i] = std::pow(10., logxmin + i * binwidth); + + auto hPtMuons = new TH1F("hPtMuons", "hPtMuons;#it{p_{T}} (GeV/#it{c});", nbins, xbins); + auto hPtAll = new TH1F("hPtAll", "hPtAll;#it{p_{T}} (GeV/#it{c});", nbins, xbins); + + for (Int_t ientry = 0; ientry < numberOfEntries; ++ientry) { + + // Load selected branches with data from specified event + treeReader->ReadEntry(ientry); + + // loop over tracks + Int_t multiplicity = tracks->GetEntries(); + for (Int_t itrack = 0; itrack < tracks->GetEntries(); ++itrack) { + + auto track = (Track*) tracks->At(itrack); + + // smear track + // if (!smearer.smearTrack(*track)) continue; + + // check if has MID + if (mid.hasMID(*track)) { + hPtAll->Fill(track->PT); + if (mid.isMuon(*track, multiplicity)) hPtMuons->Fill(track->PT); + } + + } + + } + + auto fout = TFile::Open(outputFile, "RECREATE"); + + hPtMuons ->Write(); + hPtAll ->Write(); + + fout->Close(); + +} diff --git a/examples/smearing/print_lut.py b/examples/smearing/print_lut.py new file mode 100755 index 0000000..befa161 --- /dev/null +++ b/examples/smearing/print_lut.py @@ -0,0 +1,33 @@ 
+#!/usr/bin/env python3 + +""" +Inspector of the LUT file. +""" + +from ROOT import gInterpreter +import argparse + +def main(file_name): + headers = """ + #include "lutCovm.hh" + #include + #include + """ + + gInterpreter.ProcessLine(headers) + + gInterpreter.ProcessLine(f"const char* filename = \"{file_name}\";") + printer = """ + ifstream lutFile(filename, std::ofstream::binary); + lutHeader_t lutHeader; + lutFile.read(reinterpret_cast(&lutHeader), sizeof(lutHeader)); + lutHeader.print(); + """ + gInterpreter.ProcessLine(printer) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument("input_file", type=str, + help="Name of the input file.") + args = parser.parse_args() + main(args.input_file) diff --git a/examples/smearing/rich.C b/examples/smearing/rich.C new file mode 100644 index 0000000..6d1cd36 --- /dev/null +++ b/examples/smearing/rich.C @@ -0,0 +1,163 @@ +R__LOAD_LIBRARY(libDelphes) +R__LOAD_LIBRARY(libDelphesO2) + +double rich_radius = 100.; // [cm] +double rich_length = 200.; // [cm] + +void +rich(const char *inputFile = "delphes.root", + const char *outputFile = "rich.root") +{ + + // Create chain of root trees + TChain chain("Delphes"); + chain.Add(inputFile); + + // Create object of class ExRootTreeReader + auto treeReader = new ExRootTreeReader(&chain); + auto numberOfEntries = treeReader->GetEntries(); + + // Get pointers to branches used in this analysis + auto events = treeReader->UseBranch("Event"); + auto tracks = treeReader->UseBranch("Track"); + auto particles = treeReader->UseBranch("Particle"); + + // RICH detector + o2::delphes::RICHdetector richdetector; + richdetector.setup(rich_radius, rich_length); + richdetector.setIndex(1.03); + richdetector.setRadiatorLength(2.); + richdetector.setEfficiency(0.4); + richdetector.setSigma(7.e-3); + + // smearer + o2::delphes::TrackSmearer smearer; + smearer.loadTable(11, "lutCovm.el.dat"); + smearer.loadTable(13, "lutCovm.mu.dat"); + 
smearer.loadTable(211, "lutCovm.pi.dat"); + smearer.loadTable(321, "lutCovm.ka.dat"); + smearer.loadTable(2212, "lutCovm.pr.dat"); + + // logx binning + const Int_t nbins = 200; + double xmin = 1.e-2; + double xmax = 1.e2; + double logxmin = std::log10(xmin); + double logxmax = std::log10(xmax); + double binwidth = (logxmax - logxmin) / nbins; + double xbins[nbins + 1]; + xbins[0] = xmin; + for (Int_t i = 1; i <= nbins; ++i) + xbins[i] = xmin + std::pow(10., logxmin + i * binwidth); + + TH1F *h = new TH1F("h","hist with log x axis",nbins,xbins); + + // histograms + auto hAngleP = new TH2F("hAngleP", ";#it{p} (GeV/#it{c});#theta (rad)", nbins, xbins, 250, 0., 0.25); + TH2 *hAngleP_true[5]; + TH1 *hGenP[5], *hGenPt[5]; + TH1 *hAccP[5], *hAccPt[5]; + TH1 *hRecP[5], *hRecPt[5]; + TH2 *hNsigmaP[5], *hNsigmaP_true[5][5]; + TH2 *hNsigmaPt[5], *hNsigmaPt_true[5][5]; + const char *pname[5] = {"el", "mu", "pi", "ka", "pr"}; + const char *plabel[5] = {"e", "#mu", "#pi", "K", "p"}; + std::map pidmap = { {11, 0}, {13, 1}, {211, 2}, {321, 3}, {2212, 4} }; + std::map pidmass = { {0, 0.00051099891}, {1, 0.10565800}, {2, 0.13957000}, {3, 0.49367700}, {4, 0.93827200} }; + for (int i = 0; i < 5; ++i) { + hGenP[i] = new TH1F(Form("hGenP_%s", pname[i]), ";#it{p} (GeV/#it{c})", nbins, xbins); + hGenPt[i] = new TH1F(Form("hGenPt_%s", pname[i]), ";#it{p} (GeV/#it{c})", nbins, xbins); + hAccP[i] = new TH1F(Form("hAccP_%s", pname[i]), ";#it{p} (GeV/#it{c})", nbins, xbins); + hAccPt[i] = new TH1F(Form("hAccPt_%s", pname[i]), ";#it{p} (GeV/#it{c})", nbins, xbins); + hRecP[i] = new TH1F(Form("hRecP_%s", pname[i]), ";#it{p} (GeV/#it{c})", nbins, xbins); + hRecPt[i] = new TH1F(Form("hRecPt_%s", pname[i]), ";#it{p} (GeV/#it{c})", nbins, xbins); + hAngleP_true[i] = new TH2F(Form("hAngleP_true_%s", pname[i]), ";#it{p} (GeV/#it{c});#theta (rad)", nbins, xbins, 250, 0., 0.25); + hNsigmaP[i] = new TH2F(Form("hNsigmaP_%s", pname[i]), Form(";#it{p} (GeV/#it{c});n#sigma_{%s}", plabel[i]), nbins, xbins, 
500, -25., 25.); + hNsigmaPt[i] = new TH2F(Form("hNsigmaPt_%s", pname[i]), Form(";#it{p_{T}} (GeV/#it{c});n#sigma_{%s}", plabel[i]), nbins, xbins, 500, -25., 25.); + for (int j = 0; j < 5; ++j) { + hNsigmaP_true[i][j] = new TH2F(Form("hNsigmaP_%s_true_%s", pname[i], pname[j]), Form(";#it{p} (GeV/#it{c});n#sigma_{%s}", plabel[i]), nbins, xbins, 500, -25., 25.); + hNsigmaPt_true[i][j] = new TH2F(Form("hNsigmaPt_%s_true_%s", pname[i], pname[j]), Form(";#it{p_{T}} (GeV/#it{c});n#sigma_{%s}", plabel[i]), nbins, xbins, 500, -25., 25.); + } + } + + for (Int_t ientry = 0; ientry < numberOfEntries; ++ientry) { + + // Load selected branches with data from specified event + treeReader->ReadEntry(ientry); + + // loop over tracks and smear + std::vector tof_tracks; + for (Int_t itrack = 0; itrack < tracks->GetEntries(); ++itrack) { + + // get track and corresponding particle + auto track = (Track *)tracks->At(itrack); + auto particle = (GenParticle *)track->Particle.GetObject(); + + auto pdg = std::abs(track->PID); + auto ipdg = pidmap[pdg]; + auto true_m = pidmass[ipdg]; + auto true_p = track->P; + auto true_pt = track->PT; + + if (std::fabs(track->Eta) > 0.5) continue; + + hGenP[ipdg]->Fill(true_p); + hGenPt[ipdg]->Fill(true_pt); + + // smear track + if (!smearer.smearTrack(*track)) continue; + auto p = track->P; + auto pt = track->PT; + + // select primaries based on 3 sigma DCA cuts + if (fabs(track->D0 / track->ErrorD0) > 3.) continue; + if (fabs(track->DZ / track->ErrorDZ) > 3.) continue; + + // check if has RICH + if (!richdetector.hasRICH(*track)) continue; + + // fill beta-p + auto measurement = richdetector.getMeasuredAngle(*track); + auto angle = measurement.first; + auto anglee = measurement.second; + if (anglee == 0.) 
continue; + + hRecP[ipdg]->Fill(true_p); + hRecPt[ipdg]->Fill(true_pt); + + hAngleP->Fill(p, angle); + hAngleP_true[ipdg]->Fill(p, angle); + + // make pid + std::array deltaangle, nsigma; + richdetector.makePID(*track, deltaangle, nsigma); + for (int i = 0; i < 5; ++i) { + hNsigmaP[i]->Fill(p, nsigma[i]); + hNsigmaPt[i]->Fill(pt, nsigma[i]); + hNsigmaP_true[i][ipdg]->Fill(p, nsigma[i]); + hNsigmaPt_true[i][ipdg]->Fill(pt, nsigma[i]); + } + + } + } + + auto fout = TFile::Open(outputFile, "RECREATE"); + hAngleP->Write(); + for (int i = 0; i < 5; ++i) { + hGenP[i]->Write(); + hGenPt[i]->Write(); + hAccP[i]->Write(); + hAccPt[i]->Write(); + hRecP[i]->Write(); + hRecPt[i]->Write(); + hAngleP_true[i]->Write(); + hNsigmaP[i]->Write(); + hNsigmaPt[i]->Write(); + for (int j = 0; j < 5; ++j) { + hNsigmaP_true[i][j]->Write(); + hNsigmaPt_true[i][j]->Write(); + } + } + fout->Close(); + +} diff --git a/examples/smearing/style.C b/examples/smearing/style.C new file mode 100644 index 0000000..71e31bf --- /dev/null +++ b/examples/smearing/style.C @@ -0,0 +1,110 @@ +#ifndef style_C +#define style_C + +void style() +{ + + gStyle->SetPadColor(0); + gStyle->SetPadBorderSize(0); + gStyle->SetPadBorderMode(0); + gStyle->SetPadTickX(1); + gStyle->SetPadTickY(1); + gStyle->SetCanvasColor(0); + gStyle->SetCanvasBorderMode(0); + gStyle->SetCanvasBorderSize(0); + gStyle->SetFrameBorderMode(0); + //gStyle->SetFrameLineColor(0); + gStyle->SetFrameFillColor(0); + + //gStyle->SetOptStat(00000); + //gStyle->SetTitleColor(0); + gStyle->SetTitleBorderSize(0); + // gStyle->SetTitleTextColor(0); + // gStyle->SetTitleFillColor(0); + + /* + gStyle->SetTitleColor(0); + gStyle->SetTitleBorderSize(0); + gStyle->SetTitleTextColor(0); + gStyle->SetTitleFillColor(0); + */ + // gStyle->SetPalette(1); + gStyle->SetCanvasColor(0); + gStyle->SetHistFillColor(0); + gStyle->SetHistFillStyle(0); + gStyle->SetOptStat(0); + // gStyle->SetPadTickX(1); + // gStyle->SetPadTickY(1); + gStyle->SetAxisColor(1, "X"); + 
gStyle->SetAxisColor(1, "Y"); + gStyle->SetAxisColor(1, "Z"); + /* + gStyle->SetLabelColor(0, "X"); + gStyle->SetLabelColor(0, "Y"); + gStyle->SetLabelColor(0, "Z"); + gStyle->SetTickLength(0.0, "X"); + gStyle->SetTickLength(0.0, "Y"); + gStyle->SetTickLength(0.0, "Z"); + */ + gStyle->SetTitleXSize(0.05); + gStyle->SetTitleYSize(0.05); + gStyle->SetNdivisions(506, "X"); + gStyle->SetNdivisions(506, "Y"); + gStyle->SetNdivisions(506, "Z"); + + //gStyle->SetPadGridX(1); + //gStyle->SetPadGridY(1); + + //gStyle->SetLabelOffset(0.02, "X"); + //gStyle->SetLabelOffset(0.01, "Y"); + //gStyle->SetLabelOffset(0.02, "Z"); + gStyle->SetLabelSize(0.04, "xyz"); + gStyle->SetTitleOffset(1.2,"xyz"); + gStyle->SetTitleFont(42,"xyz"); + + gStyle->SetTextFont(42); + gStyle->SetTextSize(0.06); + + gStyle->SetPadLeftMargin(0.15); + gStyle->SetPadRightMargin(0.05); + gStyle->SetPadBottomMargin(0.15); + gStyle->SetPadTopMargin(0.05); + + gROOT->ForceStyle(); + +} + +void SetGraphStyle(TGraph *g, Int_t m, Int_t c) +{ + g->SetMarkerStyle(m); + g->SetMarkerColor(c); + g->SetLineColor(c); + g->SetLineWidth(1); + g->SetFillStyle(0); + g->SetFillColor(0); + g->SetMarkerSize(2.0); + if (m == 28 || m == 34 || m == 23 || m == 32 || m == 22) + g->SetMarkerSize(2.5); + if (m == 27 || m == 33 || m == 30 || m == 29) + g->SetMarkerSize(3.0); +} + +void SetHistoStyle(TH1 *h, Int_t m, Int_t c, Int_t w = 1, Int_t s = 1) +{ + h->SetMarkerStyle(m); + h->SetMarkerColor(c); + h->SetLineColor(c); + h->SetLineWidth(w); + h->SetLineStyle(s); + h->SetFillStyle(0); + h->SetFillColor(0); + h->SetMarkerSize(2.0); + if (m == 28 || m == 34 || m == 23 || m == 32 || m == 22) + h->SetMarkerSize(2.5); + if (m == 27 || m == 33 || m == 30 || m == 29) + h->SetMarkerSize(3.0); + if (m == 0) + h->SetMarkerSize(0); +} + +#endif /* style_C */ diff --git a/examples/smearing/tof.C b/examples/smearing/tof.C index 3a9ca4a..57e71c4 100644 --- a/examples/smearing/tof.C +++ b/examples/smearing/tof.C @@ -1,10 +1,19 @@ 
R__LOAD_LIBRARY(libDelphes) R__LOAD_LIBRARY(libDelphesO2) -double Bz = 0.2; // [T] double tof_radius = 100.; // [cm] double tof_length = 200.; // [cm] double tof_sigmat = 0.02; // [ns] +double tof_sigma0 = 0.20; // [ns] + +double tof_mismatch = 0.01; +std::string tof_mismatch_fname; + +// this part is for the PID of a potential MUON detector +// it stores the probability for a track to be ID as a muon +// one should do it nicely with the (eta-pt) dependence and correct numbers +// this is just an example based on std::map to store flat probability for different input PID +std::map muon_idp = { {11, 0.01 } , {13, 0.95} , {211, 0.10} , {321, 0.15} , {2212, 0.05} }; void tof(const char *inputFile = "delphes.root", @@ -26,36 +35,56 @@ tof(const char *inputFile = "delphes.root", // TOF layer o2::delphes::TOFLayer toflayer; - toflayer.setup(tof_radius, tof_length, tof_sigmat); + toflayer.setup(tof_radius, tof_length, tof_sigmat, tof_sigma0); // smearer o2::delphes::TrackSmearer smearer; - if (Bz == 0.2) { - smearer.loadTable(11, "lutCovm.el.2kG.dat"); - smearer.loadTable(13, "lutCovm.mu.2kG.dat"); - smearer.loadTable(211, "lutCovm.pi.2kG.dat"); - smearer.loadTable(321, "lutCovm.ka.2kG.dat"); - smearer.loadTable(2212, "lutCovm.pr.2kG.dat"); - } else if (Bz == 0.5) { - smearer.loadTable(11, "lutCovm.el.5kG.dat"); - smearer.loadTable(13, "lutCovm.mu.5kG.dat"); - smearer.loadTable(211, "lutCovm.pi.5kG.dat"); - smearer.loadTable(321, "lutCovm.ka.5kG.dat"); - smearer.loadTable(2212, "lutCovm.pr.5kG.dat"); - } else { - std::cout << " --- invalid Bz field: " << Bz << std::endl; - return; - } + smearer.useEfficiency(true); + smearer.loadTable(11, "lutCovm.el.dat"); + smearer.loadTable(13, "lutCovm.mu.dat"); + smearer.loadTable(211, "lutCovm.pi.dat"); + smearer.loadTable(321, "lutCovm.ka.dat"); + smearer.loadTable(2212, "lutCovm.pr.dat"); + // logx binning + const Int_t nbins = 80; + double xmin = 1.e-2; + double xmax = 1.e2; + double logxmin = std::log10(xmin); + double logxmax = 
std::log10(xmax); + double binwidth = (logxmax - logxmin) / nbins; + double xbins[nbins + 1]; + for (Int_t i = 0; i <= nbins; ++i) + xbins[i] = std::pow(10., logxmin + i * binwidth); + // histograms auto hTime0 = new TH1F("hTime0", ";t_{0} (ns)", 1000, -1., 1.); - auto hBetaP = new TH2F("hBetaP", ";log_{10}(#it{p}/GeV);#beta", 400, -2., 2., 1000, 0.1, 1.1); - TH2 *hNsigmaP[5]; + auto hBetaP = new TH2F("hBetaP", ";#it{p} (GeV/#it{c});#beta", nbins, xbins, 1000, 0.1, 1.1); + TH2 *hHit = new TH2F("hHit", ";#eta;#it{p}_{T} (GeV/#it{c})", 200, -4., 4., nbins, xbins); + TH2 *hNsigmaPt[5], *hNsigmaPt_true[5][5]; const char *pname[5] = {"el", "mu", "pi", "ka", "pr"}; const char *plabel[5] = {"e", "#mu", "#pi", "K", "p"}; - for (int i = 0; i < 5; ++i) - hNsigmaP[i] = new TH2F(Form("hNsigmaP_%s", pname[i]), Form(";log_{10}(#it{p}/GeV);n#sigma_{%s}", plabel[i]), 40, -2., 2., 200, -10., 10.); + for (int i = 0; i < 5; ++i) { + hNsigmaPt[i] = new TH2F(Form("hNsigmaPt_%s", pname[i]), Form(";#it{p_{T}} (GeV/#it{c});n#sigma_{%s}", plabel[i]), nbins, xbins, 500, -25., 25.); + for (int j = 0; j < 5; ++j) { + hNsigmaPt_true[i][j] = new TH2F(Form("hNsigmaPt_%s_true_%s", pname[i], pname[j]), Form(";#it{p_{T}} (GeV/#it{c});n#sigma_{%s}", plabel[i]), nbins, xbins, 500, -25., 25.); + } + } + auto hMismatchTemplateOut = new TH1F("hMismatchTemplate", "", 3000., -5., 25.); + auto hMuonPt[i] = new TH1F("hMuonPt", ";#it{p_{T}} (GeV/#it{c});", nbins, xbins); + + // read mismatch template if requested + TH1 *hMismatchTemplateIn = nullptr; + if (!tof_mismatch_fname.empty()) { + auto fmismatch = TFile::Open(tof_mismatch_fname.c_str()); + hMismatchTemplateIn = (TH1 *)fmismatch->Get("hMismatchTemplate"); + hMismatchTemplateIn->SetDirectory(0); + fmismatch->Close(); + } + + std::map pidmap = { {11, 0}, {13, 1}, {211, 2}, {321, 3}, {2212, 4} }; + for (Int_t ientry = 0; ientry < numberOfEntries; ++ientry) { // Load selected branches with data from specified event @@ -72,13 +101,33 @@ tof(const char 
*inputFile = "delphes.root", // smear track if (!smearer.smearTrack(*track)) continue; + // check if it is identified as a muon by MUON ID + auto pdg = std::abs(track->PID); + auto muonp = muon_idp[pdg]; + if (gRandom->Uniform() < muonp) + hMuonPt->Fill(track->PT); + + // check if has TOF + if (!toflayer.hasTOF(*track)) continue; + + // fill output mismatch template + auto L = std::sqrt(track->XOuter * track->XOuter + + track->YOuter * track->YOuter + + track->ZOuter * track->ZOuter); + hMismatchTemplateOut->Fill(track->TOuter * 1.e9 - L / 299.79246); + + // do some random mismatch + if (hMismatchTemplateIn && gRandom->Uniform() < tof_mismatch) { + track->TOuter = (hMismatchTemplateIn->GetRandom() + L / 299.79246) * 1.e-9; + } + // select primaries based on 3 sigma DCA cuts if (fabs(track->D0 / track->ErrorD0) > 3.) continue; if (fabs(track->DZ / track->ErrorDZ) > 3.) continue; - // check if has TOF - if (!toflayer.hasTOF(*track)) continue; - + // fill hit histogram with true (eta,pt) + hHit->Fill(particle->Eta, particle->PT); + // push track tof_tracks.push_back(track); @@ -92,16 +141,22 @@ tof(const char *inputFile = "delphes.root", // loop over tracks and do PID for (auto track : tof_tracks) { + auto pdg = std::abs(track->PID); + auto ipdg = pidmap[pdg]; + // fill beta-p auto p = track->P; + auto pt = track->PT; auto beta = toflayer.getBeta(*track); - hBetaP->Fill(log10(p), beta); + hBetaP->Fill(p, beta); // fill nsigma std::array deltat, nsigma; toflayer.makePID(*track, deltat, nsigma); - for (int i = 0; i < 5; ++i) - hNsigmaP[i]->Fill(log10(p), nsigma[i]); + for (int i = 0; i < 5; ++i) { + hNsigmaPt[i]->Fill(pt, nsigma[i]); + hNsigmaPt_true[i][ipdg]->Fill(pt, nsigma[i]); + } } } @@ -109,8 +164,15 @@ tof(const char *inputFile = "delphes.root", auto fout = TFile::Open(outputFile, "RECREATE"); hTime0->Write(); hBetaP->Write(); - for (int i = 0; i < 5; ++i) - hNsigmaP[i]->Write(); + hHit->Write(); + hMismatchTemplateOut->Write(); + for (int i = 0; i < 5; ++i) { + 
hNsigmaPt[i]->Write(); + for (int j = 0; j < 5; ++j) { + hNsigmaPt_true[i][j]->Write(); + } + } + hMuonPt->Write(); fout->Close(); } diff --git a/rpythia8/.clang-format b/rpythia8/.clang-format new file mode 120000 index 0000000..538fb4d --- /dev/null +++ b/rpythia8/.clang-format @@ -0,0 +1 @@ +../examples/aod/.clang-format \ No newline at end of file diff --git a/rpythia8/CMakeLists.txt b/rpythia8/CMakeLists.txt new file mode 100644 index 0000000..47d9de2 --- /dev/null +++ b/rpythia8/CMakeLists.txt @@ -0,0 +1,43 @@ +### @author: Roberto Preghenella +### @email: preghenella@bo.infn.it + +find_package(HepMC REQUIRED) +find_package(Pythia REQUIRED) +find_package(ROOT REQUIRED) +find_package(Boost COMPONENTS program_options REQUIRED) + +find_path(ROOT_INCLUDE_DIR + NAMES TMath.h + PATH_SUFFIXES ROOT + PATHS $ENV{ROOTSYS}/include) + +set(ROOT_INCLUDE_DIR ${ROOT_INCLUDE_DIR}) + +include_directories(${Pythia_INCLUDE_DIR} ${HepMC_INCLUDE_DIR} ${ROOT_INCLUDE_DIR}) + +add_executable(rpythia8 rpythia8.cc) +target_link_libraries(rpythia8 + ${Pythia_LIBRARIES} + ${HepMC_LIBRARIES} ${Boost_LIBRARIES}) +install(TARGETS rpythia8 RUNTIME DESTINATION bin) + +add_executable(rpythia8-gun rpythia8-gun.cc) +target_link_libraries(rpythia8-gun + ${Pythia_LIBRARIES} + ${HepMC_LIBRARIES} ${Boost_LIBRARIES}) +install(TARGETS rpythia8-gun RUNTIME DESTINATION bin) + +add_executable(rpythia8-box rpythia8-box.cc) +target_link_libraries(rpythia8-box + ${Pythia_LIBRARIES} + ${HepMC_LIBRARIES} ${Boost_LIBRARIES}) +install(TARGETS rpythia8-box RUNTIME DESTINATION bin) + +add_executable(rpythia8-bgbw rpythia8-bgbw.cc ) +target_link_libraries(rpythia8-bgbw + ${Pythia_LIBRARIES} + ${HepMC_LIBRARIES} + ${ROOT_LIBRARIES} + ${Boost_LIBRARIES}) +install(TARGETS rpythia8-bgbw RUNTIME DESTINATION bin) + diff --git a/rpythia8/rpythia8-bgbw.cc b/rpythia8/rpythia8-bgbw.cc new file mode 100644 index 0000000..419d59c --- /dev/null +++ b/rpythia8/rpythia8-bgbw.cc @@ -0,0 +1,311 @@ +#include +#include +#include 
+#include + +#include "Pythia8/Pythia.h" +#include "Pythia8Plugins/HepMC2.h" + +using namespace Pythia8; + +#include "TMath.h" +#include "TRandom3.h" +#include "TF1.h" + +const float maximum = 20; + +Double_t IntegrandBG(const double* x, const double* p) +{ + // integrand for boltzman-gibbs blast wave + // x[0] -> r (radius) + // p[0] -> mass + // p[1] -> pT (transverse momentum) + // p[2] -> beta_max (surface velocity) + // p[3] -> T (freezout temperature) + // p[4] -> n (velocity profile) + + double x0 = x[0]; + + double mass = p[0]; + double pT = p[1]; + double beta_max = p[2]; + double temp = p[3]; + Double_t n = p[4]; + + // Keep beta within reasonable limits + Double_t beta = beta_max * TMath::Power(x0, n); + if (beta > 0.9999999999999999) + beta = 0.9999999999999999; + + double mT = TMath::Sqrt(mass * mass + pT * pT); + + double rho0 = TMath::ATanH(beta); + double arg00 = pT * TMath::SinH(rho0) / temp; + if (arg00 > 700.) + arg00 = 700.; // avoid FPE + double arg01 = mT * TMath::CosH(rho0) / temp; + double f0 = x0 * mT * TMath::BesselI0(arg00) * TMath::BesselK1(arg01); + + // printf("r=%f, pt=%f, beta_max=%f, temp=%f, n=%f, mt=%f, beta=%f, rho=%f, argI0=%f, argK1=%f\n", x0, pT, beta_max, temp, n, mT, beta, rho0, arg00, arg01); + + return f0; +} + +Double_t StaticBGdNdPt(const double* x, const double* p) +{ + + // implementation of BGBW (1/pt dNdpt) + + double pT = x[0]; + ; + + double mass = p[0]; + double beta = p[1]; + double temp = p[2]; + double n = p[3]; + double norm = p[4]; + + static TF1* fIntBG = 0; + if (!fIntBG) + fIntBG = new TF1("fIntBG", IntegrandBG, 0, 1, 5); + + fIntBG->SetParameters(mass, pT, beta, temp, n); + double result = fIntBG->Integral(0, 1); + // printf ("[%4.4f], Int :%f\n", pT, result); + return result * norm; //*1e30;; +} + +Double_t StaticBGdNdPtTimesPt(const double* x, const double* p) +{ + // BGBW dNdpt implementation + return x[0] * StaticBGdNdPt(x, p); +} + +Double_t StaticBGdNdMtTimesMt(const double* x, const double* p) +{ + 
// BGBW dNdpt implementation + // X0 is mt here + Double_t pt = TMath::Sqrt(x[0] * x[0] - p[0] * p[0]); + return pt * StaticBGdNdPt(&pt, p); +} + +TF1* fLastFunc = nullptr; +float fLineWidth = 2; + +// Times Pt funcs +// Boltzmann-Gibbs Blast Wave +TF1* GetBGBWdNdptTimesPt(Double_t mass, Double_t beta, Double_t temp, Double_t n, + Double_t norm, const char* name) +{ + + // BGBW, dNdpt + + fLastFunc = new TF1(name, StaticBGdNdPtTimesPt, 0.0, maximum, 5); + fLastFunc->SetParameters(mass, beta, temp, n, norm); + fLastFunc->FixParameter(0, mass); + fLastFunc->SetParNames("mass", "#beta", "temp", "n", "norm"); + fLastFunc->SetLineWidth(fLineWidth); + return fLastFunc; +} + +// Boltzmann-Gibbs Blast Wave +TF1* GetBGBWdNdptTimesMt(Double_t mass, Double_t beta, Double_t temp, Double_t n, + Double_t norm, const char* name) +{ + + // BGBW, dNdpt + // 1/Mt dN/dmt + fLastFunc = new TF1(name, StaticBGdNdMtTimesMt, 0.0, maximum, 5); + fLastFunc->SetParameters(mass, beta, temp, n, norm); + fLastFunc->FixParameter(0, mass); + fLastFunc->SetParNames("mass", "#beta", "temp", "n", "norm"); + fLastFunc->SetLineWidth(fLineWidth); + return fLastFunc; +} + +TF1* GetBGBWdNdpt(Double_t mass, Double_t beta, Double_t temp, + Double_t n, Double_t norm, const char* name) +{ + + // BGBW 1/pt dNdpt + + fLastFunc = new TF1(name, StaticBGdNdPt, 0.0, maximum, 5); + fLastFunc->SetParameters(mass, beta, temp, n, norm); + fLastFunc->FixParameter(0, mass); + fLastFunc->SetParNames("mass", "#beta", "T", "n", "norm"); + fLastFunc->SetLineWidth(fLineWidth); + return fLastFunc; +} + +int main(int argc, char** argv) +{ + + int nevents, pdg, seed; + std::string config, output, background_config; + double y_min, phi_min, pt_min; + double y_max, phi_max, pt_max; + int npart; + double xProd, yProd, zProd; + bool verbose, decay; + //BGBW parameters + double beta, temp, n; + + /** process arguments **/ + namespace po = boost::program_options; + po::options_description desc("Options"); + try { + desc.add_options() 
+ ("help", "Print help messages") + ("nevents,n", po::value(&nevents)->default_value(10), "Number of events to be generated") + ("pdg,p", po::value(&pdg)->required(), "PDG code of the particle") + ("npart", po::value(&npart)->default_value(1), "Number of particles per event in a box") + ("ymin", po::value(&y_min)->default_value(0.), "Minimum y") + ("ymax", po::value(&y_max)->default_value(0.), "Maximum y") + ("phimin", po::value(&phi_min)->default_value(0.), "Minimum phi") + ("phimax", po::value(&phi_max)->default_value(0.), "Maximum phi") + ("ptmin", po::value(&pt_min)->default_value(0.), "Minimum momentum") + ("ptmax", po::value(&pt_max)->default_value(20.), "Maximum momentum") + ("xProd", po::value(&xProd)->default_value(0.), "Production vertex in the x-direction") + ("yProd", po::value(&yProd)->default_value(0.), "Production vertex in the y-direction") + ("zProd", po::value(&zProd)->default_value(0.), "Production vertex in the z-direction") + ("beta", po::value(&beta)->default_value(0.57), "BGBW beta parameter") + ("temp", po::value(&temp)->default_value(0.100), "BGBW temperature parameter") + ("n", po::value(&n)->default_value(1.02), "BGBW n parameter") + ("config,c", po::value(&config), "Configuration file") + ("background-config", po::value(&background_config), "Background configuration file") + ("output,o", po::value(&output)->default_value("pythia-gun.hepmc"), "Output HepMC file") + ("decay,D", po::bool_switch(&decay)->default_value(false), "Decay particle at production vertex") + ("verbose,V", po::bool_switch(&verbose)->default_value(false), "Verbose event listing") + ("seed", po::value(&seed)->default_value(1), "initial seed"); + + po::variables_map vm; + po::store(po::parse_command_line(argc, argv, desc), vm); + po::notify(vm); + + if (vm.count("help")) { + std::cout << desc << std::endl; + return 1; + } + } catch (std::exception& e) { + std::cerr << "Error: " << e.what() << std::endl; + std::cout << desc << std::endl; + return 1; + } + + 
HepMC::Pythia8ToHepMC ToHepMC; + HepMC::IO_GenEvent ascii_io(output, std::ios::out); + + // pythia + Pythia pythia; + + // configure pythia + pythia.readString("ProcessLevel:all = off"); + // pythia.readString("SoftQCD:elastic on"); + if (!config.empty() && !pythia.readFile(config)) { + std::cout << "Error: could not read config file \"" << config << "\"" << std::endl; + return 1; + } + + // check valid pdg code + if (!pythia.particleData.isParticle(pdg)) { + std::cout << "Error: invalid PDG code \"" << pdg << "\" is not in the particle list" << std::endl; + return 1; + } + if (!pythia.particleData.isLepton(pdg) && + !pythia.particleData.isHadron(pdg) && + !pythia.particleData.isResonance(pdg)) { + if (abs(pdg) < 1000000000) { + std::cout << "Error: invalid PDG code \"" << pdg << "\"" << std::endl; + return 1; + } else { + std::cout << "PDG code \"" << pdg << "\" stands for a nucleous" << std::endl; + } + } + + gRandom->SetSeed(seed); + std::cout << "Random:seed =" + std::to_string(seed) << std::endl; + pythia.readString("Random:setSeed = on"); + pythia.readString("Random:seed =" + std::to_string(seed)); + // init + pythia.init(); + const double m = pythia.particleData.m0(pdg); + + // TF1* GetBGBWdNdptTimesPt(Double_t mass, Double_t beta, Double_t temp, Double_t n, + // Double_t norm, const char* name) + TF1* bgbw = GetBGBWdNdptTimesPt(m, beta * 1.5, temp, n, 1, Form("bgbw_for_pdg_%i", pdg)); + + // the particle + Particle particle; + particle.id(pdg); + particle.status(11); + particle.m(m); + particle.xProd(xProd); + particle.yProd(yProd); + particle.zProd(zProd); + + // background interface + Pythia8::Pythia* pythia_bkg = nullptr; + if (!background_config.empty()) { + std::cout << "Background: configure from " << background_config << std::endl; + pythia_bkg = new Pythia8::Pythia; + if (!pythia_bkg->readFile(background_config)) { + std::cout << "Error: could not read config file \"" << background_config << "\"" << std::endl; + return 1; + } + 
pythia_bkg->readString("Random:setSeed = on"); + pythia_bkg->readString("Random:seed =" + std::to_string(seed)); + pythia_bkg->init(); + } + + // event loop + double y, phi, p, pt, pl; + srand(time(NULL)); + for (int iev = 0; iev < nevents; ++iev) { + + // reset, add particle and decay + pythia.event.reset(); + for (int ipart = 0; ipart < npart; ipart++) { + pt = bgbw->GetRandom(pt_min, pt_max, gRandom); + y = y_min + static_cast(rand()) / (static_cast(RAND_MAX / (y_max - y_min))); + constexpr double eNep = TMath::E(); + pl = 0.5 * pow(eNep, (-y)) * (pow(eNep, y * 2) - 1) * sqrt(m * m + pt * pt); + p = sqrt(pt * pt + pl * pl); + phi = phi_min + static_cast(rand()) / (static_cast(RAND_MAX / (phi_max - phi_min))); + particle.e(sqrt(p * p + m * m)); + particle.px(pt * cos(phi)); + particle.py(pt * sin(phi)); + particle.pz(pl); + pythia.event.append(particle); + } + if (decay) + pythia.moreDecays(); + pythia.next(); + + // print verbose + if (verbose) + pythia.event.list(1); + + // background + if (pythia_bkg) { + pythia_bkg->next(); + if (decay) + pythia_bkg->moreDecays(); + pythia.event += pythia_bkg->event; + } + + // write HepMC + HepMC::GenEvent* hepmcevt = new HepMC::GenEvent(); + ToHepMC.fill_next_event(pythia, hepmcevt); + ascii_io << hepmcevt; + delete hepmcevt; + } + + // print statistics + pythia.stat(); + if (pythia_bkg) { + pythia_bkg->stat(); + delete pythia_bkg; + } + + return 0; +} diff --git a/rpythia8/rpythia8-box.cc b/rpythia8/rpythia8-box.cc new file mode 100644 index 0000000..fff192c --- /dev/null +++ b/rpythia8/rpythia8-box.cc @@ -0,0 +1,174 @@ +#include +#include +#include +#include + +#include "Pythia8/Pythia.h" +#include "Pythia8Plugins/HepMC2.h" + +using namespace Pythia8; + +int main(int argc, char** argv) +{ + + int nevents, pdg, seed; + std::string config, output, background_config; + double eta_min, phi_min, p_min; + double eta_max, phi_max, p_max; + int npart; + double xProd, yProd, zProd; + bool verbose, decay; + + /** process arguments 
**/ + namespace po = boost::program_options; + po::options_description desc("Options"); + try { + desc.add_options() + ("help", "Print help messages") + ("nevents,n", po::value(&nevents)->default_value(10), "Number of events to be generated") + ("pdg,p", po::value(&pdg)->required(), "PDG code of the particle") + ("npart", po::value(&npart)->default_value(1), "Number of particles per event in a box") + ("etamin", po::value(&eta_min)->default_value(0.), "Minimum eta") + ("etamax", po::value(&eta_max)->default_value(0.), "Maximum eta") + ("phimin", po::value(&phi_min)->default_value(0.), "Minimum phi") + ("phimax", po::value(&phi_max)->default_value(0.), "Maximum phi") + ("pmin", po::value(&p_min)->default_value(0.), "Minimum momentum") + ("pmax", po::value(&p_max)->default_value(0.), "Maximum momentum") + ("xProd", po::value(&xProd)->default_value(0.), "Production vertex in the x-direction") + ("yProd", po::value(&yProd)->default_value(0.), "Production vertex in the y-direction") + ("zProd", po::value(&zProd)->default_value(0.), "Production vertex in the z-direction") + ("config,c", po::value(&config), "Configuration file") + ("background-config", po::value(&background_config), "Background configuration file") + ("output,o", po::value(&output)->default_value("pythia-gun.hepmc"), "Output HepMC file") + ("decay,D", po::bool_switch(&decay)->default_value(false), "Decay particle at production vertex") + ("verbose,V", po::bool_switch(&verbose)->default_value(false), "Verbose event listing") + ("seed", po::value(&seed)->default_value(1), "initial seed"); + + po::variables_map vm; + po::store(po::parse_command_line(argc, argv, desc), vm); + po::notify(vm); + + if (vm.count("help")) { + std::cout << desc << std::endl; + return 1; + } + } catch (std::exception& e) { + std::cerr << "Error: " << e.what() << std::endl; + std::cout << desc << std::endl; + return 1; + } + + HepMC::Pythia8ToHepMC ToHepMC; + HepMC::IO_GenEvent ascii_io(output, std::ios::out); + + // pythia + Pythia 
pythia; + + // configure pythia + pythia.readString("ProcessLevel:all = off"); + // pythia.readString("SoftQCD:elastic on"); + if (!config.empty() && !pythia.readFile(config)) { + std::cout << "Error: could not read config file \"" << config << "\"" << std::endl; + return 1; + } + + // check valid pdg code + if (!pythia.particleData.isParticle(pdg)) { + std::cout << "Error: invalid PDG code \"" << pdg << "\" is not in the particle list" << std::endl; + return 1; + } + if (!pythia.particleData.isLepton(pdg) && + !pythia.particleData.isHadron(pdg) && + !pythia.particleData.isResonance(pdg)) { + if (abs(pdg) < 1000000000 && abs(pdg) != 22) { + std::cout << "Error: invalid PDG code \"" << pdg << "\"" << std::endl; + return 1; + } else if (abs(pdg) == 22) { + std::cout << "PDG code \"" << pdg << "\" stands for a gamma" << std::endl; + } else { + std::cout << "PDG code \"" << pdg << "\" stands for a nucleous" << std::endl; + } + } + + std::cout << "Random:seed =" + std::to_string(seed) << std::endl; + pythia.readString("Random:setSeed = on"); + pythia.readString("Random:seed =" + std::to_string(seed)); + // init + pythia.init(); + const double m = pythia.particleData.m0(pdg); + + // the particle + Particle particle; + particle.id(pdg); + particle.status(11); + particle.m(m); + particle.xProd(xProd); + particle.yProd(yProd); + particle.zProd(zProd); + + // background interface + Pythia8::Pythia* pythia_bkg = nullptr; + if (!background_config.empty()) { + std::cout << "Background: configure from " << background_config << std::endl; + pythia_bkg = new Pythia8::Pythia; + if (!pythia_bkg->readFile(background_config)) { + std::cout << "Error: could not read config file \"" << background_config << "\"" << std::endl; + return 1; + } + pythia_bkg->readString("Random:setSeed = on"); + pythia_bkg->readString("Random:seed =" + std::to_string(seed)); + pythia_bkg->init(); + } + + // event loop + double eta, phi, p, pt, pl, e; + srand(time(NULL)); + for (int iev = 0; iev < nevents; 
++iev) { + + // reset, add particle and decay + pythia.event.reset(); + for (int ipart = 0; ipart < npart; ipart++) { + eta = eta_min + static_cast(rand()) / (static_cast(RAND_MAX / (eta_max - eta_min))); + phi = phi_min + static_cast(rand()) / (static_cast(RAND_MAX / (phi_max - phi_min))); + p = p_min + static_cast(rand()) / (static_cast(RAND_MAX / (p_max - p_min))); + e = exp(2. * eta); + pl = p * (e - 1.) / (1. + e); + pt = sqrt(p * p - pl * pl); + particle.e(sqrt(p * p + m * m)); + particle.px(pt * cos(phi)); + particle.py(pt * sin(phi)); + particle.pz(pl); + pythia.event.append(particle); + } + if (decay) + pythia.moreDecays(); + pythia.next(); + + // print verbose + if (verbose) + pythia.event.list(1); + + // background + if (pythia_bkg) { + pythia_bkg->next(); + if (decay) + pythia_bkg->moreDecays(); + pythia.event += pythia_bkg->event; + } + + // write HepMC + HepMC::GenEvent* hepmcevt = new HepMC::GenEvent(); + ToHepMC.fill_next_event(pythia, hepmcevt); + ascii_io << hepmcevt; + delete hepmcevt; + } + + // print statistics + pythia.stat(); + if (pythia_bkg) { + pythia_bkg->stat(); + delete pythia_bkg; + } + + return 0; +} diff --git a/rpythia8/rpythia8-gun.cc b/rpythia8/rpythia8-gun.cc new file mode 100644 index 0000000..7afb4d7 --- /dev/null +++ b/rpythia8/rpythia8-gun.cc @@ -0,0 +1,155 @@ +#include +#include + +#include "Pythia8/Pythia.h" +#include "Pythia8Plugins/HepMC2.h" + +using namespace Pythia8; + +int main(int argc, char** argv) +{ + + int nevents, pdg, seed; + std::string config, output, background_config; + double px, py, pz; + double xProd, yProd, zProd; + bool verbose, decay; + + /** process arguments **/ + namespace po = boost::program_options; + po::options_description desc("Options"); + try { + desc.add_options() + ("help", "Print help messages") + ("nevents,n", po::value(&nevents)->default_value(10), "Number of events to be generated") + ("pdg,p", po::value(&pdg)->required(), "PDG code of the particle") + ("px", 
po::value(&px)->default_value(0.), "Momentum in the x-direction") + ("py", po::value(&py)->default_value(0.), "Momentum in the y-direction") + ("pz", po::value(&pz)->default_value(0.), "Momentum in the z-direction") + ("xProd", po::value(&xProd)->default_value(0.), "Production vertex in the x-direction") + ("yProd", po::value(&yProd)->default_value(0.), "Production vertex in the y-direction") + ("zProd", po::value(&zProd)->default_value(0.), "Production vertex in the z-direction") + ("config,c", po::value(&config), "Configuration file") + ("background-config", po::value(&background_config), "Background configuration file") + ("output,o", po::value(&output)->default_value("pythia-gun.hepmc"), "Output HepMC file") + ("decay,D", po::bool_switch(&decay)->default_value(false), "Decay particle at production vertex") + ("verbose,V", po::bool_switch(&verbose)->default_value(false), "Verbose event listing") + ("seed", po::value(&seed)->default_value(1), "initial seed"); + + po::variables_map vm; + po::store(po::parse_command_line(argc, argv, desc), vm); + po::notify(vm); + + if (vm.count("help")) { + std::cout << desc << std::endl; + return 1; + } + } catch (std::exception& e) { + std::cerr << "Error: " << e.what() << std::endl; + std::cout << desc << std::endl; + return 1; + } + + HepMC::Pythia8ToHepMC ToHepMC; + HepMC::IO_GenEvent ascii_io(output, std::ios::out); + + // pythia + Pythia pythia; + + // configure pythia + pythia.readString("ProcessLevel:all = off"); + // pythia.readString("SoftQCD:elastic on"); + if (!config.empty() && !pythia.readFile(config)) { + std::cout << "Error: could not read config file \"" << config << "\"" << std::endl; + return 1; + } + + // check valid pdg code + if (!pythia.particleData.isParticle(pdg)) { + std::cout << "Error: invalid PDG code \"" << pdg << "\" is not in the particle list" << std::endl; + return 1; + } + if (!pythia.particleData.isLepton(pdg) && + !pythia.particleData.isHadron(pdg) && + !pythia.particleData.isResonance(pdg)) { 
+ if (abs(pdg) < 1000000000) { + std::cout << "Error: invalid PDG code \"" << pdg << "\"" << std::endl; + return 1; + } else { + std::cout << "PDG code \"" << pdg << "\" stands for a nucleous" << std::endl; + } + } + + std::cout << "Random:seed =" + std::to_string(seed) << std::endl; + pythia.readString("Random:setSeed = on"); + pythia.readString("Random:seed =" + std::to_string(seed)); + // init + pythia.init(); + double m = pythia.particleData.m0(pdg); + double e = sqrt(px * px + py * py + pz * pz + m * m); + + // the particle + Particle particle; + particle.id(pdg); + particle.status(11); + particle.px(px); + particle.py(py); + particle.pz(pz); + particle.e(e); + particle.m(m); + particle.xProd(xProd); + particle.yProd(yProd); + particle.zProd(zProd); + + // background interface + Pythia8::Pythia* pythia_bkg = nullptr; + if (!background_config.empty()) { + std::cout << "Background: configure from " << background_config << std::endl; + pythia_bkg = new Pythia8::Pythia; + if (!pythia_bkg->readFile(background_config)) { + std::cout << "Error: could not read config file \"" << background_config << "\"" << std::endl; + return 1; + } + pythia_bkg->readString("Random:setSeed = on"); + pythia_bkg->readString("Random:seed =" + std::to_string(seed)); + pythia_bkg->init(); + } + + // event loop + for (int iev = 0; iev < nevents; ++iev) { + + // reset, add particle and decay + pythia.event.reset(); + pythia.event.append(particle); + if (decay) + pythia.moreDecays(); + pythia.next(); + + // print verbose + if (verbose) + pythia.event.list(1); + + // background + if (pythia_bkg) { + pythia_bkg->next(); + if (decay) + pythia_bkg->moreDecays(); + pythia.event += pythia_bkg->event; + } + + // write HepMC + HepMC::GenEvent* hepmcevt = new HepMC::GenEvent(); + ToHepMC.fill_next_event(pythia, hepmcevt); + ascii_io << hepmcevt; + delete hepmcevt; + } + + // print statistics + pythia.stat(); + if (pythia_bkg) { + pythia_bkg->stat(); + delete pythia_bkg; + } + + return 0; +} diff 
--git a/rpythia8/rpythia8.cc b/rpythia8/rpythia8.cc new file mode 100644 index 0000000..570c9b0 --- /dev/null +++ b/rpythia8/rpythia8.cc @@ -0,0 +1,106 @@ +#include +#include + +#include "Pythia8/Pythia.h" +#include "Pythia8/ParticleData.h" +#include "Pythia8Plugins/HepMC2.h" + +int main(int argc, char** argv) +{ + + int nevents, inject_nevents, seed; + std::string config, output, inject_config; + + /** process arguments **/ + namespace po = boost::program_options; + po::options_description desc("Options"); + try { + desc.add_options() + ("help", "Print help messages") + ("nevents,n" , po::value(&nevents)->default_value(10), "Number of events") + ("config,c" , po::value(&config), "Configuration file") + ("output,o" , po::value(&output)->default_value("pythia8.hepmc"), "Output HepMC file") + ("inject-config" , po::value(&inject_config), "Injected event configuration file") + ("inject-nevents" , po::value(&inject_nevents)->default_value(1), "Number of events to inject") + ("seed" , po::value(&seed)->default_value(1), "initial seed"); + + po::variables_map vm; + po::store(po::parse_command_line(argc, argv, desc), vm); + po::notify(vm); + + if (vm.count("help")) { + std::cout << desc << std::endl; + return 1; + } + } catch (std::exception& e) { + std::cerr << "Error: " << e.what() << std::endl; + std::cout << desc << std::endl; + return 1; + } + + // pythia, config and init + Pythia8::Pythia pythia; + pythia.particleData.addParticle(9920443, "X(3872)", 3, 0, 0, 3.87196, 0.00012); + + Pythia8::Rndm rndm; + if (!config.empty() && !pythia.readFile(config)) { + std::cout << "Error: could not read config file \"" << config << "\"" << std::endl; + return 1; + } + std::cout << "Random:seed =" + std::to_string(seed) << std::endl; + pythia.readString("Random:setSeed = on"); + pythia.readString("Random:seed =" + std::to_string(seed)); + pythia.init(); + rndm.init(); + + // Interface for conversion from Pythia8::Event to HepMC event. 
+ HepMC::Pythia8ToHepMC ToHepMC; + HepMC::IO_GenEvent output_io(output, std::ios::out); + + // injection interface + Pythia8::Pythia* pythia_inj = nullptr; + if (!inject_config.empty()) { + std::cout << "Injection: configure from " << inject_config << std::endl; + pythia_inj = new Pythia8::Pythia; + if (!pythia_inj->readFile(inject_config)) { + std::cout << "Error: could not read config file \"" << inject_config << "\"" << std::endl; + return 1; + } + pythia_inj->readString("Random:setSeed = on"); + pythia_inj->readString("Random:seed =" + std::to_string(seed)); + pythia_inj->init(); + } + + // event loop + for (int iev = 0; iev < nevents; ++iev) { + + // generate event + pythia.next(); + auto offset = pythia.event.size(); + + // injection + if (pythia_inj) { + for (int iiev = 0; iiev < inject_nevents; ++iiev) { + pythia_inj->next(); + pythia.event += pythia_inj->event; + } + } + + // convert to HepMC + HepMC::GenEvent* hepmcevt = new HepMC::GenEvent(); + ToHepMC.fill_next_event(pythia, hepmcevt); + + output_io << hepmcevt; + delete hepmcevt; + + } // end of event loop + + // print statistics + pythia.stat(); + if (pythia_inj) { + pythia_inj->stat(); + delete pythia_inj; + } + + return 0; +} diff --git a/src/.clang-format b/src/.clang-format new file mode 120000 index 0000000..538fb4d --- /dev/null +++ b/src/.clang-format @@ -0,0 +1 @@ +../examples/aod/.clang-format \ No newline at end of file diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 60df981..8ea2539 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -5,13 +5,23 @@ set(SOURCES TrackSmearer.cc TrackUtils.cc TOFLayer.cc + RICHdetector.cc + ECALdetector.cc + MIDdetector.cc + PreShower.cc + PhotonConversion.cc ) set(HEADERS VertexFitter.hh - TrackSmearer.hh + TrackSmearer.hh TrackUtils.hh TOFLayer.hh + RICHdetector.hh + ECALdetector.hh + MIDdetector.hh + PreShower.hh + PhotonConversion.hh ) get_target_property(DELPHES_INCLUDE_DIRECTORIES @@ -30,7 +40,6 @@ 
get_target_property(RECONSTRUCTIONDATAFORMATS_INCLUDE_DIRECTORIES AliceO2::ReconstructionDataFormats INTERFACE_INCLUDE_DIRECTORIES) - include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${DELPHES_INCLUDE_DIRECTORIES} ${DETECTORSVERTEXING_INCLUDE_DIRECTORIES} @@ -43,6 +52,8 @@ root_generate_dictionary(G__DelphesO2 ${HEADERS} LINKDEF DelphesO2LinkDef.h) target_link_libraries(DelphesO2 Delphes::Core ROOT::MathCore + ROOT::RIO + ROOT::EG AliceO2::DetectorsVertexing AliceO2::ReconstructionDataFormats AliceO2::GPUCommon @@ -51,6 +62,14 @@ target_link_libraries(DelphesO2 install(TARGETS DelphesO2 DESTINATION lib) +install(FILES ${HEADERS} lutCovm.hh DESTINATION include) + +FILE(GLOB WRITERS lutWrite.*.cc) +install(FILES DetectorK/DetectorK.cxx DESTINATION lut/DetectorK) +install(FILES DetectorK/DetectorK.h DESTINATION lut/DetectorK) +install(FILES fwdRes/fwdRes.C DESTINATION lut/fwdRes) +install(FILES lutWrite.cc ${WRITERS} DESTINATION lut) + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/libDelphesO2_rdict.pcm ${CMAKE_CURRENT_BINARY_DIR}/libDelphesO2.rootmap diff --git a/src/DelphesO2LinkDef.h b/src/DelphesO2LinkDef.h index e22118c..1f517c2 100644 --- a/src/DelphesO2LinkDef.h +++ b/src/DelphesO2LinkDef.h @@ -11,6 +11,8 @@ #pragma link C++ class o2::delphes::TrackSmearer+; #pragma link C++ class o2::delphes::TrackUtils+; #pragma link C++ class o2::delphes::TOFLayer+; +#pragma link C++ class o2::delphes::RICHdetector+; +#pragma link C++ class o2::delphes::MIDdetector+; #pragma link C++ struct o2::delphes::Vertex+; #endif diff --git a/src/DetectorK/DetectorK.cxx b/src/DetectorK/DetectorK.cxx index f441346..54d42b5 100644 --- a/src/DetectorK/DetectorK.cxx +++ b/src/DetectorK/DetectorK.cxx @@ -27,6 +27,7 @@ Bool_t DetectorK::verboseR=0; #define RIDICULOUS 999999 // A ridiculously large resolution (cm) to flag a dead detector +#define xrhosteps 100 // steps for dEdx correction #define Luminosity 1.e27 // Luminosity of the beam (LHC HI == 1.e27, RHIC II == 8.e27 ) #define SigmaD 6.0 // 
Size of the interaction diamond (cm) (LHC = 6.0 cm) #define dNdEtaMinB 1//950//660//950 // Multiplicity per unit Eta (AuAu MinBias = 170, Central = 700) @@ -103,7 +104,7 @@ DetectorK::DetectorK() SetMaxSnp(); } -DetectorK::DetectorK(const char *name, const char *title) +DetectorK::DetectorK(char *name, char *title) : TNamed(name,title), fNumberOfLayers(0), fNumberOfActiveLayers(0), @@ -525,12 +526,13 @@ void DetectorK::AddTPC(Float_t phiResMean, Float_t zResMean, Int_t skip) { // skip=1: Use every padrow, skip=2: Signal in every 2nd padrow - AddLayer((char*)"tpcIFC", 77.8, 0.01367); // Inner Field cage - AddLayer((char*)"tpcOFC", 254.0, 0.01367); // Outer Field cage + AddLayer((char*)"tpcIFC", 77.8, 9.279967e-02, 3.325701e+00); // Inner Field cage + AddLayer((char*)"tpcOFC", 254.0, 9.279967e-02, 3.325701e+00); // Outer Field cage // % Radiation Lengths ... Average per TPC row (i.e. total/159 ) const int kNPassiveBound = 2; - const Float_t radLBoubdary[kNPassiveBound] = {0.05, 0.0165}; + const Float_t radLBoundary[kNPassiveBound] = {1.692612e-01, 8.711904e-02}; + const Float_t xrhoBoundary[kNPassiveBound] = {6.795774e+00, 3.111401e+00}; const Float_t rBoundary[kNPassiveBound] = {50, 70.0}; // cm Float_t radLPerRow = 0.000036; @@ -551,7 +553,7 @@ void DetectorK::AddTPC(Float_t phiResMean, Float_t zResMean, Int_t skip) { // add boundaries between ITS and TPC for (int i=0;i1.2) fParticleMass = -TMath::Abs(fParticleMass); // Prepare Probability Kombinations Int_t nLayer = fNumberOfActiveITSLayers; @@ -890,7 +892,7 @@ void DetectorK::SolveViaBilloir(Double_t selPt, double ptmin) { trCov[kY2] = trCov[kZ2] = trCov[kSnp2] = trCov[kTgl2] = trCov[kPtI2] = 1e-9; // // find max layer this track can reach - double rmx = (TMath::Abs(fBField)>1e-5) ? pt*100./(0.3*TMath::Abs(fBField)) : 9999; + double rmx = (TMath::Abs(fBField)>1e-5) ? 
TMath::Abs(charge)*pt*100./(0.3*TMath::Abs(fBField)) : 9999; Int_t lastActiveLayer = -1; for (Int_t j=fLayers.GetEntries(); j--;) { CylLayerK *l = (CylLayerK*) fLayers.At(j); @@ -914,8 +916,8 @@ void DetectorK::SolveViaBilloir(Double_t selPt, double ptmin) { if (lr->xrho>0) { // correct in small steps bool elossOK = kTRUE; - for (int ise=10;ise--;) { - if (!probTrLast.CorrectForMeanMaterial(0, -lr->xrho/10, fParticleMass , kTRUE)) {elossOK = kFALSE; break;} + for (int ise=xrhosteps;ise--;) { + if (!probTrLast.CorrectForMeanMaterial(0, -lr->xrho/xrhosteps, fParticleMass , kTRUE)) {elossOK = kFALSE; break;} } if (!elossOK) break; } @@ -928,11 +930,15 @@ void DetectorK::SolveViaBilloir(Double_t selPt, double ptmin) { lastReached = il; prepLrOK[il] = 1.; // flag successfully passed layer } - if ( ((CylLayerK*)fLayers.At(lastReached))->radius < fMinRadTrack) continue; + // if ( ((CylLayerK*)fLayers.At(lastReached))->radius < fMinRadTrack) continue; if (!PropagateToR(&probTr,probTr.GetX() + kTrackingMargin,bGauss,1)) continue; - if (probTr.GetX()radius + kTrackingMargin,bGauss,1)) continue; //if (!probTr.PropagateTo(last->radius,bGauss)) continue; // reset cov.matrix @@ -1028,8 +1034,8 @@ void DetectorK::SolveViaBilloir(Double_t selPt, double ptmin) { exit(1); } if (layer->xrho>0) { // correct in small steps - for (int ise=10;ise--;) { - if (!probTr.CorrectForMeanMaterial(0, layer->xrho/10, fParticleMass , kTRUE)) { + for (int ise=xrhosteps;ise--;) { + if (!probTr.CorrectForMeanMaterial(0, layer->xrho/xrhosteps, fParticleMass , kTRUE)) { printf("Failed to apply material correction, xrho=%.4f\n",layer->xrho); probTr.Print(); exit(1); @@ -1241,8 +1247,8 @@ void DetectorK::SolveViaBilloir(Double_t selPt, double ptmin) { exit(1); } if (layer->xrho>0) { // correct in small steps - for (int ise=10;ise--;) { - if (!probTr.CorrectForMeanMaterial(0, -layer->xrho/10, fParticleMass , kTRUE)) { + for (int ise=xrhosteps;ise--;) { + if (!probTr.CorrectForMeanMaterial(0, 
-layer->xrho/xrhosteps, fParticleMass , kTRUE)) { printf("Failed to apply material correction, xrho=%.4f\n",-layer->xrho); probTr.Print(); exit(1); @@ -1420,7 +1426,12 @@ Bool_t DetectorK::SolveTrack(TrackSol& ts) { double etaTr = ts.fEta; double mass = ts.fMass; double charge = ts.fCharge; - + + // reset good hit probability + for (int i = 0; i < kMaxNumberOfDetectors; ++i) + fGoodHitProb[i] = -1.; + fGoodHitProb[0] = 1.; // we use layer zero to accumulate + if (ptTr<0) { printf("Input track is not initialized"); return kFALSE; @@ -1436,7 +1447,6 @@ Bool_t DetectorK::SolveTrack(TrackSol& ts) { TClonesArray &saveParOutwardA = ts.fTrackOutA; TClonesArray &saveParComb = ts.fTrackCmb; - // Calculate track parameters using Billoirs method of matrices Double_t pt,lambda; // CylLayerK *last = (CylLayerK*) fLayers.At((fLayers.GetEntries()-1)); @@ -1478,39 +1488,51 @@ Bool_t DetectorK::SolveTrack(TrackSol& ts) { // // find max layer this track can reach double rmx = (TMath::Abs(fBField)>1e-5) ? pt*100./(0.3*TMath::Abs(fBField)) : 9999; - if (2*rmx-5. < minRad && minRad>0) { - printf("Track of pt=%.3f cannot be tracked to min. r=%f\n",pt,minRad); + // if (2*rmx-5. < minRad && minRad>0) { + if ( minRad/(2.*rmx)>fMaxSnp-0.01 && minRad>0) { + // printf("Track of pt=%.3f cannot be tracked to min. 
r=%f\n",pt,minRad); return kFALSE; } - Int_t lastActiveLayer = -1; + Int_t lastActiveLayer = -1, lastReachedLayer = -1; for (Int_t j=fLayers.GetEntries(); j--;) { CylLayerK *l = (CylLayerK*) fLayers.At(j); - // printf("at lr %d r: %f vs %f, pt:%f\n",j,l->radius, 2*rmx-2.*kTrackingMargin, pt); if (/*!(l->isDead) && */(l->radius <= 2*(rmx-5))) {lastActiveLayer = j; last = l; break;} } if (lastActiveLayer<0) { printf("No active layer with radius < %f is found, pt = %f\n",rmx, pt); return kFALSE; } - // printf("PT=%f 2Rpt=%f Rlr=%f\n",pt,2*rmx,last->radius); // - if (!PropagateToR(&probTr,last->radius + kTrackingMargin,bGauss,1)) return kFALSE; - //if (!probTr.PropagateTo(last->radius,bGauss)) continue; - // reset cov.matrix - // - // rotate to external layer frame - /* - double posL[3]; - probTr.GetXYZ(posL); // lab position - double phiL = TMath::ATan2(posL[1],posL[0]); - if (!probTr.Rotate(phiL)) { - printf("Failed to rotate to the frame (phi:%+.3f)of Extertnal layer at %.2f\n", - phiL,last->radius); - probTr.Print(); - exit(1); + for (int il=1;il<=lastActiveLayer;il++) { + CylLayerK *lr = (CylLayerK*) fLayers.At(il); + AliExternalTrackParam probTrLast(probTr); + bool ok = PropagateToR(&probTrLast,lr->radius,bGauss,1); + if (ok) ok = probTrLast.CorrectForMeanMaterial(lr->radL, 0, mass , kTRUE); + if (ok && lr->xrho>0) { + for (int ise=xrhosteps;ise--;) { + ok = probTrLast.CorrectForMeanMaterial(0, -lr->xrho/xrhosteps, mass , kTRUE); + if (!ok) break; + } } - */ - if (!probTr.Rotate(probTr.Phi())) return kFALSE; // define large errors in track proper frame (snp=0) + if (ok && lr->radius>1e-3 && !lr->isDead) { + ok = probTrLast.Rotate(probTrLast.PhiPos()) && TMath::Abs( probTrLast.GetSnp() )GetName()); Bool_t isVertex = name.Contains("vertex"); + Bool_t isTOF = name.Contains("tof"); // - if (!PropagateToR(&probTr,layer->radius,bGauss,-1)) return kFALSE; // exit(1); - // if (!probTr.PropagateTo(last->radius,bGauss)) exit(1); // - // rotate to frame with X axis normal 
to the surface + if (!PropagateToR(&probTr,layer->radius,bGauss,-1)) return kFALSE; //exit(1); if (!isVertex) { double pos[3]; probTr.GetXYZ(pos); // lab position @@ -1543,8 +1564,7 @@ Bool_t DetectorK::SolveTrack(TrackSol& ts) { if ( TMath::Abs(TMath::Abs(phi)-TMath::Pi()/2)<1e-3) phi = 0;//TMath::Sign(TMath::Pi()/2 - 1e-3,phi); if (!probTr.Rotate(phi)) { printf("Failed to rotate to the frame (phi:%+.3f)of layer at %.2f at XYZ: %+.3f %+.3f %+.3f (pt=%+.3f)\n", - phi,layer->radius,pos[0],pos[1],pos[2],pt); - + phi,layer->radius,pos[0],pos[1],pos[2],pt); probTr.Print(); return kFALSE; // exit(1); } @@ -1555,7 +1575,7 @@ Bool_t DetectorK::SolveTrack(TrackSol& ts) { printf("SaveInw %d (%f) ",j,layer->radius); probTr.Print(); } // - if (!isVertex && !layer->isDead) { + if (!isVertex && !isTOF && !layer->isDead) { // // create fake measurement with the errors assigned to the layer // account for the measurement there @@ -1576,6 +1596,15 @@ Bool_t DetectorK::SolveTrack(TrackSol& ts) { probTr.Print(); return kFALSE; // exit(1); } + if (layer->xrho>0) { // correct in small steps + for (int ise=xrhosteps;ise--;) { + if (!probTr.CorrectForMeanMaterial(0, layer->xrho/xrhosteps, mass , kTRUE)) { + printf("Failed to apply material correction, xrho=%.4f\n",layer->xrho); + probTr.Print(); + return kFALSE; // exit(1); + } + } + } } // // BACKWORD TRACKING +++++++++++++++++ @@ -1590,7 +1619,6 @@ Bool_t DetectorK::SolveTrack(TrackSol& ts) { Bool_t doLikeAliRoot = 0; // don't do the "combined info" but do like in Aliroot - // RESET Covariance Matrix ( to 10 x the estimate -> as it is done in AliExternalTrackParam) // mIstar.UnitMatrix(); // start with unity if (doLikeAliRoot) { @@ -1613,12 +1641,13 @@ Bool_t DetectorK::SolveTrack(TrackSol& ts) { } } //probTr.Rotate(0); - for (Int_t j=0; j<=lastActiveLayer; j++) { // Layer loop + for (Int_t j=0; j<=lastReachedLayer; j++) { // Layer loop // layer = (CylLayerK*)fLayers.At(j); TString name(layer->GetName()); Bool_t isVertex = 
name.Contains("vertex"); - if (!PropagateToR(&probTr, layer->radius,bGauss,1)) exit(1); + Bool_t isTOF = name.Contains("tof"); + if (!PropagateToR(&probTr, layer->radius,bGauss,1)) return kFALSE;//exit(1); // if (!isVertex) { // rotate to frame with X axis normal to the surface @@ -1647,7 +1676,7 @@ Bool_t DetectorK::SolveTrack(TrackSol& ts) { covCmb[1] = 0; // create fake measurement with the errors assigned to the layer // account for the measurement there - if (!isVertex && !layer->isDead) { + if (!isVertex && !isTOF && !layer->isDead) { double meas[2] = {probTr.GetY(),probTr.GetZ()}; double measErr2[3] = {layer->phiRes*layer->phiRes,0,layer->zRes*layer->zRes}; // @@ -1664,9 +1693,27 @@ Bool_t DetectorK::SolveTrack(TrackSol& ts) { probTr.Print(); return kFALSE; // exit(1); } + if (layer->xrho>0) { // correct in small steps + for (int ise=xrhosteps;ise--;) { + if (!probTr.CorrectForMeanMaterial(0, -layer->xrho/xrhosteps, mass , kTRUE)) { + printf("Failed to apply material correction, xrho=%.4f\n",-layer->xrho); + probTr.Print(); + return kFALSE; // exit(1); + } + } + } // save outward parameters at this layer: after the update new( saveParOutwardA[j] ) AliExternalTrackParam(probTr); // + // good hit probability calculation + if (!isVertex && !layer->isDead) { + AliExternalTrackParam* trCmb = (AliExternalTrackParam*)ts.fTrackCmb[j]; + double sigYCmb = TMath::Sqrt(trCmb->GetSigmaY2()+layer->phiRes*layer->phiRes); + double sigZCmb = TMath::Sqrt(trCmb->GetSigmaZ2()+layer->zRes*layer->zRes); + fGoodHitProb[j] = ProbGoodChiSqHit(layer->radius * 100., sigYCmb * 100., sigZCmb * 100.); + if (!isTOF) + fGoodHitProb[0] *= fGoodHitProb[j]; + } } // probTr.SetUseLogTermMS(kFALSE); // Reset of MS term usage to avoid problems since its static @@ -1722,11 +1769,14 @@ Bool_t DetectorK::CalcITSEff(TrackSol& ts, Bool_t verbose) // if (verbose) { const double kCnv=1e4; - printf("%s:\t%5.1f %.4f %7.0f | %6.0f %6.0f -> %.3f | %6.0f %6.0f -> %.3f | %6.0f %6.0f -> %.3f\n", + 
printf("%s:\t%5.1f %.4f %7.0f | %6.0f %6.0f -> %.3f | %6.0f %6.0f -> %.3f | %6.0f %6.0f -> %.3f --> %.3f --> %.3f \n", l->GetName(),l->radius,l->radL,HitDensity(l->radius), sigYInw*kCnv,sigZInw*kCnv,probLayInw(2,nITSAct), sigYOut*kCnv,sigZOut*kCnv,probLayOut(2,nITSAct), - sigYCmb*kCnv,sigZCmb*kCnv,probLayCmb(2,nITSAct)); + sigYCmb*kCnv,sigZCmb*kCnv,probLayCmb(2,nITSAct), + ProbGoodHit(l->radius, sigYCmb, sigZCmb), + ProbGoodChiSqHit(l->radius, sigYCmb, sigZCmb) + ); } nITSAct++; ilr++; @@ -2311,6 +2361,15 @@ void DetectorK::MakeStandardPlots(Bool_t add, Int_t color, Int_t linewidth, cons pointResZ->SetName(Form("pointZRes%dadd",0)); pointResZ->Draw("L"); } + if (outGr) { + HistoManager hm("",outGr); + hm.AddGraph(eff); + hm.AddGraph(momRes); + hm.AddGraph(pointResR); + hm.AddGraph(pointResZ); + hm.Write(); + hm.Clear(); + } } @@ -2539,42 +2598,53 @@ Bool_t DetectorK::PropagateToR(AliExternalTrackParam* trc, double r, double b, i double rr = r*r; int iter = 0; const double kTiny = 1e-6; - const Double_t kEpsilon = 0.00001; + const Double_t kEpsilonX = 0.00001, kEpsilonR = 0.01; // if (verboseR) { printf("Prop to %f d=%d ",r,dir); trc->Print(); } - if (!GetXatLabR(trc, r ,xToGo, b, dir)) { - printf("Track with pt=%f cannot reach radius %f\n",trc->Pt(),r); - return kFALSE; - } - - Double_t xpos = trc->GetX(); - dir = (xpos kEpsilon) { - Double_t step = dir*TMath::Min(TMath::Abs(xToGo-xpos), maxStep); - Double_t x = xpos+step; - Double_t xyz0[3],xyz1[3],param[7]; - trc->GetXYZ(xyz0); //starting global position - if (!trc->PropagateTo(x,b)) return kFALSE; - xpos = trc->GetX(); - } - // - double rreal = TMath::Sqrt(xpos*xpos+trc->GetY()*trc->GetY()); - // printf("Rtgt=%f Rreal=%f\n",r,rreal); - if (r>0.5) { - if (!trc->Rotate(trc->PhiPos())) { - printf("Failed to rotate to layer local frame %f | ",trc->PhiPos()); trc->Print(); + while(1) { + + if (!GetXatLabR(trc, r ,xToGo, b, dir)) { + printf("Track with pt=%f cannot reach radius %f\n",trc->Pt(),r); return kFALSE; } - } 
- else { - if (!trc->Rotate(trc->Phi())) { - printf("Failed to rotate to track local frame %f | ",trc->Phi()); trc->Print(); - return kFALSE; + + Double_t xpos = trc->GetX(); + dir = (xpos kEpsilonX) { + Double_t step = dir*TMath::Min(TMath::Abs(xToGo-xpos), maxStep); + Double_t x = xpos+step; + // Double_t xyz0[3],xyz1[3],param[7]; + // trc->GetXYZ(xyz0); //starting global position + if (!trc->PropagateTo(x,b)) return kFALSE; + xpos = trc->GetX(); + } + // + double drreal = r - TMath::Sqrt(xpos*xpos+trc->GetY()*trc->GetY()); + if (!iter && ((dir>0 && drreal>kEpsilonR) || (dir<0 && drreal<-kEpsilonR)) ) { // apparently the phase changes by more than pi/2 + iter++; + if (!trc->Rotate(trc->Phi())) { + printf("Failed to rotate to track local frame %f in the large phase change mode| ",trc->Phi()); trc->Print(); + return kFALSE; + } + continue; // another iteration + } + // printf("Rtgt=%f Rreal=%f\n",r,rreal); + if (r>0.5) { + if (!trc->Rotate(trc->PhiPos())) { + printf("Failed to rotate to layer local frame %f | ",trc->PhiPos()); trc->Print(); + return kFALSE; + } + } + else { + if (!trc->Rotate(trc->Phi())) { + printf("Failed to rotate to track local frame %f | ",trc->Phi()); trc->Print(); + return kFALSE; + } } + break; } - return kTRUE; } diff --git a/src/DetectorK/DetectorK.h b/src/DetectorK/DetectorK.h index 0341207..e262c0a 100644 --- a/src/DetectorK/DetectorK.h +++ b/src/DetectorK/DetectorK.h @@ -6,6 +6,7 @@ #include #include #include +#include "HistoManager.h" /*********************************************************** @@ -113,7 +114,7 @@ class DetectorK : public TNamed { public: DetectorK(); - DetectorK(const char *name, const char *title); + DetectorK(char *name,char *title); virtual ~DetectorK(); enum {kNptBins = 50}; // less then 400 !! 
@@ -229,6 +230,8 @@ class DetectorK : public TNamed { Bool_t IsITSLayer(const TString& lname); + Double_t GetGoodHitProb(Int_t i) { return fGoodHitProb[i]; }; + static Bool_t verboseR; protected: @@ -265,7 +268,8 @@ class DetectorK : public TNamed { Double_t fDetPointZRes[kMaxNumberOfDetectors][kNptBins]; // array of z resolution per layer Double_t fEfficiency[kNptBins]; // efficiency Double_t fFake[kNptBins]; // fake prob - + Double_t fGoodHitProb[kMaxNumberOfDetectors]; // array of good hit probability per layer + Int_t kDetLayer; // layer for which a few more details are extracted Double_t fResolutionRPhiLay[kNptBins]; // array of rphi resolution Double_t fResolutionZLay[kNptBins]; // array of z resolution diff --git a/src/DetectorK/HistoManager.cxx b/src/DetectorK/HistoManager.cxx new file mode 100644 index 0000000..0fb21a8 --- /dev/null +++ b/src/DetectorK/HistoManager.cxx @@ -0,0 +1,383 @@ +#include "HistoManager.h" +#include "TROOT.h" +#include "TSystem.h" + +ClassImp(HistoManager) + +//_______________________________________________________________ +HistoManager::HistoManager(const char* dirname,const char* fname,Bool_t LOAD,const char* prefix) +{ + // + fNHistos = 0; + fDirName = dirname; + SetFileName(fname); + SetFile(0); + if (LOAD && !fDefName.IsNull()) { + int nh = Load(fname,dirname); + Printf("HistoManager::Load was requested: got %d histos from %s/%s",nh,fname,dirname); + if (prefix && prefix[0]!=0) AddPrefix(prefix); + } + // +} + +//_______________________________________________________________ +HistoManager::~HistoManager() +{ + Delete(); +} + +//_______________________________________________________________ +HistoManager* HistoManager::CreateClone(const char* prefix) +{ + // + HistoManager* hm = (HistoManager*)this->Clone(); + hm->AddPrefix(prefix); + TH1* histo; + for (int i=0;iInheritsFrom("TH1")) { + ((TH1*)obj)->SetDirectory(0); + } + } + hm->fNHistos = fNHistos; + hm->SetFileName(fDefName.Data()); + hm->SetDirName(fDirName.Data()); + 
return hm; + // +} + +//_______________________________________________________________ +Int_t HistoManager::AddHisto(TH1* histo,Int_t at) +{ + // Add new histo either to next free slot (at<0) or to requested position + if (at<0) at = fNHistos; + AddAtAndExpand(histo,at); + histo->SetDirectory(0); + histo->SetUniqueID(at+1); + return fNHistos++; + // +} + +//_______________________________________________________________ +Int_t HistoManager::AddGraph(TGraph* gr,Int_t at) +{ + // Add new histo either to next free slot (at<0) or to requested position + if (at<0) at = fNHistos; + AddAtAndExpand(gr,at); + //histo->SetDirectory(0); + gr->SetUniqueID(at+1); + return fNHistos++; + // +} + +//_______________________________________________________________ +void HistoManager::Compress() +{ + TObjArray::Compress(); + TObject* histo; + for (int i=0;iSetUniqueID(i+1); +} + +//_______________________________________________________________ +void HistoManager::Write(TFile* file) +{ + // Write all histograms to file + if (!fNHistos) return; + Bool_t localfile = kFALSE; + TH1* histo=0; + TFile *lfile=0; + const char* str=0; + if (file) lfile = file; + else { + // Check if the file is not already open + TFile *tmpF = (TFile*)gROOT->GetListOfFiles()->FindObject(fDefName.Data()); + if (tmpF && tmpF->IsOpen()) { + TString opt = tmpF->GetOption(); opt.ToLower(); + if (!opt.Contains("read")) { + lfile = tmpF; + tmpF->cd(); + } + } + } + TString pwd = gDirectory->GetPath(); + if (!lfile) { // have to open + str = fDefName.Data(); + if (!str || !str[0] || str[0] == ' ') fDefName = "histoman"; + if (!fDefName.Contains(".root")) fDefName += ".root"; + lfile = TFile::Open(fDefName.Data(),"UPDATE"); + fDefName = str; + localfile = kTRUE; + } + // + lfile->cd(); + // Create directory (if necessary) + str = fDirName.Data(); + if (str && str[0] && str[0] != ' ') { + if (!lfile->Get(str)) lfile->mkdir(str); + lfile->cd(str); + } + Printf("Writing histogrames to: %s%s",lfile->GetPath(),str); + for 
(int i=0;i(obj); + TDirectory* dr = 0; + if (histo) { + dr = histo->GetDirectory(); + histo->SetDirectory(0); + } + obj->Write(0,TObject::kOverwrite); + if (dr && histo) histo->SetDirectory(dr); + } + if (localfile) {lfile->Close(); delete lfile;} + TDirectory* oldDir = ((TDirectory *)gROOT->GetListOfFiles()->FindObject(pwd.Data())); + if (oldDir) oldDir->cd(); + // +} + +//_______________________________________________________________ +void HistoManager::Clear(Option_t*) +{ + int nent = GetLast()+1; + for (int i=0;iPrint(option); + // + } + Printf("\nTotal number of defined Histograms: %d",fNHistos); + Printf("\nCurrent output path: %s/%s",fDefName.Data(),fDirName.Data()); +} + +//_______________________________________________________________ +void HistoManager::AddPrefix(const char* pref) +{ + TString prfs = pref; + if (prfs.IsNull()) return; + int nent = GetLast()+1; + for (int i=0;iGetName(); + if (hh->InheritsFrom("TNamed")) ((TNamed*)hh)->SetName(prfs.Data()); + // prfs = pref; + // prfs += hh->GetTitle(); + // hh->SetTitle(prfs.Data()); + } +} + +//_______________________________________________________________ +void HistoManager::AddHistos(HistoManager* hm,Double_t c1) +{ + int nent = GetLast()+1; + int nent1 = hm->GetLast()+1; + if (nent!=nent1) {Error("AddHistos","HistoManagers have different content: %d vs %d",nent,nent1);return;} + for (int i=0;iGetHisto(i); + if (!hh1 || !hh2) continue; + hh1->Add(hh2,c1); + } +} + +//_______________________________________________________________ +void HistoManager::DivideHistos(HistoManager* hm) +{ + int nent = GetLast()+1; + int nent1 = hm->GetLast()+1; + if (nent!=nent1) {Error("DivideHistos","HistoManagers have different content: %d vs %d",nent,nent1);return;} + for (int i=0;iGetHisto(i); + if (!hh1 || !hh2) continue; + hh1->Divide(hh2); + } +} + +//_______________________________________________________________ +void HistoManager::MultiplyHistos(HistoManager* hm) +{ + int nent = GetLast()+1; + int nent1 = 
hm->GetLast()+1; + if (nent!=nent1) {Error("MultiplyHistos","HistoManagers have different content: %d vs %d",nent,nent1);return;} + for (int i=0;iGetHisto(i); + if (!hh1 || !hh2) continue; + hh1->Multiply(hh2); + } +} + +//_______________________________________________________________ +void HistoManager::ScaleHistos(Double_t c1) +{ + int nent = GetLast()+1; + for (int i=0;iScale(c1); + } +} + +//_______________________________________________________________ +void HistoManager::Sumw2() +{ + int nent = GetLast()+1; + for (int i=0;i(UncheckedAt(i)); + if (hh1) hh1->Sumw2(); + } +} + +//_______________________________________________________________ +void HistoManager::SetFile(TFile* file) +{ + if (file) fDefName = file->GetName(); +} + +//_______________________________________________________________ +void HistoManager::DelHisto(Int_t at) +{ + TH1* hist = GetHisto(at); + if (hist) { + RemoveAt(at); + delete hist; + } +} + +//_______________________________________________________________ +void HistoManager::Purify(Bool_t emptyToo) +{ + // remove empty slots, optionally removing empty histos too + int last = GetLast()+1; + if (emptyToo) + for (int i=0;iGetEntries()<1) { DelHisto(i); fNHistos--;} + } + Compress(); + // +} + +//_____________________________________________________________________________ +void HistoManager::SetFileName(const char *name) +{fDefName = name; gSystem->ExpandPathName(fDefName);} + +//_____________________________________________________________________________ +void HistoManager::Reset() +{ + int last = GetLast()+1; + for (int i=0;iReset(); + } +} + +//_____________________________________________________________________________ +Int_t HistoManager::Load(const char* fname,const char* dirname) +{ + TString flpath = fname; + gSystem->ExpandPathName(flpath); + TFile* file = TFile::Open(flpath.Data()); + if (!file) {Printf("Error: no file %s",fname); return 0;} + if (dirname && dirname[0] && dirname[0] != ' ') { + if (!file->Get(dirname)) { + 
Printf("Error: no %s directory in file %s",dirname,fname); + file->Close(); delete file; + return 0; + } + else file->cd(dirname); + } + // + int count = 0; + TList* lst = gDirectory->GetListOfKeys(); + TIter next(lst); + TObject* obj; + while ((obj=next())) { + if (FindObject(obj->GetName())) continue; // already added + TObject* hst = gDirectory->Get(obj->GetName()); + int ID = hst->GetUniqueID(); + TH1* h = dynamic_cast(hst); + if (h) { + AddHisto(h, ID-1); + count++; + continue; + } + TGraph* gr = dynamic_cast(hst); + if (gr) { + AddGraph(gr, ID-1); + count++; + continue; + } + } + file->Close(); + delete file; + return count; +} + +//_____________________________________________________________________________ +void HistoManager::SetColor(Color_t tcolor) +{ + int last = GetLast()+1; + for (int i=0;iSetLineColor(tcolor); + hist->SetMarkerColor(tcolor); + } +} + +//_____________________________________________________________________________ +void HistoManager::SetMarkerStyle(Style_t mstyle,Size_t msize) +{ + int last = GetLast()+1; + for (int i=0;iSetMarkerStyle(mstyle); + hist->SetMarkerSize(msize); + } +} + +//_____________________________________________________________________________ +void HistoManager::SetMarkerSize(Size_t msize) +{ + int last = GetLast()+1; + for (int i=0;iSetMarkerSize(msize); + } +} + diff --git a/src/DetectorK/HistoManager.h b/src/DetectorK/HistoManager.h new file mode 100644 index 0000000..45be301 --- /dev/null +++ b/src/DetectorK/HistoManager.h @@ -0,0 +1,74 @@ +#ifndef HISTOMANAGER_H +#define HISTOMANAGER_H + +#include "TH1.h" +#include "TH2.h" +#include "TGraph.h" +#include "TProfile.h" +#include "TFile.h" +#include "TObjArray.h" +class TROOT; +class TSystem; + +class HistoManager +: public TObjArray +{ + //#ifdef USE_USING + using TCollection::Print; + using TCollection::Write; + //#endif + public: + HistoManager(const char* dirname="",const char* fname="histoman.root",Bool_t LOAD=kFALSE,const char* prefix=""); + ~HistoManager(); 
+ HistoManager* CreateClone(const char* prefix); + // + Int_t GetNHistos() const {return fNHistos;} + TGraph* GetGraph(Int_t id) const {return id<=GetLast() ? dynamic_cast(UncheckedAt(id)) : 0;} + TH1* GetHisto(Int_t id) const {return id<=GetLast() ? dynamic_cast(UncheckedAt(id)) : 0;} + TH1* GetHisto(char* name) const {return dynamic_cast( FindObject(name) );} + TH1F* GetHisto1F(Int_t id) const {return dynamic_cast( UncheckedAt(id) );} + TH2F* GetHisto2F(Int_t id) const {return dynamic_cast( UncheckedAt(id) );} + TProfile* GetHistoP(Int_t id) const {return dynamic_cast( UncheckedAt(id) );} + Int_t AddHisto(TH1* histo,Int_t at=-1); + Int_t AddGraph(TGraph* gr,Int_t at=-1); + void DelHisto(Int_t at); + void SetFile(TFile* file); + void SetFileName(const char* fname); + char* GetFileName() const {return (char*) fDefName.Data();} + void SetDirName(const char* name) {fDirName = name;} + char* GetDirName() const {return (char*) fDirName.Data();} + // + void Reset(); + void Write(TFile* file=0); + Int_t Write(const char* flname, int =0, int =0) {SetFileName(flname);Write();return 0;} + void AddPrefix(const char* pref); + void AddHistos(HistoManager* hm, Double_t c1 = 1); + void DivideHistos(HistoManager* hm); + void MultiplyHistos(HistoManager* hm); + void ScaleHistos(Double_t c1 = 1); + void SetColor(Color_t tcolor = 1); + void SetMarkerStyle(Style_t mstyle = 1, Size_t msize = 1); + void SetMarkerSize(Size_t msize = 1); + void Sumw2(); + Int_t Load(const char* fname,const char* dirname=""); + // + void Purify(Bool_t emptyToo=kFALSE); + void Print(Option_t* option="") const; + void Clear(Option_t* option=""); + void Delete(Option_t* option=""); + virtual void Compress(); + // + private: + Int_t fNHistos; // Number of Histogrames defined + TString fDefName; // Default file name + TString fDirName; // Directory name in the output file + // + ClassDef(HistoManager,0) // NA60 Histograms manager +}; + + + + + + +#endif diff --git a/src/ECALdetector.cc b/src/ECALdetector.cc 
new file mode 100644 index 0000000..8c75afb --- /dev/null +++ b/src/ECALdetector.cc @@ -0,0 +1,124 @@ +/// @author: Yuri Kharlov +/// @author: Nicolo' Jacazio +/// @since 04/09/2021 + +#include "ECALdetector.hh" +#include "TDatabasePDG.h" +#include "TRandom.h" +#include "TLorentzVector.h" + +namespace o2 +{ +namespace delphes +{ + +/*****************************************************************/ + +void ECALdetector::setup(float resoEA, float resoEB, float resoEC, float resoXA, float resoXB) +{ + mEnergyResolutionA = resoEA; + mEnergyResolutionB = resoEB; + mEnergyResolutionC = resoEC; + mPositionResolutionA = resoXA; + mPositionResolutionB = resoXB; +} + +/*****************************************************************/ + +bool ECALdetector::hasECAL(const Track& track) const +{ + auto x = track.XOuter * 0.1; // [cm] + auto y = track.YOuter * 0.1; // [cm] + auto z = track.ZOuter * 0.1; // [cm] + /** check if hit **/ + bool ishit = false; + auto r = hypot(x, y); + ishit = (fabs(r - mRadius) < 0.001 && fabs(z) < mLength); + if (!ishit) + return false; + auto particle = (GenParticle*)track.Particle.GetObject(); + return true; +} + +/*****************************************************************/ +bool ECALdetector::makeSignal(const GenParticle& particle, + TLorentzVector& p4ECAL, + float& posZ, + float& posPhi) +{ + // Simulate fast response of ECAL to photons: + // take generated particle as input and calculate its smeared 4-momentum p4ECAL + // and hit coordinates posZ, posPhi + + const int pid = particle.PID; + if (pid != 22) { // only photons are treated so far. e+- and MIPs will be added later. 
+ return false; + } + + TLorentzVector p4True = particle.P4(); // true 4-momentum + if (TMath::Abs(p4True.Eta()) > 4) { // ECAL acceptance is rougly limited by |eta|<4 + return false; + } + + Float_t vtX = particle.X; // particle vertex X + Float_t vtY = particle.Y; // particle vertex Y + Float_t vtZ = particle.Z; // particle vertex Z + + posPhi = p4True.Phi(); // azimuth angle of a photon hit + posZ = -1e6; + Double_t tanTheta = TMath::Tan(p4True.Theta()); + if (tanTheta != 0.) { + posZ = mRadius / tanTheta; // z-coodrinate of a photon hit + } + + p4ECAL = smearPhotonP4(p4True); + + return true; +} + +/*****************************************************************/ +TLorentzVector ECALdetector::smearPhotonP4(const TLorentzVector& pTrue) +{ + // This function smears the photon 4-momentum from the true one via applying + // parametrized energy and coordinate resolution + + // Get true energy from true 4-momentum and smear this energy + Double_t eTrue = pTrue.E(); + Double_t eSmeared = smearPhotonE(eTrue); + // Smear direction of 3-vector + Double_t phi = pTrue.Phi() + gRandom->Gaus(0., sigmaX(eTrue) / mRadius); + Double_t theta = pTrue.Theta() + gRandom->Gaus(0., sigmaX(eTrue) / mRadius); + // Calculate smeared components of 3-vector + Double_t pxSmeared = eSmeared * TMath::Cos(phi) * TMath::Sin(theta); + Double_t pySmeared = eSmeared * TMath::Sin(phi) * TMath::Sin(theta); + Double_t pzSmeared = eSmeared * TMath::Cos(theta); + // Construct new 4-momentum from smeared energy and 3-momentum + TLorentzVector pSmeared(pxSmeared, pySmeared, pzSmeared, eSmeared); + return pSmeared; +} +/*****************************************************************/ +Double_t ECALdetector::sigmaX(const Double_t& eTrue) +{ + // Calculate sigma of photon coordinate smearing [cm] + // E is the photon energy + Double_t dX = sqrt(mPositionResolutionA * mPositionResolutionA + mPositionResolutionB * mPositionResolutionB / eTrue); + return dX; +} 
+/*****************************************************************/ +Double_t ECALdetector::smearPhotonE(const Double_t& eTrue) +{ + // Smear a photon energy eTrue according to a Gaussian distribution with energy resolution parameters + // sigma of Gaussian smearing is calculated from parameters A,B,C and true energy + + const Double_t sigmaE = eTrue * sqrt(mEnergyResolutionA * mEnergyResolutionA / eTrue / eTrue + + mEnergyResolutionB * mEnergyResolutionB / eTrue + + mEnergyResolutionC * mEnergyResolutionC); + Double_t eSmeared = gRandom->Gaus(eTrue, sigmaE); + if (eSmeared < 0) + eSmeared = 0; + return eSmeared; +} +/*****************************************************************/ + +} // namespace delphes +} // namespace o2 diff --git a/src/ECALdetector.hh b/src/ECALdetector.hh new file mode 100644 index 0000000..7ff01fc --- /dev/null +++ b/src/ECALdetector.hh @@ -0,0 +1,43 @@ +/// @author: Roberto Preghenella +/// @email: preghenella@bo.infn.it + +#ifndef _DelphesO2_ECALdetector_h_ +#define _DelphesO2_ECALdetector_h_ + +#include "classes/DelphesClasses.h" + +namespace o2 +{ +namespace delphes +{ + +class ECALdetector +{ + + public: + ECALdetector() = default; + ~ECALdetector() = default; + + void setup(float resoEA, float resoEB, float resoEC, float resoPosA, float resoPosB); + bool hasECAL(const Track& track) const; + bool makeSignal(const GenParticle& particle, TLorentzVector& pECAL, float& posZ, float& posPhi); + + protected: + Double_t smearPhotonE(const Double_t& eTrue); + Double_t sigmaX(const Double_t& eTrue); + TLorentzVector smearPhotonP4(const TLorentzVector& pTrue); + + float mRadius = 120.; // ECAL barrel inner radius [cm] + float mLength = 200.; // ECAL half-length along beam axis [cm] + + float mEnergyResolutionA = 0.002; // parameter A of energy resolution in GeV + float mEnergyResolutionB = 0.02; // parameter B of energy resolution in GeV^{1/2} + float mEnergyResolutionC = 0.01; // parameter C of energy resolution + float mPositionResolutionA 
= 0.15; // parameter A of coordinate resolution in cm + float mPositionResolutionB = 0.30; // parameter B of coordinate resolution in cm*GeV^{1/2} +}; + +} // namespace delphes +} // namespace o2 + +#endif /** _DelphesO2_ECALdetector_h_ **/ diff --git a/src/MIDdetector.cc b/src/MIDdetector.cc new file mode 100644 index 0000000..4483f0b --- /dev/null +++ b/src/MIDdetector.cc @@ -0,0 +1,84 @@ +/// @author: Roberto Preghenella +/// @email: preghenella@bo.infn.it + +/// @author: Antonio Uras +/// @email: antonio.uras@cern.ch + +#include "MIDdetector.hh" +#include "TDatabasePDG.h" +#include "THnSparse.h" +#include "TRandom.h" +#include "TDatime.h" +#include "TFile.h" +#include "TVector3.h" +#include "TMath.h" +#include "TAxis.h" + +namespace o2 { + namespace delphes { + + //========================================================================================================== + + bool MIDdetector::setup(const Char_t *nameInputFile = "muonAccEffPID.root") { + + TDatime t; + gRandom->SetSeed(t.GetDate()+t.GetYear()*t.GetHour()*t.GetMinute()*t.GetSecond()); + + mFileAccEffMuonPID = new TFile(nameInputFile); + if (!mFileAccEffMuonPID) { + printf("File %s not found\n",nameInputFile); + return kFALSE; + } + if (!(mFileAccEffMuonPID->IsOpen())) { + printf("File %s not open\n",nameInputFile); + return kFALSE; + } + + for (Int_t iPart=kMuon; iPartGet(Form("mAccEffMuonPID_%s",partLabel[iPart])); + if (!mAccEffMuonPID[iPart]) { + printf("Object %s not found, quitting\n",Form("mAccEffMuonPID_%s",partLabel[iPart])); + return kFALSE; + } + mMomMin[iPart] = TMath::Max(1.2, mAccEffMuonPID[iPart]->GetAxis(1)->GetBinCenter(1)); + mMomMax[iPart] = mAccEffMuonPID[iPart]->GetAxis(1)->GetBinCenter(mAccEffMuonPID[iPart]->GetAxis(1)->GetNbins()); + } + + printf("Setup of MIDdetector successfully completed\n"); + return kTRUE; + + } + + //========================================================================================================== + + bool MIDdetector::hasMID(const Track &track) 
{ + + auto pdg = std::abs(track.PID); + auto part = pidmap[pdg]; + return ((TMath::Abs(track.Eta) < mEtaMax) && (track.P > mMomMin[part])); + + } + + //========================================================================================================== + + bool MIDdetector::isMuon(const Track &track, int multiplicity=1) { + + auto pdg = std::abs(track.PID); + auto part = pidmap[pdg]; + if (part == kElectron) return kFALSE; + + auto particle = (GenParticle*) track.Particle.GetObject(); + + Double_t mom = TMath::Min(Double_t(track.P), Double_t(mMomMax[part])); + + Double_t var[4] = {track.Eta, mom, particle->Z, double(multiplicity)}; + Double_t probMuonPID = mAccEffMuonPID[part]->GetBinContent(mAccEffMuonPID[part]->GetBin(var)); + return (gRandom->Uniform() < probMuonPID); + + } + + //========================================================================================================== + + } /** namespace delphes **/ + +} /** namespace o2 **/ diff --git a/src/MIDdetector.hh b/src/MIDdetector.hh new file mode 100644 index 0000000..a871aee --- /dev/null +++ b/src/MIDdetector.hh @@ -0,0 +1,48 @@ +/// @author: Roberto Preghenella +/// @email: preghenella@bo.infn.it + +/// @author: Antonio Uras +/// @email: antonio.uras@cern.ch + +#ifndef _DelphesO2_MIDdetector_h_ +#define _DelphesO2_MIDdetector_h_ + +#include "classes/DelphesClasses.h" +#include "THnSparse.h" +#include "TFile.h" + +#include +using namespace std; + +namespace o2 { + namespace delphes { + + class MIDdetector { + + public: + MIDdetector() = default; + ~MIDdetector() = default; + + enum { kElectron, kMuon, kPion, kKaon, kProton, kNPart }; // primary particles with a non-zero muon PID probability + + bool setup(const Char_t *nameInputFile); + bool hasMID(const Track &track); + bool isMuon(const Track &track, int multiplicity); + + protected: + + TFile *mFileAccEffMuonPID; + THnSparse *mAccEffMuonPID[kNPart]; + const double mEtaMax = 1.6; + double mMomMin[kNPart]; + double mMomMax[kNPart]; + const 
char *partLabel[kNPart] = {"electron","muon","pion","kaon","proton"}; + std::map pidmap = { {11, kElectron}, {13, kMuon}, {211, kPion}, {321, kKaon}, {2212, kProton} }; + + }; + + } /** namespace delphes **/ +} /** namespace o2 **/ + +#endif /** _DelphesO2_MIDLayer_h_ **/ + diff --git a/src/PhotonConversion.cc b/src/PhotonConversion.cc new file mode 100644 index 0000000..82ec8e5 --- /dev/null +++ b/src/PhotonConversion.cc @@ -0,0 +1,120 @@ +/// @author: Ana Marin +/// @author: Nicolo' Jacazio +/// @since 20/09/2021 + +#include +#include "PhotonConversion.hh" +#include "TDatabasePDG.h" +#include "TRandom.h" +#include "TLorentzVector.h" +#include "TDatime.h" +#include +#include + +namespace o2 +{ +namespace delphes +{ + +/*****************************************************************/ + +void PhotonConversion::setup() +{ + + TDatime t; + gRandom->SetSeed(t.GetDate() + t.GetYear() * t.GetHour() * t.GetMinute() * t.GetSecond()); // NB: gRandom is a global pointer ? +} + +/*****************************************************************/ + +bool PhotonConversion::hasPhotonConversion(const GenParticle& particle) const +{ + + const int pid = particle.PID; + TLorentzVector p4True = particle.P4(); + float convProb,eff; + + if (pid == 22) { + if (TMath::Abs(particle.Eta) < 1.3) { + convProb = 3.54334e-02 * TMath::Power(p4True.Pt(), 1.47512) / (1.56461e-02 + TMath::Power(p4True.Pt(), 1.43599)); + if (convProb > 0.04) + convProb = 0.04; + eff = 5.89182e-01 * TMath::Power(p4True.Pt(), 3.85834) / (2.96558e-03 + TMath::Power(p4True.Pt(), 3.72573)); + if (eff > 1.) + eff = 1.; + + } else if (TMath::Abs(particle.Eta) > 1.75 && TMath::Abs(particle.Eta) < 4.) { + convProb = -8.24825e-03 *( TMath::Power(p4True.P(), -5.03182e-01 )-1.13113e+01*p4True.P()) / (2.23495e-01 + TMath::Power(p4True.P(), 1.08338e+00 )); + eff = 5.89182e-01 * TMath::Power(p4True.P(), 3.85834) / (2.96558e-03 + TMath::Power(p4True.P(), 3.72573)); + if (eff > 1.) 
+ eff = 1.; + }else{ + convProb = 0.; + eff=0.; + } + return (gRandom->Uniform() < (convProb * eff)); + } else { + const Float_t misConvProb = 0.0; + return (gRandom->Uniform() < misConvProb); + } + return true; +} + +/*****************************************************************/ + +bool PhotonConversion::makeSignal(const GenParticle& particle, TLorentzVector& photonConv) +{ + const int pid = particle.PID; + if (pid != 22) { + return false; + } + // Eta coverage of the central barrel. Region where the conv prob., rec effciency and momentum resolution have been parametrized. + if (( TMath::Abs(particle.Eta) > 1.3 && TMath::Abs(particle.Eta) < 1.75) || TMath::Abs(particle.Eta) > 4 ) { + return false; + } + TLorentzVector p4Smeared = smearPhotonP(particle); + photonConv = p4Smeared; + return true; +} + +/*****************************************************************/ + +TLorentzVector PhotonConversion::smearPhotonP(const GenParticle& particle) +{ + // This function smears the photon 4-momentum from the true one via applying + // parametrized pt and pz resolution + + TLorentzVector p4True = particle.P4(); + // std::cout<< "Particle px,py,pz,pt,p,m,Eta,phi:: "<< particle.Px << " " << particle.Py << " " << particle.Pz << " "<< + // particle.P << " "<< particle.M1 << " "<< particle.Eta << " " << particle.Phi << " " << std::endl; + + // Get true energy from true 4-momentum and smear this energy + double pTrue = p4True.P(); + double phi = p4True.Phi(); + double theta = p4True.Theta(); + + double sigmaP; + if( TMath::Abs(particle.Eta) < 1.3) { + sigmaP = pTrue * TMath::Sqrt(sigmaPt0 * sigmaPt0 + (sigmaPt1 * pTrue) * (sigmaPt1 * pTrue)); + } else if ( TMath::Abs(particle.Eta) > 1.75 && TMath::Abs(particle.Eta) < 4 ) { + sigmaP = pTrue * TMath::Sqrt(sigmaPF0 * sigmaPF0 ); + } + + double pSmearedMag = gRandom->Gaus(pTrue, sigmaP); + if (pSmearedMag < 0) + pSmearedMag = 0; + + // Calculate smeared components of 3-vector + Double_t pxSmeared = pSmearedMag * 
TMath::Cos(phi) * TMath::Sin(theta); + Double_t pySmeared = pSmearedMag * TMath::Sin(phi) * TMath::Sin(theta); + Double_t pzSmeared = pSmearedMag * TMath::Cos(theta); + + // Construct new 4-momentum from smeared energy and 3-momentum + TLorentzVector pSmeared; + pSmeared.SetXYZM(pxSmeared, pySmeared, pzSmeared, 0.); + return pSmeared; +} +/*****************************************************************/ + +} // namespace delphes +} // namespace o2 diff --git a/src/PhotonConversion.hh b/src/PhotonConversion.hh new file mode 100644 index 0000000..da9aa7d --- /dev/null +++ b/src/PhotonConversion.hh @@ -0,0 +1,39 @@ +/// @Author: Roberto Preghenella +/// @email: preghenella@bo.infn.it + +#ifndef _DelphesO2_PhotonConversion_h_ +#define _DelphesO2_PhotonConversion_h_ + +#include "classes/DelphesClasses.h" + +namespace o2 +{ +namespace delphes +{ + +class PhotonConversion +{ + + public: + PhotonConversion() = default; + ~PhotonConversion() = default; + + void setup(); + bool hasPhotonConversion(const GenParticle& particle) const; + bool makeSignal(const GenParticle& particle, TLorentzVector& pConv); + + protected: + TLorentzVector smearPhotonP(const GenParticle& particle); + + float sigmaPt0 = 0.0314; // parameter sigma0 for momentum resolution + float sigmaPt1 = 0.00406; // parameter sigma1 for momentum resolution + + float sigmaPF0 = 0.04082; // parameter sigma0 for momentum resolution ~30% worst than eta~0 + + +}; + +} // namespace delphes +} // namespace o2 + +#endif /** _DelphesO2_PhotonConversion_h_ **/ diff --git a/src/PreShower.cc b/src/PreShower.cc new file mode 100644 index 0000000..c470c9e --- /dev/null +++ b/src/PreShower.cc @@ -0,0 +1,69 @@ +/// @author: Roberto Preghenella +/// @email: preghenella@bo.infn.it + +/// @author: Antonio Uras +/// @email: antonio.uras@cern.ch + +/// @author: Marco van Leeuwen +/// @email: marco.van.leeuwen@cern.ch + +#include "PreShower.hh" +#include "TDatabasePDG.h" +#include "THnSparse.h" +#include "TRandom.h" +#include 
"TDatime.h" +#include "TFile.h" +#include "TVector3.h" +#include "TMath.h" +#include "TAxis.h" + +namespace o2 { + namespace delphes { + + //========================================================================================================== + + bool PreShower::setup() { + + TDatime t; + gRandom->SetSeed(t.GetDate()+t.GetYear()*t.GetHour()*t.GetMinute()*t.GetSecond()); // NB: gRandom is a global pointer ? + for (Int_t iPart = 0; iPart < kNPart; iPart++) { + mMomMin[iPart] = 0.1; + mMomMax[iPart] = 20; + } + + return kTRUE; + + } + + //========================================================================================================== + + bool PreShower::hasPreShower(const Track &track) { + + auto pdg = std::abs(track.PID); + auto part = pidmap[pdg]; + return ((TMath::Abs(track.Eta) < mEtaMax) && (track.P > mMomMin[part])); + + } + + //========================================================================================================== + + bool PreShower::isElectron(const Track &track, int multiplicity=1) { + + auto pdg = std::abs(track.PID); + auto part = pidmap[pdg]; + if (part == kElectron) { + // Parametrisation of preshower detector studies without charge sharing + float eff = 0.8*(1.-exp(-1.6*(track.P-0.05))); + return (gRandom->Uniform() < eff); + } + else { + const Float_t misTagProb = 0.001; + return (gRandom->Uniform() < misTagProb); + } + } + + //========================================================================================================== + + } /** namespace delphes **/ + +} /** namespace o2 **/ diff --git a/src/PreShower.hh b/src/PreShower.hh new file mode 100644 index 0000000..0c71de5 --- /dev/null +++ b/src/PreShower.hh @@ -0,0 +1,49 @@ +/// @author: Roberto Preghenella +/// @email: preghenella@bo.infn.it + +/// @author: Antonio Uras +/// @email: antonio.uras@cern.ch + +/// @author: Marco van Leeuwen +/// @email: marco.van.leeuwen@cern.ch + +#ifndef _DelphesO2_PreShower_h_ +#define _DelphesO2_PreShower_h_ + +#include 
"classes/DelphesClasses.h" +#include "THnSparse.h" +#include "TFile.h" + +#include +using namespace std; + +namespace o2 { + namespace delphes { + + class PreShower { + + public: + PreShower() = default; + ~PreShower() = default; + + enum { kElectron, kMuon, kPion, kKaon, kProton, kNPart }; // primary particles with a non-zero muon PID probability + + bool setup(); + bool hasPreShower(const Track &track); + bool isElectron(const Track &track, int multiplicity); + + protected: + + const double mEtaMax = 1.75; + double mMomMin[kNPart]; + double mMomMax[kNPart]; + const char *partLabel[kNPart] = {"electron","muon","pion","kaon","proton"}; + std::map pidmap = { {11, kElectron}, {13, kMuon}, {211, kPion}, {321, kKaon}, {2212, kProton} }; + + }; + + } /** namespace delphes **/ +} /** namespace o2 **/ + +#endif /** _DelphesO2_MIDLayer_h_ **/ + diff --git a/src/RICHdetector.cc b/src/RICHdetector.cc new file mode 100644 index 0000000..5483852 --- /dev/null +++ b/src/RICHdetector.cc @@ -0,0 +1,121 @@ +/// @author: Roberto Preghenella +/// @email: preghenella@bo.infn.it + +#include "RICHdetector.hh" +#include "TDatabasePDG.h" +#include "TRandom.h" + +namespace o2 +{ +namespace delphes +{ + +/*****************************************************************/ + +void +RICHdetector::setup(float radius, float length) +{ + mRadius = radius; + mLength = length; +} + +/*****************************************************************/ + +bool +RICHdetector::hasRICH(const Track &track) const +{ + auto x = track.XOuter * 0.1; // [cm] + auto y = track.YOuter * 0.1; // [cm] + auto z = track.ZOuter * 0.1; // [cm] + /** check if hit **/ + bool ishit = false; + if (mType == kBarrel) { + auto r = hypot(x, y); + ishit = (fabs(r - mRadius) < 0.001 && fabs(z) < mLength); + } + if (mType == kForward) { + auto r = hypot(x, y); + ishit = (r > mRadiusIn) && (r < mRadius) && (fabs(fabs(z) - mLength) < 0.001); + } + if (!ishit) return false; + /** check if above threshold **/ + auto particle = 
(GenParticle *)track.Particle.GetObject(); + int pid = particle->PID; + double mass = TDatabasePDG::Instance()->GetParticle(pid)->Mass(); + auto thr = cherenkovThreshold(mass); + if (particle->P < thr) return false; + return true; +} + +/*****************************************************************/ + +std::pair +RICHdetector::getMeasuredAngle(const Track &track) const +{ + if (!hasRICH(track)) return {0., 0.}; + auto particle = (GenParticle *)track.Particle.GetObject(); + int pid = particle->PID; + double mass = TDatabasePDG::Instance()->GetParticle(pid)->Mass(); + auto angle = cherenkovAngle(particle->P, mass); + auto nph_av = numberOfPhotons(angle); // average number of photons + auto nph = gRandom->Poisson(nph_av); // random number of photons + if (nph < mMinPhotons) return {0., 0.}; + auto nph_el = 0; // number of photo-electrons + for (int i = 0; i < nph; ++i) { + if (gRandom->Uniform() < mEfficiency) nph_el++; + } + if (nph_el < mMinPhotons) return {0., 0.}; + auto sigma = mSigma / sqrt(nph_el); + angle = gRandom->Gaus(angle, sigma); + return {angle, sigma}; +} + +/*****************************************************************/ + +float +RICHdetector::getExpectedAngle(float p, float mass) const +{ + auto thr = cherenkovThreshold(mass); + if (p < thr) return 0.; + return cherenkovAngle(p, mass); +} + +/*****************************************************************/ + +void +RICHdetector::makePID(const Track &track, std::array &deltaangle, std::array &nsigma) const +{ + double pmass[5] = {0.00051099891, 0.10565800, 0.13957000, 0.49367700, 0.93827200}; + + /** get info **/ + auto measurement = getMeasuredAngle(track); + auto angle = measurement.first; + auto anglee = measurement.second; + + /** perform PID **/ + double p = track.P; + double ep = p * track.ErrorP; + double n = mIndex; + for (Int_t ipart = 0; ipart < 5; ++ipart) { + auto m = pmass[ipart]; + auto exp_angle = getExpectedAngle(p, m); + auto A = std::sqrt(n * n * p * p - m * m - p * p); + 
auto B = std::sqrt(m * m + p * p); + auto exp_sigma = m * m / p / A / B * ep; + exp_sigma = sqrt(anglee * anglee + exp_sigma * exp_sigma); + if (anglee <= 0. || exp_angle <= 0.) { + deltaangle[ipart] = -1000.; + nsigma[ipart] = 1000.; + continue; + } + deltaangle[ipart] = angle - exp_angle; + nsigma[ipart] = deltaangle[ipart] / exp_sigma; // should also consider the momentum resolution + } + +} + +/*****************************************************************/ + + +} /** namespace delphes **/ +} /** namespace o2 **/ diff --git a/src/RICHdetector.hh b/src/RICHdetector.hh new file mode 100644 index 0000000..d328547 --- /dev/null +++ b/src/RICHdetector.hh @@ -0,0 +1,68 @@ +/// @author: Roberto Preghenella +/// @email: preghenella@bo.infn.it + +#ifndef _DelphesO2_RICHdetector_h_ +#define _DelphesO2_RICHdetector_h_ + +#include "classes/DelphesClasses.h" + +namespace o2 +{ +namespace delphes +{ + +class RICHdetector { + +public: + RICHdetector() = default; + ~RICHdetector() = default; + + enum { kBarrel, kForward }; // type of RICH detector + + void setup(float radius, float length); + bool hasRICH(const Track &track) const; + + void setIndex(float val) { mIndex = val; }; + void setRadiatorLength(float val) { mRadiatorLength = val; }; + void setEfficiency(float val) { mEfficiency = val; }; + void setSigma(float val) { mSigma = val; }; + void setMinPhotons(int val) { mMinPhotons = val; }; + + void setType(int val) { mType = val; }; + void setRadiusIn(float val) { mRadiusIn = val; }; + + void makePID(const Track &track, std::array &deltaangle, std::array &nsigma) const; + std::pair getMeasuredAngle(const Track &track) const; + float getExpectedAngle(float p, float mass) const; + + double cherenkovAngle(double p, double m) const { + return acos( sqrt( m * m + p * p ) / ( mIndex * p ) ); }; + double cherenkovThreshold(double m) const { + return m / sqrt(mIndex * mIndex - 1.); }; + double numberOfPhotons(double angle) const { + return 490. 
* sin(angle) * sin(angle) * mRadiatorLength; }; + double numberOfDetectedPhotons(double angle) const { + return numberOfPhotons(angle) * mEfficiency; }; + double cherenkovAngleSigma(double p, double m) const { + return mSigma / sqrt(numberOfDetectedPhotons(cherenkovAngle(p, m))); } + +protected: + + int mType = kBarrel; + float mRadius = 100.; // [cm] + float mRadiusIn = 10.; // [cm] + float mLength = 200.; // [cm] + + float mIndex = 1.03; + float mRadiatorLength = 2.; // [cm] + float mEfficiency = 0.4; + float mSigma = 7.e-3; // [rad] + int mMinPhotons = 3; + +}; + +} /** namespace delphes **/ +} /** namespace o2 **/ + +#endif /** _DelphesO2_RICHdetector_h_ **/ + diff --git a/src/TOFLayer.cc b/src/TOFLayer.cc index 86a28df..f7665df 100644 --- a/src/TOFLayer.cc +++ b/src/TOFLayer.cc @@ -12,11 +12,12 @@ namespace delphes /*****************************************************************/ void -TOFLayer::setup(float radius, float length, float sigmat) +TOFLayer::setup(float radius, float length, float sigmat, float sigma0) { mRadius = radius; mLength = length; mSigmaT = sigmat; + mSigma0 = sigma0; } /*****************************************************************/ @@ -26,8 +27,16 @@ TOFLayer::hasTOF(const Track &track) { auto x = track.XOuter * 0.1; // [cm] auto y = track.YOuter * 0.1; // [cm] - auto z = track.ZOuter * 0.1; // [cm] - return (fabs(hypot(x, y) - mRadius) < 0.001 && fabs(z) < mLength); + auto z = track.ZOuter * 0.1; // [cm] + if (mType == kBarrel) { + auto r = hypot(x, y); + return (fabs(r - mRadius) < 0.001 && fabs(z) < mLength); + } + if (mType == kForward) { + auto r = hypot(x, y); + return (r > mRadiusIn) && (r < mRadius) && (fabs(fabs(z) - mLength) < 0.001); + } + return false; } /*****************************************************************/ @@ -35,7 +44,7 @@ TOFLayer::hasTOF(const Track &track) float TOFLayer::getBeta(const Track &track) { - double tof = track.TOuter * 1.e9 - mTime0; // [ns] + double tof = track.TOuter * 1.e9; // [ns] 
double L = track.L * 0.1; // [cm] double c = 29.9792458; // [cm/ns] return (L / tof / c); @@ -47,9 +56,10 @@ void TOFLayer::makePID(const Track &track, std::array &deltat, std::array &nsigma) { double pmass[5] = {0.00051099891, 0.10565800, 0.13957000, 0.49367700, 0.93827200}; - + /** get info **/ - double tof = track.TOuter * 1.e9 - mTime0; // [ns] + double tof = track.TOuter * 1.e9; // [ns] + double etof = track.ErrorT * 1.e9; // [ns] double L = track.L * 0.1; // [cm] double p = track.P; double p2 = p * p; @@ -61,8 +71,8 @@ TOFLayer::makePID(const Track &track, std::array &deltat, std::array &tracks, std::array &tzero) double sum = 0.; double sumw = 0.; - + for (auto &track : tracks) { int pid = track->PID; double mass = TDatabasePDG::Instance()->GetParticle(pid)->Mass(); - double mass2 = mass * mass; + double mass2 = mass * mass; double tof = track->TOuter * 1.e9; // [ns] + double etof = track->ErrorT * 1.e9; // [ns] double L = track->L * 0.1; // [cm] double p = track->P; // [GeV/c] + p *= abs(TDatabasePDG::Instance()->GetParticle(pid)->Charge()) / 3.; // [GeV/c] double ep = track->ErrorP; double p2 = p * p; double c = 29.9792458; // [cm/ns] double Lc = L / c; double texp = Lc / p * TMath::Sqrt(mass2 + p2); - double etexp = Lc * mass2 / p2 / TMath::Sqrt(mass2 + p2) * ep; - double sigma = TMath::Sqrt(etexp * etexp + mSigmaT * mSigmaT); + double etexp = Lc * mass2 / p2 / TMath::Sqrt(mass2 + p2) * ep; + double sigma = TMath::Sqrt(etexp * etexp + etof * etof); double deltat = tof - texp; double w = 1. / (sigma * sigma); @@ -102,19 +114,80 @@ TOFLayer::eventTime(std::vector &tracks, std::array &tzero) } if (sumw <= 0.) { - mTime0 = 0.; - mSigma0 = 1000.; + tzero[0] = 0.; + tzero[1] = mSigma0; return false; } - mTime0 = tzero[0] = sum / sumw; - mSigma0 = tzero[1] = sqrt(1. / sumw); + tzero[0] = sum / sumw; + tzero[1] = sqrt(1. 
/ sumw); + + // if we have many tracks, we use the start-time computed with all tracks + + if (tracks.size() > 4) { + for (auto &track : tracks) { + track->TOuter -= tzero[0] * 1.e-9; // [s] + track->ErrorT = std::hypot(track->ErrorT, tzero[1] * 1.e-9); + } + return true; + } + + // if we have few tracks, we do the combinations excluding the track of interest + + std::vector time0, sigma0; + time0.reserve(tracks.size()); + sigma0.reserve(tracks.size()); + for (int itrack = 0; itrack < tracks.size(); ++itrack) { + + time0[itrack] = 0; + sigma0[itrack] = mSigma0; + + sum = 0.; + sumw = 0.; + + for (int jtrack = 0; jtrack < tracks.size(); ++jtrack) { + if (itrack == jtrack) continue; // do not use self + + auto &track = tracks[jtrack]; + int pid = track->PID; + double mass = TDatabasePDG::Instance()->GetParticle(pid)->Mass(); + double mass2 = mass * mass; + double tof = track->TOuter * 1.e9; // [ns] + double etof = track->ErrorT * 1.e9; // [ns] + double L = track->L * 0.1; // [cm] + double p = track->P; // [GeV/c] + p *= abs(TDatabasePDG::Instance()->GetParticle(pid)->Charge()) / 3.; // [GeV/c] + double ep = track->ErrorP; + double p2 = p * p; + double c = 29.9792458; // [cm/ns] + double Lc = L / c; + double texp = Lc / p * TMath::Sqrt(mass2 + p2); + double etexp = Lc * mass2 / p2 / TMath::Sqrt(mass2 + p2) * ep; + double sigma = TMath::Sqrt(etexp * etexp + etof * etof); + double deltat = tof - texp; + double w = 1. / (sigma * sigma); + sum += w * deltat; + sumw += w; + } + + if (sumw <= 0.) continue; + + time0[itrack] = sum / sumw; + sigma0[itrack] = std::sqrt(1. 
/ sumw); + + } + + for (int itrack = 0; itrack < tracks.size(); ++itrack) { + auto &track = tracks[itrack]; + track->TOuter -= time0[itrack] * 1.e-9; // [s] + track->ErrorT = std::hypot(track->ErrorT, sigma0[itrack] * 1.e-9); + } return true; } /*****************************************************************/ - + } /** namespace delphes **/ } /** namespace o2 **/ diff --git a/src/TOFLayer.hh b/src/TOFLayer.hh index 7d663a3..edc688c 100644 --- a/src/TOFLayer.hh +++ b/src/TOFLayer.hh @@ -17,20 +17,25 @@ public: TOFLayer() = default; ~TOFLayer() = default; - void setup(float radius, float length, float sigmat); + enum { kBarrel, kForward }; // type of TOF detector + + void setup(float radius, float length, float sigmat, float sigma0); bool hasTOF(const Track &track); float getBeta(const Track &track); void makePID(const Track &track, std::array &deltat, std::array &nsigma); bool eventTime(std::vector &tracks, std::array &tzero); + + void setType(int val) { mType = val; }; + void setRadiusIn(float val) { mRadiusIn = val; }; protected: - + + int mType = kBarrel; float mRadius = 100.; // [cm] + float mRadiusIn = 10.; // [cm] float mLength = 200.; // [cm] float mSigmaT = 0.02; // [ns] - - float mTime0 = 0.; // [ns] - float mSigma0 = 0.; // [ns] + float mSigma0 = 0.200; // [ns] }; diff --git a/src/TrackSmearer.cc b/src/TrackSmearer.cc index a7b4601..09f580f 100644 --- a/src/TrackSmearer.cc +++ b/src/TrackSmearer.cc @@ -15,28 +15,52 @@ namespace delphes /*****************************************************************/ bool -TrackSmearer::loadTable(int pdg, const char *filename) +TrackSmearer::loadTable(int pdg, const char *filename, bool forceReload) { auto ipdg = getIndexPDG(pdg); + if (mLUTHeader[ipdg] && !forceReload) { + std::cout << " --- LUT table for PDG " << pdg << " has been already loaded with index " << ipdg << std::endl; + return false; + } mLUTHeader[ipdg] = new lutHeader_t; std::ifstream lutFile(filename, std::ifstream::binary); if (!lutFile.is_open()) { 
std::cout << " --- cannot open covariance matrix file for PDG " << pdg << ": " << filename << std::endl; + delete mLUTHeader[ipdg]; + mLUTHeader[ipdg] = nullptr; return false; } lutFile.read(reinterpret_cast(mLUTHeader[ipdg]), sizeof(lutHeader_t)); if (lutFile.gcount() != sizeof(lutHeader_t)) { std::cout << " --- troubles reading covariance matrix header for PDG " << pdg << ": " << filename << std::endl; + delete mLUTHeader[ipdg]; + mLUTHeader[ipdg] = nullptr; + return false; + } + if (mLUTHeader[ipdg]->version != LUTCOVM_VERSION) { + std::cout << " --- LUT header version mismatch: expected/detected = " << LUTCOVM_VERSION << "/" << mLUTHeader[ipdg]->version << std::endl; + delete mLUTHeader[ipdg]; + mLUTHeader[ipdg] = nullptr; + return false; + } + if (mLUTHeader[ipdg]->pdg != pdg) { + std::cout << " --- LUT header PDG mismatch: expected/detected = " << pdg << "/" << mLUTHeader[ipdg]->pdg << std::endl; + delete mLUTHeader[ipdg]; + mLUTHeader[ipdg] = nullptr; return false; } const int nnch = mLUTHeader[ipdg]->nchmap.nbins; const int nrad = mLUTHeader[ipdg]->radmap.nbins; const int neta = mLUTHeader[ipdg]->etamap.nbins; const int npt = mLUTHeader[ipdg]->ptmap.nbins; + mLUTEntry[ipdg] = new lutEntry_t****[nnch]; for (int inch = 0; inch < nnch; ++inch) { + mLUTEntry[ipdg][inch] = new lutEntry_t***[nrad]; for (int irad = 0; irad < nrad; ++irad) { + mLUTEntry[ipdg][inch][irad] = new lutEntry_t**[neta]; for (int ieta = 0; ieta < neta; ++ieta) { + mLUTEntry[ipdg][inch][irad][ieta] = new lutEntry_t*[npt]; for (int ipt = 0; ipt < npt; ++ipt) { mLUTEntry[ipdg][inch][irad][ieta][ipt] = new lutEntry_t; lutFile.read(reinterpret_cast(mLUTEntry[ipdg][inch][irad][ieta][ipt]), sizeof(lutEntry_t)); @@ -47,6 +71,7 @@ TrackSmearer::loadTable(int pdg, const char *filename) }}}} std::cout << " --- read covariance matrix table for PDG " << pdg << ": " << filename << std::endl; mLUTHeader[ipdg]->print(); + lutFile.close(); return true; } @@ -67,9 +92,17 @@ TrackSmearer::getLUTEntry(int 
pdg, float nch, float radius, float eta, float pt) /*****************************************************************/ -void +bool TrackSmearer::smearTrack(O2Track &o2track, lutEntry_t *lutEntry) { + // generate efficiency + if (mUseEfficiency) { + auto eff = 0.; + if (mWhatEfficiency == 1) eff = lutEntry->eff; + if (mWhatEfficiency == 2) eff = lutEntry->eff2; + if (gRandom->Uniform() > eff) + return false; + } // transform params vector and smear double params_[5]; for (int i = 0; i < 5; ++i) { @@ -92,21 +125,23 @@ TrackSmearer::smearTrack(O2Track &o2track, lutEntry_t *lutEntry) // set covariance matrix for (int i = 0; i < 15; ++i) o2track.setCov(lutEntry->covm[i], i); + return true; } /*****************************************************************/ bool -TrackSmearer::smearTrack(O2Track &o2track, int pid) +TrackSmearer::smearTrack(O2Track &o2track, int pid, float nch) { auto pt = o2track.getPt(); + if (abs(pid) == 1000020030) { + pt *= 2.f; + } auto eta = o2track.getEta(); - auto lutEntry = getLUTEntry(pid, 0., 0., eta, pt); + auto lutEntry = getLUTEntry(pid, nch, 0., eta, pt); if (!lutEntry || !lutEntry->valid) return false; - - smearTrack(o2track, lutEntry); - return true; + return smearTrack(o2track, lutEntry); } /*****************************************************************/ @@ -117,7 +152,9 @@ TrackSmearer::smearTrack(Track &track, bool atDCA) O2Track o2track; TrackUtils::convertTrackToO2Track(track, o2track, atDCA); - if (!smearTrack(o2track, track.PID)) return false; + int pdg = track.PID; + float nch = mdNdEta; // use locally stored dNch/deta for the time being + if (!smearTrack(o2track, pdg, nch)) return false; TrackUtils::convertO2TrackToTrack(o2track, track, atDCA); return true; diff --git a/src/TrackSmearer.hh b/src/TrackSmearer.hh index ae69614..4f3bc70 100644 --- a/src/TrackSmearer.hh +++ b/src/TrackSmearer.hh @@ -23,28 +23,39 @@ public: ~TrackSmearer() = default; /** LUT methods **/ - bool loadTable(int pdg, const char *filename); + bool 
loadTable(int pdg, const char *filename, bool forceReload = false); + void useEfficiency(bool val) { mUseEfficiency = val; }; + void setWhatEfficiency(int val) { mWhatEfficiency = val; }; + lutHeader_t *getLUTHeader(int pdg) { return mLUTHeader[getIndexPDG(pdg)]; }; lutEntry_t *getLUTEntry(int pdg, float nch, float radius, float eta, float pt); - void smearTrack(O2Track &o2track, lutEntry_t *lutEntry); - bool smearTrack(O2Track &o2track, int pid); + bool smearTrack(O2Track &o2track, lutEntry_t *lutEntry); + bool smearTrack(O2Track &o2track, int pid, float nch); bool smearTrack(Track &track, bool atDCA = true); int getIndexPDG(int pdg) { switch(abs(pdg)) { - case 11: return 0; - case 13: return 1; - case 211: return 2; - case 321: return 3; - case 2212: return 4; - default: return 2; + case 11: return 0; // Electron + case 13: return 1; // Muon + case 211: return 2; // Pion + case 321: return 3; // Kaon + case 2212: return 4; // Proton + case 1000010020: return 5; // Deuteron + case 1000010030: return 6; // Triton + case 1000020030: return 7; // Helium3 + default: return 2; // Default: pion }; }; + + void setdNdEta(float val) { mdNdEta = val; }; protected: - - lutHeader_t *mLUTHeader[5] = {nullptr}; - lutEntry_t *mLUTEntry[5][1][1][100][100] = {nullptr}; + static constexpr unsigned int nLUTs = 8; // Number of LUT available + lutHeader_t *mLUTHeader[nLUTs] = {nullptr}; + lutEntry_t *****mLUTEntry[nLUTs] = {nullptr}; + bool mUseEfficiency = true; + int mWhatEfficiency = 1; + float mdNdEta = 1600.; }; diff --git a/src/TrackUtils.cc b/src/TrackUtils.cc index 4a627c4..db43b76 100644 --- a/src/TrackUtils.cc +++ b/src/TrackUtils.cc @@ -2,6 +2,8 @@ /// @email: preghenella@bo.infn.it #include "TrackUtils.hh" +#include "TParticle.h" +#include "TParticlePDG.h" namespace o2 { @@ -21,8 +23,8 @@ TrackUtils::makeO2Track(O2Track& o2track, std::array xyz, std::array params; - o2::utils::sincosf(track.Phi, s, c); - o2::utils::rotateZInv(0.1 * (atDCA ? 
track.Xd : track.X), 0.1 * (atDCA ? track.Yd : track.Y), x, params[0], s, c); + o2::math_utils::sincos(track.Phi, s, c); + o2::math_utils::rotateZInv(0.1 * (atDCA ? track.Xd : track.X), 0.1 * (atDCA ? track.Yd : track.Y), x, params[0], s, c); params[1] = 0.1 * (atDCA ? track.Zd : track.Z); params[2] = 0.; // since alpha = phi auto theta = 2.*TMath::ATan( TMath::Exp(-track.Eta) ); @@ -123,8 +125,8 @@ TrackUtils::convertGenParticleToO2Track(const GenParticle& particle, O2Track& o2 // float s, c, x; std::array params; - o2::utils::sincosf(track.Phi, s, c); - o2::utils::rotateZInv(0.1 * (atDCA ? track.Xd : track.X), 0.1 * (atDCA ? track.Yd : track.Y), x, params[0], s, c); + o2::math_utils::sincos(track.Phi, s, c); + o2::math_utils::rotateZInv(0.1 * (atDCA ? track.Xd : track.X), 0.1 * (atDCA ? track.Yd : track.Y), x, params[0], s, c); params[1] = 0.1 * (atDCA ? track.Zd : track.Z); params[2] = 0.; // since alpha = phi auto theta = 2.*TMath::ATan( TMath::Exp(-track.Eta) ); @@ -140,11 +142,24 @@ TrackUtils::convertGenParticleToO2Track(const GenParticle& particle, O2Track& o2 /*****************************************************************/ +void +TrackUtils::convertTParticleToO2Track(const TParticle& particle, O2Track& o2track) +{ + + std::array xyz = {static_cast(particle.Vx()), static_cast(particle.Vy()), static_cast(particle.Vz())}; + std::array ptetaphi = {static_cast(particle.Pt()), static_cast(particle.Eta()), static_cast(particle.Phi())}; + int charge = particle.GetPDG() ? particle.GetPDG()->Charge() / 3. 
: 0; + makeO2Track(o2track, xyz, ptetaphi, charge); + +} + +/*****************************************************************/ + bool TrackUtils::propagateToDCA(O2Track &o2track, std::array xyz, float Bz) { float sn, cs, alp = o2track.getAlpha(); - o2::utils::sincosf(alp, sn, cs); + o2::math_utils::sincos(alp, sn, cs); float x = o2track.getX(), y = o2track.getY(), snp = o2track.getSnp(), csp = std::sqrt((1.f - snp) * (1.f + snp)); float xv = xyz[0] * cs + xyz[1] * sn, yv = -xyz[0] * sn + xyz[1] * cs, zv = xyz[2]; x -= xv; @@ -162,7 +177,7 @@ TrackUtils::propagateToDCA(O2Track &o2track, std::array xyz, float Bz) o2::track::TrackParCov tmpT(o2track); // operate on the copy to recover after the failure alp += std::asin(sn); if (!tmpT.rotate(alp) || !tmpT.propagateTo(xv, Bz * 10.)) { - LOG(ERROR) << "failed to propagate to alpha=" << alp << " X=" << xv << " for vertex " + LOG(error) << "failed to propagate to alpha=" << alp << " X=" << xv << " for vertex " << xyz[0] << ' ' << xyz[1] << ' ' << xyz[2] << " | Track is: "; tmpT.print(); return false; diff --git a/src/TrackUtils.hh b/src/TrackUtils.hh index c89475a..8e58ef8 100644 --- a/src/TrackUtils.hh +++ b/src/TrackUtils.hh @@ -8,7 +8,9 @@ #include "classes/DelphesClasses.h" using O2Track = o2::track::TrackParCov; - + +class TParticle; + namespace o2 { namespace delphes @@ -23,6 +25,7 @@ public: static void convertTrackToO2Track(const Track &track, O2Track &o2track, bool atDCA); static void convertGenParticleToO2Track(const GenParticle &particle, O2Track &o2track); + static void convertTParticleToO2Track(const TParticle &particle, O2Track &o2track); static bool propagateToDCA(O2Track &o2track, std::array xyz, float Bz); diff --git a/src/lutCovm.hh b/src/lutCovm.hh index c4d304a..acceefd 100644 --- a/src/lutCovm.hh +++ b/src/lutCovm.hh @@ -1,6 +1,9 @@ /// @author: Roberto Preghenella /// @email: preghenella@bo.infn.it +#pragma once +#define LUTCOVM_VERSION 20210801 + struct map_t { int nbins = 1; float min = 0.; @@ 
-25,6 +28,7 @@ struct map_t { }; struct lutHeader_t { + int version = LUTCOVM_VERSION; int pdg = 0; float mass = 0.; float field = 0.; @@ -32,26 +36,36 @@ struct lutHeader_t { map_t radmap; map_t etamap; map_t ptmap; + bool check_version() { + return (version == LUTCOVM_VERSION); + }; void print() { - printf(" pdg: %d \n", pdg); - printf(" field: %f \n", field); - printf(" nchmap: "); nchmap.print(); - printf(" radmap: "); radmap.print(); - printf(" etamap: "); etamap.print(); - printf(" ptmap: "); ptmap.print(); + printf(" version: %d \n", version); + printf(" pdg: %d \n", pdg); + printf(" field: %f \n", field); + printf(" nchmap: "); nchmap.print(); + printf(" radmap: "); radmap.print(); + printf(" etamap: "); etamap.print(); + printf(" ptmap: "); ptmap.print(); }; }; struct lutEntry_t { + float nch = 0.; float eta = 0.; float pt = 0.; bool valid = false; + float eff = 0.; + float eff2 = 0.; + float itof = 0.; + float otof = 0.; float covm[15] = {0.}; float eigval[5] = {0.}; float eigvec[5][5] = {0.}; float eiginv[5][5] = {0.}; void print() { printf(" --- lutEntry: pt = %f, eta = %f (%s)\n", pt, eta, valid ? "valid" : "not valid"); + printf(" efficiency: %f\n", eff); printf(" covMatix: "); int k = 0; for (int i = 0; i < 5; ++i) { diff --git a/src/lutWrite.cc b/src/lutWrite.cc index 96c872a..f489a42 100644 --- a/src/lutWrite.cc +++ b/src/lutWrite.cc @@ -1,59 +1,63 @@ /// @author: Roberto Preghenella /// @email: preghenella@bo.infn.it +#ifndef lutWrite_CC +#define lutWrite_CC #include "lutCovm.hh" #include "fwdRes/fwdRes.C" DetectorK fat; void diagonalise(lutEntry_t &lutEntry); +static float etaMaxBarrel = 1.75; -void -fatInit(float field = 0.5, float rmin = 100.) +bool usePara = true; // use fwd parameterisation +bool useDipole = false; // use dipole i.e. flat parametrization for efficiency and momentum resolution +bool useFlatDipole = false; // use dipole i.e. 
flat parametrization outside of the barrel + +void printLutWriterConfiguration() { - fat.SetBField(field); - // new ideal Pixel properties? - Double_t x0IB = 0.0005; - Double_t x0OB = 0.005; - Double_t xrhoIB = 1.1646e-02; // 50 mum Si - Double_t xrhoOB = 1.1646e-01; // 500 mum Si - - Double_t resRPhiIB = 0.0001; - Double_t resZIB = 0.0001; - Double_t resRPhiOB = 0.0005; - Double_t resZOB = 0.0005; - Double_t eff = 0.98; - fat.AddLayer((char*)"vertex", 0.0, 0, 0); // dummy vertex for matrix calculation - fat.AddLayer((char*)"bpipe", 1.6, 0.0014, 9.24e-02); // 500 mum Be | nominal R5? - fat.AddLayer((char*)"ddd1", 1.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff); - fat.AddLayer((char*)"ddd2", 2.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff); - //fat.AddLayer((char*)"bpipe", 2.9 , 0.0014, 9.24e-02 ); // 500 mum Be - fat.AddLayer((char*)"ddd3", 3.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff); - fat.AddLayer((char*)"ddd3a", 8.0, x0OB, xrhoOB, resRPhiOB, resZOB, eff); - fat.AddLayer((char*)"ddd4", 20., x0OB, xrhoOB, resRPhiOB, resZOB, eff); - fat.AddLayer((char*)"ddd5", 25., x0OB, xrhoOB, resRPhiOB, resZOB, eff); - fat.AddLayer((char*)"ddd7", 40., x0OB, xrhoOB, resRPhiOB, resZOB, eff); - fat.AddLayer((char*)"ddd8", 55., x0OB, xrhoOB, resRPhiOB, resZOB, eff); - fat.AddLayer((char*)"dddY", 80., x0OB, xrhoOB, resRPhiOB, resZOB, eff); - fat.AddLayer((char*)"dddX", 100., x0OB, xrhoOB, resRPhiOB, resZOB, eff); - fat.SetAtLeastHits(4); - fat.SetAtLeastCorr(4); - fat.SetAtLeastFake(0); - // - fat.SetMinRadTrack(rmin); - // - fat.PrintLayout(); + std::cout << " --- Printing configuration of LUT writer --- " << std::endl; + std::cout << " -> etaMaxBarrel = " << etaMaxBarrel << std::endl; + std::cout << " -> usePara = " << usePara << std::endl; + std::cout << " -> useDipole = " << useDipole << std::endl; + std::cout << " -> useFlatDipole = " << useFlatDipole << std::endl; } bool -fatSolve(float *covm, float pt = 0.1, float eta = 0.0, float mass = 0.13957000) +fatSolve(lutEntry_t &lutEntry, float 
pt = 0.1, float eta = 0.0, float mass = 0.13957000, int itof = 0, int otof = 0, int q = 1) { - int q = 1; + lutEntry.valid = false; + + // solve track + if (q > 1) mass = -mass; TrackSol tr(1, pt, eta, q, mass); - bool retval = fat.SolveTrack(tr); - if (!retval) return false; - AliExternalTrackParam trCopy = *((AliExternalTrackParam*)tr.fTrackCmb[0]); - for (int i = 0; i < 15; ++i) - covm[i] = trCopy.GetCovariance()[i]; + if (!fat.SolveTrack(tr)) return false; + AliExternalTrackParam *trPtr = (AliExternalTrackParam*)tr.fTrackCmb.At(0); + if (!trPtr) return false; + + lutEntry.valid = true; + lutEntry.itof = fat.GetGoodHitProb(itof); + lutEntry.otof = fat.GetGoodHitProb(otof); + for (int i = 0; i < 15; ++i) lutEntry.covm[i] = trPtr->GetCovariance()[i]; + + // define the efficiency + auto totfake = 0.; + lutEntry.eff = 1.; + for (int i = 1; i < 20; ++i) { + auto igoodhit = fat.GetGoodHitProb(i); + if (igoodhit <= 0. || i == itof || i == otof) continue; + lutEntry.eff *= igoodhit; + auto pairfake = 0.; + for (int j = i + 1; j < 20; ++j) { + auto jgoodhit = fat.GetGoodHitProb(j); + if (jgoodhit <= 0. || j == itof || j == otof) continue; + pairfake = (1. - igoodhit) * (1. - jgoodhit); + break; + } + totfake += pairfake; + } + lutEntry.eff2 = (1. 
- totfake); + return true; } @@ -64,27 +68,94 @@ fwdSolve(float *covm, float pt = 0.1, float eta = 0.0, float mass = 0.13957000) return true; } +bool +fwdPara(lutEntry_t &lutEntry, float pt = 0.1, float eta = 0.0, float mass = 0.13957000, float Bfield = 0.5) +{ + lutEntry.valid = false; + + // parametrised forward response; interpolates between FAT at eta = 1.75 and a fixed parametrisation at eta = 4; only diagonal elements + if (fabs(eta) < etaMaxBarrel || fabs(eta) > 4) + return false; + + if (!fatSolve(lutEntry, pt, etaMaxBarrel, mass)) return false; + float covmbarrel[15] = {0}; + for (int i = 0; i < 15; ++i) { + covmbarrel[i] = lutEntry.covm[i]; + } + + // parametrisation at eta = 4 + double beta = 1./sqrt(1+mass*mass/pt/pt/cosh(eta)/cosh(eta)); + float dca_pos = 2.5e-4/sqrt(3); // 2.5 micron/sqrt(3) + float r0 = 0.5; // layer 0 radius [cm] + float r1 = 1.3; + float r2 = 2.5; + float x0layer = 0.001; // material budget (rad length) per layer + double sigma_alpha = 0.0136/beta/pt*sqrt(x0layer*cosh(eta))*(1+0.038*log(x0layer*cosh(eta))); + double dcaxy_ms = sigma_alpha*r0*sqrt(1+r1*r1/(r2-r0)/(r2-r0)); + double dcaxy2 = dca_pos*dca_pos+dcaxy_ms*dcaxy_ms; + + double dcaz_ms = sigma_alpha*r0*cosh(eta); + double dcaz2 = dca_pos*dca_pos+dcaz_ms*dcaz_ms; + + float Leta = 2.8/sinh(eta)-0.01*r0; // m + double relmomres_pos = 10e-6*pt/0.3/Bfield/Leta/Leta*sqrt(720./15.); + + float relmomres_barrel = sqrt(covmbarrel[14])*pt; + float Router = 1; // m + float relmomres_pos_barrel = 10e-6*pt/0.3/Bfield/Router/Router/sqrt(720./15.); + float relmomres_MS_barrel = sqrt(relmomres_barrel*relmomres_barrel-relmomres_pos_barrel*relmomres_pos_barrel); + + // interpolate MS contrib (rel resolution 0.4 at eta = 4) + float relmomres_MS_eta4 = 0.4/beta*0.5/Bfield; + float relmomres_MS = relmomres_MS_eta4*pow(relmomres_MS_eta4/relmomres_MS_barrel,(fabs(eta)-4.)/(4.-etaMaxBarrel)); + float momres_tot = pt*sqrt(relmomres_pos*relmomres_pos + relmomres_MS*relmomres_MS); // total absolute mom 
reso + + // Fill cov matrix diag + for (int i = 0; i < 15; ++i) + lutEntry.covm[i] = 0; + + lutEntry.covm[0] = covmbarrel[0]; + if (dcaxy2 > lutEntry.covm[0]) lutEntry.covm[0] = dcaxy2; + lutEntry.covm[2] = covmbarrel[2]; + if (dcaz2 > lutEntry.covm[2]) lutEntry.covm[2] = dcaz2; + lutEntry.covm[5] = covmbarrel[5]; // sigma^2 sin(phi) + lutEntry.covm[9] = covmbarrel[9]; // sigma^2 tanl + lutEntry.covm[14] = momres_tot*momres_tot/pt/pt/pt/pt; // sigma^2 1/pt + return true; +} + void -lutWrite(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.2) +lutWrite(const char* filename = "lutCovm.dat", int pdg = 211, float field = 0.2, int itof = 0, int otof = 0) { - // init FAT - fatInit(field); - + if (useFlatDipole && useDipole) { + Printf("Both dipole and dipole flat flags are on, please use only one of them"); + return; + } + // output file ofstream lutFile(filename, std::ofstream::binary); + if (!lutFile.is_open()) { + Printf("Did not manage to open output file!!"); + return; + } // write header lutHeader_t lutHeader; // pid lutHeader.pdg = pdg; lutHeader.mass = TDatabasePDG::Instance()->GetParticle(pdg)->Mass(); + const int q = std::abs(TDatabasePDG::Instance()->GetParticle(pdg)->Charge()) / 3; + if (q <= 0) { + Printf("Negative or null charge (%f) for pdg code %i. 
Fix the charge!", TDatabasePDG::Instance()->GetParticle(pdg)->Charge(), pdg); + return; + } lutHeader.field = field; // nch lutHeader.nchmap.log = true; - lutHeader.nchmap.nbins = 1; - lutHeader.nchmap.min = 0.; - lutHeader.nchmap.max = 4.; + lutHeader.nchmap.nbins = 20; + lutHeader.nchmap.min = 0.5; + lutHeader.nchmap.max = 3.5; // radius lutHeader.radmap.log = false; lutHeader.radmap.nbins = 1; @@ -97,7 +168,7 @@ lutWrite(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.2) lutHeader.etamap.max = 4.; // pt lutHeader.ptmap.log = true; - lutHeader.ptmap.nbins = 100; + lutHeader.ptmap.nbins = 200; lutHeader.ptmap.min = -2; lutHeader.ptmap.max = 2.; lutFile.write(reinterpret_cast(&lutHeader), sizeof(lutHeader)); @@ -110,41 +181,66 @@ lutWrite(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.2) lutEntry_t lutEntry; // write entries - for (int inch = 0; inch < nnch; ++inch) - for (int irad = 0; irad < nrad; ++irad) + for (int inch = 0; inch < nnch; ++inch) { + auto nch = lutHeader.nchmap.eval(inch); + lutEntry.nch = nch; + fat.SetdNdEtaCent(nch); + std::cout << " --- setting FAT dN/deta: " << nch << std::endl; + for (int irad = 0; irad < nrad; ++irad) { for (int ieta = 0; ieta < neta; ++ieta) { - auto eta = lutHeader.etamap.eval(ieta); - lutEntry.eta = lutHeader.etamap.eval(ieta); - for (int ipt = 0; ipt < npt; ++ipt) { - lutEntry.pt = lutHeader.ptmap.eval(ipt); - lutEntry.valid = true; - if (fabs(eta) < 2.) 
{ - printf(" --- fatSolve: pt = %f, eta = %f, mass = %f, field=%f \n", lutEntry.pt, lutEntry.eta, lutHeader.mass, lutHeader.field); - if (!fatSolve(lutEntry.covm, lutEntry.pt, lutEntry.eta, lutHeader.mass)) { - printf(" --- fatSolve: error \n"); - lutEntry.valid = false; - for (int i = 0; i < 15; ++i) - lutEntry.covm[i] = 0.; - } - } - else { - printf(" --- fwdSolve: pt = %f, eta = %f, mass = %f, field=%f \n", lutEntry.pt, lutEntry.eta, lutHeader.mass, lutHeader.field); - if (!fwdSolve(lutEntry.covm, lutEntry.pt, lutEntry.eta, lutHeader.mass)) { - printf(" --- fwdSolve: error \n"); - lutEntry.valid = false; - for (int i = 0; i < 15; ++i) - lutEntry.covm[i] = 0.; - } - } - diagonalise(lutEntry); - lutFile.write(reinterpret_cast(&lutEntry), sizeof(lutEntry_t)); - }} - - + auto eta = lutHeader.etamap.eval(ieta); + lutEntry.eta = lutHeader.etamap.eval(ieta); + for (int ipt = 0; ipt < npt; ++ipt) { + lutEntry.pt = lutHeader.ptmap.eval(ipt); + lutEntry.valid = true; + if (fabs(eta) <= etaMaxBarrel) { // full lever arm ends at etaMaxBarrel + // printf(" --- fatSolve: pt = %f, eta = %f, mass = %f, field=%f \n", lutEntry.pt, lutEntry.eta, lutHeader.mass, lutHeader.field); + if (!fatSolve(lutEntry, lutEntry.pt, lutEntry.eta, lutHeader.mass, itof, otof, q)) { + // printf(" --- fatSolve: error \n"); + lutEntry.valid = false; + lutEntry.eff = 0.; + lutEntry.eff2 = 0.; + for (int i = 0; i < 15; ++i) + lutEntry.covm[i] = 0.; + } + } else { + // printf(" --- fwdSolve: pt = %f, eta = %f, mass = %f, field=%f \n", lutEntry.pt, lutEntry.eta, lutHeader.mass, lutHeader.field); + lutEntry.eff = 1.; + lutEntry.eff2 = 1.; + bool retval = true; + if (useFlatDipole) { // Using the parametrization at the border of the barrel + retval = fatSolve(lutEntry, lutEntry.pt, etaMaxBarrel, lutHeader.mass, itof, otof, q); + } else if (usePara) { + retval = fwdPara(lutEntry, lutEntry.pt, lutEntry.eta, lutHeader.mass, field); + } else { + retval = fwdSolve(lutEntry.covm, lutEntry.pt, lutEntry.eta, 
lutHeader.mass); + } + if (useDipole) { // Using the parametrization at the border of the barrel only for efficiency and momentum resolution + lutEntry_t lutEntryBarrel; + retval = fatSolve(lutEntryBarrel, lutEntry.pt, etaMaxBarrel, lutHeader.mass, itof, otof, q); + lutEntry.valid = lutEntryBarrel.valid; + lutEntry.covm[14] = lutEntryBarrel.covm[14]; + lutEntry.eff = lutEntryBarrel.eff; + lutEntry.eff2 = lutEntryBarrel.eff2; + } + if (!retval) { + // printf(" --- fwdSolve: error \n"); + lutEntry.valid = false; + for (int i = 0; i < 15; ++i) + lutEntry.covm[i] = 0.; + } + } + diagonalise(lutEntry); + lutFile.write(reinterpret_cast(&lutEntry), sizeof(lutEntry_t)); + } + } + } + } + lutFile.close(); } -void diagonalise(lutEntry_t &lutEntry) +void diagonalise(lutEntry_t& lutEntry) { TMatrixDSym m(5); double fcovm[5][5]; @@ -152,8 +248,8 @@ void diagonalise(lutEntry_t &lutEntry) for (int j = 0; j < i + 1; ++j, ++k) { fcovm[i][j] = lutEntry.covm[k]; fcovm[j][i] = lutEntry.covm[k]; - } - m.SetMatrixArray((double *)fcovm); + } + m.SetMatrixArray((double*)fcovm); TMatrixDSymEigen eigen(m); // eigenvalues vector TVectorD eigenVal = eigen.GetEigenValues(); @@ -169,5 +265,6 @@ void diagonalise(lutEntry_t &lutEntry) for (int i = 0; i < 5; ++i) for (int j = 0; j < 5; ++j) lutEntry.eiginv[i][j] = eigenVec[i][j]; - } + +#endif diff --git a/src/lutWrite.default.cc b/src/lutWrite.default.cc new file mode 100644 index 0000000..3639d0a --- /dev/null +++ b/src/lutWrite.default.cc @@ -0,0 +1,54 @@ +/// @author: Roberto Preghenella +/// @email: preghenella@bo.infn.it + +#include "lutWrite.cc" + +void +fatInit_default(float field = 0.5, float rmin = 100.) +{ + fat.SetBField(field); + fat.SetdNdEtaCent(400.); + // new ideal Pixel properties? 
+ Double_t x0IB = 0.0005; + Double_t x0OB = 0.005; + Double_t xrhoIB = 1.1646e-02; // 50 mum Si + Double_t xrhoOB = 1.1646e-01; // 500 mum Si + + Double_t resRPhiIB = 0.0001; + Double_t resZIB = 0.0001; + Double_t resRPhiOB = 0.0005; + Double_t resZOB = 0.0005; + Double_t eff = 0.98; + fat.AddLayer((char*)"vertex", 0.0, 0, 0); // dummy vertex for matrix calculation + fat.AddLayer((char*)"bpipe", 1.6, 0.0014, 9.24e-02); // 500 mum Be | nominal R5? + fat.AddLayer((char*)"ddd1", 1.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + fat.AddLayer((char*)"ddd2", 2.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + //fat.AddLayer((char*)"bpipe", 2.9 , 0.0014, 9.24e-02 ); // 500 mum Be + fat.AddLayer((char*)"ddd3", 3.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + fat.AddLayer((char*)"ddd3a", 8.0, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"ddd4", 20., x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"ddd5", 25., x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"ddd7", 40., x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"ddd8", 55., x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"dddY", 80., x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"dddX", 100., x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.SetAtLeastHits(4); + fat.SetAtLeastCorr(4); + fat.SetAtLeastFake(0); + // + fat.SetMinRadTrack(rmin); + // + fat.PrintLayout(); +} + +void +lutWrite_default(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.5, float rmin = 100.) +{ + + // init FAT + fatInit_default(field, rmin); + // write + lutWrite(filename, pdg, field); + +} + diff --git a/src/lutWrite.geometry_v1.cc b/src/lutWrite.geometry_v1.cc new file mode 100644 index 0000000..cf80241 --- /dev/null +++ b/src/lutWrite.geometry_v1.cc @@ -0,0 +1,61 @@ +/// @author: Roberto Preghenella +/// @email: preghenella@bo.infn.it + +#include "lutWrite.cc" + +float scale = 1.; + +void +fatInit_geometry_v1(float field = 0.5, float rmin = 100.) 
+{ + fat.SetBField(field); + // new ideal Pixel properties? + Double_t x0IB = 0.001; + Double_t x0OB = 0.01; + Double_t xrhoIB = 2.3292e-02; // 100 mum Si + Double_t xrhoOB = 2.3292e-01; // 1000 mum Si + + Double_t resRPhiIB = 0.00025; + Double_t resZIB = 0.00025; + Double_t resRPhiOB = 0.00100; + Double_t resZOB = 0.00100; + Double_t eff = 0.98; + fat.AddLayer((char*)"vertex", 0.0, 0, 0); // dummy vertex for matrix calculation + fat.AddLayer((char*)"bpipe0", 0.48 * scale, 0.00042, 2.772e-02); // 150 mum Be + // + fat.AddLayer((char*)"B00", 0.50 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + fat.AddLayer((char*)"B01", 1.20 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + fat.AddLayer((char*)"B02", 2.50 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + // + fat.AddLayer((char*)"bpipe1", 3.7 * scale, 0.0014, 9.24e-02); // 500 mum Be + // + fat.AddLayer((char*)"B03", 3.75 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + // + fat.AddLayer((char*)"B04", 7.00 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"B05", 12.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"B06", 20.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"B07", 30.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"B08", 45.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"B09", 60.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"B10", 80.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"B11", 100. * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.SetAtLeastHits(4); + fat.SetAtLeastCorr(4); + fat.SetAtLeastFake(0); + // + fat.SetMinRadTrack(rmin); + // + fat.PrintLayout(); +} + +void +lutWrite_geometry_v1(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.5, float rmin = 100.) 
+{ + + // init FAT + fatInit_geometry_v1(field, rmin); + // write + lutWrite(filename, pdg, field); + +} + diff --git a/src/lutWrite.geometry_v2.cc b/src/lutWrite.geometry_v2.cc new file mode 100644 index 0000000..84867e3 --- /dev/null +++ b/src/lutWrite.geometry_v2.cc @@ -0,0 +1,66 @@ +/// @author: Roberto Preghenella +/// @email: preghenella@bo.infn.it + +#include "lutWrite.cc" + +float scale = 1.; + +void +fatInit_geometry_v2(float field = 0.5, float rmin = 100.) +{ + fat.SetBField(field); + // new ideal Pixel properties? + Double_t x0IB = 0.001; + Double_t x0OB = 0.01; + // Double_t x0IB = 0; + // Double_t x0OB = 0; + + + Double_t xrhoIB = 2.3292e-02; // 100 mum Si + Double_t xrhoOB = 2.3292e-01; // 1000 mum Si + Double_t resRPhiIB = 0.00025; // 2.5 mum + Double_t resZIB = 0.00025; // 2.5 mum + Double_t resRPhiOB = 0.00100; + Double_t resZOB = 0.00100; + + Double_t eff = 0.98; + + fat.AddLayer((char*)"vertex", 0.0, 0, 0); // dummy vertex for matrix calculat ion + fat.AddLayer((char*)"bpipe0", 0.48 * scale, 0.00042, 2.772e-02); // 150 mum Be + + fat.AddLayer((char*)"B00", 0.50 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + fat.AddLayer((char*)"B01", 1.20 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + fat.AddLayer((char*)"B02", 2.50 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + + fat.AddLayer((char*)"bpipe1", 3.7 * scale, 0.0014, 9.24e-02); // 500 mum Be + + fat.AddLayer((char*)"B03", 3.75 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + + fat.AddLayer((char*)"B04", 7.00 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"B05", 12.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"B06", 20.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"B07", 30.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"B08", 45.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"B09", 60.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"B10", 
80.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + + fat.SetAtLeastHits(4); + fat.SetAtLeastCorr(4); + fat.SetAtLeastFake(0); + // + fat.SetMinRadTrack(rmin); + // + fat.PrintLayout(); +} + +void +lutWrite_geometry_v2(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.5, float rmin = 100.) +{ + + // init FAT + fatInit_geometry_v2(field, rmin); + // write + lutWrite(filename, pdg, field); + +} + diff --git a/src/lutWrite.geometry_v4.cc b/src/lutWrite.geometry_v4.cc new file mode 100644 index 0000000..06a99f5 --- /dev/null +++ b/src/lutWrite.geometry_v4.cc @@ -0,0 +1,67 @@ +#include "lutWrite.cc" + +float scale = 1.; + +void +fatInit_geometry_v4(float field = 0.5, float rmin = 100.) +{ + fat.SetBField(field); + // new ideal Pixel properties? + Double_t x0IB = 0.001; + Double_t x0OB = 0.005; + Double_t xrhoIB = 1.1646e-02; // 50 mum Si + Double_t xrhoOB = 1.1646e-01; // 500 mum Si + + Double_t resRPhiIB = 0.00025; + Double_t resZIB = 0.00025; + Double_t resRPhiOB = 0.00050; + Double_t resZOB = 0.00050; + Double_t eff = 0.98; + fat.SetBField(0.5); // Tesla + fat.SetIntegrationTime(100.e-6); // 100 ns (as in LoI) + fat.SetMaxRadiusOfSlowDetectors(0.00001); // no slow detectors + fat.SetAvgRapidity(0.0); + fat.SetLhcUPCscale(1.); + + fat.AddLayer((char*)"vertex", 0.0, 0, 0); // dummy vertex for matrix calculat ion + // fat.AddLayer((char*)"bpipe0", 0.48 * scale, 0.00042, 2.772e-02); // 150 mum Be + + fat.AddLayer((char*)"ddd0", 0.50 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + fat.AddLayer((char*)"ddd1", 1.20 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + fat.AddLayer((char*)"ddd2", 2.50 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + + // fat.AddLayer((char*)"bpipe1", 5.7 * scale, 0.0014, 9.24e-02); // 500 mum Be + + fat.AddLayer((char*)"ddd3", 7.00 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + + fat.AddLayer((char*)"ddd4", 10.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"ddd5", 13.0 * scale, x0OB, xrhoOB, 
resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"ddd6", 16.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + + fat.AddLayer((char*)"ddd7", 25.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"ddd8", 40.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"ddd9", 45.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddTPC(0.1, 0.1); + + fat.SetAtLeastHits(5); + fat.SetAtLeastCorr(4); + fat.SetAtLeastFake(0); + // + fat.SetMinRadTrack(rmin); + // + // fat.PrintLayout(); + // fat.SolveViaBilloir(0); + // fat.MakeStandardPlots(0,2,2,"LutQA"); +} + +void +lutWrite_geometry_v4(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.5, float rmin = 100.) +{ + + // init FAT + fatInit_geometry_v4(field, rmin); + // write + lutWrite(filename, pdg, field); + +} + diff --git a/src/lutWrite.its1.cc b/src/lutWrite.its1.cc new file mode 100644 index 0000000..b8f59b5 --- /dev/null +++ b/src/lutWrite.its1.cc @@ -0,0 +1,31 @@ +/// @author Nicolo' Jacazio +/// @email: nicolo.jacazio@cern.ch +/// @since 30/10/2020 +/// @file lutWrite.its1.cc + +#include "lutWrite.cc" + +void fatInit_its1(float field = 0.5, float rmin = 100.) +{ + + fat.SetBField(field); + fat.SetdNdEtaCent(400.); + fat.MakeAliceCurrent(); + fat.SetAtLeastHits(4); + fat.SetAtLeastCorr(4); + fat.SetAtLeastFake(0); + // + fat.SetMinRadTrack(rmin); + // + fat.PrintLayout(); +} + +void lutWrite_its1(const char* filename = "lutCovm.dat", int pdg = 211, + float field = 0.2, float rmin = 20.) +{ + + // init FAT + fatInit_its1(field, rmin); + // write + lutWrite(filename, pdg, field); +} diff --git a/src/lutWrite.its2.cc b/src/lutWrite.its2.cc new file mode 100644 index 0000000..4880082 --- /dev/null +++ b/src/lutWrite.its2.cc @@ -0,0 +1,60 @@ +/// @author Nicolo' Jacazio +/// @email: nicolo.jacazio@cern.ch +/// @since 30/10/2020 +/// @file lutWrite.its2.cc + +#include "lutWrite.cc" + +void fatInit_its2(float field = 0.5, float rmin = 100.) 
+{ + fat.SetBField(field); + fat.SetdNdEtaCent(400.); + + const Double_t x0BP = 0.00224; // 800 mum Be + const Double_t xrhoBP = 1.4784e-1; // 800 mum Be + + fat.AddLayer((char*)"vertex", 0, 0); // dummy vertex for matrix calculation + fat.AddLayer((char*)"bpipe", 1.98, x0BP, xrhoBP); // beam pipe + + // X/X0 of the inner barrel, values taken from the TDR of ITS upgrade + const Double_t x0IB = 0.0035; // X/X0 of the inner barrel (first three layers) + // X/X0 of the outer barrel, values taken from the TDR of ITS upgrade + const Double_t x0OB = 0.008; // X/X0 of the outer barrel + // Resolution in Rphi values taken from A. Kalweit (table presented at the + // Physics Forum on 24th Feb. 2021) the value is in the middle of 15 and 30 + // micron pitch + const Double_t resRPhi = 0.0006; + // Resolution in Z, values taken from A. Kalweit (table presented at the + // Physics Forum on 24th Feb. 2021) the value is in the middle of 15 and 30 + // micron pitch + const Double_t resZ = 0.0006; + const Double_t eff = 0.98; // Efficiency (lower limit) + const Double_t xrhoIBOB = 1.1646e-01; // Surface density for 500 mum thick Si + + fat.AddLayer((char*)"il0", 2.3, x0IB, xrhoIBOB, resRPhi, resZ, eff); + fat.AddLayer((char*)"il1", 3.1, x0IB, xrhoIBOB, resRPhi, resZ, eff); + fat.AddLayer((char*)"il2", 3.9, x0IB, xrhoIBOB, resRPhi, resZ, eff); + fat.AddLayer((char*)"ml3", 19.4, x0OB, xrhoIBOB, resRPhi, resZ, eff); + fat.AddLayer((char*)"ml4", 24.7, x0OB, xrhoIBOB, resRPhi, resZ, eff); + fat.AddLayer((char*)"ol5", 35.3, x0OB, xrhoIBOB, resRPhi, resZ, eff); + fat.AddLayer((char*)"ol6", 40.5, x0OB, xrhoIBOB, resRPhi, resZ, eff); + + fat.AddTPC(0.1, 0.1); // TPC + fat.SetAtLeastHits(4); + fat.SetAtLeastCorr(4); + fat.SetAtLeastFake(0); + // + fat.SetMinRadTrack(rmin); + // + fat.PrintLayout(); +} + +void lutWrite_its2(const char* filename = "lutCovm.dat", int pdg = 211, + float field = 0.2, float rmin = 20.) 
+{ + + // init FAT + fatInit_its2(field, rmin); + // write + lutWrite(filename, pdg, field); +} diff --git a/src/lutWrite.its3.cc b/src/lutWrite.its3.cc new file mode 100644 index 0000000..ee8b10a --- /dev/null +++ b/src/lutWrite.its3.cc @@ -0,0 +1,84 @@ +/// @author Nicolo' Jacazio +/// @email: nicolo.jacazio@cern.ch +/// @since 30/10/2020 +/// @file lutWrite.its3.cc + +#include "lutWrite.cc" + +// Adds foam spacers between inner layers +const bool add_foam = true; +// Puts foam spacers mid-way betwen layers +const bool foam_middle = false; + +void fatInit_its3(float field = 0.5, float rmin = 100.) +{ + fat.SetBField(field); + fat.SetdNdEtaCent(400.); + + const Double_t x0BP = 0.0014; // 500 mum Be + const Double_t xrhoBP = 9.24e-02; // 500 mum Be + + // dummy vertex for matrix calculation + fat.AddLayer((char*)"vertex", 0.0, 0, 0); + fat.AddLayer((char*)"bpipe", 1.625, x0BP, xrhoBP); // 500 mum Be beam pipe + + // X/X0 of the inner barrel first three layers), values taken from the LOI of + // the ITS3 + const Double_t x0IB = 0.0005; + // X/X0 of the outer barrel, values taken from the TDR of ITS upgrade + const Double_t x0OB = 0.008; + // Resolution in Rphi values taken from A. Kalweit (table presented at the + // Physics Forum on 24th Feb. 2021) the value is in the middle of 15 and 30 + // micron pitch + const Double_t resRPhi = 0.0006; + // Resolution in Z, values taken from A. Kalweit (table presented at the + // Physics Forum on 24th Feb. 
2021) the value is in the middle of 15 and 30 + // micron pitch + const Double_t resZ = 0.0006; + const Double_t eff = 0.98; // Efficiency (lower limit) + const Double_t xrhoIB = 1.1646e-02; // Surface density for 50 mum thick Si + const Double_t xrhoOB = 1.1646e-01; // Surface density for 500 mum thick Si + + const Double_t x0Foam = 0.0008; // X0 ~710cm for 0.6cm thick foam + float foam_radius = 9.370 * x0IB; // width of a chip + + fat.AddLayer((char*)"ddd1", 1.8, x0IB, xrhoIB, resRPhi, resZ, eff); + if (foam_middle) + foam_radius = (2.4 - 1.8) / 2; + if (add_foam) + fat.AddLayer((char*)"foam1", 1.8 + foam_radius, x0Foam); // Foam spacer + fat.AddLayer((char*)"ddd2", 2.4, x0IB, xrhoIB, resRPhi, resZ, eff); + if (foam_middle) + foam_radius = (3.0 - 2.4) / 2; + if (add_foam) + fat.AddLayer((char*)"foam2", 2.4 + foam_radius, x0Foam); // Foam spacer + fat.AddLayer((char*)"ddd3", 3.0, x0IB, xrhoIB, resRPhi, resZ, eff); + if (foam_middle) + foam_radius = (19.4 - 3.0) / 2; + if (add_foam) + fat.AddLayer((char*)"foam3", 3.0 + foam_radius, x0Foam); // Foam spacer + // Structural cylinder? + fat.AddLayer((char*)"ddd4", 19.4, x0OB, xrhoOB, resRPhi, resZ, eff); + fat.AddLayer((char*)"ddd5", 24.7, x0OB, xrhoOB, resRPhi, resZ, eff); + fat.AddLayer((char*)"ddd6", 35.3, x0OB, xrhoOB, resRPhi, resZ, eff); + fat.AddLayer((char*)"ddd7", 40.5, x0OB, xrhoOB, resRPhi, resZ, eff); + + fat.AddTPC(0.1, 0.1); // TPC + fat.SetAtLeastHits(4); + fat.SetAtLeastCorr(4); + fat.SetAtLeastFake(0); + // + fat.SetMinRadTrack(rmin); + // + fat.PrintLayout(); +} + +void lutWrite_its3(const char* filename = "lutCovm.dat", int pdg = 211, + float field = 0.2, float rmin = 20.) 
+{ + + // init FAT + fatInit_its3(field, rmin); + // write + lutWrite(filename, pdg, field); +} diff --git a/src/lutWrite.scenario1.cc b/src/lutWrite.scenario1.cc new file mode 100644 index 0000000..6021c03 --- /dev/null +++ b/src/lutWrite.scenario1.cc @@ -0,0 +1,57 @@ +/// @author: Roberto Preghenella +/// @email: preghenella@bo.infn.it + +#include "lutWrite.cc" + +void +fatInit_scenario1(float field = 0.5, float rmin = 100.) +{ + fat.SetBField(field); + fat.SetdNdEtaCent(400.); + // new ideal Pixel properties? + Double_t x0IB = 0.0005; + Double_t x0OB = 0.005; + Double_t xrhoIB = 1.1646e-02; // 50 mum Si + Double_t xrhoOB = 1.1646e-01; // 500 mum Si + + Double_t x0BP = 0.0014; // 500 mum Be + Double_t xrhoBP = 9.24e-02; // 500 mum Be + + Double_t resRPhiIB = 0.0001; + Double_t resZIB = 0.0001; + Double_t resRPhiOB = 0.0005; + Double_t resZOB = 0.0005; + Double_t eff = 0.98; + fat.AddLayer((char*)"vertex", 0.0, 0, 0); // dummy vertex for matrix calculation + fat.AddLayer((char*)"bpipe", 1.5, x0BP, xrhoBP); + fat.AddLayer((char*)"ddd1", 1.5250, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + fat.AddLayer((char*)"ddd2", 2.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + //fat.AddLayer((char*)"bpipe", 2.9 , 0.0014, 9.24e-02 ); // 500 mum Be + fat.AddLayer((char*)"ddd3", 3.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff); + // + fat.AddLayer((char*)"ddd3a", 8.0, x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"ddd4", 20., x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"ddd5", 25., x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"ddd7", 40., x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"ddd8", 55., x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"dddY", 80., x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.AddLayer((char*)"dddX", 100., x0OB, xrhoOB, resRPhiOB, resZOB, eff); + fat.SetAtLeastHits(4); + fat.SetAtLeastCorr(4); + fat.SetAtLeastFake(0); + // + fat.SetMinRadTrack(rmin); + // + fat.PrintLayout(); +} + +void 
+lutWrite_scenario1(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.2, float rmin = 20.)
+{
+
+  // init FAT
+  fatInit_scenario1(field, rmin);
+  // write
+  lutWrite(filename, pdg, field);
+
+}
diff --git a/src/lutWrite.scenario2.cc b/src/lutWrite.scenario2.cc
new file mode 100644
index 0000000..cf57ecd
--- /dev/null
+++ b/src/lutWrite.scenario2.cc
@@ -0,0 +1,59 @@
+/// @author: Roberto Preghenella
+/// @email: preghenella@bo.infn.it
+
+#include "lutWrite.cc"
+
+void
+fatInit_scenario2(float field = 0.5, float rmin = 100.)
+{
+  fat.SetBField(field);
+  fat.SetdNdEtaCent(400.);
+  // new ideal Pixel properties?
+  Double_t x0IB = 0.0005;
+  Double_t x0OB = 0.005;
+  Double_t xrhoIB = 1.1646e-02; // 50 mum Si
+  Double_t xrhoOB = 1.1646e-01; // 500 mum Si
+
+  Double_t x0BP = 0.0014; // 500 mum Be
+  Double_t xrhoBP = 9.24e-02; // 500 mum Be
+  Double_t x0BF = 0.0004; // 150 mum Be
+  Double_t xrhoBF = 2.77e-02; // 150 mum Be
+
+  Double_t resRPhiIB = 0.0001;
+  Double_t resZIB = 0.0001;
+  Double_t resRPhiOB = 0.0005;
+  Double_t resZOB = 0.0005;
+  Double_t eff = 0.98;
+  fat.AddLayer((char*)"vertex", 0.0, 0, 0); // dummy vertex for matrix calculation
+  fat.AddLayer((char*)"bfoil", 1.5, x0BF, xrhoBF);
+  fat.AddLayer((char*)"ddd1", 1.5075, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"ddd2", 2.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"ddd3", 3.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"bpipe", 3.9, x0BP, xrhoBP); // 500 mum Be
+  //
+  fat.AddLayer((char*)"ddd3a", 8.0, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd4", 20., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd5", 25., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd7", 40., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd8", 55., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"dddY", 80., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"dddX", 100., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.SetAtLeastHits(4);
+  fat.SetAtLeastCorr(4);
+  fat.SetAtLeastFake(0);
+  //
+  fat.SetMinRadTrack(rmin);
+  //
+  fat.PrintLayout();
+}
+
+void
+lutWrite_scenario2(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.2, float rmin = 20.)
+{
+
+  // init FAT
+  fatInit_scenario2(field, rmin);
+  // write
+  lutWrite(filename, pdg, field);
+
+}
diff --git a/src/lutWrite.scenario3.cc b/src/lutWrite.scenario3.cc
new file mode 100644
index 0000000..d985182
--- /dev/null
+++ b/src/lutWrite.scenario3.cc
@@ -0,0 +1,61 @@
+/// @author: Roberto Preghenella
+/// @email: preghenella@bo.infn.it
+
+#include "lutWrite.cc"
+
+void
+fatInit_scenario3(float field = 0.5, float rmin = 100.)
+{
+  fat.SetBField(field);
+  fat.SetdNdEtaCent(400.);
+  // new ideal Pixel properties?
+  Double_t x0IB = 0.0005;
+  Double_t x0OB = 0.005;
+  Double_t xrhoIB = 1.1646e-02; // 50 mum Si
+  Double_t xrhoOB = 1.1646e-01; // 500 mum Si
+
+  Double_t x0BP = 0.0014; // 500 mum Be
+  Double_t xrhoBP = 9.24e-02; // 500 mum Be
+  Double_t x0BF = 0.0004; // 150 mum Be
+  Double_t xrhoBF = 2.77e-02; // 150 mum Be
+
+  Double_t resRPhiIB = 0.0001;
+  Double_t resZIB = 0.0001;
+  Double_t resRPhiOB = 0.0005;
+
+  Double_t resZOB = 0.0005;
+  Double_t eff = 0.98;
+  fat.AddLayer((char*)"vertex", 0.0, 0, 0); // dummy vertex for matrix calculation
+  fat.AddLayer((char*)"bfoil", 0.5, x0BF, xrhoBF);
+  fat.AddLayer((char*)"ddd0", 0.5075, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"ddd1", 1.5, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"ddd2", 2.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"ddd3", 3.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"bpipe", 3.9, x0BP, xrhoBP); // 500 mum Be
+  //
+  fat.AddLayer((char*)"ddd3a", 8.0, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd4", 20., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd5", 25., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd7", 40., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd8", 55., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"dddY", 80., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"dddX", 100., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.SetAtLeastHits(4);
+  fat.SetAtLeastCorr(4);
+  fat.SetAtLeastFake(0);
+  //
+  fat.SetMinRadTrack(rmin);
+  //
+  fat.PrintLayout();
+}
+
+void
+lutWrite_scenario3(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.2, float rmin = 20.)
+{
+
+  // init FAT
+  fatInit_scenario3(field, rmin);
+  // write
+  lutWrite(filename, pdg, field);
+
+}
diff --git a/src/lutWrite.scenario4.cc b/src/lutWrite.scenario4.cc
new file mode 100644
index 0000000..c30b1dd
--- /dev/null
+++ b/src/lutWrite.scenario4.cc
@@ -0,0 +1,61 @@
+/// @author: Roberto Preghenella
+/// @email: preghenella@bo.infn.it
+
+#include "lutWrite.cc"
+
+void
+fatInit_scenario4(float field = 0.5, float rmin = 100.)
+{
+  fat.SetBField(field);
+  fat.SetdNdEtaCent(400.);
+  // new ideal Pixel properties?
+  Double_t x0IB = 0.0005;
+  Double_t x0OB = 0.005;
+  Double_t xrhoIB = 1.1646e-02; // 50 mum Si
+  Double_t xrhoOB = 1.1646e-01; // 500 mum Si
+
+  Double_t x0BP = 0.0014; // 500 mum Be
+  Double_t xrhoBP = 9.24e-02; // 500 mum Be
+  Double_t x0BF = 0.0004; // 150 mum Be
+  Double_t xrhoBF = 2.77e-02; // 150 mum Be
+
+  Double_t resRPhiIB = 0.0001;
+  Double_t resZIB = 0.0001;
+  Double_t resRPhiOB = 0.0005;
+
+  Double_t resZOB = 0.0005;
+  Double_t eff = 0.98;
+  fat.AddLayer((char*)"vertex", 0.0, 0, 0); // dummy vertex for matrix calculation
+  // fat.AddLayer((char*)"bfoil", 0.5, x0BF, xrhoBF);
+  fat.AddLayer((char*)"ddd0", 0.5, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"ddd1", 1.5, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"ddd2", 2.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"ddd3", 3.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"bpipe", 3.9, x0BP, xrhoBP); // 500 mum Be
+  //
+  fat.AddLayer((char*)"ddd3a", 8.0, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd4", 20., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd5", 25., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd7", 40., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd8", 55., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"dddY", 80., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"dddX", 100., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.SetAtLeastHits(4);
+  fat.SetAtLeastCorr(4);
+  fat.SetAtLeastFake(0);
+  //
+  fat.SetMinRadTrack(rmin);
+  //
+  fat.PrintLayout();
+}
+
+void
+lutWrite_scenario4(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.2, float rmin = 20.)
+{
+
+  // init FAT
+  fatInit_scenario4(field, rmin);
+  // write
+  lutWrite(filename, pdg, field);
+
+}
diff --git a/src/lutWrite.tof1.cc b/src/lutWrite.tof1.cc
new file mode 100644
index 0000000..6127741
--- /dev/null
+++ b/src/lutWrite.tof1.cc
@@ -0,0 +1,54 @@
+/// @author: Roberto Preghenella
+/// @email: preghenella@bo.infn.it
+
+#include "lutWrite.cc"
+
+void
+fatInit_tof1(float field = 0.5, float rmin = 100.)
+{
+  fat.SetBField(field);
+  fat.SetdNdEtaCent(400.);
+  // new ideal Pixel properties?
+  Double_t x0IB = 0.0005;
+  Double_t x0OB = 0.005;
+  Double_t xrhoIB = 1.1646e-02; // 50 mum Si
+  Double_t xrhoOB = 1.1646e-01; // 500 mum Si
+
+  Double_t resRPhiIB = 0.0001;
+  Double_t resZIB = 0.0001;
+  Double_t resRPhiOB = 0.0005;
+  Double_t resZOB = 0.0005;
+  Double_t eff = 0.98;
+  fat.AddLayer((char*)"vertex", 0.0, 0, 0); // dummy vertex for matrix calculation
+  fat.AddLayer((char*)"bpipe", 1.6, 0.0014, 9.24e-02); // 500 mum Be | nominal R5?
+  fat.AddLayer((char*)"ddd1", 1.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"ddd2", 2.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  //fat.AddLayer((char*)"bpipe", 2.9 , 0.0014, 9.24e-02 ); // 500 mum Be
+  fat.AddLayer((char*)"ddd3", 3.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"ddd3a", 8.0, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd4", 20., 0.03, 0.03, resRPhiOB, resZOB, eff); // 3% X0
+  fat.AddLayer((char*)"ddd5", 25., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd7", 40., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd8", 55., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"dddY", 80., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"dddX", 100., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.SetAtLeastHits(4);
+  fat.SetAtLeastCorr(4);
+  fat.SetAtLeastFake(0);
+  //
+  fat.SetMinRadTrack(rmin);
+  //
+  fat.PrintLayout();
+}
+
+void
+lutWrite_tof1(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.2, float rmin = 20.)
+{
+
+  // init FAT
+  fatInit_tof1(field, rmin);
+  // write
+  lutWrite(filename, pdg, field);
+
+}
+
diff --git a/src/lutWrite.tof2.cc b/src/lutWrite.tof2.cc
new file mode 100644
index 0000000..573a095
--- /dev/null
+++ b/src/lutWrite.tof2.cc
@@ -0,0 +1,54 @@
+/// @author: Roberto Preghenella
+/// @email: preghenella@bo.infn.it
+
+#include "lutWrite.cc"
+
+void
+fatInit_tof2(float field = 0.5, float rmin = 100.)
+{
+  fat.SetBField(field);
+  fat.SetdNdEtaCent(400.);
+  // new ideal Pixel properties?
+  Double_t x0IB = 0.0005;
+  Double_t x0OB = 0.005;
+  Double_t xrhoIB = 1.1646e-02; // 50 mum Si
+  Double_t xrhoOB = 1.1646e-01; // 500 mum Si
+
+  Double_t resRPhiIB = 0.0001;
+  Double_t resZIB = 0.0001;
+  Double_t resRPhiOB = 0.0005;
+  Double_t resZOB = 0.0005;
+  Double_t eff = 0.98;
+  fat.AddLayer((char*)"vertex", 0.0, 0, 0); // dummy vertex for matrix calculation
+  fat.AddLayer((char*)"bpipe", 1.6, 0.0014, 9.24e-02); // 500 mum Be | nominal R5?
+  fat.AddLayer((char*)"ddd1", 1.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"ddd2", 2.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  //fat.AddLayer((char*)"bpipe", 2.9 , 0.0014, 9.24e-02 ); // 500 mum Be
+  fat.AddLayer((char*)"ddd3", 3.8, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"ddd3a", 8.0, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd4", 20., x0OB, xrhoOB, 0.1, 0.1, eff); // 1 mm resolution
+  fat.AddLayer((char*)"ddd5", 25., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd7", 40., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"ddd8", 55., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"dddY", 80., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"dddX", 100., x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.SetAtLeastHits(4);
+  fat.SetAtLeastCorr(4);
+  fat.SetAtLeastFake(0);
+  //
+  fat.SetMinRadTrack(rmin);
+  //
+  fat.PrintLayout();
+}
+
+void
+lutWrite_tof2(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.2, float rmin = 20.)
+{
+
+  // init FAT
+  fatInit_tof2(field, rmin);
+  // write
+  lutWrite(filename, pdg, field);
+
+}
+
diff --git a/src/lutWrite.v12.cc b/src/lutWrite.v12.cc
new file mode 100644
index 0000000..4e6d27a
--- /dev/null
+++ b/src/lutWrite.v12.cc
@@ -0,0 +1,60 @@
+/// @author: Roberto Preghenella
+/// @email: preghenella@bo.infn.it
+
+#include "lutWrite.cc"
+
+float scale = 1.;
+
+void
+fatInit_v12(float field = 0.5, float rmin = 100.)
+{
+  fat.SetBField(field);
+  // new ideal Pixel properties?
+  Double_t x0IB = 0.001;
+  Double_t x0OB = 0.01;
+  Double_t xrhoIB = 2.3292e-02; // 100 mum Si
+  Double_t xrhoOB = 2.3292e-01; // 1000 mum Si
+
+  Double_t resRPhiIB = 0.00025;
+  Double_t resZIB = 0.00025;
+  Double_t resRPhiOB = 0.00100;
+  Double_t resZOB = 0.00100;
+  Double_t eff = 0.98;
+  fat.AddLayer((char*)"vertex", 0.0, 0, 0); // dummy vertex for matrix calculation
+  fat.AddLayer((char*)"bpipe0", 0.48 * scale, 0.00042, 2.772e-02); // 150 mum Be
+  //
+  fat.AddLayer((char*)"B00", 0.50 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"B01", 1.20 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"B02", 2.50 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  //
+  fat.AddLayer((char*)"bpipe1", 3.7 * scale, 0.0014, 9.24e-02); // 500 mum Be
+  //
+  fat.AddLayer((char*)"B03", 3.75 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B04", 7.00 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B05", 12.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B06", 20.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B07", 30.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B08", 45.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B09", 60.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B10", 80.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B11", 100. * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.SetAtLeastHits(4);
+  fat.SetAtLeastCorr(4);
+  fat.SetAtLeastFake(0);
+  //
+  fat.SetMinRadTrack(rmin);
+  //
+  fat.PrintLayout();
+}
+
+void
+lutWrite_v12(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.5, float rmin = 100.)
+{
+
+  // init FAT
+  fatInit_v12(field, rmin);
+  // write
+  lutWrite(filename, pdg, field, 0, 0, 0, 1); // last flag: use parametrisation for forward resolution
+
+}
+
diff --git a/src/lutWrite.werner.cc b/src/lutWrite.werner.cc
new file mode 100644
index 0000000..1c45c43
--- /dev/null
+++ b/src/lutWrite.werner.cc
@@ -0,0 +1,60 @@
+/// @author: Roberto Preghenella
+/// @email: preghenella@bo.infn.it
+
+#include "lutWrite.cc"
+
+float scale = 1.;
+
+void
+fatInit_werner(float field = 0.5, float rmin = 100.)
+{
+  fat.SetBField(field);
+  // new ideal Pixel properties?
+  Double_t x0IB = 0.001;
+  Double_t x0OB = 0.01;
+  Double_t xrhoIB = 2.3292e-02; // 100 mum Si
+  Double_t xrhoOB = 2.3292e-01; // 1000 mum Si
+
+  Double_t resRPhiIB = 0.00025;
+  Double_t resZIB = 0.00025;
+  Double_t resRPhiOB = 0.00100;
+  Double_t resZOB = 0.00100;
+  Double_t eff = 0.98;
+  fat.AddLayer((char*)"vertex", 0.0, 0, 0); // dummy vertex for matrix calculation
+  fat.AddLayer((char*)"bpipe0", 0.48 * scale, 0.00042, 2.772e-02); // 150 mum Be
+  //
+  fat.AddLayer((char*)"B00", 0.50 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"B01", 1.20 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  fat.AddLayer((char*)"B02", 2.50 * scale, x0IB, xrhoIB, resRPhiIB, resZIB, eff);
+  //
+  fat.AddLayer((char*)"bpipe1", 3.7 * scale, 0.0014, 9.24e-02); // 500 mum Be
+  //
+  fat.AddLayer((char*)"B03", 3.75 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B04", 7.00 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B05", 12.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B06", 20.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B07", 30.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B08", 45.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B09", 60.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B10", 80.0 * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.AddLayer((char*)"B11", 100. * scale, x0OB, xrhoOB, resRPhiOB, resZOB, eff);
+  fat.SetAtLeastHits(4);
+  fat.SetAtLeastCorr(4);
+  fat.SetAtLeastFake(0);
+  //
+  fat.SetMinRadTrack(rmin);
+  //
+  fat.PrintLayout();
+}
+
+void
+lutWrite_werner(const char *filename = "lutCovm.dat", int pdg = 211, float field = 0.5, float rmin = 100.)
+{
+
+  // init FAT
+  fatInit_werner(field, rmin);
+  // write
+  lutWrite(filename, pdg, field);
+
+}
+
diff --git a/src/lutWrite_aod.cc b/src/lutWrite_aod.cc
new file mode 100644
index 0000000..ba7b89b
--- /dev/null
+++ b/src/lutWrite_aod.cc
@@ -0,0 +1,294 @@
+/// @author: Roberto Preghenella
+/// @email: preghenella@bo.infn.it
+
+/// @author: Nicolo' Jacazio
+/// @email: nicolo.jacazio@cern.ch
+
+/// \brief LUT writer with the AnalysisResults from AODs as input
+
+#include "TCanvas.h"
+#include "TDatabasePDG.h"
+#include "TFile.h"
+#include "TMatrixD.h"
+#include "TMatrixDSymEigen.h"
+#include "TProfile2D.h"
+#include "TProfile3D.h"
+#include "TVectorD.h"
+#include "DetectorK/DetectorK.h"
+#include "lutCovm.hh"
+#include <map> // NOTE(review): angle-bracket include name lost in extraction -- confirm original header
+#include "lutWrite.cc"
+
+void lutWrite_aod(const char* filename = "/tmp/lutCovm.pi.aod.dat",
+                  int pdg = 211,
+                  float field = 0.2, int layer = 0, int what = 0,
+                  int efftype = 0,
+                  const char* infilename = "/tmp/AnalysisResults_LUT.root",
+                  float minPt = 0.f,
+                  float maxPt = 80.f,
+                  float minEta = -4.f,
+                  float maxEta = 4.f)
+{
+
+  std::map<int, std::string> partname{{11, "electron"}, {13, "muon"}, {211, "pion"}, {321, "kaon"}, {2212, "proton"}}; // NOTE(review): template args lost in extraction; reconstructed from usage
+  const std::string dn = "alice3-lutmaker-" + partname[pdg];
+
+  // Get the input from the analysis results
+  TFile f(infilename);
+  if (!f.IsOpen()) {
+    Printf("Did not find %s", infilename);
+    return;
+  }
+  // f.ls();
+  TDirectory* d = nullptr;
+  f.GetObject(dn.c_str(), d);
+  if (!d) {
+    Printf("Did not find %s", dn.c_str());
+    f.ls();
+    return;
+  }
+  // d->ls();
+  std::map<std::string, TH1F*> h{{"eta", nullptr}, {"pt", nullptr}}; // NOTE(review): template args reconstructed from the (TH1F*) casts below
+  std::map<std::string, TProfile2D*> m{{"CovMat_cYY", nullptr}, // NOTE(review): template args reconstructed from the (TProfile2D*) cast below
+                                       {"CovMat_cZY", nullptr},
+                                       {"CovMat_cZZ", nullptr},
+                                       {"CovMat_cSnpY", nullptr},
+                                       {"CovMat_cSnpZ", nullptr},
+                                       {"CovMat_cSnpSnp", nullptr},
+                                       {"CovMat_cTglY", nullptr},
+                                       {"CovMat_cTglZ", nullptr},
+                                       {"CovMat_cTglSnp", nullptr},
+                                       {"CovMat_cTglTgl", nullptr},
+                                       {"CovMat_c1PtY", nullptr},
+                                       {"CovMat_c1PtZ", nullptr},
+                                       {"CovMat_c1PtSnp", nullptr},
+                                       {"CovMat_c1PtTgl", nullptr},
+                                       {"CovMat_c1Pt21Pt2", nullptr},
+                                       {"Efficiency", nullptr}};
+
+  struct binning {
+    int n = 0;
+    float min = 0;
+    float max = 0;
+    bool log = false;
+  };
+
+  binning histo_eta_bins;
+  binning histo_pt_bins;
+  for (auto const& i : h) {
+
+    auto setBinning = [&h, &i](binning& b) {
+      const auto j = h[i.first];
+      if (b.n == 0) {
+        Printf("Setting bin for %s", i.first.c_str());
+        b.n = j->GetXaxis()->GetNbins();
+        b.min = j->GetXaxis()->GetBinLowEdge(1);
+        b.max = j->GetXaxis()->GetBinUpEdge(b.n);
+      }
+      if (std::abs(j->GetXaxis()->GetBinWidth(1) - j->GetXaxis()->GetBinWidth(j->GetNbinsX())) > 0.0001f) {
+        b.log = true;
+      }
+    };
+
+    h[i.first] = ((TH1F*)d->Get(i.first.c_str()));
+    h[i.first]->SetDirectory(0);
+    if (i.first == "eta") {
+      setBinning(histo_eta_bins);
+    } else if (i.first == "pt") {
+      setBinning(histo_pt_bins);
+    }
+  }
+
+  for (auto const& i : m) {
+    auto checkBinning = [&m, &i, histo_eta_bins, histo_pt_bins]() {
+      const auto j = m[i.first];
+      const char* n = i.first.c_str();
+      // X
+      const TAxis* x = j->GetXaxis();
+      if (histo_pt_bins.n != x->GetNbins()) {
+        Printf("Different number of bins on X for %s: %i vs %i", n, histo_pt_bins.n, x->GetNbins());
+        return false;
+      }
+      if (std::abs(histo_pt_bins.min - x->GetBinLowEdge(1)) > 0.0001f) {
+        Printf("Different starting on X for %s: %f vs %f, diff. is %f", n, histo_pt_bins.min, x->GetBinLowEdge(1), histo_pt_bins.min - x->GetBinLowEdge(1));
+        return false;
+      }
+      if (std::abs(histo_pt_bins.max - x->GetBinUpEdge(x->GetNbins())) > 0.0001f) {
+        Printf("Different ending on X for %s: %f vs %f, diff. is %f", n, histo_pt_bins.max, x->GetBinUpEdge(x->GetNbins()), histo_pt_bins.max - x->GetBinUpEdge(x->GetNbins()));
+        return false;
+      }
+      // Y
+      const TAxis* y = j->GetYaxis();
+      if (histo_eta_bins.n != y->GetNbins()) {
+        Printf("Different number of bins on Y for %s: %i vs %i", n, histo_eta_bins.n, y->GetNbins());
+        return false;
+      }
+      if (std::abs(histo_eta_bins.min - y->GetBinLowEdge(1)) > 0.0001f) {
+        Printf("Different starting on Y for %s: %f vs %f, diff. is %f", n, histo_eta_bins.min, y->GetBinLowEdge(1), histo_eta_bins.min - y->GetBinLowEdge(1));
+        return false;
+      }
+      if (std::abs(histo_eta_bins.max - y->GetBinUpEdge(y->GetNbins())) > 0.0001f) {
+        Printf("Different ending on Y for %s: %f vs %f, diff. is %f", n, histo_eta_bins.max, y->GetBinUpEdge(y->GetNbins()), histo_eta_bins.max - y->GetBinUpEdge(y->GetNbins()));
+        return false;
+      }
+      return true;
+    };
+    // m[i.first] = (TProfile3D*)d->Get(i.first.c_str());
+    // m[i.first] = ((TProfile3D*)d->Get(i.first.c_str()))->Project3DProfile("yx");
+    m[i.first] = ((TProfile2D*)d->Get(i.first.c_str()));
+    m[i.first]->SetDirectory(0);
+    if (!checkBinning()) {
+      Printf("Something went wrong, stopping");
+      return;
+    }
+  }
+
+  f.Close();
+
+  // output file
+  ofstream lutFile(filename, std::ofstream::binary);
+  if (!lutFile.is_open()) {
+    Printf("Did not manage to open output file!!");
+    return;
+  }
+
+  // write header
+  lutHeader_t lutHeader;
+  // pid
+  lutHeader.pdg = pdg;
+  lutHeader.mass = TDatabasePDG::Instance()->GetParticle(pdg)->Mass();
+  const int q = std::abs(TDatabasePDG::Instance()->GetParticle(pdg)->Charge()) / 3;
+  if (q <= 0) {
+    Printf("Negative or null charge (%f) for pdg code %i. Fix the charge!", TDatabasePDG::Instance()->GetParticle(pdg)->Charge(), pdg);
+    return;
+  }
+  lutHeader.field = field;
+  // nch
+  lutHeader.nchmap.log = true;
+  lutHeader.nchmap.nbins = 20;
+  lutHeader.nchmap.min = 0.5;
+  lutHeader.nchmap.max = 3.5;
+  // radius
+  lutHeader.radmap.log = false;
+  lutHeader.radmap.nbins = 1;
+  lutHeader.radmap.min = 0.;
+  lutHeader.radmap.max = 100.;
+  // eta
+  lutHeader.etamap.log = false;
+  lutHeader.etamap.nbins = histo_eta_bins.n;
+  lutHeader.etamap.min = histo_eta_bins.min;
+  lutHeader.etamap.max = histo_eta_bins.max;
+  Printf("LUT eta: %i, [%f, %f]", lutHeader.etamap.nbins, lutHeader.etamap.min, lutHeader.etamap.max);
+  // pt
+  lutHeader.ptmap.log = histo_pt_bins.log;
+  lutHeader.ptmap.nbins = histo_pt_bins.n;
+  lutHeader.ptmap.min = histo_pt_bins.log ? std::log10(histo_pt_bins.min) : histo_pt_bins.min;
+  lutHeader.ptmap.max = histo_pt_bins.log ? std::log10(histo_pt_bins.max) : histo_pt_bins.max;
+  Printf("LUT pt: %i, [%f, %f]%s", lutHeader.ptmap.nbins, lutHeader.ptmap.min, lutHeader.ptmap.max, lutHeader.ptmap.log ? " LOG AXIS!" : "");
+  lutFile.write(reinterpret_cast<char*>(&lutHeader), sizeof(lutHeader)); // NOTE(review): cast target type lost in extraction; char* reconstructed for ofstream::write
+  // entries
+  const int nnch = lutHeader.nchmap.nbins;
+  const int nrad = lutHeader.radmap.nbins;
+  const int neta = lutHeader.etamap.nbins;
+  const int npt = lutHeader.ptmap.nbins;
+  lutEntry_t lutEntry;
+
+  auto resetCovM = [&lutEntry]() {
+    lutEntry.valid = false;
+    for (int i = 0; i < 15; ++i) {
+      lutEntry.covm[i] = 0.;
+    }
+  };
+
+  TH1F* hptcalls = (TH1F*)h["pt"]->Clone("hptcalls");
+  hptcalls->Reset();
+  hptcalls->GetYaxis()->SetTitle("pT calls");
+
+  TH1F* hetacalls = (TH1F*)h["eta"]->Clone("hetacalls");
+  hetacalls->Reset();
+  hetacalls->GetYaxis()->SetTitle("eta calls");
+
+  // write entries
+  for (int inch = 0; inch < nnch; ++inch) {
+    for (int irad = 0; irad < nrad; ++irad) {
+      for (int ieta = 0; ieta < neta; ++ieta) {
+        lutEntry.eta = lutHeader.etamap.eval(ieta);
+        hetacalls->Fill(lutEntry.eta);
+        const int bin_eta = h["eta"]->FindBin(lutEntry.eta);
+        if (ieta == 0 && bin_eta != 1) {
+          Printf("First eta is not the first bin");
+          return;
+        }
+        if (lutEntry.eta < minEta || lutEntry.eta > maxEta) {
+          continue;
+        }
+        for (int ipt = 0; ipt < npt; ++ipt) {
+          lutEntry.pt = lutHeader.ptmap.eval(ipt);
+          hptcalls->Fill(lutEntry.pt);
+          const int bin_pt = h["pt"]->FindBin(lutEntry.pt);
+          if (ipt == 0 && bin_pt != 1) {
+            Printf("First pt is not the first bin");
+            return;
+          }
+          if (lutEntry.pt < minPt || lutEntry.pt > maxPt) {
+            continue;
+          }
+          if (bin_eta <= 0 || bin_eta > h["eta"]->GetNbinsX()) {
+            resetCovM();
+          } else if (h["eta"]->GetBinContent(bin_eta) <= 0.f) {
+            resetCovM();
+          } else if (bin_pt <= 0 || bin_pt > h["pt"]->GetNbinsX()) {
+            resetCovM();
+          } else if (h["pt"]->GetBinContent(bin_pt) <= 0.f) {
+            resetCovM();
+          } else {
+            if (fabs(lutEntry.eta) < .3) { // Barrel
+              // const int bin = m["Efficiency"]->FindBin(lutEntry.pt, lutEntry.eta, 3.14);
+              const int bin = m["Efficiency"]->FindBin(lutEntry.pt, lutEntry.eta);
+              lutEntry.eff = m["Efficiency"]->GetBinContent(bin);
+              lutEntry.covm[0] = m["CovMat_cYY"]->GetBinContent(bin);
+              lutEntry.covm[1] = m["CovMat_cZY"]->GetBinContent(bin);
+              lutEntry.covm[2] = m["CovMat_cZZ"]->GetBinContent(bin);
+              lutEntry.covm[3] = m["CovMat_cSnpY"]->GetBinContent(bin);
+              lutEntry.covm[4] = m["CovMat_cSnpZ"]->GetBinContent(bin);
+              lutEntry.covm[5] = m["CovMat_cSnpSnp"]->GetBinContent(bin);
+              lutEntry.covm[6] = m["CovMat_cTglY"]->GetBinContent(bin);
+              lutEntry.covm[7] = m["CovMat_cTglZ"]->GetBinContent(bin);
+              lutEntry.covm[8] = m["CovMat_cTglSnp"]->GetBinContent(bin);
+              lutEntry.covm[9] = m["CovMat_cTglTgl"]->GetBinContent(bin);
+              lutEntry.covm[10] = m["CovMat_c1PtY"]->GetBinContent(bin);
+              lutEntry.covm[11] = m["CovMat_c1PtZ"]->GetBinContent(bin);
+              lutEntry.covm[12] = m["CovMat_c1PtSnp"]->GetBinContent(bin);
+              lutEntry.covm[13] = m["CovMat_c1PtTgl"]->GetBinContent(bin);
+              lutEntry.covm[14] = m["CovMat_c1Pt21Pt2"]->GetBinContent(bin);
+
+              lutEntry.valid = true;
+            } else {
+              lutEntry.eff = 1.;
+              resetCovM();
+              // // printf(" --- fwdSolve: pt = %f, eta = %f, mass = %f, field=%f \n", lutEntry.pt, lutEntry.eta, lutHeader.mass, lutHeader.field);
+              // if (!fwdSolve(lutEntry.covm, lutEntry.pt, lutEntry.eta, lutHeader.mass)) {
+              //   // printf(" --- fwdSolve: error \n");
+              // }
+            }
+          }
+          diagonalise(lutEntry);
+          if (lutEntry.valid) {
+            Printf("Writing valid entry at pT %f and eta %f:", lutEntry.pt, lutEntry.eta);
+            lutEntry.print();
+          }
+          lutFile.write(reinterpret_cast<char*>(&lutEntry), sizeof(lutEntry_t)); // NOTE(review): cast target type lost in extraction; char* reconstructed for ofstream::write
+        }
+      }
+    }
+  }
+
+  lutFile.close();
+  TCanvas* can = new TCanvas("ptetacalls", "ptetacalls");
+  can->Divide(2);
+  can->cd(1);
+  hptcalls->Scale(1. / hetacalls->GetNbinsX());
+  hptcalls->Draw("HIST");
+  can->cd(2);
+  hetacalls->Draw("HIST");
+}