diff --git a/Detectors/TPC/workflow/include/TPCWorkflow/RecoWorkflow.h b/Detectors/TPC/workflow/include/TPCWorkflow/RecoWorkflow.h index f86afc310b04c..a5368f451a820 100644 --- a/Detectors/TPC/workflow/include/TPCWorkflow/RecoWorkflow.h +++ b/Detectors/TPC/workflow/include/TPCWorkflow/RecoWorkflow.h @@ -35,15 +35,15 @@ struct CorrectionMapsLoaderGloOpts; namespace reco_workflow { /// define input and output types of the workflow -enum struct InputType { PassThrough, // No processing, just pass through available inputs to the writers, defined by the OutputType - Digitizer, // directly read digits from channel {TPC:DIGITS} - Digits, // read digits from file - ClustersHardware, // read hardware clusters in raw page format from file - Clusters, // read native clusters from file - CompClusters, // read compressed cluster container - CompClustersCTF, // compressed clusters from CTF, as flat format - CompClustersFlat, // compressed clusters in flat format, used as input for the entropy encoder - EncodedClusters, // read encoded clusters +enum struct InputType { PassThrough, // No processing, just pass through available inputs to the writers, defined by the OutputType + Digitizer, // directly read digits from channel {TPC:DIGITS} + Digits, // read digits from file + ClustersHardware, // read hardware clusters in raw page format from file + Clusters, // read native clusters from file + CompClustersRoot, // read compressed cluster in ROOT format + CompClustersFlat, // compressed clusters from flat format (e.g. 
from CTF) + CompClustersFlatForEncode, // compressed clusters in flat format, used as input for the entropy encoder, no gpu-reco + EncodedClusters, // read encoded clusters ZSRaw, }; @@ -59,7 +59,8 @@ enum struct OutputType { Digits, ClustersHardware, Clusters, Tracks, - CompClusters, + CompClustersRoot, + CompClustersFlat, EncodedClusters, DisableWriter, SendClustersPerSector, diff --git a/Detectors/TPC/workflow/src/RecoWorkflow.cxx b/Detectors/TPC/workflow/src/RecoWorkflow.cxx index 3e35f416373a1..3fdff02dd69cc 100644 --- a/Detectors/TPC/workflow/src/RecoWorkflow.cxx +++ b/Detectors/TPC/workflow/src/RecoWorkflow.cxx @@ -80,16 +80,17 @@ const std::unordered_map InputMap{ {"clustershardware", InputType::ClustersHardware}, {"clusters", InputType::Clusters}, {"zsraw", InputType::ZSRaw}, - {"compressed-clusters", InputType::CompClusters}, - {"compressed-clusters-ctf", InputType::CompClustersCTF}, - {"compressed-clusters-flat", InputType::CompClustersFlat}}; + {"compressed-clusters-root", InputType::CompClustersRoot}, + {"compressed-clusters-flat", InputType::CompClustersFlat}, + {"compressed-clusters-flat-for-encode", InputType::CompClustersFlatForEncode}}; const std::unordered_map OutputMap{ {"digits", OutputType::Digits}, {"clustershardware", OutputType::ClustersHardware}, {"clusters", OutputType::Clusters}, {"tracks", OutputType::Tracks}, - {"compressed-clusters", OutputType::CompClusters}, + {"compressed-clusters-root", OutputType::CompClustersRoot}, + {"compressed-clusters-flat", OutputType::CompClustersFlat}, {"encoded-clusters", OutputType::EncodedClusters}, {"disable-writer", OutputType::DisableWriter}, {"send-clusters-per-sector", OutputType::SendClustersPerSector}, @@ -118,18 +119,23 @@ framework::WorkflowSpec getWorkflow(CompletionPolicyData* policyData, std::vecto return std::find(outputTypes.begin(), outputTypes.end(), type) != outputTypes.end(); }; - if (filteredInp && !(inputType == InputType::PassThrough && isEnabled(OutputType::Tracks) && 
isEnabled(OutputType::Clusters) && isEnabled(OutputType::SendClustersPerSector))) { - throw std::invalid_argument("filtered-input option must be provided only with pass-through input and clusters,tracks,send-clusters-per-sector output"); + if (filteredInp && !(inputType == InputType::PassThrough)) { + throw std::invalid_argument("filtered-input option must be provided only with pass-through input"); } - bool decompressTPC = inputType == InputType::CompClustersCTF || inputType == InputType::CompClusters; + bool decompressTPC = inputType == InputType::CompClustersFlat || inputType == InputType::CompClustersRoot; // Disable not applicable settings depending on TPC input, no need to disable manually if (decompressTPC && (isEnabled(OutputType::Clusters) || isEnabled(OutputType::Tracks))) { caClusterer = false; zsOnTheFly = false; propagateMC = false; } - if (inputType == InputType::ZSRaw || inputType == InputType::CompClustersFlat) { + if (inputType == InputType::CompClustersFlatForEncode || inputType == InputType::CompClustersRoot || inputType == InputType::CompClustersFlat) { + caClusterer = false; + zsOnTheFly = false; + propagateMC = false; + } + if (inputType == InputType::ZSRaw) { caClusterer = true; zsOnTheFly = false; propagateMC = false; @@ -225,7 +231,7 @@ framework::WorkflowSpec getWorkflow(CompletionPolicyData* policyData, std::vecto if (sclOpts.requestCTPLumi) { // need CTP digits (lumi) reader specs.emplace_back(o2::ctp::getDigitsReaderSpec(false)); } - } else if (inputType == InputType::CompClusters) { + } else if (inputType == InputType::CompClustersRoot) { // TODO: need to check if we want to store the MC labels alongside with compressed clusters // for the moment reading of labels is disabled (last parameter is false) // TODO: make a different publisher spec for only one output spec, for now using the @@ -248,8 +254,9 @@ framework::WorkflowSpec getWorkflow(CompletionPolicyData* policyData, std::vecto // output matrix // Note: the ClusterHardware format 
is probably a deprecated legacy format and also the // ClusterDecoderRawSpec - bool produceCompClusters = isEnabled(OutputType::CompClusters); - bool runGPUReco = (produceTracks || produceCompClusters || (isEnabled(OutputType::Clusters) && caClusterer) || inputType == InputType::CompClustersCTF) && inputType != InputType::CompClustersFlat; + bool produceCompClustersRoot = isEnabled(OutputType::CompClustersRoot); + bool produceCompClustersFlat = isEnabled(OutputType::CompClustersFlat); + bool runGPUReco = (produceTracks || produceCompClustersRoot || produceCompClustersFlat || (isEnabled(OutputType::Clusters) && caClusterer) || inputType == InputType::CompClustersFlat) && inputType != InputType::CompClustersFlatForEncode; bool runHWDecoder = !caClusterer && (runGPUReco || isEnabled(OutputType::Clusters)); bool runClusterer = !caClusterer && (runHWDecoder || isEnabled(OutputType::ClustersHardware)); bool zsDecoder = inputType == InputType::ZSRaw; @@ -460,13 +467,13 @@ framework::WorkflowSpec getWorkflow(CompletionPolicyData* policyData, std::vecto cfg.enableMShape = sclOpts.enableMShapeCorrection; cfg.enableCTPLumi = sclOpts.requestCTPLumi; cfg.decompressTPC = decompressTPC; - cfg.decompressTPCFromROOT = decompressTPC && inputType == InputType::CompClusters; + cfg.decompressTPCFromROOT = decompressTPC && inputType == InputType::CompClustersRoot; cfg.caClusterer = caClusterer; cfg.zsDecoder = zsDecoder; cfg.zsOnTheFly = zsOnTheFly; cfg.outputTracks = produceTracks; - cfg.outputCompClusters = produceCompClusters; - cfg.outputCompClustersFlat = runClusterEncoder; + cfg.outputCompClustersRoot = produceCompClustersRoot; + cfg.outputCompClustersFlat = produceCompClustersFlat || runClusterEncoder; cfg.outputCAClusters = isEnabled(OutputType::Clusters) && (caClusterer || decompressTPC); cfg.outputQA = isEnabled(OutputType::QA); cfg.outputSharedClusterMap = (isEnabled(OutputType::Clusters) || inputType == InputType::Clusters) && isEnabled(OutputType::Tracks) && 
!isEnabled(OutputType::NoSharedClusterMap); @@ -500,7 +507,7 @@ framework::WorkflowSpec getWorkflow(CompletionPolicyData* policyData, std::vecto // // selected by output type 'encoded-clusters' if (runClusterEncoder) { - specs.emplace_back(o2::tpc::getEntropyEncoderSpec(!runGPUReco && inputType != InputType::CompClustersFlat, selIR)); + specs.emplace_back(o2::tpc::getEntropyEncoderSpec(!runGPUReco && inputType != InputType::CompClustersFlatForEncode, selIR)); } ////////////////////////////////////////////////////////////////////////////////////////////// @@ -547,7 +554,7 @@ framework::WorkflowSpec getWorkflow(CompletionPolicyData* policyData, std::vecto // a writer process for compressed clusters container // // selected by output type 'compressed-clusters' - if (produceCompClusters && !isEnabled(OutputType::DisableWriter)) { + if (produceCompClustersRoot && !isEnabled(OutputType::DisableWriter)) { // defining the track writer process using the generic RootTreeWriter and generator tool // // defaults diff --git a/Detectors/TPC/workflow/src/tpc-reco-workflow.cxx b/Detectors/TPC/workflow/src/tpc-reco-workflow.cxx index 9d7ab63b0c2a0..b440f6e8d359f 100644 --- a/Detectors/TPC/workflow/src/tpc-reco-workflow.cxx +++ b/Detectors/TPC/workflow/src/tpc-reco-workflow.cxx @@ -57,8 +57,8 @@ void customize(std::vector& workflowOptions) using namespace o2::framework; std::vector options{ - {"input-type", VariantType::String, "digits", {"digitizer, digits, zsraw, clustershw, clusters, compressed-clusters, compressed-clusters-ctf, pass-through"}}, - {"output-type", VariantType::String, "tracks", {"digits, zsraw, clustershw, clusters, tracks, compressed-clusters, encoded-clusters, disable-writer, send-clusters-per-sector, qa, no-shared-cluster-map, tpc-triggers"}}, + {"input-type", VariantType::String, "digits", {"digitizer, digits, zsraw, clustershw, clusters, compressed-clusters-root, compressed-clusters-flat, compressed-clusters-flat-for-encode, pass-through"}}, + {"output-type", 
VariantType::String, "tracks", {"digits, zsraw, clustershw, clusters, tracks, compressed-clusters-root, compressed-clusters-flat, encoded-clusters, disable-writer, send-clusters-per-sector, qa, no-shared-cluster-map, tpc-triggers"}}, {"disable-root-input", o2::framework::VariantType::Bool, false, {"disable root-files input reader"}}, {"no-ca-clusterer", VariantType::Bool, false, {"Use HardwareClusterer instead of clusterer of GPUCATracking"}}, {"disable-mc", VariantType::Bool, false, {"disable sending of MC information"}}, @@ -155,8 +155,6 @@ WorkflowSpec defineDataProcessing(ConfigContext const& cfgc) gDispatchTrigger = o2::framework::ConcreteDataTypeMatcher{"TPC", "DIGITS"}; } else if (inputType == "clustershw") { gDispatchTrigger = o2::framework::ConcreteDataTypeMatcher{"TPC", "CLUSTERHW"}; - } else if (inputType == "clustersnative") { - gDispatchTrigger = o2::framework::ConcreteDataTypeMatcher{"TPC", "CLUSTERNATIVE"}; } else if (inputType == "zsraw") { gDispatchTrigger = o2::framework::ConcreteDataTypeMatcher{"TPC", "RAWDATA"}; } diff --git a/GPU/GPUTracking/DataCompression/GPUTPCCompression.h b/GPU/GPUTracking/DataCompression/GPUTPCCompression.h index 52585b4c08b24..1dafffaeea043 100644 --- a/GPU/GPUTracking/DataCompression/GPUTPCCompression.h +++ b/GPU/GPUTracking/DataCompression/GPUTPCCompression.h @@ -89,6 +89,7 @@ class GPUTPCCompression : public GPUProcessor void SetPointersCompressedClusters(void*& mem, T& c, uint32_t nClA, uint32_t nTr, uint32_t nClU, bool reducedClA); template GPUd() static void truncateSignificantBits(T& val, uint32_t nBits, uint32_t max); + GPUd() bool rejectCluster(int32_t idx, GPUParam& param, const GPUTrackingInOutPointers& ioPtrs); int16_t mMemoryResOutputHost = -1; int16_t mMemoryResOutputGPU = -1; diff --git a/GPU/GPUTracking/DataCompression/GPUTPCCompressionKernels.cxx b/GPU/GPUTracking/DataCompression/GPUTPCCompressionKernels.cxx index 5503eeb30cdd6..d5567bb5148d9 100644 --- 
a/GPU/GPUTracking/DataCompression/GPUTPCCompressionKernels.cxx +++ b/GPU/GPUTracking/DataCompression/GPUTPCCompressionKernels.cxx @@ -183,6 +183,31 @@ GPUd() bool GPUTPCCompressionKernels::GPUTPCCompressionKernels_Compare<4>::opera return mClsPtr[a].qTot < mClsPtr[b].qTot; } +GPUd() bool GPUTPCCompression::rejectCluster(int32_t idx, GPUParam& GPUrestrict() param, const GPUTrackingInOutPointers& GPUrestrict() ioPtrs) +{ + if (mClusterStatus[idx]) { + return true; + } + int32_t attach = ioPtrs.mergedTrackHitAttachment[idx]; + bool unattached = attach == 0; + + if (unattached) { + if (param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyB) { + return true; + } + } else if (param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyA) { + if (GPUTPCClusterRejection::GetIsRejected(attach)) { + return true; + } + int32_t id = attach & gputpcgmmergertypes::attachTrackMask; + auto& trk = ioPtrs.mergedTracks[id]; + if (CAMath::Abs(trk.GetParam().GetQPt() * param.qptB5Scaler) > param.rec.tpc.rejectQPtB5 || trk.MergedLooper()) { + return true; + } + } + return false; +} + template <> GPUdii() void GPUTPCCompressionKernels::Thread(int32_t nBlocks, int32_t nThreads, int32_t iBlock, int32_t iThread, GPUsharedref() GPUSharedMemory& smem, processorType& GPUrestrict() processors) { @@ -208,33 +233,7 @@ GPUdii() void GPUTPCCompressionKernels::Thread(clusters->nClusters[iSector][iRow]); for (uint32_t i = iThread; i < nn + nThreads; i += nThreads) { const int32_t idx = idOffset + i; - int32_t storeCluster = 0; - do { - if (i >= clusters->nClusters[iSector][iRow]) { - break; - } - if (compressor.mClusterStatus[idx]) { - break; - } - int32_t attach = ioPtrs.mergedTrackHitAttachment[idx]; - bool unattached = attach == 0; - - if (unattached) { - if (processors.param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyB) { - break; - } - } else if (processors.param.rec.tpc.rejectionStrategy >= GPUSettings::RejectionStrategyA) { - if 
(GPUTPCClusterRejection::GetIsRejected(attach)) { - break; - } - int32_t id = attach & gputpcgmmergertypes::attachTrackMask; - auto& trk = ioPtrs.mergedTracks[id]; - if (CAMath::Abs(trk.GetParam().GetQPt() * processors.param.qptB5Scaler) > processors.param.rec.tpc.rejectQPtB5 || trk.MergedLooper()) { - break; - } - } - storeCluster = 1; - } while (false); + int32_t storeCluster = i < clusters->nClusters[iSector][iRow] && !compressor.rejectCluster(idx, param, ioPtrs); GPUbarrier(); int32_t myIndex = work_group_scan_inclusive_add(storeCluster); diff --git a/GPU/GPUTracking/DataTypes/GPUDataTypes.h b/GPU/GPUTracking/DataTypes/GPUDataTypes.h index 801c60f6b02ba..967d6a73914dd 100644 --- a/GPU/GPUTracking/DataTypes/GPUDataTypes.h +++ b/GPU/GPUTracking/DataTypes/GPUDataTypes.h @@ -245,6 +245,7 @@ struct GPUTrackingInOutPointers { uint32_t nOutputClusRefsTPCO2 = 0; const o2::MCCompLabel* outputTracksTPCO2MC = nullptr; const o2::tpc::CompressedClustersFlat* tpcCompressedClusters = nullptr; + const o2::tpc::ClusterNativeAccess* clustersNativeReduced = nullptr; // TPC links int32_t* tpcLinkITS = nullptr; diff --git a/GPU/GPUTracking/Definitions/GPUSettingsList.h b/GPU/GPUTracking/Definitions/GPUSettingsList.h index 8b5f70f25a4d9..30477d67fdc4f 100644 --- a/GPU/GPUTracking/Definitions/GPUSettingsList.h +++ b/GPU/GPUTracking/Definitions/GPUSettingsList.h @@ -361,6 +361,7 @@ AddOption(tpcMaxAttachedClustersPerSectorRow, uint32_t, 51000, "", 0, "Maximum n AddOption(tpcUseOldCPUDecoding, bool, false, "", 0, "Enable old CPU-based TPC decoding") AddOption(tpcApplyCFCutsAtDecoding, bool, false, "", 0, "Apply cluster cuts from clusterization during decoding of compressed clusters") AddOption(tpcApplyClusterFilterOnCPU, uint8_t, 0, "", 0, "Apply custom cluster filter of GPUTPCClusterFilter class, 0: off, 1: debug, 2: PbPb23") +AddOption(tpcWriteClustersAfterRejection, bool, false, "", 0, "Apply TPC rejection strategy before writing clusters") AddOption(oclPlatformNum, int32_t, -1, "", 
0, "Platform to use, in case the backend provides multiple platforms (OpenCL only, -1 = auto-select, -2 query all platforms (also incompatible))") AddOption(oclCompileFromSources, bool, false, "", 0, "Compile OpenCL binary from included source code instead of using included spirv code") AddOption(oclOverrideSourceBuildFlags, std::string, "", "", 0, "Override OCL build flags for compilation from source, put a space for empty options") diff --git a/GPU/GPUTracking/Global/GPUChainTracking.cxx b/GPU/GPUTracking/Global/GPUChainTracking.cxx index f47c6923a6be7..91870f981d542 100644 --- a/GPU/GPUTracking/Global/GPUChainTracking.cxx +++ b/GPU/GPUTracking/Global/GPUChainTracking.cxx @@ -273,6 +273,10 @@ bool GPUChainTracking::ValidateSettings() GPUError("Clusterizer and merger Sanity checks only supported when not running on GPU"); return false; } + if (GetProcessingSettings().tpcWriteClustersAfterRejection && (mRec->IsGPU() || param().rec.tpc.compressionTypeMask || !(GetRecoSteps() & GPUDataTypes::RecoStep::TPCCompression))) { + GPUError("tpcWriteClustersAfterRejection requires compressionTypeMask = 0, no GPU usage, and compression enabled"); + return false; + } if (GetProcessingSettings().doublePipeline) { if (!GetRecoStepsOutputs().isOnlySet(GPUDataTypes::InOutType::TPCMergedTracks, GPUDataTypes::InOutType::TPCCompressedClusters, GPUDataTypes::InOutType::TPCClusters)) { GPUError("Invalid outputs for double pipeline mode 0x%x", (uint32_t)GetRecoStepsOutputs()); @@ -543,6 +547,10 @@ void GPUChainTracking::ClearIOPointers() std::memset((void*)&mIOPtrs, 0, sizeof(mIOPtrs)); mIOMem.~InOutMemory(); new (&mIOMem) InOutMemory; + mClusterNativeAccessReduced.reset(nullptr); + if (mClusterNativeAccess.get()) { + memset((void*)mClusterNativeAccess.get(), 0, sizeof(*mClusterNativeAccess)); + } } void GPUChainTracking::AllocateIOMemory() diff --git a/GPU/GPUTracking/Global/GPUChainTracking.h b/GPU/GPUTracking/Global/GPUChainTracking.h index 5c85147494711..8de49cc954e35 100644 --- 
a/GPU/GPUTracking/Global/GPUChainTracking.h +++ b/GPU/GPUTracking/Global/GPUChainTracking.h @@ -265,7 +265,7 @@ class GPUChainTracking : public GPUChain std::unique_ptr mTRDGeometryU; // TRD Geometry // Ptrs to internal buffers - std::unique_ptr mClusterNativeAccess; + std::unique_ptr mClusterNativeAccess, mClusterNativeAccessReduced; std::array mSubOutputControls = {nullptr}; std::unique_ptr mTriggerBuffer; @@ -305,6 +305,7 @@ class GPUChainTracking : public GPUChain void RunTPCTrackingMerger_Resolve(int8_t useOrigTrackParam, int8_t mergeAll, GPUReconstruction::krnlDeviceType deviceType); void RunTPCClusterFilter(o2::tpc::ClusterNativeAccess* clusters, std::function allocator, bool applyClusterCuts); bool NeedTPCClustersOnGPU(); + void WriteReducedClusters(); template int32_t RunTRDTrackingInternal(); uint32_t StreamForSector(uint32_t sector) const; diff --git a/GPU/GPUTracking/Global/GPUChainTrackingClusterizer.cxx b/GPU/GPUTracking/Global/GPUChainTrackingClusterizer.cxx index 619940ff6d3dd..b0d466f13e5ef 100644 --- a/GPU/GPUTracking/Global/GPUChainTrackingClusterizer.cxx +++ b/GPU/GPUTracking/Global/GPUChainTrackingClusterizer.cxx @@ -772,7 +772,7 @@ int32_t GPUChainTracking::RunTPCClusterizer(bool synchronizeOutput) GPUFatal("Cannot use waitForFinalInput callback without delayed output"); } if (!GetProcessingSettings().tpcApplyClusterFilterOnCPU) { - AllocateRegisteredMemory(mInputsHost->mResourceClusterNativeOutput, mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clustersNative)]); + AllocateRegisteredMemory(mInputsHost->mResourceClusterNativeOutput, GetProcessingSettings().tpcWriteClustersAfterRejection ? 
nullptr : mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clustersNative)]); tmpNativeClusters = mInputsHost->mPclusterNativeOutput; } else { tmpNativeClusterBuffer = std::make_unique(mInputsHost->mNClusterNative); @@ -1269,7 +1269,7 @@ int32_t GPUChainTracking::RunTPCClusterizer(bool synchronizeOutput) // TODO: write to buffer directly o2::dataformats::MCTruthContainer mcLabels; std::pair buffer; - if (mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clusterLabels)] && mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clusterLabels)]->useExternal()) { + if (!GetProcessingSettings().tpcWriteClustersAfterRejection && mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clusterLabels)] && mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clusterLabels)]->useExternal()) { if (!mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clusterLabels)]->allocator) { throw std::runtime_error("Cluster MC Label buffer missing"); } @@ -1293,7 +1293,7 @@ int32_t GPUChainTracking::RunTPCClusterizer(bool synchronizeOutput) if (buildNativeHost && buildNativeGPU && GetProcessingSettings().delayedOutput) { mInputsHost->mNClusterNative = mInputsShadow->mNClusterNative = nClsTotal; - AllocateRegisteredMemory(mInputsHost->mResourceClusterNativeOutput, mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clustersNative)]); + AllocateRegisteredMemory(mInputsHost->mResourceClusterNativeOutput, GetProcessingSettings().tpcWriteClustersAfterRejection ? 
nullptr : mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clustersNative)]); tmpNativeClusters = mInputsHost->mPclusterNativeOutput; for (uint32_t i = outputQueueStart; i < mOutputQueue.size(); i++) { mOutputQueue[i].dst = (char*)tmpNativeClusters + (size_t)mOutputQueue[i].dst; @@ -1308,7 +1308,7 @@ int32_t GPUChainTracking::RunTPCClusterizer(bool synchronizeOutput) if (GetProcessingSettings().tpcApplyClusterFilterOnCPU) { auto allocator = [this, &tmpNativeClusters](size_t size) { this->mInputsHost->mNClusterNative = size; - this->AllocateRegisteredMemory(this->mInputsHost->mResourceClusterNativeOutput, this->mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clustersNative)]); + this->AllocateRegisteredMemory(this->mInputsHost->mResourceClusterNativeOutput, this->GetProcessingSettings().tpcWriteClustersAfterRejection ? nullptr : this->mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clustersNative)]); return (tmpNativeClusters = this->mInputsHost->mPclusterNativeOutput); }; RunTPCClusterFilter(tmpNativeAccess, allocator, false); diff --git a/GPU/GPUTracking/Global/GPUChainTrackingCompression.cxx b/GPU/GPUTracking/Global/GPUChainTrackingCompression.cxx index 3bcd2390eae52..e06699c0918b8 100644 --- a/GPU/GPUTracking/Global/GPUChainTrackingCompression.cxx +++ b/GPU/GPUTracking/Global/GPUChainTrackingCompression.cxx @@ -23,6 +23,8 @@ #include "GPUConstantMem.h" // TODO: Try to get rid of as many GPUConstantMem includes as possible! 
#include "GPUTPCCompressionKernels.h" #include "GPUTPCDecompressionKernels.h" +#include "SimulationDataFormat/ConstMCTruthContainer.h" +#include "SimulationDataFormat/MCCompLabel.h" #include "utils/strtag.h" #include @@ -52,6 +54,9 @@ int32_t GPUChainTracking::RunTPCCompression() TransferMemoryResourcesToGPU(myStep, &Compressor, 0); runKernel(GetGridAutoStep(0, RecoStep::TPCCompression), CompressorShadow.mClusterStatus, Compressor.mMaxClusters * sizeof(CompressorShadow.mClusterStatus[0])); runKernel(GetGridAuto(0)); + if (GetProcessingSettings().tpcWriteClustersAfterRejection) { + WriteReducedClusters(); + } runKernel(GetGridAuto(0)); TransferMemoryResourcesToHost(myStep, &Compressor, 0); #ifdef GPUCA_TPC_GEOMETRY_O2 @@ -434,3 +439,56 @@ int32_t GPUChainTracking::RunTPCDecompression() DoDebugDump(GPUChainTrackingDebugFlags::TPCDecompressedClusters, &GPUChainTracking::DumpClusters, *mDebugFile, mIOPtrs.clustersNative); return 0; } + +void GPUChainTracking::WriteReducedClusters() +{ + GPUTPCCompression& Compressor = processors()->tpcCompressor; + mClusterNativeAccessReduced = std::make_unique(); + uint32_t nOutput = 0; + for (uint32_t iSec = 0; iSec < GPUCA_NSECTORS; iSec++) { + for (uint32_t iRow = 0; iRow < GPUCA_ROW_COUNT; iRow++) { + mClusterNativeAccessReduced->nClusters[iSec][iRow] = 0; + for (uint32_t i = 0; i < mIOPtrs.clustersNative->nClusters[iSec][iRow]; i++) { + mClusterNativeAccessReduced->nClusters[iSec][iRow] += !Compressor.rejectCluster(mIOPtrs.clustersNative->clusterOffset[iSec][iRow] + i, param(), mIOPtrs); + } + nOutput += mClusterNativeAccessReduced->nClusters[iSec][iRow]; + } + } + + GPUOutputControl* clOutput = mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clustersNative)]; + if (!clOutput || !clOutput->allocator) { + throw std::runtime_error("No output allocator for clusterNative available"); + } + auto* clBuffer = (ClusterNative*)clOutput->allocator(nOutput * sizeof(ClusterNative)); + 
mClusterNativeAccessReduced->clustersLinear = clBuffer; + mClusterNativeAccessReduced->setOffsetPtrs(); + + std::pair labelBuffer; + if (mIOPtrs.clustersNative->clustersMCTruth) { + GPUOutputControl* labelOutput = mSubOutputControls[GPUTrackingOutputs::getIndex(&GPUTrackingOutputs::clusterLabels)]; + if (!labelOutput || !labelOutput->allocator) { + throw std::runtime_error("No output allocator for clusterNative labels available"); + } + ClusterNativeAccess::ConstMCLabelContainerViewWithBuffer* labelContainer = reinterpret_cast(labelOutput->allocator(0)); + labelBuffer = {&labelContainer->first, &labelContainer->second}; + } + + nOutput = 0; + o2::dataformats::MCLabelContainer tmpContainer; + for (uint32_t i = 0; i < mIOPtrs.clustersNative->nClustersTotal; i++) { + if (!Compressor.rejectCluster(i, param(), mIOPtrs)) { + if (mIOPtrs.clustersNative->clustersMCTruth) { + for (const auto& element : mIOPtrs.clustersNative->clustersMCTruth->getLabels(i)) { + tmpContainer.addElement(nOutput, element); + } + } + clBuffer[nOutput++] = mIOPtrs.clustersNative->clustersLinear[i]; + } + } + mIOPtrs.clustersNativeReduced = mClusterNativeAccessReduced.get(); + if (mIOPtrs.clustersNative->clustersMCTruth) { + tmpContainer.flatten_to(*labelBuffer.first); + *labelBuffer.second = *labelBuffer.first; + mClusterNativeAccessReduced->clustersMCTruth = labelBuffer.second; + } +} diff --git a/GPU/GPUTracking/Global/GPUChainTrackingMerger.cxx b/GPU/GPUTracking/Global/GPUChainTrackingMerger.cxx index a9d4304d77c83..5ab4b963d7330 100644 --- a/GPU/GPUTracking/Global/GPUChainTrackingMerger.cxx +++ b/GPU/GPUTracking/Global/GPUChainTrackingMerger.cxx @@ -293,7 +293,9 @@ int32_t GPUChainTracking::RunTPCTrackingMerger(bool synchronizeOutput) mRec->PushNonPersistentMemory(qStr2Tag("TPCMERG2")); AllocateRegisteredMemory(Merger.MemoryResOutputO2Scratch()); WriteToConstantMemory(RecoStep::TPCMerging, (char*)&processors()->tpcMerger - (char*)processors(), &MergerShadow, sizeof(MergerShadow), 0); - 
runKernel(GetGridAuto(0, deviceType)); + if (!GetProcessingSettings().tpcWriteClustersAfterRejection) { + runKernel(GetGridAuto(0, deviceType)); + } TransferMemoryResourceLinkToHost(RecoStep::TPCMerging, Merger.MemoryResMemory(), 0, &mEvents->single); runKernel(GetGridAuto(0, deviceType)); mRec->ReturnVolatileDeviceMemory(); diff --git a/GPU/Workflow/include/GPUWorkflow/GPUWorkflowSpec.h b/GPU/Workflow/include/GPUWorkflow/GPUWorkflowSpec.h index ad424cc226499..4f62f07593bff 100644 --- a/GPU/Workflow/include/GPUWorkflow/GPUWorkflowSpec.h +++ b/GPU/Workflow/include/GPUWorkflow/GPUWorkflowSpec.h @@ -117,7 +117,7 @@ class GPURecoWorkflowSpec : public o2::framework::Task bool zsDecoder = false; bool zsOnTheFly = false; bool outputTracks = false; - bool outputCompClusters = false; + bool outputCompClustersRoot = false; bool outputCompClustersFlat = false; bool outputCAClusters = false; bool outputQA = false; @@ -134,6 +134,7 @@ class GPURecoWorkflowSpec : public o2::framework::Task bool itsOverrBeamEst = false; bool tpcTriggerHandling = false; bool isITS3 = false; + bool useFilteredOutputSpecs = false; }; GPURecoWorkflowSpec(CompletionPolicyData* policyData, Config const& specconfig, std::vector const& tpcsectors, uint64_t tpcSectorMask, std::shared_ptr& ggr, std::function** gPolicyOrder = nullptr); diff --git a/GPU/Workflow/src/GPUWorkflowSpec.cxx b/GPU/Workflow/src/GPUWorkflowSpec.cxx index 68f7be8fb6330..6c76f13c9bbd0 100644 --- a/GPU/Workflow/src/GPUWorkflowSpec.cxx +++ b/GPU/Workflow/src/GPUWorkflowSpec.cxx @@ -112,8 +112,8 @@ namespace o2::gpu GPURecoWorkflowSpec::GPURecoWorkflowSpec(GPURecoWorkflowSpec::CompletionPolicyData* policyData, Config const& specconfig, std::vector const& tpcsectors, uint64_t tpcSectorMask, std::shared_ptr& ggr, std::function** gPolicyOrder) : o2::framework::Task(), mPolicyData(policyData), mTPCSectorMask(tpcSectorMask), mTPCSectors(tpcsectors), mSpecConfig(specconfig), mGGR(ggr) { - if (mSpecConfig.outputCAClusters && 
!mSpecConfig.caClusterer && !mSpecConfig.decompressTPC) { - throw std::runtime_error("inconsistent configuration: cluster output is only possible if CA clusterer is activated"); + if (mSpecConfig.outputCAClusters && !mSpecConfig.caClusterer && !mSpecConfig.decompressTPC && !mSpecConfig.useFilteredOutputSpecs) { + throw std::runtime_error("inconsistent configuration: cluster output is only possible if CA clusterer or CompCluster decompression is activated"); } mConfig.reset(new GPUO2InterfaceConfiguration); @@ -236,14 +236,14 @@ void GPURecoWorkflowSpec::init(InitContext& ic) } // Configure the "GPU workflow" i.e. which steps we run on the GPU (or CPU) - if (mSpecConfig.outputTracks || mSpecConfig.outputCompClusters || mSpecConfig.outputCompClustersFlat) { + if (mSpecConfig.outputTracks || mSpecConfig.outputCompClustersRoot || mSpecConfig.outputCompClustersFlat) { mConfig->configWorkflow.steps.set(GPUDataTypes::RecoStep::TPCConversion, GPUDataTypes::RecoStep::TPCSectorTracking, GPUDataTypes::RecoStep::TPCMerging); mConfig->configWorkflow.outputs.set(GPUDataTypes::InOutType::TPCMergedTracks); mConfig->configWorkflow.steps.setBits(GPUDataTypes::RecoStep::TPCdEdx, mConfParam->rundEdx == -1 ? 
!mConfParam->synchronousProcessing : mConfParam->rundEdx); } - if (mSpecConfig.outputCompClusters || mSpecConfig.outputCompClustersFlat) { + if (mSpecConfig.outputCompClustersRoot || mSpecConfig.outputCompClustersFlat) { mConfig->configWorkflow.steps.setBits(GPUDataTypes::RecoStep::TPCCompression, true); mConfig->configWorkflow.outputs.setBits(GPUDataTypes::InOutType::TPCCompressedClusters, true); } @@ -800,15 +800,15 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) }; setOutputAllocator("COMPCLUSTERSFLAT", mSpecConfig.outputCompClustersFlat, outputRegions.compressedClusters, std::make_tuple(gDataOriginTPC, (DataDescription) "COMPCLUSTERSFLAT", 0)); - setOutputAllocator("CLUSTERNATIVE", mClusterOutputIds.size() > 0, outputRegions.clustersNative, std::make_tuple(gDataOriginTPC, mSpecConfig.sendClustersPerSector ? (DataDescription) "CLUSTERNATIVETMP" : (DataDescription) "CLUSTERNATIVE", NSectors, clusterOutputSectorHeader), sizeof(o2::tpc::ClusterCountIndex)); + setOutputAllocator("CLUSTERNATIVE", mClusterOutputIds.size() > 0, outputRegions.clustersNative, std::make_tuple(gDataOriginTPC, mSpecConfig.sendClustersPerSector ? (DataDescription) "CLUSTERNATIVETMP" : (mSpecConfig.useFilteredOutputSpecs ? 
(DataDescription) "CLUSTERNATIVEF" : (DataDescription) "CLUSTERNATIVE"), NSectors, clusterOutputSectorHeader), sizeof(o2::tpc::ClusterCountIndex)); setOutputAllocator("CLSHAREDMAP", mSpecConfig.outputSharedClusterMap, outputRegions.sharedClusterMap, std::make_tuple(gDataOriginTPC, (DataDescription) "CLSHAREDMAP", 0)); setOutputAllocator("TPCOCCUPANCYMAP", mSpecConfig.outputSharedClusterMap, outputRegions.tpcOccupancyMap, std::make_tuple(gDataOriginTPC, (DataDescription) "TPCOCCUPANCYMAP", 0)); - setOutputAllocator("TRACKS", mSpecConfig.outputTracks, outputRegions.tpcTracksO2, std::make_tuple(gDataOriginTPC, (DataDescription) "TRACKS", 0)); - setOutputAllocator("CLUSREFS", mSpecConfig.outputTracks, outputRegions.tpcTracksO2ClusRefs, std::make_tuple(gDataOriginTPC, (DataDescription) "CLUSREFS", 0)); - setOutputAllocator("TRACKSMCLBL", mSpecConfig.outputTracks && mSpecConfig.processMC, outputRegions.tpcTracksO2Labels, std::make_tuple(gDataOriginTPC, (DataDescription) "TRACKSMCLBL", 0)); + setOutputAllocator("TRACKS", mSpecConfig.outputTracks, outputRegions.tpcTracksO2, std::make_tuple(gDataOriginTPC, mSpecConfig.useFilteredOutputSpecs ? (DataDescription) "TRACKSF" : (DataDescription) "TRACKS", 0)); + setOutputAllocator("CLUSREFS", mSpecConfig.outputTracks, outputRegions.tpcTracksO2ClusRefs, std::make_tuple(gDataOriginTPC, mSpecConfig.useFilteredOutputSpecs ? (DataDescription) "CLUSREFSF" : (DataDescription) "CLUSREFS", 0)); + setOutputAllocator("TRACKSMCLBL", mSpecConfig.outputTracks && mSpecConfig.processMC, outputRegions.tpcTracksO2Labels, std::make_tuple(gDataOriginTPC, mSpecConfig.useFilteredOutputSpecs ? 
(DataDescription) "TRACKSMCLBLF" : (DataDescription) "TRACKSMCLBL", 0)); setOutputAllocator("TRIGGERWORDS", mSpecConfig.caClusterer && mConfig->configProcessing.param.tpcTriggerHandling, outputRegions.tpcTriggerWords, std::make_tuple(gDataOriginTPC, (DataDescription) "TRIGGERWORDS", 0)); o2::tpc::ClusterNativeHelper::ConstMCLabelContainerViewWithBuffer clustersMCBuffer; - if (mSpecConfig.processMC && mSpecConfig.caClusterer) { + if (mSpecConfig.processMC && (mSpecConfig.caClusterer || mSpecConfig.useFilteredOutputSpecs)) { outputRegions.clusterLabels.allocator = [&clustersMCBuffer](size_t size) -> void* { return &clustersMCBuffer; }; } @@ -889,6 +889,9 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) // ------------------------------ Varios postprocessing steps ------------------------------ + if (mConfig->configProcessing.tpcWriteClustersAfterRejection) { + ptrs.clustersNative = ptrs.clustersNativeReduced; + } bool createEmptyOutput = false; if (retVal != 0) { if (retVal == 3 && mConfig->configProcessing.ignoreNonFatalGPUErrors) { @@ -963,7 +966,7 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) LOG(info) << "found " << ptrs.nOutputTracksTPCO2 << " track(s)"; } - if (mSpecConfig.outputCompClusters) { + if (mSpecConfig.outputCompClustersRoot) { o2::tpc::CompressedClustersROOT compressedClusters = *ptrs.tpcCompressedClusters; pc.outputs().snapshot(Output{gDataOriginTPC, "COMPCLUSTERS", 0}, ROOTSerialized(compressedClusters)); } @@ -976,7 +979,7 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) if (mTPCSectorMask & (1ul << i)) { DataHeader::SubSpecificationType subspec = i; clusterOutputSectorHeader.sectorBits = (1ul << i); - char* buffer = pc.outputs().make({gDataOriginTPC, "CLUSTERNATIVE", subspec, {clusterOutputSectorHeader}}, accessIndex.nClustersSector[i] * sizeof(*accessIndex.clustersLinear) + sizeof(o2::tpc::ClusterCountIndex)).data(); + char* buffer = pc.outputs().make({gDataOriginTPC, mSpecConfig.useFilteredOutputSpecs ? 
(DataDescription) "CLUSTERNATIVEF" : (DataDescription) "CLUSTERNATIVE", subspec, {clusterOutputSectorHeader}}, accessIndex.nClustersSector[i] * sizeof(*accessIndex.clustersLinear) + sizeof(o2::tpc::ClusterCountIndex)).data(); o2::tpc::ClusterCountIndex* outIndex = reinterpret_cast(buffer); memset(outIndex, 0, sizeof(*outIndex)); for (int32_t j = 0; j < o2::tpc::constants::MAXGLOBALPADROW; j++) { @@ -993,7 +996,7 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) } ConstMCLabelContainer contflat; cont.flatten_to(contflat); - pc.outputs().snapshot({gDataOriginTPC, "CLNATIVEMCLBL", subspec, {clusterOutputSectorHeader}}, contflat); + pc.outputs().snapshot({gDataOriginTPC, mSpecConfig.useFilteredOutputSpecs ? DataDescription("CLNATIVEMCLBLF") : DataDescription("CLNATIVEMCLBL"), subspec, {clusterOutputSectorHeader}}, contflat); } } } @@ -1003,8 +1006,8 @@ void GPURecoWorkflowSpec::run(ProcessingContext& pc) o2::tpc::ClusterCountIndex* outIndex = reinterpret_cast(outputBuffers[outputRegions.getIndex(outputRegions.clustersNative)].second); static_assert(sizeof(o2::tpc::ClusterCountIndex) == sizeof(accessIndex.nClusters)); memcpy(outIndex, &accessIndex.nClusters[0][0], sizeof(o2::tpc::ClusterCountIndex)); - if (mSpecConfig.processMC && mSpecConfig.caClusterer && accessIndex.clustersMCTruth) { - pc.outputs().snapshot({gDataOriginTPC, "CLNATIVEMCLBL", subspec, {clusterOutputSectorHeader}}, clustersMCBuffer.first); + if (mSpecConfig.processMC && (mSpecConfig.caClusterer || mSpecConfig.useFilteredOutputSpecs) && accessIndex.clustersMCTruth) { + pc.outputs().snapshot({gDataOriginTPC, mSpecConfig.useFilteredOutputSpecs ? 
DataDescription("CLNATIVEMCLBLF") : DataDescription("CLNATIVEMCLBL"), subspec, {clusterOutputSectorHeader}}, clustersMCBuffer.first); } } } @@ -1250,13 +1253,13 @@ Outputs GPURecoWorkflowSpec::outputs() return outputSpecs; } if (mSpecConfig.outputTracks) { - outputSpecs.emplace_back(gDataOriginTPC, "TRACKS", 0, Lifetime::Timeframe); - outputSpecs.emplace_back(gDataOriginTPC, "CLUSREFS", 0, Lifetime::Timeframe); + outputSpecs.emplace_back(gDataOriginTPC, mSpecConfig.useFilteredOutputSpecs ? (DataDescription) "TRACKSF" : (DataDescription) "TRACKS", 0, Lifetime::Timeframe); + outputSpecs.emplace_back(gDataOriginTPC, mSpecConfig.useFilteredOutputSpecs ? (DataDescription) "CLUSREFSF" : (DataDescription) "CLUSREFS", 0, Lifetime::Timeframe); } if (mSpecConfig.processMC && mSpecConfig.outputTracks) { - outputSpecs.emplace_back(gDataOriginTPC, "TRACKSMCLBL", 0, Lifetime::Timeframe); + outputSpecs.emplace_back(gDataOriginTPC, mSpecConfig.useFilteredOutputSpecs ? (DataDescription) "TRACKSMCLBLF" : (DataDescription) "TRACKSMCLBL", 0, Lifetime::Timeframe); } - if (mSpecConfig.outputCompClusters) { + if (mSpecConfig.outputCompClustersRoot) { outputSpecs.emplace_back(gDataOriginTPC, "COMPCLUSTERS", 0, Lifetime::Timeframe); } if (mSpecConfig.outputCompClustersFlat) { @@ -1269,18 +1272,18 @@ Outputs GPURecoWorkflowSpec::outputs() if (mSpecConfig.sendClustersPerSector) { outputSpecs.emplace_back(gDataOriginTPC, "CLUSTERNATIVETMP", NSectors, Lifetime::Timeframe); // Dummy buffer the TPC tracker writes the inital linear clusters to for (const auto sector : mTPCSectors) { - outputSpecs.emplace_back(gDataOriginTPC, "CLUSTERNATIVE", sector, Lifetime::Timeframe); + outputSpecs.emplace_back(gDataOriginTPC, mSpecConfig.useFilteredOutputSpecs ? 
(DataDescription) "CLUSTERNATIVEF" : (DataDescription) "CLUSTERNATIVE", sector, Lifetime::Timeframe); } } else { - outputSpecs.emplace_back(gDataOriginTPC, "CLUSTERNATIVE", NSectors, Lifetime::Timeframe); + outputSpecs.emplace_back(gDataOriginTPC, mSpecConfig.useFilteredOutputSpecs ? (DataDescription) "CLUSTERNATIVEF" : (DataDescription) "CLUSTERNATIVE", NSectors, Lifetime::Timeframe); } if (mSpecConfig.processMC) { if (mSpecConfig.sendClustersPerSector) { for (const auto sector : mTPCSectors) { - outputSpecs.emplace_back(gDataOriginTPC, "CLNATIVEMCLBL", sector, Lifetime::Timeframe); + outputSpecs.emplace_back(gDataOriginTPC, mSpecConfig.useFilteredOutputSpecs ? DataDescription("CLNATIVEMCLBLF") : DataDescription("CLNATIVEMCLBL"), sector, Lifetime::Timeframe); } } else { - outputSpecs.emplace_back(gDataOriginTPC, "CLNATIVEMCLBL", NSectors, Lifetime::Timeframe); + outputSpecs.emplace_back(gDataOriginTPC, mSpecConfig.useFilteredOutputSpecs ? DataDescription("CLNATIVEMCLBLF") : DataDescription("CLNATIVEMCLBL"), NSectors, Lifetime::Timeframe); } } } diff --git a/GPU/Workflow/src/gpu-reco-workflow.cxx b/GPU/Workflow/src/gpu-reco-workflow.cxx index 561d537b5c251..e620d013cc925 100644 --- a/GPU/Workflow/src/gpu-reco-workflow.cxx +++ b/GPU/Workflow/src/gpu-reco-workflow.cxx @@ -51,8 +51,8 @@ void customize(std::vector& workflowOptions) { std::vector options{ - {"input-type", VariantType::String, "digits", {"digitizer, digits, zsraw, zsonthefly, clustersnative, compressed-clusters-root, compressed-clusters-ctf, trd-tracklets, its-clusters"}}, - {"output-type", VariantType::String, "tracks", {"clustersnative, tracks, compressed-clusters-ctf, qa, no-shared-cluster-map, send-clusters-per-sector, trd-tracks, error-qa, tpc-triggers, its-tracks"}}, + {"input-type", VariantType::String, "digits", {"digits, zsraw, zsonthefly, clusters, compressed-clusters-root, compressed-clusters-flat, trd-tracklets, its-clusters, its-mean-vertex"}}, + {"output-type", VariantType::String, 
"tracks", {"cluster, tracks, compressed-clusters-root, compressed-clusters-flat, qa, error-qa, no-shared-cluster-map, send-clusters-per-sector, trd-tracks, tpc-triggers, its-tracks"}}, {"corrmap-lumi-mode", VariantType::Int, 0, {"scaling mode: (default) 0 = static + scale * full; 1 = full + scale * derivative"}}, {"disable-root-input", VariantType::Bool, true, {"disable root-files input reader"}}, {"disable-mc", VariantType::Bool, false, {"disable sending of MC information"}}, @@ -62,6 +62,7 @@ void customize(std::vector& workflowOptions) {"enableDoublePipeline", VariantType::Bool, false, {"enable GPU double pipeline mode"}}, {"tpc-deadMap-sources", VariantType::Int, -1, {"Sources to consider for TPC dead channel map creation; -1=all, 0=deactivated"}}, {"tpc-mc-time-gain", VariantType::Bool, false, {"use time gain calibration for MC (true) or for data (false)"}}, + {"filtered-output-specs", VariantType::Bool, false, {"use filtered output specs for output DataDescriptions"}}, }; o2::tpc::CorrectionMapsLoader::addGlobalOptions(options); o2::raw::HBFUtilsInitializer::addConfigOption(options); @@ -98,7 +99,7 @@ enum struct ioType { Digits, ZSRaw, ZSRawOTF, CompClustROOT, - CompClustCTF, + CompClustFlat, Tracks, QA, ErrorQA, @@ -117,7 +118,7 @@ static const std::unordered_map InputMap{ {"zsraw", ioType::ZSRaw}, {"zsonthefly", ioType::ZSRawOTF}, {"compressed-clusters-root", ioType::CompClustROOT}, - {"compressed-clusters-ctf", ioType::CompClustCTF}, + {"compressed-clusters-flat", ioType::CompClustFlat}, {"trd-tracklets", ioType::TRDTracklets}, {"its-clusters", ioType::ITSClusters}, {"its-mean-vertex", ioType::MeanVertex}, @@ -126,7 +127,8 @@ static const std::unordered_map InputMap{ static const std::unordered_map OutputMap{ {"clusters", ioType::Clusters}, {"tracks", ioType::Tracks}, - {"compressed-clusters-ctf", ioType::CompClustCTF}, + {"compressed-clusters-flat", ioType::CompClustFlat}, + {"compressed-clusters-root", ioType::CompClustROOT}, {"qa", ioType::QA}, 
{"error-qa", ioType::ErrorQA}, {"no-shared-cluster-map", ioType::NoSharedMap}, @@ -167,13 +169,13 @@ WorkflowSpec defineDataProcessing(ConfigContext const& cfgc) cfg.enableMShape = sclOpt.enableMShapeCorrection; cfg.enableCTPLumi = sclOpt.requestCTPLumi; cfg.decompressTPCFromROOT = isEnabled(inputTypes, ioType::CompClustROOT); - cfg.decompressTPC = isEnabled(inputTypes, ioType::CompClustCTF) || cfg.decompressTPCFromROOT; + cfg.decompressTPC = isEnabled(inputTypes, ioType::CompClustFlat) || cfg.decompressTPCFromROOT; cfg.zsDecoder = isEnabled(inputTypes, ioType::ZSRaw); cfg.zsOnTheFly = isEnabled(inputTypes, ioType::ZSRawOTF); cfg.caClusterer = cfg.zsDecoder || cfg.zsOnTheFly || isEnabled(inputTypes, ioType::Digits); cfg.outputTracks = isEnabled(outputTypes, ioType::Tracks); - cfg.outputCompClusters = isEnabled(outputTypes, ioType::CompClustROOT); - cfg.outputCompClustersFlat = isEnabled(outputTypes, ioType::CompClustCTF); + cfg.outputCompClustersRoot = isEnabled(outputTypes, ioType::CompClustROOT); + cfg.outputCompClustersFlat = isEnabled(outputTypes, ioType::CompClustFlat); cfg.outputCAClusters = isEnabled(outputTypes, ioType::Clusters); cfg.outputQA = isEnabled(outputTypes, ioType::QA); cfg.outputErrorQA = isEnabled(outputTypes, ioType::ErrorQA); @@ -189,6 +191,7 @@ WorkflowSpec defineDataProcessing(ConfigContext const& cfgc) cfg.tpcUseMCTimeGain = cfgc.options().get("tpc-mc-time-gain"); cfg.runITSTracking = isEnabled(outputTypes, ioType::ITSTracks); cfg.itsOverrBeamEst = isEnabled(inputTypes, ioType::MeanVertex); + cfg.useFilteredOutputSpecs = cfgc.options().get("filtered-output-specs"); Inputs ggInputs; auto ggRequest = std::make_shared(false, true, false, true, true, o2::base::GRPGeomRequest::Aligned, ggInputs, true); diff --git a/prodtests/full-system-test/dpl-workflow.sh b/prodtests/full-system-test/dpl-workflow.sh index 9fc6ce5507168..2f0e761366e18 100755 --- a/prodtests/full-system-test/dpl-workflow.sh +++ b/prodtests/full-system-test/dpl-workflow.sh @@ 
-205,7 +205,7 @@ workflow_has_parameter CALIB && [[ $CALIB_TPC_VDRIFTTGL == 1 ]] && SEND_ITSTPC_D PVERTEXING_CONFIG_KEY+="${ITSMFT_STROBES};" -has_processing_step ENTROPY_ENCODER && has_detector_ctf TPC && GPU_OUTPUT+=",compressed-clusters-ctf" +has_processing_step ENTROPY_ENCODER && has_detector_ctf TPC && GPU_OUTPUT+=",compressed-clusters-flat" if [[ $SYNCMODE == 1 ]] && workflow_has_parameter QC && has_detector_qc TPC; then GPU_OUTPUT+=",qa,error-qa" @@ -443,7 +443,7 @@ fi if [[ -n $INPUT_DETECTOR_LIST ]]; then if [[ $CTFINPUT == 1 ]]; then - GPU_INPUT=compressed-clusters-ctf + GPU_INPUT=compressed-clusters-flat TOF_INPUT=digits CTFName=`ls -t $RAWINPUTDIR/o2_ctf_*.root 2> /dev/null | head -n1` [[ -z $CTFName && $WORKFLOWMODE == "print" ]] && CTFName='$CTFName' @@ -652,7 +652,7 @@ if has_processing_step ENTROPY_ENCODER && [[ -n "$WORKFLOW_DETECTORS_CTF" ]] && has_detector_ctf TOF && add_W o2-tof-entropy-encoder-workflow "$RANS_OPT --mem-factor ${TOF_ENC_MEMFACT:-1.5} --pipeline $(get_N tof-entropy-encoder TOF CTF 1)" has_detector_ctf ITS && add_W o2-itsmft-entropy-encoder-workflow "$RANS_OPT --mem-factor ${ITS_ENC_MEMFACT:-1.5} --pipeline $(get_N its-entropy-encoder ITS CTF 1)" has_detector_ctf TRD && add_W o2-trd-entropy-encoder-workflow "$RANS_OPT --mem-factor ${TRD_ENC_MEMFACT:-1.5} --pipeline $(get_N trd-entropy-encoder TRD CTF 1 TRDENT)" - has_detector_ctf TPC && add_W o2-tpc-reco-workflow " $RANS_OPT --mem-factor ${TPC_ENC_MEMFACT:-1.} --input-type compressed-clusters-flat --output-type encoded-clusters,disable-writer --pipeline $(get_N tpc-entropy-encoder TPC CTF 1 TPCENT)" + has_detector_ctf TPC && add_W o2-tpc-reco-workflow " $RANS_OPT --mem-factor ${TPC_ENC_MEMFACT:-1.} --input-type compressed-clusters-flat-for-encode --output-type encoded-clusters,disable-writer --pipeline $(get_N tpc-entropy-encoder TPC CTF 1 TPCENT)" has_detector_ctf CTP && add_W o2-ctp-entropy-encoder-workflow "$RANS_OPT --mem-factor ${CTP_ENC_MEMFACT:-1.5} --pipeline $(get_N 
its-entropy-encoder CTP CTF 1)" if [[ $CREATECTFDICT == 1 && $WORKFLOWMODE == "run" ]] ; then