diff --git a/.readthedocs.yml b/.readthedocs.yaml
similarity index 62%
rename from .readthedocs.yml
rename to .readthedocs.yaml
index b22a1bc2c69..dcda1939df0 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yaml
@@ -8,8 +8,13 @@ version: 2
mkdocs:
configuration: mkdocs.yml
+# Set the version of Python and other tools you might need
+build:
+ os: ubuntu-22.04
+ tools:
+ python: "3.8"
+
# Set the version of Python and requirements required to build your docs
python:
- version: 3.8
- install:
- - requirements: docs/requirements.txt
+ install:
+ - requirements: docs/requirements.txt
diff --git a/bin/mkdocs_skeleton.yml b/bin/mkdocs_skeleton.yml
index 84740a148de..30ff1447093 100644
--- a/bin/mkdocs_skeleton.yml
+++ b/bin/mkdocs_skeleton.yml
@@ -122,12 +122,15 @@ nav:
- Table of Contents:
- Overview:
- README.md
+ - Release Notes:
+ - release_notes.md
- Toolset:
- daq-assettools:
- daq-buildtools:
- daq-cmake:
- daq-release:
- - daq-systemtest:
+ - daqpytools:
+ - daqsystemtest:
- integrationtest:
- styleguide:
- Core:
@@ -137,34 +140,45 @@ nav:
- logging:
- utilities:
- Readout:
+ - asiolibs:
+ - ctbmodules:
- daqdataformats:
+ - datahandlinglibs:
- detchannelmaps:
- detdataformats:
- - dtpcontrols:
- - dtpctrllibs:
+ - dpdklibs:
+ - fddetdataformats:
- fdreadoutlibs:
+ - fdreadoutmodules:
- flxlibs:
- - lbrulibs:
- - readoutlibs:
- - readoutmodules:
+ - hermesmodules:
+ - snbmodules:
+ - tdemodules:
- wibmod:
- Control:
+ - appmodel:
+ - confmodel:
+ - connectivityserver:
- daqconf:
- - nanorc:
+ - dbe:
+ - drunc:
- restcmd:
+ - runconftools:
+ - runconf-ui:
- Dataflow (logical):
- dfmessages:
- dfmodules:
- hdf5libs:
- timing:
- timinglibs:
+ - tpglibs:
+ - trgtools:
- trigger:
- Dataflow (physical):
- iomanager:
- ipm:
- serialization:
- Monitoring:
- - dqm:
- erskafka:
- kafkaopmon:
- opmonlib:
diff --git a/bin/the_final_markdown.sh b/bin/the_final_markdown.sh
index 565d0206c96..721a90d5d62 100755
--- a/bin/the_final_markdown.sh
+++ b/bin/the_final_markdown.sh
@@ -5,7 +5,7 @@ here=$(cd $(dirname $(readlink -f ${BASH_SOURCE})) && pwd)
# Reverse alphabetical order
# for package development themselves
-package_list="wibmod utilities trigger timinglibs timing styleguide serialization restcmd readoutmodules readoutlibs rcif rawdatautils opmonlib ndreadoutlibs nanorc kafkaopmon logging listrev lbrulibs hdf5libs ipm iomanager integrationtest flxlibs fdreadoutlibs erskafka ers dtpctrllibs dtpcontrols dqm dfmodules dfmessages detdataformats detchannelmaps daqdataformats daqconf daq-systemtest daq-release daq-cmake daq-buildtools daq-assettools cmdlib appfwk"
+package_list="wibmod utilities trigger trgtools tpglibs timinglibs timing tdemodules styleguide snbmodules serialization runconf-ui runconftools restcmd rawdatautils opmonlib kafkaopmon logging listrev hermesmodules hdf5libs ipm iomanager integrationtest flxlibs fdreadoutmodules fdreadoutlibs fddetdataformats erskafka ers drunc dpdklibs dfmodules dfmessages detdataformats detchannelmaps dbe datahandlinglibs daqdataformats daqconf daqsystemtest daq-release daqpytools daq-cmake daq-buildtools daq-assettools ctbmodules connectivityserver confmodel cmdlib asiolibs appfwk appmodel"
mkdocs_yml="$here/../mkdocs.yml"
@@ -115,11 +115,11 @@ for package in $package_list ; do
# themselves being updated
if [[ "$package" =~ "daq-buildtools" ]]; then
- git checkout dunedaq-v3.2.2_for_docs
+ git checkout fddaq-v5.5.0_for_docs
elif [[ "$package" =~ "daq-cmake" ]]; then
- git checkout dunedaq-v3.2.0_for_docs
+ git checkout v3.2.1
else
- git checkout develop
+ git checkout coredaq-v5.5.0 || git checkout fddaq-v5.5.0
fi
echo $tmpdir/$package
@@ -151,9 +151,9 @@ for package in $package_list ; do
fi
for mdfile in $( find . -mindepth 2 -type f -not -type l -not -regex ".*\.git.*" -not -regex "\./docs.*" -name "*.md" ); do
- reldir=$( echo $mdfile | sed -r 's!(.*)/.*!\1!' )
- mkdir -p $packages_dir/$package/$reldir
- cp -p $mdfile $packages_dir/$package/$reldir
+ reldir=$( echo "$mdfile" | sed -r 's!(.*)/.*!\1!' )
+ mkdir -p "$packages_dir/$package/$reldir"
+ cp -p "$mdfile" "$packages_dir/$package/$reldir"
if [[ "$?" != "0" ]]; then
echo "There was a problem copying $mdfile to $packages_dir/$package/$reldir in $PWD; exiting..." >&2
exit 3
diff --git a/docs/README.md b/docs/README.md
index 8e247f586df..1a12bd0db49 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -18,10 +18,12 @@ To learn about the C++ coding guidelines which DUNE DAQ package developers shoul
To learn how to run integration tests within our framework, go [here](packages/integrationtest/README.md)
-To learn about how to run even more comprehensive tests (particularly relevant during a DUNE DAQ release period), read about the [daq-systemtest package](packages/daq-systemtest/README.md)
+To learn about how to run even more comprehensive tests (particularly relevant during a DUNE DAQ release period), read about the [daqsystemtest package](packages/daqsystemtest/README.md)
To learn how to keep track of assets (files, etc. which affect the behavior of the DAQ but aren't part of a standard software package), go [here](packages/daq-assettools/README.md)
+Tools to simplify DAQ development in Python can be found in [daqpytools](packages/daqpytools/README.md)
+
--------------
For the other packages, please click on one of the links below. To learn how to edit a package's documentation, click [here](editing_package_documentation.md). Packages marked with an asterix don't yet have any official documentation; please see their Issues page to remedy this.
@@ -40,40 +42,56 @@ For the other packages, please click on one of the links below. To learn how to
### Readout
+[asiolibs](packages/asiolibs/README.md) _Boost.Asio-based socket reader plugin for low-bandwidth devices_
+
+[ctbmodules](packages/ctbmodules/README.md) _Modules for the Central Trigger Board hardware_
+
[daqdataformats](packages/daqdataformats/README.md) _DAQ data formats_
+[datahandlinglibs](packages/datahandlinglibs/README.md) _Tools for constructing readout-focused processes_
+
[detchannelmaps](packages/detchannelmaps/README.md) _Channel maps for the detectors_
-[detdataformats](packages/detdataformats/README.md) _Data formats for the detectors_
+[detdataformats](packages/detdataformats/README.md) _General-purpose data formats and related tools_
-[dtpctrllibs](packages/dtpctrllibs/README.md) _DAQ modules for controlling Trigger Primitive generation firmware_
+[dpdklibs](packages/dpdklibs/README.md) _Data Plane Development Kit software and utilities_
-[dtpcontrols](packages/dtpcontrols/README.md) _Python tools for control of the Trigger Primitive firmware_
+[fddetdataformats](packages/fddetdataformats/README.md) _Bitfields of far detector raw data and related tools_
[fdreadoutlibs](packages/fdreadoutlibs/README.md) _Classes for working with far detector data (WIB, SSP, etc.)_
-[flxlibs](packages/flxlibs/README.md) _DAQModules, utilities, and scripts for Upstream FELIX Readout Software_
+[fdreadoutmodules](packages/fdreadoutmodules/README.md) _Readout plugin collection for the far detector_
-[lbrulibs](packages/lbrulibs/README.md) _DAQModules, utilities, and scripts for DUNE-ND Upstream DAQ Low Bandwidth Readout Unit_
+[flxlibs](packages/flxlibs/README.md) _DAQModules, utilities, and scripts for Upstream FELIX Readout Software_
-[ndreadoutlibs](packages/ndreadoutlibs/README.md) _Classes for working with near detector data (e.g. PACMAN)_
+[hermesmodules](packages/hermesmodules/README.md) _Modules for the Hermes core_
-[readoutlibs](packages/readoutlibs/README.md) _Base classes for construction of readout-related DAQModules_
+[snbmodules](packages/snbmodules/README.md) _Modules for supernova detection_
-[readoutmodules](packages/readoutmodules/README.md) _DAQModules for constructing readout-focused processes_
+[tdemodules](packages/tdemodules/README.md) _Modules for controlling the Top Drift Electronics' Advanced Mezzanine Cards (AMC)_
[wibmod](packages/wibmod/README.md) _WIB configuration and monitoring interface_
### Control
+[appmodel](packages/appmodel/README.md) _Schema for DAQ configuration of readout, dataflow and trigger applications_
+
+[confmodel](packages/confmodel/README.md) _A core schema for DAQ configuration_
+
+[connectivityserver](packages/connectivityserver/README.md) _Serves connection information to DAQ applications_
+
[daqconf](packages/daqconf/README.md) _application to read out Felix data and store it in HDF5 files on disk_
-[nanorc](packages/nanorc/README.md) _Not ANOther Run Control_
+[dbe](packages/dbe/README.md) _A GUI interface for the OKS-based configuration design_
-[* rcif](packages/rcif/README.md) _run control related_
+[drunc](packages/drunc/README.md) _Run control infrastructure for a distributed DAQ system_
[restcmd](packages/restcmd/README.md) _HTTP REST backend based CommandFacility_
+[runconftools](packages/runconftools/README.md) _Constructs configurations from a base of ehn1 configurations_
+
+[runconf-ui](packages/runconf-ui/README.md) _An interface which lets shifters enable/disable elements of the detector_
+
### Dataflow (logical)
[dfmessages](packages/dfmessages/README.md) _dataflow messages_
@@ -86,6 +104,10 @@ For the other packages, please click on one of the links below. To learn how to
[timinglibs](packages/timinglibs/README.md) _timing control and monitoring_
+[tpglibs](packages/tpglibs/README.md) _Processes raw waveforms and returns the generated trigger primitives_
+
+[trgtools](packages/trgtools/README.md) _trigger emulation and analysis tools_
+
[trigger](packages/trigger/README.md) _modules that make up the DUNE FD DAQ trigger system_
### Dataflow (physical)
@@ -98,8 +120,6 @@ For the other packages, please click on one of the links below. To learn how to
### Monitoring
-[dqm](packages/dqm/README.md) _Data Quality Monitor_
-
[erskafka](packages/erskafka/README.md) _the erskafka plugin_
[kafkaopmon](packages/kafkaopmon/README.md) _converts JSON objects into [Kafka](https://en.wikipedia.org/wiki/Apache_Kafka) messages_
@@ -111,8 +131,3 @@ For the other packages, please click on one of the links below. To learn how to
### Educational
[listrev](packages/listrev/README.md) _educational example of DAQModules for new developers_
-
-------
-
-_Mar-11-2021: For software coordinators only:_ [how to make edits to this webpage](how_to_make_edits.md)
-
diff --git a/docs/how_to_make_edits.md b/docs/how_to_make_edits.md
index 09d6294d42a..ba87d27bd66 100644
--- a/docs/how_to_make_edits.md
+++ b/docs/how_to_make_edits.md
@@ -1,5 +1,5 @@
-_JCF, Jul-1-2021: The following is currently intended just for members of the Software Coordination group_
+_JCF, Feb-28-2024: The following is currently intended just for members of the Software Coordination group_
# How the official documentation works
diff --git a/docs/packages/appfwk/ActionPlans.md b/docs/packages/appfwk/ActionPlans.md
new file mode 100755
index 00000000000..9154529678d
--- /dev/null
+++ b/docs/packages/appfwk/ActionPlans.md
@@ -0,0 +1,169 @@
+# Action Plans
+
+## Overview
+
+An ActionPlan defines a series of steps consisting of groups of modules, which are executed in response to a command from CCM. Groups of modules are defined either by module class or by module instances, and the execution of each step is in parallel by default, but can be changed to serial execution if needed. Each ActionPlan is associated with a FSMCommand object, and is run by the application when it receives the corresponding command. If a command is received and no ActionPlan is defined, the application currently runs a "dummy" ActionPlan consisting of a single step where modules with the command registered are all run in parallel.
+
+Action Plans allow for much finer-grained control over the execution of a command within an application, allowing for modules that have dependencies on one another to execute their commands correctly. It also introduces parallelization of command execution within each step, which helps with certain time-consuming module commands (e.g. configuring hardware on a link). The current implementation uses std::future objects and a catch-all threading pattern to ensure that errors executing steps within an action plan do not lead to program crashes.
+
+## Defining an ActionPlan
+
+ActionPlans are defined in configuration using these objects:
+
+```XML
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+```
+
+
+
+1. ActionPlan relates a set of DAQModule groups to a FSMCommand instance.
+
+
+1. DAQModules can be grouped by type (C++ class) or by Id (module instance reference)
+
+
+1. ActionPlan has an "execution_policy" attribute which sets whether the modules referenced by each step should execute the command in parallel or in series. (Steps are always executed in series, but within each step, modules can receive the command in parallel or again in series.)
+
+ActionPlans are validated by the application to ensure that every module type has registered methods corresponding to the command linked to the ActionPlan, and that only one ActionPlan is linked to the application for a given command. Most DUNE-DAQ applications are SmartDaqApplications, which may generate module instances using predefined rules. This can complicate the usage of DaqModulesGroupById and this mode should be used with caution. Note that FSMCommand objects are usually defined by the CCM and included in a fsm.data.xml OKS database.
+
+### Example test/config/appfwk.data.xml
+
+The DAQModuleManager_test unit test defines several ActionPlans used within the test. For example, the "do_stuff" action:
+
+```XML
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+```
+
+Here, the FSMCommand is defined in the file because it is a non-standard command used for the purposes of this unit test. Normally, the FSMCommand instance would not be defined in the same location as the ActionPlan that uses it. The Action plan defines a single step where all modules in the "dummymodules_type_group" receive the "stuff" command in parallel (since parallel step execution is the default behavior when execution_policy is not specified). "dummymodules_type_group" groups all modules of type DummyModule.
+
+Generally, FSMCommands are defined by the CCM group and the basic commands can be found in [fsm.data.xml](https://github.com/DUNE-DAQ/daqsystemtest/blob/develop/config/daqsystemtest/fsm.data.xml), but ActionPlan instances and DaqModulesGroupByType/ID instances are user-defined (examples are in moduleconfs.data.xml, described below).
+
+The user-defined ActionPlans (such as "stuff" above) are associated with the Application instance as follows:
+
+```XML
+
+
+
+
+
+
+
+
+
+
+
+```
+
+## Notes
+
+
+* DAQModules register their action methods with the DAQModuleManager, and this information is used in validation of the ActionPlans (e.g. that every DAQModule that has registered a command is called by the corresponding ActionPlan).
+
+* ActionPlans refer to FSMCommand objects as defined by the CCM. New FSMCommands may be added, but should be integrated into the state machine in consultation with CCM experts.
+
+* Within each step of an ActionPlan, whether executing in series or in parallel, the modules will be called in the order in which they are declared to the Application. In series mode, the future has `wait()` called for each module in the step, and in parallel mode, the futures are all started and the results are collected by a loop which calls `wait()` on each individually.
+
+## Further Examples
+
+### https://github.com/DUNE-DAQ/daqsystemtest/blob/develop/config/daqsystemtest/moduleconfs.data.xml#L134
+
+```XML
+
+
+
+
+
+
+
+
+
+
+```
+
+This ActionPlan consists of four steps, with each step sending the command to the modules matched by the group in parallel. Therefore:
+
+
+1. the FragmentAggregator will run first (there is only one FA per app),
+
+
+1. when the FA is complete, all of the TP Handlers will receive "start" in parallel
+
+
+1. once they are all complete, then the DataLinkHandlers
+
+
+1. finally the FDFakeReaderModules
+
+### https://github.com/DUNE-DAQ/daqsystemtest/blob/develop/config/daqsystemtest/moduleconfs.data.xml#L199
+
+```XML
+
+
+
+
+
+
+
+
+```
+
+This ActionPlan, by contrast, uses the modules-in-series execution policy, so for an application with "ta-handler-01", "ta-handler-02", and "ta-subscriber-01", the ActionPlan will result in:
+
+
+1. (Step 1A) "ta-handler-01" executing "start"
+
+
+1. (Step 1B) once that is complete, "ta-handler-02" will execute "start"
+
+
+1. (Step 2) finally "ta-subscriber-01" will receive "start"
+
+
+-----
+
+
+_Last git commit to the markdown source of this page:_
+
+
+_Author: Eric Flumerfelt_
+
+_Date: Mon Jul 21 15:31:54 2025 -0500_
+
+_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/appfwk/issues](https://github.com/DUNE-DAQ/appfwk/issues)_
+
diff --git a/docs/packages/appfwk/Daq-Application.md b/docs/packages/appfwk/Daq-Application.md
index 23189dcc80e..6068450a78f 100755
--- a/docs/packages/appfwk/Daq-Application.md
+++ b/docs/packages/appfwk/Daq-Application.md
@@ -24,7 +24,7 @@ daq_application known arguments (additional arguments will be stored and passed
# Usage Notes
-As of v2.6.0, `daq_application` will seldom have to be called directly, instead the preferred method of starting _dunedaq_ applications will be to use one of the Run Control products, such as `nanorc`.
+As of v2.6.0, `daq_application` will seldom have to be called directly, instead the preferred method of starting _dunedaq_ applications will be to use one of the Run Control products, such as `nanorc` or `drunc`.
-----
@@ -33,9 +33,9 @@ As of v2.6.0, `daq_application` will seldom have to be called directly, instead
_Last git commit to the markdown source of this page:_
-_Author: glehmannmiotto_
+_Author: Marco Roda_
-_Date: Fri Jul 15 15:54:07 2022 +0200_
+_Date: Thu Jul 4 10:47:11 2024 +0200_
_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/appfwk/issues](https://github.com/DUNE-DAQ/appfwk/issues)_
diff --git a/docs/packages/appfwk/README.md b/docs/packages/appfwk/README.md
index 755a2a72d8e..62dfd567583 100644
--- a/docs/packages/appfwk/README.md
+++ b/docs/packages/appfwk/README.md
@@ -12,7 +12,7 @@ appfwk consists of a generic DAQ application (`daq_application`) which can be co
appfwk provides the scaffolding on which all DUNE DAQ software processes can be developed. The running DAQ typically consists of multiple distinct processes assigned various tasks: filtering data, requesting it, saving it to storage, etc. There are many different types of process, some of which may not even have been conceived of yet, and it would be cumbersome to recompile multiple different types of process across many packages every time one wanted to change the behavior of the DAQ. To solve this problem, the approach that's been taken is to have a standard DUNE DAQ software process [`daq_application`](Daq-Application.md) which can be configured at runtime by Run Control in order to perform some particular function in the DAQ.
-`daq_application` is designed as a flexible container of "DAQ modules" (units of code designed to perform specific tasks) and "connections" (designed to move data between DAQ modules and DAQ applications). These specific tasks can vary widely; they include [producing fake data for testing purposes](https://github.com/DUNE-DAQ/readoutmodules/blob/develop/plugins/FakeCardReader.hpp), [putting data into long term storage](https://github.com/DUNE-DAQ/dfmodules/blob/develop/plugins/DataWriter.hpp), and so forth. DAQ modules will typically execute user-defined functions when receiving standard transitions from Run Control: "conf", "start", etc. appfwk provides the `DAQModule` base class which users should derive their DAQ module class from in their own packages.
+`daq_application` is designed as a flexible container of "DAQ modules" (units of code designed to perform specific tasks) and "connections" (designed to move data between DAQ modules that can be in the same or in different DAQ applications). These specific tasks can vary widely; they include [producing fake data for testing purposes](https://github.com/DUNE-DAQ/readoutmodules/blob/develop/plugins/FakeCardReader.hpp), [putting data into long term storage](https://github.com/DUNE-DAQ/dfmodules/blob/develop/plugins/DataWriterModule.hpp), and so forth. DAQ modules will typically execute user-defined functions when receiving standard transitions from Run Control: "conf", "start", etc. appfwk provides the `DAQModule` base class which users should derive their DAQ module class from in their own packages. Read more about ActionPlans [here](ActionPlans.md).

@@ -25,16 +25,21 @@ In general, in a full blown DAQ system users won't be running `daq_application`
### Basics of the `DAQModule` interface
+
+ **_Be aware that much of the boilerplate code described below can be automatically generated using the [create_dunedaq_package script](https://dune-daq-sw.readthedocs.io/en/latest/packages/daq-cmake/#the-create_dunedaq_package-script)_**
+
+When implementing a DAQ module, you'll want to `#include` the [`DAQModule.hpp` header](https://github.com/DUNE-DAQ/appfwk/blob/develop/include/appfwk/DAQModule.hpp), and derive your DAQ module from the `DAQModule` base class. The most important parts of `DAQModule.hpp` to an implementor of a DAQ module are the following:
-* `DEFINE_DUNE_DAQ_MODULE`: This is a macro which should be "called" at the bottom of your DAQ module's source file with an "argument" of the form `dunedaq::::`. E.g., `DEFINE_DUNE_DAQ_MODULE(dunedaq::dfmodules::DataWriter)` [at the bottom of the dfmodules package's DataWriter module's source file](https://github.com/DUNE-DAQ/dfmodules/blob/develop/plugins/DataWriter.cpp)
+* `DEFINE_DUNE_DAQ_MODULE`: This is a macro which should be "called" at the bottom of your DAQ module's source file with an "argument" of the form `dunedaq::::`. E.g., `DEFINE_DUNE_DAQ_MODULE(dunedaq::dfmodules::DataWriterModule)` [at the bottom of the dfmodules package's DataWriterModule module's source file](https://github.com/DUNE-DAQ/dfmodules/blob/develop/plugins/DataWriterModule.cpp)
+
+* `register_command`: takes as arguments the name of a command and a function which should execute when the command is received. The function is user defined, and takes an instance of `DAQModule::data_t` as argument. `DAQModule::data_t` is aliased to the `nlohmann::json` type and can thus be thought of as a blob of JSON-structured data. While in principle any arbitrary name could be associated with any function of arbitrary behavior to create a command, in practice implementors of DAQ modules define commands associated with the DAQ's state machine: "_conf_", "_start_", "_stop_", "_scrap_". Not all DAQ modules necessarily need to perform an action for each of those transitions; e.g., a module may only be designed to do something during configuration, and not change as the DAQ enters the running state ("_start_") or exits it ("_stop_"). It also supports an optional third argument which lists the states that the application must be in for the command to be valid. [!!!Control People here should make comments and see if this is correct, if it's still the plan, etc]
-* `register_command`: takes as arguments the name of a command and a function which should execute when the command is received. The function is user defined, and takes an instance of `DAQModule::data_t` as argument. `DAQModule::data_t` is aliased to the `nlohmann::json` type and can thus be thought of as a blob of JSON-structured data. While in principle any arbitary name could be associated with any function of arbitrary behavior to create a command, in practice implementors of DAQ modules define commands associated with the DAQ's state machine: "_conf_", "_start_", "_stop_", "_scrap_". Not all DAQ modules necessarily need to perform an action for each of those transitions; e.g., a module may only be designed to do something during configuration, and not change as the DAQ enters the running state ("_start_") or exits it ("_stop_"). It also supports an optional third argument which lists the states that the application must be in for the command to be valid.
+ * **register_command must be called in the DAQModule Constructor!**
-* `init`: this pure virtual function's implementation is meant to create objects which are persistent for the lifetime of the DAQ module. It takes as an argument the type `DAQModule::data_t`. Typically it will use parameters from this JSON argument to define the persistent objects. For persistent objects of types which don't have an efficient copy assigment operator, a common technique is to declare as member data a `unique_ptr` to the type of interest and then, in `init`, to allocate the desired object on the heap using values from the JSON and point the `unique_ptr` member to it. Connection objects are commonly allocated in `init`; they'll be described in more detail later in this document.
+* `init`: this pure virtual function's implementation is meant to create objects which are persistent for the lifetime of the DAQ module. It also has the unique role of connecting the DAQModel with its own configuration object, see later the init section for more details. It takes as an argument the type `std::shared_ptr`. Typically, `init` will query the `ConfigurationManager`, extract the configuration object specifically defined for this `DAQModule` and will store the pointer internally to the class for later usage, when the dedicated commands comes, usually `conf`. Connection, as they are persistent objects, are commonly allocated in `init`; they'll be described in more detail later in this document.
A conceptual example of what this looks like is the following simplified version of a DAQ module implementation.
-```
+```C++
// This file would be called plugins/MyDaqModule.hpp
// Functions would typically be defined in plugins/MyDaqModule.cpp
@@ -49,7 +54,7 @@ class MyDaqModule : public dunedaq::appfwk::DAQModule {
register_command("scrap", &MyDAQModule::do_scrap);
}
- void init(const data_t& init_data) override;
+ void init(std::shared_ptr) override;
private:
@@ -57,6 +62,8 @@ class MyDaqModule : public dunedaq::appfwk::DAQModule {
void do_start(const data_t& start_data);
void do_stop(const data_t& stop_data);
void do_scrap(const data_t& scrap_data);
+
+ const MyDAQModuleConf * m_cfg = nullptr;
};
```
@@ -78,37 +85,54 @@ A word needs to be said about the concept of a "unique name" here. Looking in [`
### The `init` function
-Already touched upon above, this function takes a `data_t` instance (i.e., JSON) to tell it what objects to make persistent over the DAQ module's lifetime. A very common example of this is the construction of the `iomanager` connections which will pipe data into and out of an instance of the DAQ module. A description of this common use case will illustrate a couple of very important aspects of DAQ module programming.
+Already touched upon above, this function takes a `std::shared_ptr` instance to tell it what objects to make persistent over the DAQ module's lifetime. A very common example of this is the construction of the `iomanager` connections which will pipe data into and out of an instance of the DAQ module. A description of this common use case will illustrate a couple of very important aspects of DAQ module programming.
When a DAQ module writer wants to communicate with other DAQ modules, they use the [`iomanager`](https://dune-daq-sw.readthedocs.io/en/latest/packages/iomanager/#connectionid-connectionref). The `iomanager` Sender and Receiver objects needed by a DAQ Module get built in the call to `init` based on the JSON configuration `init` receives. A definition of `init`, then, can look like the following:
-```
-void MyDaqModule::init(const data_t& init_data) {
- auto ci = appfwk::connection_index(init_data, {"name_of_required_input"});
- m_required_input_ptr = dunedaq::get_iom_receiver(ci["name_of_required_input"]));
+```C++
+void MyDaqModule::init(std::shared_ptr p) {
+ m_cfg = p->get_dal(get_name());
+ if ( !m_cfg ) {
+ throw appfwk::CommandFailed(ERS_HERE, "init", get_name(), "Unable to retrieve configuration object");
+ }
+
+ auto inputs = m_cfg->get_inputs();
+ for (auto con : inputs) {
+ if (con->get_data_type() == datatype_to_string ()) {
+ m_type1_con = con->UID();
+ }
+ if (con->get_data_type() == datatype_to_string()) {
+ auto iom = iomanager::IOManager::get();
+ m_type2_receiver = iom->get_receiver(con->UID());
+ }
+ }
}
+
```
-In the code above, the call to `connection_index`, defined in [`DAQModuleHelper.cpp`](https://github.com/DUNE-DAQ/appfwk/blob/develop/src/DAQModuleHelper.cpp), returns a map which connects the names of connections with the `ConnectionRef` objects consumed by `IOManager`. It will throw an exception if any provided names don't appear - so in this case, if `name_of_required_input` isn't found in `init_data`, an exception will be thrown. If the name is found, then `m_required_input_ptr`, which here is an `std::shared_ptr_` to a `iomanager::Receiver` of `MyType_t`s, gets pointed to the appropriate `Receiver`. When the DAQ enters the running state, we could have `MyDaqModule` receive `MyType_t` instances from `m_required_input_ptr` for processing.
+In the code above, the configuration object is first extracted and then queried for the possible input connections.
+The information on the data type transmitted in the connection is used to decide what to use it for. The input of `MyType1` is simply used to store the name of the connection for later usage, while the input of `MyType2` is used to directly obtain the receiver socket from the `IOManager`.
+Similar operations can be done on the outputs, for example see the [`TRBModule`](https://github.com/DUNE-DAQ/dfmodules/blob/2e9fc856e82cf566c2d38d024960a74cee910e75/plugins/TRBModule.cpp#L110).
+Of course in this case operations can be more complicated because modules with multiple outputs of the same type might require a bit more logic to organise where to send data. In that case ad-hoc solutions need to be adopted based on configuration schema object that is defined.
+
+This code of course raises the question: what _is_ `MyDAQModuleConf`? It's a `class`, but rather than being manually written the code for it is generated by the DUNE DAQ build system itself, using a `oks` file schema as input. Initial documentation on OKS can be found [here](https://github.com/DUNE-DAQ/dal/blob/develop/docs/README.md). It's in the schema file that the logical contents of the struct are defined; an example of this type of file can be found [here](https://github.com/DUNE-DAQ/listrev/blob/develop/schema/listrev/listrev.schema.xml). This approach allows automatic compile-time checks on the variable (here `MyDAQModuleConf`) retrieved by the module, reducing the workload on the implementor of `do_conf` or other transitions.
+[!!! Here some expert should decide what to do with this comment. Should we keep discussing jsonnet?!?!?]Note also that in fact many functions in a DAQ module, including `init`, can use JSON as input to control their actions, not just `do_conf`. Further details on the generation of code from `jsonnet` files are beyond the scope of appfwk documentation and are instead covered in [this section of the daq-cmake documentation](../daq-cmake/README.md#daq_cmake_schema).
### The `do_conf` function
-As one might expect, there are many values which a DAQ module may rely on to perform its calculations when in the running state that ideally should be settable during the `conf` transition. The typical technique is to have some member data which in the DAQ module constructor intentionally gets initialized either to zero or to implausible values (e.g. `m_calibration_scale_factor(-1)`, `m_num_total_warnings(0)`) and then to set them properly during the `config` transition. You'll see in the code below that the type of the data instance `data` which gets extracted from the JSON is `mydaqmodule::Conf`, and then `data` is used to set the member(s).
-```
-void MyDaqModule::do_conf(const data_t& conf_data)
+As one might expect, there are many values which a DAQ module may rely on to perform its calculations when in the running state that ideally should be settable during the `conf` transition. The typical technique is to have some member data which in the DAQ module constructor intentionally gets initialized either to zero or to implausible values (e.g. `m_calibration_scale_factor(-1)`, `m_num_total_warnings(0)`) and then to set them properly during the `config` transition. You'll see in the code below that the information is extracted from the previously set pointer to our schema generated object and is used to set the member(s).
+```C++
+void MyDaqModule::do_conf(const data_t&)
{
- auto data = conf_data.get();
-
- m_calibration_scale_factor = data.calibration_scale_factor;
+ m_calibration_scale_factor = m_cfg->get_calibration_scale_factor();
// ...and then set the other members which take per-configuration values...
}
```
-This of course raises the question: what _is_ `mydaqmodule::Conf`? It's a `struct`, but rather than being manually written the code for it is generated by the DUNE DAQ build system itself, using a `jsonnet` file as input. It's in the `jsonnet` file that the logical contents of the struct are defined; an example of this type of file can be found [here](https://github.com/DUNE-DAQ/listrev/blob/develop/schema/listrev/randomdatalistgenerator.jsonnet). This approach allows automatic compile-time checks on the variable (here `mydaqmodule::Conf`) retrieved by the module, reducing the workload on the implementor of `do_conf`. Note also that in fact many functions in a DAQ module, including `init`, can use JSON as input to control their actions, not just `do_conf`. Further details on the generation of code from `jsonnet` files are beyond the scope of appfwk documentation and are instead covered in [this section of the daq-cmake documentation](../daq-cmake/README.md#daq_cmake_schema).
### The `do_start` function
Most DAQ modules are designed to loop over some sort of repeated action when the DAQ enters the running state, and it's in the `do_start` function that this repeated action begins. A very common technique for the `do_start` function is, "Set an atomic boolean stating that we're now in the running state, and then start one or more threads which perform actions in loops which they break out of if they see that the atomic boolean indicates we're no longer in the running state".
While it's of course possible to accomplish this using the existing concurrency facilities provided by the C++ Standard Library, the `utilities` package provides a class, `WorkerThread`, which makes this easier. `WorkerThread` is covered in detail [here](https://dune-daq-sw.readthedocs.io/en/latest/packages/utilities/WorkerThread-Usage-Notes/); when in use the `do_start` function can be as simple as follows:
-```
+```C++
void MyDaqModule::do_start(const data_t& /*args*/) {
m_thread.start_working_thread(); // m_thread is an `utilities::WorkerThread` member of MyDaqModule
}
@@ -118,12 +142,12 @@ Note that `start_working_thread` takes an optional argument which gives the `Wor
### The `do_stop` function
Quite simple, basically the reverse of `do_start`:
-```
+```C++
void MyDaqModule::do_stop(const data_t& /*args*/) {
m_thread.stop_working_thread(); // m_thread is an `utilities::WorkerThread` member of MyDaqModule
}
```
-Note that if your `do_start` function also allocates any resources (hardware, memory, etc.) it should be deallocated here. Also, the queues which send data to your DAQ module should be drained. The idea is that you want your DAQ module to be able to accept a "start" transition after receiving a "stop" transition without anything from the previous run interfering.
+Note that if your `do_start` function also allocates any resources (hardware, memory, etc.) it should be deallocated here. Also, the input connections to your DAQ module should be drained. The idea is that you want your DAQ module to be able to accept a "start" transition after receiving a "stop" transition without anything from the previous run interfering.
### The `do_scrap` function
@@ -132,10 +156,10 @@ This is the reverse of `do_config`. Often this function isn't even needed since
### The `get_info` function
Not yet mentioned, you can see in [`DAQModule.hpp`](https://github.com/DUNE-DAQ/appfwk/blob/develop/include/appfwk/DAQModule.hpp) that there's a virtual function called `get_info` which defaults to a no-op:
-```
+```C++
virtual void get_info(opmonlib::InfoCollector& /*ci*/, int /*level*/) { return; };
```
-It's meant to be implemented by DAQ module writers to supply metrics about the DAQ module; an example of this can be found [here](https://github.com/DUNE-DAQ/dfmodules/blob/develop/plugins/DataWriter.cpp).
+It's meant to be implemented by DAQ module writers to supply metrics about the DAQ module; an example of this can be found [here](https://github.com/DUNE-DAQ/dfmodules/blob/develop/plugins/DataWriterModule.cpp).
### The full code
@@ -143,11 +167,12 @@ Given the code features described above, `MyDaqModule` would look something like
* `MyDaqModule.hpp`:
-```
+```C++
class MyDaqModule : public dunedaq::appfwk::DAQModule {
public:
- alias MyType_t = double; // Pretend this module processes an incoming stream of doubles
+ using MyType1 = double; // Pretend this module processes an incoming stream of doubles
+ using MyType2 = int; // Pretend this module processes an incoming stream of int
MyDaqModule(const std::string& name) : // A DAQ module instance is meant to have a unique name
dunedaq::appfwk::DAQModule(name),
@@ -158,9 +183,9 @@ class MyDaqModule : public dunedaq::appfwk::DAQModule {
register_command("start", &MyDAQModule::do_start);
register_command("stop", &MyDAQModule::do_stop);
register_command("scrap", &MyDAQModule::do_scrap);
- }
+ }
- void init(const data_t& init_data) override;
+ void init(std::shared_ptr) override;
private:
@@ -172,16 +197,32 @@ class MyDaqModule : public dunedaq::appfwk::DAQModule {
void do_work(std::atomic&);
dunedaq::utilities::WorkerThread m_thread;
double m_calibration_scale_factor;
- std::shared_ptr> m_required_input_ptr;
+ const MyDAQModuleConf * m_cfg = nullptr;
+ std::string m_type1_con;
+ std::shared_ptr> m_type2_receiver;
};
```
* `MyDaqModule.cpp`:
-```
-
-void MyDaqModule::init(const data_t& init_data) {
- auto ci = appfwk::connection_index(init_data, {"name_of_required_input"});
- m_required_input_ptr = dunedaq::get_iom_receiver(ci["name_of_required_input"]));
+```C++
+
+void MyDaqModule::init(std::shared_ptr) {
+
+ m_cfg = p->get_dal(get_name());
+ if ( !m_cfg ) {
+ throw appfwk::CommandFailed(ERS_HERE, "init", get_name(), "Unable to retrieve configuration object");
+ }
+
+ auto inputs = m_cfg->get_inputs();
+ for (auto con : inputs) {
+ if (con->get_data_type() == datatype_to_string ()) {
+ m_type1_con = con->UID();
+ }
+ if (con->get_data_type() == datatype_to_string()) {
+ auto iom = iomanager::IOManager::get();
+ m_type2_receiver = iom->get_receiver(con->UID());
+ }
+ }
}
void MyDaqModule::do_conf(const data_t& conf_data)
@@ -221,7 +262,7 @@ Now that you've been given an overview of appfwk and how to write DAQ modules, y
### API Diagram
-
+[!!! Here we need to remake this diagram]
-----
@@ -230,9 +271,9 @@ Now that you've been given an overview of appfwk and how to write DAQ modules, y
_Last git commit to the markdown source of this page:_
-_Author: eflumerf_
+_Author: Eric Flumerfelt_
-_Date: Fri Jan 20 15:06:29 2023 -0600_
+_Date: Wed Jul 23 13:39:53 2025 -0500_
_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/appfwk/issues](https://github.com/DUNE-DAQ/appfwk/issues)_
diff --git a/docs/packages/appfwk/appfwk.drawio b/docs/packages/appfwk/appfwk.drawio
index e4bbbb12a27..b8c33d09305 100755
--- a/docs/packages/appfwk/appfwk.drawio
+++ b/docs/packages/appfwk/appfwk.drawio
@@ -1 +1,946 @@
-7Z1Zb+O2FoB/TQBPgQwsr8ljls40uDPFTNLpRe+LoUi0zUYSFUpy4nnob79cRFnLkZfEkpiUQDGNaYmyyI+HPAsPT4ZX/vNnaofLr8RF3smg7z6fDK9PBoNBfzJg/+Mla1liDQZjWbKg2E3LNgV3+CdKC/tpaYJdFBUujAnxYhwWCx0SBMiJC2U2peSpeNmceMWnhvYCVQruHNurlv4Xu/FSlp6N+5vy3xBeLNWTrX76jW+ri9OCaGm75ClXNPz1ZHhFCYnlX/7zFfJ466l2kfd9qvk2+2EUBfE+N+Cf6+Xp5HY1JvfXj3Tin97Gj6eqmVe2l6RvfDKYeKzCy3v2x4L/cRGGHnbsGJMg+46qL1UJe+79pky8b7xWjRg9Yd+zA/bpck6C+C79ps8+2x5eBOxvh70FoqxghWjMnuZdpF/EJGSlzhJ77hd7TRL+rlFsOw/q0+WSUPyTVWt77CuLFbCvaZyiNOwXrrjjd6aPpihi13xTDWiVir7az4ULv9hRnBY4xPPsMML32Wv4Nl3g4JLEMfHTomXsqx/EX/qT7WOPj4LfkLdC/BVVa8gfao3EL6fkIQNtwK/AnndFPMKa5tpFczvxNi2c7/sUB9566DlXlLLwGREfxXTNLsnGprxjXfz4lIN8lJYtc4CPxmfp4EoH1iKrecMe+yPF7xAUrQqKp/7MT8T7XAigXP7H8EKWlSFjZXGx2WVrqtYLiAQw16BpkWLQQ/MYINDHrssfchmFtoODxRdx2fVoU3Kbtg8vIuz2uScG+pLdiAIOEInt2N7QEhIcxKL9xpfsP9bMV/2P45Mxe40r9tnafGb/8ctpfEUC9jo2Fv2NGIpPiOO4P1pVZLYLhN0gpeQM9yRHEXZ8cAYQOOxNYyxkVhEe1oqsxww9mtAz7pyeIUQPDuZk5i+ohIeEPgk8fC8JumHffbUDNvNTg5EmGE07x2gEYcRWIjEyAkhncs47J2cMkXOfROsSODZbW2KHLbJtny+J5UKc6UJZCbz4NmB1A5ZldU7WBCILMa2YGrTeNFrdr7nP4FUTW3Gzrv2JXAmYZMhQowc13a+1zyFqaBLMhMlmFmO/vFpylpQERP7Nv56JLilKKvDiKEa2u545HnEejBjTE8jOV+2q4gqQXP+TJFIHzyVRuWKpBBqQdAGp80X8ADRezhPPW88eE9a7c4zcWWBX5JvRBrUiadD5on0AWjN94m7MUdcX378SN/GQsUJpRk+b6/L198mK3P3ncfb4+O3T95/2NfqxOrXG4MLc8d3Z3HbKsmfJmsCdhTEtrqfY1Zmx84r4vh24n1jnejhem5WUnuC1ubSvAQ9c2zskmB9AHmvhuUFNb9Q6X7Rb0wppFTo8LLq66E+3jsLDHzwg4frUqhAyrBIyBGjw7HvkfSNR6py8pvLaEiVvAIR9F91HCBiARc4ECl7Jh6v07DDctui+ki0j/cTqQvAqOSViP/TqrlA+wm3XcGkIf//ByDdtsD7reiqdVJVJxjU3rvY+SJJXhNVgiNGDmKHVPTFVpZERQ5OghwKmONr0gcfWXRzs70k/yX8NexqyN+yevWoADWMPPSMniVGPa54ua39uneetyC4MPLJkOmUgUfw7EmGlBjTNQRt3D1o1xIaBtrDjJaIi0ibqYa5oVqO1GAkecmJCS6DJVZmHVogvyVirG/I0JG/aPXnVEB2+IIuEXY0VYvc1cs44yXWiTYPlfzVsh9EWoViGE/YiSH00cks3kkYaqAWQpexykZH0wTgjtSao+8U9wE9pd1jVGWm2iJ28jy1i58rAWr9HbNqHcJw2ZfIFfePcMGaCDo8tw7bKg9dsDoORaUqCgbt7fCGwZr4dlncWshI4tDBvxt/pxsxkYo09zXg0NUIUcJ23i2g1YqOChnFnNk0B4NUGKWhqZgNtW+W1Vc+4CXXBpboyb1dowBYptRLqGZP7m0cMMES1ixhohspt8ekV7Jf9lDUDjw7wWIDxqV16QNOT4yE7SEITxqA
hMYCxqV1iqgGkOT+y2kmvZrSKYgY49hzfPfwO461+BywDzup2WQbzahlf9XvnrnO1AEqixZdsqckr6vlRiJwoJ9/sMJTwMU3zTnxnxJvumHWuGkApt1zMuiZ2ljPWwzM//ctb97CYhHNbiuS0zPcW+e6Nq0yqh0/vZqJ+8yQPOldToPRfGcn2PGbTtY/oAhmMDcZb9n12jvEE2PfJAz3YvM/3tUSz+zWPVsNuEyCX40hWxQUs7EkzvjANQe5ecYIcIXmOajytrLVCRD8uw/Agn6sJO9E/7GQCxZ1MACjPGws7geKgjIv2lcJptDcYW4JJIA4aw6Cq9XCbtsw8z3eQ4sBFzz2mb5P7vw9Z0sk5l6LHBFMRxfSambQvfln/ZHp5Mr0uz8yHBriYuVrj4QDM1dBwaGyuHoA7v3LjgaJ59ILhkEKbWaaushpv0RyxdnTYeta4GnXhEFoztsshGDyT4zDhak8RQwHWnoI5TQzwMl3IxNRrBStkoW8XVsgxXhdTb4Lp361WMxxVV7NnoKo9bGw9C+cKkxm/otetHEMb1y5bUV3+Vni5Wah3ngROehJRrgZuDO0p+cwNqrO4Ipa3mgXMArdxWZ0JvledLNPu5gFoYWH0/rY5gJLbtRmbPQAjk7JJugcnjjKx2trwA2yjbFeOgNFA/2wIMqzowgqUbKJdVuqTf+1Y4Bi3sG4wQbkk2oUJtFinMbVqpd87yL5xtEAHkGL+G/OGa4OzRjhDmwpa5hm0OPM4B6W1VnJavMSDYrayaIUdlAajXezA3XZLOzqGCN3wtp9l2Wy30pHR7nUMcLseF4380Jqeg1+x24A/22w40JK77vUVyJ8xpGiBIx48+woByY9d2suweyUfj12Z1iqqE6oH2p1LPykXV8Ebc/KYEH7dxe9/bT6I783aVceB0r0uBuX5kM68ZOPJu/TtB5Tu1umFXrLA9YnpFeLA2MHsCztw0GH3VnzXB+e3Ub7J3BuZAaDFAIC2WrQ7AEaQFbTk+S4dAGOc3+/D+a2YUgZ5wPc9GQMsZhaH48NoDpE5vlTKRvhrXLwgCI1hALr28lKol1BsnHsaE1Sd1kCCmsvEBDv3CgwZXHTBBbDTtIwL6N/jdhqu3uZPrGo21YkUawdZHqFwWYO1FlgDZqCWsYY8jTXarToUcsv8+koFdds5kkZH1ZZiwEbTMsWQf1ECw/onKFCibH68dU6f0l8nxCShPlcPL/N2wUnRqAhsZY2cJfLtWlrl84t68EYz5ntlKorw0g75n3PiuULbzavDHGrGUabVWH1Z8Jtq5mFakFMyUqhzw+KeJIGL3C/3qkDoJTfBb8gWjxRlDleGmSq1+VxWaClyEhrhFboVym9aegS9c6xm20xMVvUN9urnAGLnTTEG7NSvmEFuAhwb84eu5o/tomNvOIdFo4g69TGH5nAA2kQaIxNUhtPMUVK2bZJEmSmzgSkzkw2vMaCA1DSX5xNUfx8TlChmMPHTo0HEmu27+Mrs39QGISC8vmWEQJV4s38T5mizK9jApA9MQB79lmGCFNHLJBJqpwBmxZTCWYToCjtIknX3hGNnaRjShCHgYPl2GbKAONMkkmaJb8In/zs30amQkLLtAlLQ1O03rNFffPMXrl296E5FeHqriCHcdv1mnVd+GBRPm15d0l231V9J6TBPczrs8zggHwTw6BptOCAxGkCq69MSx+iODT9+1RPlZwMVhr4kfzCu1ZtKg74xvWE6Pd+pOFiQ4jBqarycg0m7Kp59oNOMhvsv0nAH/T1V3GljoIKB3Km/ZSNgzTqggXXA+cG5OjrXbc/BmOpEZNCsKiQ/sGsUEW3g6VyrtcDsAaVZcbNyMjPhe54JR4NxYSqc7jkTNuaFsOA9TTLiWIq3vKZjhFoDQm0jId7OlGjB+4xwmlMtr+AaZvRgRoOZEDwMLN0PfiKil7Jdade81LCjCTudm3OtIeSUrK6ijMf8X7iKGlh7LqPGjeEJbmoRviuR/VbKN5P
AtlUpd743V9qsqsB9CCU8UOBeUCr6xcW2TwL3jyVfq1+yLz5h/kDRNexT2mqDUZEmKk3eJ3XhVVtHaUQS6qB9Zhcm3xZoW5XKI4TcBdraPfkoLZU+kyLPjvEqfyPcIWl13zipOZOk0qhU16uddKoK+Z7pXZturVQ07u+oSDZDpSLBR/aOr0AG2HfwS5UZ1sTSqFkgYee8lkkMMbvUDvzrA4blqG5YilBfLh/Tx5yk46x2Gjjtf5yOreJUoA5QfyUe6sgUVWupT8l8LnaRN9Gd1SAI69/Rnf2Pk9FZsTeP05kqO7TqTIVMG70JWVfeljw/31ecZxNuF/J8NCySYw0mL5Pnw1J+5kpFjcvzqmFFa3meIa61PB+WRMCoPRFQNXpoLdCP15+NCfTs3NGsO622uhMU6JUQB5Hr5gsO0A3vt5Ai3nvGKqGpVeK1aQyyk/HyeiN0mITV2MlkwKTBM9MhL5zx46SYho3cQto4cU1/bnuRsdA3Y3+olaMHmR9aPZMEmKoYRbmtyCfADmGVDyu/yS37y5ClBVmQ66ddsqDNbpfsTWOxqdDw9bb5gtxD7fIFbllLkw5me8xnxURqhra3SRu0L6BV2iZVk6uIfZgTtZ/EkPYuSMv22HaHWt3ZmnOD2vtCrfPV/6TqeWCokXiJ6IyEuQ2YRzuV2MCnC3ydKwiTAQBfiQ6Th7F5EPZdyTdlyJrUH4uazx/FXj1CPZsuHCZphG2Y5zJjn1c8mdnSpr/88kHF3Gwzxpr0T1pCWM3/1K40ssBsFmWfTdN+W+Um2uWhVbFneQ8t/rlenk5uV2Nyf/1IJ/7pbfx4qlazXThop6U4mVF5S8O+DtqzyY6KmnbQgmkqdGVDRSPt9t53iIZV7NEspOpgNKY7Kmocjaq2prWvd1rn+nqBr3dsnZ0dBQdrUuhE5bpvwVU/re7U0zr04njdd9r/OJr0i0ESw+P0ZjGSrr3QKzABjK5i2noLM3g5e385Ad3eYvpsR0XHE9P4fw9/jv/8vvhrtUDfb37Q2R/h59Pq/J0qCazw19s7/u+zg3IWj3LgxqXtXnn4R2QvUC+zsT0t7fhD9dqbQBxJdCeyeu68+keAnkM2bpGb/qTNHTy1c3pgXFYHAapIb2TLe7a6d+I/MeG9ToLis0tV7Vs5e3M2LnJvzZQfxC2P0A+xg4DE37imxjdzbe6hTNsgAfjTN+l6r9hV/Gd/stkTcs2QUFzfyrwCvEiouPPH7c2u2wrX3zKsMGL17LorO/bmq9z2/zuJ+bYjzH7BT1TsMezucf+Fx97VXefrqO/Xa8xUxNhhYmpRbhrxvFI3plkba97dw07Mrkur/prW+/IaN6p2FCU5SrIQglJVKOJjCKjqIgzZbxPdUte6NdSpEhevCrIeMn0X76jN7HtvOw8LIelPHanny5PwxI8Cq8xwrI7hSnvK8x9FWRqbgygtfL5PonX2pjUpgFmxeOO6KLJush4BC5PK7Fi7JrHOSnrB3n6/syPYBcCZo7qkyB2Btd/ckd1QJ+Dy7pwiKYXDvAD0P6MAUdvLnlAahHViWkJaurg6IWy//VYc9EfBN9q7rh/BQ0Cegj2mvpoK1GTLh9Qet7/HQTIaAKPkDBglR0kDBo6S+uTpucTWTEmiCRsnXEExkbEaRcYeF8cJ4EGfgjhOj+BSAHGsKvWn/uzJxvHMWZUcmkxrdmVA2sqmWCjPxinwAqfADobqpcYhsbEgRsdwCYA/rxpgrSjyE/GuBZBkmWFHC3YA53a77OyxCdD4tpumAPBsgxQ0NQ2BsfWFdZA5Hk4bVqoO6HYlBhgu/4+hRUtagOOq2qUFDH7HCpaeOYdeO2Qs4BTudplRelkNNLMIeXNDjobkAAfdtkwOFP8yxPm5SShGPaEdual3JX+2o7OkJCCppoQdSiLENe9IWumkaq408fx9dkzY5cV4Y2kjLx2WVz3m1MVUZt5i1VS3wH7I7401nGvBOXBC77E4Zx8p4Q6bjVeZ2uHyK3F
5VMev/wc=
\ No newline at end of file
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/docs/packages/appmodel/DFApplication.png b/docs/packages/appmodel/DFApplication.png
new file mode 100644
index 00000000000..b40b409798c
Binary files /dev/null and b/docs/packages/appmodel/DFApplication.png differ
diff --git a/docs/packages/appmodel/README.md b/docs/packages/appmodel/README.md
new file mode 100644
index 00000000000..f108611913b
--- /dev/null
+++ b/docs/packages/appmodel/README.md
@@ -0,0 +1,89 @@
+# Appmodel
+
+ This package extends the schema from the confmodel package
+to describe readout, dataflow and trigger applications.
+
+## SmartDaqApplication
+
+
+
+
+ **SmartDaqApplication** is an abstract class where the modules
+relationship will normally be left empty with the **DaqModules** themselves
+being generated on the fly by an implementation of the
+`generate_modules()` method. The **SmartDaqApplication** has
+relationships to **QueueConnectionRules** and
+
+**NetworkConnectionRules** to allow the `generate_modules()` method to
+know how to connect the modules internally and to network endpoints.
+
+The `generate_modules` method is a pure virtual function that must be implemented for each **SmartDaqApplication**. It should populate the modules relationship of the **DaqApplication** and call `conffwk::update()` so that subsequent calls to `get_modules` will return the newly created objects.
+
+Readout, HSI, Hermes Dataflow and Trigger applications extend from **SmartDaqApplication**
+
+## ReadoutApplication
+
+ 
+
+ The **ReadoutApplication** inherits from **SmartDaqApplication** and provides
+a `generate_modules()` method which will
+generate a **DataReaderModule** for each **DetectorToDaqConnection** associated with the application via the `detector_connections` relationship, and set of **DataHandlerModule** objects, i.e. **DLH** for each
+
+**DetectorStream** plus a single **TPHandlerModule** (FIXME: this shall become a TPHandler per detector plane).
+
+ Optionally **DataRecorderModule** modules may be created (not supported yet). The modules are created
+according to the configuration given by the data_reader, link_handler, data_recorder
+and tp_handler relationships respectively.
+
+ Connections between pairs
+of modules are configured according to the `queue_rules` relationship
+inherited from **SmartDaqApplication**.
+
+### Far Detector schema extensions
+
+
+
+Several OKS classes have far detector specific customisations, as shown in blue the above diagram.
+
+## DataFlow applications
+
+ 
+
+The Dataflow applications, which are also **SmartDaqApplication**s that
+generate **DaqModules** on the fly, are also included here.
+
+## Trigger applications
+
+ 
+
+The Trigger applications, which are also **SmartDaqApplication** which
+generate **DaqModules** on the fly, are also included here.
+
+## WIEC application
+
+ 
+
+The WIEC application is a **SmartDaqApplication** which generates **HermesModule** modules , and **WIBModules**, on the fly.
+
+## Testing SmartDaqApplication module generation
+
+This package also provides a program `generate_modules_test` for
+testing the `generate_modules` method of **SmartDaqApplication**s. It reads
+a configuration from an OKS database, generates the DaqModules for the
+requested SmartDaqApplication and prints a summary of the DaqModules
+and Connections.
+
+
+-----
+
+
+_Last git commit to the markdown source of this page:_
+
+
+_Author: Gordon Crone_
+
+_Date: Thu Sep 4 16:44:30 2025 +0100_
+
+_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/appmodel/issues](https://github.com/DUNE-DAQ/appmodel/issues)_
+
diff --git a/docs/packages/appmodel/SmartDaqApplication.md b/docs/packages/appmodel/SmartDaqApplication.md
new file mode 100644
index 00000000000..9be431c5be2
--- /dev/null
+++ b/docs/packages/appmodel/SmartDaqApplication.md
@@ -0,0 +1,100 @@
+# SmartDaqApplication
+
+The SmartDaqApplication class allows for automatic creation of modules and connections for a known application. The general pattern is that a SDA ingests a set of module configuration objects and connection rules and uses them to create a well-defined application with modules, internal connections, and external connections.
+
+## Writing a new SmartDaqApplication
+
+SmartDaqApplications implement the `std::vector generate_modules(const confmodel::Session*)` method, which is responsible for generating a set of modules and connection objects. Each SmartDaqApplication has a UID from the configuration.
+
+This section will use the "[DFOApplication](https://github.com/DUNE-DAQ/appmodel/blob/develop/src/DFOApplication.cpp)" SmartDaqApplication as an example.
+
+
+## ConfigObjectFactory
+`ConfigObjectFactory` is a helper class to simplify the creation of `appfwk` configuration objects in `SmartApplication`.
+Once instantiated at the start of `generate_modules`, it offers a set of methods to facilitate the creation of configuration objects, queues and network connections.
+
+
+### Creating a module
+
+```C++
+
+ ConfigObjectFactory obj_fac(this):
+
+ TLOG_DEBUG(7) << "creating OKS configuration object for DFOModule class ";
+ conffwk::ConfigObject dfoObj = obj_fac.create("DFOModule", "DFO-"+UID());
+
+ auto dfoConf = get_dfo();
+ dfoObj.set_obj("configuration", &dfoConf->config_object());
+```
+
+Here, it is important to understand the DFOApplication schema definition:
+```XML
+
+
+
+
+
+
+
+```
+In addition to the fields from SmartDaqApplication, the DFOApplication class has a relationship named "dfo" to a `DFOConf` object. As an OKS object, it also has a "UID" field. The code uses this UID (accessed via the `UID()` method) to create the UID for a `DFOModule` object. The object is created in the in-memory database, and its configuration assigned using the "dfo" relationship from the DFOApplication schema.
+
+### Reading connection rules and creating connections
+
+```C++
+
+ for (auto rule : get_network_rules()) {
+ auto endpoint_class = rule->get_endpoint_class();
+ auto descriptor = rule->get_descriptor();
+
+ conffwk::ConfigObject connObj = obj_fac.create_net_obj(descriptor);
+
+ if (descriptor->get_data_type() == "TriggerDecision") {
+ tdInObj = connObj;
+ input_conns.push_back(&tdInObj);
+ }
+ else if (descriptor->get_data_type() == "TriggerDecisionToken") {
+ tokenInObj = connObj;
+ input_conns.push_back(&tokenInObj);
+ }
+
+ else if (descriptor->get_data_type() == "TriggerInhibit") {
+ busyOutObj = connObj;
+ output_conns.push_back(&busyOutObj);
+ }
+ }
+
+```
+
+The next stage of DFOApplication is to retrieve the network connection rules to assign the inputs and outputs of the `DFOModule` instance. A DFO has two fixed inputs (decisions and tokens), and one fixed output (inhibits). Decisions sent to TRB instances are dynamically instantiated at run-time using information in the token messages.
+
+### Setting Module Connection relationships
+
+```C++
+ dfoObj.set_objs("inputs", input_conns);
+ dfoObj.set_objs("outputs", output_conns);
+
+ // Add to our list of modules to return
+ modules.push_back(confdb->get(dfoUid));
+
+ return modules;
+```
+
+Once the fixed connections are retrieved using the network rules, the module's input and output relations are set, and the module is added to the output vector, which is returned.
+
+### Summary
+
+These basic steps are repeated in all SmartDaqApplication instances, with differences depending on the specific application being implemented. The DFOApplication is one of the simplest applications in the system, but it demonstrates the basic logic followed by all SmartDaqApplications.
+
+-----
+
+
+_Last git commit to the markdown source of this page:_
+
+
+_Author: Alessandro Thea_
+
+_Date: Tue May 20 23:32:21 2025 +0200_
+
+_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/appmodel/issues](https://github.com/DUNE-DAQ/appmodel/issues)_
+
diff --git a/docs/packages/appmodel/apps.png b/docs/packages/appmodel/apps.png
new file mode 100644
index 00000000000..0df9d8b182d
Binary files /dev/null and b/docs/packages/appmodel/apps.png differ
diff --git a/docs/packages/appmodel/fd_customizations.png b/docs/packages/appmodel/fd_customizations.png
new file mode 100644
index 00000000000..91281306311
Binary files /dev/null and b/docs/packages/appmodel/fd_customizations.png differ
diff --git a/docs/packages/appmodel/readout.png b/docs/packages/appmodel/readout.png
new file mode 100644
index 00000000000..6fc3cfd120e
Binary files /dev/null and b/docs/packages/appmodel/readout.png differ
diff --git a/docs/packages/appmodel/roApp.png b/docs/packages/appmodel/roApp.png
new file mode 100644
index 00000000000..bf55737c538
Binary files /dev/null and b/docs/packages/appmodel/roApp.png differ
diff --git a/docs/packages/appmodel/trigger.png b/docs/packages/appmodel/trigger.png
new file mode 100644
index 00000000000..9d5dac75b65
Binary files /dev/null and b/docs/packages/appmodel/trigger.png differ
diff --git a/docs/packages/appmodel/wiec_app.png b/docs/packages/appmodel/wiec_app.png
new file mode 100644
index 00000000000..2fc67f6d1af
Binary files /dev/null and b/docs/packages/appmodel/wiec_app.png differ
diff --git a/docs/packages/asiolibs/README.md b/docs/packages/asiolibs/README.md
new file mode 100644
index 00000000000..fe4b8451733
--- /dev/null
+++ b/docs/packages/asiolibs/README.md
@@ -0,0 +1,48 @@
+# Asiolibs
+
+Boost.Asio-based socket reader plugin for low-bandwidth devices
+
+# Example usage
+
+`local-crt-bern-1x1-config` and `local-crt-grenoble-1x1-config` (defined in `daqsystemtest/config/daqsystemtest/example-configs.data.xml`) are session configurations with a CRT reader application accompanied by a socket reader application.
+
+CRT reader application includes a data reader (either `CRTBernReaderModule` or `CRTGrenobleReaderModule`) which reads data from the hardware then puts it into a queue and data writers (`SocketWriterModule`) which read data from the queue then send it over a socket.
+
+Socket reader application includes a data reader (`SocketReaderModule`) which reads data from the socket (`CRTBernFrame`/`CRTGrenobleFrame`) then puts it into another queue to be processed by `DataHandlingModel`.
+
+
+
+
+
+
+
+## How to run
+
+```
+drunc-unified-shell ssh-standalone config/daqsystemtest/example-configs.data.xml local-crt-bern-1x1-config uname-local-test
+
+drunc-unified-shell ssh-standalone config/daqsystemtest/example-configs.data.xml local-crt-grenoble-1x1-config uname-local-test
+```
+
+The following table includes relevant configuration details that can be set by the user. Users can either configure TCP or UDP as the socket type.
+
+| Configuration | Can be changed from | Object ID/Attribute name |
+| ---------------- | ------------------- | ---------------- |
+| Local IP | config/daqsystemtest/moduleconfs.data.xml | def-socket-reader-conf/local_ip
+| Remote IP | config/daqsystemtest/moduleconfs.data.xml | def-socket-writer-conf/remote_ip
+| Port | config/daqsystemtest/ru-segment.data.xml | socket_wib_101_link0/port |
+| Socket type | config/daqsystemtest/moduleconfs.data.xml | def-socket-reader-conf/socket_type def-socket-writer-conf/socket_type |
+
+
+-----
+
+
+_Last git commit to the markdown source of this page:_
+
+
+_Author: Deniz Tuana Ergonul Uzun_
+
+_Date: Wed Jun 11 11:51:26 2025 +0200_
+
+_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/asiolibs/issues](https://github.com/DUNE-DAQ/asiolibs/issues)_
+
diff --git a/docs/packages/asiolibs/crt-reader-and-readout-apps.png b/docs/packages/asiolibs/crt-reader-and-readout-apps.png
new file mode 100644
index 00000000000..405a0f4ed40
Binary files /dev/null and b/docs/packages/asiolibs/crt-reader-and-readout-apps.png differ
diff --git a/docs/packages/asiolibs/local-crt-bern-1x1-config.svg b/docs/packages/asiolibs/local-crt-bern-1x1-config.svg
new file mode 100644
index 00000000000..277afa7a654
--- /dev/null
+++ b/docs/packages/asiolibs/local-crt-bern-1x1-config.svg
@@ -0,0 +1,279 @@
+
+
+
+
+
diff --git a/docs/packages/asiolibs/local-crt-grenoble-1x1-config.svg b/docs/packages/asiolibs/local-crt-grenoble-1x1-config.svg
new file mode 100644
index 00000000000..d3c039226b1
--- /dev/null
+++ b/docs/packages/asiolibs/local-crt-grenoble-1x1-config.svg
@@ -0,0 +1,279 @@
+
+
+
+
+
diff --git a/docs/packages/asiolibs/local-socket-1x1-config.svg b/docs/packages/asiolibs/local-socket-1x1-config.svg
new file mode 100644
index 00000000000..beb602235aa
--- /dev/null
+++ b/docs/packages/asiolibs/local-socket-1x1-config.svg
@@ -0,0 +1,279 @@
+
+
+
+
+
diff --git a/docs/packages/confmodel/README.md b/docs/packages/confmodel/README.md
new file mode 100644
index 00000000000..84b3ca9f223
--- /dev/null
+++ b/docs/packages/confmodel/README.md
@@ -0,0 +1,148 @@
+# confmodel
+This package contains the core schema for the DUNE DAQ OKS configuration.
+
+ 
+
+The top level of the schema is the **Session** which defines some global
+DAQ parameters and has a relationship to a single top-level **Segment**.
+It also has a list of disabled [Resources](#resources-and-resourcesets). It is intended that parts of
+the DAQ system that are not required in the current run are simply
+disabled rather than deleted from the database altogether.
+
+A **Segment** is a logical grouping of applications which
+are controlled by a single controller (**RCApplication**). A **Segment** may contain other
+nested **Segment**s. A **Segment** is a Resource that can be enabled/disabled [(see below)](#resources-and-resourcesets),
+disabling a **Segment** disables all of its nested **Segment**s.
+
+The **Application** class has attributes defining the application's
+ `application_name` (executable name) and `commandline_parameters`. Its
+ `application_environment` relationship lists environment variables needed by the
+ application in addition to those defined by the **Session**.
+
+## Resources and ResourceSets
+
+
+**Resource** is an abstract class describing an item that can be
+disabled directly. It has the method `is_disabled(const dunedaq::confmodel::ResourceTree& session)` which can be called
+by application code to determine if the object should be considered
+disabled for this session (Session is a subclass of ResourceTree). The [disabling logic](#the-resource-disabled-logic) calls the virtual
+`compute_disabled_state(const std::set& disabled_resources)` method to determine the state of the Resource, the disabled_resources argument is a list of UIDs of all the Resources that have been disabled so far. The
+implementation provided by the base class just checks that the object
+itself is not in the list of disabled objects. Derived classes can
+re-implement this method with whatever logic is needed to determine the
+state of the object, for example the **ResourceSetDisableAND** class
+provides an implementation that ANDs together the state of all of its
+contained objects.
+
+
+**ResourceSet** is an abstract container of **Resource**s which can be disabled together. It
+is itself a Resource (so can be nested). It defines a pure virtual method `contained_resources()` which returns a vector of pointers to 'contained' resources. Developers should implement this method to extract any resources that need to be considered for determining the disabled state of the set from among the class's relationships. The class may have relationships to other Resource derived
+objects that will be ignored for the disabled check.
+
+
+**ResourceSetDisableAND** is a container of **Resource**s which will
+be disabled if *all* of its **Resource**s are disabled. It provides a
+final implementation of the ResourceSet::compute_disabled_state() method.
+
+
+**ResourceSetDisableOR** is a container of **Resource**s which
+provides a final implementation of the ResourceSet::compute_disabled_state()
+method returning true if *any* of its contained **Resource**s are
+disabled.
+
+
+**Segment** is a container of **Segment**s and **Applications**
+which inherits from **ResourceSetDisableAND** so it can be disabled
+directly or indirectly if all its components are disabled.
+
+ 
+
+### The Resource disabled logic
+
+The Resource disabled logic works on a single tree of **ResourceSets**.
+It is held by the virtual class **ResourceTree**; currently **Session**
+is the only concrete class derived from it.
+The **ResourceTree** holds a **DisabledResources** object which is initialised with a reference to the root **Segment**
+and the list of disabled resources from its `disabled` relationship.
+
+⚠️**Any ResourceSet that is not referenced by a ResourceSet in the tree
+starting at the Session's segment relationship will not be considered
+by the disabling logic!**
+
+The **DisabledResources** constructor will configure itself using the
+tree of Resources and initial list of disabled Resources.
+To start with, the UID of each member of the list is inserted into a
+set and any 'contained' (using the `contained_resources()` method) Resources
+are also disabled.
+
+A list of all ResourceSets in the tree is generated by recursively
+calling `contained_resources()` and iterating over all the ResourceSets.
+Then it iterates over the list of **ResourceSet**s. If a ResourceSet
+is not currently in the disabled set, it will call the `compute_disabled_state()`
+method to see if its state has been changed by the current content of
+the disabled set. It will repeat this procedure until an iteration
+that ends with the same number of disabled resources it started with.
+
+
+## Readout Map
+
+ 
+
+(the blue classes in the diagram are not part of confmodel and are
+there to show how the other parts fit together)
+
+The readout map is defined in terms of **DetectorStream** objects
+which define a one to one mapping between a source_id and a
+
+**GeoID** object. A collection of streams are
+aggregated into a **DetDataSender** and a group of **DetDataSender**
+objects are contained in a **DetectorToDaqConnection** along with a
+single **DetDataReceiver**.
+
+### Resource handling in the readout map
+
+The **DetectorToDaqConnection** is a **ResourceSet** with a custom implementation of `compute_disabled_state()` that checks that the **DetDataReceiver** and at least one **DetDataSender** are enabled.
+
+The **DetDataSender** is a **ResourceSetDisableAND** that contains a set of **DetectorStream** **Resource**s.
+
+
+
+## Finite State Machines
+Each controller (**RCApplication**) uses one **FSMConfiguration** object that describes actions, transitions and sequences.
+
+ 
+
+## Notes
+
+### VirtualHost
+
+ The idea is that this describes the subset of resources of a physical
+host server that are available to an Application. For example two
+applications may be assigned to the same physical server but each be
+allocated resources of a different NUMA node.
+
+### **DaqApplication** and **DaqModule**
+
+ The **DaqApplication** contains a list of **DaqModule**s each of which has a
+list of used resources. The **DaqApplication** provides a method
+`get_used_hostresources` which can be called by `appfwk` in order to check
+that these resources are indeed associated with the VirtualHost by
+comparing with those listed in its `hw_resources` relationship.
+
+### NetworkConnection
+ Describes the connection type and points to the **Service** running over this connection.
+
+
+
+-----
+
+
+_Last git commit to the markdown source of this page:_
+
+
+_Author: Gordon Crone_
+
+_Date: Mon Oct 27 15:46:35 2025 +0000_
+
+_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/confmodel/issues](https://github.com/DUNE-DAQ/confmodel/issues)_
+
diff --git a/docs/packages/confmodel/ReadoutMap.png b/docs/packages/confmodel/ReadoutMap.png
new file mode 100644
index 00000000000..36aaa4b3d0a
Binary files /dev/null and b/docs/packages/confmodel/ReadoutMap.png differ
diff --git a/docs/packages/confmodel/environment.png b/docs/packages/confmodel/environment.png
new file mode 100644
index 00000000000..7862878448d
Binary files /dev/null and b/docs/packages/confmodel/environment.png differ
diff --git a/docs/packages/confmodel/fsm.png b/docs/packages/confmodel/fsm.png
new file mode 100644
index 00000000000..2c7152df115
Binary files /dev/null and b/docs/packages/confmodel/fsm.png differ
diff --git a/docs/packages/confmodel/resources.png b/docs/packages/confmodel/resources.png
new file mode 100644
index 00000000000..22f6baf3328
Binary files /dev/null and b/docs/packages/confmodel/resources.png differ
diff --git a/docs/packages/confmodel/resourcetree.png b/docs/packages/confmodel/resourcetree.png
new file mode 100644
index 00000000000..98bfa35fd83
Binary files /dev/null and b/docs/packages/confmodel/resourcetree.png differ
diff --git a/docs/packages/confmodel/schema.png b/docs/packages/confmodel/schema.png
new file mode 100644
index 00000000000..3f54e25483e
Binary files /dev/null and b/docs/packages/confmodel/schema.png differ
diff --git a/docs/packages/confmodel/schema.view b/docs/packages/confmodel/schema.view
new file mode 100644
index 00000000000..663ca696f5a
--- /dev/null
+++ b/docs/packages/confmodel/schema.view
@@ -0,0 +1,30 @@
+DetDataSender,429,221
+DetDataReceiver,856,113
+ResourceTree,1011,13
+Session,948,294
+DetectorConfig,888,599
+VariableBase,391,307
+VariableSet,202,228
+Variable,420,377
+Resource,617,14
+ResourceSet,670,150
+ResourceSetDisableAND,663,226
+Segment,649,452
+Application,135,375
+RCApplication,527,616
+DaqApplication,134,674
+DaqModule,358,817
+Connection,125,814
+NetworkConnection,2,947
+Queue,244,946
+NetworkInterface,961,777
+NetworkDevice,950,946
+HostComponent,777,831
+VirtualHost,403,717
+PhysicalHost,794,725
+ProcessingResource,605,949
+StorageDevice,780,942
+ConnectionService,107,560
+Service,28,227
+DetectorStream,475,127
+DetectorToDaqConnection,877,175
diff --git a/docs/packages/connectivityserver/README.md b/docs/packages/connectivityserver/README.md
new file mode 100644
index 00000000000..a6bc6eaf3cc
--- /dev/null
+++ b/docs/packages/connectivityserver/README.md
@@ -0,0 +1,87 @@
+# connectivityserver
+
+ This service provides a very simple flask based
+server to serve connection information to DAQ applications.
+
+
+## REST interface
+
+ The server responds to the following URIs
+
+### /publish
+ Allows publication of connection information. The content of the
+ request should be JSON encoded. For example, the following json file
+ can be published using curl.
+
+```
+> cat publish.json
+{
+ "connections":[
+ {
+ "connection_type":0,
+ "data_type":"TPSet",
+ "uid":"DRO-000-tp_to_trigger",
+ "uri":"tcp://192.168.1.100:1234"
+ },
+ {
+ "connection_type":0,
+ "data_type":"TPSet",
+ "uid":"DRO-001-tp_to_trigger",
+ "uri":"tcp://192.168.1.100:1235"
+ }
+ ],
+ "partition":"ccTest"
+}
+
+> curl -d @publish.json -H "content-type: application/json" \
+ http://connection-flask.connections:5000/publish
+```
+
+### /getconnection/
+This uri returns a list of connections matching the 'uid_regex' and
+'data_type' specified in the JSON encoded request.
+
+```
+curl -d '{"uid_regex":"DRO.*","data_type":"TPSet"}' \
+ -H "content-type: application/json" \
+ http://connection-flask.connections:5000/getconnection/ccTest
+[{"uid": "DRO-000-tp_to_trigger", "uri": "tcp://192.168.1.100:1234", "connection_type": 0, "data_type": "TPSet"}, {"uid": "DRO-001-tp_to_trigger", "uri": "tcp://192.168.1.100:1235", "connection_type": 0, "data_type": "TPSet"}]
+```
+
+
+### /retract
+This uri should be used to remove published connections. The request should be JSON encoded with the keys "partition" and "connections" with the latter being an array of "connection_id" and "data_type" values.
+
+
+### /retract-partition
+This uri should be used to remove all published connections from the
+given partition. The request should be JSON encoded with one field "partition" naming the partition to be retracted.
+
+## Running the server locally from the command line
+ The server is intended to be run under the Gunicorn web server.
+
+ ```
+ gunicorn -b 0.0.0.0:5000 --workers=1 --worker-class=gthread --threads=2 \
+ --timeout 5000000000 connectivityserver.connectionflask:app
+ ```
+
+Some debug information will be printed by the connection-flask if the
+environment variable 'CONNECTION_FLASK_DEBUG' is set to a number
+greater than 0. Currently 1 will print timing information for the
+publish/lookup calls. 2 will give information about what was
+published/looked up and 3 is even more verbose printing the actual
+JSON of the requests.
+
+
+-----
+
+
+_Last git commit to the markdown source of this page:_
+
+
+_Author: Gordon Crone_
+
+_Date: Thu Oct 16 16:54:02 2025 +0100_
+
+_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/connectivityserver/issues](https://github.com/DUNE-DAQ/connectivityserver/issues)_
+
diff --git a/docs/packages/connectivityserver/deploy/README.md b/docs/packages/connectivityserver/deploy/README.md
new file mode 100644
index 00000000000..eb7b29f4640
--- /dev/null
+++ b/docs/packages/connectivityserver/deploy/README.md
@@ -0,0 +1,50 @@
+# connectivityserver
+
+ This service provides a very simple flask based
+server to serve connection information to DAQ applications.
+
+## Installation
+
+To build the docker image of develop just do
+```bash
+docker buildx build --tag ghcr.io/dune-daq/connectivityserver:latest .
+```
+Or, if you want to specify a tag
+```bash
+docker buildx build --tag ghcr.io/dune-daq/connectivityserver:v1.3.0 --build-arg VERSION=v1.3.0 .
+```
+
+ Apply the kubernetes manifest from connectivityserver.yaml. This
+ should start a service called connectionservice in the namespace
+ connections.
+
+```
+kubectl apply -f connectivityserver.yaml
+```
+
+To test the basic operation of the server, you can connect to pod in the k8s cluster and try getting the root document.
+
+```
+> kubectl exec myPod -i -t -- bash
+[root@myPod /]# curl http://connectionservice.connections:5000
+
Dump of configuration dictionary
Active partitions
None
Server statistics
Since 2023-03-16 09:15:06.571492
0 calls to publish in total time 0:00:00 (average 0 µs per call)
0 calls to lookup in total time 0:00:00 (average 0 µs per call)
Maximum number of partitions active = 0
+[root@myPod /]#
+```
+
+## Connectivityserver operation
+Please refer to the documentation in the
+connectivityserver package [https://github.com/DUNE-DAQ/connectivityserver].
+
+
+-----
+
+
+_Last git commit to the markdown source of this page:_
+
+
+_Author: Gordon Crone_
+
+_Date: Thu Oct 16 16:54:02 2025 +0100_
+
+_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/connectivityserver/issues](https://github.com/DUNE-DAQ/connectivityserver/issues)_
+
diff --git a/docs/packages/ctbmodules/README.md b/docs/packages/ctbmodules/README.md
new file mode 100644
index 00000000000..41272359f41
--- /dev/null
+++ b/docs/packages/ctbmodules/README.md
@@ -0,0 +1,74 @@
+# ctbmodules - DUNE DAQ module to control and read out the CTB hardware
+
+Ported from original implementation in redmine:
+
+
+
+
+
+
+
+
+## Instructions to update the configuration and run with dunedaq v5 line
+
+### Area setup
+First of all you need a v5 area.
+To do this follow the instructions in the daqconf wiki, for example [fddaq-v5.3.2](https://github.com/DUNE-DAQ/daqconf/wiki/Setting-up-a-fddaq%E2%80%90v5.3.2-software-area).
+
+Locally, in the top area, you also need the [base configuration repository](https://gitlab.cern.ch/dune-daq/online/ehn1-daqconfigs).
+Please note that the repo on gitlab are only accessible via ssh key, so please register one in the CERN gitlab.
+I recommend you also set in your area a `.netrc` file as in the `np04daq` home, remember to change login to your CERN username.
+After that you can simply
+```bash
+git clone ssh://git@gitlab.cern.ch:7999/dune-daq/online/ehn1-daqconfigs.git
+```
+Or alternatively
+```bash
+cpm-setup -b fddaq-v5.3.2 ehn1-daqconfigs
+```
+The first one is a direct clone, while the second sets up the configuration repo to do some more advanced operations, so the default branches might be a little strange.
+Further documentation on the various `cpm-*` commands can be found in [the runconftools documentation](https://github.com/DUNE-DAQ/runconftools/blob/develop/docs/README.md).
+The second only works if you have setup the `.netrc` file correctly.
+To conclude, just
+```bash
+source ehn1-daqconfigs/setup_db_path.sh
+```
+
+### Update the CTB configuration
+The ehn1-daqconfigs contains already a valid configuration for the CTB.
+Due to the implementation of HLTs and LLTs as confmodel::resource, it's best if any branch of ehn1-daqconfigs contains only one version of each.
+So, as CTB experts the only thing you should do is to update the value of the objects already created in ehn1-daqconfigs.
+
+In order to do so, there is a script called `update_ctb_settings`.
+Typical usage is:
+```bash
+update_ctb_settings ehn1-daqconfigs/sessions/np02-session.data.xml
+```
+This will do the following:
+ - It will change the value of every object in the configuration related to the CTB according to the JSON file you provide
+ - It will enable/disable HLTs and LLTs according to your configuration
+Please keep in mind that HLTs can also be enabled/disabled via the shifter interface (see dedicated section).
+
+Once you are happy with the changes, you can commit and push the changes on a branch on ehn1-daqconfigs and open a merge request toward the dedicated branch.
+
+### Run the CTB configuration
+In order to run, start using the shifter interface in local mode:
+```bash
+runconf-shifter-ui -l -d ehn1-daqconfigs
+```
+From the interface select which components you need, select which HLTs you want to enable and click `create`.
+The output of the shifter interface will tell you how to run.
+
+
+-----
+
+
+_Last git commit to the markdown source of this page:_
+
+
+_Author: Marco Roda_
+
+_Date: Wed Jun 18 11:19:38 2025 +0200_
+
+_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/ctbmodules/issues](https://github.com/DUNE-DAQ/ctbmodules/issues)_
+
diff --git a/docs/packages/daq-assettools/README.md b/docs/packages/daq-assettools/README.md
index 31df43f8c15..d9429d3826d 100644
--- a/docs/packages/daq-assettools/README.md
+++ b/docs/packages/daq-assettools/README.md
@@ -1,70 +1,37 @@
# DAQ Asset Tools
+## Overview
+
DAQ asset files are stored under a 3-level hashed directory in `/cvmfs/dunedaq.opensciencegrid.org/assets/files`. Each asset file has an associated json file with its metadata under the same directory.
There is a SQLite database file (`dunedaq-asset-db.sqlite`) under `/cvmfs/dunedaq.opensciencegrid.org/assets`. Metadata of the files are also stored in this database file.
-This repository contains a set of tools to manage these DAQ asset files.
+This repository contains a set of tools to manage these DAQ asset files, available [once the standard DUNE DAQ environment has been set up](https://dune-daq-sw.readthedocs.io/en/latest/packages/daq-buildtools/).
-- `assets-list`: list asset files;
-- `assets-add`: adding new asset files to the catalog;
-- `assets-update`: update asset files' metadata;
-- `assets-retire`: retire asset files.
+- `assets-list`: list asset files
+- `assets-add`: adding new asset files to the catalog
+- `assets-update`: update asset files' metadata
+- `assets-retire`: retire asset files
-Files listed in this [spreadsheet](https://docs.google.com/spreadsheets/d/1oDYe1eEqJhkY0DTd6mfpLw9ou7TqBCaDEgTo0qqVmqY/edit#gid=0) are being cataloged. When adding new files, please add new entries to the spreadsheet and let Software Coordination team to catalog and publish the files.
+Each command has a `-h` option which will tell you how to use it in detail; some of the highlights are covered in this document.
-### Installation
+Files which are part of our assets are catalogued in this [spreadsheet](https://docs.google.com/spreadsheets/d/1oDYe1eEqJhkY0DTd6mfpLw9ou7TqBCaDEgTo0qqVmqY/edit#gid=0), where they provide info to users about each asset. When developers and testers want a new asset, they should open an issue in this repository and select the "Request to add a DAQ asset file" form. The Software Coordination team will then publish the file to `cvmfs`.
-`pip install git+https://github.com/DUNE-DAQ/daq-assettools@v1.0.0#egg=daq-assettools`
+Note that asset files shouldn't exceed more than a couple hundred MB in size; cvmfs responds badly to files larger than that.
-## How to get path to asset files
+## How to see which asset files are available
-`assets-list` is the tool for getting the path to asset files.
+`assets-list` is the tool for this. It's a flexible tool; see `assets-list -h` for all available options. Here are some examples:
-Examples:
- `assets-list --subsystem readout`
-- `assets-list --subsystem readout --copy-to ./`: list files of `readout` subsystem, and copy them to the current directory. The copied file will be renamed as `file-.ext`, assuming its original file name is `file.ext`;
-- `assets-list -c dc74fe934cfb603d74ab6e54a0af7980`: list single file matching the MD5 file checksum;
-- `assets-list -c dc74fe934cfb603d74ab6e54a0af7980 --copy-to ./`: list single file matching the MD5 file checksum and copy the file to the current directory;
-- `assets-list -c dc74fe934cfb603d74ab6e54a0af7980 | awk '{print $NF}'`: get the file path only;
+- `assets-list --subsystem readout --copy-to ./`: list files of `readout` subsystem, and copy them to the current directory. The copied file will be renamed as `file-.ext`, assuming its original file name is `file.ext`
+- `assets-list -c dc74fe934cfb603d74ab6e54a0af7980`: list single file matching the MD5 file checksum
+- `assets-list -c dc74fe934cfb603d74ab6e54a0af7980 --copy-to ./`: list single file matching the MD5 file checksum and copy the file to the current directory
- `assets-list --subsystem readout --format binary --status valid --print-metadata`
-```
-usage: assets-list [-h] [--db-file DB_FILE] [-n NAME]
- [--subsystem {readout,trigger}] [-l LABEL]
- [-f {binary,text}]
- [--status {valid,expired,new_version_available}]
- [--description DESCRIPTION] [--replica-uri REPLICA_URI]
- [-p] [--copy-to COPY_TO]
-
-optional arguments:
- -h, --help show this help message and exit
- --db-file DB_FILE path to database file (default:
- /cvmfs/dunedaq.opensciencegrid.org/assets/dunedaq-
- asset-db.sqlite)
- -n NAME, --name NAME asset name (default: None)
- --subsystem {readout,trigger}
- asset subsystem (default: None)
- -l LABEL, --label LABEL
- asset label (default: None)
- -f {binary,text}, --format {binary,text}
- asset file format (default: None)
- --status {valid,expired,new_version_available}
- asset file status (default: None)
- -c CHECKSUM, --checksum CHECKSUM
- MD5 checksum of asset file (default: None)
- --description DESCRIPTION
- description of asset file (default: None)
- --replica-uri REPLICA_URI
- replica URI (default: None)
- -p, --print-metadata print full metadata (default: False)
- --copy-to COPY_TO path to the directory where asset files will be copied to. (default: None)
-
-```
-
## How to add, update, and retire asset files
-Note: these operations require write permissions to the database file, and file storage directories. Only Software Coordination team members need to perform these operations.
+_Note: these operations require write permissions to the database file, and file storage directories. Only Software Coordination team members need to perform these operations._
### `assets-add`
@@ -73,118 +40,73 @@ Note: these operations require write permissions to the database file, and file
The tool can take metadata fields from command line as well as from a JSON file. If both are presented, command-line entries take the precedence.
Examples:
-- `assets-add -s ./frames.bin --db-file ./dunedaq-asset-db.sqlite -n frames.bin -f binary --status valid --subsystem readout --label ProtoWIB --description "Used for FE emulation in FakeCardReader"`
-```
-usage: assets-add [-h] [--db-file DB_FILE] [-n NAME]
- [--subsystem {readout,trigger}] [-l LABEL]
- [-f {binary,text}]
- [--status {valid,expired,new_version_available}]
- [--description DESCRIPTION] [--replica-uri REPLICA_URI]
- [-s SOURCE] [--json-file JSON_FILE]
-
-optional arguments:
- -h, --help show this help message and exit
- --db-file DB_FILE path to database file (default:
- /cvmfs/dunedaq.opensciencegrid.org/assets/dunedaq-
- asset-db.sqlite)
- -n NAME, --name NAME asset name (default: None)
- --subsystem {readout,trigger}
- asset subsystem (default: None)
- -l LABEL, --label LABEL
- asset label (default: None)
- -f {binary,text}, --format {binary,text}
- asset file format (default: None)
- --status {valid,expired,new_version_available}
- asset file status (default: None)
- -c CHECKSUM, --checksum CHECKSUM
- MD5 checksum of asset file (default: None)
- --description DESCRIPTION
- description of asset file (default: None)
- --replica-uri REPLICA_URI
- replica URI (default: None)
- -s SOURCE, --source SOURCE
- path to asset file (default: None)
- --json-file JSON_FILE
- json file containing file metadata (default: None)
-
-```
+- `assets-add -s ./frames1234.bin --db-file ./dunedaq-asset-db.sqlite -n frames1234.bin -f binary --status valid --subsystem readout --label WIBEth --description "Used for FE emulation in FakeCardReader"`
### `assets-update`
Use `assets-update` to update certain metadata fields of a file. Similar as other tools, it takes the metadata fields from command-line for matching files in the database. Additionally, it takes a JSON string from command-line for the new metadata.
Examples:
-- `assets-update --subsystem readout --label ProtoWIB --json-string '{"description": "Used for FE emulation in FakeCardReader during Integration Week."}'`
-- `assets-update -c dc74fe934cfb603d74ab6e54a0af7980 --json-string '{"status": "valid"}'`
-```
-usage: assets-update [-h] [--db-file DB_FILE] [-n NAME]
- [--subsystem {readout,trigger}] [-l LABEL]
- [-f {binary,text}]
- [--status {valid,expired,new_version_available}]
- [--description DESCRIPTION] [--replica-uri REPLICA_URI]
- [--json-string JSON_STRING]
-
-optional arguments:
- -h, --help show this help message and exit
- --db-file DB_FILE path to database file (default:
- /cvmfs/dunedaq.opensciencegrid.org/assets/dunedaq-
- asset-db.sqlite)
- -n NAME, --name NAME asset name (default: None)
- --subsystem {readout,trigger}
- asset subsystem (default: None)
- -l LABEL, --label LABEL
- asset label (default: None)
- -f {binary,text}, --format {binary,text}
- asset file format (default: None)
- --status {valid,expired,new_version_available}
- asset file status (default: None)
- -c CHECKSUM, --checksum CHECKSUM
- MD5 checksum of asset file (default: None)
- --description DESCRIPTION
- description of asset file (default: None)
- --replica-uri REPLICA_URI
- replica URI (default: None)
- --json-string JSON_STRING
- json string to be updated in metadata (default: None)
-```
+- `assets-update --subsystem readout --label WIBEth --json-string '{"description": "Used for FE emulation in FakeCardReader during Integration Week."}'`
+- `assets-update -c dc74fe934cfb603d74ab6e54a0af7980 --json-string '{"status": "valid"}'`
### `assets-retire`
`assets-retire` is the tool to retire a file. The operation is as simple as change its metadata field 'status' to 'expired'. It will not delete the file itself.
Examples:
+
- `assets-retire -c dc74fe934cfb603d74ab6e54a0af7980`
+### Publishing changes to cvmfs
+
+Publishing changes to cvmfs can be done via the following steps:
+
+
+
+1. Prepare changes in a local copy of the cvmfs repository's `assets` directory
+
+
+2. On a cvmfs publisher node, open a cvmfs transaction, sync the `assets` directory in the repo to the local mirror with new changes, and publish the changes.
+
+The following code snippet shows a real-case example of adding a new file to the database, and "retire" a previous file. For space/logistical reasons it doesn't show that (1) the file also gets logged in the spreadsheet and (2) a DUNE DAQ environment has already been set up.
+
+#### Prepare changes in a local "assets" mirror
+
+```bash
+
+# Create a local mirror of "assets"
+
+rsync -vlprt /cvmfs/dunedaq.opensciencegrid.org/assets .
+
+# Make changes to the local assets mirror
+# Specify the db file path with the `--db-file` option so that the changes go to the local mirror;
+
+## Adding a new file
+
+cd ./assets
+
+# Note that the name, label and description here are just given as examples
+assets-add -s --db-file ./dunedaq-asset-db.sqlite -n wib_link_67.bin -f binary --status valid --subsystem readout --label WIBEth --description "Other WIBEth files have outdated detector_id fields in DAQEthHeader"
+
+## Retiring a file, referring to it by its hash
+
+assets-retire --db-file ./dunedaq-asset-db.sqlite -c a0ddae8343e82ba1a3668c5aea20f3d2
+
+## More low-level: accomplishing the same as above, but via the assets-update command
+
+assets-update --db-file ./dunedaq-asset-db.sqlite -c a0ddae8343e82ba1a3668c5aea20f3d2 --json-string '{"status": "expired"}'
+
```
-usage: assets-retire [-h] [--db-file DB_FILE] [-n NAME]
- [--subsystem {readout,trigger}] [-l LABEL]
- [-f {binary,text}]
- [--status {valid,expired,new_version_available}]
- [-c CHECKSUM] [--description DESCRIPTION]
- [--replica-uri REPLICA_URI]
-
-optional arguments:
- -h, --help show this help message and exit
- --db-file DB_FILE path to database file (default:
- /cvmfs/dunedaq.opensciencegrid.org/assets/dunedaq-
- asset-db.sqlite)
- -n NAME, --name NAME asset name (default: None)
- --subsystem {readout,trigger}
- asset subsystem (default: None)
- -l LABEL, --label LABEL
- asset label (default: None)
- -f {binary,text}, --format {binary,text}
- asset file format (default: None)
- --status {valid,expired,new_version_available}
- asset file status (default: None)
- -c CHECKSUM, --checksum CHECKSUM
- MD5 checksum of asset file (default: None)
- --description DESCRIPTION
- description of asset file (default: None)
- --replica-uri REPLICA_URI
- replica URI (default: None)
+
+#### Publish changes to cvmfs
+
+Technical details of how to publish to cvmfs [is covered in the daq-release documentation](https://dune-daq-sw.readthedocs.io/en/latest/packages/daq-release/publish_to_cvmfs/#the-basics). Here, after modifying your local mirror of `assets`, you'd sync it to /cvmfs/dunedaq.opensciencegrid.org/assets:
+
+```bash
+rsync -vlprt : /cvmfs/dunedaq.opensciencegrid.org
```
@@ -194,9 +116,9 @@ optional arguments:
_Last git commit to the markdown source of this page:_
-_Author: Pengfei Ding_
+_Author: Kurt Biery_
-_Date: Fri Feb 10 02:43:31 2023 -0600_
+_Date: Mon Oct 13 21:21:07 2025 -0500_
_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/daq-assettools/issues](https://github.com/DUNE-DAQ/daq-assettools/issues)_
diff --git a/docs/packages/daq-buildtools/README.md b/docs/packages/daq-buildtools/README.md
index 4cacc9490d7..8003f915bd1 100644
--- a/docs/packages/daq-buildtools/README.md
+++ b/docs/packages/daq-buildtools/README.md
@@ -1,24 +1,16 @@
-
-_JCF: This document was last edited Feb-10-2023_
-
# DUNE DAQ Buildtools
+_This document was last edited Dec-13-2025_
+
`daq-buildtools` is the toolset to simplify the development of DUNE DAQ packages. It provides environment and building utilities for the DAQ Suite.
+If you've read these instructions before, release notes for specific
+versions of daq-buildtools can be found at the bottom of this
+document.
+
## System requirements
-To get set up, you'll need access to the cvmfs Spack area
-`/cvmfs/dunedaq-development.opensciencegrid.org/spack-nightly` as is
-the case, e.g., on the lxplus machines at CERN. If you've been doing
-your own Spack work on the system in question, you may also want to
-back up (rename) your existing `~/.spack` directory to give Spack a
-clean slate to start from in these instructions.
-
-You'll also want `python` to be version 3; to find out whether this is the case, run `python --version`. If it isn't, then you can switch over to Python 3 with the following simple commands:
-```
-source `realpath /cvmfs/dunedaq.opensciencegrid.org/spack-externals/spack-installation/share/spack/setup-env.sh`
-spack load python@3.8.3%gcc@8.2.0
-```
+To get set up, you'll need access to the cvmfs areas `/cvmfs/dunedaq.opensciencegrid.org` and `/cvmfs/dunedaq-development.opensciencegrid.org`. This is the case, e.g., on the np04 cluster at CERN.
## Setup of `daq-buildtools`
@@ -26,15 +18,17 @@ spack load python@3.8.3%gcc@8.2.0
Simply do:
```
source /cvmfs/dunedaq.opensciencegrid.org/setup_dunedaq.sh
-setup_dbt dunedaq-v3.2.2 # dunedaq-v3.2.2 is the latest daq-buildtools version as of Feb-10-2023
+setup_dbt fddaq-v5.5.0
```
+Note that `fddaq-v5.5.0` is aliased to `v8.9.11`.
After running these two commands, then you'll see something like:
```
-Added /cvmfs/dunedaq.opensciencegrid.org/tools/dbt/v7.0.0/bin -> PATH
-Added /cvmfs/dunedaq.opensciencegrid.org/tools/dbt/v7.0.0/scripts -> PATH
+Added /cvmfs/dunedaq.opensciencegrid.org/tools/dbt/v8.9.11/bin -> PATH
+Added /cvmfs/dunedaq.opensciencegrid.org/tools/dbt/v8.9.11/scripts -> PATH
DBT setuptools loaded
```
+
If you type `dbt-` followed by the `<tab>` key you'll see a listing of available commands, which include `dbt-create`, `dbt-build`, `dbt-setup-release` and `dbt-workarea-env`. These are all described in the following sections.
Each time that you log into a fresh Linux shell and want to either (1) set up an existing cvmfs-based DUNE DAQ software release or (2) develop code within a pre-existing DUNE DAQ work area, you'll need to set up daq-buildtools. These two cases are described in detail momentarily. For (1) you'd want to repeat the method above to set up daq-buildtools. For (2) it's easier instead to `cd` into the work area and source the file named `env.sh`.
@@ -42,15 +36,17 @@ Each time that you log into a fresh Linux shell and want to either (1) set up an
## Running a release from cvmfs
-If you simply want access to a DUNE DAQ software release (its executables, etc.) without actually developing DUNE DAQ software itself, you'll want to run a release from cvmfs. After setting up daq-buildtools, you can simply run the following command if you wish to use a frozen release:
+If you only want access to a DUNE DAQ software release (its executables, etc.) without actually developing DUNE DAQ software itself, you'll want to run a release from cvmfs. Please note that in general, stable releases (especially patch stable releases) are intended for this scenario, and _not_ for development. After setting up daq-buildtools, you can simply run the following command if you wish to use a stable release:
```sh
-dbt-setup-release # dunedaq-v3.2.2 is the latest frozen release as of Feb-10-2023
+dbt-setup-release # fddaq-v5.5.0-a9 is the latest stable release as of Dec-13-2025
```
-Instead of a frozen release you can also set up nightly releases, candidate releases or test releases using the same arguments as are described later for `dbt-create`; e.g. if you want to set up candidate release `rc-v3.2.1-2` you can do:
+Note that if you set up a stable release you'll get a message along the lines of `Release "fddaq-v5.5.0-a9" requested; interpreting this as release "fddaq-v5.5.0-a9-1"`; this simply reflects that the latest build iteration of the stable release (`-1`, `-2`, etc.) has been alias'd out for the convenience of the user.
+
+Instead of a stable release you can also set up nightly releases or candidate releases using the same arguments as are described later for `dbt-create`; e.g. if you want to set up candidate release `fddaq-v5.2.0-rc3-a9` you can do:
```
-dbt-setup-release -b candidate rc-v3.2.1-2
+dbt-setup-release -b candidate fddaq-v5.2.0-rc3-a9
```
`dbt-setup-release` will set up both the external packages and DAQ packages, as well as activate the Python virtual environment. Note that the Python virtual environment activated here is read-only.
@@ -62,34 +58,30 @@ If you wish to develop DUNE DAQ software, you can start by creating a work area.
Each work area is based on a DUNE DAQ software release, which defines what external and DUNE DAQ packages the code you develop in a work area are built against. Releases come in four categories:
-* **Nightly Releases**: packages in nightly releases are built each night using the heads of their `develop` branches. Generally labeled as `N<YY>-<MM>-<DD>`, e.g. `N22-11-27`.
+* **Nightly Releases**: packages in nightly releases are built each night using the heads of their `develop` and `production/v4` branches. Depending on whether it's the far detector stack or the near detector stack, and whether it's a develop or production build, these are generally labeled either as `NFD_<TYPE>_<YYMMDD>_<OS>` (far detector) or `NND_<TYPE>_<YYMMDD>_<OS>` (near detector). E.g. `NFD_DEV_240716_A9` is the AL9 nightly develop build for the far detector on July 16th, 2024, and `NFD_PROD4_250202_A9` is the v4 production nightly build on February 2, 2025.
-* **Frozen Releases**: a frozen release typically comes out every couple of months, and only after extensive testing supervised by a Release Coordinator. Generally labeled as `dunedaq-vX.Y.X`, e.g. `dunedaq-v3.2.2`
+* **Stable Releases**: a stable release typically comes out every couple of months, and only after extensive testing supervised by a Release Coordinator. Depending on whether it's the far detector stack or the near detector stack, this is labeled as `fddaq-vX.Y.Z-<os>` or `nddaq-vX.Y.Z-<os>`, e.g., `fddaq-v4.4.4-a9`.
-* **Candidate Releases**: a type of release meant specifically for frozen release testing. Generally labeled as `rc-vX.Y.Z-`, e.g. `rc-v3.2.1-1`
+* **Candidate Releases**: a type of release meant specifically for stable release testing. Generally labeled as `fddaq-vX.Y.Z-rc<N>-<os>` or `nddaq-vX.Y.Z-rc<N>-<os>`. For example, `fddaq-v4.4.0-rc4-a9` is the fourth release candidate for the AL9 build of `fddaq-v4.4.0`.
The majority of work areas are set up to build against the most recent nightly release. To do so, run:
```sh
-dbt-create [-i/--install-pyvenv] -n # E.g., N22-11-27 or last_successful
+dbt-create -n # E.g., NFD_DEV_240213_A9
```
-...where in general the most popular `` is `last_successful`, which as the name suggests will translate to the date of the most recent successful nightly release. The optional `-i` argument will be discussed in a moment.
-
-To see all available nightly releases, run `dbt-create -l -n` or `dbt-create -l -b nightly`.
+You can also use `-n last_fddaq` to build against the most recent _develop_ branch, e.g., `NFD_DEV_241007_A9`. To see all available nightly releases, run `dbt-create -l -n` or `dbt-create -l -b nightly`. Note also that you can leave out defining the name of the work area subdirectory, in which case it defaults to the same name as the release.
If you want to build against a candidate release, run:
```sh
-dbt-create [-i/--install-pyvenv] -b candidate # E.g., rc-v3.2.1-1 as of Nov-11-2022.
+dbt-create -b candidate # E.g., fddaq-v4.4.0-rc4-a9
```
...where to see all available candidate releases, run `dbt-create -l -b candidate`.
-To build against a test release, simply replace `candidate` above with `test`. And to build against a frozen release, you don't need the `-b ` argument at all. You can simply do:
+And to build against a stable release (_not recommended_, as the codebase changes fairly rapidly), you don't need the `-b <release type>` argument at all. You can just do:
```
-dbt-create [-i/--install-pyvenv]
+dbt-create
```
-The option `-i/--install-pyvenv` for `dbt-create` is optional. By default, the Python virtual environment created in the work area will be a clone of an existing one from the release directory. This avoids the compilation/installation of Python modules using the `pyvenv_requirements.txt` in the release directory, and speeds up the work-area creation significantly. However, the first time running `dbt-create` with cloning on a node may take several minutes since cvmfs needs to fetch these files into local cache first, and `-i` is an option to avoid this.
-
-The structure of your work area will look like the following:
+The structure of your work area will include the following files and directories:
```txt
MyTopDir
├── build
@@ -102,6 +94,31 @@ MyTopDir
```
The next section of this document concerns how to build code in your new work area. However, if you'd like to learn about how to retrieve information about your work area such as the release of the DUNE DAQ suite it builds against, you can skip ahead to [Finding Info on Your Work Area](#Finding_Info).
+### Advanced `dbt-create` options
+
+Along with telling `dbt-create` what you want your work area to be named and what release you want it to be based off of, there are a few more options that give you finer-grained control over the work area. You can simply run `dbt-create -h` for a summary, but they're described in fuller detail here.
+
+
+* `-s/--spack`: Install a local Spack instance in the work area. This will allow you to install and load whatever Spack packages you wish into your work area.
+
+
+* `-q/--quick`: Use this if you don't plan to develop a Python package. This is much quicker than the default behavior of dbt-create, which will actually copy the Python virtual environment over to your work area, thereby giving you write permission to the project's Python packages. With `-q/--quick`, the Python virtual environment your work area uses is in the (read-only) release area on cvmfs.
+
+
+* `-i/--install-pyvenv`: With this option, there will be compilation/installation of python modules using the `pyvenv_requirements.txt` in the release directory. This is typically slower than cloning, but not always. You can take further control by combining it with the `-p ` argument, though it's unlikely as a typical developer that you'd want a non-standard set of Python packages.
+
+### Cloning an entire work area
+
+A new (June 2025) pair of experimental scripts in daq-buildtools enables users to create a work area by cloning another work area, using a YAML recipe file as an intermediary. The basic approach is simple. To create a recipe file from an existing area, assuming its environment is set up, just do the following:
+```
+dbtx-save-workarea-recipe.py
+```
+and the script will generate a file called `.yaml`. This human-readable file will contain details about the original area, and can then be used later to generate a work area based on the same nightly/candidate/stable release as well as the same repos and their commits as the original area. To do so one can simply pass the file to `dbtx-create-workarea-from-recipe.py` as well as the desired name of the new work area:
+```
+dbtx-create-workarea-from-recipe.py --workarea-name .yaml
+```
+Both scripts have further options; pass `--help` as an argument to either one in order to get more details.
+
## Cloning and building a package repo
@@ -118,7 +135,6 @@ cd ..
Note that in a "real world" situation [you'd be doing your development on a feature branch](https://dune-daq-sw.readthedocs.io/en/latest/packages/daq-release/development_workflow_gitflow/) in which case you'd add `-b ` to the `git clone` command above.
-
We're about to build and install the `listrev` package. (🔴 Note: if you are working with other packages, have a look at the [Working with more repos](#working-with-more-repos) subsection before running the following build command.) By default, the scripts will create a subdirectory of MyTopDir called `./install ` and install any packages you build off your repos there. If you wish to install them in another location, you'll want to set the environment variable `DBT_INSTALL_DIR` to the desired installation path before source-ing the `env.sh` script described below. You'll also want to remember to set the variable during subsequent logins to the work area if you don't go with the default.
Now, do the following:
@@ -131,7 +147,9 @@ dbt-build
### Working with more repos
-To work with more repos, add them to the `./sourcecode` subdirectory as we did with listrev. Be aware, though: if you're developing a new repo which itself depends on another new repo, daq-buildtools may not already know about this dependency. "New" in this context means "not listed in `/cvmfs/dunedaq.opensciencegrid.org/spack/releases/dunedaq-v3.2.2/dbt-build-order.cmake`". If this is the case, add the names of your new package(s) to the `build_order` list found in `./sourcecode/dbt-build-order.cmake`, placing them in the list in the relative order in which you want them to be built.
+To work with more repos, add them to the `./sourcecode` subdirectory as we did with listrev. Be aware, though: if you're developing a new repo which itself depends on another new repo, daq-buildtools may not already know about this dependency. If this is the case, add the names of your new package(s) to the `build_order` list found in `./sourcecode/dbt-build-order.cmake`, placing them in the list in the relative order in which you want them to be built.
+
+Note that as of daq-buildtools `v8.7.1`, you can replace the actual `./sourcecode` directory in your work area with a soft link called `sourcecode` which points to an actual `./sourcecode` directory elsewhere on your file system.
As a reminder, once you've added your repos and built them, you'll want to run `dbt-workarea-env` so the environment picks up their applications, libraries, etc.
@@ -149,11 +167,13 @@ dbt-build --clean --unittest # Blow away the contents of ./build, run config+ge
```
..where in the above case, you blow away the contents of `./build`, run config+generate+build, install the result in `$DBT_INSTALL_DIR` and then run the unit tests. Be aware that for many packages, unit tests will only (fully) work if you've also rerun `dbt-workarea-env`.
+To run any integration tests your repos may contain (e.g., `dfmodules`), you can pass the `--integtest` option to `dbt-build`.
+
To check for deviations from the coding rules described in the [DUNE C++ Style Guide](https://dune-daq-sw.readthedocs.io/en/latest/packages/styleguide/), run with the `--lint` option:
```
dbt-build --lint
```
-...though be aware that some guideline violations (e.g., having a function which tries to do unrelated things) can't be picked up by the automated linter. (_n.b.: As of Nov-11-2022, the `llvm` package needed for linting has been removed from the environment. It's possible by the time you read this that the issue has been fixed_) Also note that you can use `dbt-clang-format.sh` in order to automatically fix whitespace issues in your code; type it at the command line without arguments to learn how to use it.
+...though be aware that some guideline violations (e.g., having a function which tries to do unrelated things) can't be picked up by the automated linter. Also note that you can use `dbt-clang-format.sh` in order to automatically fix whitespace issues in your code; type it at the command line without arguments to learn how to use it.
Note that unlike the other options to `dbt-build`, `--lint` and `--unittest` are both capable of taking an optional argument, which is the name of a specific repo in your work area which you'd like to either lint or run unit tests for. This can be useful if you're focusing on developing one of several repos in your work area; e.g. `dbt-build --lint `. With `--lint` you can get even more fine grained by passing it the name of a single file in your repository area; either the absolute path for the file or its path relative to the directory you ran `dbt-build` from will work.
@@ -167,6 +187,21 @@ If you want to change cmake message log level, you can use the `--cmake-msg-lvl`
dbt-build --cmake-msg-lvl=
```
+By default the build is performed using gcc's `O2` compilation flag. If you wish to use a different optimization level, you can pass it to `dbt-build` via the `--optimize-flag` option:
+```
+dbt-build --optimize-flag O3 # Or Og, etc.
+```
+If you wish to only generate files but _not_ also perform a compilation (this is a kind of expert action, but there are use cases for it) you can run:
+```
+dbt-build --codegen-only
+```
+
+If you want to troubleshoot your code by taking advantage of `gcc`'s `-fsanitize` option, you can forward an argument to it via `dbt-build`'s `--sanitize` option. Note that in order to keep things consistent a clean build is required for this. One example:
+```
+dbt-build --clean --sanitize address # Will ensure -fsanitize=address is passed to gcc
+```
+Depending on the argument provided, there may be some helpful tips at the bottom of the `dbt-build` output on how to run the code you've built with sanitization applied.
+
You can see all the options listed if you run the script with the `--help` command, i.e.
```
dbt-build --help
@@ -177,7 +212,7 @@ Finally, note that both the output of your builds and your unit tests are logged
## Running
-In order to access the applications, libraries and plugins built and installed into the `$DBT_INSTALL_DIR` area during the above procedure, the system needs to be instructed on where to look for them. This is accomplished via tha `dbt-workarea-env` command you've already seen. E.g., log into a new shell, cd into your work area, then do the following:
+In order to access the applications, libraries and plugins built and installed into the `$DBT_INSTALL_DIR` area during the above procedure, the system needs to be instructed on where to look for them. This is accomplished via the `env.sh` file you've already seen. E.g., log into a new shell, cd into your work area, then do the following:
```
export DBT_INSTALL_DIR= # ONLY needed if you didn't use the default
. ./env.sh
@@ -187,11 +222,7 @@ Note that if you add a new repo to your work area, after building your new code
Once the runtime environment is set, just run the application you need. listrev, however, has no applications; it's just a set of DAQ module plugins which get added to CET_PLUGIN_PATH.
-Now that you know how to set up a work area, a nice place to learn a bit about the DUNE DAQ suite is via the `daqconf` package. Take a look at its documentation [here](https://dune-daq-sw.readthedocs.io/en/latest/packages/daqconf/); note that in parts of the `daqconf` instructions you're told to run daq-buildtools commands which you may already have run (e.g., to create a new work area) in which case you can skip those specific commands.
-
-A classic option for learning about how to run DAQ modules in a work area is [the listrev documentation](https://dune-daq-sw.readthedocs.io/en/latest/packages/listrev/).
-
-In both the links above you'll notice you'll be running a program called `nanorc` to run the DAQ. To learn more about `nanorc` itself, take a look at [the nanorc documentation](https://dune-daq-sw.readthedocs.io/en/latest/packages/nanorc/).
+Now that you know how to set up a work area, a classic option for learning about how to run DAQ modules in a work area is [the listrev documentation](https://dune-daq-sw.readthedocs.io/en/latest/packages/listrev/).
@@ -199,27 +230,93 @@ In both the links above you'll notice you'll be running a program called `nanorc
A couple of things need to be kept in mind when you're building code in a work area. The first is that when you call `dbt-build`, it will build your repos against a specific release of the DUNE DAQ software stack - namely, the release you (or someone else) provided to `dbt-create` when the work area was first created. Another is that the layout and behavior of a work area is a function of the version of daq-buildtools which was used to create it. As a work area ages it becomes increasingly likely that a problem will occur when you try to build a repo in it; this is natural and unavoidable.
-As such, it's important to know the assumptions a work area makes when you use it to build code. In the base of your work area is a file called `dbt-workarea-constants.sh`, which will look something like the following:
+As such, it's important to know the assumptions a work area makes when you use it to build code. This section covers ways to learn details about your work area and its contents.
+
+### `dbt-info`
+
+A useful script to call to get immediate information on your development environment is `dbt-info`. For a full set of options you can simply run `dbt-info --help`, but for a quick summary, we have the following:
+
+
+* `dbt-info release`: tells you if it's a far detector or near detector release, what its name is (e.g. `NFD_DEV_240213_A9`), what the name of the base release is, and where the release is located in cvmfs.
+
+
+* `dbt-info package <package name>`: tells you info about the DUNE DAQ package whose name you provide it (git commit hash of its code, etc.). Passing "all" as the package name gives you info for all the DUNE DAQ packages.
+
+
+* `dbt-info external <package name>`: `external` is same as the `package` option, except you use it when you want info not on a DUNE DAQ package but an external package (e.g., `boost`)
+
+
+* `dbt-info pymodule <module name>`: get the version of a Python module. Response will differ depending on whether you have a local Python environment in your work area.
+
+
+* `dbt-info sourcecode`: will tell you the branch each of the repos in your work area is on, as well as whether the code on the branch has been edited (indicated by an `*`)
+
+
+* `dbt-info release_size`: tells you the # of packages and memory (in KB) used by each of the release, the base release, and the externals.
+
+### `dbt-workarea-constants.sh`
+
+In the base of your work area is a file called `dbt-workarea-constants.sh`, which will look something like the following:
```
-export SPACK_RELEASE="N22-09-23"
-export SPACK_RELEASES_DIR="/cvmfs/dunedaq-development.opensciencegrid.org/nightly"
-export DBT_ROOT_WHEN_CREATED="/cvmfs/dunedaq.opensciencegrid.org/tools/dbt/v6.0.2"
+export SPACK_RELEASE="fddaq-v4.1.0"
+export SPACK_RELEASES_DIR="/cvmfs/dunedaq.opensciencegrid.org/spack/releases"
+export DBT_ROOT_WHEN_CREATED="/cvmfs/dunedaq.opensciencegrid.org/tools/dbt/v7.2.1"
+export LOCAL_SPACK_DIR="/home/jcfree/daqbuild_fddaq-v4.1.0/.spack"
```
This file is sourced whenever you run `dbt-workarea-env`, and it tells both the build system and the developer where they can find crucial information about the work areas' builds. Specifically, these environment variables mean the following:
-* `$SPACK_RELEASE`: this is the release of the DUNE DAQ software stack against which repos will build (e.g. `dunedaq-v2.10.2`, `N22-04-09`, etc.)
+* `$SPACK_RELEASE`: this is the release of the DUNE DAQ software stack against which repos will build (e.g. `fddaq-v4.4.0-rc4-a9`, `NFD_DEV_240213_A9`, etc.)
-* `$SPACK_RELEASES_DIR`: The base of the directory containing the DUNE DAQ software installations. The directory `$SPACK_RELEASES_DIR/$SPACK_RELEASE` contains the installation of the packages for your release
+* `$SPACK_RELEASES_DIR`: The base of the directory containing the DUNE DAQ software installations.
* `DBT_ROOT_WHEN_CREATED`: The directory containing the `env.sh` file which was sourced before this work area was first created
-There are also useful Spack commands which can be executed to learn about the versions of the individual packages you're working with, once you've run `dbt-workarea-env` or `dbt-setup-release`. An [excellent Spack tutorial](https://spack-tutorial.readthedocs.io/en/latest/tutorial_basics.html) inside the official Spack documentation is worth a look, but a few Spack commands can be used right away to learn about a work area:
+* `LOCAL_SPACK_DIR`: If the `-s/--spack` option was passed to `dbt-create` when the work area was built, this points to where the local Spack area is located
+
+If you set up your work area using `daq-buildtools v8.6.1` or later (i.e., using the `develop` line instead of `production/v4`), you'll also see something like
+```
+export DUNE_DAQ_RELEASE_SOURCE="/cvmfs/dunedaq-development.opensciencegrid.org/candidates/fddaq-v5.1.0-rc1-a9/sourcecode"
+```
+`DUNE_DAQ_RELEASE_SOURCE` points to a cvmfs area containing the source code used to build this release. This can be useful for inspecting packages not checked out locally under `$DBT_AREA_ROOT/sourcecode`.
+
+### `dbt-lcov.sh`
+
+Strictly speaking, this script is more about finding info about your code than about your work area. It determines what fraction of your lines of code and functions the unit tests in your work area's repos cover. This script wraps calls to our installed external [`lcov` package](https://github.com/linux-test-project/lcov). Assuming you've set up your work area's environment and are in its base, if you run
+```
+dbt-lcov.sh
+```
+what will happen is that, if it hasn't already been run, the script will insert a few lines of CMake code into the `sourcecode/CMakeLists.txt` file which will ensure that when the repos are built the output will be instrumented in a manner `lcov` can use. It will then perform a clean build now that `sourcecode/CMakeLists.txt` has been modified, followed by a run of the unit tests. It will then output the results in a subdirectory called `./code_coverage_results`; in particular, `./code_coverage_results/html/index.html` is a webpage which will display the fractions mentioned above.
+
+Please note that due to the modification of `sourcecode/CMakeLists.txt`, you wouldn't want to use the code you build for normal running (e.g., for performance testing or data readout). Likely it's best to use a work area dedicated to code coverage study as opposed to other functions.
+
+### Useful Spack commands
+
+There are also useful Spack commands which can be executed to learn about the versions of the individual packages you're working with, once you've run `dbt-workarea-env` or `dbt-setup-release`. An [excellent Spack tutorial](https://spack-tutorial.readthedocs.io/en/latest/tutorial_basics.html) inside the official Spack documentation is worth a look, but a few Spack commands can be used right away to learn more about your environment. They're presented both for the case of you having set up a nightly release and a stable release:
+
+* `spack find -N -d --loaded | grep NB` will tell you all the DUNE DAQ packages shared by both far- and near detector software which have been loaded by `dbt-workarea-env` or `dbt-setup-release`
+
+* `spack find -N -d --loaded | grep NFD` for far detector-specific DUNE DAQ packages
+
+* `spack find -N -d --loaded | grep NND` for near detector-specific DUNE DAQ packages
+
+* `spack find -N -d --loaded | grep dunedaq-externals` for external packages not developed by DUNE collaborators
+
+* `spack find -p <package name>` will tell you the path to the actual contents of a Spack-installed package
+
+Finally, when `dbt-build` is run, a file called `daq_app_rte.sh` is
+produced and placed in your installation area (`$DBT_INSTALL_DIR`). You generally don't need to think about `daq_app_rte.sh` unless you're curious; it's a sourceable file which contains environment variables that [drunc](https://dune-daq-sw.readthedocs.io/en/latest/packages/drunc/) uses to launch processes when performing runs.
+
+## Release Notes
+
+[`v8.9.4` release notes](https://github.com/DUNE-DAQ/daq-buildtools/releases/tag/v8.9.4)
+
+[`v8.9.2` release notes](https://github.com/DUNE-DAQ/daq-buildtools/releases/tag/v8.9.2)
-* `spack find --loaded -N | grep $SPACK_RELEASE` will tell you all the DUNE DAQ packages which have been loaded by `dbt-workarea-env` or `dbt-setup-release`
+[`v8.9.1` release notes](https://github.com/DUNE-DAQ/daq-buildtools/releases/tag/v8.9.1)
-* `spack find --loaded -N | grep dunedaq-externals` is the same, but will tell you all the external packages
+[`v8.9.0` release notes](https://github.com/DUNE-DAQ/daq-buildtools/releases/tag/v8.9.0)
-* `spack find --loaded -p ` will tell you the path to the actual contents of a Spack-installed package
+[`v8.8.0` release notes](https://github.com/DUNE-DAQ/daq-buildtools/releases/tag/v8.8.0)
## Next Step
@@ -237,7 +334,7 @@ _Last git commit to the markdown source of this page:_
_Author: John Freeman_
-_Date: Fri Feb 10 09:43:50 2023 -0600_
+_Date: Sat Dec 13 11:21:28 2025 -0600_
_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/daq-buildtools/issues](https://github.com/DUNE-DAQ/daq-buildtools/issues)_
diff --git a/docs/packages/daq-cmake/README.md b/docs/packages/daq-cmake/README.md
index 2a1e5f6acad..ed672cc5251 100644
--- a/docs/packages/daq-cmake/README.md
+++ b/docs/packages/daq-cmake/README.md
@@ -1,20 +1,22 @@
# daq-cmake
-_JCF, Sep-28-2022: the following daq-cmake documentation assumes you're using (a candidate) dunedaq-v3.2.0 or a recent nightly as it covers the new `create_dunedaq_package` script. For daq-cmake documentation prior to this addition please go [here](https://dune-daq-sw.readthedocs.io/en/v3.1.1/packages/daq-cmake/)_
-
This package provides CMake support for DUNE-DAQ packages.
The documentation for this package is divided into four parts:
+
1) Instructions for `create_dunedaq_package`, a script which will generate a good deal of CMake/C++ code which is standard across all DUNE DAQ packages
+
2) A description of the standard structure and CMake build code in a DUNE DAQ package
+
3) A complete reference manual for the DUNE-DAQ-specific CMake functions developers can call in order to specify their package's build
+
4) A description of how we use schema in order to consistently define data structures
-Note that this documentation assumes you have some familiarity with the [daq-buildtools package](https://dune-daq-sw.readthedocs.io/en/latest/packages/daq-buildtools) and know how to set up a development area and run commands to build code in it.
+Note that this documentation assumes you have some familiarity with the [daq-buildtools package](https://dune-daq-sw.readthedocs.io/en/latest/packages/daq-buildtools) and know how to set up a development area and run commands to build code in it.
## The `create_dunedaq_package` script
-A DUNE DAQ software package is composed of various types of software components - standalone applications, libraries, [DAQModules](https://dune-daq-sw.readthedocs.io/en/latest/packages/appfwk/), etc. Across the packages there are common ways these are implemented, whether as a result of our [official coding guidelines](https://dune-daq-sw.readthedocs.io/en/latest/packages/styleguide/) or simply through tradition. `create_dunedaq_package` takes advantage of these patterns and saves you work by generating much of the boilerplate code which makes up a DUNE DAQ package.
+A DUNE DAQ software package is composed of various types of software components - standalone applications, libraries, [DAQModules](https://dune-daq-sw.readthedocs.io/en/latest/packages/appfwk/), etc. Across the packages there are common ways these are implemented, whether as a result of our [official coding guidelines](https://dune-daq-sw.readthedocs.io/en/latest/packages/styleguide/) or simply through tradition. `create_dunedaq_package` takes advantage of these patterns and saves you work by generating much of the boilerplate code which makes up a DUNE DAQ package.
Before using `create_dunedaq_package`, you'll want to have some idea of what software components will make up your package, and what their names should be. While the only argument actually required by `create_dunedaq_package` is the name of your new package, it won't do much unless you provide it with options and arguments. You can see what these are by running `create_dunedaq_package -h`, reprinted here for your convenience.
@@ -30,14 +32,14 @@ Arguments and options:
`--test-app`: same as `--daq-module`, but for integration test applications
-`--config-generation`: whether to generate a script which itself will generate JSON code to create an application based on the package. Requires at least one `--daq-module` as well.
+`--pytest`: will create a Python program readable by the [pytest integration test framework](https://docs.pytest.org/en/stable/). It takes the name of the test as an argument; note the name needs to be of the form `*_test` or `test_*` so that pytest can work with it.
-Note that some of these concepts, e.g. a user-oriented app vs. an app designed for integration tests of the package itself, are covered below in the [Overview of a DUNE DAQ package](#package_overview) section.
+Note that some of these concepts, e.g. a user-oriented app vs. an app designed for integration tests of the package itself, are covered below in the [Overview of a DUNE DAQ package](#package_overview) section.
In the directory `create_dunedaq_package` is run out of, `create_dunedaq_package` will create a subdirectory named after your package if such a subdirectory doesn't exist. If a subdirectory with that name already _does_ exist, it should be empty with the possible exceptions of a `README.md` documentation file and/or a `.git/` version control directory. These exceptions allow you to run the script using as an argument the name of a new repo which you've cloned into your area. An example of using `create_dunedaq_package` would be the following (note you can horizontal-scroll the command below):
```
cd ./sourcecode # If we were in the base of a development area
-create_dunedaq_package --daq-module AFirstModule --config-generation --user-app an_app_for_users --user-app another_app_for_users --python-bindings --main-library thenewpackage
+create_dunedaq_package --daq-module AFirstModule --user-app an_app_for_users --user-app another_app_for_users --python-bindings --main-library thenewpackage
```
(Of course in real life please use better names for your package and its components than those in the example). If you were to `ls thenewpackage`, you would see that the script had set up several new directories for you, as well as a `CMakeLists.txt` file:
```
@@ -53,37 +55,27 @@ schema
src
unittest
```
-where most of the directories contain boilerplate code for the software components you requested. While you'd be able to build this boilerplate package if it were in the `sourcecode/` directory of a standard DUNE DAQ development environment, the new package's components do almost nothing, although in the case of DAQModules code is generated which provide an example of how to set a member variable via Run Control configuration. Nonetheless this boilerplate code will need to be replaced, filled in and extended by the package's developers. Also if you look at `CMakeLists.txt`, you'll see that many of the function calls you'd need will have been added, though generally missing the arguments you'd need to provide them so they would know what libraries to link against, e.g.:
+where most of the directories contain boilerplate code for the software components you requested. While you'd be able to build this boilerplate package if it were in the `sourcecode/` directory of a standard DUNE DAQ development environment, the new package's components do almost nothing. Nonetheless this boilerplate code will need to be replaced, filled in and extended by the package's developers. Also if you look at `CMakeLists.txt`, you'll see that many of the function calls you'd need will have been added, though generally missing the arguments you'd need to provide them so they would know what libraries to link against, e.g.:
```
daq_add_application(an_app_for_users an_app_for_users.cxx LINK_LIBRARIES ) # Any libraries to link in not yet determined
```
-Obviously comments such as `# Any libraries to link in not yet determined` should be deleted when it becomes appropriate.
-
-Note also that a unit test is automatically generated for you _which is designed to fail_. Developers are strongly encouraged to replace it with appropriate unit tests for their package, unless it's one of those rare packages which don't need unit tests, in which case the unit test functionality should be entirely stripped from the package.
+Obviously comments such as `# Any libraries to link in not yet determined` should be deleted when it becomes appropriate.
-If the `--config-generation` option is chosen, the script which gets produced is called `_gen`. You can pass it the `-h` option to see its arguments, but the main thing to know is that to pass it a set of arguments you'd want to do so via the `-c ` argument. An example of such a JSON file can be found in `/scripts/_example_config.json` file which is produced after you've run `create_dunedaq_package` with the `--config-generation` option.
-
-Assuming you're in the base of a development area [whose environment has been set up](https://dune-daq-sw.readthedocs.io/en/latest/packages/daq-buildtools) and have run the example `create_dunedaq_package` command above, you can now build your newly generated code and then try out the configuration generation script:
-```
-dbt-build
-dbt-workarea-env
-thenewpackage_gen -c ./sourcecode/thenewpackage/scripts/thenewpackage_example_config.json anewconfig
-```
-...where you can edit the values `num_afirstmodules` and `some_configured_value` in (a copy of) `thenewpackage_example_config.json` to generate a different configuration. Note that while this _legally_ runs in [`nanorc`](https://dune-daq-sw.readthedocs.io/en/latest/packages/nanorc/), it doesn't actually do anything -- in particular, the DAQ module(s) you've specified only set a member variable when configured, and don't communicate with anything.
+Note also that a unit test is automatically generated for you _which is designed to fail_. Developers are strongly encouraged to replace it with appropriate unit tests for their package, unless it's one of those rare packages which don't need unit tests, in which case the unit test functionality should be entirely stripped from the package.
-Now that you know how to generate the boilerplate for a DUNE DAQ package, please read on for a more in-depth understanding of what a typical DUNE DAQ package looks like.
+Now that you know how to generate the boilerplate for a DUNE DAQ package, please read on for a more in-depth understanding of what a typical DUNE DAQ package looks like.
## Overview of a DUNE DAQ package
### Setting up a development area
-To create a new package, you'll want to install a DUNE-DAQ development environment and then create a new CMake project for the package as described in [in the daq-buildtools documentation](https://dune-daq-sw.readthedocs.io/en/latest/packages/daq-buildtools/).
+To create a new package, you'll want to install a DUNE-DAQ development environment and then create a new CMake project for the package as described in [in the daq-buildtools documentation](https://dune-daq-sw.readthedocs.io/en/latest/packages/daq-buildtools/).
### A package's subdirectory structure
-To learn a bit more about how to structure your package so that it can be incorporated into the DUNE DAQ software suite, we'll play with a contrived package called "toylibrary". It's actually contained within a subdirectory of the daq-cmake repo; however, in order to be able to build toylibrary we'll want to copy it into the `./sourcecode` directory so the build system can work with it. Assuming you're already in the base directory of your development environment, do the following:
+To learn a bit more about how to structure your package so that it can be incorporated into the DUNE DAQ software suite, we'll play with a contrived package called "toylibrary". It's actually contained within a subdirectory of the daq-cmake repo; however, in order to be able to build toylibrary we'll want to copy it into the `./sourcecode` directory so the build system can work with it. Assuming you're already in the base directory of your development environment, do the following:
```
git clone https://github.com/DUNE-DAQ/daq-cmake
cd daq-cmake
@@ -92,7 +84,7 @@ cd ..
mv daq-cmake/toylibrary sourcecode
rm -rf daq-cmake
```
-You can now build toylibrary like you would a standard DUNE DAQ package. Please note that if you do so, since toylibrary isn't an official DUNE DAQ package and isn't a git repository, you'll get a couple of warnings; unlike most warnings, you can disregard these. Specifically, these warnings are `Package "toylibrary" not provided to the daq_add_subpackages` and `warning: Not a git repository.` (and surrounding text).
+You can now build toylibrary like you would a standard DUNE DAQ package. Please note that if you do so, since toylibrary isn't an official DUNE DAQ package and isn't a git repository, you'll get a couple of warnings; unlike most warnings, you can disregard these. Specifically, these warnings are `Package "toylibrary" not provided to the daq_add_subpackages` and `warning: Not a git repository.` (and surrounding text).
In terms of its actual functionality, toylibrary is pretty useless (it contains a class which can wrap an integer, and another class which can print that wrapped integer). However, its functionality is beside the point; toylibrary contains many features which DUNE DAQ packages have in common, in particular DUNE DAQ packages which provide a library other developers want to link against. For starters, take a look at the subdirectories, `ls sourcecode/toylibrary`:
@@ -109,18 +101,18 @@ In terms of its actual functionality, toylibrary is pretty useless (it contains
* *scripts* This directory is intended to hold executable scripts. In this case it contains the script, `toyscript.py`. The `python` script demonstrates how the C++ code exposed in `toy_wrapper.cpp` can be used from within `python`.
-If your package contains applications intended not for testing but for the end user, you'd put the code for it in a subdirectory called `apps/`. toylibrary doesn't have this type of application, but, e.g., the appfwk package does. Similarly, plugins not intended for testing but for the end user would go in `plugins/`.
+If your package contains applications intended not for testing but for the end user, you'd put the code for it in a subdirectory called `apps/`. toylibrary doesn't have this type of application, but, e.g., the appfwk package does. Similarly, plugins not intended for testing but for the end user would go in `plugins/`.
### Coding rules
-Along with having a standard directory structure, the C++ code itself in toylibrary conforms to the [DUNE C++ Style Guide](https://dune-daq-sw.readthedocs.io/en/latest/packages/styleguide/). Here, "style" doesn't mean whitespace and formatting, but rather, a set of Modern C++ best practices designed to make your code more robust against bugs, easier to extend, easier to reuse, etc. The DUNE C++ Style Guide is derived from the Google C++ Style Guide, but is greatly simplified and has been modified to be more appropriate to the DUNE DAQ project than Google's projects. Code which is merged into a package's git develop branch should be in conformance with the guide; while it's encouraged for code on a package's unmerged feature branches to also be in conformance, this is less important.
+Along with having a standard directory structure, the C++ code itself in toylibrary conforms to the [DUNE C++ Style Guide](https://dune-daq-sw.readthedocs.io/en/latest/packages/styleguide/). Here, "style" doesn't mean whitespace and formatting, but rather, a set of Modern C++ best practices designed to make your code more robust against bugs, easier to extend, easier to reuse, etc. The DUNE C++ Style Guide is derived from the Google C++ Style Guide, but is greatly simplified and has been modified to be more appropriate to the DUNE DAQ project than Google's projects. Code which is merged into a package's git develop branch should be in conformance with the guide; while it's encouraged for code on a package's unmerged feature branches to also be in conformance, this is less important.
### Your project's CMakeLists.txt file
-Every DUNE DAQ package should have one and only one `CMakeLists.txt` file, in the base directory of the package's repo (not to be confused with the base directory of the overall development area). To learn a bit about what that `CMakeLists.txt` file should look like, let's take a look at `sourcecode/toylibrary/CMakeLists.txt`. Because CMake is widely used and extensively documented online, this documentation will primarily focus on DUNE-specific CMake functions. The full documentation of the DUNE-specific CMake functions for users can be found [below](#cmake_function_descriptions). Depending on your learning style, however, you may find it easier to start learning about some of what these functions are capable of by reading on without skipping.
+Every DUNE DAQ package should have one and only one `CMakeLists.txt` file, in the base directory of the package's repo (not to be confused with the base directory of the overall development area). To learn a bit about what that `CMakeLists.txt` file should look like, let's take a look at `sourcecode/toylibrary/CMakeLists.txt`. Because CMake is widely used and extensively documented online, this documentation will primarily focus on DUNE-specific CMake functions. The full documentation of the DUNE-specific CMake functions for users can be found [below](#cmake_function_descriptions). Depending on your learning style, however, you may find it easier to start learning about some of what these functions are capable of by reading on without skipping.
-At the top of `CMakeLists.txt`: before doing anything else, we want to define the minimum version of CMake used (currently 3.12, which supports [modern CMake style](https://cliutils.gitlab.io/modern-cmake/)) as well as the name and version of the project. Concerning the version: it may not literally be the case that the code you're working with is exactly the same as the version-in-question's release code, because you may be on a feature branch, or there may have been commits to the develop branch since the last release.
+At the top of `CMakeLists.txt`: before doing anything else, we want to define the minimum version of CMake used (currently 3.12, which supports [modern CMake style](https://cliutils.gitlab.io/modern-cmake/)) as well as the name and version of the project. Concerning the version: it may not literally be the case that the code you're working with is exactly the same as the version-in-question's release code, because you may be on a feature branch, or there may have been commits to the develop branch since the last release.
```
cmake_minimum_required(VERSION 3.12)
project(toylibrary VERSION 1.1.0)
@@ -129,7 +121,7 @@ Next, we want to make CMake functions written specifically for DUNE DAQ developm
```
find_package(daq-cmake REQUIRED)
```
-This is how we ensure that the `CMakeLists.txt` file has access to the standard DUNE DAQ CMake functions previously mentioned. When `find_package` is called here it imports daq-cmake's `DAQ` CMake module. Note that by convention all functions/macros within the module begin with `daq_`, so as to distinguish them from functions/macros from CMake modules written outside of DUNE DAQ.
+This is how we ensure that the `CMakeLists.txt` file has access to the standard DUNE DAQ CMake functions previously mentioned. When `find_package` is called here it imports daq-cmake's `DAQ` CMake module. Note that by convention all functions/macros within the module begin with `daq_`, so as to distinguish them from functions/macros from CMake modules written outside of DUNE DAQ.
The next step is to call a macro from the `DAQ` module which sets up a standard DUNE CMake environment for your `CMakeLists.txt` file:
```
@@ -141,41 +133,41 @@ Among other things daq_setup_environment() will do the following:
* Ensure all code within the project can find the project's public headers
-* Allow our linter scripts to work with the code
+* Allow our linter scripts to work with the code
* Have gcc use standard warnings
* Support the use of CTest for the unit tests
-Next you'll see calls to CMake's [find_package](https://cmake.org/cmake/help/v3.17/command/find_package.html) function, which makes toylibrary's dependencies available. Comments in the file explain why the dependencies are selected.
+Next you'll see calls to CMake's [find_package](https://cmake.org/cmake/help/v3.17/command/find_package.html) function, which makes toylibrary's dependencies available. Comments in the file explain why the dependencies are selected. Please note that when developing your own package, if it's part of the nightly build and you add a new dependency, besides adding the needed `find_package` call you should also alert Software Coordination so they can add the dependency to the Spack build of the package. See below in the section called "Installing your project as a local package" for more about dependencies.
-Then, you'll see a call to a function called `daq_add_library`.
+Then, you'll see a call to a function called `daq_add_library`.
```
daq_add_library(IntPrinter.cpp LINK_LIBRARIES logging::logging)
```
-What `daq_add_library` does here is create the main project library. It looks in the project's `./src` subdirectory for a file called `IntPrinter.cpp`, which it then compiles and links against the DUNE DAQ logging library. The result is output in the installation area (`$DBT_INSTALL_DIR`) as a shared object library named after the project itself, `toylibrary/lib64/libtoylibrary.so`.
+What `daq_add_library` does here is create the main project library. It looks in the project's `./src` subdirectory for a file called `IntPrinter.cpp`, which it then compiles and links against the DUNE DAQ logging library. The result is output in the installation area (`$DBT_INSTALL_DIR`) as a shared object library named after the project itself, `toylibrary/lib64/libtoylibrary.so`.
The next function you see called in the CMakeLists.txt file is `daq_add_python_bindings`:
```
-daq_add_python_bindings( toy_wrapper.cpp LINK_LIBRARIES ${PROJECT_NAME} )
+daq_add_python_bindings( toy_wrapper.cpp )
```
-which is a function designed to allow the binding of C++ code to python. To do so, it relies on the header only library, `pybind11`. The function expects to find the source files exposing the C++ code, in the package directory, `pybindsrc`. In this `toylibrary` case, we have specified that the bindings are located in the file `toy_wrapper.cpp`. The resulting compiled file will be called, `_daq_${PROJECT_NAME}_py.so`, and will be placed in the output installation subdirectory, `${PROJECT_NAME}/lib64/python/${PROJECT_NAME}`. Similarly to `daq_add_library`, `_daq_${PROJECT_NAME}_py.so` will be linked against the libraries specified after `LINK_LIBRARIES`. For how to import the exposed C++ in, see detailed description section. After the call of `daq_add_python_bindings`, you will see the call to the function `daq_add_application`.
+which is a function designed to allow the binding of C++ code to python. To do so, it relies on the header-only library, `pybind11`. The function expects to find the source files exposing the C++ code, in the package directory, `pybindsrc`. In this `toylibrary` case, we have specified that the bindings are located in the file `toy_wrapper.cpp`. The resulting compiled file will be called, `_daq_${PROJECT_NAME}_py.so`, and will be placed in the output installation subdirectory, `${PROJECT_NAME}/lib64/python/${PROJECT_NAME}`. Similarly to `daq_add_library`, `_daq_${PROJECT_NAME}_py.so` will be linked against the libraries specified after `LINK_LIBRARIES`; however, it will also automatically link against the main package library. For how to import the exposed C++ code, see the detailed description section. After the call of `daq_add_python_bindings`, you will see the call to the function `daq_add_application`.
```
daq_add_application( toylibrary_test_program toylibrary_test_program.cxx TEST LINK_LIBRARIES ${Boost_PROGRAM_OPTIONS_LIBRARY} ${PROJECT_NAME} )
```
-which searches in the projects' `test/apps/` subdirectory for a file called `toylibrary_test_program.cxx`, builds it, and links against the project's main library which we created via the previous `daq_add_library` command as well as a Boost library used to parse program input. The output application is named after the first argument to the function, `toylibrary_test_program`; it can be found in `$DBT_INSTALL_DIR/toylibrary/test/bin/toylibrary_test_program`. Note that if the "TEST" argument hadn't been supplied, the build system would have looked in a subdirectory of the project called `apps/` rather than `test/apps/` for the source file.
+which searches in the project's `test/apps/` subdirectory for a file called `toylibrary_test_program.cxx`, builds it, and links against the project's main library which we created via the previous `daq_add_library` command as well as a Boost library used to parse program input. The output application is named after the first argument to the function, `toylibrary_test_program`; it can be found in `$DBT_INSTALL_DIR/toylibrary/test/bin/toylibrary_test_program`. Note that if the "TEST" argument hadn't been supplied, the build system would have looked in a subdirectory of the project called `apps/` rather than `test/apps/` for the source file.
Another function currently provided by the DAQ CMake module is `daq_add_unit_test`. Examples of this function's use can be found at the bottom of the `sourcecode/toylibrary/CMakeLists.txt` file, e.g.:
```
daq_add_unit_test(ValueWrapper_test)
```
-If you pass this function a name, e.g., `MyComponent_test`, it will create a unit test executable off of a source file called `sourcecode//unittest/MyComponent_test.cxx`, and handle linking in the Boost unit test dependencies. You can also optionally have it link in other libraries by providing them after the `LINK_LIBRARIES` argument as in other functions; in the above example, this isn't needed because ValueWrapper is a template class which is instantiated within the unit test code itself.
+If you pass this function a name, e.g., `MyComponent_test`, it will create a unit test executable off of a source file called `sourcecode//unittest/MyComponent_test.cxx`, and handle linking in the Boost unit test dependencies. You can also optionally have it link in other libraries by providing them after the `LINK_LIBRARIES` argument as in other functions; in the above example, this isn't needed because ValueWrapper is a template class which is instantiated within the unit test code itself.
At the bottom of CMakeLists.txt, you'll see the following function:
```
daq_install()
```
-When you call it it will install the targets (executables, shared object libraries) you wish to make available to others who want to use your package in a directory called `$DBT_INSTALL_DIR/` (by default that would be `./install/toylibrary`). You'll also need to add a special file to your project for this function to work; this is discussed more fully in the "Installing your project as a local package" section later in this document.
+When you call it, it will install the targets (executables, shared object libraries) you wish to make available to others who want to use your package in a directory called `$DBT_INSTALL_DIR/` (by default that would be `./install/toylibrary`). You'll also need to add a special file to your project for this function to work; this is discussed more fully in the "Installing your project as a local package" section later in this document.
### Installing your project as a local package
@@ -185,7 +177,7 @@ Use the procedure described below in order to have your package installed. Once
find_package(mypackage)
```
-For starters, you'll want to call the DAQ module's `daq_install()` function at the bottom of your CMakeLists.txt file, as described earlier in this document.
+For starters, you'll want to call the DAQ module's `daq_install()` function at the bottom of your CMakeLists.txt file, as described earlier in this document.
A major thing you should be aware of is that when you call CMake's `find_package` function, it will look for a file with the name `mypackageConfig.cmake` in a predetermined set of directories, including the one you defined (or allowed to default to `./install`) when you initially set up your development area as described elsewhere in the documentation. What a standard `mypackageConfig.cmake` file should look like with modern CMake is documented in many places on the web, but in order to make life as easy as possible there's a templatized version of this file in the daq-cmake package. Assuming you've got a `./sourcecode/mypackage` repo in your development area, you can do the following:
```
@@ -202,26 +194,26 @@ and then let's look at the opening lines of `mypackageConfig.cmake.in`:
include(CMakeFindDependencyMacro)
-# Insert find_dependency() calls for your package's dependencies in
-# the place of this comment. Make sure they match up with the
-# find_package calls in your package's CMakeLists.txt file
+# Insert find_dependency() calls for your package's dependencies in
+# the place of this comment. Make sure they match up with the
+# find_package calls in your package's CMakeLists.txt file
```
-The only part of this file you need to worry about is the "Insert find_dependency()..." comment. In place of this comment, you'll want to call CMake's `find_dependency` function (details [here](https://cmake.org/cmake/help/latest/module/CMakeFindDependencyMacro.html)) for each package that mypackage depends on; this ensures that developers who call `find_package(mypackage)` don't need to have explicit `find_package` calls on these dependencies.
+The only part of this file you need to worry about is the "Insert find_dependency()..." comment. In place of this comment, you'll want to call CMake's `find_dependency` function (details [here](https://cmake.org/cmake/help/latest/module/CMakeFindDependencyMacro.html)) for each package that mypackage depends on; this ensures that developers who call `find_package(mypackage)` don't need to have explicit `find_package` calls on these dependencies. Please note that if you want to _drop_ a dependency from your package, not only should you remove the relevant `find_package` call from `CMakeLists.txt`, you should also remove the corresponding `find_dependency` call in your `Config.cmake.in` file.
You can see a simple example of this kind of file with `toylibrary/cmake/toylibraryConfig.cmake.in`.
-Once you've edited this file as described, from the base of your development area you can then run
+Once you've edited this file as described, from the base of your development area you can then run
```
dbt-build
```
-without receiving an error message informing you that installation isn't an option.
+without receiving an error message informing you that installation isn't an option.
## Description of the CMake functions provided by `daq-cmake`
### daq_setup_environment:
-Usage:
+Usage:
```
daq_setup_environment()
@@ -229,8 +221,8 @@ daq_setup_environment()
This macro should be called immediately after this DAQ module is
included in your DUNE DAQ project's CMakeLists.txt file; it ensures
-that DUNE DAQ projects all have a common build environment. It takes
-no arguments.
+that DUNE DAQ projects all have a common build environment. It takes
+no arguments.
### daq_codegen:
@@ -238,42 +230,42 @@ Usage:
```
daq_codegen( ... [TEST] [DEP_PKGS ...] [MODEL ]
[TEMPLATES ...] )
-```
+```
`daq_codegen` uses `moo` to generate C++ headers from schema files from `schema/` applying
them to one or more templates.
Arguments:
-
-* ` ...`:
+
+* ` ...`:
The list of schema files to process from `/schema/`. Each schema file will be applied to each template (specified by the TEMPLATES argument). Each schema/template pair will generate a code file named `build//codegen/include///`
e.g. `my_schema.jsonnet` (from `my_pkg`) + `your_pkg/YourStruct.hpp.j2` will result in `build/my_pkg/codegen/include/my_pkg/my_schema/YourStruct.hpp`
-* `TEST`:
-
+* `TEST`:
+
If the code is meant for an entity in the package's test/ subdirectory, `TEST` should be passed as an argument, and the schema file's path will be assumed to be
`test/schema/` rather than merely `schema/`.
-
-* `DEP_PKGS`:
-
+
+* `DEP_PKGS`:
+
If schema, template or model files depend on files provided by other DAQ packages, the `DEP_PKGS` argument must contain the list of packages.
-
+
* `MODEL`:
-
+
The `MODEL` argument is optional; if no model file name is explicitly provided, `omodel.jsonnet` from the moo package itself is used.
* `TEMPLATES`:
-
+
The list of templates to use. This is a mandatory argument. The template file format is `/`. If `` is omitted, the template is expected to be made available by moo.
### daq_add_library:
-Usage:
+Usage:
```
daq_add_library( ... [LINK_LIBRARIES ...])
```
@@ -285,8 +277,8 @@ glob expressions, and link against the libraries listed after
`LINK_LIBRARIES`. The set of files is assumed to be in the `src/`
subdirectory of the project.
-As an example,
-`daq_add_library(MyProj.cpp *Utils.cpp LINK_LIBRARIES logging::logging)`
+As an example,
+`daq_add_library(MyProj.cpp *Utils.cpp LINK_LIBRARIES logging::logging)`
will create a library off of `src/MyProj.cpp` and any file in `src/`
ending in "Utils.cpp", and links against the [logging library](https://dune-daq-sw.readthedocs.io/en/latest/packages/logging/)
@@ -294,32 +286,77 @@ Public headers for users of the library should go in the project's
`include/` directory. Private headers used in the
library's implementation should be put in the `src/` directory.
+### daq_protobuf_codegen:
+Usage:
+```
+daq_protobuf_codegen( ... [TEST] [GEN_GRPC] [DEP_PKGS ...] )
+```
+
+Requirements for calling this function:
+1) You need to call `find_package(opmonlib REQUIRED)` in your `CMakeLists.txt` file
+2) You also need to call `daq_add_library`, i.e., have a main package-wide library, and link it against the opmonlib library
+3) You need to call `find_package(gRPC REQUIRED)` before calling this function if you have specified `GEN_GRPC`.
+
+Arguments:
+
+
+* ` ...`: these arguments are the list of `*.proto` files for protobuf's "protoc" program to process from `/schema/`. Globs also allowed.
+
+
+* `TEST`: If the code is meant for an entity in the package's `test/` subdirectory, `TEST` should be passed as an argument, and the schema file's path will be assumed to be `test/schema/` rather than merely `schema/`.
+
+
+* `GEN_GRPC`: if you need to generate gRPC code for the `*.proto` files.
+
+
+* `DEP_PKGS`: if a `*.proto` file given depends on `*.proto files` provided by other DAQ packages, the `DEP_PKGS` argument must contain the list of packages.
+
+Each `*.proto` file will have a C++ header/source file generated as well as a Python file.
+The names of the generated files are the same as per the [ProtoBuf API](https://protobuf.dev/): `*.pb.h` and `*.pb.cc` for the C++ header and source, respectively.
+gRPC files are of the form `*.grpc.pb.h` and `*.grpc.pb.cc`.
+The header will be installed in the public include directory.
+Code can link against the header in the form:
+```C++
+#include "/.pb.h"
+```
+The generated python file will be called `*_pb2.py` and will be installed in `lib64/python/`.
+The generated python file will be called `*_grpc_pb2.py` and will be installed in `lib64/python/`.
+
+The source file will be built as part of the main package library.
+Its compilation will be done automatically, i.e. there is no need to add `*.pb.cc` in the `daq_add_library` directive of your package: `daq_protobuf_codegen` will suffice.
+
### daq_add_python_bindings:
-Usage:
+Usage:
```
-daq_add_python_bindings( ... [LINK_LIBRARIES ...])
+daq_add_python_bindings( ... [DAL] [LINK_LIBRARIES ...])
```
-`daq_add_python_bindings` is designed to produce a library providing
-a python interface to C++ code. It will compile a group
-of files, which are expected to expose the desired C++ interface via `pybind11`.
-The set of files is defined by a set of one or more individual filenames and/or
-glob expressions, and link against the libraries listed after
-LINK_LIBRARIES. The set of files is expected to be in the `pybindsrc`
-subdirectory of the project.
+`daq_add_python_bindings` is designed to produce a library providing a Python
+interface to C++ code. It will compile a group of files, which are expected
+to expose the desired C++ interface via `pybind11`. The set of files is
+defined by a set of one or more individual filenames and/or glob expressions,
+and are assumed to be in the `pybindsrc/` subdirectory of the package.
+Linking is done against the libraries listed after `LINK_LIBRARIES` plus,
+if available, the main package library (if `DAL` isn't provided as an argument)
+or the library produced via `daq_add_dal_library` (if `DAL` is).
As an example,
-`daq_add_python_bindings(my_wrapper.cpp LINK_LIBRARIES ${PROJECT_NAME})`
-will create a library from `pybindsrc/my_wrapper.cpp` and link against
-the main project library which would have been created via daq_add_library
+`daq_add_python_bindings(my_wrapper.cpp)`
+will create a library from `pybindsrc/my_wrapper.cpp` and link against
+the main package library which would have been created via `daq_add_library`
-Please note that library shared object will be named `_daq_${PROJECT_NAME}_py.so`, and will be placed
-in the `python/${PROJECT_NAME}` directory. You will need to have the corresponding init file,
-`python/${PROJECT_NAME}/__init__.py` to import the appropiate componenets of the module.
-See toylibrary for a working example.
+_Without_ the `DAL` option, the library shared object will be named
+`_daq_${PROJECT_NAME}_py.so`, and will be installed in the `python/${PROJECT_NAME}/`
+directory. You will need to have the corresponding init file,
+`python/${PROJECT_NAME}/__init__.py` to import the appropriate components of the module.
+See `toylibrary` for a working example.
+
+_With_ the `DAL` option, the library shared object will be `_daq_${PROJECT_NAME}_dal_py.so`,
+and will be installed in the `python/${PROJECT_NAME}_dal` directory. Here, you need a
+`python/${PROJECT_NAME}_dal/__init__.py` file which imports `_daq_${PROJECT_NAME}_dal_py.so`.
### daq_add_plugin:
-Usage:
+Usage:
```
daq_add_plugin( [TEST] [LINK_LIBRARIES ...])
```
@@ -330,15 +367,18 @@ with the name `.cpp` located either in the `plugins/`
subdirectory of the project (if the `TEST` option isn't used) or in
the `test/plugins/` subdirectory of the project (if it is). Like
daq_add_library, daq_add_plugin can be provided a list of libraries
-to link against, following the `LINK_LIBRARIES` argument.
+to link against, following the `LINK_LIBRARIES` argument.
Your plugin will look in `include/` for your project's public headers
and `src/` for its private headers. Additionally, if it's a "TEST"
plugin, it will look in `test/src/`.
+Note that if `cetlib` is a dependency of the package being built, it
+will be automatically linked against the plugin.
+
### daq_add_application
-Usage:
+Usage:
```
daq_add_application( ... [TEST] [LINK_LIBRARIES ...])
```
@@ -357,7 +397,7 @@ headers and `src/` for its private headers. Additionally, if it's a
"TEST" plugin, it will look in `test/src/`.
### daq_add_unit_test
-Usage:
+Usage:
```
daq_add_unit_test( [LINK_LIBRARIES ...])
```
@@ -369,8 +409,49 @@ etc.). Like daq_add_library, daq_add_unit_test can be provided a
list of libraries to link against, following the `LINK_LIBRARIES`
token.
+### daq_add_dal_library
+Usage:
+```
+daq_add_dal_library( ... [TEST] [NAMESPACE ns] [DALDIR subdir] [DEP_PKGS pkg1 pkg2 ...])
+```
+
+Note that calling `find_package(conffwk REQUIRED)` is required to use this function
+
+`daq_add_dal_library` uses the `oksdalgen` package's application of the
+same name to generate C++ and Python code from the OKS schema
+file(s) provided to it and build it into a shared object library
+with the name `lib_dal`; it optionally can take source files
+which implement some of the functions as well as libraries needed by
+those source files
+
+Arguments:
+` ...`: the list of OKS schema files to process from `/schema/`.
+
+`TEST`: If the code is meant for an entity in the package's `test/` subdirectory, `TEST`
+should be passed as an argument, and the schema file's path will be assumed to be
+`test/schema/` rather than merely `schema/`.
+
+`SOURCES`: the names of any user-written source files needed to
+implement functions whose declarations are generated from a schema,
+taken relative to the `src/` subdirectory
+
+`NAMESPACE`: the namespace in which the generated C++ classes will be placed. Defaults to `dunedaq::`
+
+`DALDIR`: subdirectory relative to the package's primary include directory where headers will appear (`include//`); default is no subdirectory
+
+`DEP_PKGS`: if a schema file you've provided as an argument itself
+includes a schema file (or schema files) from one or more other
+packages, you need to supply the names of the packages as arguments
+to `DEP_PKGS`. Note the dal libraries produced from those packages
+will automatically get linked in as dependencies and won't need to
+be provided in the `LINK_LIBRARIES` argument described below
+
+`LINK_LIBRARIES`: the name of any libraries needed by the source files
+provided by `SOURCES` (`conffwk` automatically provided)
+
+
### daq_install
-Usage:
+Usage:
```
daq_install()
```
@@ -381,7 +462,7 @@ arguments.
## Schemas and code generation
-`daq-cmake` supports for schema distribution and code generation with [moo](https://github.com/brettviren/moo/)
+`daq-cmake` provides support for schema distribution and code generation with [moo](https://github.com/brettviren/moo/), [protobuf](https://protobuf.dev/programming-guides/proto3/) and [OKS](https://github.com/DUNE-DAQ/dal).
@@ -409,10 +490,11 @@ appfwk/
├── python
├── schema
│ ├── appfwk
-│ │ ├── appinfo.jsonnet
│ │ ├── app.jsonnet
│ │ ├── cmd.jsonnet
-│ ├── README.md
+│ │ └── opmon
+│ │ └── appinfo.proto
+│ └── README.md
├── src
├── test
└── unittest
@@ -424,7 +506,8 @@ appfwk/
local s = moo.oschema.schema("dunedaq.appfwk.cmd");
```
-The same applies to `app.jsonnet` and `appinfo.jsonnet` for `dunedaq.appfwk.app` and `dunedaq.appfwk.appinfo`.
+The same applies to `app.jsonnet` for `dunedaq.appfwk.app`.
+
The matching between the schema file name/path and the jsonnet namespace is essential for code generation with `daq-cmake`. A mismatch between the two will result in empty generated files in most cases.
@@ -435,9 +518,9 @@ The matching between the schema file name/path and the jsonnet namespace is esse
_Last git commit to the markdown source of this page:_
-_Author: jcfreeman2_
+_Author: John Freeman_
-_Date: Wed Sep 28 13:54:30 2022 -0500_
+_Date: Mon Jul 28 10:32:53 2025 -0500_
_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/daq-cmake/issues](https://github.com/DUNE-DAQ/daq-cmake/issues)_
diff --git a/docs/packages/daq-release/Build-new-external-software-stack.md b/docs/packages/daq-release/Build-new-external-software-stack.md
new file mode 100644
index 00000000000..2145af5142e
--- /dev/null
+++ b/docs/packages/daq-release/Build-new-external-software-stack.md
@@ -0,0 +1,73 @@
+# Building and deploying external packages
+
+## Preliminary
+
+You'll want to do the following logged onto `daq.fnal.gov` as user
+`dunedaq`. Ideally you'll be logged on in a manner such that it's
+unlikely your ssh connection will broken; while strides have been made
+in getting the `build-ext.sh` script to be able to pick up where it
+left off, it's generally better to be able to run `build-ext.sh` in
+one go. Note that this takes about 2-3 hours.
+
+## Build setup and start
+
+In a nutshell, all you need to do is run the `build-ext.sh` script
+inside a container based on the `ghcr.io/dune-daq/alma9-spack:latest`
+image. To provide a bit more detail, you'll want to do the following
+once you're logged into `daq.fnal.gov` as `dunedaq`:
+
+
+
+1. Check whether there are already externals installed in `/home/nfs/dunedaq/docker-scratch/cvmfs_dunedaq/spack/externals/ext-v${EXT_VERSION}/spack-${SPACK_VERSION}` (*), and if so, that you know why they're already there.
+
+
+1. Create a directory which will be the base of operations for your work, if you don't already have one
+
+
+1. Inside that directory, `git clone https://github.com/DUNE-DAQ/daq-release`
+
+
+1. Launch a container using the [example at the top of the `build-ext.sh` script as a guide](https://github.com/DUNE-DAQ/daq-release/blob/develop/scripts/spack/build-ext.sh). Note that `` here would be `/home/nfs/dunedaq/docker-scratch/cvmfs_dunedaq` (as of Jul-25-2024)
+
+
+1. Run `/daq-release/scripts/spack/build-ext.sh ` if you want to build everything from scratch (which you do if this is your first time). The `` is the integer which gets passed to the Linux `nice` command to determine the priority of the build relative to other people's work on the system. Set this to `0` if you want it to have equal priority, or a value higher than `0` (e.g. `10` or `15`) if you want to be nice and give priority to other people's work.
+
+
+1. _or_ `/daq-release/scripts/spack/build-ext.sh false` if you want to resume an externals build (e.g. because your ssh connection got broken)
+
+(*) Here, use `${EXT_VERSION}` and `${SPACK_VERSION}` as stand-ins for the actual externals version (e.g., `2.1`) and Spack version (e.g., `0.22.0`)
+
+# Once the build is complete
+
+Once complete, the externals you've built will be located in `/home/nfs/dunedaq/docker-scratch/cvmfs_dunedaq/ext-v${EXT_VERSION}/spack-${SPACK_VERSION}`. Note that you'll need to get them copied from the local area on `daq.fnal.gov` to two separate locations: (1) an externals image in which the nightly build can be performed, and (2) onto cvmfs. _Please confirm you can build a nightly in an externals image before altering cvmfs_ . In order to do so:
+
+
+
+1. Run the [Build docker with slim externals](https://github.com/DUNE-DAQ/daq-release/actions/workflows/slim_externals.yaml) GitHub Action. _Make sure_ you add an argument to the "optional suffix for test-only externals image" field, otherwise you'll clobber the standard externals image used for the usual nightly. This Action usually takes very roughly an hour, sometimes a bit less.
+
+
+
+1. Create a temporary branch forked off of the `develop` branch where you can modify the relevant nightly workflow YAML file (i.e., the nightly you plan to base off the externals you've built). Then, modify the file so that instead of using the standard externals image, it uses the test image you created. E.g., if you provided `T` as an argument to "Build docker with slim externals", you'd stick a `T` at the end of the externals image referred to in the workflow file.
+
+
+
+1. Run the workflow off the temporary branch, and make sure that you provide a nightly tag prefix so you don't clobber the standard nightly. Also select `yes` for whether to deploy the release to cvmfs if it builds correctly.
+
+
+
+1. If it does, in fact, build successfully, now you can update the externals area on the actual cvmfs. Details on how to do that are [here](https://dune-daq-sw.readthedocs.io/en/latest/packages/daq-release/publish_to_cvmfs/#updating-a-particular-directory-on-cvmfs).
+
+
+
+-----
+
+
+_Last git commit to the markdown source of this page:_
+
+
+_Author: Dune_
+
+_Date: Thu Nov 21 12:47:06 2024 -0600_
+
+_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/daq-release/issues](https://github.com/DUNE-DAQ/daq-release/issues)_
+
diff --git a/docs/packages/daq-release/Build-packages-with-spack-in-a-work-area.md b/docs/packages/daq-release/Build-packages-with-spack-in-a-work-area.md
new file mode 100644
index 00000000000..b4e82f77a4c
--- /dev/null
+++ b/docs/packages/daq-release/Build-packages-with-spack-in-a-work-area.md
@@ -0,0 +1,104 @@
+# Build packages with spack in a work area
+Developers occasionally need to utilize a different variant, a new version, or a completely new external package before its inclusion and deployment in the DUNE DAQ external software stack on CVMFS. Here's a step-by-step guide on how to do this:
+
+
+
+1. Create a work area using the `-s` flag with the `dbt-create` command. This will generate a `.spack` subdirectory within the work area, with the external stack and release stack as upstreams to it.
+
+
+2. For the external package, either copy or create the desired Spack recipe file and place it in the directory:
+`/.spack/spack-repo/packages//package.py`. Detailed instructions on creating this recipe file are provided below.
+
+
+3. Run the `spack install` command, for example `spack install @version%gcc@12.1.0 arch=linux-almalinux9-x86_64`. Of course, know what spec you want.
+
+
+4. After installation, load the newly installed Spack package and run `dbt-build` to build your DUNE DAQ packages. If `dbt-build` complains about `dbt-workarea-env` not being run, reload the Python virtual environment as follows:
+```bash
+deactivate
+source .venv/bin/activate
+```
+
+## Spack recipe files for the new external packages
+
+### New external package
+
+For a new external package, there may already be a recipe file in Spack's built-in repository. To check for available versions and their dependencies, use the following command:
+```bash
+spack info
+```
+You can also open the recipe file in your default editor with:
+```bash
+spack edit
+```
+If no recipe file exists, consider reaching out to Software Coordination for assistance in creating one.
+
+### Existing external package
+
+For existing external packages, you can copy the recipe file (and related patches) from either the built-in Spack repository or daq-release/spack-repo/externals. Copy the entire directory from the source path to your work area's Spack repository path:
+```bash
+cp -pr /packages//.spack/spack-repo/packages/
+```
+This ensures you copy both the recipe file and related patch files.
+
+### Adding a new version
+
+If a recipe file exists but lacks entries for newer versions, use the following command to have Spack check for available versions and generate checksums:
+
+```bash
+spack checksum
+```
+
+Copy the output for the new versions and their associated checksums into the recipe file to make these versions available. Additionally, check if the package uses patches when building. If patches are in use, confirm if existing patches are still applicable; otherwise, create new ones. Contact Software Coordination for assistance as needed.
+
+## Example: Building and Using a New `HighFive` version in a work area
+
+```bash
+## Creating a work area with the "-s" option of "dbt-create"
+source /cvmfs/dunedaq.opensciencegrid.org/setup_dunedaq.sh
+setup_dbt latest
+dbt-create -s -n NFD_PROD4_240404_A9 daqbuild_NFD_PROD4_240404_A9
+cd daqbuild_NFD_PROD4_240404_A9
+
+## Obtaining the Spack recipe file and placing it into the local Spack repository
+git clone https://github.com/DUNE-DAQ/daq-release
+cp -pr daq-release/spack-repos/externals/packages/highfive .spack/spack-repo/packages/
+
+## Setting up Spack and building the HighFive using RelWithDebInfo rather than the already-installed build_type=Release
+dbt-workarea-env
+spack install --reuse highfive@2.7.1%gcc@12.1.0~boost~ipo+mpi build_system=cmake build_type=RelWithDebInfo
+
+spack find -p -l highfive # Use this to find the Spack hash for the highfive you just built
+spack load # And load it in
+
+## Reloading the Python virtual environment
+## "spack load" sometimes modifies PYTHONPATH, which could cause issues with "dbt-build."
+## Reloading the environment before running "dbt-build" avoids this potential issue.
+deactivate
+source .venv/bin/activate
+
+# JCF, Apr-04-2024: unclear if the following is still relevant/correct...
+
+## Building the DAQ package using features available only in the locally installed HighFive
+cd sourcecode/
+git clone https://github.com/DUNE-DAQ/hdf5libs -b leo-update-tests-apps
+# Modify hdf5libs's CMakeLists.txt to set:
+# option(WITH_HIGHFIVE_AS_PACKAGE "HIGHFIVE externals as a dunedaq package" ON)
+# option(WITH_HDF5_AS_PACKAGE "HDF5 externals as a dunedaq package" ON)
+
+dbt-build
+```
+
+
+-----
+
+
+_Last git commit to the markdown source of this page:_
+
+
+_Author: Dune_
+
+_Date: Wed Oct 16 14:03:52 2024 -0500_
+
+_If you see a problem with the documentation on this page, please file an Issue at [https://github.com/DUNE-DAQ/daq-release/issues](https://github.com/DUNE-DAQ/daq-release/issues)_
+
diff --git a/docs/packages/daq-release/Doxyfile.in b/docs/packages/daq-release/Doxyfile.in
new file mode 100644
index 00000000000..485639efb1f
--- /dev/null
+++ b/docs/packages/daq-release/Doxyfile.in
@@ -0,0 +1,2851 @@
+# Doxyfile 1.10.0
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+#
+# Note:
+#
+# Use doxygen to compare the used configuration file with the template
+# configuration file:
+# doxygen -x [configFile]
+# Use doxygen to compare the used configuration file with the template
+# configuration file without replacing the environment variables or CMake type
+# replacement variables:
+# doxygen -x_noenv [configFile]
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the configuration
+# file that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# https://www.gnu.org/software/libiconv/ for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME = DUNE-DAQ
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF = "DUNE Trigger and Data Acquisition software"
+
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
+
+PROJECT_LOGO =
+
+# With the PROJECT_ICON tag one can specify an icon that is included in the tabs
+# when the HTML document is shown. Doxygen will copy the logo to the output
+# directory.
+
+PROJECT_ICON =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = docs
+
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create up to 4096
+# sub-directories (in 2 levels) under the output directory of each output format
+# and will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise causes
+# performance problems for the file system. Adapt CREATE_SUBDIRS_LEVEL to
+# control the number of sub-directories.
+# The default value is: NO.
+
+CREATE_SUBDIRS = NO
+
+# Controls the number of sub-directories that will be created when
+# CREATE_SUBDIRS tag is set to YES. Level 0 represents 16 directories, and every
+# level increment doubles the number of directories, resulting in 4096
+# directories at level 8 which is the default and also the maximum value. The
+# sub-directories are organized in 2 levels, the first level always has a fixed
+# number of 16 directories.
+# Minimum value: 0, maximum value: 8, default value: 8.
+# This tag requires that the tag CREATE_SUBDIRS is set to YES.
+
+CREATE_SUBDIRS_LEVEL = 8
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Bulgarian,
+# Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, English
+# (United States), Esperanto, Farsi (Persian), Finnish, French, German, Greek,
+# Hindi, Hungarian, Indonesian, Italian, Japanese, Japanese-en (Japanese with
+# English messages), Korean, Korean-en (Korean with English messages), Latvian,
+# Lithuanian, Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese,
+# Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish,
+# Swedish, Turkish, Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF = "The $name class" \
+ "The $name widget" \
+ "The $name file" \
+ is \
+ provides \
+ specifies \
+ contains \
+ represents \
+ a \
+ an \
+ the
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful is your file systems doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the JAVADOC_BANNER tag is set to YES then doxygen will interpret a line
+# such as
+# /***************
+# as being the beginning of a Javadoc-style comment "banner". If set to NO, the
+# Javadoc-style will behave just like regular comments and it will not be
+# interpreted by doxygen.
+# The default value is: NO.
+
+JAVADOC_BANNER = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# By default Python docstrings are displayed as preformatted text and doxygen's
+# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the
+# doxygen's special commands can be used and the contents of the docstring
+# documentation blocks is shown as doxygen documentation.
+# The default value is: YES.
+
+PYTHON_DOCSTRING = YES
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:^^"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". Note that you cannot put \n's in the value part of an alias
+# to insert newlines (in the resulting output). You can put ^^ in the value part
+# of an alias to insert a newline as if a physical newline was in the original
+# file. When you need a literal { or } or , in the value part of an alias you
+# have to escape them by means of a backslash (\), this can lead to conflicts
+# with the commands \{ and \} for these it is advised to use the version @{ and
+# @} or use a double escape (\\{ and \\})
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice
+# sources only. Doxygen will then generate output that is more tailored for that
+# language. For instance, namespaces will be presented as modules, types will be
+# separated into more groups, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_SLICE = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, JavaScript,
+# Csharp (C#), C, C++, Lex, D, PHP, md (Markdown), Objective-C, Python, Slice,
+# VHDL, Fortran (fixed format Fortran: FortranFixed, free formatted Fortran:
+# FortranFree, unknown formatted Fortran: Fortran. In the later case the parser
+# tries to guess whether the code is fixed or free formatted code, this is the
+# default for Fortran type files). For instance to make doxygen treat .inc files
+# as Fortran files (default is PHP), and .f files as C (default is Fortran),
+# use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen. When specifying no_extension you should add
+# * to the FILE_PATTERNS.
+#
+# Note see also the list of default file extension mappings.
+
+EXTENSION_MAPPING =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See https://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up
+# to that level are automatically included in the table of contents, even if
+# they do not have an id attribute.
+# Note: This feature currently applies only to Markdown headings.
+# Minimum value: 0, maximum value: 99, default value: 5.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+TOC_INCLUDE_HEADINGS = 5
+
+# The MARKDOWN_ID_STYLE tag can be used to specify the algorithm used to
+# generate identifiers for the Markdown headings. Note: Every identifier is
+# unique.
+# Possible values are: DOXYGEN use a fixed 'autotoc_md' string followed by a
+# sequence number starting at 0 and GITHUB use the lower case version of title
+# with any whitespace replaced by '-' and punctuation characters removed.
+# The default value is: DOXYGEN.
+# This tag requires that the tag MARKDOWN_SUPPORT is set to YES.
+
+MARKDOWN_ID_STYLE = DOXYGEN
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# If one adds a struct or class to a group and this option is enabled, then also
+# any nested class or struct is added to the same group. By default this option
+# is disabled and one has to add nested compounds explicitly via \ingroup.
+# The default value is: NO.
+
+GROUP_NESTED_COMPOUNDS = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE = 0
+
+# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use
+# during processing. When set to 0 doxygen will base this on the number of
+# cores available in the system. You can set it explicitly to a value larger
+# than 0 to get more control over the balance between CPU load and processing
+# speed. At this moment only the input processing can be done using multiple
+# threads. Since this is still an experimental feature the default is set to 1,
+# which effectively disables parallel processing. Please report any issues you
+# encounter. Generating dot graphs in parallel is controlled by the
+# DOT_NUM_THREADS setting.
+# Minimum value: 0, maximum value: 32, default value: 1.
+
+NUM_PROC_THREADS = 1
+
+# If the TIMESTAMP tag is set different from NO then each generated page will
+# contain the date or date and time when the page was generated. Setting this to
+# NO can help when comparing the output of multiple runs.
+# Possible values are: YES, NO, DATETIME and DATE.
+# The default value is: NO.
+
+TIMESTAMP = DATE
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE = YES
+
+# If the EXTRACT_PRIV_VIRTUAL tag is set to YES, documented private virtual
+# methods of a class will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIV_VIRTUAL = YES
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. If set to YES, local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS = YES
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If this flag is set to YES, the name of an unnamed parameter in a declaration
+# will be determined by the corresponding definition. By default unnamed
+# parameters remain unnamed in the output.
+# The default value is: YES.
+
+RESOLVE_UNNAMED_PARAMS = YES
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO, these classes will be included in the various overviews. This option
+# will also hide undocumented C++ concepts if enabled. This option has no effect
+# if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# declarations. If set to NO, these declarations will be included in the
+# documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS = NO
+
+# With the correct setting of option CASE_SENSE_NAMES doxygen will better be
+# able to match the capabilities of the underlying filesystem. In case the
+# filesystem is case sensitive (i.e. it supports files in the same directory
+# whose names only differ in casing), the option must be set to YES to properly
+# deal with such files in case they appear in the input. For filesystems that
+# are not case sensitive the option should be set to NO to properly deal with
+# output files written for symbols that only differ in casing, such as for two
+# classes, one named CLASS and the other named Class, and to also support
+# references to files without having to specify the exact matching casing. On
+# Windows (including Cygwin) and MacOS, users should typically set this option
+# to NO, whereas on Linux or other Unix flavors it should typically be set to
+# YES.
+# Possible values are: SYSTEM, NO and YES.
+# The default value is: SYSTEM.
+
+CASE_SENSE_NAMES = SYSTEM
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES, the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
+# If the SHOW_HEADERFILE tag is set to YES then the documentation for a class
+# will show which file needs to be included to use the class.
+# The default value is: YES.
+
+SHOW_HEADERFILE = YES
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file. See also section "Changing the
+# layout of pages" for information.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE = ./DoxygenLayout.xml
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as documenting some parameters in
+# a documented function twice, or documenting parameters that don't exist or
+# using markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR = YES
+
+# If WARN_IF_INCOMPLETE_DOC is set to YES, doxygen will warn about incomplete
+# function parameter documentation. If set to NO, doxygen will accept that some
+# parameters have no documentation without warning.
+# The default value is: YES.
+
+WARN_IF_INCOMPLETE_DOC = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO, doxygen will only warn about wrong parameter
+# documentation, but not about the absence of documentation. If EXTRACT_ALL is
+# set to YES then this flag will automatically be disabled. See also
+# WARN_IF_INCOMPLETE_DOC
+# The default value is: NO.
+
+WARN_NO_PARAMDOC = NO
+
+# If WARN_IF_UNDOC_ENUM_VAL option is set to YES, doxygen will warn about
+# undocumented enumeration values. If set to NO, doxygen will accept
+# undocumented enumeration values. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: NO.
+
+WARN_IF_UNDOC_ENUM_VAL = NO
+
+# If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when
+# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS
+# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but
+# at the end of the doxygen process doxygen will return with a non-zero status.
+# If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS_PRINT then doxygen behaves
+# like FAIL_ON_WARNINGS but in case no WARN_LOGFILE is defined doxygen will not
+# write the warning messages in between other messages but write them at the end
+# of a run, in case a WARN_LOGFILE is defined the warning messages will be
+# besides being in the defined file also be shown at the end of a run, unless
+# the WARN_LOGFILE is defined as - i.e. standard output (stdout) in that case
+# the behavior will remain as with the setting FAIL_ON_WARNINGS.
+# Possible values are: NO, YES, FAIL_ON_WARNINGS and FAIL_ON_WARNINGS_PRINT.
+# The default value is: NO.
+
+WARN_AS_ERROR = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# See also: WARN_LINE_FORMAT
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# In the $text part of the WARN_FORMAT command it is possible that a reference
+# to a more specific place is given. To make it easier to jump to this place
+# (outside of doxygen) the user can define a custom "cut" / "paste" string.
+# Example:
+# WARN_LINE_FORMAT = "'vi $file +$line'"
+# See also: WARN_FORMAT
+# The default value is: at line $line of file $file.
+
+WARN_LINE_FORMAT = "at line $line of file $file"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr). In case the file specified cannot be opened for writing the
+# warning and error messages are written to standard error. When as file - is
+# specified the warning and error messages are written to standard output
+# (stdout).
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING
+# Note: If this tag is empty the current directory is searched.
+
+INPUT = README.md \
+ doxygen_mainpage.md \
+ @INPUT_LIST
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see:
+# https://www.gnu.org/software/libiconv/) for the list of possible encodings.
+# See also: INPUT_FILE_ENCODING
+# The default value is: UTF-8.
+
+INPUT_ENCODING = UTF-8
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. The INPUT_FILE_ENCODING tag can be used to specify
+# character encoding on a per file pattern basis. Doxygen will compare the file
+# name with each pattern and apply the encoding instead of the default
+# INPUT_ENCODING if there is a match. The character encodings are a list of the
+# form: pattern=encoding (like *.php=ISO-8859-1). See "INPUT_ENCODING" for
+# further information on supported encodings.
+
+INPUT_FILE_ENCODING =
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# read by doxygen.
+#
+# Note the list of default checked file patterns might differ from the list of
+# default file extension mappings.
+#
+# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cxxm,
+# *.cpp, *.cppm, *.ccm, *.c++, *.c++m, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl,
+# *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, *.h++, *.ixx, *.l, *.cs, *.d,
+# *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, *.md, *.mm, *.dox (to
+# be provided as doxygen C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08,
+# *.f18, *.f, *.for, *.vhd, *.vhdl, *.ucf, *.qsf and *.ice.
+
+FILE_PATTERNS = *.c \
+ *.cc \
+ *.cxx \
+ *.cpp \
+ *.c++ \
+ *.java \
+ *.ii \
+ *.ixx \
+ *.ipp \
+ *.i++ \
+ *.inl \
+ *.idl \
+ *.ddl \
+ *.odl \
+ *.h \
+ *.hh \
+ *.hxx \
+ *.hpp \
+ *.h++ \
+ *.l \
+ *.cs \
+ *.d \
+ *.php \
+ *.php4 \
+ *.php5 \
+ *.phtml \
+ *.inc \
+ *.m \
+ *.markdown \
+ *.md \
+ *.mm \
+ *.dox \
+ *.py \
+ *.pyw \
+ *.f90 \
+ *.f95 \
+ *.f03 \
+ *.f08 \
+ *.f18 \
+ *.f \
+ *.for \
+ *.vhd \
+ *.vhdl \
+ *.ucf \
+ *.qsf \
+ *.ice
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# ANamespace::AClass, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS = *
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+#
+# Note that doxygen will use the data processed and written to standard output
+# for further processing, therefore nothing else, like debug statements or used
+# commands (so in case of a Windows batch file always use @echo OFF), should be
+# written to standard output.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+#
+# Note that for custom extensions or not directly supported extensions you also
+# need to set EXTENSION_MAPPING for the extension otherwise the files are not
+# properly processed by doxygen.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE = doxygen_mainpage.md
+
+# The Fortran standard specifies that for fixed formatted Fortran code all
+# characters from position 72 are to be considered as comment. A common
+# extension is to allow longer lines before the automatic comment starts. The
+# setting FORTRAN_COMMENT_AFTER will also make it possible that longer lines can
+# be processed before the automatic comment starts.
+# Minimum value: 7, maximum value: 10000, default value: 72.
+
+FORTRAN_COMMENT_AFTER = 72
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# multi-line macros, enums or list initialized variables directly into the
+# documentation.
+# The default value is: NO.
+
+INLINE_SOURCES = YES
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# entity all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see https://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = YES
+
+# The IGNORE_PREFIX tag can be used to specify a prefix (or a list of prefixes)
+# that should be ignored while generating the index headers. The IGNORE_PREFIX
+# tag works for classes, function and member names. The entity will be placed in
+# the alphabetical list under the first letter of the entity name that remains
+# after removing the prefix.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML the header file that includes any scripts and style sheets
+# that doxygen needs, which is dependent on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# Note: Since the styling of scrollbars can currently not be overruled in
+# Webkit/Chromium, the styling will be left out of the default doxygen.css if
+# one or more extra stylesheets have been specified. So if scrollbar
+# customization is desired it has to be added explicitly. For an example see the
+# documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET = ./doxygen-awesome-css/doxygen-awesome.css
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE tag can be used to specify if the generated HTML output
+# should be rendered with a dark or light theme.
+# Possible values are: LIGHT always generate light mode output, DARK always
+# generate dark mode output, AUTO_LIGHT automatically set the mode according to
+# the user preference, use light mode if no preference is set (the default),
+# AUTO_DARK automatically set the mode according to the user preference, use
+# dark mode if no preference is set and TOGGLE allow to user to switch between
+# light and dark mode via a button.
+# The default value is: AUTO_LIGHT.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE = AUTO_LIGHT
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a color-wheel, see
+# https://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use gray-scales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML
+# documentation will contain a main index with vertical navigation menus that
+# are dynamically created via JavaScript. If disabled, the navigation index will
+# consists of multiple levels of tabs that are statically embedded in every HTML
+# page. Disable this option to support browsers that do not have JavaScript,
+# like the Qt help browser.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_MENUS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# If the HTML_CODE_FOLDING tag is set to YES then classes and functions can be
+# dynamically folded and expanded in the generated HTML source code.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_CODE_FOLDING = YES
+
+# If the HTML_COPY_CLIPBOARD tag is set to YES then doxygen will show an icon in
+# the top right corner of code and text fragments that allows the user to copy
+# its content to the clipboard. Note this only works if supported by the browser
+# and the web page is served via a secure context (see:
+# https://www.w3.org/TR/secure-contexts/), i.e. using the https: or file:
+# protocol.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COPY_CLIPBOARD = YES
+
+# Doxygen stores a couple of settings persistently in the browser (via e.g.
+# cookies). By default these settings apply to all HTML pages generated by
+# doxygen across all projects. The HTML_PROJECT_COOKIE tag can be used to store
+# the settings under a project specific key, such that the user preferences will
+# be stored separately.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_PROJECT_COOKIE =
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see:
+# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To
+# create a documentation set, doxygen will generate a Makefile in the HTML
+# output directory. Running make will produce the docset in that directory and
+# running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy
+# genXcode/_index.html for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# This tag determines the URL of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDURL =
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# on Windows. In the beginning of 2021 Microsoft took the original page, with
+# a.o. the download links, offline (the HTML help workshop was already many years
+# in maintenance mode). You can download the HTML help workshop from the web
+# archives at Installation executable (see:
+# http://web.archive.org/web/20160201063255/http://download.microsoft.com/downlo
+# ad/0/A/9/0A939EF6-E31C-430F-A3DF-DFAE7960D564/htmlhelp.exe).
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the main .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND = NO
+
+# The SITEMAP_URL tag is used to specify the full URL of the place where the
+# generated documentation will be placed on the server by the user during the
+# deployment of the documentation. The generated sitemap is called sitemap.xml
+# and placed on the directory specified by HTML_OUTPUT. In case no SITEMAP_URL
+# is specified no sitemap is generated. For information about the sitemap
+# protocol see https://www.sitemaps.org
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SITEMAP_URL =
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE = org.doxygen.Project
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location (absolute path
+# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to
+# run qhelpgenerator on the generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine tune the look of the index (see "Fine-tuning the output"). As an
+# example, the default style sheet generated by doxygen has an example that
+# shows how to put an image at the root of the tree instead of the PROJECT_NAME.
+# Since the tree basically has the same information as the tab index, you could
+# consider setting DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = NO
+
+# When both GENERATE_TREEVIEW and DISABLE_INDEX are set to YES, then the
+# FULL_SIDEBAR option determines if the side bar is limited to only the treeview
+# area (value NO) or if it should extend to the full height of the window (value
+# YES). Setting this to YES gives a layout similar to
+# https://docs.readthedocs.io with more room for contents, but less room for the
+# project logo, title, and description. If either GENERATE_TREEVIEW or
+# DISABLE_INDEX is set to NO, this option has no effect.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FULL_SIDEBAR = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# If the OBFUSCATE_EMAILS tag is set to YES, doxygen will obfuscate email
+# addresses.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+OBFUSCATE_EMAILS = YES
+
+# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg
+# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see
+# https://inkscape.org) to generate formulas as SVG images instead of PNGs for
+# the HTML output. These images will generally look nicer at scaled resolutions.
+# Possible values are: png (the default) and svg (looks nicer but requires the
+# pdf2svg or inkscape tool).
+# The default value is: png.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FORMULA_FORMAT = svg
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# The FORMULA_MACROFILE can contain LaTeX \newcommand and \renewcommand commands
+# to create new LaTeX commands to be used in formulas as building blocks. See
+# the section "Including formulas" for details.
+
+FORMULA_MACROFILE =
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# https://www.mathjax.org) which uses client side JavaScript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want the formulas to look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# With MATHJAX_VERSION it is possible to specify the MathJax version to be used.
+# Note that the different versions of MathJax have different requirements with
+# regards to the different settings, so it is possible that also other MathJax
+# settings have to be changed when switching between the different MathJax
+# versions.
+# Possible values are: MathJax_2 and MathJax_3.
+# The default value is: MathJax_2.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_VERSION = MathJax_2
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. For more details about the output format see MathJax
+# version 2 (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) and MathJax version 3
+# (see:
+# http://docs.mathjax.org/en/latest/web/components/output.html).
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility. This is the name for Mathjax version 2, for MathJax version 3
+# this will be translated into chtml), NativeMML (i.e. MathML. Only supported
+# for MathJax 2. For MathJax version 3 chtml will be used instead.), chtml (This
+# is the name for Mathjax version 3, for MathJax version 2 this will be
+# translated into HTML-CSS) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from https://www.mathjax.org before deployment. The default value is:
+# - in case of MathJax version 2: https://cdn.jsdelivr.net/npm/mathjax@2
+# - in case of MathJax version 3: https://cdn.jsdelivr.net/npm/mathjax@3
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH =
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# for MathJax version 2 (see
+# https://docs.mathjax.org/en/v2.7-latest/tex.html#tex-and-latex-extensions):
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# For example for MathJax version 3 (see
+# http://docs.mathjax.org/en/latest/input/tex/extensions/index.html):
+# MATHJAX_EXTENSIONS = ams
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see:
+# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>