diff --git a/CMakeLists.txt b/CMakeLists.txt
index 73739304..2c07cd58 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.12)
-project(daqconf VERSION 5.6.1)
+project(daqconf VERSION 6.0.2)
find_package(daq-cmake REQUIRED )
diff --git a/config/daqconf_full_config.json b/config/daqconf_full_config.json
index 68648d1e..8ed1f185 100644
--- a/config/daqconf_full_config.json
+++ b/config/daqconf_full_config.json
@@ -1,196 +1,275 @@
{
- "boot": {
- "base_command_port": 3333,
- "disable_trace": false,
- "opmon_impl": "local",
- "ers_impl": "local",
- "pocket_url": "127.0.0.1",
- "image": "",
- "use_k8s": false,
- "op_env": "swtest",
- "data_request_timeout_ms": 1000
- },
- "dataflow": {
- "host_dfo": "localhost",
- "apps": [
- {
- "app_name": "dataflow0",
+ "boot": {
+ "base_command_port": 3333,
+ "capture_env_vars": [
+ "TIMING_SHARE",
+ "DETCHANNELMAPS_SHARE"
+ ],
+ "connectivity_service_host": "np04-srv-016",
+ "connectivity_service_interval": 1000,
+ "connectivity_service_port": 15000,
+ "connectivity_service_threads": 2,
+ "disable_trace": false,
+ "ers_impl": "local",
+ "k8s_image": "dunedaq/c8-minimal",
+ "k8s_rte": "auto",
+ "opmon_impl": "local",
+ "pocket_url": "127.0.0.1",
+ "process_manager": "ssh",
+ "start_connectivity_service": true,
+ "use_connectivity_service": true
+ },
+ "ctb_hsi": {
+ "beam_llt_triggers": [],
+ "crt_llt_triggers": [],
+ "fake_trig_1": {
+ "beam_mode": true,
+ "description": "Random trigger that can optionally be set to fire only during beam spill",
+ "enable": false,
+ "fixed_freq": true,
+ "period": 100000
+ },
+ "fake_trig_2": {
+ "beam_mode": true,
+ "description": "Random trigger that can optionally be set to fire only during beam spill",
+ "enable": false,
+ "fixed_freq": true,
+ "period": 100000
+ },
+ "hlt_triggers": [],
+ "host_ctb_hsi": "localhost",
+ "pds_llt_triggers": [],
+ "use_ctb_hsi": false
+ },
+ "daq_common": {
+ "data_rate_slowdown_factor": 1,
+ "data_request_timeout_ms": 1000,
+ "use_data_network": false
+ },
+ "dataflow": {
+ "apps": [
+ {
+ "app_name": "dataflow0",
+ "data_store_mode": "all-per-file",
+ "host_df": "localhost",
+ "max_file_size": 4294967296,
+ "max_trigger_record_window": 0,
+ "output_paths": [
+ "."
+ ]
+ }
+ ],
+ "enable_tpset_writing": false,
+ "host_dfo": "localhost",
+ "host_tpw": "localhost",
"token_count": 10,
- "output_paths": [ "." ],
- "host_df": "localhost",
- "max_file_size": 4294967296,
- "max_trigger_record_window": 0
- }
- ]
- },
- "dqm": {
- "enable_dqm": false,
- "impl": "local",
- "cmap": "HD",
- "host_dqm": [ "localhost" ],
- "raw_params": [ 60, 50 ],
- "std_params": [ 10, 1000 ],
- "rms_params": [ 0, 1000 ],
- "fourier_channel_params": [ 0, 0 ],
- "fourier_plane_params": [ 600, 1000 ],
- "df_rate": 10,
- "df_algs": "raw std fourier_plane",
- "max_num_frames": 32768,
- "kafka_address": "",
- "kafka_topic": "DQM"
- },
- "hsi": {
- "use_timing_hsi": false,
- "host_timing_hsi": "localhost",
- "hsi_hw_connections_file": "${TIMING_SHARE}/config/etc/connections.xml",
- "hsi_device_name": "",
- "hsi_readout_period": 1000,
- "control_hsi_hw": false,
- "hsi_endpoint_address": 1,
- "hsi_endpoint_partition": 0,
- "hsi_re_mask": 0,
- "hsi_fe_mask": 0,
- "hsi_inv_mask": 0,
- "hsi_source": 1,
- "use_fake_hsi": true,
- "host_fake_hsi": "localhost",
- "hsi_device_id": 0,
- "mean_hsi_signal_multiplicity": 1,
- "hsi_signal_emulation_mode": 0,
- "enabled_hsi_signals": 1
- },
- "ctb_hsi": {
- "use_ctb_hsi": false,
- "host_ctb_hsi": "localhost",
- "hlt_triggers": [
- { "id":"HLT_4",
- "description": "TEST HLT",
- "enable":true,
- "minc" : "0x1",
- "mexc" : "0x0",
- "prescale" : "0x0"
- }
- ],
- "beam_llt_triggers": [],
- "crt_llt_triggers":[],
- "pds_llt_triggers": [],
- "fake_trig_1": {
- "description": "Fake 1Hz LLT trigger",
- "enable": true,
- "fixed_freq": true,
- "beam_mode": false,
- "period": 62500000
- },
- "fake_trig_2": {
- "description": "Fake 1Hz LLT trigger",
- "enable": true,
- "fixed_freq": true,
- "beam_mode": false,
- "period": 62500000
- }
- },
- "readout": {
- "detector_readout_map_file": "./DetectorReadoutMap.json",
- "emulator_mode": false,
- "thread_pinning_file": "",
- "data_rate_slowdown_factor": 1,
- "clock_speed_hz": 62500000,
- "default_data_file": "asset://?name=wib2-frames.bin&label=DuneWIB&subsystem=readout",
- "data_files": [],
- "latency_buffer_size": 499968,
- "enable_tpg": false,
- "enable_raw_recording": false,
- "raw_recording_output_dir": ".",
- "use_fake_data_producers": false,
- "readout_sends_tp_fragments": false,
- "eal_args": "-l 0-1 -n 3 -- -m [0:1].0 -j",
- "numa_config": {
- "default_id": 0,
- "exceptions": []
+ "tpset_output_file_size": 4294967296,
+ "tpset_output_path": "."
+ },
+ "detector": {
+ "clock_speed_hz": 62500000,
+ "op_env": "swtest",
+ "tpc_channel_map": "PD2HDChannelMap"
+ },
+ "dpdk_sender": {
+ "eal_args": "-l 0-1 -n 3 -- -m [0:1].0 -j",
+ "enable_dpdk_sender": false,
+ "host_dpdk_sender": [
+ "np04-srv-021"
+ ]
+ },
+ "dqm": {
+ "cmap": "HD",
+ "df_algs": "raw std fourier_plane",
+ "df_rate": 10,
+ "enable_dqm": false,
+ "fourier_channel_params": [
+ 0,
+ 0
+ ],
+ "fourier_plane_params": [
+ 600,
+ 1000
+ ],
+ "host_dqm": [
+ "localhost"
+ ],
+ "impl": "local",
+ "kafka_address": "",
+ "kafka_topic": "DQM",
+ "max_num_frames": 32768,
+ "raw_params": [
+ 60,
+ 50
+ ],
+ "rms_params": [
+ 0,
+ 1000
+ ],
+ "std_params": [
+ 10,
+ 1000
+ ]
+ },
+ "hsi": {
+ "control_hsi_hw": false,
+ "enable_hardware_state_recovery": true,
+ "enabled_hsi_signals": 1,
+ "host_fake_hsi": "localhost",
+ "host_timing_hsi": "localhost",
+ "hsi_device_id": 0,
+ "hsi_device_name": "",
+ "hsi_endpoint_address": 1,
+ "hsi_endpoint_partition": 0,
+ "hsi_fe_mask": 0,
+ "hsi_hw_connections_file": "${TIMING_SHARE}/config/etc/connections.xml",
+ "hsi_inv_mask": 0,
+ "hsi_re_mask": 0,
+ "hsi_readout_period": 1000,
+ "hsi_signal_emulation_mode": 0,
+ "hsi_source": 1,
+ "mean_hsi_signal_multiplicity": 1,
+ "use_fake_hsi": true,
+ "use_timing_hsi": false
+ },
+ "readout": {
+ "data_files": [],
+ "default_data_file": "/cvmfs/dunedaq.opensciencegrid.org/assets/files/9/f/1/frames.bin",
+ "detector_readout_map_file": "./DetectorReadoutMap.json",
+ "dpdk_eal_args": "-l 0-1 -n 3 -- -m [0:1].0 -j",
+ "dpdk_rxqueues_per_lcore": 1,
+ "emulated_data_times_start_with_now": false,
+ "emulator_mode": false,
+ "enable_raw_recording": false,
+ "enable_tpg": false,
+ "fragment_send_timeout_ms": 10,
+ "latency_buffer_size": 499968,
+ "numa_config": {
+ "default_id": 0,
+ "default_latency_numa_aware": false,
+ "default_latency_preallocation": false,
+ "exceptions": []
+ },
+ "raw_recording_output_dir": ".",
+ "thread_pinning_file": "",
+ "tpg_algorithm": "SimpleThreshold",
+ "tpg_channel_mask": [],
+ "tpg_threshold": 120,
+ "use_fake_cards": false,
+ "use_fake_data_producers": false
+ },
+ "timing": {
+ "control_timing_partition": false,
+ "host_tprtc": "localhost",
+ "timing_partition_id": 0,
+ "timing_partition_master_device_name": "",
+ "timing_partition_rate_control_enabled": false,
+ "timing_partition_spill_gate_enabled": false,
+ "timing_partition_trigger_mask": 255,
+ "timing_session_name": ""
+ },
+ "trigger": {
+ "completeness_tolerance": 1,
+ "ctcm_trigger_intervals": [
+ 10000000
+ ],
+ "ctcm_trigger_types": [
+ 4
+ ],
+ "host_trigger": "localhost",
+ "hsi_trigger_type_passthrough": false,
+ "mlt_buffer_timeout": 100,
+ "mlt_ignore_tc": [],
+ "mlt_max_td_length_ms": 1000,
+ "mlt_merge_overlapping_tcs": true,
+ "mlt_send_timed_out_tds": true,
+ "mlt_td_readout_map": {
+ "c0": {
+ "candidate_type": 0,
+ "time_after": 1001,
+ "time_before": 1000
+ },
+ "c1": {
+ "candidate_type": 1,
+ "time_after": 1001,
+ "time_before": 1000
+ },
+ "c2": {
+ "candidate_type": 2,
+ "time_after": 1001,
+ "time_before": 1000
+ },
+ "c3": {
+ "candidate_type": 3,
+ "time_after": 1001,
+ "time_before": 1000
+ },
+ "c4": {
+ "candidate_type": 4,
+ "time_after": 1001,
+ "time_before": 1000
+ },
+ "c5": {
+ "candidate_type": 5,
+ "time_after": 1001,
+ "time_before": 1000
+ },
+ "c6": {
+ "candidate_type": 6,
+ "time_after": 1001,
+ "time_before": 1000
+ },
+ "c7": {
+ "candidate_type": 7,
+ "time_after": 1001,
+ "time_before": 1000
+ },
+ "c8": {
+ "candidate_type": 8,
+ "time_after": 1001,
+ "time_before": 1000
+ },
+ "c9": {
+ "candidate_type": 9,
+ "time_after": 1001,
+ "time_before": 1000
+ }
+ },
+ "mlt_trigger_bitwords": [],
+ "mlt_use_bitwords": false,
+ "mlt_use_readout_map": false,
+ "tolerate_incompleteness": false,
+ "trigger_activity_config": {
+ "adc_threshold": 10000,
+ "adj_tolerance": 4,
+ "adjacency_threshold": 6,
+ "n_channels_threshold": 8,
+ "prescale": 100,
+ "print_tp_info": false,
+ "trigger_on_adc": false,
+ "trigger_on_adjacency": true,
+ "trigger_on_n_channels": false,
+ "window_length": 10000
+ },
+ "trigger_activity_plugin": "TriggerActivityMakerPrescalePlugin",
+ "trigger_candidate_config": {
+ "adc_threshold": 10000,
+ "adj_tolerance": 4,
+ "adjacency_threshold": 6,
+ "n_channels_threshold": 8,
+ "prescale": 100,
+ "print_tp_info": false,
+ "trigger_on_adc": false,
+ "trigger_on_adjacency": true,
+ "trigger_on_n_channels": false,
+ "window_length": 10000
+ },
+ "trigger_candidate_plugin": "TriggerCandidateMakerPrescalePlugin",
+ "trigger_rate_hz": 1.0,
+ "trigger_window_after_ticks": 1000,
+ "trigger_window_before_ticks": 1000,
+ "ttcm_s1": 1,
+ "ttcm_s2": 2,
+ "use_custom_maker": false
}
- },
- "timing": {
- "timing_session_name": "timing",
- "host_tprtc": "localhost",
- "control_timing_partition": false,
- "timing_partition_master_device_name": "",
- "timing_partition_id": 0,
- "timing_partition_trigger_mask": 255,
- "timing_partition_rate_control_enabled": false,
- "timing_partition_spill_gate_enabled": false
- },
- "trigger": {
- "trigger_rate_hz": 1,
- "trigger_window_before_ticks": 1000,
- "trigger_window_after_ticks": 1000,
- "host_trigger": "localhost",
- "host_tpw": "localhost",
- "ttcm_s1": 1,
- "ttcm_s2": 2,
- "trigger_activity_plugin": "TriggerActivityMakerPrescalePlugin",
- "trigger_activity_config": { "prescale": 100 },
- "trigger_candidate_plugin": "TriggerCandidateMakerPrescalePlugin",
- "trigger_candidate_config": { "prescale": 100 },
- "hsi_trigger_type_passthrough": false,
- "enable_tpset_writing": false,
- "tpset_output_path": ".",
- "tpset_output_file_size": 4294967296,
- "tpg_channel_map": "ProtoDUNESP1ChannelMap",
- "mlt_merge_overlapping_tcs": true,
- "mlt_buffer_timeout": 100,
- "mlt_send_timed_out_tds": true,
- "mlt_max_td_length_ms": 1000,
- "mlt_ignore_tc": [],
- "use_custom_maker": false,
- "ctcm_trigger_types": [4],
- "ctcm_trigger_intervals": [62500000],
- "mlt_use_readout_map": false,
- "mlt_td_readout_map": {
- "c0": {
- "time_before":1000,
- "time_after": 1001
- },
- "c1": {
- "time_before":1000,
- "time_after": 1001
- },
- "c2": {
- "time_before":1000,
- "time_after": 1001
- },
- "c3": {
- "time_before":1000,
- "time_after": 1001
- },
- "c4": {
- "time_before":1000,
- "time_after": 1001
- },
- "c5": {
- "time_before":1000,
- "time_after": 1001
- },
- "c6": {
- "time_before":1000,
- "time_after": 1001
- },
- "c7": {
- "time_before":1000,
- "time_after": 1001
- },
- "c8": {
- "time_before":1000,
- "time_after": 1001
- },
- "c9": {
- "time_before":1000,
- "time_after": 1001
- }
- }
- },
- "dpdk_sender": {
- "enable_dpdk_sender": false,
- "host_dpdk_sender": [ "np04-srv-021" ],
- "eal_args": "-l 0-1 -n 3 -- -m [0:1].0 -j"
- }
}
diff --git a/docs/ConfViewerLocalScreenshot.png b/docs/ConfViewerLocalScreenshot.png
new file mode 100644
index 00000000..c84d43d2
Binary files /dev/null and b/docs/ConfViewerLocalScreenshot.png differ
diff --git a/docs/ConfViewerScreenshot.png b/docs/ConfViewerScreenshot.png
new file mode 100644
index 00000000..08680d4f
Binary files /dev/null and b/docs/ConfViewerScreenshot.png differ
diff --git a/docs/ConfigDatabase.md b/docs/ConfigDatabase.md
new file mode 100644
index 00000000..29c4cd0d
--- /dev/null
+++ b/docs/ConfigDatabase.md
@@ -0,0 +1,36 @@
+# Interacting with the Configuration Database
+
+## Uploading a config
+To use configuration files with a _nanorc_ instance run on Kubernetes, the config first needs to be uploaded to the MongoDB running in the cluster.
+To do this, simply run `upload-conf `.
+
+_nanorc_ should then be started with `nanorc --pm k8s://np04-srv-015:31000 db://name-for-the-conf partition-name`
+
+Keep in mind that the config directory can contain underscores, but the name it will be given in the database cannot (hyphens are fine).
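+
+As a concrete sketch of the two steps above (the directory name, database name, and argument order are hypothetical placeholders, since the exact `upload-conf` syntax is not spelled out here):
+```
+# Hypothetical: upload the local directory my_config_dir under the hyphen-friendly name my-config-name
+upload-conf my_config_dir my-config-name
+# Start nanorc against the Kubernetes cluster, pointing it at the uploaded config
+nanorc --pm k8s://np04-srv-015:31000 db://my-config-name partition-name
+```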
+
+## Viewing configurations
+To inspect the contents of the database, run `daqconf_viewer` after setting up the software environment. This will open a graphical UI in the terminal. There are five optional arguments that can be provided (an example invocation is sketched after this list):
+* --conf-host and --reg-host to manually enter the hosts of the microservices (defaults to http://np04-srv-023 and http://dunedaq-microservices.cern.ch)
+* --conf-port and --reg-port to manually enter the ports that the services listen on (defaults to 31011 and 5005)
+* --dir to tell the config viewer where to look for local config files (defaults to ./)
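+
+For example, a hypothetical invocation that simply spells out the documented defaults while pointing the local-file view at an illustrative directory called `./my_configs` would be:
+```
+# All values below are the defaults listed above, except --dir, which is an illustrative local path
+daqconf_viewer --conf-host http://np04-srv-023 --conf-port 31011 --reg-host http://dunedaq-microservices.cern.ch --reg-port 5005 --dir ./my_configs
+```
+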
+![Config Viewer](ConfViewerScreenshot.png)
+
+A list of all configuration names is shown on the left. It can be filtered using the search bar above it.
+Clicking one of these names will generate a list of buttons at the top, corresponding to the saved versions of the config.
+The versions are displayed in descending order by default, but this can be changed with the V key.
+Click one of the buttons to bring up the config file in the display box. By clicking the arrows, the contents of each sub-schema can be expanded.
+
+Additionally, you can press the L key to switch between viewing the database and viewing local files. The config list will be replaced with a tree representing the contents of the directory given with --dir.
+![Local Configs](ConfViewerLocalScreenshot.png)
+
+Pressing the D key after picking a config will take you to a very similar screen, albeit with green lines instead of red.
+If a second config is then selected in the same way, a "diff" of the two will be generated, showing all the
+differences between them in a format similar to how commits are displayed on GitHub.
+Again, the L key can be used to switch to local files, allowing for comparisons of local and DB configs in any combination.
+Finally, once you are done, press q to quit (or use ctrl+c).
+
+## Interacting with the run registry
+Pressing the R key while on the first screen will take you to the run registry screen. Metadata for the chosen run is shown in the top right, and the associated config files are displayed in a tree on the bottom left. Selecting one brings up the JSON on the bottom right, with expandable schema as before.
+![Run Reg Viewer](RunRegScreenshot.png)
+
+When the application is initialised, the most recent run is shown by default. To navigate to different runs, simply click the back/forward buttons, or type the desired run number into the input box and click the Get Data button. The R key can be used to return to browsing the MongoDB when you are done.
diff --git a/docs/InstructionsForCasualUsers.md b/docs/InstructionsForCasualUsers.md
index a8080d60..b11a81a3 100644
--- a/docs/InstructionsForCasualUsers.md
+++ b/docs/InstructionsForCasualUsers.md
@@ -21,6 +21,8 @@ As of Oct-4-2022, here are the steps that should be used when you first create y
* and
* `hdf5_dump.py -n 3 -p all -f swtest_run000101_0000_*.hdf5`
+If you intend to run _nanorc_ on the Kubernetes cluster, then [these instructions](ConfigDatabase.md) may be useful.
+
When you return to this work area (for example, after logging out and back in), you can skip the 'setup' steps in the instructions above. For example:
1. `cd `
diff --git a/docs/README.md b/docs/README.md
index ae66f4cd..7d9fe6d3 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -6,10 +6,14 @@ The focus of this documentation is on providing instructions for using the tools
[Instructions for casual or first-time users](InstructionsForCasualUsers.md)
-and for a slightly more in-depth look into how to generate configurations for a DAQ system, take a look at:
+For a slightly more in-depth look into how to generate configurations for a DAQ system, take a look at:
[Configuration options for casual or first-time users](ConfigurationsForCasualUsers.md)
+If you want to view existing configs stored in the MongoDB, or the configurations of past runs accessible through the run-registry microservice, take a look at:
+
+[Interacting with the Configuration Database](ConfigDatabase.md)
+
Traditionally multiple command line options were passed to `daqconf_multiru_gen` in order to control how it generated configurations. However, for the `dunedaq-v3.2.0` release (September 2022) we're switching to passing a single JSON file whose contents contain the information needed to control `daqconf_multiru_gen`. For `daqconf_multiru_gen` users who want to learn about how to make the switch to this new approach, take a look at [these migration instructions](MigratingToNewConfgen.md).
Finally, here's a nice visual representation of the type of DAQ system which can be configured:
diff --git a/docs/RunRegScreenshot.png b/docs/RunRegScreenshot.png
new file mode 100644
index 00000000..43b34bdc
Binary files /dev/null and b/docs/RunRegScreenshot.png differ
diff --git a/python/daqconf/apps/dataflow_gen.py b/python/daqconf/apps/dataflow_gen.py
index cd1ca44e..ac14fef1 100644
--- a/python/daqconf/apps/dataflow_gen.py
+++ b/python/daqconf/apps/dataflow_gen.py
@@ -29,24 +29,31 @@
# Time to wait on pop()
QUEUE_POP_WAIT_MS = 100
-def get_dataflow_app(HOSTIDX=0,
- OUTPUT_PATHS=["."],
- APP_NAME="dataflow0",
- OPERATIONAL_ENVIRONMENT="swtest",
- FILE_LABEL="swtest",
- DATA_STORE_MODE='all-per-file',
- MAX_FILE_SIZE=4*1024*1024*1024,
- MAX_TRIGGER_RECORD_WINDOW=0,
- MAX_EXPECTED_TR_SEQUENCES=1,
- TOKEN_COUNT=10,
- TRB_TIMEOUT=200,
- HOST="localhost",
- HAS_DQM=False,
- SRC_GEO_ID_MAP='',
- DEBUG=False):
+def get_dataflow_app(
+ df_config,
+ dataflow,
+ detector,
+ HOSTIDX=0,
+ APP_NAME="dataflow0",
+ FILE_LABEL="swtest",
+ MAX_EXPECTED_TR_SEQUENCES=1,
+ TRB_TIMEOUT=200,
+ HAS_DQM=False,
+ SRC_GEO_ID_MAP='',
+ DEBUG=False
+ ):
"""Generate the json configuration for the readout and DF process"""
+ OUTPUT_PATHS = df_config.output_paths
+ OPERATIONAL_ENVIRONMENT = detector.op_env
+ DATA_STORE_MODE=df_config.data_store_mode
+ MAX_FILE_SIZE = df_config.max_file_size
+ MAX_TRIGGER_RECORD_WINDOW = df_config.max_trigger_record_window
+ TOKEN_COUNT = dataflow.token_count
+ HOST=df_config.host_df
+
+
modules = []
queues = []
@@ -118,4 +125,12 @@ def get_dataflow_app(HOSTIDX=0,
df_app = App(modulegraph=mgraph, host=HOST)
+
+ df_app.mounted_dirs += [{
+ 'name': f'raw-data-{i}',
+ 'physical_location': opath,
+ 'in_pod_location': opath,
+ 'read_only': False,
+ } for i,opath in enumerate(set(OUTPUT_PATHS))]
+
return df_app
diff --git a/python/daqconf/apps/dqm_gen.py b/python/daqconf/apps/dqm_gen.py
index b358ecaf..56814ea8 100644
--- a/python/daqconf/apps/dqm_gen.py
+++ b/python/daqconf/apps/dqm_gen.py
@@ -25,38 +25,40 @@
from daqconf.core.daqmodule import DAQModule
from daqconf.core.app import App,ModuleGraph
-from detdataformats._daq_detdataformats_py import *
+from detdataformats import *
# Time to wait on pop()
QUEUE_POP_WAIT_MS = 100
-def get_dqm_app(DQM_IMPL='',
- DATA_RATE_SLOWDOWN_FACTOR=1,
- CLOCK_SPEED_HZ=62500000,
- DQMIDX=0,
- MAX_NUM_FRAMES=32768,
- KAFKA_ADDRESS='',
- KAFKA_TOPIC='',
- CMAP='HD',
- RAW_PARAMS=[60, 50],
- RMS_PARAMS=[10, 1000],
- STD_PARAMS=[10, 1000],
- FOURIER_CHANNEL_PARAMS=[600, 100],
- FOURIER_PLANE_PARAMS=[60, 1000],
- LINKS=[],
- HOST="localhost",
- MODE="readout",
- DF_RATE=10,
- DF_ALGS='raw std fourier_plane',
- DF_TIME_WINDOW=0,
- # DRO_CONFIG=None,
- RU_STREAMS=None,
- RU_APPNAME="ru_0",
- TRB_DQM_SOURCEID_OFFSET=0,
- DEBUG=False,
- ):
+def get_dqm_app(
+ DQM_IMPL='',
+ DATA_RATE_SLOWDOWN_FACTOR=1,
+ CLOCK_SPEED_HZ=62500000,
+ DQMIDX=0,
+ MAX_NUM_FRAMES=32768,
+ KAFKA_ADDRESS='',
+ KAFKA_TOPIC='',
+ CMAP='HD',
+ RAW_PARAMS=[60, 50],
+ RMS_PARAMS=[10, 1000],
+ STD_PARAMS=[10, 1000],
+ FOURIER_CHANNEL_PARAMS=[600, 100],
+ FOURIER_PLANE_PARAMS=[60, 1000],
+ LINKS=[],
+ HOST="localhost",
+ MODE="readout",
+ DF_RATE=10,
+ DF_ALGS='raw std fourier_plane',
+ DF_TIME_WINDOW=0,
+ # DRO_CONFIG=None,
+ RU_STREAMS=None,
+ RU_APPNAME="ru_0",
+ TRB_DQM_SOURCEID_OFFSET=0,
+ DEBUG=False,
+ ):
FRONTEND_TYPE = DetID.subdetector_to_string(DetID.Subdetector(RU_STREAMS[0].geo_id.det_id))
+
if ((FRONTEND_TYPE== "HD_TPC" or FRONTEND_TYPE== "VD_Bottom_TPC") and CLOCK_SPEED_HZ== 50000000):
FRONTEND_TYPE = "wib"
elif ((FRONTEND_TYPE== "HD_TPC" or FRONTEND_TYPE== "VD_Bottom_TPC") and CLOCK_SPEED_HZ== 62500000):
@@ -65,8 +67,9 @@ def get_dqm_app(DQM_IMPL='',
FRONTEND_TYPE = "pds_list"
elif FRONTEND_TYPE== "VD_Top_TPC":
FRONTEND_TYPE = "tde"
- elif FRONTEND_TYPE== "ND_LAr":
- FRONTEND_TYPE = "pacman"
+ # 20-Jun-2023, KAB: quick fix to get FD-specific nightly build to run
+ #elif FRONTEND_TYPE== "ND_LAr":
+ # FRONTEND_TYPE = "pacman"
if DQM_IMPL == 'cern':
KAFKA_ADDRESS = "monkafka.cern.ch:30092"
@@ -76,7 +79,6 @@ def get_dqm_app(DQM_IMPL='',
modules = []
if MODE == 'readout':
-
modules += [DAQModule(name='trb_dqm',
plugin='TriggerRecordBuilder',
conf=trb.ConfParams(
diff --git a/python/daqconf/apps/fake_hsi_gen.py b/python/daqconf/apps/fake_hsi_gen.py
index 93f889c0..287d359f 100644
--- a/python/daqconf/apps/fake_hsi_gen.py
+++ b/python/daqconf/apps/fake_hsi_gen.py
@@ -42,37 +42,36 @@
import math
#===============================================================================
-def get_fake_hsi_app(RUN_NUMBER=333,
- CLOCK_SPEED_HZ: int=62500000,
- DATA_RATE_SLOWDOWN_FACTOR: int=1,
- TRIGGER_RATE_HZ: int=1,
- HSI_SOURCE_ID: int=0,
- MEAN_SIGNAL_MULTIPLICITY: int=0,
- SIGNAL_EMULATION_MODE: int=0,
- ENABLED_SIGNALS: int=0b00000001,
- QUEUE_POP_WAIT_MS=10,
- LATENCY_BUFFER_SIZE=100000,
- DATA_REQUEST_TIMEOUT=1000,
- HOST="localhost",
- DEBUG=False):
+def get_fake_hsi_app(
+ detector,
+ hsi,
+ daq_common,
+ source_id,
+ QUEUE_POP_WAIT_MS=10,
+ LATENCY_BUFFER_SIZE=100000,
+ DATA_REQUEST_TIMEOUT=1000,
+ # HOST="localhost",
+ DEBUG=False
+ ):
- region_id=0
- element_id=0
-
- trigger_interval_ticks = 0
- if TRIGGER_RATE_HZ > 0:
- trigger_interval_ticks = math.floor((1 / TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR)
- startpars = rccmd.StartParams(run=RUN_NUMBER, trigger_rate = TRIGGER_RATE_HZ)
+ CLOCK_SPEED_HZ = detector.clock_speed_hz
+ DATA_RATE_SLOWDOWN_FACTOR = daq_common.data_rate_slowdown_factor
+ HSI_SOURCE_ID=source_id
+ RANDOM_TRIGGER_RATE_HZ = hsi.random_trigger_rate_hz
+ MEAN_SIGNAL_MULTIPLICITY = hsi.mean_hsi_signal_multiplicity
+ SIGNAL_EMULATION_MODE = hsi.hsi_signal_emulation_mode
+ ENABLED_SIGNALS = hsi.enabled_hsi_signals
+ HOST=hsi.host_fake_hsi
modules = [DAQModule(name = 'fhsig',
plugin = "FakeHSIEventGenerator",
conf = fhsig.Conf(clock_frequency=CLOCK_SPEED_HZ/DATA_RATE_SLOWDOWN_FACTOR,
- trigger_rate=TRIGGER_RATE_HZ,
+ trigger_rate=RANDOM_TRIGGER_RATE_HZ,
mean_signal_multiplicity=MEAN_SIGNAL_MULTIPLICITY,
signal_emulation_mode=SIGNAL_EMULATION_MODE,
enabled_signals=ENABLED_SIGNALS),
- extra_commands = {"start": startpars})]
+ )]
modules += [DAQModule(name = f"hsi_datahandler",
diff --git a/python/daqconf/apps/hsi_gen.py b/python/daqconf/apps/hsi_gen.py
index 8f59abef..8fb15a2c 100644
--- a/python/daqconf/apps/hsi_gen.py
+++ b/python/daqconf/apps/hsi_gen.py
@@ -11,9 +11,8 @@
# fragments are provided by the FakeDataProd module from dfmodules
import math
-from rich.console import Console
-console = Console()
-
+# from rich.console import Console
+from ..core.console import console
# Set moo schema search path
from dunedaq.env import get_moo_model_path
import moo.io
@@ -21,12 +20,12 @@
# Load configuration types
import moo.otypes
-moo.otypes.load_types('rcif/cmd.jsonnet')
+# moo.otypes.load_types('rcif/cmd.jsonnet')
moo.otypes.load_types('hsilibs/hsireadout.jsonnet')
moo.otypes.load_types('hsilibs/hsicontroller.jsonnet')
moo.otypes.load_types('readoutlibs/readoutconfig.jsonnet')
-import dunedaq.rcif.cmd as rccmd # AddressedCmd,
+# import dunedaq.rcif.cmd as rccmd # AddressedCmd,
import dunedaq.hsilibs.hsireadout as hsir
import dunedaq.hsilibs.hsicontroller as hsic
import dunedaq.readoutlibs.readoutconfig as rconf
@@ -36,29 +35,40 @@
from ..core.conf_utils import Direction, Queue
#===============================================================================
-def get_timing_hsi_app(RUN_NUMBER = 333,
- CLOCK_SPEED_HZ: int = 62500000,
- TRIGGER_RATE_HZ: int = 1,
- DATA_RATE_SLOWDOWN_FACTOR: int=1,
- CONTROL_HSI_HARDWARE = False,
- READOUT_PERIOD_US: int = 1e3,
- HSI_ENDPOINT_ADDRESS = 1,
- HSI_ENDPOINT_PARTITION = 0,
- HSI_RE_MASK = 0x20000,
- HSI_FE_MASK = 0,
- HSI_INV_MASK = 0,
- HSI_SOURCE = 1,
- HSI_SOURCE_ID = 0,
- CONNECTIONS_FILE="${TIMING_SHARE}/config/etc/connections.xml",
- HSI_DEVICE_NAME="BOREAS_TLU",
+def get_timing_hsi_app(
+ detector,
+ hsi,
+ daq_common,
+ source_id,
+ timing_session_name,
UHAL_LOG_LEVEL="notice",
QUEUE_POP_WAIT_MS=10,
LATENCY_BUFFER_SIZE=100000,
DATA_REQUEST_TIMEOUT=1000,
- TIMING_SESSION="",
- HARDWARE_STATE_RECOVERY_ENABLED=True,
- HOST="localhost",
DEBUG=False):
+
+
+
+
+ # Temp vars - remove
+ CLOCK_SPEED_HZ = detector.clock_speed_hz
+ DATA_RATE_SLOWDOWN_FACTOR = daq_common.data_rate_slowdown_factor
+ RANDOM_TRIGGER_RATE_HZ = hsi.random_trigger_rate_hz
+ CONTROL_HSI_HARDWARE=hsi.control_hsi_hw
+ CONNECTIONS_FILE=hsi.hsi_hw_connections_file
+ READOUT_PERIOD_US = hsi.hsi_readout_period
+ HSI_DEVICE_NAME = hsi.hsi_device_name
+ HARDWARE_STATE_RECOVERY_ENABLED = hsi.enable_hardware_state_recovery
+ HSI_ENDPOINT_ADDRESS = hsi.hsi_endpoint_address
+ HSI_ENDPOINT_PARTITION = hsi.hsi_endpoint_partition
+ HSI_RE_MASK=hsi.hsi_re_mask
+ HSI_FE_MASK=hsi.hsi_fe_mask
+ HSI_INV_MASK=hsi.hsi_inv_mask
+ HSI_SOURCE=hsi.hsi_source
+ HSI_SOURCE_ID=source_id
+ TIMING_SESSION=timing_session_name
+ HOST=hsi.host_timing_hsi
+
modules = {}
## TODO all the connections...
@@ -69,9 +79,6 @@ def get_timing_hsi_app(RUN_NUMBER = 333,
hsi_device_name=HSI_DEVICE_NAME,
uhal_log_level=UHAL_LOG_LEVEL))]
- region_id=0
- element_id=0
-
modules += [DAQModule(name = f"hsi_datahandler",
plugin = "HSIDataLinkHandler",
conf = rconf.Conf(readoutmodelconf = rconf.ReadoutModelConf(source_queue_timeout_ms = QUEUE_POP_WAIT_MS,
@@ -91,15 +98,6 @@ def get_timing_hsi_app(RUN_NUMBER = 333,
enable_raw_recording = False)
))]
- trigger_interval_ticks=0
- if TRIGGER_RATE_HZ > 0:
- trigger_interval_ticks=math.floor((1/TRIGGER_RATE_HZ) * CLOCK_SPEED_HZ)
- elif CONTROL_HSI_HARDWARE:
- console.log('WARNING! Emulated trigger rate of 0 will not disable signal emulation in real HSI hardware! To disable emulated HSI triggers, use option: "--hsi-source 0" or mask all signal bits', style="bold red")
-
- startpars = rccmd.StartParams(run=RUN_NUMBER, trigger_rate = TRIGGER_RATE_HZ)
- # resumepars = rccmd.ResumeParams(trigger_interval_ticks = trigger_interval_ticks)
-
if CONTROL_HSI_HARDWARE:
modules.extend( [
DAQModule(name="hsic",
@@ -108,14 +106,15 @@ def get_timing_hsi_app(RUN_NUMBER = 333,
hardware_state_recovery_enabled=HARDWARE_STATE_RECOVERY_ENABLED,
timing_session_name=TIMING_SESSION,
clock_frequency=CLOCK_SPEED_HZ,
- trigger_rate=TRIGGER_RATE_HZ,
+ trigger_rate=RANDOM_TRIGGER_RATE_HZ,
address=HSI_ENDPOINT_ADDRESS,
partition=HSI_ENDPOINT_PARTITION,
rising_edge_mask=HSI_RE_MASK,
falling_edge_mask=HSI_FE_MASK,
invert_edge_mask=HSI_INV_MASK,
data_source=HSI_SOURCE),
- extra_commands = {"start": startpars}),
+ # extra_commands = {"start": startpars}
+ ),
] )
queues = [Queue(f"hsir.output",f"hsi_datahandler.raw_input", "HSIFrame", f'hsi_link_0', 100000)]
diff --git a/python/daqconf/apps/readout_gen.py b/python/daqconf/apps/readout_gen.py
index e13653ef..214cf1e5 100644
--- a/python/daqconf/apps/readout_gen.py
+++ b/python/daqconf/apps/readout_gen.py
@@ -1,92 +1,66 @@
# Set moo schema search path
-from rich.console import Console
-
-console = Console()
-
-
from dunedaq.env import get_moo_model_path
import moo.io
moo.io.default_load_path = get_moo_model_path()
# Load configuration types
import moo.otypes
-# moo.otypes.load_types('rcif/cmd.jsonnet')
-# moo.otypes.load_types('appfwk/cmd.jsonnet')
-# moo.otypes.load_types('appfwk/app.jsonnet')
moo.otypes.load_types('flxlibs/felixcardreader.jsonnet')
-# moo.otypes.load_types('dtpctrellibs/dtpcontroller.jsonnet')
moo.otypes.load_types('readoutlibs/sourceemulatorconfig.jsonnet')
moo.otypes.load_types('readoutlibs/readoutconfig.jsonnet')
-moo.otypes.load_types('lbrulibs/pacmancardreader.jsonnet')
moo.otypes.load_types('dfmodules/fakedataprod.jsonnet')
moo.otypes.load_types("dpdklibs/nicreader.jsonnet")
# Import new types
-# import dunedaq.cmdlib.cmd as basecmd # AddressedCmd,
-# import dunedaq.rcif.cmd as rccmd # AddressedCmd,
-# import dunedaq.appfwk.cmd as cmd # AddressedCmd,
-# import dunedaq.appfwk.app as app # AddressedCmd,
import dunedaq.readoutlibs.sourceemulatorconfig as sec
import dunedaq.flxlibs.felixcardreader as flxcr
-# import dunedaq.dtpctrllibs.dtpcontroller as dtpctrl
import dunedaq.readoutlibs.readoutconfig as rconf
-import dunedaq.lbrulibs.pacmancardreader as pcr
-# import dunedaq.dfmodules.triggerrecordbuilder as trb
import dunedaq.dfmodules.fakedataprod as fdp
import dunedaq.dpdklibs.nicreader as nrc
# from appfwk.utils import acmd, mcmd, mrccmd, mspec
from os import path
+from pathlib import Path
from ..core.conf_utils import Direction, Queue
from ..core.sourceid import SourceIDBroker
from ..core.daqmodule import DAQModule
from ..core.app import App, ModuleGraph
-from ..detreadoutmap import ReadoutUnitDescriptor
+from ..detreadoutmap import ReadoutUnitDescriptor, group_by_key
# from detdataformats._daq_detdataformats_py import *
from detdataformats import DetID
-from ..detreadoutmap import group_by_key
-
-
## Compute the frament types from detector infos
def compute_data_types(
- det_id : int,
- clk_freq_hz: int,
- kind: str
+ stream_entry
):
- det_str = DetID.subdetector_to_string(DetID.Subdetector(det_id))
-
- fe_type = None
- fakedata_frag_type = None
- queue_frag_type = None
- fakedata_time_tick=None
- fakedata_frame_size=None
-
- # if ((det_str == "HD_TPC" or det_str== "VD_Bottom_TPC") and clk_freq_hz== 50000000):
- # fe_type = "wib"
- # queue_frag_type="WIBFrame"
- # fakedata_frag_type = "ProtoWIB"
- # fakedata_time_tick=25
- # fakedata_frame_size=434
+ det_str = DetID.subdetector_to_string(DetID.Subdetector(stream_entry.geo_id.det_id))
+
+
# Far detector types
- if ((det_str == "HD_TPC" or det_str == "VD_Bottom_TPC") and clk_freq_hz== 62500000 and kind=='flx' ):
+ if (det_str in ("HD_TPC","VD_Bottom_TPC") and stream_entry.kind=='flx' ):
fe_type = "wib2"
queue_frag_type="WIB2Frame"
fakedata_frag_type = "WIB"
fakedata_time_tick=32
fakedata_frame_size=472
- elif ((det_str == "HD_TPC" or det_str == "VD_Bottom_TPC") and clk_freq_hz== 62500000 and kind=='eth' ):
+ elif (det_str in ("HD_TPC","VD_Bottom_TPC") and stream_entry.kind=='eth' ):
fe_type = "wibeth"
queue_frag_type="WIBEthFrame"
fakedata_frag_type = "WIBEth"
fakedata_time_tick=2048
fakedata_frame_size=7200
- elif det_str == "HD_PDS" or det_str == "VD_Cathode_PDS" or det_str =="VD_Membrane_PDS":
+ elif det_str in ("HD_PDS", "VD_Cathode_PDS", "VD_Membrane_PDS") and stream_entry.parameters.mode == "var_rate":
+ fe_type = "pds"
+ fakedata_frag_type = "DAPHNE"
+ queue_frag_type = "PDSFrame"
+ fakedata_time_tick=None
+ fakedata_frame_size=472
+ elif det_str in ("HD_PDS", "VD_Cathode_PDS", "VD_Membrane_PDS") and stream_entry.parameters.mode == "fix_rate":
fe_type = "pds_stream"
fakedata_frag_type = "DAPHNE"
queue_frag_type = "PDSStreamFrame"
@@ -98,164 +72,27 @@ def compute_data_types(
queue_frag_type = "TDEFrame"
fakedata_time_tick=4472*32
fakedata_frame_size=8972
- # Near detector types
- elif det_str == "NDLAr_TPC":
- fe_type = "pacman"
- fakedata_frag_type = "PACMAN"
- queue_frag_type = "PACMANFrame"
- fakedata_time_tick=None
- fakedata_frame_size=None
- elif det_str == "NDLAr_PDS":
- fe_type = "mpd"
- fakedata_frag_type = "MPD"
- queue_frag_type = "MPDFrame"
- fakedata_time_tick=None
- fakedata_frame_size=None
+ # 20-Jun-2023, KAB: quick fix to get FD-specific nightly build to run
+ ## Near detector types
+ #elif det_str == "NDLAr_TPC":
+ # fe_type = "pacman"
+ # fakedata_frag_type = "PACMAN"
+ # queue_frag_type = "PACMANFrame"
+ # fakedata_time_tick=None
+ # fakedata_frame_size=None
+ #elif det_str == "NDLAr_PDS":
+ # fe_type = "mpd"
+ # fakedata_frag_type = "MPD"
+ # queue_frag_type = "MPDFrame"
+ # fakedata_time_tick=None
+ # fakedata_frame_size=None
else:
- raise ValueError(f"No match for {det_str}, {clk_freq_hz}, {kind}")
+ raise ValueError(f"No match for {det_str}, {stream_entry.kind}")
return fe_type, queue_frag_type, fakedata_frag_type, fakedata_time_tick, fakedata_frame_size
-###
-# Fake Card Reader creator
-###
-def create_fake_cardreader(
- FRONTEND_TYPE: str,
- QUEUE_FRAGMENT_TYPE: str,
- DATA_RATE_SLOWDOWN_FACTOR: int,
- DATA_FILES: dict,
- DEFAULT_DATA_FILE: str,
- CLOCK_SPEED_HZ: int,
- EMULATED_DATA_TIMES_START_WITH_NOW: bool,
- RU_DESCRIPTOR # ReadoutUnitDescriptor
-
-) -> tuple[list, list]:
- """
- Create a FAKE Card reader module
- """
-
- conf = sec.Conf(
- link_confs = [
- sec.LinkConfiguration(
- source_id=s.src_id,
- crate_id = s.geo_id.crate_id,
- slot_id = s.geo_id.slot_id,
- link_id = s.geo_id.stream_id,
- slowdown=DATA_RATE_SLOWDOWN_FACTOR,
- queue_name=f"output_{s.src_id}",
- data_filename = DATA_FILES[s.geo_id.det_id] if s.geo_id.det_id in DATA_FILES.keys() else DEFAULT_DATA_FILE,
- emu_frame_error_rate=0
- ) for s in RU_DESCRIPTOR.streams],
- use_now_as_first_data_time=EMULATED_DATA_TIMES_START_WITH_NOW,
- clock_speed_hz=CLOCK_SPEED_HZ,
- queue_timeout_ms = QUEUE_POP_WAIT_MS
- )
-
-
- modules = [DAQModule(name = "fake_source",
- plugin = "FakeCardReader",
- conf = conf)]
- queues = [
- Queue(
- f"fake_source.output_{s.src_id}",
- f"datahandler_{s.src_id}.raw_input",
- QUEUE_FRAGMENT_TYPE,
- f'{FRONTEND_TYPE}_link_{s.src_id}', 100000
- ) for s in RU_DESCRIPTOR.streams
- ]
-
- return modules, queues
-
-
-###
-# FELIX Card Reader creator
-###
-def create_felix_cardreader(
- FRONTEND_TYPE: str,
- QUEUE_FRAGMENT_TYPE: str,
- CARD_ID_OVERRIDE: int,
- NUMA_ID: int,
- RU_DESCRIPTOR # ReadoutUnitDescriptor
- ) -> tuple[list, list]:
- """
- Create a FELIX Card Reader (and reader->DHL Queues?)
-
- [CR]->queues
- """
- links_slr0 = []
- links_slr1 = []
- sids_slr0 = []
- sids_slr1 = []
- for stream in RU_DESCRIPTOR.streams:
- if stream.parameters.slr == 0:
- links_slr0.append(stream.parameters.link)
- sids_slr0.append(stream.src_id)
- if stream.parameters.slr == 1:
- links_slr1.append(stream.parameters.link)
- sids_slr1.append(stream.src_id)
-
- links_slr0.sort()
- links_slr1.sort()
-
- card_id = RU_DESCRIPTOR.iface if CARD_ID_OVERRIDE == -1 else CARD_ID_OVERRIDE
-
- modules = []
- queues = []
- if len(links_slr0) > 0:
- modules += [DAQModule(name = 'flxcard_0',
- plugin = 'FelixCardReader',
- conf = flxcr.Conf(card_id = card_id,
- logical_unit = 0,
- dma_id = 0,
- chunk_trailer_size = 32,
- dma_block_size_kb = 4,
- dma_memory_size_gb = 4,
- numa_id = NUMA_ID,
- links_enabled = links_slr0
- )
- )]
-
- if len(links_slr1) > 0:
- modules += [DAQModule(name = "flxcard_1",
- plugin = "FelixCardReader",
- conf = flxcr.Conf(card_id = card_id,
- logical_unit = 1,
- dma_id = 0,
- chunk_trailer_size = 32,
- dma_block_size_kb = 4,
- dma_memory_size_gb = 4,
- numa_id = NUMA_ID,
- links_enabled = links_slr1
- )
- )]
-
- # Queues for card reader 1
- queues += [
- Queue(
- f'flxcard_0.output_{idx}',
- f"datahandler_{idx}.raw_input",
- QUEUE_FRAGMENT_TYPE,
- f'{FRONTEND_TYPE}_link_{idx}',
- 100000
- ) for idx in sids_slr0
- ]
- # Queues for card reader 2
- queues += [
- Queue(
- f'flxcard_1.output_{idx}',
- f"datahandler_{idx}.raw_input",
- QUEUE_FRAGMENT_TYPE,
- f'{FRONTEND_TYPE}_link_{idx}',
- 100000
- ) for idx in sids_slr1
- ]
-
- return modules, queues
-
-
-
###
# DPDK Card Reader creator
###
@@ -275,15 +112,20 @@ def streams_by_host(self):
return iface_map
- def streams_by_iface(self):
+ def streams_by_rxiface(self):
+ """Group streams by interface
+
+ Returns:
+ dict: A map of streams with the same destination ip, mac and host
+ """
iface_map = group_by_key(self.desc.streams, lambda s: (s.parameters.rx_ip, s.parameters.rx_mac, s.parameters.rx_host))
return iface_map
- def streams_by_iface_and_tx_endpoint(self):
+ def streams_by_rxiface_and_tx_endpoint(self):
- s_by_if = self.streams_by_iface()
+ s_by_if = self.streams_by_rxiface()
m = {}
for k,v in s_by_if.items():
m[k] = group_by_key(v, lambda s: (s.parameters.tx_ip, s.parameters.tx_mac, s.parameters.tx_host))
@@ -294,16 +136,22 @@ def streams_by_iface_and_tx_endpoint(self):
# m = group_by_key(self.desc.streams, lambda s: (getattr(s.parameters, self.desc._host_label_map[s.kind]), getattr(s.parameters, self.desc._iflabel_map[s.kind]), s.kind, s.geo_id.det_id))
# return m
- def build_conf(self, eal_arg_list):
+ def build_conf(self, eal_arg_list, lcores_id_set):
+
- streams_by_if_and_tx = self.streams_by_iface_and_tx_endpoint()
+ streams_by_if_and_tx = self.streams_by_rxiface_and_tx_endpoint()
ifcfgs = []
for (rx_ip, rx_mac, _),txs in streams_by_if_and_tx.items():
srcs = []
# Sid is used for the "Source.id". What is it?
- for sid,((tx_ip,_,_),streams) in enumerate(txs.items()):
+            # Transmitters would otherwise be ordered by tx ip address, which is hard to
+            # interpret, so sort them by the minimum src_id of their streams instead.
+ txs_sorted_by_src = sorted(txs.items(), key=lambda x: min(x[1], key=lambda y: y.src_id))
+
+ for sid,((tx_ip,_,_),streams) in enumerate(txs_sorted_by_src):
ssm = nrc.SrcStreamsMapping([
nrc.StreamMap(source_id=s.src_id, stream_id=s.geo_id.stream_id)
for s in streams
@@ -319,7 +167,7 @@ def build_conf(self, eal_arg_list):
nrc.Source(
id=sid, # FIXME what is this ID?
ip_addr=tx_ip,
- lcore=sid+self.lcore_offset,
+ lcore=lcores_id_set[sid % len(lcores_id_set)],
rx_q=sid,
src_info=si,
src_streams_mapping=ssm
@@ -329,7 +177,8 @@ def build_conf(self, eal_arg_list):
nrc.Interface(
ip_addr=rx_ip,
mac_addr=rx_mac,
- expected_sources=srcs
+ expected_sources=srcs,
+ stats_reporting_cfg=nrc.StatsReporting()
)
)
@@ -340,581 +189,778 @@ def build_conf(self, eal_arg_list):
)
return conf
+
- def build_conf_by_host(self, eal_arg_list):
+# Time to wait on pop()
+QUEUE_POP_WAIT_MS = 10 # This affects stop time, as each link will wait this long before stop
- streams_by_if_and_tx = self.streams_by_host()
- ifcfgs = []
- for (rx_ip, rx_mac, _),txs in streams_by_if_and_tx.items():
- srcs = []
- # Sid is used for the "Source.id". What is it?
+class ReadoutAppGenerator:
+ """Utility class to generate readout applications"""
- for sid,((tx_ip,_,_),streams) in enumerate(txs.items()):
- ssm = nrc.SrcStreamsMapping([
- nrc.StreamMap(source_id=s.src_id, stream_id=s.geo_id.stream_id)
- for s in streams
- ])
- geo_id = streams[0].geo_id
- si = nrc.SrcGeoInfo(
- det_id=geo_id.det_id,
- crate_id=geo_id.crate_id,
- slot_id=geo_id.slot_id
- )
+ def __init__(self, readout_cfg, det_cfg, daq_cfg):
- srcs.append(
- nrc.Source(
- id=sid, # FIXME what is this ID?
- ip_addr=tx_ip,
- lcore=sid+self.lcore_offset,
- rx_q=sid,
- src_info=si,
- src_streams_mapping=ssm
- )
- )
- ifcfgs.append(
- nrc.Interface(
- ip_addr=rx_ip,
- mac_addr=rx_mac,
- expected_sources=srcs
- )
- )
+ self.ro_cfg = readout_cfg
+ self.det_cfg = det_cfg
+ self.daq_cfg = daq_cfg
+ numa_excpt = {}
+ for ex in self.ro_cfg.numa_config['exceptions']:
+ numa_excpt[(ex['host'], ex['card'])] = ex
+ self.numa_excpt = numa_excpt
- conf = nrc.Conf(
- ifaces = ifcfgs,
- eal_arg_list=eal_arg_list
- )
+ lcores_excpt = {}
+ for ex in self.ro_cfg.dpdk_lcores_config['exceptions']:
+ lcores_excpt[(ex['host'], ex['iface'])] = ex
+ self.lcores_excpt = lcores_excpt
- return conf
-def create_dpdk_cardreader(
- FRONTEND_TYPE: str,
- QUEUE_FRAGMENT_TYPE: str,
- EAL_ARGS: str,
- RU_DESCRIPTOR # ReadoutUnitDescriptor
- ) -> tuple[list, list]:
- """
- Create a DPDK Card Reader (and reader->DHL Queues?)
+ def get_numa_cfg(self, RU_DESCRIPTOR):
- [CR]->queues
- """
+ cfg = self.ro_cfg
+ try:
+ ex = self.numa_excpt[(RU_DESCRIPTOR.host_name, RU_DESCRIPTOR.iface)]
+ numa_id = ex['numa_id']
+ latency_numa = ex['latency_buffer_numa_aware']
+ latency_preallocate = ex['latency_buffer_preallocation']
+ flx_card_override = ex['felix_card_id']
+ except KeyError:
+ numa_id = cfg.numa_config['default_id']
+ latency_numa = cfg.numa_config['default_latency_numa_aware']
+ latency_preallocate = cfg.numa_config['default_latency_preallocation']
+ flx_card_override = -1
+ return (numa_id, latency_numa, latency_preallocate, flx_card_override)
- eth_ru_bldr = NICReceiverBuilder(RU_DESCRIPTOR)
+ def get_lcore_config(self, RU_DESCRIPTOR):
+ cfg = self.ro_cfg
+ try:
+ ex = self.lcores_excpt[(RU_DESCRIPTOR.host_name, RU_DESCRIPTOR.iface)]
+ lcore_id_set = ex['lcore_id_set']
+ except KeyError:
+ lcore_id_set = cfg.dpdk_lcores_config['default_lcore_id_set']
- nic_reader_name = f"nic_reader_{RU_DESCRIPTOR.iface}"
+
+ return list(dict.fromkeys(lcore_id_set))
- modules = [DAQModule(
- name=nic_reader_name,
- plugin="NICReceiver",
- conf=eth_ru_bldr.build_conf(eal_arg_list=EAL_ARGS),
- )]
- # Queues
- queues = [
- Queue(
- f"{nic_reader_name}.output_{stream.src_id}",
- f"datahandler_{stream.src_id}.raw_input", QUEUE_FRAGMENT_TYPE,
- f'{FRONTEND_TYPE}_stream_{stream.src_id}', 100000
- )
- for stream in RU_DESCRIPTOR.streams
- ]
-
- return modules, queues
+ ###
+ # Fake Card Reader creator
+ ###
+ def create_fake_cardreader(
+ self,
+ # FRONTEND_TYPE: str,
+ # QUEUE_FRAGMENT_TYPE: str,
+ DATA_FILES: dict,
+ RU_DESCRIPTOR # ReadoutUnitDescriptor
-def create_pacman_cardreader(
- FRONTEND_TYPE: str,
- QUEUE_FRAGMENT_TYPE: str,
- RU_DESCRIPTOR # ReadoutUnitDescriptor
) -> tuple[list, list]:
- """
- Create a Pacman Cardeader
- """
+ """
+ Create a FAKE Card reader module
+ """
+ cfg = self.ro_cfg
+
+ conf = sec.Conf(
+ link_confs = [
+ sec.LinkConfiguration(
+ source_id=s.src_id,
+ crate_id = s.geo_id.crate_id,
+ slot_id = s.geo_id.slot_id,
+ link_id = s.geo_id.stream_id,
+ slowdown=self.daq_cfg.data_rate_slowdown_factor,
+ queue_name=f"output_{s.src_id}",
+ data_filename = DATA_FILES[s.geo_id.det_id] if s.geo_id.det_id in DATA_FILES.keys() else cfg.default_data_file,
+ emu_frame_error_rate=0
+ ) for s in RU_DESCRIPTOR.streams],
+ use_now_as_first_data_time=cfg.emulated_data_times_start_with_now,
+ clock_speed_hz=self.det_cfg.clock_speed_hz,
+ queue_timeout_ms = QUEUE_POP_WAIT_MS
+ )
- reader_name = "nd_reader"
- if FRONTEND_TYPE == 'pacman':
- reader_name = "pacman_source"
- elif FRONTEND_TYPE == 'mpd':
- reader_name = "mpd_source"
+ modules = [DAQModule(name = "fake_source",
+ plugin = "FDFakeCardReader",
+ conf = conf)]
+ # queues = [
+ # Queue(
+ # f"fake_source.output_{s.src_id}",
+ # f"datahandler_{s.src_id}.raw_input",
+ # QUEUE_FRAGMENT_TYPE,
+ # f'{FRONTEND_TYPE}_link_{s.src_id}', 100000
+ # ) for s in RU_DESCRIPTOR.streams
+ # ]
+
+ queues = []
+ for s in RU_DESCRIPTOR.streams:
+ FRONTEND_TYPE, QUEUE_FRAGMENT_TYPE, _, _, _ = compute_data_types(s)
+ queues.append(
+ Queue(
+ f"fake_source.output_{s.src_id}",
+ f"datahandler_{s.src_id}.raw_input",
+ QUEUE_FRAGMENT_TYPE,
+ f'{FRONTEND_TYPE}_link_{s.src_id}', 100000
+ )
+ )
- else:
- raise RuntimeError(f"Pacman Cardreader for {FRONTEND_TYPE} not supported")
-
- modules = [DAQModule(
- name=reader_name,
- plugin="PacmanCardReader",
- conf=pcr.Conf(link_confs = [pcr.LinkConfiguration(Source_ID=stream.src_id)
- for stream in RU_DESCRIPTOR.streams],
- zmq_receiver_timeout = 10000)
- )]
-
- # Queues
- queues = [
- Queue(
- f"{reader_name}.output_{stream.src_id}",
- f"datahandler_{stream.src_id}.raw_input", QUEUE_FRAGMENT_TYPE,
- f'{FRONTEND_TYPE}_stream_{stream.src_id}', 100000
- )
- for stream in RU_DESCRIPTOR.streams
- ]
-
- return modules, queues
+ return modules, queues
+
+
+ ###
+ # FELIX Card Reader creator
+ ###
+ def create_felix_cardreader(
+ self,
+ # FRONTEND_TYPE: str,
+ # QUEUE_FRAGMENT_TYPE: str,
+ CARD_ID_OVERRIDE: int,
+ NUMA_ID: int,
+ RU_DESCRIPTOR # ReadoutUnitDescriptor
+ ) -> tuple[list, list]:
+ """
+ Create a FELIX Card Reader (and reader->DHL Queues?)
+
+ [CR]->queues
+ """
+ links_slr0 = []
+ links_slr1 = []
+ strms_slr0 = []
+ strms_slr1 = []
+ for stream in RU_DESCRIPTOR.streams:
+ if stream.parameters.slr == 0:
+ links_slr0.append(stream.parameters.link)
+ strms_slr0.append(stream)
+ if stream.parameters.slr == 1:
+ links_slr1.append(stream.parameters.link)
+ strms_slr1.append(stream)
+
+ links_slr0.sort()
+ links_slr1.sort()
+
+ card_id = RU_DESCRIPTOR.iface if CARD_ID_OVERRIDE == -1 else CARD_ID_OVERRIDE
+
+ modules = []
+ queues = []
+ if len(links_slr0) > 0:
+ modules += [DAQModule(name = 'flxcard_0',
+ plugin = 'FelixCardReader',
+ conf = flxcr.Conf(card_id = card_id,
+ logical_unit = 0,
+ dma_id = 0,
+ chunk_trailer_size = 32,
+ dma_block_size_kb = 4,
+ dma_memory_size_gb = 4,
+ numa_id = NUMA_ID,
+ links_enabled = links_slr0
+ )
+ )]
+
+ if len(links_slr1) > 0:
+ modules += [DAQModule(name = "flxcard_1",
+ plugin = "FelixCardReader",
+ conf = flxcr.Conf(card_id = card_id,
+ logical_unit = 1,
+ dma_id = 0,
+ chunk_trailer_size = 32,
+ dma_block_size_kb = 4,
+ dma_memory_size_gb = 4,
+ numa_id = NUMA_ID,
+ links_enabled = links_slr1
+ )
+ )]
+
+ # # Queues for card reader 1
+ # queues += [
+ # Queue(
+ # f'flxcard_0.output_{idx}',
+ # f"datahandler_{idx}.raw_input",
+ # QUEUE_FRAGMENT_TYPE,
+ # f'{FRONTEND_TYPE}_link_{idx}',
+ # 100000
+ # ) for idx in strms_slr0
+ # ]
+ # # Queues for card reader 2
+ # queues += [
+ # Queue(
+ # f'flxcard_1.output_{idx}',
+ # f"datahandler_{idx}.raw_input",
+ # QUEUE_FRAGMENT_TYPE,
+ # f'{FRONTEND_TYPE}_link_{idx}',
+ # 100000
+ # ) for idx in strms_slr1
+ # ]
+ # Queues for card reader 1
+ for s in strms_slr0:
+ FRONTEND_TYPE, QUEUE_FRAGMENT_TYPE, _, _, _ = compute_data_types(s)
+ queues.append(
+ Queue(
+ f'flxcard_0.output_{s.src_id}',
+ f"datahandler_{s.src_id}.raw_input",
+ QUEUE_FRAGMENT_TYPE,
+ f'{FRONTEND_TYPE}_link_{s.src_id}',
+ 100000
+ )
+ )
+ # Queues for card reader 2
+ for s in strms_slr1:
+ FRONTEND_TYPE, QUEUE_FRAGMENT_TYPE, _, _, _ = compute_data_types(s)
+ queues.append(
+ Queue(
+ f'flxcard_1.output_{s.src_id}',
+ f"datahandler_{s.src_id}.raw_input",
+ QUEUE_FRAGMENT_TYPE,
+ f'{FRONTEND_TYPE}_link_{s.src_id}',
+ 100000
+ )
+ )
-###
-# Create detector datalink handlers
-###
-def create_det_dhl(
- LATENCY_BUFFER_SIZE: int,
- LATENCY_BUFFER_NUMA_AWARE: int,
- LATENCY_BUFFER_ALLOCATION_MODE: int,
- NUMA_ID: int,
- SEND_PARTIAL_FRAGMENTS: bool,
- RAW_RECORDING_OUTPUT_DIR: str,
- DATA_REQUEST_TIMEOUT: int,
- FRAGMENT_SEND_TIMEOUT: int,
- RAW_RECORDING_ENABLED: bool,
- RU_DESCRIPTOR, # ReadoutUnitDescriptor
- EMULATOR_MODE : bool
-
- ) -> tuple[list, list]:
+ return modules, queues
- # defaults hardcoded values
- default_latency_buffer_alignment_size = 4096
- default_pop_limit_pct = 0.8
- default_pop_size_pct = 0.1
- default_stream_buffer_size = 8388608
+ def create_dpdk_cardreader(
+ self,
+ # FRONTEND_TYPE: str,
+ # QUEUE_FRAGMENT_TYPE: str,
+ RU_DESCRIPTOR # ReadoutUnitDescriptor
+ ) -> tuple[list, list]:
+ """
+ Create a DPDK Card Reader (and reader->DHL Queues?)
- modules = []
- for stream in RU_DESCRIPTOR.streams:
- geo_id = stream.geo_id
- modules += [DAQModule(
- name = f"datahandler_{stream.src_id}",
- plugin = "DataLinkHandler",
- conf = rconf.Conf(
- readoutmodelconf= rconf.ReadoutModelConf(
- source_queue_timeout_ms= QUEUE_POP_WAIT_MS,
- # fake_trigger_flag=0, # default
- source_id = stream.src_id,
- send_partial_fragment_if_available = SEND_PARTIAL_FRAGMENTS
- ),
- latencybufferconf= rconf.LatencyBufferConf(
- latency_buffer_alignment_size = default_latency_buffer_alignment_size,
- latency_buffer_size = LATENCY_BUFFER_SIZE,
- source_id = stream.src_id,
- latency_buffer_numa_aware = LATENCY_BUFFER_NUMA_AWARE,
- latency_buffer_numa_node = NUMA_ID,
- latency_buffer_preallocation = LATENCY_BUFFER_ALLOCATION_MODE,
- latency_buffer_intrinsic_allocator = LATENCY_BUFFER_ALLOCATION_MODE,
- ),
- rawdataprocessorconf= rconf.RawDataProcessorConf(
- emulator_mode = EMULATOR_MODE,
- crate_id = geo_id.crate_id,
- slot_id = geo_id.slot_id,
- link_id = geo_id.stream_id
- ),
- requesthandlerconf= rconf.RequestHandlerConf(
- latency_buffer_size = LATENCY_BUFFER_SIZE,
- pop_limit_pct = default_pop_limit_pct,
- pop_size_pct = default_pop_size_pct,
- source_id = stream.src_id,
- det_id = RU_DESCRIPTOR.det_id,
- output_file = path.join(RAW_RECORDING_OUTPUT_DIR, f"output_{RU_DESCRIPTOR.label}_{stream.src_id}.out"),
- stream_buffer_size = default_stream_buffer_size,
- request_timeout_ms = DATA_REQUEST_TIMEOUT,
- fragment_send_timeout_ms = FRAGMENT_SEND_TIMEOUT,
- enable_raw_recording = RAW_RECORDING_ENABLED,
- ))
- )]
- queues = []
- return modules, queues
+ [CR]->queues
+ """
+ cfg = self.ro_cfg
-###
-# Enable processing in DHLs
-###
-def add_tp_processing(
- dlh_list: list,
- THRESHOLD_TPG: int,
- ALGORITHM_TPG: int,
- CHANNEL_MASK_TPG: list,
- TPG_CHANNEL_MAP: str,
- EMULATOR_MODE,
- CLOCK_SPEED_HZ: int,
- DATA_RATE_SLOWDOWN_FACTOR: int,
- ) -> list:
+ eth_ru_bldr = NICReceiverBuilder(RU_DESCRIPTOR)
- modules = []
+ nic_reader_name = f"nic_reader_{RU_DESCRIPTOR.iface}"
- # defaults hardcoded values
- default_error_counter_threshold=100
- default_error_reset_freq=10000
-
-
- # Loop over datalink handlers to re-define the data processor configuration
- for dlh in dlh_list:
-
- # Recover the raw data link source id
- # MOOOOOO
- dro_sid = dlh.conf.readoutmodelconf["source_id"]
- geo_cid = dlh.conf.rawdataprocessorconf["crate_id"]
- geo_sid = dlh.conf.rawdataprocessorconf["slot_id"]
- geo_lid = dlh.conf.rawdataprocessorconf["link_id"]
- # Re-create the module with an extended configuration
- modules += [DAQModule(
- name = dlh.name,
- plugin = dlh.plugin,
- conf = rconf.Conf(
- readoutmodelconf = dlh.conf.readoutmodelconf,
- latencybufferconf = dlh.conf.latencybufferconf,
- requesthandlerconf = dlh.conf.requesthandlerconf,
- rawdataprocessorconf= rconf.RawDataProcessorConf(
- source_id = dro_sid,
- crate_id = geo_cid,
- slot_id = geo_sid,
- link_id = geo_lid,
- enable_tpg = True,
- tpg_threshold = THRESHOLD_TPG,
- tpg_algorithm = ALGORITHM_TPG,
- tpg_channel_mask = CHANNEL_MASK_TPG,
- channel_map_name = TPG_CHANNEL_MAP,
- emulator_mode = EMULATOR_MODE,
- clock_speed_hz = (CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR),
- error_counter_threshold=default_error_counter_threshold,
- error_reset_freq=default_error_reset_freq
- ),
- )
- )]
+ lcores_id_set = self.get_lcore_config(RU_DESCRIPTOR)
+
+ modules = [DAQModule(
+ name=nic_reader_name,
+ plugin="NICReceiver",
+ conf=eth_ru_bldr.build_conf(
+ eal_arg_list=cfg.dpdk_eal_args,
+ lcores_id_set=lcores_id_set
+ ),
+ )]
+
+ # Queues
+ # queues = [
+ # Queue(
+ # f"{nic_reader_name}.output_{stream.src_id}",
+ # f"datahandler_{stream.src_id}.raw_input", QUEUE_FRAGMENT_TYPE,
+ # f'{FRONTEND_TYPE}_stream_{stream.src_id}', 100000
+ # )
+ # for stream in RU_DESCRIPTOR.streams
+ # ]
- return modules
+ queues = []
+ for stream in RU_DESCRIPTOR.streams:
+ FRONTEND_TYPE, QUEUE_FRAGMENT_TYPE, _, _, _ = compute_data_types(stream)
+ queues.append(
+ Queue(
+ f"{nic_reader_name}.output_{stream.src_id}",
+ f"datahandler_{stream.src_id}.raw_input",
+ QUEUE_FRAGMENT_TYPE,
+ f'{FRONTEND_TYPE}_stream_{stream.src_id}', 100000
+ )
+ )
-###
-# Create TP data link handlers
-###
-def create_tp_dlhs(
- dlh_list: list,
- DATA_REQUEST_TIMEOUT: int, # To Check
- FRAGMENT_SEND_TIMEOUT: int, # To Check
- tpset_sid: int,
- )-> tuple[list, list]:
+ return modules, queues
- default_pop_limit_pct = 0.8
- default_pop_size_pct = 0.1
- default_stream_buffer_size = 8388608
- default_latency_buffer_size = 4000000
- default_detid = 1
+# def create_pacman_cardreader(
+# self,
+# FRONTEND_TYPE: str,
+# QUEUE_FRAGMENT_TYPE: str,
+# RU_DESCRIPTOR # ReadoutUnitDescriptor
+# ) -> tuple[list, list]:
+# """
+# Create a Pacman Cardeader
+# """
+#
+# reader_name = "nd_reader"
+# if FRONTEND_TYPE == 'pacman':
+# reader_name = "pacman_source"
+#
+# elif FRONTEND_TYPE == 'mpd':
+# reader_name = "mpd_source"
+#
+# else:
+# raise RuntimeError(f"Pacman Cardreader for {FRONTEND_TYPE} not supported")
+#
+# modules = [DAQModule(
+# name=reader_name,
+# plugin="PacmanCardReader",
+# conf=pcr.Conf(link_confs = [pcr.LinkConfiguration(Source_ID=stream.src_id)
+# for stream in RU_DESCRIPTOR.streams],
+# zmq_receiver_timeout = 10000)
+# )]
+#
+# # Queues
+# queues = [
+# Queue(
+# f"{reader_name}.output_{stream.src_id}",
+# f"datahandler_{stream.src_id}.raw_input", QUEUE_FRAGMENT_TYPE,
+# f'{FRONTEND_TYPE}_stream_{stream.src_id}', 100000
+# )
+# for stream in RU_DESCRIPTOR.streams
+# ]
+#
+ return modules, queues
+
+
+
+
+
+ ###
+ # Create detector datalink handlers
+ ###
+ def create_det_dhl(
+ self,
+ # LATENCY_BUFFER_SIZE: int,
+ LATENCY_BUFFER_NUMA_AWARE: int,
+ LATENCY_BUFFER_ALLOCATION_MODE: int,
+ NUMA_ID: int,
+ SEND_PARTIAL_FRAGMENTS: bool,
+ # RAW_RECORDING_OUTPUT_DIR: str,
+ DATA_REQUEST_TIMEOUT: int,
+ # FRAGMENT_SEND_TIMEOUT: int,
+ # RAW_RECORDING_ENABLED: bool,
+ RU_DESCRIPTOR, # ReadoutUnitDescriptor
+ # EMULATOR_MODE : bool
- # Create the TP link handler
- modules = [
- DAQModule(name = f"tp_datahandler_{tpset_sid}",
- plugin = "DataLinkHandler",
- conf = rconf.Conf(
- readoutmodelconf = rconf.ReadoutModelConf(
- source_queue_timeout_ms = QUEUE_POP_WAIT_MS,
- source_id = tpset_sid
+ ) -> tuple[list, list]:
+
+ cfg = self.ro_cfg
+
+ # defaults hardcoded values
+ default_latency_buffer_alignment_size = 4096
+ default_pop_limit_pct = 0.8
+ default_pop_size_pct = 0.1
+ default_stream_buffer_size = 8388608
+
+
+ modules = []
+ for stream in RU_DESCRIPTOR.streams:
+ geo_id = stream.geo_id
+ modules += [DAQModule(
+ name = f"datahandler_{stream.src_id}",
+ plugin = "FDDataLinkHandler",
+ conf = rconf.Conf(
+ readoutmodelconf= rconf.ReadoutModelConf(
+ source_queue_timeout_ms= QUEUE_POP_WAIT_MS,
+ # fake_trigger_flag=0, # default
+ source_id = stream.src_id,
+ send_partial_fragment_if_available = SEND_PARTIAL_FRAGMENTS
),
- latencybufferconf = rconf.LatencyBufferConf(
- latency_buffer_size = default_latency_buffer_size,
- source_id = tpset_sid
+ latencybufferconf= rconf.LatencyBufferConf(
+ latency_buffer_alignment_size = default_latency_buffer_alignment_size,
+ latency_buffer_size = cfg.latency_buffer_size,
+ source_id = stream.src_id,
+ latency_buffer_numa_aware = LATENCY_BUFFER_NUMA_AWARE,
+ latency_buffer_numa_node = NUMA_ID,
+ latency_buffer_preallocation = LATENCY_BUFFER_ALLOCATION_MODE,
+ latency_buffer_intrinsic_allocator = LATENCY_BUFFER_ALLOCATION_MODE,
+ ),
+ rawdataprocessorconf= rconf.RawDataProcessorConf(
+ emulator_mode = cfg.emulator_mode,
+ crate_id = geo_id.crate_id,
+ slot_id = geo_id.slot_id,
+ link_id = geo_id.stream_id
),
- rawdataprocessorconf = rconf.RawDataProcessorConf(enable_tpg = False),
requesthandlerconf= rconf.RequestHandlerConf(
- latency_buffer_size = default_latency_buffer_size,
+ latency_buffer_size = cfg.latency_buffer_size,
pop_limit_pct = default_pop_limit_pct,
pop_size_pct = default_pop_size_pct,
- source_id = tpset_sid,
- det_id = default_detid,
+ source_id = stream.src_id,
+ det_id = RU_DESCRIPTOR.det_id,
+ output_file = path.join(cfg.raw_recording_output_dir, f"output_{RU_DESCRIPTOR.label}_{stream.src_id}.out"),
stream_buffer_size = default_stream_buffer_size,
request_timeout_ms = DATA_REQUEST_TIMEOUT,
- fragment_send_timeout_ms = FRAGMENT_SEND_TIMEOUT,
- enable_raw_recording = False
- )
- )
- )
- ]
-
- queues = []
- for dlh in dlh_list:
- # extract source ids
- dro_sid = dlh.conf.readoutmodelconf["source_id"]
-
- # Attach to the detector DLH's tp_out connector
- queues += [
- Queue(
- f"{dlh.name}.tp_out",
- f"tp_datahandler_{tpset_sid}.raw_input",
- "TriggerPrimitive",
- f"tp_link_{tpset_sid}",1000000
- )
- ]
-
- return modules, queues
+ fragment_send_timeout_ms = cfg.fragment_send_timeout_ms,
+ enable_raw_recording = cfg.enable_raw_recording,
+ ))
+ )]
+ queues = []
+ return modules, queues
-###
-# Add detector endpoints and fragment producers
-###
-def add_dro_eps_and_fps(
- mgraph: ModuleGraph,
- dlh_list: list,
- RUIDX: str,
-
-) -> None:
- """Adds detector readout endpoints and fragment producers"""
- for dlh in dlh_list:
- # print(dlh)
-
- # extract source ids
- dro_sid = dlh.conf.readoutmodelconf['source_id']
- # tp_sid = dlh.conf.rawdataprocessorconf.tpset_sourceid
-
- mgraph.add_fragment_producer(
- id = dro_sid,
- subsystem = "Detector_Readout",
- requests_in = f"datahandler_{dro_sid}.request_input",
- fragments_out = f"datahandler_{dro_sid}.fragment_queue"
- )
- mgraph.add_endpoint(
- f"timesync_ru{RUIDX}_{dro_sid}",
- f"datahandler_{dro_sid}.timesync_output",
- "TimeSync", Direction.OUT,
- is_pubsub=True,
- toposort=False
- )
- # if processing is enabled, add a pubsub endooint for TPSets
- #if dlh.conf.rawdataprocessorconf['enable_tpg']:
- # mgraph.add_endpoint(
- # f"tpsets_ru{RUIDX}_link{dro_sid}",
- # f"datahandler_{dro_sid}.tpset_out",
- # "TPSet",
- # Direction.OUT,
- # is_pubsub=True
- # )
+ ###
+ # Enable processing in DHLs
+ ###
+ def add_tp_processing(
+ self,
+ dlh_list: list,
+ TPG_CHANNEL_MAP: str,
+ ) -> list:
+ cfg = self.ro_cfg
-###
-# Add tpg endpoints and fragment producers
-###
-def add_tpg_eps_and_fps(
- mgraph: ModuleGraph,
- tpg_dlh_list: list,
- RUIDX: str,
-
-) -> None:
- """Adds detector readout endpoints and fragment producers"""
+ modules = []
- for dlh in tpg_dlh_list:
+ # hardcoded default values
+ default_error_counter_threshold=100
+ default_error_reset_freq=10000
- # extract source ids
- tpset_sid = dlh.conf.readoutmodelconf['source_id']
- # Add enpointis with this source id for timesync and TPSets
- mgraph.add_endpoint(
- f"timesync_tp_dlh_ru{RUIDX}_{tpset_sid}",
- f"tp_datahandler_{tpset_sid}.timesync_output",
- "TimeSync",
- Direction.OUT,
- is_pubsub=True
- )
+ # Loop over datalink handlers to re-define the data processor configuration
+ for dlh in dlh_list:
- mgraph.add_endpoint(
- f"tpsets_tplink{tpset_sid}",
- f"tp_datahandler_{tpset_sid}.tpset_out",
- "TPSet",
- Direction.OUT,
- is_pubsub=True
- )
+ # Recover the raw data link source id
+ dro_sid = dlh.conf.readoutmodelconf["source_id"]
+ geo_cid = dlh.conf.rawdataprocessorconf["crate_id"]
+ geo_sid = dlh.conf.rawdataprocessorconf["slot_id"]
+ geo_lid = dlh.conf.rawdataprocessorconf["link_id"]
+ # Re-create the module with an extended configuration
+ modules += [DAQModule(
+ name = dlh.name,
+ plugin = dlh.plugin,
+ conf = rconf.Conf(
+ readoutmodelconf = dlh.conf.readoutmodelconf,
+ latencybufferconf = dlh.conf.latencybufferconf,
+ requesthandlerconf = dlh.conf.requesthandlerconf,
+ rawdataprocessorconf= rconf.RawDataProcessorConf(
+ source_id = dro_sid,
+ crate_id = geo_cid,
+ slot_id = geo_sid,
+ link_id = geo_lid,
+ enable_tpg = True,
+ tpg_threshold = cfg.tpg_threshold,
+ tpg_algorithm = cfg.tpg_algorithm,
+ tpg_channel_mask = cfg.tpg_channel_mask,
+ channel_map_name = TPG_CHANNEL_MAP,
+ emulator_mode = cfg.emulator_mode,
+ clock_speed_hz = (self.det_cfg.clock_speed_hz / self.daq_cfg.data_rate_slowdown_factor),
+ error_counter_threshold=default_error_counter_threshold,
+ error_reset_freq=default_error_reset_freq
+ ),
+ )
+ )]
+
+ return modules
- # Add Fragment producer with this source id
- mgraph.add_fragment_producer(
- id = tpset_sid, subsystem = "Trigger",
- requests_in = f"tp_datahandler_{tpset_sid}.request_input",
- fragments_out = f"tp_datahandler_{tpset_sid}.fragment_queue"
- )
+ ###
+ # Create TP data link handlers
+ ###
+ def create_tp_dlhs(
+ self,
+ dlh_list: list,
+ DATA_REQUEST_TIMEOUT: int, # To Check
+ FRAGMENT_SEND_TIMEOUT: int, # To Check
+ tpset_sid: int,
+ )-> tuple[list, list]:
+ default_pop_limit_pct = 0.8
+ default_pop_size_pct = 0.1
+ default_stream_buffer_size = 8388608
+ default_latency_buffer_size = 4000000
+ default_detid = 1
-# Time to wait on pop()
-QUEUE_POP_WAIT_MS = 10 # This affects stop time, as each link will wait this long before stop
+
+ # Create the TP link handler
+ modules = [
+ DAQModule(name = f"tp_datahandler_{tpset_sid}",
+ plugin = "FDDataLinkHandler",
+ conf = rconf.Conf(
+ readoutmodelconf = rconf.ReadoutModelConf(
+ source_queue_timeout_ms = QUEUE_POP_WAIT_MS,
+ source_id = tpset_sid
+ ),
+ latencybufferconf = rconf.LatencyBufferConf(
+ latency_buffer_size = default_latency_buffer_size,
+ source_id = tpset_sid
+ ),
+ rawdataprocessorconf = rconf.RawDataProcessorConf(enable_tpg = False),
+ requesthandlerconf= rconf.RequestHandlerConf(
+ latency_buffer_size = default_latency_buffer_size,
+ pop_limit_pct = default_pop_limit_pct,
+ pop_size_pct = default_pop_size_pct,
+ source_id = tpset_sid,
+ det_id = default_detid,
+ stream_buffer_size = default_stream_buffer_size,
+ request_timeout_ms = DATA_REQUEST_TIMEOUT,
+ fragment_send_timeout_ms = FRAGMENT_SEND_TIMEOUT,
+ enable_raw_recording = False
+ )
+ )
+ )
+ ]
+
+ queues = []
+ for dlh in dlh_list:
+ # Attach to the detector DLH's tp_out connector
+ queues += [
+ Queue(
+ f"{dlh.name}.tp_out",
+ f"tp_datahandler_{tpset_sid}.raw_input",
+ "TriggerPrimitive",
+ f"tp_link_{tpset_sid}",1000000
+ )
+ ]
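+ # Wiring note: each detector DLH's tp_out connector feeds the single
+ # tp_datahandler_{tpset_sid}.raw_input endpoint through the TriggerPrimitive queues created above.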
+
+ return modules, queues
+
+ ###
+ # Add detector endpoints and fragment producers
+ ###
+ def add_dro_eps_and_fps(
+ self,
+ mgraph: ModuleGraph,
+ dlh_list: list,
+ RUIDX: str,
+ ) -> None:
+ """Adds detector readout endpoints and fragment producers"""
+ for dlh in dlh_list:
+ # print(dlh)
+
+ # extract source ids
+ dro_sid = dlh.conf.readoutmodelconf['source_id']
+ # tp_sid = dlh.conf.rawdataprocessorconf.tpset_sourceid
+
+ mgraph.add_fragment_producer(
+ id = dro_sid,
+ subsystem = "Detector_Readout",
+ requests_in = f"datahandler_{dro_sid}.request_input",
+ fragments_out = f"datahandler_{dro_sid}.fragment_queue"
+ )
+ mgraph.add_endpoint(
+ f"timesync_ru{RUIDX}_{dro_sid}",
+ f"datahandler_{dro_sid}.timesync_output",
+ "TimeSync", Direction.OUT,
+ is_pubsub=True,
+ toposort=False
+ )
-###
-# Create Readout Application
-###
-def create_readout_app(
- RU_DESCRIPTOR,
- SOURCEID_BROKER : SourceIDBroker = None,
- EMULATOR_MODE=False,
- DATA_RATE_SLOWDOWN_FACTOR=1,
- DEFAULT_DATA_FILE="./frames.bin",
- DATA_FILES={},
- USE_FAKE_CARDS=True,
- CLOCK_SPEED_HZ=62500000,
- RAW_RECORDING_ENABLED=False,
- RAW_RECORDING_OUTPUT_DIR=".",
- CHANNEL_MASK_TPG: list = [],
- THRESHOLD_TPG=120,
- ALGORITHM_TPG="SWTPG",
- TPG_ENABLED=False,
- TPG_CHANNEL_MAP= "ProtoDUNESP1ChannelMap",
- DATA_REQUEST_TIMEOUT=1000,
- FRAGMENT_SEND_TIMEOUT=10,
- READOUT_SENDS_TP_FRAGMENTS=False,
- EAL_ARGS='-l 0-1 -n 3 -- -m [0:1].0 -j',
- NUMA_ID=0,
- LATENCY_BUFFER_SIZE=499968,
- LATENCY_BUFFER_NUMA_AWARE = False,
- LATENCY_BUFFER_ALLOCATION_MODE = False,
-
- CARD_ID_OVERRIDE = -1,
- EMULATED_DATA_TIMES_START_WITH_NOW = False,
- DEBUG=False
-) -> App:
-
- FRONTEND_TYPE, QUEUE_FRAGMENT_TYPE, _, _, _ = compute_data_types(RU_DESCRIPTOR.det_id, CLOCK_SPEED_HZ, RU_DESCRIPTOR.kind)
-
- # TPG is automatically disabled for non wib2 frontends
- TPG_ENABLED = TPG_ENABLED and (FRONTEND_TYPE=='wib2' or FRONTEND_TYPE=='wibeth')
-
- modules = []
- queues = []
+ ###
+ # Add tpg endpoints and fragment producers
+ ###
+ def add_tpg_eps_and_fps(
+ self,
+ mgraph: ModuleGraph,
+ tpg_dlh_list: list,
+ RUIDX: str,
+ ) -> None:
+ """Adds detector readout endpoints and fragment producers"""
- # Create the card readers
- cr_mods = []
- cr_queues = []
+ for dlh in tpg_dlh_list:
+ # extract source ids
+ tpset_sid = dlh.conf.readoutmodelconf['source_id']
- # Create the card readers
- if USE_FAKE_CARDS:
- fakecr_mods, fakecr_queues = create_fake_cardreader(
- FRONTEND_TYPE=FRONTEND_TYPE,
- QUEUE_FRAGMENT_TYPE=QUEUE_FRAGMENT_TYPE,
- DATA_RATE_SLOWDOWN_FACTOR=DATA_RATE_SLOWDOWN_FACTOR,
- DATA_FILES=DATA_FILES,
- DEFAULT_DATA_FILE=DEFAULT_DATA_FILE,
- CLOCK_SPEED_HZ=CLOCK_SPEED_HZ,
- EMULATED_DATA_TIMES_START_WITH_NOW=EMULATED_DATA_TIMES_START_WITH_NOW,
- RU_DESCRIPTOR=RU_DESCRIPTOR
- )
- cr_mods += fakecr_mods
- cr_queues += fakecr_queues
- else:
- if RU_DESCRIPTOR.kind == 'flx':
- flx_mods, flx_queues = create_felix_cardreader(
- FRONTEND_TYPE=FRONTEND_TYPE,
- QUEUE_FRAGMENT_TYPE=QUEUE_FRAGMENT_TYPE,
- CARD_ID_OVERRIDE=CARD_ID_OVERRIDE,
- NUMA_ID=NUMA_ID,
- RU_DESCRIPTOR=RU_DESCRIPTOR
+ # Add endpoints with this source id for timesync and TPSets
+ mgraph.add_endpoint(
+ f"timesync_tp_dlh_ru{RUIDX}_{tpset_sid}",
+ f"tp_datahandler_{tpset_sid}.timesync_output",
+ "TimeSync",
+ Direction.OUT,
+ is_pubsub=True
)
- cr_mods += flx_mods
- cr_queues += flx_queues
-
- elif RU_DESCRIPTOR.kind == 'eth' and RU_DESCRIPTOR.streams[0].parameters.protocol == "udp":
- dpdk_mods, dpdk_queues = create_dpdk_cardreader(
- FRONTEND_TYPE=FRONTEND_TYPE,
- QUEUE_FRAGMENT_TYPE=QUEUE_FRAGMENT_TYPE,
- EAL_ARGS=EAL_ARGS,
- RU_DESCRIPTOR=RU_DESCRIPTOR
+
+ mgraph.add_endpoint(
+ f"tpsets_tplink{tpset_sid}",
+ f"tp_datahandler_{tpset_sid}.tpset_out",
+ "TPSet",
+ Direction.OUT,
+ is_pubsub=True
+ )
+
+ # Add Fragment producer with this source id
+ mgraph.add_fragment_producer(
+ id = tpset_sid, subsystem = "Trigger",
+ requests_in = f"tp_datahandler_{tpset_sid}.request_input",
+ fragments_out = f"tp_datahandler_{tpset_sid}.fragment_queue"
)
- cr_mods += dpdk_mods
- cr_queues += dpdk_queues
+
+
+ def generate(
+ self,
+ RU_DESCRIPTOR,
+ SOURCEID_BROKER,
+ data_file_map,
+ data_timeout_requests,
+ ):
+ """Generate the readout applicaton
+
+ Args:
+ RU_DESCRIPTOR (ReadoutUnitDescriptor): A readout unit descriptor object
+ SOURCEID_BROKER (SourceIDBroker): The source ID brocker
+ data_file_map (dict): Map of pattern files to application
+ data_timeout_requests (int): Data timeout request
+
+ Raises:
+ RuntimeError: _description_
+
+ Returns:
+ _type_: _description_
+ """
+
+ numa_id, latency_numa, latency_preallocate, card_override = self.get_numa_cfg(RU_DESCRIPTOR)
+ cfg = self.ro_cfg
+ TPG_ENABLED = cfg.enable_tpg
+ DATA_FILES = data_file_map
+ DATA_REQUEST_TIMEOUT=data_timeout_requests
+
+ # FRONTEND_TYPE, QUEUE_FRAGMENT_TYPE, _, _, _ = compute_data_types(RU_DESCRIPTOR.det_id, self.det_cfg.clock_speed_hz, RU_DESCRIPTOR.kind)
+
+ # Previously TPG was only enabled for wib2/wibeth frontends:
+ # TPG_ENABLED = TPG_ENABLED and (FRONTEND_TYPE=='wib2' or FRONTEND_TYPE=='wibeth')
+ # TPG is automatically disabled for readout units that are not HD TPC
+ TPG_ENABLED = TPG_ENABLED and (RU_DESCRIPTOR.det_id == DetID.Subdetector.kHD_TPC.value)
+
+ modules = []
+ queues = []
+
+
+ # Create the card readers
+ cr_mods = []
+ cr_queues = []
- elif RU_DESCRIPTOR.kind == 'eth' and RU_DESCRIPTOR.streams[0].parameters.protocol == "zmq":
- pac_mods, pac_queues = create_pacman_cardreader(
- FRONTEND_TYPE=FRONTEND_TYPE,
- QUEUE_FRAGMENT_TYPE=QUEUE_FRAGMENT_TYPE,
+ # Create the card readers
+ if cfg.use_fake_cards:
+ fakecr_mods, fakecr_queues = self.create_fake_cardreader(
+ # FRONTEND_TYPE=FRONTEND_TYPE,
+ # QUEUE_FRAGMENT_TYPE=QUEUE_FRAGMENT_TYPE,
+ DATA_FILES=DATA_FILES,
RU_DESCRIPTOR=RU_DESCRIPTOR
)
- cr_mods += pac_mods
- cr_queues += pac_queues
-
- modules += cr_mods
- queues += cr_queues
-
- # Create the data-link handlers
- dlhs_mods, _ = create_det_dhl(
- LATENCY_BUFFER_SIZE=LATENCY_BUFFER_SIZE,
- LATENCY_BUFFER_NUMA_AWARE=LATENCY_BUFFER_NUMA_AWARE,
- LATENCY_BUFFER_ALLOCATION_MODE=LATENCY_BUFFER_ALLOCATION_MODE,
- NUMA_ID=NUMA_ID,
- SEND_PARTIAL_FRAGMENTS=False,
- RAW_RECORDING_OUTPUT_DIR=RAW_RECORDING_OUTPUT_DIR,
- DATA_REQUEST_TIMEOUT=DATA_REQUEST_TIMEOUT,
- FRAGMENT_SEND_TIMEOUT=FRAGMENT_SEND_TIMEOUT,
- RAW_RECORDING_ENABLED=RAW_RECORDING_ENABLED,
- RU_DESCRIPTOR=RU_DESCRIPTOR,
- EMULATOR_MODE=EMULATOR_MODE
-
- )
-
- # Configure the TP processing if requrested
- if TPG_ENABLED:
- dlhs_mods = add_tp_processing(
- dlh_list=dlhs_mods,
- THRESHOLD_TPG=THRESHOLD_TPG,
- ALGORITHM_TPG=ALGORITHM_TPG,
- CHANNEL_MASK_TPG=CHANNEL_MASK_TPG,
- TPG_CHANNEL_MAP=TPG_CHANNEL_MAP,
- EMULATOR_MODE=EMULATOR_MODE,
- CLOCK_SPEED_HZ=CLOCK_SPEED_HZ,
- DATA_RATE_SLOWDOWN_FACTOR=DATA_RATE_SLOWDOWN_FACTOR
+ cr_mods += fakecr_mods
+ cr_queues += fakecr_queues
+ else:
+ if RU_DESCRIPTOR.kind == 'flx':
+ flx_mods, flx_queues = self.create_felix_cardreader(
+ # FRONTEND_TYPE=FRONTEND_TYPE,
+ # QUEUE_FRAGMENT_TYPE=QUEUE_FRAGMENT_TYPE,
+ CARD_ID_OVERRIDE=card_override,
+ NUMA_ID=numa_id,
+ RU_DESCRIPTOR=RU_DESCRIPTOR
+ )
+ cr_mods += flx_mods
+ cr_queues += flx_queues
+
+ elif RU_DESCRIPTOR.kind == 'eth' and RU_DESCRIPTOR.streams[0].parameters.protocol == "udp":
+ dpdk_mods, dpdk_queues = self.create_dpdk_cardreader(
+ # FRONTEND_TYPE=FRONTEND_TYPE,
+ # QUEUE_FRAGMENT_TYPE=QUEUE_FRAGMENT_TYPE,
+ RU_DESCRIPTOR=RU_DESCRIPTOR
+ )
+ cr_mods += dpdk_mods
+ cr_queues += dpdk_queues
+
+# elif RU_DESCRIPTOR.kind == 'eth' and RU_DESCRIPTOR.streams[0].parameters.protocol == "zmq":
+#
+# pac_mods, pac_queues = self.create_pacman_cardreader(
+# FRONTEND_TYPE=FRONTEND_TYPE,
+# QUEUE_FRAGMENT_TYPE=QUEUE_FRAGMENT_TYPE,
+# RU_DESCRIPTOR=RU_DESCRIPTOR
+# )
+# cr_mods += pac_mods
+# cr_queues += pac_queues
+
+ modules += cr_mods
+ queues += cr_queues
+
+ # Create the data-link handlers
+ dlhs_mods, _ = self.create_det_dhl(
+ # LATENCY_BUFFER_SIZE=cfg.latency_buffer_size,
+ LATENCY_BUFFER_NUMA_AWARE=latency_numa,
+ LATENCY_BUFFER_ALLOCATION_MODE=latency_preallocate,
+ NUMA_ID=numa_id,
+ SEND_PARTIAL_FRAGMENTS=False,
+ DATA_REQUEST_TIMEOUT=DATA_REQUEST_TIMEOUT,
+ RU_DESCRIPTOR=RU_DESCRIPTOR,
+
)
- modules += dlhs_mods
+ # Configure the TP processing if requested
+ if TPG_ENABLED:
+ dlhs_mods = self.add_tp_processing(
+ dlh_list=dlhs_mods,
+ TPG_CHANNEL_MAP=self.det_cfg.tpc_channel_map,
+ )
- # Add the TP datalink handlers
- #if TPG_ENABLED and READOUT_SENDS_TP_FRAGMENTS:
- if TPG_ENABLED:
- tps = { k:v for k,v in SOURCEID_BROKER.get_all_source_ids("Trigger").items() if isinstance(v, ReadoutUnitDescriptor ) and v==RU_DESCRIPTOR}
- if len(tps) != 1:
- raise RuntimeError(f"Could not retrieve unique element from source id map {tps}")
+ modules += dlhs_mods
- tpg_mods, tpg_queues = create_tp_dlhs(
- dlh_list=dlhs_mods,
- DATA_REQUEST_TIMEOUT=DATA_REQUEST_TIMEOUT,
- FRAGMENT_SEND_TIMEOUT=FRAGMENT_SEND_TIMEOUT,
- tpset_sid = next(iter(tps))
- )
- modules += tpg_mods
- queues += tpg_queues
+ # Add the TP datalink handlers
+ if TPG_ENABLED:
+ tps = { k:v for k,v in SOURCEID_BROKER.get_all_source_ids("Trigger").items() if isinstance(v, ReadoutUnitDescriptor ) and v==RU_DESCRIPTOR}
+ if len(tps) != 1:
+ raise RuntimeError(f"Could not retrieve unique element from source id map {tps}")
- # Create the Module graphs
- mgraph = ModuleGraph(modules, queues=queues)
+ tpg_mods, tpg_queues = self.create_tp_dlhs(
+ dlh_list=dlhs_mods,
+ DATA_REQUEST_TIMEOUT=DATA_REQUEST_TIMEOUT,
+ FRAGMENT_SEND_TIMEOUT=cfg.fragment_send_timeout_ms,
+ tpset_sid = next(iter(tps))
+ )
+ modules += tpg_mods
+ queues += tpg_queues
- # Add endpoints and frame producers to DRO data handlers
- add_dro_eps_and_fps(
- mgraph=mgraph,
- dlh_list=dlhs_mods,
- RUIDX=RU_DESCRIPTOR.label
- )
+ # Create the Module graphs
+ mgraph = ModuleGraph(modules, queues=queues)
- if TPG_ENABLED:
- # Add endpoints and frame producers to TP data handlers
- add_tpg_eps_and_fps(
+ # Add endpoints and frame producers to DRO data handlers
+ self.add_dro_eps_and_fps(
mgraph=mgraph,
- # dlh_list=dlhs_mods,
- tpg_dlh_list=tpg_mods,
+ dlh_list=dlhs_mods,
RUIDX=RU_DESCRIPTOR.label
)
- # Create the application
- readout_app = App(mgraph, host=RU_DESCRIPTOR.host_name)
+ if TPG_ENABLED:
+ # Add endpoints and frame producers to TP data handlers
+ self.add_tpg_eps_and_fps(
+ mgraph=mgraph,
+ tpg_dlh_list=tpg_mods,
+ RUIDX=RU_DESCRIPTOR.label
+ )
- # All done
- return readout_app
+ # Create the application
+ readout_app = App(mgraph, host=RU_DESCRIPTOR.host_name)
+
+
+ # Kubernetes-specific extensions
+ if RU_DESCRIPTOR.kind == 'flx':
+ c = card_override if card_override != -1 else RU_DESCRIPTOR.iface
+ readout_app.resources = {
+ f"felix.cern/flx{c}-data": "1", # requesting FLX{c}
+ # "memory": f"{}Gi" # yes bro
+ }
+
+ dir_names = set()
+
+ cvmfs = Path('/cvmfs')
+ ddf_path = Path(cfg.default_data_file)
+ if not cvmfs in ddf_path.parents:
+ dir_names.add(ddf_path.parent)
+
+ for file in data_file_map.values():
+ f = Path(file)
+ if not cvmfs in f.parents:
+ dir_names.add(f.parent)
+
+ for dir_idx, dir_name in enumerate(dir_names):
+ readout_app.mounted_dirs += [{
+ 'name': f'data-file-{dir_idx}',
+ 'physical_location': dir_name,
+ 'in_pod_location': dir_name,
+ 'read_only': True,
+ }]
+
+ # All done
+ return readout_app
+
###
# Create Fake dataproducers Application
###
-def create_fake_reaout_app(
+def create_fake_readout_app(
RU_DESCRIPTOR,
CLOCK_SPEED_HZ
) -> App:
@@ -923,19 +969,21 @@ def create_fake_reaout_app(
modules = []
queues = []
- _, _, fakedata_fragment_type, fakedata_time_tick, fakedata_frame_size = compute_data_types(RU_DESCRIPTOR.det_id, CLOCK_SPEED_HZ, RU_DESCRIPTOR.kind)
+ # _, _, fakedata_fragment_type, fakedata_time_tick, fakedata_frame_size = compute_data_types(RU_DESCRIPTOR.det_id, CLOCK_SPEED_HZ, RU_DESCRIPTOR.kind)
for stream in RU_DESCRIPTOR.streams:
- modules += [DAQModule(name = f"fakedataprod_{stream.src_id}",
- plugin='FakeDataProd',
- conf = fdp.ConfParams(
- system_type = "Detector_Readout",
- source_id = stream.src_id,
- time_tick_diff = fakedata_time_tick,
- frame_size = fakedata_frame_size,
- response_delay = 0,
- fragment_type = fakedata_fragment_type,
- ))]
+ _, _, fakedata_fragment_type, fakedata_time_tick, fakedata_frame_size = compute_data_types(stream)
+
+ modules += [DAQModule(name = f"fakedataprod_{stream.src_id}",
+ plugin='FakeDataProd',
+ conf = fdp.ConfParams(
+ system_type = "Detector_Readout",
+ source_id = stream.src_id,
+ time_tick_diff = fakedata_time_tick,
+ frame_size = fakedata_frame_size,
+ response_delay = 0,
+ fragment_type = fakedata_fragment_type,
+ ))]
mgraph = ModuleGraph(modules, queues=queues)
diff --git a/python/daqconf/apps/tprtc_gen.py b/python/daqconf/apps/tprtc_gen.py
index 5e7d06b0..a6639562 100644
--- a/python/daqconf/apps/tprtc_gen.py
+++ b/python/daqconf/apps/tprtc_gen.py
@@ -12,8 +12,6 @@
from distutils.command.check import check
import math
-from rich.console import Console
-console = Console()
# Set moo schema search path
from dunedaq.env import get_moo_model_path
@@ -30,14 +28,20 @@
from daqconf.core.conf_utils import Direction
#===============================================================================
-def get_tprtc_app(MASTER_DEVICE_NAME="",
- TIMING_PARTITION_ID=0,
- TRIGGER_MASK=0xff,
- RATE_CONTROL_ENABLED=True,
- SPILL_GATE_ENABLED=False,
- TIMING_SESSION="",
- HOST="localhost",
- DEBUG=False):
+def get_tprtc_app(
+ timing,
+ DEBUG=False
+ ):
+
+
+ MASTER_DEVICE_NAME=timing.timing_partition_master_device_name
+ TIMING_PARTITION_ID=timing.timing_partition_id
+ TRIGGER_MASK=timing.timing_partition_trigger_mask
+ RATE_CONTROL_ENABLED=timing.timing_partition_rate_control_enabled
+ SPILL_GATE_ENABLED=timing.timing_partition_spill_gate_enabled
+ TIMING_SESSION=timing.timing_session_name
+ HOST=timing.host_tprtc
+
modules = {}
diff --git a/python/daqconf/apps/tpwriter_gen.py b/python/daqconf/apps/tpwriter_gen.py
index 6543269c..32a5ef5d 100644
--- a/python/daqconf/apps/tpwriter_gen.py
+++ b/python/daqconf/apps/tpwriter_gen.py
@@ -26,19 +26,27 @@
QUEUE_POP_WAIT_MS = 100
def get_tpwriter_app(
- OUTPUT_PATH=".",
- APP_NAME="tpwriter",
- OPERATIONAL_ENVIRONMENT="swtest",
- MAX_FILE_SIZE=4*1024*1024*1024,
- DATA_RATE_SLOWDOWN_FACTOR=1,
- CLOCK_SPEED_HZ=62500000,
- SRC_GEO_ID_MAP='',
- SOURCE_IDX=998,
- HOST="localhost",
- DEBUG=False):
-
+ detector,
+ dataflow,
+ daq_common,
+ app_name,
+ file_label,
+ source_id,
+ SRC_GEO_ID_MAP,
+ DEBUG=False
+ ):
"""Generate the json configuration for the readout and DF process"""
+ # Temp vars
+ OUTPUT_PATH = dataflow.tpset_output_path
+ APP_NAME = app_name
+ OPERATIONAL_ENVIRONMENT = detector.op_env
+ MAX_FILE_SIZE = dataflow.tpset_output_file_size
+ DATA_RATE_SLOWDOWN_FACTOR = daq_common.data_rate_slowdown_factor
+ CLOCK_SPEED_HZ = detector.clock_speed_hz
+ SOURCE_IDX=source_id
+ HOST=dataflow.host_tpw
+
ONE_SECOND_INTERVAL_TICKS = CLOCK_SPEED_HZ / DATA_RATE_SLOWDOWN_FACTOR
modules = []
@@ -76,4 +84,11 @@ def get_tpwriter_app(
tpw_app = App(modulegraph=mgraph, host=HOST)
+ tpw_app.mounted_dirs += [{
+ 'name': 'raw-data',
+ 'physical_location':OUTPUT_PATH,
+ 'in_pod_location':OUTPUT_PATH,
+ 'read_only': False
+ }]
+
return tpw_app
diff --git a/python/daqconf/apps/trigger_gen.py b/python/daqconf/apps/trigger_gen.py
index 481dffd2..6b2fe0c6 100644
--- a/python/daqconf/apps/trigger_gen.py
+++ b/python/daqconf/apps/trigger_gen.py
@@ -34,6 +34,8 @@
from daqconf.core.conf_utils import Direction, Queue
from daqconf.core.sourceid import TAInfo, TPInfo, TCInfo
+from trgdataformats import TriggerBits as trgbs
+
#FIXME maybe one day, triggeralgs will define schemas... for now allow a dictionary of 4byte int, 4byte floats, and strings
moo.otypes.make_type(schema='number', dtype='i4', name='temp_integer', path='temptypes')
moo.otypes.make_type(schema='number', dtype='f4', name='temp_float', path='temptypes')
@@ -70,45 +72,66 @@ def get_buffer_conf(source_id, data_request_timeout):
request_timeout_ms = data_request_timeout,
warn_on_timeout = False,
enable_raw_recording = False))
+
+#===============================================================================
+### Function that converts trigger word strings to trigger word integers given TC type. Uses functions from trgdataformats.
+def get_trigger_bitwords(bitwords):
+ # create bitwords flags
+ final_bit_flags = []
+ for bitword in bitwords:
+ tmp_bits = []
+ for bit_name in bitword:
+ bit_value = trgbs.string_to_fragment_type_value(bit_name)
+ if bit_value == 0:
+ raise RuntimeError('One (or more) of the provided MLT trigger bitwords is unknown! Please recheck the names...')
+ else:
+ tmp_bits.append(bit_value)
+ final_bit_flags.append(tmp_bits)
+
+ return final_bit_flags
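+
+# Usage sketch (hedged: the bitword names below are placeholders, not verified trgdataformats
+# fragment-type names; the integer bit values come from string_to_fragment_type_value at runtime):
+#   MLT_TRIGGER_BITWORDS = [["<tc_type_name_A>", "<tc_type_name_B>"], ["<tc_type_name_C>"]]
+#   MLT_TRIGGER_FLAGS = get_trigger_bitwords(MLT_TRIGGER_BITWORDS)
+#   # -> one list of integer bit values per bitword; unknown names raise RuntimeError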
#===============================================================================
-def get_trigger_app(CLOCK_SPEED_HZ: int = 62_500_000,
- DATA_RATE_SLOWDOWN_FACTOR: float = 1,
- TP_CONFIG: dict = {},
- TOLERATE_INCOMPLETENESS=False,
- COMPLETENESS_TOLERANCE=1,
-
- ACTIVITY_PLUGIN: str = 'TriggerActivityMakerPrescalePlugin',
- ACTIVITY_CONFIG: dict = dict(prescale=10000),
-
- CANDIDATE_PLUGIN: str = 'TriggerCandidateMakerPrescalePlugin',
- CANDIDATE_CONFIG: int = dict(prescale=10),
-
- USE_HSI_INPUT = True,
- TTCM_S1: int = 1,
- TTCM_S2: int = 2,
- TRIGGER_WINDOW_BEFORE_TICKS: int = 1000,
- TRIGGER_WINDOW_AFTER_TICKS: int = 1000,
- HSI_TRIGGER_TYPE_PASSTHROUGH: bool = False,
-
- USE_CUSTOM_MAKER: bool = False,
- CTCM_TYPES: list = [4],
- CTCM_INTERVAL: list = [62500000],
-
- MLT_MERGE_OVERLAPPING_TCS: bool = False,
- MLT_BUFFER_TIMEOUT: int = 100,
- MLT_SEND_TIMED_OUT_TDS: bool = False,
- MLT_MAX_TD_LENGTH_MS: int = 1000,
- MLT_IGNORE_TC: list = [],
- MLT_USE_READOUT_MAP: bool = False,
- MLT_READOUT_MAP: dict = {},
-
- USE_CHANNEL_FILTER: bool = True,
-
- CHANNEL_MAP_NAME = "ProtoDUNESP1ChannelMap",
- DATA_REQUEST_TIMEOUT = 1000,
- HOST="localhost",
- DEBUG=False):
+def get_trigger_app(
+ trigger,
+ detector,
+ daq_common,
+ tp_infos,
+ trigger_data_request_timeout,
+ USE_HSI_INPUT = True,
+ USE_CHANNEL_FILTER: bool = True,
+ DEBUG=False
+ ):
+
+ # Temporary variables, to be cleaned up
+ DATA_RATE_SLOWDOWN_FACTOR = daq_common.data_rate_slowdown_factor
+ CLOCK_SPEED_HZ = detector.clock_speed_hz
+ TP_CONFIG = tp_infos
+ TOLERATE_INCOMPLETENESS=trigger.tolerate_incompleteness
+ COMPLETENESS_TOLERANCE=trigger.completeness_tolerance
+ ACTIVITY_PLUGIN = trigger.trigger_activity_plugin
+ ACTIVITY_CONFIG = trigger.trigger_activity_config
+ CANDIDATE_PLUGIN = trigger.trigger_candidate_plugin
+ CANDIDATE_CONFIG = trigger.trigger_candidate_config
+ TTCM_S1=trigger.ttcm_s1
+ TTCM_S2=trigger.ttcm_s2
+ TRIGGER_WINDOW_BEFORE_TICKS = trigger.trigger_window_before_ticks
+ TRIGGER_WINDOW_AFTER_TICKS = trigger.trigger_window_after_ticks
+ HSI_TRIGGER_TYPE_PASSTHROUGH = trigger.hsi_trigger_type_passthrough
+ MLT_MERGE_OVERLAPPING_TCS = trigger.mlt_merge_overlapping_tcs
+ MLT_BUFFER_TIMEOUT = trigger.mlt_buffer_timeout
+ MLT_MAX_TD_LENGTH_MS = trigger.mlt_max_td_length_ms
+ MLT_SEND_TIMED_OUT_TDS = trigger.mlt_send_timed_out_tds
+ MLT_IGNORE_TC = trigger.mlt_ignore_tc
+ MLT_USE_READOUT_MAP = trigger.mlt_use_readout_map
+ MLT_READOUT_MAP = trigger.mlt_td_readout_map
+ MLT_USE_BITWORDS = trigger.mlt_use_bitwords
+ MLT_TRIGGER_BITWORDS = trigger.mlt_trigger_bitwords
+ USE_CUSTOM_MAKER = trigger.use_custom_maker
+ CTCM_TYPES = trigger.ctcm_trigger_types
+ CTCM_INTERVAL = trigger.ctcm_trigger_intervals
+ CHANNEL_MAP_NAME = detector.tpc_channel_map
+ DATA_REQUEST_TIMEOUT=trigger_data_request_timeout
+ HOST=trigger.host_trigger
# Generate schema for the maker plugins on the fly in the temptypes module
make_moo_record(ACTIVITY_CONFIG , 'ActivityConf' , 'temptypes')
@@ -288,6 +311,9 @@ def get_trigger_app(CLOCK_SPEED_HZ: int = 62_500_000,
trigger_intervals=CTCM_INTERVAL,
clock_frequency_hz=CLOCK_SPEED_HZ,
timestamp_method="kSystemClock"))]
+
+ ### get trigger bitwords for mlt
+ MLT_TRIGGER_FLAGS = get_trigger_bitwords(MLT_TRIGGER_BITWORDS)
# We need to populate the list of links based on the fragment
# producers available in the system. This is a bit of a
@@ -306,7 +332,9 @@ def get_trigger_app(CLOCK_SPEED_HZ: int = 62_500_000,
ignore_tc=MLT_IGNORE_TC,
td_readout_limit=max_td_length_ticks,
use_readout_map=MLT_USE_READOUT_MAP,
- td_readout_map=MLT_READOUT_MAP))]
+ td_readout_map=MLT_READOUT_MAP,
+ use_bitwords=MLT_USE_BITWORDS,
+ trigger_bitwords=MLT_TRIGGER_FLAGS))]
mgraph = ModuleGraph(modules)
diff --git a/python/daqconf/core/assets.py b/python/daqconf/core/assets.py
index fc52c2a4..cfe02f28 100755
--- a/python/daqconf/core/assets.py
+++ b/python/daqconf/core/assets.py
@@ -1,7 +1,7 @@
from os.path import exists,abspath,dirname,expandvars
-from rich.console import Console
-console = Console()
+
+from .console import console
from daq_assettools.asset_file import AssetFile
from daq_assettools.asset_database import Database
diff --git a/python/daqconf/core/conf_utils.py b/python/daqconf/core/conf_utils.py
index 58e48e82..0bfb60e1 100644
--- a/python/daqconf/core/conf_utils.py
+++ b/python/daqconf/core/conf_utils.py
@@ -5,11 +5,9 @@
import urllib
from pathlib import Path
-from rich.console import Console
from copy import deepcopy
from collections import namedtuple, defaultdict
import json
-import os
from enum import Enum
from typing import Callable
from graphviz import Digraph
@@ -29,7 +27,7 @@
from daqconf.core.daqmodule import DAQModule
-console = Console()
+from .console import console
########################################################################
#
@@ -321,8 +319,8 @@ def make_system_connections(the_system, verbose=False, use_k8s=False, use_connec
port = the_system.next_unassigned_port() if not use_connectivity_service or use_k8s else '*'
address = f'tcp://{{{endpoint["app"]}}}:{port}' if not use_k8s else f'tcp://{endpoint["app"]}:{port}'
conn_id =conn.ConnectionId( uid=endpoint['endpoint'].external_name, data_type=endpoint['endpoint'].data_type)
- pubsub_connectionids[endpoint['endpoint'].external_name] = conn.Connection(id=conn_id,
- connection_type="kPubSub",
+ pubsub_connectionids[endpoint['endpoint'].external_name] = conn.Connection(id=conn_id,
+ connection_type="kPubSub",
uri=address
)
topic_connectionuids += [endpoint['endpoint'].external_name]
@@ -409,7 +407,7 @@ def make_app_command_data(system, app, appkey, verbose=False, use_k8s=False, use
# Fill in the "standard" command entries in the command_data structure
command_data['init'] = appfwk.Init(modules=mod_specs,
connections=system.connections[appkey],
- queues=system.queues[appkey],
+ queues=system.queues[appkey],
use_connectivity_service=use_connectivity_service,
connectivity_service_interval_ms=connectivity_service_interval)
@@ -504,7 +502,7 @@ def update_with_ssh_boot_data (
-def update_with_k8s_boot_data(
+def add_k8s_app_boot_data(
boot_data: dict,
apps: list,
image: str,
@@ -533,10 +531,10 @@ def update_with_k8s_boot_data(
boot_data.update({"apps": apps_desc})
boot_data.update({"order": boot_order})
- if 'rte_script' in boot_data:
- boot_data['exec']['daq_application_k8s']['cmd'] = ['daq_application']
- else:
- boot_data['exec']['daq_application_k8s']['cmd'] = ['/dunedaq/run/app-entrypoint.sh']
+ # if 'rte_script' in boot_data:
+ # boot_data['exec']['daq_application_k8s']['cmd'] = ['daq_application']
+ # else:
+ # boot_data['exec']['daq_application_k8s']['cmd'] = ['/dunedaq/run/app-entrypoint.sh']
boot_data["exec"]["daq_application_k8s"]["image"] = image
@@ -547,59 +545,76 @@ def resolve_localhost(host):
return host
def generate_boot(
- conf,
+ boot_conf,
system,
verbose=False,
control_to_data_network = None) -> dict:
"""
Generate the dictionary that will become the boot.json file
"""
+
+ info_svc_uri_map = {
+ 'cern': "kafka://monkafka.cern.ch:30092/opmon",
+ 'pocket': f"kafka://{boot_conf.pocket_url}:30092/opmon",
+ 'local': "file://info_{APP_NAME}_{APP_PORT}.json"
+ }
+
ers_settings=dict()
- if conf.ers_impl == 'cern':
+ if boot_conf.ers_impl == 'cern':
use_kafka = True
ers_settings["INFO"] = "erstrace,throttle,lstdout,erskafka(monkafka.cern.ch:30092)"
ers_settings["WARNING"] = "erstrace,throttle,lstdout,erskafka(monkafka.cern.ch:30092)"
ers_settings["ERROR"] = "erstrace,throttle,lstdout,erskafka(monkafka.cern.ch:30092)"
ers_settings["FATAL"] = "erstrace,lstdout,erskafka(monkafka.cern.ch:30092)"
- elif conf.ers_impl == 'pocket':
+ elif boot_conf.ers_impl == 'pocket':
use_kafka = True
- ers_settings["INFO"] = "erstrace,throttle,lstdout,erskafka(" + conf.pocket_url + ":30092)"
- ers_settings["WARNING"] = "erstrace,throttle,lstdout,erskafka(" + conf.pocket_url + ":30092)"
- ers_settings["ERROR"] = "erstrace,throttle,lstdout,erskafka(" + conf.pocket_url + ":30092)"
- ers_settings["FATAL"] = "erstrace,lstdout,erskafka(" + conf.pocket_url + ":30092)"
- else:
+ ers_settings["INFO"] = "erstrace,throttle,lstdout,erskafka(" + boot_conf.pocket_url + ":30092)"
+ ers_settings["WARNING"] = "erstrace,throttle,lstdout,erskafka(" + boot_conf.pocket_url + ":30092)"
+ ers_settings["ERROR"] = "erstrace,throttle,lstdout,erskafka(" + boot_conf.pocket_url + ":30092)"
+ ers_settings["FATAL"] = "erstrace,lstdout,erskafka(" + boot_conf.pocket_url + ":30092)"
+ elif boot_conf.ers_impl == 'local':
use_kafka = False
ers_settings["INFO"] = "erstrace,throttle,lstdout"
ers_settings["WARNING"] = "erstrace,throttle,lstdout"
ers_settings["ERROR"] = "erstrace,throttle,lstdout"
ers_settings["FATAL"] = "erstrace,lstdout"
-
- if conf.opmon_impl == 'cern':
- info_svc_uri = "kafka://monkafka.cern.ch:30092/opmon"
- elif conf.opmon_impl == 'pocket':
- info_svc_uri = "kafka://" + conf.pocket_url + ":30092/opmon"
else:
- info_svc_uri = "file://info_{APP_NAME}_{APP_PORT}.json"
+ raise ValueError(f"Unknown boot_conf.ers_impl value {boot_conf.ers_impl}")
- daq_app_exec_name = "daq_application_ssh" if not conf.use_k8s else "daq_application_k8s"
+ info_svc_uri = info_svc_uri_map[boot_conf.opmon_impl]
- daq_app_specs = {
- daq_app_exec_name : {
- "comment": "Application profile using PATH variables (lower start time)",
- "env":{
- "CET_PLUGIN_PATH": "getenv",
- "DETCHANNELMAPS_SHARE": "getenv",
- "DUNEDAQ_SHARE_PATH": "getenv",
- "TIMING_SHARE": "getenv",
- "LD_LIBRARY_PATH": "getenv",
- "PATH": "getenv",
+ daq_app_exec_name = f"daq_application_{boot_conf.process_manager}"
+
+ capture_paths = [
+ 'PATH',
+ 'LD_LIBRARY_PATH',
+ 'CET_PLUGIN_PATH',
+ 'DUNEDAQ_SHARE_PATH'
+ ]
+
+ app_env = {
"TRACE_FILE": "getenv:/tmp/trace_buffer_{APP_HOST}_{DUNEDAQ_PARTITION}",
"CMD_FAC": "rest://localhost:{APP_PORT}",
- "CONNECTION_SERVER": resolve_localhost(conf.connectivity_service_host),
- "CONNECTION_PORT": f"{conf.connectivity_service_port}",
+ "CONNECTION_SERVER": resolve_localhost(boot_conf.connectivity_service_host),
+ "CONNECTION_PORT": f"{boot_conf.connectivity_service_port}",
"INFO_SVC": info_svc_uri,
- },
+ }
+
+ app_env.update({
+ p:'getenv' for p in capture_paths
+ })
+
+ app_env.update({
+ v:'getenv' for v in boot_conf.capture_env_vars
+ })
+
+
+
+ daq_app_specs = {
+ daq_app_exec_name : {
+ "comment": "Application profile using PATH variables (lower start time)",
+ "env": app_env,
"cmd":"daq_application",
"args": [
"--name",
@@ -633,54 +648,57 @@ def generate_boot(
if use_kafka:
boot["env"]["DUNEDAQ_ERS_STREAM_LIBS"] = "erskafka"
- if conf.disable_trace:
+ if boot_conf.disable_trace:
del boot["exec"][daq_app_exec_name]["env"]["TRACE_FILE"]
+ boot['rte_script'] = get_rte_script()
+ # match boot_conf.k8s_rte:
+ # case 'auto':
+ # if (release_or_dev() == 'rel'):
+ # boot['rte_script'] = get_rte_script()
- match conf.RTE_script_settings:
- case 0:
- if (release_or_dev() == 'rel'):
- boot['rte_script'] = get_rte_script()
+ # case 'release':
+ # boot['rte_script'] = get_rte_script()
- case 1:
- boot['rte_script'] = get_rte_script()
+ # case 'devarea':
+ # pass
- case 2:
- pass
-
- if not conf.use_k8s:
+ if boot_conf.process_manager == 'ssh':
for app in system.apps.values():
app.host = resolve_localhost(app.host)
update_with_ssh_boot_data(
boot_data = boot,
apps = system.apps,
- base_command_port = conf.base_command_port,
+ base_command_port = boot_conf.base_command_port,
verbose = verbose,
control_to_data_network = control_to_data_network,
)
- else:
+ elif boot_conf.process_manager == 'k8s':
# ARGGGGG (MASSIVE WARNING SIGN HERE)
ruapps = [app for app in system.apps.keys() if app[:2] == 'ru']
dfapps = [app for app in system.apps.keys() if app[:2] == 'df']
otherapps = [app for app in system.apps.keys() if not app in ruapps + dfapps]
boot_order = ruapps + dfapps + otherapps
- update_with_k8s_boot_data(
+ add_k8s_app_boot_data(
boot_data = boot,
apps = system.apps,
boot_order = boot_order,
- image = conf.image,
- base_command_port = conf.base_command_port,
+ image = boot_conf.k8s_image,
+ base_command_port = boot_conf.base_command_port,
verbose = verbose,
control_to_data_network = control_to_data_network,
)
+ else:
+ raise ValueError(f"Unknown boot_conf.process_manager value {boot_conf.process_manager}")
- if conf.start_connectivity_service:
- if conf.use_k8s:
+
+ if boot_conf.start_connectivity_service:
+ if boot_conf.process_manager == 'k8s':
raise RuntimeError(
- 'Starting connectivity service only supported with ssh.\n')
+ 'Starting connectivity service only supported with ssh')
# CONNECTION_PORT will be updatd by nanorc remove this entry
daq_app_specs[daq_app_exec_name]["env"].pop("CONNECTION_PORT")
@@ -688,7 +706,7 @@ def generate_boot(
"connectionservice": {
"exec": "consvc_ssh",
"host": "connectionservice",
- "port": conf.connectivity_service_port,
+ "port": boot_conf.connectivity_service_port,
"update-env": {
"CONNECTION_PORT": "{APP_PORT}"
}
@@ -700,7 +718,7 @@ def generate_boot(
"--bind=0.0.0.0:{APP_PORT}",
"--workers=1",
"--worker-class=gthread",
- f"--threads={conf.connectivity_service_threads}",
+ f"--threads={boot_conf.connectivity_service_threads}",
"--timeout=0",
"--pid={APP_NAME}_{APP_PORT}.pid",
"connection-service.connection-flask:app"
@@ -717,9 +735,9 @@ def generate_boot(
boot["services"]={}
boot["services"].update(consvc)
boot["exec"].update(consvc_exec)
- conf.connectivity_service_host = resolve_localhost(conf.connectivity_service_host)
+ boot_conf.connectivity_service_host = resolve_localhost(boot_conf.connectivity_service_host)
boot["hosts-ctrl"].update({"connectionservice":
- conf.connectivity_service_host})
+ boot_conf.connectivity_service_host})
return boot
@@ -768,7 +786,7 @@ def make_app_json(app_name, app_command_data, data_dir, verbose=False):
with open(data_dir / f'{app_name}_{c}.json', 'w') as f:
json.dump(app_command_data[c].pod(), f, indent=4, sort_keys=True)
-def make_system_command_datas(daqconf:dict, the_system, forced_deps=[], verbose:bool=False, control_to_data_network:Callable[[str],str]=None) -> dict:
+def make_system_command_datas(boot_conf:dict, the_system, forced_deps=[], verbose:bool=False, control_to_data_network:Callable[[str],str]=None) -> dict:
"""Generate the dictionary of commands and their data for the entire system"""
# if the_system.app_start_order is None:
@@ -782,11 +800,6 @@ def make_system_command_datas(daqconf:dict, the_system, forced_deps=[], verbose:
cfg = {
"apps": {app_name: f'data/{app_name}_{c}' for app_name in the_system.apps.keys()}
}
- # if c == 'start':
- # cfg['order'] = the_system.app_start_order
- # elif c == 'stop':
- # cfg['order'] = the_system.app_start_order[::-1]
-
system_command_datas[c]=cfg
if verbose:
@@ -794,7 +807,7 @@ def make_system_command_datas(daqconf:dict, the_system, forced_deps=[], verbose:
console.log(f"Generating boot json file")
system_command_datas['boot'] = generate_boot(
- conf = daqconf,
+ boot_conf = boot_conf,
system = the_system,
verbose = verbose,
control_to_data_network=control_to_data_network
@@ -813,7 +826,7 @@ def write_json_files(app_command_datas, system_command_datas, json_dir, verbose=
console.rule("JSON file creation")
data_dir = json_dir / 'data'
- data_dir.mkdir(parents=True)
+ data_dir.mkdir(parents=True, exist_ok=True)
# Apps
for app_name, command_data in app_command_datas.items():
@@ -821,8 +834,10 @@ def write_json_files(app_command_datas, system_command_datas, json_dir, verbose=
# System commands
for cmd, cfg in system_command_datas.items():
- with open(json_dir / f'{cmd}.json', 'w') as f:
+ data_file = json_dir / f'{cmd}.json'
+ with open(data_file, 'w') as f:
json.dump(cfg, f, indent=4, sort_keys=True)
+ console.log(f"- {data_file} generated")
console.log(f"System configuration generated in directory '{json_dir}'")
@@ -854,14 +869,17 @@ def release_or_dev():
return 'rel'
def get_rte_script():
- from os import path
+ from os import path,getenv
+ script = ''
+ if release_or_dev() == 'rel':
+ ver = get_version()
+ releases_dir = get_releases_dir()
+ script = path.join(releases_dir, ver, 'daq_app_rte.sh')
- ver = get_version()
- releases_dir = get_releases_dir()
-
- script = path.join(releases_dir, ver, 'daq_app_rte.sh')
+ else:
+ dbt_install_dir = getenv('DBT_INSTALL_DIR')
+ script = path.join(dbt_install_dir, 'daq_app_rte.sh')
if not path.exists(script):
raise RuntimeError(f'Couldn\'t understand where to find the rte script tentative: {script}')
-
return script
diff --git a/python/daqconf/core/config_file.py b/python/daqconf/core/config_file.py
index 393ea399..12e8ffc8 100755
--- a/python/daqconf/core/config_file.py
+++ b/python/daqconf/core/config_file.py
@@ -2,14 +2,13 @@
import math
import sys
import glob
-import rich.traceback
-from rich.console import Console
+# from rich.console import Console
from collections import defaultdict
from os.path import exists, join
-import random
-import string
+import json
+import os
+from pathlib import Path
+from .console import console
-console = Console()
# Set moo schema search path
from dunedaq.env import get_moo_model_path
import moo.io
@@ -20,6 +19,48 @@
import moo.otypes
import moo.oschema
+
+class ConfigSet:
+
+
+ def get(self,conf):
+ if conf in self.confs:
+ return self.confs[conf]
+ else:
+ myconf = dict(self.base_config)
+ for pname, pval in self.full_input[conf].items():
+ myconf[pname] = pval
+ return myconf
+
+ def create_all_configs(self):
+ for cname, pars in self.full_input.items():
+ if(cname==self.base_name): continue
+ self.confs[cname] = dict(self.base_config)
+ for pname, pval in pars.items():
+ self.confs[cname][pname] = pval
+
+ def get_all_configs(self):
+ return self.confs
+
+ def list_all_configs(self):
+ print(self.confs.keys())
+
+ def __init__(self,conf_file,base_name='common'):
+
+ self.base_name = base_name
+ with open(conf_file,"r+") as f:
+ self.full_input = json.load(f)
+
+ try:
+ self.base_config = self.full_input[self.base_name]
+ except KeyError as e:
+ print(f"No '{self.base_name}' config in {conf_file}.")
+ raise e
+
+ self.confs = {self.base_name: self.base_config}
+ self.create_all_configs()
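+
+# Minimal sketch of the input layout ConfigSet expects (key and file names here are hypothetical):
+# {
+#     "common":    { "param_a": 1, "param_b": 2 },
+#     "variant_x": { "param_b": 3 }
+# }
+# ConfigSet("<configs>.json").get("variant_x") then yields the "common" base with the named
+# overrides applied, i.e. {"param_a": 1, "param_b": 3}.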
+
+
def _strict_recursive_update(dico1, dico2):
for k, v in dico2.items():
if not k in dico1:
@@ -37,70 +78,70 @@ def _strict_recursive_update(dico1, dico2):
dico1[k] = v
return dico1
+
def parse_json(filename, schemed_object):
console.log(f"Parsing config json file {filename}")
- with open(filename, 'r') as f:
+ filepath = Path(filename)
+ # basepath = filepath.parent
+
+ # First pass, load the main json file
+ with open(filepath, 'r') as f:
try:
- import json
- try:
- new_parameters = json.load(f)
- # Validate the heck out of this but that doesn't change the object itself (ARG)
- _strict_recursive_update(schemed_object.pod(), new_parameters)
- # now its validated, update the object with moo
- schemed_object.update(new_parameters)
- except Exception as e:
- raise RuntimeError(f'Couldn\'t update the object {schemed_object} with the file {filename},\nError: {e}')
+ new_parameters = json.load(f)
except Exception as e:
- raise RuntimeError(f"Couldn't parse {filename}, error: {str(e)}")
- return schemed_object
-
- raise RuntimeError(f"Couldn't find file {filename}")
-
-
-# def _recursive_section(sections, data):
-# if len(sections) == 1:
-# d = data
-# for k,v in d.items():
-# if v == "true" or v == "True":
-# d[k] = True
-# if v == "false" or v == "False":
-# d[k] = False
-# return {sections[0]: d}
-# else:
-# return {sections[0]: _recursive_section(sections[1:], data)}
-
-# def parse_ini(filename, schemed_object):
-# console.log(f"Parsing config ini file {filename}")
+ raise RuntimeError(f"Couldn't parse {filepath}, error: {str(e)}")
+
+ # second pass, look for references
+ subkeys = [ k for k,v in schemed_object.pod().items() if isinstance(v,dict) ]
+ for k in new_parameters:
+ # look for keys that are associated to dicts in the schemed_obj but here are strings
+ v = new_parameters[k]
+
+ if isinstance(v,str) and k in subkeys:
+
+ # It's a string! It's a reference! Try loading it
+ subfile_path = Path(os.path.expandvars(v)).expanduser()
+ if not subfile_path.is_absolute():
+ subfile_path = filepath.parent / subfile_path
+ if not subfile_path.exists():
+ raise RuntimeError(f'Cannot find the file {v} ({subfile_path})')
+
+ console.log(f"Detected subconfiguration for {k} {v} - loading {subfile_path}")
+ with open(subfile_path, 'r') as f:
+ try:
+ new_subpars = json.load(f)
+ except Exception as e:
+ raise RuntimeError(f"Couldn't parse {subfile_path}, error: {str(e)}")
+ new_parameters[k] = new_subpars
+
+ elif '' in v:
+ cname = k
+ pars = v['']
+ scname = pars['config_name']
+ scfile = pars['config_file'] if 'config_file' in pars else f'{cname}_configs.json'
+ scbase = pars['config_base'] if 'config_base' in pars else 'common'
-# import configparser
-# config = configparser.ConfigParser()
-# try:
-# config.read(filename)
-# except Exception as e:
-# raise RuntimeError(f"Couldn't parse {filename}, error: {str(e)}")
-# config_dict = {}
+ scfile = Path(os.path.expandvars(scfile)).expanduser()
+ if not scfile.is_absolute():
+ scfile = filepath.parent / scfile
-# for sect in config.sections():
-# sections = sect.split('.')
-# data = {k:v for k,v in config.items(sect)}
-# if sections[0] in config_dict:
-# config_dict[sections[0]].update(_recursive_section(sections, data)[sections[0]])
-# else:
-# config_dict[sections[0]] = _recursive_section(sections, data)[sections[0]]
+ if not scfile.exists():
+ raise RuntimeError(f'Cannot find the file {v} ({scfile})')
-# try:
-# new_parameters = config_dict
-# # validate the heck out of this but that doesn't change the object itself (ARG)
-# _strict_recursive_update(schemed_object.pod(), new_parameters)
-# # now its validated, update the object with moo
-# schemed_object.update(new_parameters)
-# return schemed_object
-# except Exception as e:
-# raise RuntimeError(f'Couldn\'t update the object {schemed_object} with the file {filename},\nError: {e}')
+ scset = ConfigSet(scfile,scbase)
+ new_parameters[k] = scset.get(scname)
+ try:
+ # Validate the heck out of this but that doesn't change the object itself (ARG)
+ _strict_recursive_update(schemed_object.pod(), new_parameters)
+ # now its validated, update the object with moo
+ schemed_object.update(new_parameters)
+ except Exception as e:
+ raise RuntimeError(f'Couldn\'t update the object {schemed_object} with the file {filename},\nError: {e}')
+ return schemed_object
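+
+# Illustrative only (section and file names are hypothetical): a top-level section that is a dict
+# in the schema may instead be given as a string path in the main file, which parse_json resolves
+# relative to the main file and loads before the strict update/validation step, e.g.
+#   { "readout": "readout_overrides.json", "trigger": { ... } }
+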
def parse_config_file(filename, configurer_conf):
@@ -145,7 +186,16 @@ def add_decorator(function):
module_name = schema_file.replace('.jsonnet', '').replace('/', '.')
config_module = importlib.import_module(f'dunedaq.{module_name}')
schema_object = getattr(config_module, schema_object_name)
- extra_schemas = [getattr(config_module, obj)() for obj in args]
+ extra_schemas = []
+ for obj_name in args:
+ if '.' in obj_name:
+ i = obj_name.rfind('.')
+ ex_module_name, ex_schema_object_name = obj_name[:i], obj_name[i+1:]
+ extra_module = importlib.import_module(f'dunedaq.{ex_module_name}')
+ extra_schemas += [getattr(extra_module, ex_schema_object_name)()]
+ else:
+ # extra_schemas = [getattr(config_module, obj)() for obj in args]
+ extra_schemas = [getattr(config_module, obj_name)()]
def configure(ctx, param, filename):
return parse_config_file(filename, schema_object())
diff --git a/python/daqconf/core/console.py b/python/daqconf/core/console.py
new file mode 100644
index 00000000..745d8c8b
--- /dev/null
+++ b/python/daqconf/core/console.py
@@ -0,0 +1,2 @@
+from rich.console import Console
+console = Console()
\ No newline at end of file
diff --git a/python/daqconf/core/fragment_producers.py b/python/daqconf/core/fragment_producers.py
index 17b1abf0..991278cf 100644
--- a/python/daqconf/core/fragment_producers.py
+++ b/python/daqconf/core/fragment_producers.py
@@ -3,10 +3,7 @@
import moo.io
moo.io.default_load_path = get_moo_model_path()
-from rich.console import Console
-
import moo.otypes
-import re
moo.otypes.load_types('trigger/moduleleveltrigger.jsonnet')
moo.otypes.load_types('dfmodules/triggerrecordbuilder.jsonnet')
@@ -16,8 +13,7 @@
from daqconf.core.conf_utils import Direction
from daqconf.core.sourceid import source_id_raw_str, ensure_subsystem_string
-
-console = Console()
+from .console import console
def set_mlt_links(the_system, mlt_app_name="trigger", verbose=False):
"""
@@ -40,12 +36,14 @@ def set_mlt_links(the_system, mlt_app_name="trigger", verbose=False):
mgraph.reset_module_conf("mlt", mlt.ConfParams(links=mlt_links,
hsi_trigger_type_passthrough=old_mlt_conf.hsi_trigger_type_passthrough,
merge_overlapping_tcs=old_mlt_conf.merge_overlapping_tcs,
- buffer_timeout=old_mlt_conf.buffer_timeout,
+ buffer_timeout=old_mlt_conf.buffer_timeout,
td_out_of_timeout=old_mlt_conf.td_out_of_timeout,
td_readout_limit=old_mlt_conf.td_readout_limit,
ignore_tc=old_mlt_conf.ignore_tc,
use_readout_map=old_mlt_conf.use_readout_map,
- td_readout_map=old_mlt_conf.td_readout_map))
+ td_readout_map=old_mlt_conf.td_readout_map,
+ use_bitwords=old_mlt_conf.use_bitwords,
+ trigger_bitwords=old_mlt_conf.trigger_bitwords))
def remove_mlt_link(the_system, source_id, mlt_app_name="trigger"):
"""
@@ -65,7 +63,9 @@ def remove_mlt_link(the_system, source_id, mlt_app_name="trigger"):
td_readout_limit=old_mlt_conf.td_readout_limit,
ignore_tc=old_mlt_conf.ignore_tc,
use_readout_map=old_mlt_conf.use_readout_map,
- td_readout_map=old_mlt_conf.td_readout_map))
+ td_readout_map=old_mlt_conf.td_readout_map,
+ use_bitwords=old_mlt_conf.use_bitwords,
+ trigger_bitwords=old_mlt_conf.trigger_bitwords))
def connect_fragment_producers(app_name, the_system, verbose=False):
"""Connect the data request and fragment sending queues from all of
diff --git a/python/daqconf/core/metadata.py b/python/daqconf/core/metadata.py
index 1f58e320..20c54693 100755
--- a/python/daqconf/core/metadata.py
+++ b/python/daqconf/core/metadata.py
@@ -1,15 +1,13 @@
import json
import os
import sys
-from rich.console import Console
from os.path import exists, join
-console = Console()
+from .console import console
def write_metadata_file(json_dir, generator, config_file):
console.log("Generating metadata file")
-
# Backwards compatibility
if isinstance(json_dir, str):
from pathlib import Path
diff --git a/python/daqconf/core/sourceid.py b/python/daqconf/core/sourceid.py
index 14b597b7..ccd49a76 100644
--- a/python/daqconf/core/sourceid.py
+++ b/python/daqconf/core/sourceid.py
@@ -6,10 +6,11 @@
from enum import Enum
from collections import namedtuple, defaultdict
-console = Console()
+from .console import console
-from daqdataformats._daq_daqdataformats_py import SourceID
-from detchannelmaps._daq_detchannelmaps_py import *
+
+from daqdataformats import SourceID
+from detchannelmaps import *
TAID = namedtuple('TAID', ['detector', 'crate'])
TPID = namedtuple('TPID', ['detector', 'crate'])
diff --git a/python/daqconf/detreadoutmap.py b/python/daqconf/detreadoutmap.py
index 89a8f8f2..2b0f6b6a 100644
--- a/python/daqconf/detreadoutmap.py
+++ b/python/daqconf/detreadoutmap.py
@@ -92,21 +92,6 @@ def app_name(self):
class DetReadoutMapService:
"""Detector - Readout Link mapping"""
- # _tech_map = {
- # 'flx': (FelixStreamParameters, dromap.FelixStreamParameters),
- # 'eth': (EthStreamParameters, dromap.EthStreamParameters),
- # }
-
- # _host_label_map = {
- # 'flx': 'host',
- # 'eth': 'rx_host',
- # }
-
- # _iflabel_map = {
- # 'flx': 'card',
- # 'eth': 'rx_iface',
- # }
-
_traits_map = {
'flx': StreamKindTraits(FelixStreamParameters, dromap.FelixStreamParameters, 'host', 'card'),
'eth': StreamKindTraits(EthStreamParameters, dromap.EthStreamParameters, 'rx_host', 'rx_iface'),
@@ -134,7 +119,7 @@ def __init__(self):
self._map = {}
- def load(self, map_path: str, merge: bool = False) -> None:
+ def load(self, map_path: str, merge: bool = False, offset: int = 0) -> None:
map_fp = pathlib.Path(map_path)
@@ -147,8 +132,13 @@ def load(self, map_path: str, merge: bool = False) -> None:
self._validate_json(data)
+
streams = self._build_streams(data)
+ print(f"Offset = {offset}")
+ if offset:
+ streams = [s._replace(src_id = s.src_id + offset) for s in streams]
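+ # e.g. load("<map>.json", offset=100) shifts every stream's src_id by +100 (file name hypothetical)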
+
if merge:
src_id_max = max(self.get())+1 if self.get() else 0
new_src_id_min = min([s.src_id for s in streams])
diff --git a/schema/daqconf/bootgen.jsonnet b/schema/daqconf/bootgen.jsonnet
new file mode 100644
index 00000000..ad3e9aab
--- /dev/null
+++ b/schema/daqconf/bootgen.jsonnet
@@ -0,0 +1,43 @@
+// This is the configuration schema for daqconf_multiru_gen
+//
+
+local moo = import "moo.jsonnet";
+
+local stypes = import "daqconf/types.jsonnet";
+local types = moo.oschema.hier(stypes).dunedaq.daqconf.types;
+
+local s = moo.oschema.schema("dunedaq.daqconf.bootgen");
+local nc = moo.oschema.numeric_constraints;
+
+local cs = {
+ monitoring_dest: s.enum( "MonitoringDest", ["local", "cern", "pocket"]),
+ pm_choice: s.enum( "PMChoice", ["k8s", "ssh"], doc="Process Manager choice: ssh or Kubernetes"),
+
+
+ boot: s.record("boot", [
+ // s.field( "op_env", self.string, default='swtest', doc="Operational environment - used for raw data filename prefix and HDF5 Attribute inside the files"),
+ s.field( "base_command_port", types.port, default=3333, doc="Base port of application command endpoints"),
+
+ # Obscure
+ s.field( "capture_env_vars", types.strings, default=['TIMING_SHARE', 'DETCHANNELMAPS_SHARE'], doc="List of variables to capture from the environment"),
+ s.field( "disable_trace", types.flag, false, doc="Do not enable TRACE (default TRACE_FILE is /tmp/trace_buffer_${HOSTNAME}_${USER})"),
+ s.field( "opmon_impl", self.monitoring_dest, default='local', doc="Info collector service implementation to use"),
+ s.field( "ers_impl", self.monitoring_dest, default='local', doc="ERS destination (Kafka used for cern and pocket)"),
+ s.field( "pocket_url", types.host, default='127.0.0.1', doc="URL for connecting to Pocket services"),
+ s.field( "process_manager", self.pm_choice, default="ssh", doc="Choice of process manager"),
+
+ # K8S
+ s.field( "k8s_image", types.string, default="dunedaq/c8-minimal", doc="Which docker image to use"),
+
+ # Connectivity Service
+ s.field( "use_connectivity_service", types.flag, default=true, doc="Whether to use the ConnectivityService to manage connections"),
+ s.field( "start_connectivity_service", types.flag, default=true, doc="Whether to use the ConnectivityService to manage connections"),
+ s.field( "connectivity_service_threads", types.count, default=2, doc="Number of threads for the gunicorn server that serves connection info"),
+ s.field( "connectivity_service_host", types.host, default='localhost', doc="Hostname for the ConnectivityService"),
+ s.field( "connectivity_service_port", types.port, default=15000, doc="Port for the ConnectivityService"),
+ s.field( "connectivity_service_interval", types.count, default=1000, doc="Publish interval for the ConnectivityService")
+ ]),
+};
+
+
+stypes + moo.oschema.sort_select(cs)
diff --git a/schema/daqconf/confgen.jsonnet b/schema/daqconf/confgen.jsonnet
index 4ba902fe..f4ca4da1 100755
--- a/schema/daqconf/confgen.jsonnet
+++ b/schema/daqconf/confgen.jsonnet
@@ -3,320 +3,101 @@
local moo = import "moo.jsonnet";
+
+local stypes = import "daqconf/types.jsonnet";
+local types = moo.oschema.hier(stypes).dunedaq.daqconf.types;
+
local sctb = import "ctbmodules/ctbmodule.jsonnet";
local ctbmodule = moo.oschema.hier(sctb).dunedaq.ctbmodules.ctbmodule;
-local s = moo.oschema.schema("dunedaq.daqconf.confgen");
-local nc = moo.oschema.numeric_constraints;
-// A temporary schema construction context.
-local cs = {
- port: s.number( "Port", "i4", doc="A TCP/IP port number"),
- freq: s.number( "Frequency", "u4", doc="A frequency"),
- rate: s.number( "Rate", "f8", doc="A rate as a double"),
- count: s.number( "count", "i8", doc="A count of things"),
- three_choice: s.number( "threechoice", "i8", nc(minimum=0, exclusiveMaximum=3), doc="A choice between 0, 1, or 2"),
- flag: s.boolean( "Flag", doc="Parameter that can be used to enable or disable functionality"),
- monitoring_dest: s.enum( "MonitoringDest", ["local", "cern", "pocket"]),
- path: s.string( "Path", doc="Location on a filesystem"),
- paths: s.sequence( "Paths", self.path, doc="Multiple paths"),
- host: s.string( "Host", moo.re.dnshost, doc="A hostname"),
- hosts: s.sequence( "Hosts", self.host, "Multiple hosts"),
- string: s.string( "Str", doc="Generic string"),
- tpg_channel_map: s.enum( "TPGChannelMap", ["VDColdboxChannelMap", "ProtoDUNESP1ChannelMap", "PD2HDChannelMap", "HDColdboxChannelMap"]),
- dqm_channel_map: s.enum( "DQMChannelMap", ['HD', 'VD', 'PD2HD', 'HDCB']),
- dqm_params: s.sequence( "DQMParams", self.count, doc="Parameters for DQM (fixme)"),
- tc_types: s.sequence( "TCTypes", self.count, doc="List of TC types"),
- tc_type: s.number( "TCType", "i4", nc(minimum=0, maximum=9), doc="Number representing TC type. Currently ranging from 0 to 9"),
- tc_interval: s.number( "TCInterval", "i8", nc(minimum=1, maximum=30000000000), doc="The intervals between TCs that are inserted into MLT by CTCM, in clock ticks"),
- tc_intervals: s.sequence( "TCIntervals", self.tc_interval, doc="List of TC intervals used by CTCM"),
- readout_time: s.number( "ROTime", "i8", doc="A readout time in ticks"),
- channel_list: s.sequence( "ChannelList", self.count, doc="List of offline channels to be masked out from the TPHandler"),
-
- numa_exception: s.record( "NUMAException", [
- s.field( "host", self.host, default='localhost', doc="Host of exception"),
- s.field( "card", self.count, default=0, doc="Card ID of exception"),
- s.field( "numa_id", self.count, default=0, doc="NUMA ID of exception"),
- s.field( "felix_card_id", self.count, default=-1, doc="CARD ID override, -1 indicates no override"),
- s.field( "latency_buffer_numa_aware", self.flag, default=false, doc="Enable NUMA-aware mode for the Latency Buffer"),
- s.field( "latency_buffer_preallocation", self.flag, default=false, doc="Enable Latency Buffer preallocation"),
- ], doc="Exception to the default NUMA ID for FELIX cards"),
- numa_exceptions: s.sequence( "NUMAExceptions", self.numa_exception, doc="Exceptions to the default NUMA ID"),
- numa_config: s.record("numa_config", [
- s.field( "default_id", self.count, default=0, doc="Default NUMA ID for FELIX cards"),
- s.field( "default_latency_numa_aware", self.flag, default=false, doc="Default for Latency Buffer NUMA awareness"),
- s.field( "default_latency_preallocation", self.flag, default=false, doc="Default for Latency Buffer Preallocation"),
- s.field( "exceptions", self.numa_exceptions, default=[], doc="Exceptions to the default NUMA ID"),
- ]),
+local sboot = import "daqconf/bootgen.jsonnet";
+local bootgen = moo.oschema.hier(sboot).dunedaq.daqconf.bootgen;
- boot: s.record("boot", [
- s.field( "base_command_port", self.port, default=3333, doc="Base port of application command endpoints"),
- s.field( "disable_trace", self.flag, false, doc="Do not enable TRACE (default TRACE_FILE is /tmp/trace_buffer_${HOSTNAME}_${USER})"),
- s.field( "opmon_impl", self.monitoring_dest, default='local', doc="Info collector service implementation to use"),
- s.field( "ers_impl", self.monitoring_dest, default='local', doc="ERS destination (Kafka used for cern and pocket)"),
- s.field( "pocket_url", self.host, default='127.0.0.1', doc="URL for connecting to Pocket services"),
- s.field( "image", self.string, default="dunedaq/c8-minimal", doc="Which docker image to use"),
- s.field( "use_k8s", self.flag, default=false, doc="Whether to use k8s"),
- s.field( "op_env", self.string, default='swtest', doc="Operational environment - used for raw data filename prefix and HDF5 Attribute inside the files"),
- s.field( "data_request_timeout_ms", self.count, default=1000, doc="The baseline data request timeout that will be used by modules in the Readout and Trigger subsystems (i.e. any module that produces data fragments). Downstream timeouts, such as the trigger-record-building timeout, are derived from this."),
- s.field( "use_connectivity_service", self.flag, default=true, doc="Whether to use the ConnectivityService to manage connections"),
- s.field( "start_connectivity_service", self.flag, default=true, doc="Whether to use the ConnectivityService to manage connections"),
- s.field( "connectivity_service_threads", self.count, default=2, doc="Number of threads for the gunicorn server that serves connection info"),
- s.field( "connectivity_service_host", self.host, default='localhost', doc="Hostname for the ConnectivityService"),
- s.field( "connectivity_service_port", self.port, default=15000, doc="Port for the ConnectivityService"),
- s.field( "connectivity_service_interval", self.count, default=1000, doc="Publish interval for the ConnectivityService"),
- s.field( "RTE_script_settings", self.three_choice, default=0, doc="0 - Use an RTE script iff not in a dev environment, 1 - Always use RTE, 2 - never use RTE"),
- s.field( "use_data_network", self.flag, default = false, doc="Whether to use the data network (Won't work with k8s)"),
- ]),
+local sdetector = import "daqconf/detectorgen.jsonnet";
+local detectorgen = moo.oschema.hier(sdetector).dunedaq.daqconf.detectorgen;
- timing: s.record("timing", [
- s.field( "timing_session_name", self.string, default="", doc="Name of the global timing session to use, for timing commands"),
- s.field( "host_tprtc", self.host, default='localhost', doc='Host to run the timing partition controller app on'),
- # timing hw partition options
- s.field( "control_timing_partition", self.flag, default=false, doc='Flag to control whether we are controlling timing partition in master hardware'),
- s.field( "timing_partition_master_device_name", self.string, default="", doc='Timing partition master hardware device name'),
- s.field( "timing_partition_id", self.count, default=0, doc='Timing partition id'),
- s.field( "timing_partition_trigger_mask", self.count, default=255, doc='Timing partition trigger mask'),
- s.field( "timing_partition_rate_control_enabled", self.flag, default=false, doc='Timing partition rate control enabled'),
- s.field( "timing_partition_spill_gate_enabled", self.flag, default=false, doc='Timing partition spill gate enabled'),
- ]),
+local sdaqcommon = import "daqconf/daqcommongen.jsonnet";
+local daqcommongen = moo.oschema.hier(sdaqcommon).dunedaq.daqconf.daqcommongen;
- hsi: s.record("hsi", [
- # timing hsi options
- s.field( "use_timing_hsi", self.flag, default=false, doc='Flag to control whether real hardware timing HSI config is generated. Default is false'),
- s.field( "host_timing_hsi", self.host, default='localhost', doc='Host to run the HSI app on'),
- s.field( "hsi_hw_connections_file", self.path, default="${TIMING_SHARE}/config/etc/connections.xml", doc='Real timing hardware only: path to hardware connections file'),
- s.field( "enable_hardware_state_recovery", self.flag, default=true, doc="Enable (or not) hardware state recovery"),
- s.field( "hsi_device_name", self.string, default="", doc='Real HSI hardware only: device name of HSI hw'),
- s.field( "hsi_readout_period", self.count, default=1e3, doc='Real HSI hardware only: Period between HSI hardware polling [us]'),
- s.field( "control_hsi_hw", self.flag, default=false, doc='Flag to control whether we are controlling hsi hardware'),
- s.field( "hsi_endpoint_address", self.count, default=1, doc='Timing address of HSI endpoint'),
- s.field( "hsi_endpoint_partition", self.count, default=0, doc='Timing partition of HSI endpoint'),
- s.field( "hsi_re_mask",self.count, default=0, doc='Rising-edge trigger mask'),
- s.field( "hsi_fe_mask", self.count, default=0, doc='Falling-edge trigger mask'),
- s.field( "hsi_inv_mask",self.count, default=0, doc='Invert-edge mask'),
- s.field( "hsi_source",self.count, default=1, doc='HSI signal source; 0 - hardware, 1 - emulation (trigger timestamp bits)'),
- # fake hsi options
- s.field( "use_fake_hsi", self.flag, default=true, doc='Flag to control whether fake or real hardware HSI config is generated. Default is true'),
- s.field( "host_fake_hsi", self.host, default='localhost', doc='Host to run the HSI app on'),
- s.field( "hsi_device_id", self.count, default=0, doc='Fake HSI only: device ID of fake HSIEvents'),
- s.field( "mean_hsi_signal_multiplicity", self.count, default=1, doc='Fake HSI only: rate of individual HSI signals in emulation mode 1'),
- s.field( "hsi_signal_emulation_mode", self.count, default=0, doc='Fake HSI only: HSI signal emulation mode'),
- s.field( "enabled_hsi_signals", self.count, default=1, doc='Fake HSI only: bit mask of enabled fake HSI signals')
- ]),
+local stiming = import "daqconf/timinggen.jsonnet";
+local timinggen = moo.oschema.hier(stiming).dunedaq.daqconf.timinggen;
- ctb_hsi: s.record("ctb_hsi", [
- # ctb options
- s.field( "use_ctb_hsi", self.flag, default=false, doc='Flag to control whether CTB HSI config is generated. Default is false'),
- s.field( "host_ctb_hsi", self.host, default='localhost', doc='Host to run the HSI app on'),
- s.field("hlt_triggers", ctbmodule.Hlt_trigger_seq, []),
- s.field("beam_llt_triggers", ctbmodule.Llt_mask_trigger_seq, []),
- s.field("crt_llt_triggers", ctbmodule.Llt_count_trigger_seq, []),
- s.field("pds_llt_triggers", ctbmodule.Llt_count_trigger_seq, []),
- s.field("fake_trig_1", ctbmodule.Randomtrigger, ctbmodule.Randomtrigger),
- s.field("fake_trig_2", ctbmodule.Randomtrigger, ctbmodule.Randomtrigger)
- ]),
+local shsi = import "daqconf/hsigen.jsonnet";
+local hsigen = moo.oschema.hier(shsi).dunedaq.daqconf.hsigen;
- data_file_entry: s.record("data_file_entry", [
- s.field("data_file", self.path, default='./frames.bin', doc="File containing data frames to be replayed by the fake cards. Former -d. Uses the asset manager, can also be 'asset://checksum/somelonghash', or 'file://somewhere/frames.bin' or 'frames.bin'"),
- s.field("detector_id", self.count, default=3, doc="Detector ID that this file applies to"),
- ]),
- data_files: s.sequence("data_files", self.data_file_entry),
+local sreadout = import "daqconf/readoutgen.jsonnet";
+local readoutgen = moo.oschema.hier(sreadout).dunedaq.daqconf.readoutgen;
- readout: s.record("readout", [
- s.field( "detector_readout_map_file", self.path, default='./DetectorReadoutMap.json', doc="File containing detector hardware map for configuration to run"),
- s.field( "emulator_mode", self.flag, default=false, doc="If active, timestamps of data frames are overwritten when processed by the readout. This is necessary if the felix card does not set correct timestamps. Former -e"),
- s.field( "thread_pinning_file", self.path, default="", doc="A thread pinning configuration file that gets executed after conf."),
- s.field( "data_rate_slowdown_factor",self.count, default=1, doc="Factor by which to suppress data generation. Former -s"),
- s.field( "clock_speed_hz", self.freq, default=62500000),
- s.field( "default_data_file", self.path, default='asset://?label=ProtoWIB&subsystem=readout', doc="File containing data frames to be replayed by the fake cards. Former -d. Uses the asset manager, can also be 'asset://?checksum=somelonghash', or 'file://somewhere/frames.bin' or 'frames.bin'"),
- s.field( "data_files", self.data_files, default=[], doc="Files to use by detector type"),
- // s.field( "use_felix", self.flag, default=false, doc="Use real felix cards instead of fake ones. Former -f"),
- // s.field( "eth_mode", self.flag, default=false, doc="Use ethernet packet format"),
- s.field( "use_fake_cards", self.flag, default=false, doc="Use fake cards"),
- s.field( "latency_buffer_size", self.count, default=499968, doc="Size of the latency buffers (in number of elements)"),
- s.field( "fragment_send_timeout_ms", self.count, default=10, doc="The send timeout that will be used in the readout modules when sending fragments downstream (i.e. to the TRB)."),
- s.field( "enable_tpg", self.flag, default=false, doc="Enable TPG"),
- s.field( "tpg_threshold", self.count, default=120, doc="Select TPG threshold"),
- s.field( "tpg_algorithm", self.string, default="SimpleThreshold", doc="Select TPG algorithm (SimpleThreshold, AbsRS)"),
- s.field( "tpg_channel_mask", self.channel_list, default=[], doc="List of offline channels to be masked out from the TPHandler"),
- // s.field( "enable_firmware_tpg", self.flag, default=false, doc="Enable firmware TPG"),
- // s.field( "dtp_connections_file", self.path, default="${DTPCONTROLS_SHARE}/config/dtp_connections.xml", doc="DTP connections file"),
- // s.field( "firmware_hit_threshold", self.count, default=20, doc="firmware hitfinder threshold"),
- s.field( "enable_raw_recording", self.flag, default=false, doc="Add queues and modules necessary for the record command"),
- s.field( "raw_recording_output_dir", self.path, default='.', doc="Output directory where recorded data is written to. Data for each link is written to a separate file"),
- s.field( "use_fake_data_producers", self.flag, default=false, doc="Use fake data producers that respond with empty fragments immediately instead of (fake) cards and DLHs"),
- s.field( "readout_sends_tp_fragments",self.flag, default=false, doc="Send TP Fragments from Readout to Dataflow (via enabling TP Fragment links in MLT)"),
- // s.field( "enable_dpdk_reader", self.flag, default=false, doc="Enable sending frames using DPDK"),
- s.field( "host_dpdk_reader", self.hosts, default=['np04-srv-022'], doc="Which host to use to receive frames"),
- s.field( "eal_args", self.string, default='-l 0-1 -n 3 -- -m [0:1].0 -j', doc='Args passed to the EAL in DPDK'),
- s.field( "base_source_ip", self.string, default='10.73.139.', doc='First part of the IP of the source'),
- s.field( "destination_ip", self.string, default='10.73.139.17', doc='IP of the destination'),
- s.field( "numa_config", self.numa_config, default=self.numa_config, doc='Configuration of FELIX NUMA IDs'),
- s.field( "emulated_data_times_start_with_now", self.flag, default=false, doc="If active, the timestamp of the first emulated data frame is set to the current wallclock time"),
- ]),
+local strigger = import "daqconf/triggergen.jsonnet";
+local triggergen = moo.oschema.hier(strigger).dunedaq.daqconf.triggergen;
- trigger_algo_config: s.record("trigger_algo_config", [
- s.field("prescale", self.count, default=100),
- s.field("window_length", self.count, default=10000),
- s.field("adjacency_threshold", self.count, default=6),
- s.field("adj_tolerance", self.count, default=4),
- s.field("trigger_on_adc", self.flag, default=false),
- s.field("trigger_on_n_channels", self.flag, default=false),
- s.field("trigger_on_adjacency", self.flag, default=true),
- s.field("adc_threshold", self.count, default=10000),
- s.field("n_channels_threshold", self.count, default=8),
- s.field("print_tp_info", self.flag, default=false),
- ]),
+local sdataflow = import "daqconf/dataflowgen.jsonnet";
+local dataflowgen = moo.oschema.hier(sdataflow).dunedaq.daqconf.dataflowgen;
- c0_readout: s.record("c0_readout", [
- s.field("candidate_type", self.tc_type, default=0, doc="The TC type, 0=Unknown"),
- s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
- s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
- ]),
- c1_readout: s.record("c1_readout", [
- s.field("candidate_type", self.tc_type, default=1, doc="The TC type, 1=Timing"),
- s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
- s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
- ]),
- c2_readout: s.record("c2_readout", [
- s.field("candidate_type", self.tc_type, default=2, doc="The TC type, 2=TPCLowE"),
- s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
- s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
- ]),
- c3_readout: s.record("c3_readout", [
- s.field("candidate_type", self.tc_type, default=3, doc="The TC type, 3=Supernova"),
- s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
- s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
- ]),
- c4_readout: s.record("c4_readout", [
- s.field("candidate_type", self.tc_type, default=4, doc="The TC type, 4=Random"),
- s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
- s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
- ]),
- c5_readout: s.record("c5_readout", [
- s.field("candidate_type", self.tc_type, default=5, doc="The TC type, 5=Prescale"),
- s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
- s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
- ]),
- c6_readout: s.record("c6_readout", [
- s.field("candidate_type", self.tc_type, default=6, doc="The TC type, 6=ADCSimpleWindow"),
- s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
- s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
- ]),
- c7_readout: s.record("c7_readout", [
- s.field("candidate_type", self.tc_type, default=7, doc="The TC type, 7=HorizontalMuon"),
- s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
- s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
- ]),
- c8_readout: s.record("c8_readout", [
- s.field("candidate_type", self.tc_type, default=8, doc="The TC type, 8=MichelElectron"),
- s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
- s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
- ]),
- c9_readout: s.record("c9_readout", [
- s.field("candidate_type", self.tc_type, default=9, doc="The TC type, 9=LowEnergyEvent"),
- s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
- s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
- ]),
-
- tc_readout_map: s.record("tc_readout_map", [
- s.field("c0", self.c0_readout, default=self.c0_readout, doc="TC readout for TC type 0"),
- s.field("c1", self.c1_readout, default=self.c1_readout, doc="TC readout for TC type 1"),
- s.field("c2", self.c2_readout, default=self.c2_readout, doc="TC readout for TC type 2"),
- s.field("c3", self.c3_readout, default=self.c3_readout, doc="TC readout for TC type 3"),
- s.field("c4", self.c4_readout, default=self.c4_readout, doc="TC readout for TC type 4"),
- s.field("c5", self.c5_readout, default=self.c5_readout, doc="TC readout for TC type 5"),
- s.field("c6", self.c6_readout, default=self.c6_readout, doc="TC readout for TC type 6"),
- s.field("c7", self.c7_readout, default=self.c7_readout, doc="TC readout for TC type 7"),
- s.field("c8", self.c8_readout, default=self.c8_readout, doc="TC readout for TC type 8"),
- s.field("c9", self.c9_readout, default=self.c9_readout, doc="TC readout for TC type 9"),
- ]),
-
- trigger: s.record("trigger",[
- s.field( "trigger_rate_hz", self.rate, default=1.0, doc='Fake HSI only: rate at which fake HSIEvents are sent. 0 - disable HSIEvent generation. Former -t'),
- s.field( "trigger_window_before_ticks",self.count, default=1000, doc="Trigger window before marker. Former -b"),
- s.field( "trigger_window_after_ticks", self.count, default=1000, doc="Trigger window after marker. Former -a"),
- s.field( "host_trigger", self.host, default='localhost', doc='Host to run the trigger app on'),
- s.field( "host_tpw", self.host, default='localhost', doc='Host to run the TPWriter app on'),
- # trigger options
- s.field( "completeness_tolerance", self.count, default=1, doc="Maximum number of inactive queues we will tolerate."),
- s.field( "tolerate_incompleteness", self.flag, default=false, doc="Flag to tell trigger to tolerate inactive queues."),
- s.field( "ttcm_s1", self.count,default=1, doc="Timing trigger candidate maker accepted HSI signal ID 1"),
- s.field( "ttcm_s2", self.count, default=2, doc="Timing trigger candidate maker accepted HSI signal ID 2"),
- s.field( "trigger_activity_plugin", self.string, default='TriggerActivityMakerPrescalePlugin', doc="Trigger activity algorithm plugin"),
- s.field( "trigger_activity_config", self.trigger_algo_config, default=self.trigger_algo_config,doc="Trigger activity algorithm config (string containing python dictionary)"),
- s.field( "trigger_candidate_plugin", self.string, default='TriggerCandidateMakerPrescalePlugin', doc="Trigger candidate algorithm plugin"),
- s.field( "trigger_candidate_config", self.trigger_algo_config, default=self.trigger_algo_config, doc="Trigger candidate algorithm config (string containing python dictionary)"),
- s.field( "hsi_trigger_type_passthrough", self.flag, default=false, doc="Option to override trigger type in the MLT"),
- s.field( "enable_tpset_writing", self.flag, default=false, doc="Enable the writing of TPs to disk (only works with enable_tpg or enable_firmware_tpg)"),
- s.field( "tpset_output_path", self.path,default='.', doc="Output directory for TPSet stream files"),
- s.field( "tpset_output_file_size",self.count, default=4*1024*1024*1024, doc="The size threshold when TPSet stream files are closed (in bytes)"),
- s.field( "tpg_channel_map", self.tpg_channel_map, default="ProtoDUNESP1ChannelMap", doc="Channel map for TPG"),
- s.field( "mlt_merge_overlapping_tcs", self.flag, default=true, doc="Option to turn off merging of overlapping TCs when forming TDs in MLT"),
- s.field( "mlt_buffer_timeout", self.count, default=100, doc="Timeout (buffer) to wait for new overlapping TCs before sending TD"),
- s.field( "mlt_send_timed_out_tds", self.flag, default=true, doc="Option to drop TD if TC comes out of timeout window"),
- s.field( "mlt_max_td_length_ms", self.count, default=1000, doc="Maximum allowed time length [ms] for a readout window of a single TD"),
- s.field( "mlt_ignore_tc", self.tc_types, default=[], doc="Optional list of TC types to be ignored in MLT"),
- s.field( "mlt_use_readout_map", self.flag, default=false, doc="Option to use custom readout map in MLT"),
- s.field( "mlt_td_readout_map", self.tc_readout_map, default=self.tc_readout_map, doc="The readout windows assigned to TDs in MLT, based on TC type."),
- s.field( "use_custom_maker", self.flag, default=false, doc="Option to use a Custom Trigger Candidate Maker (plugin)"),
- s.field( "ctcm_trigger_types", self.tc_types, default=[4], doc="Optional list of TC types to be used by the Custom Trigger Candidate Maker (plugin)"),
- s.field( "ctcm_trigger_intervals", self.tc_intervals, default=[10000000], doc="Optional list of intervals (clock ticks) for the TC types to be used by the Custom Trigger Candidate Maker (plugin)"),
- ]),
+local sdqm = import "daqconf/dqmgen.jsonnet";
+local dqmgen = moo.oschema.hier(sdqm).dunedaq.daqconf.dqmgen;
- dataflowapp: s.record("dataflowapp",[
- s.field("app_name", self.string, default="dataflow0"),
- s.field( "output_paths",self.paths, default=['.'], doc="Location(s) for the dataflow app to write data. Former -o"),
- s.field( "host_df", self.host, default='localhost'),
- s.field( "max_file_size",self.count, default=4*1024*1024*1024, doc="The size threshold when raw data files are closed (in bytes)"),
- s.field( "data_store_mode", self.string, default="all-per-file", doc="all-per-file or one-event-per-file"),
- s.field( "max_trigger_record_window",self.count, default=0, doc="The maximum size for the window of data that will included in a single TriggerRecord (in ticks). Readout windows that are longer than this size will result in TriggerRecords being split into a sequence of TRs. A zero value for this parameter means no splitting."),
-
- ], doc="Element of the dataflow.apps array"),
- dataflowapps: s.sequence("dataflowapps", self.dataflowapp, doc="List of dataflowapp instances"),
+local s = moo.oschema.schema("dunedaq.daqconf.confgen");
+local nc = moo.oschema.numeric_constraints;
+// A temporary schema construction context.
- dataflow: s.record("dataflow", [
- s.field( "host_dfo", self.host, default='localhost', doc="Sets the host for the DFO app"),
- s.field("apps", self.dataflowapps, default=[], doc="Configuration for the dataflow apps (see dataflowapp for options)"),
- s.field( "token_count",self.count, default=10, doc="Number of tokens the dataflow apps give to the DFO. Former -c"),
- ]),
+local cs = {
+ // port: s.number( "Port", "i4", doc="A TCP/IP port number"),
+ // freq: s.number( "Frequency", "u4", doc="A frequency"),
+ // rate: s.number( "Rate", "f8", doc="A rate as a double"),
+ // count: s.number( "count", "i8", doc="A count of things"),
+ // three_choice: s.number( "threechoice", "i8", nc(minimum=0, exclusiveMaximum=3), doc="A choice between 0, 1, or 2"),
+ // flag: s.boolean( "Flag", doc="Parameter that can be used to enable or disable functionality"),
+ // monitoring_dest: s.enum( "MonitoringDest", ["local", "cern", "pocket"]),
+ // path: s.string( "Path", doc="Location on a filesystem"),
+ // paths: s.sequence( "Paths", self.path, doc="Multiple paths"),
+ // host: s.string( "Host", moo.re.dnshost, doc="A hostname"),
+ // hosts: s.sequence( "Hosts", self.host, "Multiple hosts"),
+ // string: s.string( "Str", doc="Generic string"),
+ // strings: s.sequence( "Strings", self.string, doc="List of strings"),
+
+ // tpg_channel_map: s.enum( "TPGChannelMap", ["VDColdboxChannelMap", "ProtoDUNESP1ChannelMap", "PD2HDChannelMap", "HDColdboxChannelMap"]),
+ // dqm_channel_map: s.enum( "DQMChannelMap", ['HD', 'VD', 'PD2HD', 'HDCB']),
+ // dqm_params: s.sequence( "DQMParams", self.count, doc="Parameters for DQM (fixme)"),
+ // tc_types: s.sequence( "TCTypes", self.count, doc="List of TC types"),
+ // tc_type: s.number( "TCType", "i4", nc(minimum=0, maximum=9), doc="Number representing TC type. Currently ranging from 0 to 9"),
+ // tc_interval: s.number( "TCInterval", "i8", nc(minimum=1, maximum=30000000000), doc="The intervals between TCs that are inserted into MLT by CTCM, in clock ticks"),
+ // tc_intervals: s.sequence( "TCIntervals", self.tc_interval, doc="List of TC intervals used by CTCM"),
+ // readout_time: s.number( "ROTime", "i8", doc="A readout time in ticks"),
+ // channel_list: s.sequence( "ChannelList", self.count, doc="List of offline channels to be masked out from the TPHandler"),
+ // tpg_algo_choice: s.enum( "TPGAlgoChoice", ["SimpleThreshold", "AbsRS"], doc="Trigger algorithm choice"),
+ // pm_choice: s.enum( "PMChoice", ["k8s", "ssh"], doc="Process Manager choice: ssh or Kubernetes"),
+ // rte_choice: s.enum( "RTEChoice", ["auto", "release", "devarea"], doc="Kubernetes DAQ application RTE choice"),
+
- dqm: s.record("dqm", [
- s.field('enable_dqm', self.flag, default=false, doc="Enable Data Quality Monitoring"),
- s.field('impl', self.monitoring_dest, default='local', doc="DQM destination (Kafka used for cern and pocket)"),
- s.field('cmap', self.dqm_channel_map, default='HD', doc="Which channel map to use for DQM"),
- s.field('host_dqm', self.hosts, default=['localhost'], doc='Host(s) to run the DQM app on'),
- s.field('raw_params', self.dqm_params, default=[60, 50], doc="Parameters that control the data sent for the raw display plot"),
- s.field('std_params', self.dqm_params, default=[10, 1000], doc="Parameters that control the data sent for the mean/rms plot"),
- s.field('rms_params', self.dqm_params, default=[0, 1000], doc="Parameters that control the data sent for the mean/rms plot"),
- s.field('fourier_channel_params', self.dqm_params, default=[0, 0], doc="Parameters that control the data sent for the fourier transform plot"),
- s.field('fourier_plane_params', self.dqm_params, default=[600, 1000], doc="Parameters that control the data sent for the summed fourier transform plot"),
- s.field('df_rate', self.count, default=10, doc='How many seconds between requests to DF for Trigger Records'),
- s.field('df_algs', self.string, default='raw std fourier_plane', doc='Algorithms to be run on Trigger Records from DF (use quotes)'),
- s.field('max_num_frames', self.count, default=32768, doc='Maximum number of frames to use in the algorithms'),
- s.field('kafka_address', self.string, default='', doc='kafka address used to send messages'),
- s.field('kafka_topic', self.string, default='DQM', doc='kafka topic used to send messages'),
+ ctb_hsi: s.record("ctb_hsi", [
+ # ctb options
+ s.field( "use_ctb_hsi", types.flag, default=false, doc='Flag to control whether CTB HSI config is generated. Default is false'),
+ s.field( "host_ctb_hsi", types.host, default='localhost', doc='Host to run the HSI app on'),
+ s.field( "hlt_triggers", ctbmodule.Hlt_trigger_seq, []),
+ s.field( "beam_llt_triggers", ctbmodule.Llt_mask_trigger_seq, []),
+ s.field( "crt_llt_triggers", ctbmodule.Llt_count_trigger_seq, []),
+ s.field( "pds_llt_triggers", ctbmodule.Llt_count_trigger_seq, []),
+ s.field( "fake_trig_1", ctbmodule.Randomtrigger, ctbmodule.Randomtrigger),
+ s.field( "fake_trig_2", ctbmodule.Randomtrigger, ctbmodule.Randomtrigger)
]),
- dpdk_sender: s.record("dpdk_sender", [
- s.field( "enable_dpdk_sender", self.flag, default=false, doc="Enable sending frames using DPDK"),
- s.field( "host_dpdk_sender", self.hosts, default=['np04-srv-021'], doc="Which host to use to send frames"),
- s.field( "eal_args", self.string, default='-l 0-1 -n 3 -- -m [0:1].0 -j', doc='Args passed to the EAL in DPDK'),
- ]),
daqconf_multiru_gen: s.record('daqconf_multiru_gen', [
- s.field('boot', self.boot, default=self.boot, doc='Boot parameters'),
- s.field('dataflow', self.dataflow, default=self.dataflow, doc='Dataflow paramaters'),
- s.field('dqm', self.dqm, default=self.dqm, doc='DQM parameters'),
- s.field('hsi', self.hsi, default=self.hsi, doc='HSI parameters'),
- s.field('ctb_hsi', self.ctb_hsi, default=self.ctb_hsi, doc='CTB parameters'),
- s.field('readout', self.readout, default=self.readout, doc='Readout parameters'),
- s.field('timing', self.timing, default=self.timing, doc='Timing parameters'),
- s.field('trigger', self.trigger, default=self.trigger, doc='Trigger parameters'),
- s.field('dpdk_sender', self.dpdk_sender, default=self.dpdk_sender, doc='DPDK sender parameters'),
+ s.field('detector', detectorgen.detector, default=detectorgen.detector, doc='Detector parameters'),
+ s.field('daq_common', daqcommongen.daq_common, default=daqcommongen.daq_common, doc='DAQ common parameters'),
+ s.field('boot', bootgen.boot, default=bootgen.boot, doc='Boot parameters'),
+ s.field('dataflow', dataflowgen.dataflow, default=dataflowgen.dataflow, doc='Dataflow parameters'),
+ s.field('dqm', dqmgen.dqm, default=dqmgen.dqm, doc='DQM parameters'),
+ s.field('hsi', hsigen.hsi, default=hsigen.hsi, doc='HSI parameters'),
+ s.field('ctb_hsi', self.ctb_hsi, default=self.ctb_hsi, doc='CTB parameters'),
+ s.field('readout', readoutgen.readout, default=readoutgen.readout, doc='Readout parameters'),
+ s.field('timing', timinggen.timing, default=timinggen.timing, doc='Timing parameters'),
+ s.field('trigger', triggergen.trigger, default=triggergen.trigger, doc='Trigger parameters')
+ // s.field('dpdk_sender', self.dpdk_sender, default=self.dpdk_sender, doc='DPDK sender parameters'),
]),
};
// Output a topologically sorted array.
-sctb + moo.oschema.sort_select(cs)
+stypes + sboot + sdetector + sdaqcommon + stiming + shsi + sreadout + strigger + sdataflow + sdqm + sctb + moo.oschema.sort_select(cs)
diff --git a/schema/daqconf/daqcommongen.jsonnet b/schema/daqconf/daqcommongen.jsonnet
new file mode 100644
index 00000000..8eec3200
--- /dev/null
+++ b/schema/daqconf/daqcommongen.jsonnet
@@ -0,0 +1,22 @@
+// This is the configuration schema for daqconf_multiru_gen
+//
+
+local moo = import "moo.jsonnet";
+
+local stypes = import "daqconf/types.jsonnet";
+local types = moo.oschema.hier(stypes).dunedaq.daqconf.types;
+
+local s = moo.oschema.schema("dunedaq.daqconf.daqcommongen");
+local nc = moo.oschema.numeric_constraints;
+// A temporary schema construction context.
+local cs = {
+
+ daq_common : s.record("daq_common", [
+ s.field( "data_request_timeout_ms", types.count, default=1000, doc="The baseline data request timeout that will be used by modules in the Readout and Trigger subsystems (i.e. any module that produces data fragments). Downstream timeouts, such as the trigger-record-building timeout, are derived from this."),
+ s.field( "use_data_network", types.flag, default = false, doc="Whether to use the data network (Won't work with k8s)"),
+ s.field( "data_rate_slowdown_factor",types.count, default=1, doc="Factor by which to suppress data generation. Former -s"),
+ ], doc="Common daq_common settings"),
+
+};
+
+stypes + moo.oschema.sort_select(cs)
diff --git a/schema/daqconf/dataflowgen.jsonnet b/schema/daqconf/dataflowgen.jsonnet
new file mode 100644
index 00000000..95a9123b
--- /dev/null
+++ b/schema/daqconf/dataflowgen.jsonnet
@@ -0,0 +1,40 @@
+// This is the configuration schema for daqconf_multiru_gen
+//
+
+local moo = import "moo.jsonnet";
+
+local stypes = import "daqconf/types.jsonnet";
+local types = moo.oschema.hier(stypes).dunedaq.daqconf.types;
+
+local s = moo.oschema.schema("dunedaq.daqconf.dataflowgen");
+local nc = moo.oschema.numeric_constraints;
+
+local cs = {
+
+ dataflowapp: s.record("dataflowapp",[
+ s.field( "app_name", types.string, default="dataflow0"),
+ s.field( "output_paths",types.paths, default=['.'], doc="Location(s) for the dataflow app to write data. Former -o"),
+ s.field( "host_df", types.host, default='localhost'),
+ s.field( "max_file_size",types.count, default=4*1024*1024*1024, doc="The size threshold when raw data files are closed (in bytes)"),
+ s.field( "data_store_mode", types.string, default="all-per-file", doc="all-per-file or one-event-per-file"),
+ s.field( "max_trigger_record_window",types.count, default=0, doc="The maximum size for the window of data that will included in a single TriggerRecord (in ticks). Readout windows that are longer than this size will result in TriggerRecords being split into a sequence of TRs. A zero value for this parameter means no splitting."),
+
+ ], doc="Element of the dataflow.apps array"),
+
+ dataflowapps: s.sequence("dataflowapps", self.dataflowapp, doc="List of dataflowapp instances"),
+
+ dataflow: s.record("dataflow", [
+ s.field( "host_dfo", types.host, default='localhost', doc="Sets the host for the DFO app"),
+ s.field( "apps", self.dataflowapps, default=[], doc="Configuration for the dataflow apps (see dataflowapp for options)"),
+ s.field( "token_count",types.count, default=10, doc="Number of tokens the dataflow apps give to the DFO. Former -c"),
+ // Trigger
+ s.field( "host_tpw", types.host, default='localhost', doc='Host to run the TPWriter app on'),
+ s.field( "enable_tpset_writing", types.flag, default=false, doc="Enable the writing of TPs to disk (only works with enable_tpg or enable_firmware_tpg)"),
+ s.field( "tpset_output_path", types.path,default='.', doc="Output directory for TPSet stream files"),
+ s.field( "tpset_output_file_size",types.count, default=4*1024*1024*1024, doc="The size threshold when TPSet stream files are closed (in bytes)"),
+ ]),
+
+};
+
+
+stypes + moo.oschema.sort_select(cs)
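As a rough sketch of what this record produces, a generated configuration could contain a dataflow block like the one below. The second app, its host name and its output path are purely illustrative; the field names, the "one-event-per-file" mode and the remaining values are the ones defined by the schema above.

    "dataflow": {
      "host_dfo": "localhost",
      "token_count": 10,
      "host_tpw": "localhost",
      "enable_tpset_writing": false,
      "apps": [
        { "app_name": "dataflow0", "host_df": "localhost",  "output_paths": ["."],     "data_store_mode": "all-per-file" },
        { "app_name": "dataflow1", "host_df": "df-host-01", "output_paths": ["/data"], "data_store_mode": "one-event-per-file" }
      ]
    }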
diff --git a/schema/daqconf/detectorgen.jsonnet b/schema/daqconf/detectorgen.jsonnet
new file mode 100644
index 00000000..f63d0e6f
--- /dev/null
+++ b/schema/daqconf/detectorgen.jsonnet
@@ -0,0 +1,25 @@
+// This is the configuration schema for daqconf_multiru_gen
+//
+
+local moo = import "moo.jsonnet";
+
+local stypes = import "daqconf/types.jsonnet";
+local types = moo.oschema.hier(stypes).dunedaq.daqconf.types;
+
+local s = moo.oschema.schema("dunedaq.daqconf.detectorgen");
+local nc = moo.oschema.numeric_constraints;
+// A temporary schema construction context.
+local cs = {
+
+ tpc_channel_map: s.enum("TPCChannelMap", ["VDColdboxChannelMap", "ProtoDUNESP1ChannelMap", "PD2HDChannelMap", "HDColdboxChannelMap"]),
+
+ detector : s.record("detector", [
+ s.field( "op_env", types.string, default='swtest', doc="Operational environment - used for HDF5 Attribute inside the files"),
+ s.field( "clock_speed_hz", types.freq, default=62500000),
+ s.field( "tpc_channel_map", self.tpc_channel_map, default="PD2HDChannelMap", doc="Channel map for TPG"),
+ ], doc="Global common settings"),
+
+
+};
+
+stypes + moo.oschema.sort_select(cs)
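For orientation, the new detector record renders in a generated configuration roughly as follows; the values shown are simply the schema defaults above, and the key name matches the 'detector' field added to daqconf_multiru_gen.

    "detector": {
      "op_env": "swtest",
      "clock_speed_hz": 62500000,
      "tpc_channel_map": "PD2HDChannelMap"
    }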
diff --git a/schema/daqconf/detreadoutmap.jsonnet b/schema/daqconf/detreadoutmap.jsonnet
index 6af7f76d..23841fd6 100644
--- a/schema/daqconf/detreadoutmap.jsonnet
+++ b/schema/daqconf/detreadoutmap.jsonnet
@@ -36,15 +36,15 @@ local cs = {
], doc="A FELIX readout stream configuration"),
eth_conf: s.record("EthStreamParameters", [
- s.field("protocol", self.eth_protocol, "udp", doc="Ethernet protocol"),
+ s.field("protocol", self.eth_protocol, "udp", doc="Ethernet protocol used. udp or zmq"),
s.field("mode", self.mode, "fix_rate", doc="fix_rate, var_rate"),
s.field("rx_iface", self.short, 0, doc="Reaout interface"),
- s.field("rx_host", self.host, "localhost", doc="Reaout hostname"),
- s.field("rx_mac", self.mac, "00:00:00:00:00:00", doc="Reaout Destination MAC"),
- s.field("rx_ip", self.ipv4, "0.0.0.0", doc="Reaout Destination IP"),
- s.field("tx_host", self.host, "localhost", doc="Transmitter hostname"),
- s.field("tx_mac", self.mac, "00:00:00:00:00:00", doc="Reaout Source MAC"),
- s.field("tx_ip", self.ipv4, "0.0.0.0", doc="Reaout Source IP"),
+ s.field("rx_host", self.host, "localhost", doc="Readout hostname"),
+ s.field("rx_mac", self.mac, "00:00:00:00:00:00", doc="Destination MAC on readout host"),
+ s.field("rx_ip", self.ipv4, "0.0.0.0", doc="Destination IP on readout host"),
+ s.field("tx_host", self.host, "localhost", doc="Transmitter control host"),
+ s.field("tx_mac", self.mac, "00:00:00:00:00:00", doc="Transmitter MAC"),
+ s.field("tx_ip", self.ipv4, "0.0.0.0", doc="Transmitter IP"),
], doc="A Ethernet readout stream configuration"),
diff --git a/schema/daqconf/dqmgen.jsonnet b/schema/daqconf/dqmgen.jsonnet
new file mode 100644
index 00000000..808b7879
--- /dev/null
+++ b/schema/daqconf/dqmgen.jsonnet
@@ -0,0 +1,35 @@
+// This is the configuration schema for daqconf_multiru_gen
+//
+
+local moo = import "moo.jsonnet";
+
+local stypes = import "daqconf/types.jsonnet";
+local types = moo.oschema.hier(stypes).dunedaq.daqconf.types;
+
+local s = moo.oschema.schema("dunedaq.daqconf.dqmgen");
+local nc = moo.oschema.numeric_constraints;
+
+local cs = {
+ monitoring_dest: s.enum( "MonitoringDest", ["local", "cern", "pocket"]),
+ dqm_channel_map: s.enum( "DQMChannelMap", ['HD', 'VD', 'PD2HD', 'HDCB']),
+ dqm_params: s.sequence( "DQMParams", types.count, doc="Parameters for DQM (fixme)"),
+
+ dqm: s.record("dqm", [
+ s.field('enable_dqm', types.flag, default=false, doc="Enable Data Quality Monitoring"),
+ s.field('impl', self.monitoring_dest, default='local', doc="DQM destination (Kafka used for cern and pocket)"),
+ s.field('cmap', self.dqm_channel_map, default='HD', doc="Which channel map to use for DQM"),
+ s.field('host_dqm', types.hosts, default=['localhost'], doc='Host(s) to run the DQM app on'),
+ s.field('raw_params', self.dqm_params, default=[60, 50], doc="Parameters that control the data sent for the raw display plot"),
+ s.field('std_params', self.dqm_params, default=[10, 1000], doc="Parameters that control the data sent for the mean/rms plot"),
+ s.field('rms_params', self.dqm_params, default=[0, 1000], doc="Parameters that control the data sent for the mean/rms plot"),
+ s.field('fourier_channel_params', self.dqm_params, default=[0, 0], doc="Parameters that control the data sent for the fourier transform plot"),
+ s.field('fourier_plane_params', self.dqm_params, default=[600, 1000], doc="Parameters that control the data sent for the summed fourier transform plot"),
+ s.field('df_rate', types.count, default=10, doc='How many seconds between requests to DF for Trigger Records'),
+ s.field('df_algs', types.string, default='raw std fourier_plane', doc='Algorithms to be run on Trigger Records from DF (use quotes)'),
+ s.field('max_num_frames', types.count, default=32768, doc='Maximum number of frames to use in the algorithms'),
+ s.field('kafka_address', types.string, default='', doc='kafka address used to send messages'),
+ s.field('kafka_topic', types.string, default='DQM', doc='kafka topic used to send messages'),
+ ]),
+};
+
+stypes + moo.oschema.sort_select(cs)
diff --git a/schema/daqconf/hsigen.jsonnet b/schema/daqconf/hsigen.jsonnet
new file mode 100644
index 00000000..3e851747
--- /dev/null
+++ b/schema/daqconf/hsigen.jsonnet
@@ -0,0 +1,42 @@
+// This is the configuration schema for daqconf_multiru_gen
+//
+
+local moo = import "moo.jsonnet";
+
+local stypes = import "daqconf/types.jsonnet";
+local types = moo.oschema.hier(stypes).dunedaq.daqconf.types;
+
+local s = moo.oschema.schema("dunedaq.daqconf.hsigen");
+local nc = moo.oschema.numeric_constraints;
+
+local cs = {
+
+ hsi: s.record("hsi", [
+ s.field( "random_trigger_rate_hz", types.rate, default=1.0, doc='Fake HSI only: rate at which fake HSIEvents are sent. 0 - disable HSIEvent generation. Former -t'),
+ # timing hsi options
+ s.field( "use_timing_hsi", types.flag, default=false, doc='Flag to control whether real hardware timing HSI config is generated. Default is false'),
+ s.field( "host_timing_hsi", types.host, default='localhost', doc='Host to run the HSI app on'),
+ s.field( "hsi_hw_connections_file", types.path, default="${TIMING_SHARE}/config/etc/connections.xml", doc='Real timing hardware only: path to hardware connections file'),
+ s.field( "enable_hardware_state_recovery", types.flag, default=true, doc="Enable (or not) hardware state recovery"),
+ s.field( "hsi_device_name", types.string, default="", doc='Real HSI hardware only: device name of HSI hw'),
+ s.field( "hsi_readout_period", types.count, default=1e3, doc='Real HSI hardware only: Period between HSI hardware polling [us]'),
+ s.field( "control_hsi_hw", types.flag, default=false, doc='Flag to control whether we are controlling hsi hardware'),
+ s.field( "hsi_endpoint_address", types.count, default=1, doc='Timing address of HSI endpoint'),
+ s.field( "hsi_endpoint_partition", types.count, default=0, doc='Timing partition of HSI endpoint'),
+ s.field( "hsi_re_mask",types.count, default=0, doc='Rising-edge trigger mask'),
+ s.field( "hsi_fe_mask", types.count, default=0, doc='Falling-edge trigger mask'),
+ s.field( "hsi_inv_mask",types.count, default=0, doc='Invert-edge mask'),
+ s.field( "hsi_source",types.count, default=1, doc='HSI signal source; 0 - hardware, 1 - emulation (trigger timestamp bits)'),
+ # fake hsi options
+ s.field( "use_fake_hsi", types.flag, default=true, doc='Flag to control whether fake or real hardware HSI config is generated. Default is true'),
+ s.field( "host_fake_hsi", types.host, default='localhost', doc='Host to run the HSI app on'),
+ s.field( "hsi_device_id", types.count, default=0, doc='Fake HSI only: device ID of fake HSIEvents'),
+ s.field( "mean_hsi_signal_multiplicity", types.count, default=1, doc='Fake HSI only: rate of individual HSI signals in emulation mode 1'),
+ s.field( "hsi_signal_emulation_mode", types.count, default=0, doc='Fake HSI only: HSI signal emulation mode'),
+ s.field( "enabled_hsi_signals", types.count, default=1, doc='Fake HSI only: bit mask of enabled fake HSI signals')
+ ]),
+
+};
+
+
+stypes + moo.oschema.sort_select(cs)
diff --git a/schema/daqconf/readoutgen.jsonnet b/schema/daqconf/readoutgen.jsonnet
new file mode 100644
index 00000000..2ab79db5
--- /dev/null
+++ b/schema/daqconf/readoutgen.jsonnet
@@ -0,0 +1,84 @@
+// This is the configuration schema for daqconf_multiru_gen
+//
+
+local moo = import "moo.jsonnet";
+
+local stypes = import "daqconf/types.jsonnet";
+local types = moo.oschema.hier(stypes).dunedaq.daqconf.types;
+
+local s = moo.oschema.schema("dunedaq.daqconf.readoutgen");
+local nc = moo.oschema.numeric_constraints;
+// A temporary schema construction context.
+local cs = {
+
+ id_list: s.sequence( "IDList", types.count, doc="List of Ids"),
+
+ data_file_entry: s.record("data_file_entry", [
+ s.field( "data_file", types.path, default='./frames.bin', doc="File containing data frames to be replayed by the fake cards. Former -d. Uses the asset manager, can also be 'asset://checksum/somelonghash', or 'file://somewhere/frames.bin' or 'frames.bin'"),
+ s.field( "detector_id", types.count, default=3, doc="Detector ID that this file applies to"),
+ ]),
+ data_files: s.sequence("data_files", self.data_file_entry),
+
+ numa_exception: s.record( "NUMAException", [
+ s.field( "host", types.host, default='localhost', doc="Host of exception"),
+ s.field( "card", types.count, default=0, doc="Card ID of exception"),
+ s.field( "numa_id", types.count, default=0, doc="NUMA ID of exception"),
+ s.field( "felix_card_id", types.count, default=-1, doc="CARD ID override, -1 indicates no override"),
+ s.field( "latency_buffer_numa_aware", types.flag, default=false, doc="Enable NUMA-aware mode for the Latency Buffer"),
+ s.field( "latency_buffer_preallocation", types.flag, default=false, doc="Enable Latency Buffer preallocation"),
+ ], doc="Exception to the default NUMA ID for FELIX cards"),
+
+ numa_exceptions: s.sequence( "NUMAExceptions", self.numa_exception, doc="Exceptions to the default NUMA ID"),
+
+ numa_config: s.record("numa_config", [
+ s.field( "default_id", types.count, default=0, doc="Default NUMA ID for FELIX cards"),
+ s.field( "default_latency_numa_aware", types.flag, default=false, doc="Default for Latency Buffer NUMA awareness"),
+ s.field( "default_latency_preallocation", types.flag, default=false, doc="Default for Latency Buffer Preallocation"),
+ s.field( "exceptions", self.numa_exceptions, default=[], doc="Exceptions to the default NUMA ID"),
+ ]),
+
+ dpdk_lcore_exception: s.record( "DPDKLCoreException", [
+ s.field( "host", types.host, default='localhost', doc="Host of exception"),
+ s.field( "iface", types.count, default=0, doc="Card ID of exception"),
+ s.field( "lcore_id_set", self.id_list, default=[], doc='List of IDs per core'),
+ ]),
+ dpdk_lcore_exceptions: s.sequence( "DPDKLCoreExceptions", self.dpdk_lcore_exception, doc="Exceptions to the default LCore config"),
+
+ dpdk_lcore_config: s.record("DPDKLCoreConfig", [
+ s.field( "default_lcore_id_set", self.id_list, default=[1,2,3,4], doc='List of IDs per core'),
+ s.field( "exceptions", self.dpdk_lcore_exceptions, default=[], doc="Exceptions to the default NUMA ID"),
+ ]),
+
+ readout: s.record("readout", [
+ s.field( "detector_readout_map_file", types.path, default='./DetectorReadoutMap.json', doc="File containing detector hardware map for configuration to run"),
+ s.field( "use_fake_data_producers", types.flag, default=false, doc="Use fake data producers that respond with empty fragments immediately instead of (fake) cards and DLHs"),
+ // s.field( "memory_limit_gb", types.count, default=64, doc="Application memory limit in GB")
+ // Fake cards
+ s.field( "use_fake_cards", types.flag, default=false, doc="Use fake cards"),
+ s.field( "emulated_data_times_start_with_now", types.flag, default=false, doc="If active, the timestamp of the first emulated data frame is set to the current wallclock time"),
+ s.field( "default_data_file", types.path, default='asset://?label=ProtoWIB&subsystem=readout', doc="File containing data frames to be replayed by the fake cards. Former -d. Uses the asset manager, can also be 'asset://?checksum=somelonghash', or 'file://somewhere/frames.bin' or 'frames.bin'"),
+ s.field( "data_files", self.data_files, default=[], doc="Files to use by detector type"),
+ // DPDK
+ s.field( "dpdk_eal_args", types.string, default='-l 0-1 -n 3 -- -m [0:1].0 -j', doc='Args passed to the EAL in DPDK'),
+ // s.field( "dpdk_rxqueues_per_lcore", types.count, default=1, doc='Number of rx queues per core'),
+ // s.field( "dpdk_lcore_id_set", self.id_list, default=1, doc='List of IDs per core'),
+ s.field( "dpdk_lcores_config", self.dpdk_lcore_config, default=self.dpdk_lcore_config, doc='Configuration of DPDK LCore IDs'),
+ // FLX
+ s.field( "numa_config", self.numa_config, default=self.numa_config, doc='Configuration of FELIX NUMA IDs'),
+ // DLH
+ s.field( "emulator_mode", types.flag, default=false, doc="If active, timestamps of data frames are overwritten when processed by the readout. This is necessary if the felix card does not set correct timestamps. Former -e"),
+ s.field( "thread_pinning_file", types.path, default="", doc="A thread pinning configuration file that gets executed after conf."),
+ // s.field( "data_rate_slowdown_factor",types.count, default=1, doc="Factor by which to suppress data generation. Former -s"),
+ s.field( "latency_buffer_size", types.count, default=499968, doc="Size of the latency buffers (in number of elements)"),
+ s.field( "fragment_send_timeout_ms", types.count, default=10, doc="The send timeout that will be used in the readout modules when sending fragments downstream (i.e. to the TRB)."),
+ s.field( "enable_tpg", types.flag, default=false, doc="Enable TPG"),
+ s.field( "tpg_threshold", types.count, default=120, doc="Select TPG threshold"),
+ s.field( "tpg_algorithm", types.string, default="SimpleThreshold", doc="Select TPG algorithm (SimpleThreshold, AbsRS)"),
+ s.field( "tpg_channel_mask", self.id_list, default=[], doc="List of offline channels to be masked out from the TPHandler"),
+ s.field( "enable_raw_recording", types.flag, default=false, doc="Add queues and modules necessary for the record command"),
+ s.field( "raw_recording_output_dir", types.path, default='.', doc="Output directory where recorded data is written to. Data for each link is written to a separate file")
+ ]),
+
+};
+
+stypes + moo.oschema.sort_select(cs)
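The NUMAException and DPDKLCoreException records above are easiest to read through an example. The fragment below is a hypothetical readout override: the host names, NUMA IDs and LCore IDs are made up for illustration, while the field names and nesting follow the records defined in this file.

    "readout": {
      "numa_config": {
        "default_id": 0,
        "exceptions": [
          { "host": "ru-host-01", "card": 0, "numa_id": 1, "felix_card_id": -1,
            "latency_buffer_numa_aware": true, "latency_buffer_preallocation": true }
        ]
      },
      "dpdk_lcores_config": {
        "default_lcore_id_set": [1, 2, 3, 4],
        "exceptions": [
          { "host": "ru-host-02", "iface": 0, "lcore_id_set": [5, 6, 7, 8] }
        ]
      }
    }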
diff --git a/schema/daqconf/timinggen.jsonnet b/schema/daqconf/timinggen.jsonnet
new file mode 100644
index 00000000..547a488d
--- /dev/null
+++ b/schema/daqconf/timinggen.jsonnet
@@ -0,0 +1,28 @@
+// This is the configuration schema for daqconf_multiru_gen
+//
+
+local moo = import "moo.jsonnet";
+
+local stypes = import "daqconf/types.jsonnet";
+local types = moo.oschema.hier(stypes).dunedaq.daqconf.types;
+
+local s = moo.oschema.schema("dunedaq.daqconf.timinggen");
+local nc = moo.oschema.numeric_constraints;
+
+local cs = {
+
+ timing: s.record("timing", [
+ s.field( "timing_session_name", types.string, default="", doc="Name of the global timing session to use, for timing commands"),
+ s.field( "host_tprtc", types.host, default='localhost', doc='Host to run the timing partition controller app on'),
+ # timing hw partition options
+ s.field( "control_timing_partition", types.flag, default=false, doc='Flag to control whether we are controlling timing partition in master hardware'),
+ s.field( "timing_partition_master_device_name", types.string, default="", doc='Timing partition master hardware device name'),
+ s.field( "timing_partition_id", types.count, default=0, doc='Timing partition id'),
+ s.field( "timing_partition_trigger_mask", types.count, default=255, doc='Timing partition trigger mask'),
+ s.field( "timing_partition_rate_control_enabled", types.flag, default=false, doc='Timing partition rate control enabled'),
+ s.field( "timing_partition_spill_gate_enabled", types.flag, default=false, doc='Timing partition spill gate enabled'),
+ ]),
+};
+
+
+stypes + moo.oschema.sort_select(cs)
diff --git a/schema/daqconf/triggergen.jsonnet b/schema/daqconf/triggergen.jsonnet
new file mode 100644
index 00000000..c5bf25cc
--- /dev/null
+++ b/schema/daqconf/triggergen.jsonnet
@@ -0,0 +1,135 @@
+// This is the configuration schema for daqconf_multiru_gen
+//
+
+local moo = import "moo.jsonnet";
+
+local stypes = import "daqconf/types.jsonnet";
+local types = moo.oschema.hier(stypes).dunedaq.daqconf.types;
+
+local s = moo.oschema.schema("dunedaq.daqconf.triggergen");
+local nc = moo.oschema.numeric_constraints;
+// A temporary schema construction context.
+local cs = {
+ tc_type: s.number( "TCType", "i4", nc(minimum=0, maximum=9), doc="Number representing TC type. Currently ranging from 0 to 9"),
+ tc_types: s.sequence( "TCTypes", self.tc_type, doc="List of TC types"),
+ tc_interval: s.number( "TCInterval", "i8", nc(minimum=1, maximum=30000000000), doc="The intervals between TCs that are inserted into MLT by CTCM, in clock ticks"),
+ tc_intervals: s.sequence( "TCIntervals", self.tc_interval, doc="List of TC intervals used by CTCM"),
+ readout_time: s.number( "ROTime", "i8", doc="A readout time in ticks"),
+ bitword: s.string( "Bitword", doc="A string representing the TC type name, to be set in the trigger bitword."),
+ bitword_list: s.sequence( "BitwordList", self.bitword, doc="A sequence of TC type bits forming a single bitword."),
+ bitwords: s.sequence( "Bitwords", self.bitword_list, doc="List of bitwords to use when forming trigger decisions in MLT" ),
+
+ trigger_algo_config: s.record("trigger_algo_config", [
+ s.field("prescale", types.count, default=100),
+ s.field("window_length", types.count, default=10000),
+ s.field("adjacency_threshold", types.count, default=6),
+ s.field("adj_tolerance", types.count, default=4),
+ s.field("trigger_on_adc", types.flag, default=false),
+ s.field("trigger_on_n_channels", types.flag, default=false),
+ s.field("trigger_on_adjacency", types.flag, default=true),
+ s.field("adc_threshold", types.count, default=10000),
+ s.field("n_channels_threshold", types.count, default=8),
+ s.field("print_tp_info", types.flag, default=false),
+ ]),
+
+ c0_readout: s.record("c0_readout", [
+ s.field("candidate_type", self.tc_type, default=0, doc="The TC type, 0=Unknown"),
+ s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
+ s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
+ ]),
+ c1_readout: s.record("c1_readout", [
+ s.field("candidate_type", self.tc_type, default=1, doc="The TC type, 1=Timing"),
+ s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
+ s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
+ ]),
+ c2_readout: s.record("c2_readout", [
+ s.field("candidate_type", self.tc_type, default=2, doc="The TC type, 2=TPCLowE"),
+ s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
+ s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
+ ]),
+ c3_readout: s.record("c3_readout", [
+ s.field("candidate_type", self.tc_type, default=3, doc="The TC type, 3=Supernova"),
+ s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
+ s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
+ ]),
+ c4_readout: s.record("c4_readout", [
+ s.field("candidate_type", self.tc_type, default=4, doc="The TC type, 4=Random"),
+ s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
+ s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
+ ]),
+ c5_readout: s.record("c5_readout", [
+ s.field("candidate_type", self.tc_type, default=5, doc="The TC type, 5=Prescale"),
+ s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
+ s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
+ ]),
+ c6_readout: s.record("c6_readout", [
+ s.field("candidate_type", self.tc_type, default=6, doc="The TC type, 6=ADCSimpleWindow"),
+ s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
+ s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
+ ]),
+ c7_readout: s.record("c7_readout", [
+ s.field("candidate_type", self.tc_type, default=7, doc="The TC type, 7=HorizontalMuon"),
+ s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
+ s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
+ ]),
+ c8_readout: s.record("c8_readout", [
+ s.field("candidate_type", self.tc_type, default=8, doc="The TC type, 8=MichelElectron"),
+ s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
+ s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
+ ]),
+ c9_readout: s.record("c9_readout", [
+ s.field("candidate_type", self.tc_type, default=9, doc="The TC type, 9=LowEnergyEvent"),
+ s.field("time_before", self.readout_time, default=1000, doc="Time to readout before TC time [ticks]"),
+ s.field("time_after", self.readout_time, default=1001, doc="Time to readout after TC time [ticks]"),
+ ]),
+
+ tc_readout_map: s.record("tc_readout_map", [
+ s.field("c0", self.c0_readout, default=self.c0_readout, doc="TC readout for TC type 0"),
+ s.field("c1", self.c1_readout, default=self.c1_readout, doc="TC readout for TC type 1"),
+ s.field("c2", self.c2_readout, default=self.c2_readout, doc="TC readout for TC type 2"),
+ s.field("c3", self.c3_readout, default=self.c3_readout, doc="TC readout for TC type 3"),
+ s.field("c4", self.c4_readout, default=self.c4_readout, doc="TC readout for TC type 4"),
+ s.field("c5", self.c5_readout, default=self.c5_readout, doc="TC readout for TC type 5"),
+ s.field("c6", self.c6_readout, default=self.c6_readout, doc="TC readout for TC type 6"),
+ s.field("c7", self.c7_readout, default=self.c7_readout, doc="TC readout for TC type 7"),
+ s.field("c8", self.c8_readout, default=self.c8_readout, doc="TC readout for TC type 8"),
+ s.field("c9", self.c9_readout, default=self.c9_readout, doc="TC readout for TC type 9"),
+ ]),
+
+ trigger: s.record("trigger",[
+ // s.field( "trigger_rate_hz", types.rate, default=1.0, doc='Fake HSI only: rate at which fake HSIEvents are sent. 0 - disable HSIEvent generation. Former -t'),
+ s.field( "trigger_window_before_ticks",types.count, default=1000, doc="Trigger window before marker. Former -b"),
+ s.field( "trigger_window_after_ticks", types.count, default=1000, doc="Trigger window after marker. Former -a"),
+ s.field( "host_trigger", types.host, default='localhost', doc='Host to run the trigger app on'),
+ // s.field( "host_tpw", types.host, default='localhost', doc='Host to run the TPWriter app on'),
+ # trigger options
+ s.field( "completeness_tolerance", types.count, default=1, doc="Maximum number of inactive queues we will tolerate."),
+ s.field( "tolerate_incompleteness", types.flag, default=false, doc="Flag to tell trigger to tolerate inactive queues."),
+ s.field( "ttcm_s1", types.count,default=1, doc="Timing trigger candidate maker accepted HSI signal ID 1"),
+ s.field( "ttcm_s2", types.count, default=2, doc="Timing trigger candidate maker accepted HSI signal ID 2"),
+ s.field( "trigger_activity_plugin", types.string, default='TriggerActivityMakerPrescalePlugin', doc="Trigger activity algorithm plugin"),
+ s.field( "trigger_activity_config", self.trigger_algo_config, default=self.trigger_algo_config,doc="Trigger activity algorithm config (string containing python dictionary)"),
+ s.field( "trigger_candidate_plugin", types.string, default='TriggerCandidateMakerPrescalePlugin', doc="Trigger candidate algorithm plugin"),
+ s.field( "trigger_candidate_config", self.trigger_algo_config, default=self.trigger_algo_config, doc="Trigger candidate algorithm config (string containing python dictionary)"),
+ s.field( "hsi_trigger_type_passthrough", types.flag, default=false, doc="Option to override trigger type in the MLT"),
+ // s.field( "enable_tpset_writing", types.flag, default=false, doc="Enable the writing of TPs to disk (only works with enable_tpg or enable_firmware_tpg)"),
+ // s.field( "tpset_output_path", types.path,default='.', doc="Output directory for TPSet stream files"),
+ // s.field( "tpset_output_file_size",types.count, default=4*1024*1024*1024, doc="The size threshold when TPSet stream files are closed (in bytes)"),
+ // s.field( "tpg_channel_map", self.tpg_channel_map, default="ProtoDUNESP1ChannelMap", doc="Channel map for TPG"),
+ s.field( "mlt_merge_overlapping_tcs", types.flag, default=true, doc="Option to turn off merging of overlapping TCs when forming TDs in MLT"),
+ s.field( "mlt_buffer_timeout", types.count, default=100, doc="Timeout (buffer) to wait for new overlapping TCs before sending TD"),
+ s.field( "mlt_send_timed_out_tds", types.flag, default=true, doc="Option to drop TD if TC comes out of timeout window"),
+ s.field( "mlt_max_td_length_ms", types.count, default=1000, doc="Maximum allowed time length [ms] for a readout window of a single TD"),
+ s.field( "mlt_ignore_tc", self.tc_types, default=[], doc="Optional list of TC types to be ignored in MLT"),
+ s.field( "mlt_use_readout_map", types.flag, default=false, doc="Option to use custom readout map in MLT"),
+ s.field( "mlt_td_readout_map", self.tc_readout_map, default=self.tc_readout_map, doc="The readout windows assigned to TDs in MLT, based on TC type."),
+ s.field( "mlt_use_bitwords", types.flag, default=false, doc="Option to use bitwords (ie trigger types, coincidences) when forming trigger decisions in MLT" ),
+ s.field( "mlt_trigger_bitwords", self.bitwords, default=[], doc="Optional dictionary of bitwords to use when forming trigger decisions in MLT" ),
+ s.field( "use_custom_maker", types.flag, default=false, doc="Option to use a Custom Trigger Candidate Maker (plugin)"),
+ s.field( "ctcm_trigger_types", self.tc_types, default=[4], doc="Optional list of TC types to be used by the Custom Trigger Candidate Maker (plugin)"),
+ s.field( "ctcm_trigger_intervals", self.tc_intervals, default=[10000000], doc="Optional list of intervals (clock ticks) for the TC types to be used by the Custom Trigger Candidate Maker (plugin)"),
+ ]),
+
+};
+
+stypes + moo.oschema.sort_select(cs)
diff --git a/schema/daqconf/types.jsonnet b/schema/daqconf/types.jsonnet
new file mode 100644
index 00000000..74c8ecf6
--- /dev/null
+++ b/schema/daqconf/types.jsonnet
@@ -0,0 +1,34 @@
+// Basic types shared by the daqconf configuration schemas
+//
+
+local moo = import "moo.jsonnet";
+
+local s = moo.oschema.schema("dunedaq.daqconf.types");
+local nc = moo.oschema.numeric_constraints;
+// A temporary schema construction context.
+
+local cs = {
+ int4 : s.number( "int4", "i4", doc="A signed integer of 4 bytes"),
+ uint4 : s.number( "uint4", "u4", doc="An unsigned integer of 4 bytes"),
+ int8 : s.number( "int8", "i8", doc="A signed integer of 8 bytes"),
+ uint8 : s.number( "uint8", "u8", doc="An unsigned integer of 8 bytes"),
+ float4 : s.number( "float4", "f4", doc="A float of 4 bytes"),
+ double8 : s.number( "double8", "f8", doc="A double of 8 bytes"),
+
+ port: s.number( "port", "i4", doc="A TCP/IP port number"),
+ freq: s.number( "freq", "u4", doc="A frequency"),
+ rate: s.number( "rate", "f8", doc="A rate as a double"),
+ count: s.number( "count", "i8", doc="A count of things"),
+ flag: s.boolean( "flag", doc="Parameter that can be used to enable or disable functionality"),
+ path: s.string( "path", doc="Location on a filesystem"),
+ paths: s.sequence( "paths", self.path, doc="Multiple paths"),
+ string: s.string( "string", doc="Generic string"),
+ strings:s.sequence( "strings", self.string, doc="List of strings"),
+ host: s.string( "host", moo.re.dnshost, doc="A hostname"),
+ hosts: s.sequence( "hosts", self.host, doc="A collection of host names"),
+ ipv4: s.string( "ipv4", pattern=moo.re.ipv4, doc="ipv4 string"),
+ mac: s.string( "mac", pattern="^[a-fA-F0-9]{2}(:[a-fA-F0-9]{2}){5}$", doc="mac string"),
+};
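+
+// Sketch of the intended usage (assuming moo's standard helpers): a consumer schema can
+// import this file and build a handle to the shared types, roughly as
+//   local stypes = import "daqconf/types.jsonnet";
+//   local types = moo.oschema.hier(stypes).dunedaq.daqconf.types;
+// after which fields may reference types.host, types.count, types.flag, etc.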
+
+// Output a topologically sorted array.
+moo.oschema.sort_select(cs)
diff --git a/scripts/daqconf_multiru_gen b/scripts/daqconf_multiru_gen
index ebf08f29..dd54318c 100755
--- a/scripts/daqconf_multiru_gen
+++ b/scripts/daqconf_multiru_gen
@@ -6,6 +6,11 @@ import os.path
from rich.console import Console
from os.path import exists, abspath, dirname, basename
from pathlib import Path
+
+from daqconf.core.console import console
+
+# console.log("daqconf - loading base modules")
+
from daqconf.core.system import System
from daqconf.core.metadata import write_metadata_file, write_config_file
from daqconf.core.sourceid import SourceIDBroker #, get_tpg_mode
@@ -15,41 +20,210 @@ from daqconf.core.assets import resolve_asset_file
from detdataformats import *
import daqconf.detreadoutmap as dromap
+# console.log("daqconf - base modules loaded")
-console = Console()
# Set moo schema search path
-from dunedaq.env import get_moo_model_path
-import moo.io
-moo.io.default_load_path = get_moo_model_path()
+# from dunedaq.env import get_moo_model_path
+# import moo.io
+# moo.io.default_load_path = get_moo_model_path()
# Load configuration types
-import moo.otypes
+# import moo.otypes
+
+# moo.otypes.load_types('detchannelmaps/hardwaremapservice.jsonnet')
+# import dunedaq.detchannelmaps.hardwaremapservice as hwms
+
+
+def expand_conf(config_data, debug=False):
+ """Expands the moo configuration record into sub-records,
+ re-casting its members into the corresponding moo objects.
+
+ Args:
+        config_data: Top-level daqconf configuration object produced by the CLI layer
+ debug (bool, optional): Enable verbose reports. Defaults to False.
+
+ Returns:
+        tuple: The expanded configuration sub-records
+            (boot, detector, daq_common, timing, hsi, ctb_hsi, readout, trigger, dataflow, dqm)
+ """
+
+ import dunedaq.daqconf.confgen as confgen
+ import dunedaq.daqconf.bootgen as bootgen
+ import dunedaq.daqconf.detectorgen as detectorgen
+ import dunedaq.daqconf.daqcommongen as daqcommongen
+ import dunedaq.daqconf.timinggen as timinggen
+ import dunedaq.daqconf.hsigen as hsigen
+ import dunedaq.daqconf.readoutgen as readoutgen
+ import dunedaq.daqconf.triggergen as triggergen
+ import dunedaq.daqconf.dataflowgen as dataflowgen
+ import dunedaq.daqconf.dqmgen as dqmgen
+
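+    # Re-instantiating each sub-record through its moo-generated class applies the schema
+    # defaults and type validation to the plain dictionaries coming from the command line.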
+    ## Hack: we shouldn't need to do this; in the future it should simply be boot = config_data.boot
+ boot = bootgen.boot(**config_data.boot)
+ if debug: console.log(f"boot configuration object: {boot.pod()}")
+
+ detector = detectorgen.detector(**config_data.detector)
+ if debug: console.log(f"detector configuration object: {detector.pod()}")
+
+ daq_common = daqcommongen.daq_common(**config_data.daq_common)
+ if debug: console.log(f"daq_common configuration object: {daq_common.pod()}")
+
+ timing = timinggen.timing(**config_data.timing)
+ if debug: console.log(f"timing configuration object: {timing.pod()}")
+
+ hsi = hsigen.hsi(**config_data.hsi)
+ if debug: console.log(f"hsi configuration object: {hsi.pod()}")
+
+ ctb_hsi = confgen.ctb_hsi(**config_data.ctb_hsi)
+ if debug: console.log(f"ctb_hsi configuration object: {ctb_hsi.pod()}")
+
+ readout = readoutgen.readout(**config_data.readout)
+ if debug: console.log(f"readout configuration object: {readout.pod()}")
+
+ trigger = triggergen.trigger(**config_data.trigger)
+ if debug: console.log(f"trigger configuration object: {trigger.pod()}")
+
+ dataflow = dataflowgen.dataflow(**config_data.dataflow)
+ if debug: console.log(f"dataflow configuration object: {dataflow.pod()}")
+
+ dqm = dqmgen.dqm(**config_data.dqm)
+ if debug: console.log(f"dqm configuration object: {dqm.pod()}")
+
+ # dpdk_sender = confgen.dpdk_sender(**config_data.dpdk_sender)
+ # if debug: console.log(f"dpdk_sender configuration object: {dpdk_sender.pod()}")
+
+ return (
+ boot,
+ detector,
+ daq_common,
+ timing,
+ hsi,
+ ctb_hsi,
+ readout,
+ trigger,
+ dataflow,
+ dqm,
+ # dpdk_sender
+ )
+
+def validate_conf(boot, readout, dataflow, timing, hsi, dqm):
+ """Validate the consistency of confgen parameters
+
+ Args:
+        boot: Boot configuration sub-record
+        readout: Readout configuration sub-record
+        dataflow: Dataflow configuration sub-record
+        timing: Timing configuration sub-record
+        hsi: HSI configuration sub-record
+        dqm: DQM configuration sub-record
+
+    Raises:
+        Exception: If an inconsistent combination of options is detected
+ """
+ if readout.enable_tpg and readout.use_fake_data_producers:
+ raise Exception("Fake data producers don't support software tpg")
+
+ if readout.use_fake_data_producers and dqm.enable_dqm:
+ raise Exception("DQM can't be used with fake data producers")
+
+ if dataflow.enable_tpset_writing and not readout.enable_tpg:
+ raise Exception("TP writing can only be used when either software or firmware TPG is enabled")
+
+ if hsi.use_timing_hsi and not hsi.hsi_device_name:
+ raise Exception("If --use-hsi-hw flag is set to true, --hsi-device-name must be specified!")
+
+ if timing.control_timing_partition and not timing.timing_partition_master_device_name:
+ raise Exception("If --control-timing-partition flag is set to true, --timing-partition-master-device-name must be specified!")
+
+ if hsi.control_hsi_hw and not hsi.use_timing_hsi:
+ raise Exception("Timing HSI hardware control can only be enabled if timing HSI hardware is used!")
+
+ if boot.process_manager == 'k8s' and not boot.k8s_image:
+ raise Exception("You need to define k8s_image if running with k8s")
+
+
+def create_df_apps(
+ dataflow,
+ sourceid_broker
+ ):
+
+ import dunedaq.daqconf.dataflowgen as dataflowgen
+
+ if len(dataflow.apps) == 0:
+ console.log(f"No Dataflow apps defined, adding default dataflow0")
+ dataflow.apps = [dataflowgen.dataflowapp()]
+
+ host_df = []
+ appconfig_df = {}
+ df_app_names = []
+ for d in dataflow.apps:
+ console.log(f"Parsing dataflow app config {d}")
+
+        ## Hack: we shouldn't need to do this; in the future it should simply be appconfig = d
+ appconfig = dataflowgen.dataflowapp(**d)
+
+ dfapp = appconfig.app_name
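+        # Duplicate app names merge their settings into the existing entry; new names get
+        # their own entry and a fresh TRBuilder source ID from the broker.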
+ if dfapp in df_app_names:
+ appconfig_df[dfapp].update(appconfig)
+ else:
+ df_app_names.append(dfapp)
+ appconfig_df[dfapp] = appconfig
+ appconfig_df[dfapp].source_id = sourceid_broker.get_next_source_id("TRBuilder")
+ sourceid_broker.register_source_id("TRBuilder", appconfig_df[dfapp].source_id, None)
+ host_df += [appconfig.host_df]
+ return host_df, appconfig_df, df_app_names
-moo.otypes.load_types('detchannelmaps/hardwaremapservice.jsonnet')
-import dunedaq.detchannelmaps.hardwaremapservice as hwms
# Add -h as default help option
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CONTEXT_SETTINGS)
-@generate_cli_from_schema('daqconf/confgen.jsonnet', 'daqconf_multiru_gen', 'dataflowapp')
-@click.option('--base-command-port', type=int, default=-1, help="Base port of application command endpoints")
-@click.option('--detector-readout-map-file', default='', help="File containing detector detector-readout map for configuration to run")
+@generate_cli_from_schema('daqconf/confgen.jsonnet', 'daqconf_multiru_gen', 'daqconf.dataflowgen.dataflowapp')
+@click.option('--force-pm', default=None, type=click.Choice(['ssh', 'k8s']), help="Force process manager")
+@click.option('--base-command-port', type=int, default=None, help="Base port of application command endpoints")
+@click.option('-m', '--detector-readout-map-file', default=None, help="File containing the detector readout map for the configuration to run")
@click.option('-s', '--data-rate-slowdown-factor', default=0, help="Scale factor for readout internal clock to generate less data")
+@click.option('--file-label', default=None, help="File label used as the raw data filename prefix")
@click.option('--enable-dqm', default=False, is_flag=True, help="Enable generation of DQM apps")
-@click.option('--file-label', default='', help="File - used for raw data filename prefix")
-@click.option('-a', '--only-check-args', default=False, is_flag=True, help="Dry run, do not generate output files")
+@click.option('-a', '--check-args-and-exit', default=False, is_flag=True, help="Check input arguments and quit")
@click.option('-n', '--dry-run', default=False, is_flag=True, help="Dry run, do not generate output files")
+@click.option('-f', '--force', default=False, is_flag=True, help="Force configuration generation - delete the target directory if it already exists")
@click.option('--debug', default=False, is_flag=True, help="Switch to get a lot of printout and dot files")
@click.argument('json_dir', type=click.Path())
-def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown_factor, enable_dqm, file_label, only_check_args, dry_run, debug, json_dir):
-
- if only_check_args:
+def cli(
+ config,
+ force_pm,
+ base_command_port,
+ detector_readout_map_file,
+ data_rate_slowdown_factor,
+ enable_dqm,
+ file_label,
+ check_args_and_exit,
+ dry_run,
+ force,
+ debug,
+ json_dir
+ ):
+
+ # console.log("Commandline parsing completed")
+ if check_args_and_exit:
return
output_dir = Path(json_dir)
- if output_dir.exists() and not dry_run:
- raise RuntimeError(f"Directory {output_dir} already exists")
+ if output_dir.exists():
+ if dry_run:
+ pass
+ elif force:
+ console.log(f"Removing existing {output_dir}")
+ # Delete output folder if it exists
+ shutil.rmtree(output_dir)
+ else:
+ raise RuntimeError(f"Directory {output_dir} already exists")
debug_dir = output_dir / 'debug'
@@ -57,57 +231,58 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
debug_dir.mkdir(parents=True)
config_data = config[0]
- config_file = config[1]
+ config_file = Path(config[1] if config[1] is not None else "daqconf_default.json")
if debug:
console.log(f"Configuration for daqconf: {config_data.pod()}")
- # Get our config objects
- # Loading this one another time... (first time in config_file.generate_cli_from_schema)
- moo.otypes.load_types('daqconf/confgen.jsonnet')
- import dunedaq.daqconf.confgen as confgen
-
- ## Hack, we shouldn't need to do that, in the future it should be, boot = config_data.boot
- boot = confgen.boot(**config_data.boot)
- if debug: console.log(f"boot configuration object: {boot.pod()}")
-
- ## etc...
- timing = confgen.timing(**config_data.timing)
- if debug: console.log(f"timing configuration object: {timing.pod()}")
-
- hsi = confgen.hsi(**config_data.hsi)
- if debug: console.log(f"hsi configuration object: {hsi.pod()}")
-
- ctb_hsi = confgen.ctb_hsi(**config_data.ctb_hsi)
- if debug: console.log(f"ctb_hsi configuration object: {ctb_hsi.pod()}")
-
- readout = confgen.readout(**config_data.readout)
- if debug: console.log(f"readout configuration object: {readout.pod()}")
-
- trigger = confgen.trigger(**config_data.trigger)
- if debug: console.log(f"trigger configuration object: {trigger.pod()}")
+ (
+ boot,
+ detector,
+ daq_common,
+ timing,
+ hsi,
+ ctb_hsi,
+ readout,
+ trigger,
+ dataflow,
+ dqm,
+ # dpdk_sender
+ ) = expand_conf(config_data, debug)
- dataflow = confgen.dataflow(**config_data.dataflow)
- if debug: console.log(f"dataflow configuration object: {dataflow.pod()}")
+ #
+ # Update command-line options config parameters
+ #
+ if force_pm is not None:
+ boot.process_manager = force_pm
+        console.log(f"boot.process_manager set to {boot.process_manager}")
- dqm = confgen.dqm(**config_data.dqm)
- if debug: console.log(f"dqm configuration object: {dqm.pod()}")
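+    # Whether k8s is used is now derived from the chosen process manager rather than a separate flag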
+ use_k8s = (boot.process_manager == 'k8s')
- dpdk_sender = confgen.dpdk_sender(**config_data.dpdk_sender)
- if debug: console.log(f"dpdk_sender configuration object: {dpdk_sender.pod()}")
+ if base_command_port is not None:
+ boot.base_command_port = base_command_port
+ console.log(f"boot.base_command_port set to {boot.base_command_port}")
- # Update with command-line options
- if base_command_port != -1:
- boot.base_command_port = base_command_port
- if detector_readout_map_file != '':
+
+ if detector_readout_map_file is not None:
readout.detector_readout_map_file = detector_readout_map_file
+ console.log(f"readout.detector_readout_map_file set to {readout.detector_readout_map_file}")
+
if data_rate_slowdown_factor != 0:
- readout.data_rate_slowdown_factor = data_rate_slowdown_factor
+ daq_common.data_rate_slowdown_factor = data_rate_slowdown_factor
+ console.log(f"daq_common.data_rate_slowdown_factor set to {daq_common.data_rate_slowdown_factor}")
+
dqm.enable_dqm |= enable_dqm
+
if dqm.impl == 'pocket':
dqm.kafka_address = boot.pocket_url + ":30092"
- # if op_env != '':
- # boot.op_env = op_env
+
+ file_label = file_label if file_label is not None else detector.op_env
+
+ #--------------------------------------------------------------------------
+ # Validate configuration
+ #--------------------------------------------------------------------------
+ validate_conf(boot, readout, dataflow, timing, hsi, dqm)
console.log("Loading dataflow config generator")
from daqconf.apps.dataflow_gen import get_dataflow_app
@@ -115,7 +290,7 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
console.log("Loading dqm config generator")
from daqconf.apps.dqm_gen import get_dqm_app
console.log("Loading readout config generator")
- from daqconf.apps.readout_gen import create_readout_app, create_fake_reaout_app
+ from daqconf.apps.readout_gen import create_fake_readout_app, ReadoutAppGenerator
console.log("Loading trigger config generator")
from daqconf.apps.trigger_gen import get_trigger_app
console.log("Loading DFO config generator")
@@ -129,72 +304,53 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
console.log("Loading timing partition controller config generator")
from daqconf.apps.tprtc_gen import get_tprtc_app
console.log("Loading DPDK sender config generator")
- from daqconf.apps.dpdk_sender_gen import get_dpdk_sender_app
- if trigger.enable_tpset_writing:
+ # from daqconf.apps.dpdk_sender_gen import get_dpdk_sender_app
+
+ if dataflow.enable_tpset_writing:
console.log("Loading TPWriter config generator")
from daqconf.apps.tpwriter_gen import get_tpwriter_app
sourceid_broker = SourceIDBroker()
sourceid_broker.debug = debug
- if len(dataflow.apps) == 0:
- console.log(f"No Dataflow apps defined, adding default dataflow0")
- dataflow.apps = [confgen.dataflowapp()]
-
- host_df = []
- appconfig_df ={}
- df_app_names = []
- for d in dataflow.apps:
- console.log(f"Parsing dataflow app config {d}")
-
- ## Hack, we shouldn't need to do that, in the future, it should be appconfig = d
- appconfig = confgen.dataflowapp(**d)
-
- dfapp = appconfig.app_name
- if dfapp in df_app_names:
- appconfig_df[dfapp].update(appconfig)
- else:
- df_app_names.append(dfapp)
- appconfig_df[dfapp] = appconfig
- appconfig_df[dfapp].source_id = sourceid_broker.get_next_source_id("TRBuilder")
- sourceid_broker.register_source_id("TRBuilder", appconfig_df[dfapp].source_id, None)
- host_df += [appconfig.host_df]
-
-
+ #--------------------------------------------------------------------------
+ # Create dataflow applications
+ #--------------------------------------------------------------------------
+ host_df, appconfig_df, df_app_names = create_df_apps(dataflow=dataflow, sourceid_broker=sourceid_broker)
+ # Expand paths/assetfiles
readout.default_data_file = resolve_asset_file(readout.default_data_file, debug)
data_file_map = {}
for entry in readout.data_files:
data_file_map[entry["detector_id"]] = resolve_asset_file(entry["data_file"], debug)
- if boot.use_k8s:
+ # and output paths (Why does it need to be expanded in k8s mode only? or at all?)
+ if use_k8s:
console.log(f'Using k8s')
- trigger.tpset_output_path = abspath(trigger.tpset_output_path)
+ dataflow.tpset_output_path = abspath(dataflow.tpset_output_path)
for df_app in appconfig_df.values():
new_output_path = []
for op in df_app.output_paths:
new_output_path += [abspath(op)]
df_app.output_paths = new_output_path
+ #--------------------------------------------------------------------------
+ # Generation starts here
+ #--------------------------------------------------------------------------
console.log(f"Generating configs for hosts trigger={trigger.host_trigger} DFO={dataflow.host_dfo} dataflow={host_df} timing_hsi={hsi.host_timing_hsi} fake_hsi={hsi.host_fake_hsi} ctb_hsi={ctb_hsi.host_ctb_hsi} dqm={dqm.host_dqm}")
the_system = System()
- # Load the hw map file here to extract ru hosts, cards, slr, links, forntend types, sourceIDs and geoIDs
+    # Load the readout map file here to extract ru hosts, cards, slr, links, frontend types, sourceIDs and geoIDs
# The ru apps are determined by the combinations of hostname and card_id, the SourceID determines the
# DLH (with physical slr+link information), the detId acts as system_type allows to infer the frontend_type
- # hw_map_service = HardwareMapService(readout.detector_readout_map_file)
- # serialized_hw_map = hw_map_service.get_hardware_ma84p_json()
- # hw_map = hwms.HardwareMap(serialized_hw_map)
- # console.log(f"{hw_map}")
- # # Get the list of RU processes
- # dro_infos = hw_map_service.get_all_dro_info()
-
-
- # Load the Detector Readout map
+ #--------------------------------------------------------------------------
+ # Load Detector Readout map
+ #--------------------------------------------------------------------------
dro_map = dromap.DetReadoutMapService()
- dro_map.load(readout.detector_readout_map_file)
+ if readout.detector_readout_map_file:
+ dro_map.load(readout.detector_readout_map_file)
ru_descs = dro_map.get_ru_descriptors()
@@ -208,73 +364,7 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
console.log(f"Will generate a RU process on {ru_name} ({ru_desc.iface}, {ru_desc.kind}), {len(ru_desc.streams)} streams active")
number_of_ru_streams += len(ru_desc.streams)
-# total_number_of_data_producers = 0
-# if use_ssp:
-# total_number_of_data_producers = number_of_data_producers * len(host_ru)
-# console.log(f"Will setup {number_of_data_producers} SSP channels per host, for a total of {total_number_of_data_producers}")
-# else:
-# total_number_of_data_producers = number_of_data_producers * len(host_ru)
-# console.log(f"Will setup {number_of_data_producers} TPC channels per host, for a total of {total_number_of_data_producers}")
-#
-# if readout.enable_tpg and frontend_type != 'wib':
-# raise Exception("Software TPG is only available for the wib at the moment!")
-
- if readout.enable_tpg and readout.use_fake_data_producers:
- raise Exception("Fake data producers don't support software tpg")
-
- if readout.use_fake_data_producers and dqm.enable_dqm:
- raise Exception("DQM can't be used with fake data producers")
-
- if trigger.enable_tpset_writing and not readout.enable_tpg:
- raise Exception("TP writing can only be used when either software or firmware TPG is enabled")
-
-# if (len(region_id) != len(host_ru)) and (len(region_id) != 0):
-# raise Exception("--region-id should be specified once for each --host-ru, or not at all!")
-
- # TODO, Eric Flumerfelt 22-June-2022: Fix if/when multiple frontend types are supported. (Use https://click.palletsprojects.com/en/8.1.x/options/#multi-value-options for RU host/frontend/region config?)
-# if len(region_id) == 0:
-# region_id_temp = []
-# for reg in range(len(host_ru)):
-# region_id_temp.append(reg)
-# region_id = tuple(region_id_temp)
-
- if hsi.use_timing_hsi and not hsi.hsi_device_name:
- raise Exception("If --use-hsi-hw flag is set to true, --hsi-device-name must be specified!")
-
- if timing.control_timing_partition and not timing.timing_partition_master_device_name:
- raise Exception("If --control-timing-partition flag is set to true, --timing-partition-master-device-name must be specified!")
-
- if hsi.control_hsi_hw and not hsi.use_timing_hsi:
- raise Exception("Timing HSI hardware control can only be enabled if timing HSI hardware is used!")
-
- if boot.use_k8s and not boot.image:
- raise Exception("You need to provide an --image if running with k8s")
-
-# host_id_dict = {}
-# ru_configs = []
-# ru_channel_counts = {}
-# for region in region_id: ru_channel_counts[region] = 0
-#
-# ru_app_names=[f"ruflx{idx}" if readout.use_felix else f"ruemu{idx}" for idx in range(len(host_ru))]
-# dqm_app_names = [f"dqm{idx}_ru" for idx in range(len(host_ru))]
-#
-# for hostidx,ru_host in enumerate(ru_app_names):
-# cardid = 0
-# if host_ru[hostidx] in host_id_dict:
-# host_id_dict[host_ru[hostidx]] = host_id_dict[host_ru[hostidx]] + 1
-# cardid = host_id_dict[host_ru[hostidx]]
-# else:
-# host_id_dict[host_ru[hostidx]] = 0
-# ru_configs.append( {"host": host_ru[hostidx],
-# "card_id": cardid,
-# "region_id": region_id[hostidx],
-# "start_channel": ru_channel_counts[region_id[hostidx]],
-# "channel_count": number_of_data_producers })
-# ru_channel_counts[region_id[hostidx]] += number_of_data_producers
-
-# if debug:
-# console.log(f"Output data written to \"{output_path}\"")
max_expected_tr_sequences = 1
@@ -296,8 +386,8 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
TRB_TIMEOUT_SAFETY_FACTOR = 2
DFO_TIMEOUT_SAFETY_FACTOR = 2
MINIMUM_DFO_TIMEOUT = 10000
- readout_data_request_timeout = boot.data_request_timeout_ms # can that be put somewhere else? in dataflow?
- trigger_data_request_timeout = boot.data_request_timeout_ms
+ readout_data_request_timeout = daq_common.data_request_timeout_ms # can that be put somewhere else? in dataflow?
+ trigger_data_request_timeout = daq_common.data_request_timeout_ms
trigger_record_building_timeout = max(MINIMUM_BASIC_TRB_TIMEOUT, TRB_TIMEOUT_SAFETY_FACTOR * max(readout_data_request_timeout, trigger_data_request_timeout))
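+    # e.g. with data_request_timeout_ms = 1000, this starts from max(MINIMUM_BASIC_TRB_TIMEOUT, 2 * 1000) ms
+    # before the additional per-sequence adjustment applied below.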
if len(ru_descs) >= 1:
effective_number_of_data_producers = number_of_ru_streams
@@ -308,6 +398,10 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
trigger_record_building_timeout += 15 * TRB_TIMEOUT_SAFETY_FACTOR * max_expected_tr_sequences
dfo_stop_timeout = max(DFO_TIMEOUT_SAFETY_FACTOR * trigger_record_building_timeout, MINIMUM_DFO_TIMEOUT)
+
+ #--------------------------------------------------------------------------
+ # CTB
+ #--------------------------------------------------------------------------
if ctb_hsi.use_ctb_hsi:
ctb_llt_source_id = sourceid_broker.get_next_source_id("HW_Signals_Interface")
sourceid_broker.register_source_id("HW_Signals_Interface", ctb_llt_source_id, None)
@@ -316,102 +410,70 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
sourceid_broker.register_source_id("HW_Signals_Interface", ctb_hlt_source_id, None)
the_system.apps["ctbhsi"] = get_ctb_hsi_app(
+ ctb_hsi,
nickname = "ctb",
LLT_SOURCE_ID=ctb_llt_source_id,
HLT_SOURCE_ID=ctb_hlt_source_id,
- HOST=ctb_hsi.host_ctb_hsi,
- HLT_LIST=ctb_hsi.hlt_triggers,
- BEAM_LLT_LIST=ctb_hsi.beam_llt_triggers,
- CRT_LLT_LIST=ctb_hsi.crt_llt_triggers,
- PDS_LLT_LIST=ctb_hsi.pds_llt_triggers,
- FAKE_TRIG_1=ctb_hsi.fake_trig_1,
- FAKE_TRIG_2=ctb_hsi.fake_trig_2
)
if debug: console.log("ctb hsi cmd data:", the_system.apps["ctbhsi"])
+ #--------------------------------------------------------------------------
+ # Real HSI
+ #--------------------------------------------------------------------------
if hsi.use_timing_hsi:
timing_hsi_source_id = sourceid_broker.get_next_source_id("HW_Signals_Interface")
sourceid_broker.register_source_id("HW_Signals_Interface", timing_hsi_source_id, None)
the_system.apps["timinghsi"] = get_timing_hsi_app(
- CLOCK_SPEED_HZ = readout.clock_speed_hz,
- TRIGGER_RATE_HZ = trigger.trigger_rate_hz,
- CONTROL_HSI_HARDWARE=hsi.control_hsi_hw,
- CONNECTIONS_FILE=hsi.hsi_hw_connections_file,
- READOUT_PERIOD_US = hsi.hsi_readout_period,
- HSI_DEVICE_NAME = hsi.hsi_device_name,
- HARDWARE_STATE_RECOVERY_ENABLED = hsi.enable_hardware_state_recovery,
- HSI_ENDPOINT_ADDRESS = hsi.hsi_endpoint_address,
- HSI_ENDPOINT_PARTITION = hsi.hsi_endpoint_partition,
- HSI_RE_MASK=hsi.hsi_re_mask,
- HSI_FE_MASK=hsi.hsi_fe_mask,
- HSI_INV_MASK=hsi.hsi_inv_mask,
- HSI_SOURCE=hsi.hsi_source,
- HSI_SOURCE_ID=timing_hsi_source_id,
- TIMING_SESSION=timing.timing_session_name,
- HOST=hsi.host_timing_hsi,
- DEBUG=debug)
+ hsi = hsi,
+ detector = detector,
+ source_id = timing_hsi_source_id,
+ daq_common = daq_common,
+ timing_session_name = timing.timing_session_name,
+ DEBUG=debug
+ )
if debug: console.log("timing hsi cmd data:", the_system.apps["timinghsi"])
+ #--------------------------------------------------------------------------
+ # Fake HSI
+ #--------------------------------------------------------------------------
if hsi.use_fake_hsi:
fake_hsi_source_id = sourceid_broker.get_next_source_id("HW_Signals_Interface")
sourceid_broker.register_source_id("HW_Signals_Interface", fake_hsi_source_id, None)
the_system.apps["fakehsi"] = get_fake_hsi_app(
- CLOCK_SPEED_HZ = readout.clock_speed_hz,
- DATA_RATE_SLOWDOWN_FACTOR = readout.data_rate_slowdown_factor,
- TRIGGER_RATE_HZ = trigger.trigger_rate_hz,
- HSI_SOURCE_ID=fake_hsi_source_id,
- MEAN_SIGNAL_MULTIPLICITY = hsi.mean_hsi_signal_multiplicity,
- SIGNAL_EMULATION_MODE = hsi.hsi_signal_emulation_mode,
- ENABLED_SIGNALS = hsi.enabled_hsi_signals,
- HOST=hsi.host_fake_hsi,
+ hsi = hsi,
+ detector = detector,
+ daq_common = daq_common,
+ source_id = fake_hsi_source_id,
DEBUG=debug)
if debug: console.log("fake hsi cmd data:", the_system.apps["fakehsi"])
# the_system.apps["hsi"] = util.App(modulegraph=mgraph_hsi, host=hsi.host_hsi)
+ #--------------------------------------------------------------------------
+ # Timing controller
+ #--------------------------------------------------------------------------
if timing.control_timing_partition:
the_system.apps["tprtc"] = get_tprtc_app(
- MASTER_DEVICE_NAME=timing.timing_partition_master_device_name,
- TIMING_PARTITION_ID=timing.timing_partition_id,
- TRIGGER_MASK=timing.timing_partition_trigger_mask,
- RATE_CONTROL_ENABLED=timing.timing_partition_rate_control_enabled,
- SPILL_GATE_ENABLED=timing.timing_partition_spill_gate_enabled,
- TIMING_SESSION=timing.timing_session_name,
- HOST=timing.host_tprtc,
- DEBUG=debug)
+ timing,
+ DEBUG=debug
+ )
+ #--------------------------------------------------------------------------
+ # Trigger
+ #--------------------------------------------------------------------------
the_system.apps['trigger'] = get_trigger_app(
- DATA_RATE_SLOWDOWN_FACTOR = readout.data_rate_slowdown_factor,
- CLOCK_SPEED_HZ = readout.clock_speed_hz,
- TP_CONFIG = tp_infos,
- TOLERATE_INCOMPLETENESS=trigger.tolerate_incompleteness,
- COMPLETENESS_TOLERANCE=trigger.completeness_tolerance,
- ACTIVITY_PLUGIN = trigger.trigger_activity_plugin,
- ACTIVITY_CONFIG = trigger.trigger_activity_config,
- CANDIDATE_PLUGIN = trigger.trigger_candidate_plugin,
- CANDIDATE_CONFIG = trigger.trigger_candidate_config,
- TTCM_S1=trigger.ttcm_s1,
- TTCM_S2=trigger.ttcm_s2,
- TRIGGER_WINDOW_BEFORE_TICKS = trigger.trigger_window_before_ticks,
- TRIGGER_WINDOW_AFTER_TICKS = trigger.trigger_window_after_ticks,
- HSI_TRIGGER_TYPE_PASSTHROUGH = trigger.hsi_trigger_type_passthrough,
- MLT_MERGE_OVERLAPPING_TCS = trigger.mlt_merge_overlapping_tcs,
- MLT_BUFFER_TIMEOUT = trigger.mlt_buffer_timeout,
- MLT_MAX_TD_LENGTH_MS = trigger.mlt_max_td_length_ms,
- MLT_SEND_TIMED_OUT_TDS = trigger.mlt_send_timed_out_tds,
- MLT_IGNORE_TC = trigger.mlt_ignore_tc,
- MLT_USE_READOUT_MAP = trigger.mlt_use_readout_map,
- MLT_READOUT_MAP = trigger.mlt_td_readout_map,
- USE_CUSTOM_MAKER = trigger.use_custom_maker,
- CTCM_TYPES = trigger.ctcm_trigger_types,
- CTCM_INTERVAL = trigger.ctcm_trigger_intervals,
- CHANNEL_MAP_NAME = trigger.tpg_channel_map,
- DATA_REQUEST_TIMEOUT=trigger_data_request_timeout,
- HOST=trigger.host_trigger,
+ trigger=trigger,
+ detector=detector,
+ daq_common=daq_common,
+ tp_infos=tp_infos,
+ trigger_data_request_timeout=trigger_data_request_timeout,
DEBUG=debug)
+ #--------------------------------------------------------------------------
+ # DFO
+ #--------------------------------------------------------------------------
the_system.apps['dfo'] = get_dfo_app(
FREE_COUNT = max(1, dataflow.token_count / 2),
BUSY_COUNT = dataflow.token_count,
@@ -424,84 +486,37 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
trb_dqm_sourceid_offset = sourceid_broker.get_next_source_id("TRBuilder")
ru_app_names=[]
dqm_app_names = []
+
+ #--------------------------------------------------------------------------
+ # Readout generation
+ #--------------------------------------------------------------------------
+ roapp_gen = ReadoutAppGenerator(readout, detector, daq_common)
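+    # A single generator instance holds the readout/detector/daq_common settings and is
+    # reused below to build one readout application per readout unit.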
for ru_i,(ru_name, ru_desc) in enumerate(ru_descs.items()):
+ #--------------------------------------------------------------------------
+ # Readout applications
+ #--------------------------------------------------------------------------
if readout.use_fake_data_producers == False:
- numa_id = readout.numa_config['default_id']
- latency_numa = readout.numa_config['default_latency_numa_aware']
- latency_preallocate = readout.numa_config['default_latency_preallocation']
- card_override = -1
- for ex in readout.numa_config['exceptions']:
- if ex['host'] == ru_desc.host_name and ex['card'] == ru_desc.iface:
- numa_id = ex['numa_id']
- latency_numa = ex['latency_buffer_numa_aware']
- latency_preallocate = ex['latency_buffer_preallocation']
- card_override = ex['felix_card_id']
-
- the_system.apps[ru_name] = create_readout_app(
- RU_DESCRIPTOR = ru_desc,
- SOURCEID_BROKER = sourceid_broker,
- EMULATOR_MODE = readout.emulator_mode,
- DATA_RATE_SLOWDOWN_FACTOR = readout.data_rate_slowdown_factor,
- DEFAULT_DATA_FILE = readout.default_data_file,
- DATA_FILES = data_file_map,
- USE_FAKE_CARDS = readout.use_fake_cards,
- CLOCK_SPEED_HZ = readout.clock_speed_hz,
- RAW_RECORDING_ENABLED = readout.enable_raw_recording,
- RAW_RECORDING_OUTPUT_DIR = readout.raw_recording_output_dir,
- TPG_ENABLED = readout.enable_tpg,
- THRESHOLD_TPG = readout.tpg_threshold,
- ALGORITHM_TPG = readout.tpg_algorithm,
- CHANNEL_MASK_TPG = readout.tpg_channel_mask,
- TPG_CHANNEL_MAP = trigger.tpg_channel_map,
- LATENCY_BUFFER_SIZE=readout.latency_buffer_size,
- DATA_REQUEST_TIMEOUT=readout_data_request_timeout,
- FRAGMENT_SEND_TIMEOUT=readout.fragment_send_timeout_ms,
- READOUT_SENDS_TP_FRAGMENTS = readout.readout_sends_tp_fragments,
- EAL_ARGS=readout.eal_args,
- NUMA_ID = numa_id,
- LATENCY_BUFFER_NUMA_AWARE = latency_numa,
- LATENCY_BUFFER_ALLOCATION_MODE = latency_preallocate,
- CARD_ID_OVERRIDE = card_override,
- EMULATED_DATA_TIMES_START_WITH_NOW = readout.emulated_data_times_start_with_now,
- DEBUG=debug)
+ the_system.apps[ru_name] = roapp_gen.generate(
+ RU_DESCRIPTOR=ru_desc,
+ SOURCEID_BROKER=sourceid_broker,
+ data_file_map=data_file_map,
+ data_timeout_requests=readout_data_request_timeout
+ )
- if boot.use_k8s:
- if ru_desc.kind == 'flx':
- c = card_override if card_override != -1 else ru_desc.iface
- the_system.apps[ru_name].resources = {
- f"felix.cern/flx{c}-data": "1", # requesting FLX{c}
- "memory": "32Gi" # yes bro
- }
-
- dir_names = set()
-
- if os.path.commonprefix(['/cvmfs', readout.default_data_file]) != '/cvmfs':
- dir_names.add(dirname(readout.default_data_file))
-
- for id,file in data_file_map:
- if os.path.commonprefix(['/cvmfs', file]) != '/cvmfs':
- dir_names.add(dirname(file))
-
- dirindex = 0
- for dir_name in dir_names:
- the_system.apps[ru_name].mounted_dirs += [{
- 'name': f'frames-bin-{dirindex}',
- 'physical_location': dir_name,
- 'in_pod_location': dir_name,
- 'read_only': True,
- }]
- dirindex += 1
else:
- the_system.apps[ru_name] = create_fake_reaout_app(
+ the_system.apps[ru_name] = create_fake_readout_app(
RU_DESCRIPTOR = ru_desc,
- CLOCK_SPEED_HZ = readout.clock_speed_hz,
+ CLOCK_SPEED_HZ = detector.clock_speed_hz,
)
if debug:
console.log(f"{ru_name} app: {the_system.apps[ru_name]}")
+ #--------------------------------------------------------------------------
+ # DQM frontend applications
+ #--------------------------------------------------------------------------
if dqm.enable_dqm:
dqm_name = "dqm" + ru_name
dqm_app_names.append(dqm_name)
@@ -516,8 +531,8 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
the_system.apps[dqm_name] = get_dqm_app(
DQM_IMPL=dqm.impl,
- DATA_RATE_SLOWDOWN_FACTOR=readout.data_rate_slowdown_factor,
- CLOCK_SPEED_HZ=readout.clock_speed_hz,
+ DATA_RATE_SLOWDOWN_FACTOR=daq_common.data_rate_slowdown_factor,
+ CLOCK_SPEED_HZ=detector.clock_speed_hz,
MAX_NUM_FRAMES=dqm.max_num_frames,
DQMIDX = ru_i,
KAFKA_ADDRESS=dqm.kafka_address,
@@ -533,39 +548,34 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
TRB_DQM_SOURCEID_OFFSET=trb_dqm_sourceid_offset,
HOST=dqm.host_dqm[ru_i % len(dqm.host_dqm)],
RU_STREAMS=ru_desc.streams,
- DEBUG=debug)
+ DEBUG=debug
+ )
if debug: console.log(f"{dqm_name} app: {the_system.apps[dqm_name]}")
+
+
+ #--------------------------------------------------------------------------
+    # Dataflow applications generation
+ #--------------------------------------------------------------------------
dqm_df_app_names = []
idx = 0
for app_name,df_config in appconfig_df.items():
dfidx = df_config.source_id
the_system.apps[app_name] = get_dataflow_app(
+ df_config = df_config,
+ dataflow = dataflow,
+ detector = detector,
HOSTIDX=dfidx,
- OUTPUT_PATHS = df_config.output_paths,
APP_NAME=app_name,
- OPERATIONAL_ENVIRONMENT = boot.op_env,
- FILE_LABEL = file_label if file_label else boot.op_env,
- DATA_STORE_MODE=df_config.data_store_mode,
- MAX_FILE_SIZE = df_config.max_file_size,
- MAX_TRIGGER_RECORD_WINDOW = df_config.max_trigger_record_window,
+ FILE_LABEL = file_label,
MAX_EXPECTED_TR_SEQUENCES = max_expected_tr_sequences,
- TOKEN_COUNT = dataflow.token_count,
TRB_TIMEOUT = trigger_record_building_timeout,
- HOST=df_config.host_df,
HAS_DQM=dqm.enable_dqm,
SRC_GEO_ID_MAP=dro_map.get_src_geo_map(),
DEBUG=debug
)
- if boot.use_k8s:
- the_system.apps[app_name].mounted_dirs += [{
- 'name': f'raw-data-{i}',
- 'physical_location': opath,
- 'in_pod_location': opath,
- 'read_only': False,
- } for i,opath in enumerate(set(df_config.output_paths))]
if dqm.enable_dqm:
@@ -574,8 +584,8 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
dqm_links = [ s.src_id for s in ru_desc.streams ]
the_system.apps[dqm_name] = get_dqm_app(
DQM_IMPL=dqm.impl,
- DATA_RATE_SLOWDOWN_FACTOR = readout.data_rate_slowdown_factor,
- CLOCK_SPEED_HZ = readout.clock_speed_hz,
+ DATA_RATE_SLOWDOWN_FACTOR = daq_common.data_rate_slowdown_factor,
+ CLOCK_SPEED_HZ = detector.clock_speed_hz,
MAX_NUM_FRAMES=dqm.max_num_frames,
DQMIDX = dfidx,
KAFKA_ADDRESS=dqm.kafka_address,
@@ -592,46 +602,45 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
DF_RATE=dqm.df_rate * len(host_df),
DF_ALGS=dqm.df_algs,
DF_TIME_WINDOW=trigger.trigger_window_before_ticks + trigger.trigger_window_after_ticks,
- # DRO_CONFIG=dro_config, # This is coming from the readout loop
RU_STREAMS=ru_desc.streams, # This is coming from the readout loop
DEBUG=debug)
if debug: console.log(f"{dqm_name} app: {the_system.apps[dqm_name]}")
idx += 1
- if trigger.enable_tpset_writing:
+ #--------------------------------------------------------------------------
+ # TPSet Writer applications generation
+ #--------------------------------------------------------------------------
+ if dataflow.enable_tpset_writing:
tpw_name=f'tpwriter'
dfidx = sourceid_broker.get_next_source_id("TRBuilder")
sourceid_broker.register_source_id("TRBuilder", dfidx, None)
the_system.apps[tpw_name] = get_tpwriter_app(
- OUTPUT_PATH = trigger.tpset_output_path,
- APP_NAME = tpw_name,
- OPERATIONAL_ENVIRONMENT = boot.op_env,
- MAX_FILE_SIZE = trigger.tpset_output_file_size,
- DATA_RATE_SLOWDOWN_FACTOR = readout.data_rate_slowdown_factor,
- CLOCK_SPEED_HZ = readout.clock_speed_hz,
+ dataflow=dataflow,
+ detector=detector,
+ daq_common=daq_common,
+ app_name=tpw_name,
+ file_label=file_label,
+ source_id=dfidx,
SRC_GEO_ID_MAP=dro_map.get_src_geo_map(),
- SOURCE_IDX=dfidx,
- HOST=trigger.host_tpw,
- DEBUG=debug)
- if boot.use_k8s: ## TODO schema
- the_system.apps[tpw_name].mounted_dirs += [{
- 'name': 'raw-data',
- 'physical_location':trigger.tpset_output_path,
- 'in_pod_location':trigger.tpset_output_path,
- 'read_only': False
- }]
+ DEBUG=debug
+ )
if debug: console.log(f"{tpw_name} app: {the_system.apps[tpw_name]}")
+
+ # if dpdk_sender.enable_dpdk_sender:
+ # the_system.apps["dpdk_sender"] = get_dpdk_sender_app(
+ # HOST=dpdk_sender.host_dpdk_sender[0],
+ # )
+
+ #--------------------------------------------------------------------------
+ # App generation completed
+ #--------------------------------------------------------------------------
+
all_apps_except_ru = []
all_apps_except_ru_and_df = []
- if dpdk_sender.enable_dpdk_sender:
- the_system.apps["dpdk_sender"] = get_dpdk_sender_app(
- HOST=dpdk_sender.host_dpdk_sender[0],
- )
-
for name,app in the_system.apps.items():
if app.name=="__app":
app.name=name
@@ -663,22 +672,6 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
if debug:
console.log(f"After set_mlt_links, mlt_links is {mlt_links}")
- # HACK HACK HACK P. Rodrigues 2022-03-04 We decided not to request
- # TPs from readout for the 2.10 release. It would be nice to
- # achieve this by just not adding fragment producers for the
- # relevant links in readout_gen.py, but then the necessary input
- # and output queues for the DataLinkHandler modules are not
- # created. So instead we do it this roundabout way: the fragment
- # producers are all created, they are added to the MLT's list of
- # links to read out from (in set_mlt_links above), and then
- # removed here. We rely on a convention that TP links have element
- # value >= 1000.
- #
- # This code should be removed after 2.10, when we will have
- # decided how to handle raw TP data as fragments
-# for link in mlt_links:
-# if link["subsystem"] == system_type and link["element"] >= 1000:
-# remove_mlt_link(the_system, link)
mlt_links=the_system.apps["trigger"].modulegraph.get_module("mlt").conf.links
if debug:
@@ -694,36 +687,27 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
# Arrange per-app command data into the format used by util.write_json_files()
app_command_datas = {
- name : make_app_command_data(the_system, app,name, verbose=debug, use_k8s=boot.use_k8s, use_connectivity_service=boot.use_connectivity_service, connectivity_service_interval=boot.connectivity_service_interval)
+ name : make_app_command_data(the_system, app,name, verbose=debug, use_k8s=use_k8s, use_connectivity_service=boot.use_connectivity_service, connectivity_service_interval=boot.connectivity_service_interval)
for name,app in the_system.apps.items()
}
##################################################################################
-
# Make boot.json config
- from daqconf.core.conf_utils import make_system_command_datas,generate_boot, write_json_files
+ from daqconf.core.conf_utils import make_system_command_datas, write_json_files
# HACK: Make sure RUs start after trigger
forced_deps = []
- # for i,host in enumerate(ru_confs):
- # ru_name = ru_app_names[i]
for name in ru_app_names:
forced_deps.append(['hsi', ru_name])
- if trigger.enable_tpset_writing:
+ if dataflow.enable_tpset_writing:
forced_deps.append(['tpwriter', ru_name])
if dqm.enable_dqm:
- # for i,host in enumerate(ru_confs):
- # dqm_name = dqm_app_names[i]
for dqm_name in dqm_app_names:
forced_deps.append([dqm_name, 'dfo'])
- # for i,host in enumerate(host_df):
- # dqm_name = dqm_df_app_names[i]
-
-
for dqm_name in dqm_df_app_names:
forced_deps.append([dqm_name, 'dfo'])
@@ -744,7 +728,7 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
else:
return control_hostname
- if boot.use_data_network:
+ if daq_common.use_data_network:
CDN = control_to_data_network
else:
CDN = None
@@ -759,7 +743,10 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
if readout.thread_pinning_file != "":
- resolved_thread_pinning_file = Path(os.path.expandvars(readout.thread_pinning_file)).expanduser().absolute()
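+        # Expand environment variables and '~', then interpret any relative path with respect to
+        # the configuration file's directory rather than the current working directory.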
+ resolved_thread_pinning_file = Path(os.path.expandvars(readout.thread_pinning_file)).expanduser()
+ if not resolved_thread_pinning_file.is_absolute():
+ resolved_thread_pinning_file = config_file.parent / resolved_thread_pinning_file
+
if not resolved_thread_pinning_file.exists():
raise RuntimeError(f'Cannot find the file {readout.thread_pinning_file} ({resolved_thread_pinning_file})')
@@ -769,7 +756,7 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
"readout-affinity.py --pinfile ${DUNEDAQ_THREAD_PIN_FILE}"
],
"env": {
- "DUNEDAQ_THREAD_PIN_FILE": str(resolved_thread_pinning_file),
+ "DUNEDAQ_THREAD_PIN_FILE": resolved_thread_pinning_file.resolve().as_posix(),
"LD_LIBRARY_PATH": "getenv",
"PATH": "getenv"
}
@@ -778,16 +765,20 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
if not dry_run:
+ import dunedaq.daqconf.confgen as confgen
+
write_json_files(app_command_datas, system_command_datas, output_dir, verbose=debug)
console.log(f"MDAapp config generated in {output_dir}")
- write_metadata_file(output_dir, "daqconf_multiru_gen", config_file)
+ write_metadata_file(output_dir, "daqconf_multiru_gen", config_file.as_posix())
write_config_file(
output_dir,
- basename(config_file) if config_file else "default.json",
- confgen.daqconf_multiru_gen( #
+ config_file.name if config_file else "default.json",
+ confgen.daqconf_multiru_gen( # :facepalm:
boot = boot,
+ detector = detector,
+ daq_common = daq_common,
dataflow = dataflow,
dqm = dqm,
hsi = hsi,
@@ -795,7 +786,7 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
readout = readout,
timing = timing,
trigger = trigger,
- dpdk_sender = dpdk_sender,
+ # dpdk_sender = dpdk_sender,
) #
)
@@ -806,8 +797,11 @@ def cli(config, base_command_port, detector_readout_map_file, data_rate_slowdown
the_system.apps[name].export(debug_dir / f"{name}.dot")
if __name__ == '__main__':
+ # console.log("daqconf - started")
try:
cli(show_default=True, standalone_mode=True)
except Exception as e:
console.print_exception()
raise SystemExit(-1)
+ # console.log("daqconf - finished")
+
diff --git a/scripts/daqconf_viewer b/scripts/daqconf_viewer
new file mode 100755
index 00000000..3a417018
--- /dev/null
+++ b/scripts/daqconf_viewer
@@ -0,0 +1,760 @@
+#!/usr/bin/env python3
+import asyncio
+import copy
+import click
+import httpx
+import io
+import json
+import os
+import sys
+import tarfile
+import tempfile
+
+from difflib import unified_diff
+from pathlib import Path
+from rich.markdown import Markdown
+from rich.text import Text
+
+from textual import log, events
+from textual.app import App, ComposeResult
+from textual.binding import Binding
+from textual.containers import Content, Container, Horizontal, Vertical
+from textual.reactive import reactive, Reactive
+from textual.screen import Screen
+from textual.widget import Widget
+from textual.widgets import Button, DirectoryTree, Footer, Header, Input, Label, ListItem, ListView, Static, Tree
+
+auth = ("fooUsr", "barPass")
+oldconf = None
+oldconfname = None
+oldconfver = None
+dir_object = None
+
+class TitleBox(Static):
+ def __init__(self, title, **kwargs):
+ super().__init__(Markdown(f'# {title}'))
+
+ def update(self, text):
+ super().update(Markdown(f'# {text}'))
+
+class LabelItem(ListItem):
+ def __init__(self, label: str) -> None:
+ super().__init__()
+ self.label = label
+
+ def compose( self ) -> ComposeResult:
+ yield Label(self.label)
+
+class Configs(Static):
+ conflist = reactive([])
+
+ def __init__(self, hostname, **kwargs):
+ super().__init__(**kwargs)
+ self.hostname = hostname
+ self.term = ""
+
+ def on_mount(self) -> None:
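+        # Poll the configuration service for the list of configurations every 0.1 s.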
+ self.set_interval(0.1, self.update_configs)
+
+ def compose(self) -> ComposeResult:
+ yield TitleBox('Configurations')
+ yield Input(placeholder='Search Configs')
+ yield ListView(LabelItem("This shouldn't be visible!"))
+
+ async def update_configs(self) -> None:
+ try:
+ async with httpx.AsyncClient() as client:
+ r = await client.get(f'{self.hostname}/listConfigs', auth=auth, timeout=5)
+ unsorted = r.json()['configs']
+ self.conflist = sorted(unsorted, key=str.lower)
+ except Exception as e:
+ #Exiting the program mid-request causes a CancelledError: we don't want to call our function
+            #in this case, as it will not be able to find the relevant widgets.
+ if isinstance(e, asyncio.CancelledError):
+ return
+ self.display_error(f"Couldn't retrieve configs from {self.hostname}/listConfigs\nError: {e}")
+
+ def watch_conflist(self, conflist:list[str]):
+ self.display_conflist()
+
+ async def on_input_changed(self, message: Input.Changed) -> None:
+ '''This event occurs whenever the user types in the search box.'''
+ self.term = message.value
+ self.display_conflist()
+
+ def display_conflist(self) -> None:
+ '''
+ We regenerate the list whenever the actual list of configs changes, or whenever the user types in the search box.
+        If the box is empty, don't filter; otherwise we require that the search term is in the name.
+ '''
+ if self.term == "":
+ filtered = self.conflist
+ else:
+ filtered = [name for name in self.conflist if self.term in name]
+
+ label_list = [LabelItem(f) for f in filtered]
+ the_list = self.query_one(ListView)
+ the_list.clear()
+ for item in label_list:
+ the_list.append(item)
+
+ def on_list_view_selected(self, event: ListView.Selected):
+ confname = event.item.label
+ for v in self.screen.query(Vertical):
+ if isinstance(v, Versions):
+ v.new_conf(confname)
+ break
+
+ def display_error(self, text):
+ '''If something goes wrong with getting the configs, we hijack the display to tell the user.'''
+ for v in self.screen.query(Vertical):
+ if isinstance(v, Display):
+ e_json = {'error': text}
+ v.confdata = e_json
+ break
+ if isinstance(v, DiffDisplay):
+ for s in v.query(Static):
+ if s.id == 'diffbox':
+ s.update(text)
+ break
+
+class ShortNodeTree(DirectoryTree):
+ '''We inherit everything from the dirtree, but we want to abbreviate the top node.'''
+ def process_label(self, label):
+ '''If a node is a/b/c, just display c'''
+ if '/' in label:
+ good_label = label.split('/')[-1]
+ else:
+ good_label = label
+ if isinstance(good_label, str):
+ text_label = Text.from_markup(good_label)
+ else:
+ text_label = good_label
+ first_line = text_label.split()[0]
+ return first_line
+
+class LocalConfigs(Static):
+ conflist = reactive([])
+
+ def __init__(self, hostname, path, **kwargs):
+ super().__init__(**kwargs)
+ self.hostname = hostname
+ path_obj = Path(path)
+ self.path = str(path_obj.resolve())
+
+ def compose(self) -> ComposeResult:
+ yield TitleBox('Configurations')
+ yield ShortNodeTree(self.path)
+
+ async def on_directory_tree_file_selected(self, event: DirectoryTree.FileSelected ) -> None:
+ location = event.path
+ filename = location.split('/')[-1]
+ try:
+ with open(location) as f:
+ self.current_conf = json.load(f)
+ #Look for a display to show the config to
+ for v in self.screen.query(Vertical):
+ if isinstance(v, Display) or isinstance(v, DiffDisplay):
+ await v.get_json_local(filename, self.current_conf)
+ break
+ except Exception as e:
+ self.display_error(f"Config at {location} is not usable\n Error: {e}")
+
+ def display_error(self, text):
+ '''If something goes wrong with getting the configs, we hijack the display to tell the user.'''
+ for v in self.screen.query(Vertical):
+ if isinstance(v, Display):
+ e_json = {'error': text}
+ v.confdata = e_json
+ break
+ if isinstance(v, DiffDisplay):
+ for s in v.query(Static):
+ if s.id == 'diffbox':
+ s.update(text)
+ break
+
+class RegistryConfigs(Static):
+ conflist = reactive([])
+
+ def __init__(self, hostname, **kwargs):
+ super().__init__(**kwargs)
+ self.hostname = hostname
+ self.path = None
+
+ def compose(self) -> ComposeResult:
+ yield TitleBox('Configurations')
+
+ def new_directory(self, path) -> None:
+ path_obj = Path(path)
+ self.path = str(path_obj.resolve())
+ for p in self.query(ShortNodeTree): #Delete any existing file trees
+ p.remove()
+ self.mount(ShortNodeTree(self.path))
+
+ async def on_directory_tree_file_selected(self, event: DirectoryTree.FileSelected ) -> None:
+ location = event.path
+ filename = location.split('/')[-1]
+ try:
+ with open(location) as f:
+ self.current_conf = json.load(f)
+ #Look for a display to show the config to
+ for v in self.screen.query(Vertical):
+ if isinstance(v, Display) or isinstance(v, DiffDisplay):
+ await v.get_json_local(filename, self.current_conf)
+ break
+ except Exception as e:
+ self.display_error(f"Config at {location} is not usable\n Error: {e}")
+
+ def clear(self):
+ for p in self.query(ShortNodeTree):
+ p.remove()
+
+ def display_error(self, text):
+ '''If something goes wrong with getting the configs, we hijack the display to tell the user.'''
+ for v in self.screen.query(Vertical):
+ if isinstance(v, Display):
+ e_json = {'error': text}
+ v.confdata = e_json
+ break
+ if isinstance(v, DiffDisplay):
+ for s in v.query(Static):
+ if s.id == 'diffbox':
+ s.update(text)
+ break
+
+class Versions(Vertical):
+ vlist = reactive([])
+
+ def __init__(self, hostname, **kwargs):
+ super().__init__(**kwargs)
+ self.hostname = hostname
+ self.current_conf = None
+ self.reverse = True
+
+ def compose(self) -> ComposeResult:
+ yield TitleBox(f'Configuration versions')
+ yield Horizontal(id='buttonbox')
+
+ def on_mount(self) -> None:
+ self.set_interval(0.1, self.update_versions)
+
+ def new_conf(self, conf) -> None:
+ self.current_conf = conf
+
+ async def update_versions(self) -> None:
+ if self.current_conf:
+ try:
+ async with httpx.AsyncClient() as client:
+ payload = {'name': self.current_conf}
+ r = await client.get(f'{self.hostname}/listVersions', auth=auth, params=payload, timeout=5)
+ numlist = r.json()['versions'] #This is a list of ints
+ if self.reverse:
+ numlist.reverse()
+ self.vlist = numlist
+ except Exception as e:
+                if isinstance(e, asyncio.CancelledError):
+                    return
+                self.display_error(f"Couldn't retrieve versions from {self.hostname}/listVersions\nError: {e}")
+
+ def watch_vlist(self, vlist:list[int]) -> None:
+ bb = self.query_one(Horizontal)
+ old_buttons = bb.query(Button)
+ for b in old_buttons:
+ b.remove()
+ for v in vlist:
+ b_id = 'v' + str(v) #An id can't be just a number for some reason
+ bb.mount(Button(str(v), id=b_id, classes='vbuttons', variant='primary'))
+
+ async def on_button_pressed (self, event: Button.Pressed) -> None:
+ button_id = event.button.id
+ version = int(button_id[1:])
+ for v in self.screen.query(Vertical):
+ if isinstance(v, Display) or isinstance(v, DiffDisplay):
+ await v.get_json(self.current_conf, version)
+ break
+
+ def display_error(self, text):
+ '''If something goes wrong with getting the configs, we hijack the display to tell the user.'''
+ for v in self.screen.query(Vertical):
+ if isinstance(v, Display):
+ e_json = {'error': text}
+ v.confdata = e_json
+ break
+ if isinstance(v, DiffDisplay):
+ for s in v.query(Static):
+ if s.id == 'diffbox':
+ s.update(text)
+ break
+
+class Display(Vertical):
+ confdata = reactive(None)
+
+ def __init__(self, hostname, **kwargs):
+ super().__init__(**kwargs)
+ self.hostname = hostname
+ self.confname = None
+ self.version = None
+
+ def compose(self) -> ComposeResult:
+ yield TitleBox('Configuration data')
+ yield Tree("", id='conftree')
+
+ async def get_json(self, conf, ver) -> None:
+ self.confname = conf
+ self.version = ver
+ if self.confname != None and self.version != None:
+ try:
+ async with httpx.AsyncClient() as client:
+ payload = {'name': self.confname, 'version': self.version}
+ r = await client.get(f'{self.hostname}/retrieveVersion', auth=auth, params=payload, timeout=5)
+ self.confdata = r.json()
+ except:
+                self.confdata = {"error": f"Couldn't retrieve the configuration at {self.hostname}/retrieveVersion (payload: {payload})"}
+
+ async def get_json_local(self, name, conf) -> None:
+ self.confname = name
+ self.version = -1
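+        # A version of -1 marks a configuration loaded from a local file rather than from the service.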
+ self.confdata = conf
+
+ def json_into_tree(cls, node, json_data):
+ """Takes a JSON, and puts it into the tree."""
+ from rich.highlighter import ReprHighlighter
+
+ highlighter = ReprHighlighter()
+
+ def add_node(name, node, data) -> None:
+ """Adds a node to the tree.
+ Args:
+ name (str): Name of the node.
+ node (TreeNode): Parent node.
+ data (object): Data associated with the node.
+ """
+ if isinstance(data, dict):
+ node.set_label(Text(f"{{}} {name}"))
+ for key, value in data.items():
+ new_node = node.add("")
+ add_node(key, new_node, value)
+ elif isinstance(data, list):
+ node.set_label(Text(f"[] {name}"))
+ for index, value in enumerate(data):
+ new_node = node.add("")
+ add_node(str(index), new_node, value)
+ else:
+ node.allow_expand = False
+ if name:
+ label = Text.assemble(
+ Text.from_markup(f"[b]{name}[/b]="), highlighter(repr(data))
+ )
+ else:
+ label = Text(repr(data))
+ node.set_label(label)
+
+ add_node("", node, json_data)
+
+ def watch_confdata(self, confdata:dict) -> None:
+ tree = self.query_one(Tree)
+ tree.clear()
+ if confdata:
+ self.json_into_tree(tree.root, confdata)
+ tree.root.expand()
+
+ def on_button_pressed(self, event: Button.Pressed) -> None:
+ if self.confdata:
+ self.app.mount(DiffScreen(self.hostname, self.confdata, id='diffscreen'))
+
+class DiffDisplay(Vertical):
+ confdata = reactive(None)
+
+ def __init__(self, hostname, **kwargs):
+ super().__init__(**kwargs)
+ self.hostname = hostname
+ self.confname = None
+ self.version = None
+
+ def compose(self) -> ComposeResult:
+ yield TitleBox("Diff")
+ yield Vertical(Static(id='diffbox'))
+
+ def on_mount(self) -> None:
+ self.set_interval(0.1, self.update_title)
+
+ def update_title(self) -> None:
+ #If the config is local, we set the version number to -1
+ vold = "(local)" if oldconfver == -1 else f"(v{oldconfver})"
+ vnew = "(local)" if self.version == -1 else f"(v{self.version})"
+
+ if self.version != None:
+ difftext = f"Comparing {str(oldconfname)} {vold} with {self.confname} {vnew}"
+ else:
+ difftext = f"Comparing {str(oldconfname)} {vold} with..."
+ title = self.query_one(TitleBox)
+ title.update(difftext)
+
+ async def get_json(self, conf, ver) -> None:
+ self.confname = conf
+ self.version = ver
+        if self.confname is not None and self.version is not None:
+            payload = {'name': self.confname, 'version': self.version}
+            try:
+                async with httpx.AsyncClient() as client:
+                    r = await client.get(f'{self.hostname}/retrieveVersion', auth=auth, params=payload, timeout=5)
+                    self.confdata = r.json()
+            except Exception:
+                self.confdata = {"error": f"Couldn't retrieve the configuration at {self.hostname}/retrieveVersion (payload: {payload})"}
+
+ async def get_json_local(self, name, conf) -> None:
+ self.confname = name
+ self.version = -1
+ self.confdata = conf
+
+ async def watch_confdata(self, confdata:dict) -> None:
+ '''Turns the jsons into a string format with newlines, then generates a diff of the two.'''
+ if confdata:
+ if "error" in confdata:
+ for s in self.query(Static):
+ if s.id == 'diffbox':
+ s.update(confdata['error'])
+ break
+ else:
+ j1 = copy.deepcopy(oldconf)
+ j2 = copy.deepcopy(confdata)
+ if "_id" in j1: del j1["_id"] #We don't want to include the ID in the diff since it's always different.
+ if "_id" in j2: del j2["_id"]
+ a = json.dumps(j1, sort_keys=True, indent=4).splitlines(keepends=True)
+ b = json.dumps(j2, sort_keys=True, indent=4).splitlines(keepends=True)
+ delta = unified_diff(a,b)
+ diff = Text()
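+                # Colour-code each diff line: additions in green, removals in red, hunk headers in gold.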
+ for d in delta:
+ sym = d[0]
+ match sym:
+ case '+':
+ t = Text(d, style='green')
+ case '-':
+ t = Text(d, style='red')
+ case '@':
+ t = Text(d, style='gold1')
+ case _:
+ t = Text(d)
+ diff += t
+
+ for s in self.query(Static):
+ if s.id == 'diffbox':
+ s.update(diff)
+ break
+
+ def on_button_pressed(self) -> None:
+ self.remove()
+
+class RunSelection(Vertical):
+ def __init__(self, hostname, **kwargs):
+ super().__init__(**kwargs)
+ self.hostname = hostname
+ self.current = None
+
+ def compose(self) -> ComposeResult:
+ yield TitleBox("Run Number")
+ yield Input(placeholder="Enter a run number")
+        yield Horizontal(
+            Button("<--", id="back", variant='primary'),
+            Button("Get Data", id="get", variant='primary'),
+            Button("-->", id="forward", variant='primary'),
+            classes="runbuttons"
+        )
+
+    async def on_mount(self) -> None:
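+        # On start-up, look up the most recent run and pre-load its metadata and configurations.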
+ async with httpx.AsyncClient() as client:
+ route = f'{self.hostname}/runregistry/getRunMetaLast/1'
+ r = await client.get(route, auth=auth, timeout=5)
+ runmeta = r.json()
+ headers = runmeta[0]
+ try:
+ data = runmeta[1][0]
+ await self.show_data("get", data[0])
+ except:
+ self.display_error("No data about most recent run")
+
+ async def on_button_pressed(self, event: Button.Pressed) -> None:
+ button_id = event.button.id
+ await self.show_data(button_id)
+
+ async def show_data(self, button_id, number=None):
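+        # Resolve the run number from the pressed button (or an explicit argument), then fetch that run's metadata and configuration blob.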
+ box = self.query_one(Input)
+        if number is None:
+ number = box.value
+ else:
+ box.value = str(number)
+
+ match button_id:
+ case "get":
+ if number == "": #If no number has been entered, then we should do nothing.
+ self.display_error("Please enter a run number!")
+ return
+                try:
+                    number = int(number)
+                except ValueError:
+                    self.display_error("Run number must be an integer")
+                    return
+ if number < 1:
+ self.display_error("Run numbers start at 1!")
+ return
+ self.current = number
+
+ case "back":
+                if self.current is None or self.current == 1: #If we are at the start, the back button does nothing
+ return
+ self.current -= 1
+ number = self.current
+ box.value = str(number)
+
+ case "forward":
+                if self.current is None:
+ return
+ self.current += 1
+ number = self.current
+ box.value = str(number)
+
+ async with httpx.AsyncClient() as client:
+ route1 = f'{self.hostname}/runregistry/getRunMeta/{number}'
+ route2 = f'{self.hostname}/runregistry/getRunBlob/{number}'
+ r1 = await client.get(route1, auth=auth, timeout=5)
+ r2 = await client.get(route2, auth=auth, timeout=5)
+ runmeta = r1.json() #Format is [[headers], [[row]]], but that's still a JSON
+ headers = runmeta[0]
+ try:
+ data = runmeta[1][0] #We will assume we only get one row at once (true for this sort of query)
+ except:
+ data = None
+ info = self.screen.query_one(RunInfo)
+ info.update(headers, data)
+
+ rc = self.screen.query_one(RegistryConfigs)
+ if r2.status_code == 500:
+ rc.clear()
+ self.display_error(f"No config data found for run {number}")
+ return
+
+ dis = self.screen.query_one(Display)
+ dis.confdata = None
+
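+        # getRunBlob returns the configurations as a gzipped tarball: write it to a temporary
+        # file, unpack it into a temporary directory, then point the config list at that directory.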
+        f = tempfile.NamedTemporaryFile(mode="w+b", suffix='.tar.gz', delete=False)
+        f.write(r2.content)
+        fname = f.name
+        f.close()
+
+        global dir_object #This is a global variable, since otherwise garbage collection deletes the directory!
+        dir_object = tempfile.TemporaryDirectory()
+        temp_name = dir_object.name
+        tar = tarfile.open(fname, "r:gz")
+        tar.extractall(temp_name)
+        tar.close()
+        os.unlink(f.name)
+
+        rc.new_directory(temp_name)
+        self.current = int(number)
+
+ def display_error(self, text):
+ '''If something goes wrong with getting the configs, we hijack the display to tell the user.'''
+ for v in self.screen.query(Vertical):
+ if isinstance(v, Display):
+ e_json = {'error': text}
+ v.confdata = e_json
+ break
+
+class RunInfo(Vertical):
+ def compose(self) -> ComposeResult:
+ yield TitleBox("Run Metadata")
+ yield Static(id='md')
+
+ def update(self, head, row):
+ text = ""
+ if row:
+ for i, val in enumerate(row):
+ text += f"{head[i]}: {val}\n"
+ else:
+ text = '\n'.join([h+':' for h in head])
+        text = text.rstrip()
+ for s in self.query(Static):
+ if s.id == "md":
+ s.update(text)
+ break
+
+
+class LocalDiffScreen(Screen):
+ BINDINGS = [
+ ("l", "switch_local", "DB Files"),
+ ("d", "end_diff", "Return")
+ ]
+
+ def __init__(self, hostname, path, **kwargs):
+ super().__init__(**kwargs)
+ self.hostname = hostname
+ self.path = path
+
+ def compose(self) -> ComposeResult:
+ yield LocalConfigs(hostname=self.hostname, path=self.path, classes='greencontainer configs', id='localdiffconfigs')
+ yield DiffDisplay(hostname=self.hostname, classes='greencontainer bigdisplay', id='localdiffdisplay')
+
+ yield Header(show_clock=True)
+ yield Footer()
+
+ def action_switch_local(self) -> None:
+ self.app.pop_screen()
+ self.app.push_screen('diff')
+
+ def action_end_diff(self) -> None:
+ self.app.pop_screen()
+ self.app.push_screen('lconf')
+
+
+class DiffScreen(Screen):
+ BINDINGS = [
+ ("l", "switch_local", "Local Files"),
+ ("d", "app.pop_screen", "Return")
+ ]
+
+ def __init__(self, hostname, **kwargs):
+ super().__init__(**kwargs)
+ self.hostname = hostname
+
+ def compose(self) -> ComposeResult:
+ yield Configs(hostname=self.hostname, classes='greencontainer configs', id='diffconfigs')
+ yield Versions(hostname=self.hostname, classes='greencontainer versions', id='diffversions')
+ yield DiffDisplay(hostname=self.hostname, classes='greencontainer display', id='diffdisplay')
+
+ yield Header(show_clock=True)
+ yield Footer()
+
+ def action_switch_local(self) -> None:
+ self.app.pop_screen()
+ self.app.push_screen('ldiff')
+
+
+class LocalConfScreen(Screen):
+ BINDINGS = [
+ ("l", "app.pop_screen", "DB Files"),
+ ("d", "make_diff", "Diff")
+ ]
+ def __init__(self, hostname, path, **kwargs):
+ super().__init__(**kwargs)
+ self.hostname = hostname
+ self.path = path
+
+ def compose(self) -> ComposeResult:
+ yield LocalConfigs(hostname=self.hostname, path=self.path, classes='redcontainer configs', id='localconfigs')
+ yield Display(hostname=self.hostname, classes='redcontainer bigdisplay', id='localdisplay')
+
+ yield Header(show_clock=True)
+ yield Footer()
+
+ def action_make_diff(self) -> None:
+ '''Saves the current config to a global variable, then pushes the (local) diff screen.'''
+ dis = self.query_one(Display)
+        if dis.confdata is not None:
+ global oldconf, oldconfname, oldconfver
+ oldconf, oldconfname, oldconfver = dis.confdata, dis.confname, dis.version
+ self.app.pop_screen()
+ self.app.push_screen('ldiff')
+
+class BaseScreen(Screen):
+ BINDINGS = [
+ ("l", "switch_local", "Local Files"),
+ ("d", "make_diff", "Diff"),
+ ("v", "flip_versions", "Reverse Version Order"),
+ ("r", "run_reg", "Display Run Registry"),
+ ]
+
+ def __init__(self, hostname, **kwargs):
+ super().__init__(**kwargs)
+ self.confhost = hostname
+
+ def compose(self) -> ComposeResult:
+ yield Configs(hostname=self.confhost, classes='redcontainer configs', id='configs')
+ yield Versions(hostname=self.confhost, classes='redcontainer versions', id='versions')
+ yield Display(hostname=self.confhost, classes='redcontainer display', id='display')
+
+ yield Header(show_clock=True)
+ yield Footer()
+
+ def action_switch_local(self) -> None:
+ self.app.push_screen('lconf')
+
+ def action_make_diff(self) -> None:
+ '''Saves the current config to a global variable, then pushes the diff screen.'''
+ dis = self.screen.query_one(Display)
+        if dis.confdata is not None:
+ global oldconf, oldconfname, oldconfver
+ oldconf, oldconfname, oldconfver = dis.confdata, dis.confname, dis.version
+ self.app.push_screen('diff')
+
+ def action_flip_versions(self) -> None:
+ '''
+ Tells the versions widget to display them the other way around.
+        If that widget doesn't exist on this screen, do nothing.
+ '''
+ try:
+ ver = self.screen.query_one(Versions)
+ ver.reverse = not ver.reverse
+ except:
+ pass
+
+ def action_run_reg(self) -> None:
+ self.app.push_screen('runreg')
+
+class RunRegistryScreen(Screen):
+ BINDINGS = [("r", "app.pop_screen", "Return")]
+
+ def __init__(self, hostname, **kwargs):
+ super().__init__(**kwargs)
+ self.hostname = hostname
+
+ def compose(self) -> ComposeResult:
+ yield RunSelection(hostname=self.hostname, classes='orangecontainer', id="runselect")
+ yield RunInfo(classes='orangecontainer', id="runinfo")
+ yield RegistryConfigs(hostname=self.hostname, classes='orangecontainer shortconfigs', id='regconfigs')
+ yield Display(hostname=self.hostname, classes='orangecontainer smalldisplay', id='regdisplay')
+
+ yield Header(show_clock=True)
+ yield Footer()
+
+ def nothing(self) -> None:
+        '''This function intentionally does nothing; it exists so that fake bindings have something to call.'''
+ pass
+
+class ConfViewer(App):
+ CSS_PATH = "daqconf_viewer.css"
+ BINDINGS = [("q", "quit", "Quit")]
+
+ def __init__(self, chost, cport, rhost, rport, dir, **kwargs):
+ super().__init__(**kwargs)
+ self.confhost = f"{chost}:{cport}"
+ self.reghost = f"{rhost}:{rport}"
+ self.path = dir
+
+ def on_mount(self) -> None:
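+        # All screens are installed up front so that key bindings can switch between them by name.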
+ self.install_screen(BaseScreen(hostname=self.confhost), name="base")
+ self.install_screen(LocalConfScreen(hostname=self.confhost, path=self.path), name="lconf")
+ self.install_screen(DiffScreen(hostname=self.confhost), name="diff")
+ self.install_screen(LocalDiffScreen(hostname=self.confhost, path=self.path), name="ldiff")
+ self.install_screen(RunRegistryScreen(hostname=self.reghost), name="runreg")
+ self.push_screen("base")
+
+    def action_quit(self):
+        """
+        Called when the quit button is pressed.
+        We redefine it here so that we can add the removal of the temporary directory.
+        """
+        try:
+            dir_object.cleanup()
+        except NameError:
+            pass    # No run registry data was ever downloaded, so there is no temporary directory to remove.
+        self.exit()
+
+
+CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
+@click.command(context_settings=CONTEXT_SETTINGS)
+@click.option('--conf-host', default="http://np04-srv-023", help='Machine hosting the config service')
+@click.option('--conf-port', default="31011", help='Port that the config service listens on')
+@click.option('--reg-host', default="http://dunedaq-microservices.cern.ch", help='Machine hosting the run registry service')
+@click.option('--reg-port', default="5005", help='Port that the run registry service listens on')
+@click.option('--dir', default="./", help='Top-level directory to look for local config files in')
+def start(conf_host:str, conf_port:str, reg_host:str, reg_port:str, dir:str):
+ app = ConfViewer(conf_host, conf_port, reg_host, reg_port, dir)
+ app.run()
+
+if __name__ == "__main__":
+ start()
diff --git a/scripts/daqconf_viewer.css b/scripts/daqconf_viewer.css
new file mode 100644
index 00000000..f67864ab
--- /dev/null
+++ b/scripts/daqconf_viewer.css
@@ -0,0 +1,96 @@
+/* 12 columns, 12 rows */
+Screen {
+ layout: grid;
+ layers: below above;
+ grid-size: 12 12;
+ grid-gutter: 0;
+ height: 100%;
+}
+
+RunSelection {
+ row-span: 4;
+ column-span: 6;
+ height: 100%;
+ content-align: center middle;
+}
+
+RunInfo {
+ row-span: 4;
+ column-span: 6;
+ height: 100%;
+}
+
+#buttonbox {
+ overflow-x: auto;
+}
+
+#conftree {
+ height: 90%;
+}
+
+#verticalconf {
+ height: 20;
+}
+
+.runbuttons {
+ align-horizontal: center;
+}
+
+.configs {
+ row-span: 12;
+ column-span: 3;
+ height: 100%;
+}
+
+.shortconfigs {
+ row-span: 8;
+ column-span: 3;
+ height: 100%;
+}
+
+.versions {
+ row-span: 3;
+ column-span: 9;
+ height: 100%;
+ align-vertical: middle;
+}
+.bigdisplay {
+ row-span: 12;
+ column-span: 9;
+ height: 100%;
+ align-horizontal: center;
+}
+
+.display {
+ row-span: 9;
+ column-span: 9;
+ height: 100%;
+ align-horizontal: center;
+}
+
+.smalldisplay {
+ row-span: 8;
+ column-span: 9;
+ height: 100%;
+ align-horizontal: center;
+}
+
+.redcontainer{
+ border: wide red;
+}
+
+.orangecontainer{
+ border: wide orange;
+}
+
+.greencontainer{
+ border: wide green;
+}
+
+.vbuttons {
+ align-vertical: middle;
+ width: 5%;
+ margin: 1;
+}
+
+
diff --git a/scripts/dromap_editor b/scripts/dromap_editor
index 2c2384b0..db4f6def 100755
--- a/scripts/dromap_editor
+++ b/scripts/dromap_editor
@@ -1,6 +1,7 @@
#!/usr/bin/env python
import daqconf.detreadoutmap as dromap
+import detdataformats
import click
@@ -27,11 +28,12 @@ def cli(obj):
@cli.command('load', help="Load map from file")
@click.argument('path', type=click.Path(exists=True))
-@click.option('--merge', is_flag=True, type=bool, default=False)
+@click.option('-m', '--merge', is_flag=True, type=bool, default=False)
+@click.option('-o', '--offset', type=int, default=0)
@click.pass_obj
-def load(obj, path, merge):
+def load(obj, path, merge, offset):
m = obj
- m.load(path, merge)
+ m.load(path, merge, offset)
console.print(m.as_table())
@@ -155,6 +157,97 @@ def add_eth(obj, force, src_id, **kwargs):
print(m.as_table())
+
+@cli.command("add-np-wib-crate")
+@click.argument('addrbook_path', type=click.Path(exists=True))
+@click.argument('wib_filter', type=str)
+@click.argument('ru_interface', type=str)
+@click.option('--rx-iface', type=int, default=0, help="Interface id on the receiver host")
+@click.pass_obj
+def add_np_wib_crate(obj, addrbook_path, wib_filter, ru_interface, rx_iface):
+ """Adds collections of wibs to the readout map and routes them to a destination"""
+ m = obj
+
+    with open(addrbook_path, 'r') as f:
+ addrbook = json.load(f)
+
+ import re
+
+ wib_re = re.compile(wib_filter)
+ wib_sources = { k:v.copy() for k,v in addrbook.items() if wib_re.match(k)}
+ if not wib_sources:
+ raise RuntimeError(f'No sources selected by {wib_filter}')
+
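+    # Each WIB's address-book entry also lists the WIB itself; drop that self-entry so only its data interfaces remain.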
+ for host in wib_sources:
+ del wib_sources[host][host]
+
+
+
+ ru_hosts = [k for k,v in addrbook.items() if ru_interface in v]
+ if not ru_hosts:
+ raise RuntimeError(f"Readout unit interface '{ru_interface}' not found")
+ elif len(ru_hosts) > 1:
+ raise RuntimeError(f"Readout unit interface '{ru_interface}' found on multiple hosts {ru_hosts}")
+
+ ru_host = ru_hosts[0]
+ ru_rx = addrbook[ru_host][ru_interface]
+
+ # Start from the next available src id
+ src_id = max(m.get())+1 if m.get() else 0
+
+    # Offset between the stream ids of link 0 and link 1 of a WIB
+    link_stream_offset = 0x40
+
+ for name, ifaces in wib_sources.items():
+
+ # Recover detector, crate, slot from NP wib name
+ name_tokens = name.split('-')
+ print(name_tokens)
+
+ match name_tokens[0]:
+ case 'np04':
+ det_id = detdataformats.DetID.kHD_TPC
+ case 'np02':
+ det_id = detdataformats.DetID.kVD_BottomTPC
+            case _:
+                raise ValueError(f"Unknown detector '{name_tokens[0]}'")
+
+ wib_id = int(name_tokens[-1])
+
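+        # e.g. a name like 'np04-wib-501' gives wib_id 501 -> crate 5, slot 0 (slot numbers in the name are 1-based)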
+ crate_id = (wib_id % 1000)//100
+ slot_id = (wib_id % 100)-1
+
+ for ifname, ifdata in ifaces.items():
+
+ link = int(ifname.removeprefix(name+'-d'))
+ if link not in (0,1):
+ raise ValueError(f"Recovered link id {link} from {ifname} is not 0 or 1 as expected")
+
+ for s in range(4):
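+                # Register the four streams carried on this link, one source id per stream.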
+ m.add_srcid(
+ src_id,
+ dromap.GeoID(
+ det_id=det_id.value,
+ crate_id=crate_id,
+ slot_id=slot_id,
+ stream_id=link_stream_offset*link+s
+ ),
+ 'eth',
+ protocol='udp',
+ mode='fix_rate',
+ tx_host=name,
+ tx_mac=ifdata['mac'],
+ tx_ip=ifdata['ip'],
+ rx_host=ru_host,
+ rx_mac=ru_rx['mac'],
+ rx_ip=ru_rx['ip'],
+ rx_iface=rx_iface,
+ )
+ src_id += 1
+
+ print(m.as_table())
+
+
@cli.command("save", help="Save the map to json file")
@click.argument('path', type=click.Path())
@click.pass_obj
@@ -172,8 +265,12 @@ def ipy(obj):
m = obj
- import IPython
- IPython.embed(colors="neutral")
+ try:
+ import IPython
+ IPython.embed(colors="neutral")
+ except ModuleNotFoundError:
+ print("[red]Error: IPython is not installed[/red]")
+ raise SystemExit(-1)
if __name__ == "__main__":
cli(obj=dromap.DetReadoutMapService())
diff --git a/test/scripts/daqconf_check_np04_configs.py b/test/scripts/daqconf_check_np04_configs.py
new file mode 100755
index 00000000..be735bdb
--- /dev/null
+++ b/test/scripts/daqconf_check_np04_configs.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+
+
+
+import click
+import json
+import requests
+from pathlib import Path
+from os import environ as env
+from rich import print
+
+host = 'np04-srv-023'
+port = '31011'
+
+# # http://np04-srv-023:31011/listVersions?name=thea-k8s-test
+
+
+@click.group()
+def cli():
+ pass
+
+
+@cli.command('list')
+def list_configs():
+ uri = f'http://{host}:{port}/listConfigs'
+ print(uri)
+ r = requests.get(uri)
+    if r.status_code != 200:
+        raise click.ClickException("Failed to read the configurations list from db")
+
+ res = r.json()
+ for c in sorted(res['configs']):
+ print(c)
+
+@cli.command('versions')
+@click.argument('config_name')
+def config_versions(config_name):
+ uri = f'http://{host}:{port}/listVersions?name={config_name}'
+ print(uri)
+ r = requests.get(uri)
+    if r.status_code != 200:
+        raise click.ClickException("Failed to read the configurations list from db")
+
+ res = r.json()
+ for v in res['versions']:
+ print(v)
+
+@cli.command('dump')
+@click.argument('config_name')
+@click.option('-v', '--version', type=int, default=None)
+@click.option('-w', '--write', is_flag=True, default=False)
+@click.option('-o', '--output', default=None)
+def config_dump(config_name, version, write, output):
+
+    if version is not None:
+        uri = f'http://{host}:{port}/retrieveVersion?name={config_name}&version={version}'
+    else:
+        uri = f'http://{host}:{port}/retrieveLast?name={config_name}'
+
+ print(uri)
+ r = requests.get(uri)
+    if r.status_code != 200:
+        raise click.ClickException("Failed to read the configurations list from db")
+
+ res = r.json()
+ print(res)
+
+ if write:
+        outname = output if output is not None else f"{config_name}_v{version}.json"
+ with open(outname, "w") as outfile:
+ json.dump(
+ res,
+ outfile,
+ sort_keys=True,
+ indent=4,
+ )
+
+if __name__ == '__main__':
+ cli()
+
diff --git a/test/scripts/daqconf_check_schema.py b/test/scripts/daqconf_check_schema.py
new file mode 100755
index 00000000..29432b7f
--- /dev/null
+++ b/test/scripts/daqconf_check_schema.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+
+import click
+from rich import print
+
+from dunedaq.env import get_moo_model_path
+import moo.io
+import moo.otypes
+
+@click.command()
+@click.argument("schema_name")
+def cli(schema_name):
+ moo.io.default_load_path = get_moo_model_path()
+ x = moo.otypes.load_types(schema_name)
+ print(x)
+
+if __name__ == '__main__':
+ cli()